Mirror of https://github.com/google/benchmark.git
commit 27b3bd4dc4 by Joao Paulo Magalhaes, 2017-04-27 12:38:28 +01:00
13 changed files with 119 additions and 41 deletions

appveyor.yml

@@ -1,16 +1,18 @@
 version: '{build}'
+image: Visual Studio 2017
 configuration:
   - Debug
   - Release
 environment:
   matrix:
-    - compiler: msvc-12-seh
-      generator: "Visual Studio 12 2013"
+    - compiler: msvc-15-seh
+      generator: "Visual Studio 15 2017"
-    - compiler: msvc-12-seh
-      generator: "Visual Studio 12 2013 Win64"
+    - compiler: msvc-15-seh
+      generator: "Visual Studio 15 2017 Win64"
     - compiler: msvc-14-seh
       generator: "Visual Studio 14 2015"
@@ -18,9 +20,16 @@ environment:
     - compiler: msvc-14-seh
       generator: "Visual Studio 14 2015 Win64"
+    - compiler: msvc-12-seh
+      generator: "Visual Studio 12 2013"
+    - compiler: msvc-12-seh
+      generator: "Visual Studio 12 2013 Win64"
+    - compiler: gcc-5.3.0-posix
+      generator: "MinGW Makefiles"
+      cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin'
+      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
 matrix:
   fast_finish: true
@@ -30,12 +39,6 @@ install:
+  - if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%")
+  - if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%")
-  # TODO Remove this. This is a hack to work around bogus warning messages
-  # See http://goo.gl/euguBI for more information.
-before_build:
-  - del "C:\Program Files (x86)\MSBuild\14.0\Microsoft.Common.targets\ImportAfter\Xamarin.Common.targets"
-  - del "C:\Program Files (x86)\MSBuild\12.0\Microsoft.Common.targets\ImportAfter\Xamarin.Common.targets"
 build_script:
   - md _build -Force
   - cd _build
@@ -51,4 +54,3 @@ artifacts:
     name: logs
   - path: '_build/Testing/**/*.xml'
     name: test_results

include/benchmark/benchmark_api.h

@@ -577,9 +577,17 @@ class Benchmark {
   // Set the minimum amount of time to use when running this benchmark. This
   // option overrides the `benchmark_min_time` flag.
-  // REQUIRES: `t > 0`
+  // REQUIRES: `t > 0` and `Iterations` has not been called on this benchmark.
   Benchmark* MinTime(double t);
+  // Specify the number of iterations that should be run by this benchmark.
+  // REQUIRES: `n > 0` and `MinTime` has not been called on this benchmark.
+  //
+  // NOTE: This function should only be used when *exact* iteration control is
+  // needed and never to control or limit how long a benchmark runs; to do
+  // that, use `--benchmark_min_time=N` or `MinTime(...)` instead.
+  Benchmark* Iterations(size_t n);
   // Specify the number of times to repeat this benchmark. This option
   // overrides the `benchmark_repetitions` flag.
   // REQUIRES: `n > 0`
@@ -668,6 +676,7 @@ class Benchmark {
   TimeUnit time_unit_;
   int range_multiplier_;
   double min_time_;
+  size_t iterations_;
   int repetitions_;
   bool use_real_time_;
   bool use_manual_time_;
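To make the new option concrete, here is a minimal usage sketch. It is not part of the commit; `BM_copy` and its buffer size are invented for illustration, and everything else comes from the `benchmark/benchmark_api.h` API shown above:

#include "benchmark/benchmark_api.h"
#include <cstring>

static void BM_copy(benchmark::State& state) {
  char src[64] = {}, dst[64];
  while (state.KeepRunning())
    benchmark::DoNotOptimize(std::memcpy(dst, src, sizeof(src)));
}

// Runs exactly 1000 iterations, bypassing the time-based iteration search.
BENCHMARK(BM_copy)->Iterations(1000);
// Time-based control instead; calling ->Iterations(...) on this same
// registration would trip the CHECKs added in src/benchmark_register.cc below.
BENCHMARK(BM_copy)->MinTime(2.0);

BENCHMARK_MAIN()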

src/CMakeLists.txt

@@ -49,7 +49,7 @@ write_basic_package_version_file(
   "${version_config}" VERSION ${GIT_VERSION} COMPATIBILITY SameMajorVersion
 )
-configure_file("${CMAKE_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY)
+configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY)
 # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable)
 install(

src/benchmark.cc

@@ -285,7 +285,8 @@ std::vector<BenchmarkReporter::Run> RunBenchmark(
     std::vector<BenchmarkReporter::Run>* complexity_reports) {
   std::vector<BenchmarkReporter::Run> reports;  // return value
-  size_t iters = 1;
+  const bool has_explicit_iteration_count = b.iterations != 0;
+  size_t iters = has_explicit_iteration_count ? b.iterations : 1;
   std::unique_ptr<internal::ThreadManager> manager;
   std::vector<std::thread> pool(b.threads - 1);
   const int repeats =
@@ -295,7 +296,7 @@ std::vector<BenchmarkReporter::Run> RunBenchmark(
       (b.report_mode == internal::RM_Unspecified
            ? FLAGS_benchmark_report_aggregates_only
            : b.report_mode == internal::RM_ReportAggregatesOnly);
-  for (int i = 0; i < repeats; i++) {
+  for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
     for (;;) {
       // Try benchmark
       VLOG(2) << "Running " << b.name << " for " << iters << "\n";
@@ -331,10 +332,20 @@ std::vector<BenchmarkReporter::Run> RunBenchmark(
       const double min_time =
           !IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time;
-      // If this was the first run, was elapsed time or cpu time large enough?
-      // If this is not the first run, go with the current value of iter.
-      if ((i > 0) || results.has_error_ || (iters >= kMaxIterations) ||
-          (seconds >= min_time) || (results.real_time_used >= 5 * min_time)) {
+      // Determine if this run should be reported; either it has run for a
+      // sufficient amount of time or an error was reported.
+      const bool should_report = repetition_num > 0
+          || has_explicit_iteration_count  // An exact iteration count was requested
+          || results.has_error_
+          || iters >= kMaxIterations
+          || seconds >= min_time  // the elapsed time is large enough
+          // CPU time is specified but the elapsed real time greatly exceeds
+          // the minimum time. Note that user-provided timers are exempt from
+          // this sanity check.
+          || ((results.real_time_used >= 5 * min_time) && !b.use_manual_time);
+      if (should_report) {
         BenchmarkReporter::Run report =
             CreateRunReport(b, results, iters, seconds);
         if (!report.error_occurred && b.complexity != oNone)
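For context, the enclosing `for (;;)` loop re-runs the benchmark with a growing `iters` until a run qualifies for reporting; with an explicit iteration count the very first run now qualifies. A standalone sketch of the growth step, assuming the 1.4x head-room multiplier the library uses when predicting the next count (the helper name `NextIters` is hypothetical, and the error/reporting conditions are simplified to the time-based ones):

#include <algorithm>
#include <cstddef>

// Sketch only: pick the next iteration count from the last timed run,
// or return 0 to signal that the run should be reported instead.
size_t NextIters(size_t iters, double seconds, double min_time,
                 bool has_explicit_iteration_count, size_t max_iters) {
  if (has_explicit_iteration_count ||  // exact count requested: report as-is
      seconds >= min_time || iters >= max_iters)
    return 0;
  // Aim past min_time with some head-room; avoid division by zero.
  double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
  if (multiplier <= 1.0) multiplier = 2.0;
  double next = std::max(multiplier * iters, iters + 1.0);
  return static_cast<size_t>(std::min<double>(next, max_iters));
}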

src/benchmark_api_internal.h

@@ -28,6 +28,7 @@ struct Benchmark::Instance {
   bool last_benchmark_instance;
   int repetitions;
   double min_time;
+  size_t iterations;
   int threads;  // Number of concurrent threads to use
 };

src/benchmark_register.cc

@@ -31,6 +31,7 @@
 #include <fstream>
 #include <iostream>
 #include <memory>
+#include <sstream>
 #include <thread>

 #include "check.h"
@@ -143,6 +144,7 @@ bool BenchmarkFamilies::FindBenchmarks(
       instance.time_unit = family->time_unit_;
       instance.range_multiplier = family->range_multiplier_;
       instance.min_time = family->min_time_;
+      instance.iterations = family->iterations_;
       instance.repetitions = family->repetitions_;
       instance.use_real_time = family->use_real_time_;
       instance.use_manual_time = family->use_manual_time_;
@@ -162,17 +164,18 @@ bool BenchmarkFamilies::FindBenchmarks(
               StringPrintF("%s:", family->arg_names_[arg_i].c_str());
         }
       }
-      instance.name += std::to_string(arg);
+      instance.name += StringPrintF("%d", arg);
       ++arg_i;
     }

-    if (!IsZero(family->min_time_)) {
+    if (!IsZero(family->min_time_))
       instance.name += StringPrintF("/min_time:%0.3f", family->min_time_);
-    }
-    if (family->repetitions_ != 0) {
+    if (family->iterations_ != 0)
+      instance.name += StringPrintF("/iterations:%d", family->iterations_);
+    if (family->repetitions_ != 0)
       instance.name += StringPrintF("/repeats:%d", family->repetitions_);
-    }
     if (family->use_manual_time_) {
       instance.name += "/manual_time";
     } else if (family->use_real_time_) {
@@ -219,6 +222,7 @@ Benchmark::Benchmark(const char* name)
       time_unit_(kNanosecond),
       range_multiplier_(kRangeMultiplier),
       min_time_(0),
+      iterations_(0),
       repetitions_(0),
       use_real_time_(false),
       use_manual_time_(false),
@@ -344,6 +348,22 @@ Benchmark* Benchmark::RangeMultiplier(int multiplier) {
   return this;
 }

+Benchmark* Benchmark::MinTime(double t) {
+  CHECK(t > 0.0);
+  CHECK(iterations_ == 0);
+  min_time_ = t;
+  return this;
+}
+
+Benchmark* Benchmark::Iterations(size_t n) {
+  CHECK(n > 0);
+  CHECK(IsZero(min_time_));
+  iterations_ = n;
+  return this;
+}
+
 Benchmark* Benchmark::Repetitions(int n) {
   CHECK(n > 0);
   repetitions_ = n;
@@ -355,12 +375,6 @@ Benchmark* Benchmark::ReportAggregatesOnly(bool value) {
   return this;
 }

-Benchmark* Benchmark::MinTime(double t) {
-  CHECK(t > 0.0);
-  min_time_ = t;
-  return this;
-}
-
 Benchmark* Benchmark::UseRealTime() {
   CHECK(!use_manual_time_)
       << "Cannot set UseRealTime and UseManualTime simultaneously.";

src/string_util.cc

@@ -89,7 +89,7 @@ std::string FormatString(const char* msg, va_list args) {
   std::size_t size = 256;
   char local_buff[256];
-  auto ret = std::vsnprintf(local_buff, size, msg, args_cp);
+  auto ret = vsnprintf(local_buff, size, msg, args_cp);
   va_end(args_cp);
@@ -104,7 +104,7 @@ std::string FormatString(const char* msg, va_list args) {
     // we did not provide a long enough buffer on our first attempt.
     size = (size_t)ret + 1;  // + 1 for the null byte
     std::unique_ptr<char[]> buff(new char[size]);
-    ret = std::vsnprintf(buff.get(), size, msg, args);
+    ret = vsnprintf(buff.get(), size, msg, args);
     CHECK(ret > 0 && ((size_t)ret) < size);
     return buff.get();
   }

src/complexity.cc

@@ -35,9 +35,9 @@ BigOFunc* FittingCurve(BigO complexity) {
     case oNCubed:
       return [](int n) -> double { return std::pow(n, 3); };
     case oLogN:
-      return [](int n) { return std::log2(n); };
+      return [](int n) { return log2(n); };
     case oNLogN:
-      return [](int n) { return n * std::log2(n); };
+      return [](int n) { return n * log2(n); };
     case o1:
     default:
       return [](int) { return 1.0; };

test/CMakeLists.txt

@@ -2,6 +2,25 @@
 find_package(Threads REQUIRED)

+# NOTE: Some tests use `<cassert>` to perform the test. Therefore we must
+# strip -DNDEBUG from the default CMake flags in DEBUG mode.
+string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE)
+if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" )
+  add_definitions( -UNDEBUG )
+  add_definitions(-DTEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS)
+  # Also remove /D NDEBUG to avoid MSVC warnings about conflicting defines.
+  foreach (flags_var_to_scrub
+      CMAKE_CXX_FLAGS_RELEASE
+      CMAKE_CXX_FLAGS_RELWITHDEBINFO
+      CMAKE_CXX_FLAGS_MINSIZEREL
+      CMAKE_C_FLAGS_RELEASE
+      CMAKE_C_FLAGS_RELWITHDEBINFO
+      CMAKE_C_FLAGS_MINSIZEREL)
+    string (REGEX REPLACE "(^| )[/-]D *NDEBUG($| )" " "
+      "${flags_var_to_scrub}" "${${flags_var_to_scrub}}")
+  endforeach()
+endif()
+
 # NOTE: These flags must be added after find_package(Threads REQUIRED) otherwise
 # they will break the configuration check.
 if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
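The effect of the added block: in non-Debug configurations, where CMake's default per-config flags define `NDEBUG`, the regex strips that define so the `<cassert>`-based tests keep their assertions, while `TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS` records that the library itself was still built without them (the `test/diagnostics_test.cc` change below keys on exactly this macro). For instance, a typical MSVC `CMAKE_CXX_FLAGS_RELEASE` of `/MD /O2 /Ob2 /DNDEBUG` would be rewritten to `/MD /O2 /Ob2`.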

test/complexity_test.cc

@@ -141,7 +141,7 @@ BENCHMARK(BM_Complexity_O_N_log_N)
 BENCHMARK(BM_Complexity_O_N_log_N)
     ->RangeMultiplier(2)
     ->Range(1 << 10, 1 << 16)
-    ->Complexity([](int n) { return n * std::log2(n); });
+    ->Complexity([](int n) { return n * log2(n); });
 BENCHMARK(BM_Complexity_O_N_log_N)
     ->RangeMultiplier(2)
     ->Range(1 << 10, 1 << 16)

test/diagnostics_test.cc

@@ -26,7 +26,7 @@ void TestHandler() {
 }

 void try_invalid_pause_resume(benchmark::State& state) {
-#if !defined(NDEBUG) && !defined(TEST_HAS_NO_EXCEPTIONS)
+#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS)
   try {
     state.PauseTiming();
     std::abort();

test/options_test.cc

@@ -1,8 +1,12 @@
 #include "benchmark/benchmark_api.h"

 #include <chrono>
 #include <thread>

+#if defined(NDEBUG)
+#undef NDEBUG
+#endif
+#include <cassert>
+
 void BM_basic(benchmark::State& state) {
   while (state.KeepRunning()) {
   }
@@ -40,4 +44,22 @@ void CustomArgs(benchmark::internal::Benchmark* b) {

 BENCHMARK(BM_basic)->Apply(CustomArgs);

+void BM_explicit_iteration_count(benchmark::State& st) {
+  // Test that benchmarks specified with an explicit iteration count are
+  // only run once.
+  static bool invoked_before = false;
+  assert(!invoked_before);
+  invoked_before = true;
+
+  // Test that the requested iteration count is respected.
+  assert(st.max_iterations == 42);
+  size_t actual_iterations = 0;
+  while (st.KeepRunning())
+    ++actual_iterations;
+  assert(st.iterations() == st.max_iterations);
+  assert(actual_iterations == 42);
+}
+BENCHMARK(BM_explicit_iteration_count)->Iterations(42);
+
 BENCHMARK_MAIN()

test/register_benchmark_test.cc

@@ -114,14 +114,14 @@ void TestRegistrationAtRuntime() {
 #endif

 #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
   {
-    int x = 42;
+    const char* x = "42";
     auto capturing_lam = [=](benchmark::State& st) {
       while (st.KeepRunning()) {
       }
-      st.SetLabel(std::to_string(x));
+      st.SetLabel(x);
     };
     benchmark::RegisterBenchmark("lambda_benchmark", capturing_lam);
-    AddCases({{"lambda_benchmark", "42"}});
+    AddCases({{"lambda_benchmark", x}});
   }
 #endif
 }