From 867f9145a0a45f8b993cec8b48309c19391acaa0 Mon Sep 17 00:00:00 2001 From: Ismael Date: Wed, 1 Jun 2016 23:08:01 +0200 Subject: [PATCH 01/12] added lambdas to complexity report --- include/benchmark/benchmark_api.h | 32 ++- include/benchmark/reporter.h | 43 ++-- src/benchmark.cc | 161 +++++++------- src/complexity.cc | 119 +++++------ src/complexity.h | 20 +- src/console_reporter.cc | 4 +- src/csv_reporter.cc | 7 +- src/json_reporter.cc | 29 ++- test/complexity_test.cc | 340 ++++++++++++++++++++++++------ 9 files changed, 493 insertions(+), 262 deletions(-) diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h index e705d751..8d5189a1 100644 --- a/include/benchmark/benchmark_api.h +++ b/include/benchmark/benchmark_api.h @@ -152,6 +152,7 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); #include #include #include +#include #include "macros.h" @@ -247,15 +248,20 @@ enum BigO { oNCubed, oLogN, oNLogN, - oAuto + oAuto, + oLambda }; +// BigOFunc is passed to a benchmark in order to specify the asymptotic +// computational complexity for the benchmark. +typedef double(BigOFunc)(size_t); + // State is passed to a running Benchmark and contains state for the // benchmark to use. class State { public: State(size_t max_iters, bool has_x, int x, bool has_y, int y, - int thread_i, int n_threads); + int thread_i, int n_threads); // Returns true iff the benchmark should continue through another iteration. // NOTE: A benchmark may not return from the test until KeepRunning() has @@ -268,13 +274,13 @@ public: } bool const res = total_iterations_++ < max_iterations; if (BENCHMARK_BUILTIN_EXPECT(!res, false)) { - assert(started_ && (!finished_ || error_occurred_)); - if (!error_occurred_) { - PauseTiming(); - } - // Total iterations now is one greater than max iterations. Fix this. - total_iterations_ = max_iterations; - finished_ = true; + assert(started_ && (!finished_ || error_occurred_)); + if (!error_occurred_) { + PauseTiming(); + } + // Total iterations now is one greater than max iterations. Fix this. + total_iterations_ = max_iterations; + finished_ = true; } return res; } @@ -359,7 +365,7 @@ public: // represent the length of N. BENCHMARK_ALWAYS_INLINE void SetComplexityN(size_t complexity_n) { - complexity_n_ = complexity_n; + complexity_n_ = complexity_n; } BENCHMARK_ALWAYS_INLINE @@ -533,10 +539,14 @@ public: // to control how many iterations are run, and in the printing of items/second // or MB/second values. Benchmark* UseManualTime(); - + // Set the asymptotic computational complexity for the benchmark. If called // the asymptotic computational complexity will be shown on the output. Benchmark* Complexity(BigO complexity = benchmark::oAuto); + + // Set the asymptotic computational complexity for the benchmark. If called + // the asymptotic computational complexity will be shown on the output. + Benchmark* Complexity(BigOFunc* complexity); // Support for running multiple copies of the same benchmark concurrently // in multiple threads. This may be useful when measuring the scaling diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h index 02627547..4c7bff3f 100644 --- a/include/benchmark/reporter.h +++ b/include/benchmark/reporter.h @@ -83,11 +83,12 @@ class BenchmarkReporter { // This is set to 0.0 if memory tracing is not enabled. 
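The `BigOFunc` typedef and the `Complexity(BigOFunc*)` overload introduced in benchmark_api.h above accept any captureless lambda (or plain function) of shape `double(size_t)`. A minimal usage sketch follows; the workload in `BM_SortVector` is hypothetical, while `SetComplexityN`, `range_x` and the `BENCHMARK` registration chain are the existing API:

```c++
#include <algorithm>
#include <cmath>
#include <vector>
#include "benchmark/benchmark_api.h"

// Hypothetical O(N log N) workload. state.range_x() supplies N, and
// SetComplexityN() records it so the fitter knows each run's problem size.
static void BM_SortVector(benchmark::State& state) {
  while (state.KeepRunning()) {
    std::vector<int> v(state.range_x(), 42);
    std::sort(v.begin(), v.end());
  }
  state.SetComplexityN(state.range_x());
}

// A captureless lambda decays to a BigOFunc* (double(size_t)).
BENCHMARK(BM_SortVector)
    ->RangeMultiplier(2)->Range(1 << 10, 1 << 16)
    ->Complexity([](size_t n) { return n * std::log2(n); });
```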
double max_heapbytes_used; - + // Keep track of arguments to compute asymptotic complexity - BigO complexity; - int complexity_n; - + BigO complexity; + BigOFunc* complexity_lambda; + size_t complexity_n; + // Inform print function whether the current run is a complexity report bool report_big_o; bool report_rms; @@ -113,7 +114,7 @@ class BenchmarkReporter { // 'reports' contains additional entries representing the asymptotic // complexity and RMS of that benchmark family. virtual void ReportRuns(const std::vector& report) = 0; - + // Called once and only once after ever group of benchmarks is run and // reported. virtual void Finalize() {} @@ -159,7 +160,7 @@ class ConsoleReporter : public BenchmarkReporter { virtual bool ReportContext(const Context& context); virtual void ReportRuns(const std::vector& reports); -protected: + protected: virtual void PrintRunData(const Run& report); size_t name_field_width_; @@ -189,25 +190,25 @@ private: inline const char* GetTimeUnitString(TimeUnit unit) { switch (unit) { - case kMillisecond: - return "ms"; - case kMicrosecond: - return "us"; - case kNanosecond: - default: - return "ns"; + case kMillisecond: + return "ms"; + case kMicrosecond: + return "us"; + case kNanosecond: + default: + return "ns"; } } inline double GetTimeUnitMultiplier(TimeUnit unit) { - switch (unit) { - case kMillisecond: - return 1e3; - case kMicrosecond: - return 1e6; - case kNanosecond: - default: - return 1e9; + switch (unit) { + case kMillisecond: + return 1e3; + case kMicrosecond: + return 1e6; + case kNanosecond: + default: + return 1e9; } } diff --git a/src/benchmark.cc b/src/benchmark.cc index d86705ef..2b55ac74 100644 --- a/src/benchmark.cc +++ b/src/benchmark.cc @@ -124,7 +124,7 @@ struct ThreadStats { ThreadStats() : bytes_processed(0), items_processed(0), complexity_n(0) {} int64_t bytes_processed; int64_t items_processed; - int complexity_n; + size_t complexity_n; }; // Timer management class @@ -140,7 +140,7 @@ class TimerManager { manual_time_used_(0), num_finalized_(0), phase_number_(0), - entered_(0) + entered_(0) { } @@ -277,11 +277,11 @@ class TimerManager { int phase_number_cp = phase_number_; auto cb = [this, phase_number_cp]() { return this->phase_number_ > phase_number_cp || - entered_ == running_threads_; // A thread has aborted in error + entered_ == running_threads_; // A thread has aborted in error }; phase_condition_.wait(ml.native_handle(), cb); if (phase_number_ > phase_number_cp) - return false; + return false; // else (running_threads_ == entered_) and we are the last thread. 
} // Last thread has reached the barrier @@ -311,6 +311,7 @@ struct Benchmark::Instance { bool use_real_time; bool use_manual_time; BigO complexity; + BigOFunc* complexity_lambda; bool last_benchmark_instance; int repetitions; double min_time; @@ -356,6 +357,7 @@ public: void UseRealTime(); void UseManualTime(); void Complexity(BigO complexity); + void ComplexityLambda(BigOFunc* complexity); void Threads(int t); void ThreadRange(int min_threads, int max_threads); void ThreadPerCpu(); @@ -376,6 +378,7 @@ private: bool use_real_time_; bool use_manual_time_; BigO complexity_; + BigOFunc* complexity_lambda_; std::vector thread_counts_; BenchmarkImp& operator=(BenchmarkImp const&); @@ -440,6 +443,7 @@ bool BenchmarkFamilies::FindBenchmarks( instance.use_real_time = family->use_real_time_; instance.use_manual_time = family->use_manual_time_; instance.complexity = family->complexity_; + instance.complexity_lambda = family->complexity_lambda_; instance.threads = num_threads; instance.multithreaded = !(family->thread_counts_.empty()); @@ -567,6 +571,10 @@ void BenchmarkImp::Complexity(BigO complexity){ complexity_ = complexity; } +void BenchmarkImp::ComplexityLambda(BigOFunc* complexity) { + complexity_lambda_ = complexity; +} + void BenchmarkImp::Threads(int t) { CHECK_GT(t, 0); thread_counts_.push_back(t); @@ -691,6 +699,12 @@ Benchmark* Benchmark::Complexity(BigO complexity) { return this; } +Benchmark* Benchmark::Complexity(BigOFunc* complexity) { + imp_->Complexity(oLambda); + imp_->ComplexityLambda(complexity); + return this; +} + Benchmark* Benchmark::Threads(int t) { imp_->Threads(t); return this; @@ -717,7 +731,7 @@ void FunctionBenchmark::Run(State& st) { } // end namespace internal namespace { - + // Execute one thread of benchmark b for the specified number of iterations. // Adds the stats collected for the thread into *total. void RunInThread(const benchmark::internal::Benchmark::Instance* b, @@ -731,15 +745,15 @@ void RunInThread(const benchmark::internal::Benchmark::Instance* b, MutexLock l(GetBenchmarkLock()); total->bytes_processed += st.bytes_processed(); total->items_processed += st.items_processed(); - total->complexity_n += st.complexity_length_n(); + total->complexity_n += st.complexity_length_n(); } timer_manager->Finalize(); } void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, - BenchmarkReporter* br, - std::vector& complexity_reports) + BenchmarkReporter* br, + std::vector& complexity_reports) EXCLUDES(GetBenchmarkLock()) { size_t iters = 1; @@ -750,7 +764,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, pool.resize(b.threads); const int repeats = b.repetitions != 0 ? 
b.repetitions - : FLAGS_benchmark_repetitions; + : FLAGS_benchmark_repetitions; for (int i = 0; i < repeats; i++) { std::string mem; for (;;) { @@ -830,27 +844,28 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, report.time_unit = b.time_unit; if (!report.error_occurred) { - double bytes_per_second = 0; - if (total.bytes_processed > 0 && seconds > 0.0) { - bytes_per_second = (total.bytes_processed / seconds); - } - double items_per_second = 0; - if (total.items_processed > 0 && seconds > 0.0) { - items_per_second = (total.items_processed / seconds); - } + double bytes_per_second = 0; + if (total.bytes_processed > 0 && seconds > 0.0) { + bytes_per_second = (total.bytes_processed / seconds); + } + double items_per_second = 0; + if (total.items_processed > 0 && seconds > 0.0) { + items_per_second = (total.items_processed / seconds); + } - if (b.use_manual_time) { - report.real_accumulated_time = manual_accumulated_time; - } else { - report.real_accumulated_time = real_accumulated_time; - } - report.cpu_accumulated_time = cpu_accumulated_time; - report.bytes_per_second = bytes_per_second; - report.items_per_second = items_per_second; - report.complexity_n = total.complexity_n; - report.complexity = b.complexity; - if(report.complexity != oNone) - complexity_reports.push_back(report); + if (b.use_manual_time) { + report.real_accumulated_time = manual_accumulated_time; + } else { + report.real_accumulated_time = real_accumulated_time; + } + report.cpu_accumulated_time = cpu_accumulated_time; + report.bytes_per_second = bytes_per_second; + report.items_per_second = items_per_second; + report.complexity_n = total.complexity_n; + report.complexity = b.complexity; + report.complexity_lambda = b.complexity_lambda; + if(report.complexity != oNone) + complexity_reports.push_back(report); } reports.push_back(report); @@ -878,17 +893,17 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, } std::vector additional_run_stats = ComputeStats(reports); reports.insert(reports.end(), additional_run_stats.begin(), - additional_run_stats.end()); + additional_run_stats.end()); if((b.complexity != oNone) && b.last_benchmark_instance) { additional_run_stats = ComputeBigO(complexity_reports); reports.insert(reports.end(), additional_run_stats.begin(), - additional_run_stats.end()); + additional_run_stats.end()); complexity_reports.clear(); } br->ReportRuns(reports); - + if (b.multithreaded) { for (std::thread& thread : pool) thread.join(); @@ -949,56 +964,56 @@ void State::SetLabel(const char* label) { } namespace internal { -namespace { + namespace { -void RunMatchingBenchmarks(const std::vector& benchmarks, - BenchmarkReporter* reporter) { - CHECK(reporter != nullptr); + void RunMatchingBenchmarks(const std::vector& benchmarks, + BenchmarkReporter* reporter) { + CHECK(reporter != nullptr); - // Determine the width of the name field using a minimum width of 10. - bool has_repetitions = FLAGS_benchmark_repetitions > 1; - size_t name_field_width = 10; - for (const Benchmark::Instance& benchmark : benchmarks) { - name_field_width = - std::max(name_field_width, benchmark.name.size()); - has_repetitions |= benchmark.repetitions > 1; - } - if (has_repetitions) - name_field_width += std::strlen("_stddev"); + // Determine the width of the name field using a minimum width of 10. 
+ bool has_repetitions = FLAGS_benchmark_repetitions > 1; + size_t name_field_width = 10; + for (const Benchmark::Instance& benchmark : benchmarks) { + name_field_width = + std::max(name_field_width, benchmark.name.size()); + has_repetitions |= benchmark.repetitions > 1; + } + if (has_repetitions) + name_field_width += std::strlen("_stddev"); - // Print header here - BenchmarkReporter::Context context; - context.num_cpus = NumCPUs(); - context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f; + // Print header here + BenchmarkReporter::Context context; + context.num_cpus = NumCPUs(); + context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f; - context.cpu_scaling_enabled = CpuScalingEnabled(); - context.name_field_width = name_field_width; + context.cpu_scaling_enabled = CpuScalingEnabled(); + context.name_field_width = name_field_width; - // Keep track of runing times of all instances of current benchmark - std::vector complexity_reports; + // Keep track of runing times of all instances of current benchmark + std::vector complexity_reports; - if (reporter->ReportContext(context)) { - for (const auto& benchmark : benchmarks) { - RunBenchmark(benchmark, reporter, complexity_reports); + if (reporter->ReportContext(context)) { + for (const auto& benchmark : benchmarks) { + RunBenchmark(benchmark, reporter, complexity_reports); + } + } } - } -} -std::unique_ptr GetDefaultReporter() { - typedef std::unique_ptr PtrType; - if (FLAGS_benchmark_format == "console") { - return PtrType(new ConsoleReporter); - } else if (FLAGS_benchmark_format == "json") { - return PtrType(new JSONReporter); - } else if (FLAGS_benchmark_format == "csv") { - return PtrType(new CSVReporter); - } else { - std::cerr << "Unexpected format: '" << FLAGS_benchmark_format << "'\n"; - std::exit(1); - } -} + std::unique_ptr GetDefaultReporter() { + typedef std::unique_ptr PtrType; + if (FLAGS_benchmark_format == "console") { + return PtrType(new ConsoleReporter); + } else if (FLAGS_benchmark_format == "json") { + return PtrType(new JSONReporter); + } else if (FLAGS_benchmark_format == "csv") { + return PtrType(new CSVReporter); + } else { + std::cerr << "Unexpected format: '" << FLAGS_benchmark_format << "'\n"; + std::exit(1); + } + } -} // end namespace + } // end namespace } // end namespace internal size_t RunSpecifiedBenchmarks() { diff --git a/src/complexity.cc b/src/complexity.cc index 3e42f5dd..97f86d8e 100644 --- a/src/complexity.cc +++ b/src/complexity.cc @@ -25,43 +25,43 @@ #include namespace benchmark { - + // Internal function to calculate the different scalability forms -std::function FittingCurve(BigO complexity) { +BigOFunc* FittingCurve(BigO complexity) { switch (complexity) { - case oN: - return [](int n) {return n; }; - case oNSquared: - return [](int n) {return n*n; }; - case oNCubed: - return [](int n) {return n*n*n; }; - case oLogN: - return [](int n) {return log2(n); }; - case oNLogN: - return [](int n) {return n * log2(n); }; - case o1: - default: - return [](int) {return 1; }; + case oN: + return [](size_t n) -> double {return n; }; + case oNSquared: + return [](size_t n) -> double {return n * n; }; + case oNCubed: + return [](size_t n) -> double {return n * n * n; }; + case oLogN: + return [](size_t n) {return log2(n); }; + case oNLogN: + return [](size_t n) {return n * log2(n); }; + case o1: + default: + return [](size_t) {return 1.0; }; } } // Function to return an string for the calculated complexity std::string GetBigOString(BigO complexity) { switch (complexity) { - case oN: - return "* N"; - case 
oNSquared: - return "* N**2"; - case oNCubed: - return "* N**3"; - case oLogN: - return "* lgN"; - case oNLogN: - return "* NlgN"; - case o1: - return "* 1"; - default: - return ""; + case oN: + return "N"; + case oNSquared: + return "N^2"; + case oNCubed: + return "N^3"; + case oLogN: + return "lgN"; + case oNLogN: + return "NlgN"; + case o1: + return "(1)"; + default: + return "f(N)"; } } @@ -75,21 +75,9 @@ std::string GetBigOString(BigO complexity) { // For a deeper explanation on the algorithm logic, look the README file at // http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit -// This interface is currently not used from the oustide, but it has been -// provided for future upgrades. If in the future it is not needed to support -// Cxx03, then all the calculations could be upgraded to use lambdas because -// they are more powerful and provide a cleaner inferface than enumerators, -// but complete implementation with lambdas will not work for Cxx03 -// (e.g. lack of std::function). -// In case lambdas are implemented, the interface would be like : -// -> Complexity([](int n) {return n;};) -// and any arbitrary and valid equation would be allowed, but the option to -// calculate the best fit to the most common scalability curves will still -// be kept. - -LeastSq CalculateLeastSq(const std::vector& n, - const std::vector& time, - std::function fitting_curve) { +LeastSq MinimalLeastSq(const std::vector& n, + const std::vector& time, + BigOFunc* fitting_curve) { double sigma_gn = 0.0; double sigma_gn_squared = 0.0; double sigma_time = 0.0; @@ -105,6 +93,7 @@ LeastSq CalculateLeastSq(const std::vector& n, } LeastSq result; + result.complexity = oLambda; // Calculate complexity. result.coef = sigma_time_gn / sigma_gn_squared; @@ -144,19 +133,19 @@ LeastSq MinimalLeastSq(const std::vector& n, oLogN, oN, oNLogN, oNSquared, oNCubed }; // Take o1 as default best fitting curve - best_fit = CalculateLeastSq(n, time, FittingCurve(o1)); + best_fit = MinimalLeastSq(n, time, FittingCurve(o1)); best_fit.complexity = o1; // Compute all possible fitting curves and stick to the best one for (const auto& fit : fit_curves) { - LeastSq current_fit = CalculateLeastSq(n, time, FittingCurve(fit)); + LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit)); if (current_fit.rms < best_fit.rms) { best_fit = current_fit; best_fit.complexity = fit; } } } else { - best_fit = CalculateLeastSq(n, time, FittingCurve(complexity)); + best_fit = MinimalLeastSq(n, time, FittingCurve(complexity)); best_fit.complexity = complexity; } @@ -164,14 +153,14 @@ LeastSq MinimalLeastSq(const std::vector& n, } std::vector ComputeStats( - const std::vector& reports) + const std::vector& reports) { typedef BenchmarkReporter::Run Run; std::vector results; auto error_count = std::count_if( - reports.begin(), reports.end(), - [](Run const& run) {return run.error_occurred;}); + reports.begin(), reports.end(), + [](Run const& run) {return run.error_occurred;}); if (reports.size() - error_count < 2) { // We don't report aggregated data if there was a single run. 
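The least-squares fit in `MinimalLeastSq` above has a single free parameter, so it admits a closed-form solution. With run sizes $n_i$, per-iteration times $t_i$, and a fitting curve $g$ (one of the enumerated curves or a user-supplied `BigOFunc`), minimizing $\sum_i (t_i - c\,g(n_i))^2$ over the coefficient $c$ yields exactly the ratio the code accumulates:

$$c = \frac{\sum_i t_i\, g(n_i)}{\sum_i g(n_i)^2}$$

The `rms` field is then the root-mean-square of the residuals $t_i - c\,g(n_i)$; in the library it is additionally normalized by the mean time, a detail that falls outside the lines shown in this hunk.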
@@ -193,9 +182,9 @@ std::vector ComputeStats( if (run.error_occurred) continue; real_accumulated_time_stat += - Stat1_d(run.real_accumulated_time/run.iterations, run.iterations); + Stat1_d(run.real_accumulated_time/run.iterations, run.iterations); cpu_accumulated_time_stat += - Stat1_d(run.cpu_accumulated_time/run.iterations, run.iterations); + Stat1_d(run.cpu_accumulated_time/run.iterations, run.iterations); items_per_second_stat += Stat1_d(run.items_per_second, run.iterations); bytes_per_second_stat += Stat1_d(run.bytes_per_second, run.iterations); } @@ -205,9 +194,9 @@ std::vector ComputeStats( mean_data.benchmark_name = reports[0].benchmark_name + "_mean"; mean_data.iterations = run_iterations; mean_data.real_accumulated_time = real_accumulated_time_stat.Mean() * - run_iterations; + run_iterations; mean_data.cpu_accumulated_time = cpu_accumulated_time_stat.Mean() * - run_iterations; + run_iterations; mean_data.bytes_per_second = bytes_per_second_stat.Mean(); mean_data.items_per_second = items_per_second_stat.Mean(); @@ -225,9 +214,9 @@ std::vector ComputeStats( stddev_data.report_label = mean_data.report_label; stddev_data.iterations = 0; stddev_data.real_accumulated_time = - real_accumulated_time_stat.StdDev(); + real_accumulated_time_stat.StdDev(); stddev_data.cpu_accumulated_time = - cpu_accumulated_time_stat.StdDev(); + cpu_accumulated_time_stat.StdDev(); stddev_data.bytes_per_second = bytes_per_second_stat.StdDev(); stddev_data.items_per_second = items_per_second_stat.StdDev(); @@ -237,7 +226,7 @@ std::vector ComputeStats( } std::vector ComputeBigO( - const std::vector& reports) + const std::vector& reports) { typedef BenchmarkReporter::Run Run; std::vector results; @@ -256,14 +245,16 @@ std::vector ComputeBigO( cpu_time.push_back(run.cpu_accumulated_time/run.iterations); } - LeastSq result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity); - - // result_cpu.complexity is passed as parameter to result_real because in case - // reports[0].complexity is oAuto, the noise on the measured data could make - // the best fit function of Cpu and Real differ. In order to solve this, we - // take the best fitting function for the Cpu, and apply it to Real data. - LeastSq result_real = MinimalLeastSq(n, real_time, result_cpu.complexity); + LeastSq result_cpu; + LeastSq result_real; + if (reports[0].complexity != oLambda) { + result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity); + result_real = MinimalLeastSq(n, real_time, result_cpu.complexity); + } else { + result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda); + result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda); + } std::string benchmark_name = reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/')); // Get the data from the accumulator to BenchmarkReporter::Run's. diff --git a/src/complexity.h b/src/complexity.h index be095a96..798154a2 100644 --- a/src/complexity.h +++ b/src/complexity.h @@ -26,15 +26,15 @@ namespace benchmark { -// Return a vector containing the mean and standard devation information for -// the specified list of reports. If 'reports' contains less than two -// non-errored runs an empty vector is returned -std::vector ComputeStats( + // Return a vector containing the mean and standard devation information for + // the specified list of reports. 
If 'reports' contains less than two + // non-errored runs an empty vector is returned + std::vector ComputeStats( const std::vector& reports); -// Return a vector containing the bigO and RMS information for the specified -// list of reports. If 'reports.size() < 2' an empty vector is returned. -std::vector ComputeBigO( + // Return a vector containing the bigO and RMS information for the specified + // list of reports. If 'reports.size() < 2' an empty vector is returned. + std::vector ComputeBigO( const std::vector& reports); // This data structure will contain the result returned by MinimalLeastSq @@ -60,11 +60,5 @@ struct LeastSq { // Function to return an string for the calculated complexity std::string GetBigOString(BigO complexity); -// Find the coefficient for the high-order term in the running time, by -// minimizing the sum of squares of relative error. -LeastSq MinimalLeastSq(const std::vector& n, - const std::vector& time, - const BigO complexity = oAuto); - } // end namespace benchmark #endif // COMPLEXITY_H_ diff --git a/src/console_reporter.cc b/src/console_reporter.cc index 9b20ac8b..27830974 100644 --- a/src/console_reporter.cc +++ b/src/console_reporter.cc @@ -90,8 +90,8 @@ void ConsoleReporter::PrintRunData(const Run& result) { const double cpu_time = result.GetAdjustedCPUTime(); if(result.report_big_o) { - std::string big_o = result.report_big_o ? GetBigOString(result.complexity) : ""; - ColorPrintf(Out, COLOR_YELLOW, "%10.4f %s %10.4f %s ", + std::string big_o = GetBigOString(result.complexity); + ColorPrintf(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ", real_time, big_o.c_str(), cpu_time, big_o.c_str()); } else if(result.report_rms) { ColorPrintf(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ", diff --git a/src/csv_reporter.cc b/src/csv_reporter.cc index 5c18c9e5..775a46cb 100644 --- a/src/csv_reporter.cc +++ b/src/csv_reporter.cc @@ -13,6 +13,7 @@ // limitations under the License. #include "benchmark/reporter.h" +#include "complexity.h" #include #include @@ -87,8 +88,10 @@ void CSVReporter::PrintRunData(const Run & run) { Out << run.GetAdjustedRealTime() << ","; Out << run.GetAdjustedCPUTime() << ","; - // Do not print timeLabel on RMS report - if(!run.report_rms) { + // Do not print timeLabel on bigO and RMS report + if(run.report_big_o) { + Out << GetBigOString(run.complexity); + } else if(!run.report_rms){ Out << GetTimeUnitString(run.time_unit); } Out << ","; diff --git a/src/json_reporter.cc b/src/json_reporter.cc index 8ec18e0c..04cb490c 100644 --- a/src/json_reporter.cc +++ b/src/json_reporter.cc @@ -13,6 +13,7 @@ // limitations under the License. 
#include "benchmark/reporter.h" +#include "complexity.h" #include #include @@ -132,15 +133,29 @@ void JSONReporter::PrintRunData(Run const& run) { out << indent << FormatKV("iterations", run.iterations) << ",\n"; - } - out << indent - << FormatKV("real_time", RoundDouble(run.GetAdjustedRealTime())) - << ",\n"; - out << indent - << FormatKV("cpu_time", RoundDouble(run.GetAdjustedCPUTime())); - if(!run.report_rms) { + out << indent + << FormatKV("real_time", RoundDouble(run.GetAdjustedRealTime())) + << ",\n"; + out << indent + << FormatKV("cpu_time", RoundDouble(run.GetAdjustedCPUTime())); out << ",\n" << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); + } else if(run.report_big_o) { + out << indent + << FormatKV("cpu_coefficient", RoundDouble(run.GetAdjustedCPUTime())) + << ",\n"; + out << indent + << FormatKV("real_coefficient", RoundDouble(run.GetAdjustedRealTime())) + << ",\n"; + out << indent + << FormatKV("big_o", GetBigOString(run.complexity)) + << ",\n"; + out << indent + << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); + } else if(run.report_rms) { + out << indent + << FormatKV("rms", RoundDouble(run.GetAdjustedCPUTime()*100)) + << "%"; } if (run.bytes_per_second > 0.0) { out << ",\n" << indent diff --git a/test/complexity_test.cc b/test/complexity_test.cc index 225a1811..9ef1f107 100644 --- a/test/complexity_test.cc +++ b/test/complexity_test.cc @@ -1,12 +1,183 @@ -#include "benchmark/benchmark_api.h" - -#include -#include +#undef NDEBUG +#include "benchmark/benchmark.h" +#include "../src/check.h" // NOTE: check.h is for internal use only! +#include "../src/re.h" // NOTE: re.h is for internal use only +#include +#include +#include +#include #include -#include +#include #include +namespace { + +// ========================================================================= // +// -------------------------- Testing Case --------------------------------- // +// ========================================================================= // + +enum MatchRules { + MR_Default, // Skip non-matching lines until a match is found. + MR_Next // Match must occur on the next line. 
+}; + +struct TestCase { + std::string regex; + int match_rule; + + TestCase(std::string re, int rule = MR_Default) : regex(re), match_rule(rule) {} + + void Check(std::stringstream& remaining_output) const { + benchmark::Regex r; + std::string err_str; + r.Init(regex, &err_str); + CHECK(err_str.empty()) << "Could not construct regex \"" << regex << "\"" + << " got Error: " << err_str; + + std::string line; + while (remaining_output.eof() == false) { + CHECK(remaining_output.good()); + std::getline(remaining_output, line); + if (r.Match(line)) return; + CHECK(match_rule != MR_Next) << "Expected line \"" << line + << "\" to match regex \"" << regex << "\""; + } + + CHECK(remaining_output.eof() == false) + << "End of output reached before match for regex \"" << regex + << "\" was found"; + } +}; + +std::vector ConsoleOutputTests; +std::vector JSONOutputTests; +std::vector CSVOutputTests; + +// ========================================================================= // +// -------------------------- Test Helpers --------------------------------- // +// ========================================================================= // + +class TestReporter : public benchmark::BenchmarkReporter { +public: + TestReporter(std::vector reps) + : reporters_(reps) {} + + virtual bool ReportContext(const Context& context) { + bool last_ret = false; + bool first = true; + for (auto rep : reporters_) { + bool new_ret = rep->ReportContext(context); + CHECK(first || new_ret == last_ret) + << "Reports return different values for ReportContext"; + first = false; + last_ret = new_ret; + } + return last_ret; + } + + virtual void ReportRuns(const std::vector& report) { + for (auto rep : reporters_) + rep->ReportRuns(report); + } + + virtual void Finalize() { + for (auto rep : reporters_) + rep->Finalize(); + } + +private: + std::vector reporters_; +}; + + +#define CONCAT2(x, y) x##y +#define CONCAT(x, y) CONCAT2(x, y) + +#define ADD_CASES(...) \ + int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__) + +int AddCases(std::vector* out, std::initializer_list const& v) { + for (auto const& TC : v) + out->push_back(TC); + return 0; +} + +template +std::string join(First f) { return f; } + +template +std::string join(First f, Args&&... args) { + return std::string(std::move(f)) + "[ ]+" + join(std::forward(args)...); +} + +std::string dec_re = "[0-9]+\\.[0-9]+"; + +#define ADD_COMPLEXITY_CASES(...) 
\ + int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__) + +int AddComplexityTest(std::vector* console_out, std::vector* json_out, + std::vector* csv_out, std::string big_o_test_name, + std::string rms_test_name, std::string big_o) { + std::string big_o_str = dec_re + " " + big_o; + AddCases(console_out, { + {join("^" + big_o_test_name + "", big_o_str, big_o_str) + "[ ]*$"}, + {join("^" + rms_test_name + "", "[0-9]+ %", "[0-9]+ %") + "[ ]*$"} + }); + AddCases(json_out, { + {"\"name\": \"" + big_o_test_name + "\",$"}, + {"\"cpu_coefficient\": [0-9]+,$", MR_Next}, + {"\"real_coefficient\": [0-9]{1,5},$", MR_Next}, + {"\"big_o\": \"" + big_o + "\",$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}, + {"\"name\": \"" + rms_test_name + "\",$"}, + {"\"rms\": [0-9]+%$", MR_Next}, + {"}", MR_Next} + }); + AddCases(csv_out, { + {"^\"" + big_o_test_name + "\",," + dec_re + "," + dec_re + "," + big_o + ",,,,,$"}, + {"^\"" + rms_test_name + "\",," + dec_re + "," + dec_re + ",,,,,,$"} + }); + return 0; +} + +} // end namespace + +// ========================================================================= // +// --------------------------- Testing BigO O(1) --------------------------- // +// ========================================================================= // + +void BM_Complexity_O1(benchmark::State& state) { + while (state.KeepRunning()) { + } + state.SetComplexityN(state.range_x()); +} +BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity(); +BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity(benchmark::o1); +BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity([](size_t){return 1.0; }); + +std::string big_o_1_test_name = "BM_Complexity_O1_BigO"; +std::string rms_o_1_test_name = "BM_Complexity_O1_RMS"; +std::string enum_auto_big_o_1 = "\\([0-9]+\\)"; +std::string lambda_big_o_1 = "f\\(N\\)"; + +// Add automatic tests +ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, + big_o_1_test_name, rms_o_1_test_name, enum_auto_big_o_1); + +// Add enum tests +ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, + big_o_1_test_name, rms_o_1_test_name, enum_auto_big_o_1); + +// Add lambda tests +ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, + big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1); + +// ========================================================================= // +// --------------------------- Testing BigO O(N) --------------------------- // +// ========================================================================= // + std::vector ConstructRandomVector(int size) { std::vector v; v.reserve(size); @@ -16,22 +187,7 @@ std::vector ConstructRandomVector(int size) { return v; } -std::map ConstructRandomMap(int size) { - std::map m; - for (int i = 0; i < size; ++i) { - m.insert(std::make_pair(rand() % size, rand() % size)); - } - return m; -} - -void BM_Complexity_O1(benchmark::State& state) { - while (state.KeepRunning()) { - } - state.SetComplexityN(state.range_x()); -} -BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity(benchmark::o1); - -static void BM_Complexity_O_N(benchmark::State& state) { +void BM_Complexity_O_N(benchmark::State& state) { auto v = ConstructRandomVector(state.range_x()); const int item_not_in_vector = state.range_x()*2; // Test worst case scenario (item not in vector) while (state.KeepRunning()) { @@ -39,51 +195,30 @@ static void BM_Complexity_O_N(benchmark::State& state) { } state.SetComplexityN(state.range_x()); } 
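For orientation, the expectations registered through `AddComplexityTest` above correspond to reporter output of the following shape (coefficients and RMS values are invented for illustration). Console:

```
BM_Complexity_O_N_BigO       2.72 N       2.72 N
BM_Complexity_O_N_RMS           3 %           3 %
```

JSON:

```
"name": "BM_Complexity_O_N_BigO",
"cpu_coefficient": 3,
"real_coefficient": 3,
"big_o": "N",
"time_unit": "ns"
```

CSV:

```
"BM_Complexity_O_N_BigO",,2.72,2.72,N,,,,,
"BM_Complexity_O_N_RMS",,0.03,0.03,,,,,,
```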
-BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oN); BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(); +BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oN); +BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](size_t n) -> double{return n; }); -static void BM_Complexity_O_N_Squared(benchmark::State& state) { - std::string s1(state.range_x(), '-'); - std::string s2(state.range_x(), '-'); - state.SetComplexityN(state.range_x()); - while (state.KeepRunning()) - for(char& c1 : s1) { - for(char& c2 : s2) { - benchmark::DoNotOptimize(c1 = 'a'); - benchmark::DoNotOptimize(c2 = 'b'); - } - } -} -BENCHMARK(BM_Complexity_O_N_Squared) -> Range(1, 1<<8) -> Complexity(benchmark::oNSquared); +std::string big_o_n_test_name = "BM_Complexity_O_N_BigO"; +std::string rms_o_n_test_name = "BM_Complexity_O_N_RMS"; +std::string enum_auto_big_o_n = "N"; +std::string lambda_big_o_n = "f\\(N\\)"; -static void BM_Complexity_O_N_Cubed(benchmark::State& state) { - std::string s1(state.range_x(), '-'); - std::string s2(state.range_x(), '-'); - std::string s3(state.range_x(), '-'); - state.SetComplexityN(state.range_x()); - while (state.KeepRunning()) - for(char& c1 : s1) { - for(char& c2 : s2) { - for(char& c3 : s3) { - benchmark::DoNotOptimize(c1 = 'a'); - benchmark::DoNotOptimize(c2 = 'b'); - benchmark::DoNotOptimize(c3 = 'c'); - } - } - } -} -BENCHMARK(BM_Complexity_O_N_Cubed) -> DenseRange(1, 8) -> Complexity(benchmark::oNCubed); +// Add automatic tests +ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, + big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n); -static void BM_Complexity_O_log_N(benchmark::State& state) { - auto m = ConstructRandomMap(state.range_x()); - const int item_not_in_vector = state.range_x()*2; // Test worst case scenario (item not in vector) - while (state.KeepRunning()) { - benchmark::DoNotOptimize(m.find(item_not_in_vector)); - } - state.SetComplexityN(state.range_x()); -} -BENCHMARK(BM_Complexity_O_log_N) - -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oLogN); +// Add enum tests +ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, + big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n); + +// Add lambda tests +ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, + big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n); + +// ========================================================================= // +// ------------------------- Testing BigO O(N*lgN) ------------------------- // +// ========================================================================= // static void BM_Complexity_O_N_log_N(benchmark::State& state) { auto v = ConstructRandomVector(state.range_x()); @@ -92,15 +227,82 @@ static void BM_Complexity_O_N_log_N(benchmark::State& state) { } state.SetComplexityN(state.range_x()); } -BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oNLogN); BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(); +BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oNLogN); +BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](size_t n) {return n * log2(n); }); -// Test benchmark with no range and check no complexity is 
calculated. -void BM_Extreme_Cases(benchmark::State& state) { - while (state.KeepRunning()) { +std::string big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO"; +std::string rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS"; +std::string enum_auto_big_o_n_lg_n = "NlgN"; +std::string lambda_big_o_n_lg_n = "f\\(N\\)"; + +// Add automatic tests +ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, + big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n); + +// Add enum tests +ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, + big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n); + +// Add lambda tests +ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, + big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n); + + +// ========================================================================= // +// --------------------------- TEST CASES END ------------------------------ // +// ========================================================================= // + + +int main(int argc, char* argv[]) { + // Add --color_print=false to argv since we don't want to match color codes. + char new_arg[64]; + char* new_argv[64]; + std::copy(argv, argv + argc, new_argv); + new_argv[argc++] = std::strcpy(new_arg, "--color_print=false"); + benchmark::Initialize(&argc, new_argv); + + benchmark::ConsoleReporter CR; + benchmark::JSONReporter JR; + benchmark::CSVReporter CSVR; + struct ReporterTest { + const char* name; + std::vector& output_cases; + benchmark::BenchmarkReporter& reporter; + std::stringstream out_stream; + std::stringstream err_stream; + + ReporterTest(const char* n, + std::vector& out_tc, + benchmark::BenchmarkReporter& br) + : name(n), output_cases(out_tc), reporter(br) { + reporter.SetOutputStream(&out_stream); + reporter.SetErrorStream(&err_stream); + } + } TestCases[] = { + {"ConsoleReporter", ConsoleOutputTests, CR}, + {"JSONReporter", JSONOutputTests, JR}, + {"CSVReporter", CSVOutputTests, CSVR} + }; + + // Create the test reporter and run the benchmarks. 
+ std::cout << "Running benchmarks...\n"; + TestReporter test_rep({&CR, &JR, &CSVR}); + benchmark::RunSpecifiedBenchmarks(&test_rep); + + for (auto& rep_test : TestCases) { + std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n"; + std::string banner(msg.size() - 1, '-'); + std::cout << banner << msg << banner << "\n"; + + std::cerr << rep_test.err_stream.str(); + std::cout << rep_test.out_stream.str(); + + for (const auto& TC : rep_test.output_cases) + TC.Check(rep_test.out_stream); + + std::cout << "\n"; } + return 0; } -BENCHMARK(BM_Extreme_Cases) -> Complexity(benchmark::oNLogN); -BENCHMARK(BM_Extreme_Cases) -> Arg(42) -> Complexity(); -BENCHMARK_MAIN() From 8c73d49b775610a4ee027a7f44b37962684a2370 Mon Sep 17 00:00:00 2001 From: Ismael Date: Wed, 1 Jun 2016 23:13:10 +0200 Subject: [PATCH 02/12] fixed reporter_output_test --- test/reporter_output_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/reporter_output_test.cc b/test/reporter_output_test.cc index c09fbb68..b3898acc 100644 --- a/test/reporter_output_test.cc +++ b/test/reporter_output_test.cc @@ -189,7 +189,7 @@ void BM_Complexity_O1(benchmark::State& state) { } BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity(benchmark::o1); -std::string bigOStr = "[0-9]+\\.[0-9]+ \\* [0-9]+"; +std::string bigOStr = "[0-9]+\\.[0-9]+ \\([0-9]+\\)"; ADD_CASES(&ConsoleOutputTests, { {join("^BM_Complexity_O1_BigO", bigOStr, bigOStr) + "[ ]*$"}, From 212cfe1c2e659d9523cbc453917d0cdde4699bcd Mon Sep 17 00:00:00 2001 From: Ismael Date: Thu, 2 Jun 2016 19:01:10 +0200 Subject: [PATCH 03/12] removed check on automatic fit, to avoid random convergence misfits breaking the build --- test/complexity_test.cc | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/test/complexity_test.cc b/test/complexity_test.cc index 9ef1f107..ed434fe1 100644 --- a/test/complexity_test.cc +++ b/test/complexity_test.cc @@ -153,19 +153,15 @@ void BM_Complexity_O1(benchmark::State& state) { } state.SetComplexityN(state.range_x()); } -BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity(); BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity(benchmark::o1); BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity([](size_t){return 1.0; }); +BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity(); std::string big_o_1_test_name = "BM_Complexity_O1_BigO"; std::string rms_o_1_test_name = "BM_Complexity_O1_RMS"; std::string enum_auto_big_o_1 = "\\([0-9]+\\)"; std::string lambda_big_o_1 = "f\\(N\\)"; -// Add automatic tests -ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, - big_o_1_test_name, rms_o_1_test_name, enum_auto_big_o_1); - // Add enum tests ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, big_o_1_test_name, rms_o_1_test_name, enum_auto_big_o_1); @@ -195,19 +191,15 @@ void BM_Complexity_O_N(benchmark::State& state) { } state.SetComplexityN(state.range_x()); } -BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(); BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oN); BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](size_t n) -> double{return n; }); +BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(); std::string big_o_n_test_name = "BM_Complexity_O_N_BigO"; std::string rms_o_n_test_name = "BM_Complexity_O_N_RMS"; std::string enum_auto_big_o_n = "N"; 
std::string lambda_big_o_n = "f\\(N\\)"; -// Add automatic tests -ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, - big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n); - // Add enum tests ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n); @@ -227,19 +219,15 @@ static void BM_Complexity_O_N_log_N(benchmark::State& state) { } state.SetComplexityN(state.range_x()); } -BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(); BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oNLogN); BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](size_t n) {return n * log2(n); }); +BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(); std::string big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO"; std::string rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS"; std::string enum_auto_big_o_n_lg_n = "NlgN"; std::string lambda_big_o_n_lg_n = "f\\(N\\)"; -// Add automatic tests -ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, - big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n); - // Add enum tests ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests, big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n); From 11e304355492670709c60e6d39eb42fc01fd878a Mon Sep 17 00:00:00 2001 From: Ismael Date: Thu, 2 Jun 2016 19:42:08 +0200 Subject: [PATCH 04/12] checked format before pull request --- include/benchmark/benchmark_api.h | 16 ++-- include/benchmark/reporter.h | 10 +-- src/benchmark.cc | 144 +++++++++++++++--------------- src/complexity.cc | 4 +- src/complexity.h | 14 +-- src/json_reporter.cc | 2 +- 6 files changed, 95 insertions(+), 95 deletions(-) diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h index 8d5189a1..49167ffb 100644 --- a/include/benchmark/benchmark_api.h +++ b/include/benchmark/benchmark_api.h @@ -261,16 +261,16 @@ typedef double(BigOFunc)(size_t); class State { public: State(size_t max_iters, bool has_x, int x, bool has_y, int y, - int thread_i, int n_threads); + int thread_i, int n_threads); - // Returns true iff the benchmark should continue through another iteration. + // Returns true if the benchmark should continue through another iteration. // NOTE: A benchmark may not return from the test until KeepRunning() has // returned false. bool KeepRunning() { if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) { - assert(!finished_); - started_ = true; - ResumeTiming(); + assert(!finished_); + started_ = true; + ResumeTiming(); } bool const res = total_iterations_++ < max_iterations; if (BENCHMARK_BUILTIN_EXPECT(!res, false)) { @@ -365,7 +365,7 @@ public: // represent the length of N. BENCHMARK_ALWAYS_INLINE void SetComplexityN(size_t complexity_n) { - complexity_n_ = complexity_n; + complexity_n_ = complexity_n; } BENCHMARK_ALWAYS_INLINE @@ -539,11 +539,11 @@ public: // to control how many iterations are run, and in the printing of items/second // or MB/second values. Benchmark* UseManualTime(); - + // Set the asymptotic computational complexity for the benchmark. If called // the asymptotic computational complexity will be shown on the output. Benchmark* Complexity(BigO complexity = benchmark::oAuto); - + // Set the asymptotic computational complexity for the benchmark. 
If called // the asymptotic computational complexity will be shown on the output. Benchmark* Complexity(BigOFunc* complexity); diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h index 4c7bff3f..f37e0a31 100644 --- a/include/benchmark/reporter.h +++ b/include/benchmark/reporter.h @@ -83,12 +83,12 @@ class BenchmarkReporter { // This is set to 0.0 if memory tracing is not enabled. double max_heapbytes_used; - + // Keep track of arguments to compute asymptotic complexity BigO complexity; BigOFunc* complexity_lambda; size_t complexity_n; - + // Inform print function whether the current run is a complexity report bool report_big_o; bool report_rms; @@ -114,7 +114,7 @@ class BenchmarkReporter { // 'reports' contains additional entries representing the asymptotic // complexity and RMS of that benchmark family. virtual void ReportRuns(const std::vector& report) = 0; - + // Called once and only once after ever group of benchmarks is run and // reported. virtual void Finalize() {} @@ -156,11 +156,11 @@ private: // Simple reporter that outputs benchmark data to the console. This is the // default reporter used by RunSpecifiedBenchmarks(). class ConsoleReporter : public BenchmarkReporter { - public: +public: virtual bool ReportContext(const Context& context); virtual void ReportRuns(const std::vector& reports); - protected: +protected: virtual void PrintRunData(const Run& report); size_t name_field_width_; diff --git a/src/benchmark.cc b/src/benchmark.cc index 2b55ac74..c56faa92 100644 --- a/src/benchmark.cc +++ b/src/benchmark.cc @@ -140,7 +140,7 @@ class TimerManager { manual_time_used_(0), num_finalized_(0), phase_number_(0), - entered_(0) + entered_(0) { } @@ -277,7 +277,7 @@ class TimerManager { int phase_number_cp = phase_number_; auto cb = [this, phase_number_cp]() { return this->phase_number_ > phase_number_cp || - entered_ == running_threads_; // A thread has aborted in error + entered_ == running_threads_; // A thread has aborted in error }; phase_condition_.wait(ml.native_handle(), cb); if (phase_number_ > phase_number_cp) @@ -731,7 +731,7 @@ void FunctionBenchmark::Run(State& st) { } // end namespace internal namespace { - + // Execute one thread of benchmark b for the specified number of iterations. // Adds the stats collected for the thread into *total. void RunInThread(const benchmark::internal::Benchmark::Instance* b, @@ -745,15 +745,15 @@ void RunInThread(const benchmark::internal::Benchmark::Instance* b, MutexLock l(GetBenchmarkLock()); total->bytes_processed += st.bytes_processed(); total->items_processed += st.items_processed(); - total->complexity_n += st.complexity_length_n(); + total->complexity_n += st.complexity_length_n(); } timer_manager->Finalize(); } void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, - BenchmarkReporter* br, - std::vector& complexity_reports) + BenchmarkReporter* br, + std::vector& complexity_reports) EXCLUDES(GetBenchmarkLock()) { size_t iters = 1; @@ -764,7 +764,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, pool.resize(b.threads); const int repeats = b.repetitions != 0 ? 
b.repetitions - : FLAGS_benchmark_repetitions; + : FLAGS_benchmark_repetitions; for (int i = 0; i < repeats; i++) { std::string mem; for (;;) { @@ -844,28 +844,28 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, report.time_unit = b.time_unit; if (!report.error_occurred) { - double bytes_per_second = 0; - if (total.bytes_processed > 0 && seconds > 0.0) { - bytes_per_second = (total.bytes_processed / seconds); - } - double items_per_second = 0; - if (total.items_processed > 0 && seconds > 0.0) { - items_per_second = (total.items_processed / seconds); - } + double bytes_per_second = 0; + if (total.bytes_processed > 0 && seconds > 0.0) { + bytes_per_second = (total.bytes_processed / seconds); + } + double items_per_second = 0; + if (total.items_processed > 0 && seconds > 0.0) { + items_per_second = (total.items_processed / seconds); + } - if (b.use_manual_time) { - report.real_accumulated_time = manual_accumulated_time; - } else { - report.real_accumulated_time = real_accumulated_time; - } - report.cpu_accumulated_time = cpu_accumulated_time; - report.bytes_per_second = bytes_per_second; - report.items_per_second = items_per_second; - report.complexity_n = total.complexity_n; - report.complexity = b.complexity; - report.complexity_lambda = b.complexity_lambda; - if(report.complexity != oNone) - complexity_reports.push_back(report); + if (b.use_manual_time) { + report.real_accumulated_time = manual_accumulated_time; + } else { + report.real_accumulated_time = real_accumulated_time; + } + report.cpu_accumulated_time = cpu_accumulated_time; + report.bytes_per_second = bytes_per_second; + report.items_per_second = items_per_second; + report.complexity_n = total.complexity_n; + report.complexity = b.complexity; + report.complexity_lambda = b.complexity_lambda; + if(report.complexity != oNone) + complexity_reports.push_back(report); } reports.push_back(report); @@ -893,17 +893,17 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, } std::vector additional_run_stats = ComputeStats(reports); reports.insert(reports.end(), additional_run_stats.begin(), - additional_run_stats.end()); + additional_run_stats.end()); if((b.complexity != oNone) && b.last_benchmark_instance) { additional_run_stats = ComputeBigO(complexity_reports); reports.insert(reports.end(), additional_run_stats.begin(), - additional_run_stats.end()); + additional_run_stats.end()); complexity_reports.clear(); } br->ReportRuns(reports); - + if (b.multithreaded) { for (std::thread& thread : pool) thread.join(); @@ -964,56 +964,56 @@ void State::SetLabel(const char* label) { } namespace internal { - namespace { +namespace { - void RunMatchingBenchmarks(const std::vector& benchmarks, - BenchmarkReporter* reporter) { - CHECK(reporter != nullptr); +void RunMatchingBenchmarks(const std::vector& benchmarks, + BenchmarkReporter* reporter) { + CHECK(reporter != nullptr); - // Determine the width of the name field using a minimum width of 10. - bool has_repetitions = FLAGS_benchmark_repetitions > 1; - size_t name_field_width = 10; - for (const Benchmark::Instance& benchmark : benchmarks) { - name_field_width = - std::max(name_field_width, benchmark.name.size()); - has_repetitions |= benchmark.repetitions > 1; - } - if (has_repetitions) - name_field_width += std::strlen("_stddev"); + // Determine the width of the name field using a minimum width of 10. 
+ bool has_repetitions = FLAGS_benchmark_repetitions > 1; + size_t name_field_width = 10; + for (const Benchmark::Instance& benchmark : benchmarks) { + name_field_width = + std::max(name_field_width, benchmark.name.size()); + has_repetitions |= benchmark.repetitions > 1; + } + if (has_repetitions) + name_field_width += std::strlen("_stddev"); - // Print header here - BenchmarkReporter::Context context; - context.num_cpus = NumCPUs(); - context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f; + // Print header here + BenchmarkReporter::Context context; + context.num_cpus = NumCPUs(); + context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f; - context.cpu_scaling_enabled = CpuScalingEnabled(); - context.name_field_width = name_field_width; + context.cpu_scaling_enabled = CpuScalingEnabled(); + context.name_field_width = name_field_width; - // Keep track of runing times of all instances of current benchmark - std::vector complexity_reports; + // Keep track of runing times of all instances of current benchmark + std::vector complexity_reports; - if (reporter->ReportContext(context)) { - for (const auto& benchmark : benchmarks) { - RunBenchmark(benchmark, reporter, complexity_reports); - } - } + if (reporter->ReportContext(context)) { + for (const auto& benchmark : benchmarks) { + RunBenchmark(benchmark, reporter, complexity_reports); } + } +} - std::unique_ptr GetDefaultReporter() { - typedef std::unique_ptr PtrType; - if (FLAGS_benchmark_format == "console") { - return PtrType(new ConsoleReporter); - } else if (FLAGS_benchmark_format == "json") { - return PtrType(new JSONReporter); - } else if (FLAGS_benchmark_format == "csv") { - return PtrType(new CSVReporter); - } else { - std::cerr << "Unexpected format: '" << FLAGS_benchmark_format << "'\n"; - std::exit(1); - } - } +std::unique_ptr GetDefaultReporter() { + typedef std::unique_ptr PtrType; + if (FLAGS_benchmark_format == "console") { + return PtrType(new ConsoleReporter); + } else if (FLAGS_benchmark_format == "json") { + return PtrType(new JSONReporter); + } else if (FLAGS_benchmark_format == "csv") { + return PtrType(new CSVReporter); + } else { + std::cerr << "Unexpected format: '" << FLAGS_benchmark_format << "'\n"; + std::exit(1); + } +} - } // end namespace +} // end namespace } // end namespace internal size_t RunSpecifiedBenchmarks() { diff --git a/src/complexity.cc b/src/complexity.cc index 97f86d8e..0d6f90bc 100644 --- a/src/complexity.cc +++ b/src/complexity.cc @@ -194,9 +194,9 @@ std::vector ComputeStats( mean_data.benchmark_name = reports[0].benchmark_name + "_mean"; mean_data.iterations = run_iterations; mean_data.real_accumulated_time = real_accumulated_time_stat.Mean() * - run_iterations; + run_iterations; mean_data.cpu_accumulated_time = cpu_accumulated_time_stat.Mean() * - run_iterations; + run_iterations; mean_data.bytes_per_second = bytes_per_second_stat.Mean(); mean_data.items_per_second = items_per_second_stat.Mean(); diff --git a/src/complexity.h b/src/complexity.h index 798154a2..85cc1250 100644 --- a/src/complexity.h +++ b/src/complexity.h @@ -26,15 +26,15 @@ namespace benchmark { - // Return a vector containing the mean and standard devation information for - // the specified list of reports. If 'reports' contains less than two - // non-errored runs an empty vector is returned - std::vector ComputeStats( +// Return a vector containing the mean and standard devation information for +// the specified list of reports. 
If 'reports' contains less than two
-  // non-errored runs an empty vector is returned
-  std::vector<BenchmarkReporter::Run> ComputeStats(
+// Return a vector containing the mean and standard deviation information for
+// the specified list of reports. If 'reports' contains less than two
+// non-errored runs an empty vector is returned
+std::vector<BenchmarkReporter::Run> ComputeStats(
     const std::vector<BenchmarkReporter::Run>& reports);
 
-  // Return a vector containing the bigO and RMS information for the specified
-  // list of reports. If 'reports.size() < 2' an empty vector is returned.
-  std::vector<BenchmarkReporter::Run> ComputeBigO(
+// Return a vector containing the bigO and RMS information for the specified
+// list of reports. If 'reports.size() < 2' an empty vector is returned.
+std::vector<BenchmarkReporter::Run> ComputeBigO(
     const std::vector<BenchmarkReporter::Run>& reports);
 
 // This data structure will contain the result returned by MinimalLeastSq
diff --git a/src/json_reporter.cc b/src/json_reporter.cc
index 04cb490c..da883559 100644
--- a/src/json_reporter.cc
+++ b/src/json_reporter.cc
@@ -155,7 +155,7 @@ void JSONReporter::PrintRunData(Run const& run) {
   } else if(run.report_rms) {
     out << indent
         << FormatKV("rms", RoundDouble(run.GetAdjustedCPUTime()*100))
-        << "%";
+        << '%';
   }
   if (run.bytes_per_second > 0.0) {
     out << ",\n" << indent

From 109f528a4039c298e1f02ff4ce2fd32c552c5a38 Mon Sep 17 00:00:00 2001
From: Ismael
Date: Thu, 2 Jun 2016 19:48:53 +0200
Subject: [PATCH 05/12] removed functional library not needed

---
 include/benchmark/benchmark_api.h | 1 -
 src/complexity.cc                 | 1 -
 test/complexity_test.cc           | 6 +++---
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h
index 49167ffb..0cb43488 100644
--- a/include/benchmark/benchmark_api.h
+++ b/include/benchmark/benchmark_api.h
@@ -152,7 +152,6 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
 #include <assert.h>
 #include <stddef.h>
 #include <stdint.h>
-#include <functional>
 
 #include "macros.h"
 
diff --git a/src/complexity.cc b/src/complexity.cc
index 0d6f90bc..1b444cea 100644
--- a/src/complexity.cc
+++ b/src/complexity.cc
@@ -22,7 +22,6 @@
 #include "stat.h"
 #include
 #include
-#include <functional>
 
 namespace benchmark {
 
diff --git a/test/complexity_test.cc b/test/complexity_test.cc
index ed434fe1..03bd2dcc 100644
--- a/test/complexity_test.cc
+++ b/test/complexity_test.cc
@@ -153,9 +153,9 @@ void BM_Complexity_O1(benchmark::State& state) {
   }
   state.SetComplexityN(state.range_x());
 }
-BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity(benchmark::o1);
-BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity([](size_t){return 1.0; });
-BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity();
+BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity(benchmark::o1);
+BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity([](size_t){return 1.0; });
+BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity();

From 3ef63399716c7ac213d4016ab9454422f4f9d6d1 Mon Sep 17 00:00:00 2001
From: Ismael
Date: Thu, 2 Jun 2016 20:58:14 +0200
Subject: [PATCH 06/12] Update Readme.md

---
 README.md | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/README.md b/README.md
index 5be51532..f34e8870 100644
--- a/README.md
+++ b/README.md
@@ -142,6 +142,14 @@ BENCHMARK(BM_StringCompare)
     ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity();
 ```
 
+The following code will specify the asymptotic complexity with a lambda function,
+which might be used to customize the high-order term calculation.
+
+```c++
+BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
+    ->Range(1<<10, 1<<18)->Complexity([](size_t n)->double{return n; });
+```
+
 ### Templated benchmarks
 Templated benchmarks work the same way: This example produces and consumes
 messages of size `sizeof(v)` `range_x` times. It also outputs throughput in the

From 22cb9d9ce0ff12219f5ca6c4a28124d11730e66f Mon Sep 17 00:00:00 2001
From: Ismael
Date: Thu, 2 Jun 2016 22:01:31 +0200
Subject: [PATCH 07/12] google formated

---
 include/benchmark/reporter.h |  52 ++++++++--------
 src/complexity.cc            | 114 +++++++++++++++++------------------
 src/console_reporter.cc      |  26 ++++----
 src/csv_reporter.cc          |  10 +--
 src/json_reporter.cc         |  75 ++++++++++++-----------
 5 files changed, 138 insertions(+), 139 deletions(-)

diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h
index f37e0a31..e3a8f573 100644
--- a/include/benchmark/reporter.h
+++ b/include/benchmark/reporter.h
@@ -20,7 +20,7 @@
 #include
 #include

-#include "benchmark_api.h" // For forward declaration of BenchmarkReporter
+#include "benchmark_api.h"  // For forward declaration of BenchmarkReporter

 namespace benchmark {

@@ -133,14 +133,14 @@ class BenchmarkReporter {
     error_stream_ = err;
   }

-  std::ostream& GetOutputStream() const { 
+  std::ostream& GetOutputStream() const {
     return *output_stream_;
   }

   std::ostream& GetErrorStream() const {
     return *error_stream_;
   }
- 
+
   virtual ~BenchmarkReporter();

   // Write a human readable string to 'out' representing the specified
   // benchmark. Print the result to 'out'.
   // REQUIRES: 'out' is non-null.
   static void PrintBasicContext(std::ostream* out, Context const& context);

-private:
+ private:
   std::ostream* output_stream_;
   std::ostream* error_stream_;
 };

 // Simple reporter that outputs benchmark data to the console. This is the
 // default reporter used by RunSpecifiedBenchmarks().
 class ConsoleReporter : public BenchmarkReporter {
-public:
+ public:
   virtual bool ReportContext(const Context& context);
   virtual void ReportRuns(const std::vector<Run>& reports);

-protected:
+ protected:
   virtual void PrintRunData(const Run& report);

   size_t name_field_width_;
 };

 class JSONReporter : public BenchmarkReporter {
-public:
+ public:
   JSONReporter() : first_report_(true) {}
   virtual bool ReportContext(const Context& context);
   virtual void ReportRuns(const std::vector<Run>& reports);
   virtual void Finalize();

-private:
+ private:
   void PrintRunData(const Run& report);

   bool first_report_;
 };

 class CSVReporter : public BenchmarkReporter {
-public:
+ public:
   virtual bool ReportContext(const Context& context);
   virtual void ReportRuns(const std::vector<Run>& reports);

-private:
+ private:
   void PrintRunData(const Run& report);
 };

 inline const char* GetTimeUnitString(TimeUnit unit) {
   switch (unit) {
-  case kMillisecond:
-    return "ms";
-  case kMicrosecond:
-    return "us";
-  case kNanosecond:
-  default:
-    return "ns";
+    case kMillisecond:
+      return "ms";
+    case kMicrosecond:
+      return "us";
+    case kNanosecond:
+    default:
+      return "ns";
   }
 }

 inline double GetTimeUnitMultiplier(TimeUnit unit) {
   switch (unit) {
-  case kMillisecond:
-    return 1e3;
-  case kMicrosecond:
-    return 1e6;
-  case kNanosecond:
-  default:
-    return 1e9;
+    case kMillisecond:
+      return 1e3;
+    case kMicrosecond:
+      return 1e6;
+    case kNanosecond:
+    default:
+      return 1e9;
   }
 }

-} // end namespace benchmark
-#endif // BENCHMARK_REPORTER_H_
+}  // end namespace benchmark
+#endif  // BENCHMARK_REPORTER_H_
diff --git a/src/complexity.cc b/src/complexity.cc
index 1b444cea..0d53ade0 100644
--- a/src/complexity.cc
+++ b/src/complexity.cc
@@ -17,55 +17,55 @@

 #include "benchmark/benchmark_api.h"

-#include "complexity.h"
-#include "check.h"
-#include "stat.h"
-#include
 #include
+#include
+#include "check.h"
+#include "complexity.h"
+#include "stat.h"

 namespace benchmark {

 // Internal function to calculate the different scalability forms
 BigOFunc* FittingCurve(BigO complexity) {
   switch (complexity) {
-  case oN:
-    return [](size_t n) -> double {return n; };
-  case oNSquared:
-    return [](size_t n) -> double {return n * n; };
-  case oNCubed:
-    return [](size_t n) -> double {return n * n * n; };
-  case oLogN:
-    return [](size_t n) {return log2(n); };
-  case oNLogN:
-    return [](size_t n) {return n * log2(n); };
-  case o1:
-  default:
-    return [](size_t) {return 1.0; };
+    case oN:
+      return [](size_t n) -> double { return n; };
+    case oNSquared:
+      return [](size_t n) -> double { return n * n; };
+    case oNCubed:
+      return [](size_t n) -> double { return n * n * n; };
+    case oLogN:
+      return [](size_t n) { return log2(n); };
+    case oNLogN:
+      return [](size_t n) { return n * log2(n); };
+    case o1:
+    default:
+      return [](size_t) { return 1.0; };
   }
 }

 // Function to return a string for the calculated complexity
 std::string GetBigOString(BigO complexity) {
   switch (complexity) {
-  case oN:
-    return "N";
-  case oNSquared:
-    return "N^2";
-  case oNCubed:
-    return "N^3";
-  case oLogN:
-    return "lgN";
-  case oNLogN:
-    return "NlgN";
-  case o1:
-    return "(1)";
-  default:
-    return "f(N)";
+    case oN:
+      return "N";
+    case oNSquared:
+      return "N^2";
+    case oNCubed:
+      return "N^3";
+    case oLogN:
+      return "lgN";
+    case oNLogN:
+      return "NlgN";
+    case o1:
+      return "(1)";
+    default:
+      return "f(N)";
   }
 }

-// Find the coefficient for the high-order term in the running time, by
-// minimizing the sum of squares of relative error, for the fitting curve
+// Find the coefficient for the high-order term in the running time, by
+// minimizing the sum of squares of relative error, for the fitting curve
 // given by the lambda expression.
 // - n : Vector containing the size of the benchmark tests.
 // - time : Vector containing the times for the benchmark tests.
@@ -122,14 +122,14 @@ LeastSq MinimalLeastSq(const std::vector<size_t>& n,
                        const std::vector<double>& time,
                        const BigO complexity) {
   CHECK_EQ(n.size(), time.size());
-  CHECK_GE(n.size(), 2);  // Do not compute fitting curve is less than two benchmark runs are given
+  CHECK_GE(n.size(), 2);  // Do not compute fitting curve if less than two
+                          // benchmark runs are given
   CHECK_NE(complexity, oNone);

   LeastSq best_fit;

-  if(complexity == oAuto) {
-    std::vector<BigO> fit_curves = {
-      oLogN, oN, oNLogN, oNSquared, oNCubed };
+  if (complexity == oAuto) {
+    std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};

     // Take o1 as default best fitting curve
     best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
@@ -152,14 +152,13 @@ LeastSq MinimalLeastSq(const std::vector<size_t>& n,
 }

 std::vector<BenchmarkReporter::Run> ComputeStats(
-  const std::vector<BenchmarkReporter::Run>& reports)
-{
+    const std::vector<BenchmarkReporter::Run>& reports) {
   typedef BenchmarkReporter::Run Run;
   std::vector<Run> results;

-  auto error_count = std::count_if(
-    reports.begin(), reports.end(),
-    [](Run const& run) {return run.error_occurred;});
+  auto error_count =
+      std::count_if(reports.begin(), reports.end(),
+                    [](Run const& run) { return run.error_occurred; });

   if (reports.size() - error_count < 2) {
     // We don't report aggregated data if there was a single run.
@@ -178,12 +177,11 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
   for (Run const& run : reports) {
     CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
     CHECK_EQ(run_iterations, run.iterations);
-    if (run.error_occurred)
-      continue;
+    if (run.error_occurred) continue;
     real_accumulated_time_stat +=
-      Stat1_d(run.real_accumulated_time/run.iterations, run.iterations);
+        Stat1_d(run.real_accumulated_time / run.iterations, run.iterations);
     cpu_accumulated_time_stat +=
-      Stat1_d(run.cpu_accumulated_time/run.iterations, run.iterations);
+        Stat1_d(run.cpu_accumulated_time / run.iterations, run.iterations);
     items_per_second_stat += Stat1_d(run.items_per_second, run.iterations);
     bytes_per_second_stat += Stat1_d(run.bytes_per_second, run.iterations);
   }
@@ -192,10 +190,10 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
   Run mean_data;
   mean_data.benchmark_name = reports[0].benchmark_name + "_mean";
   mean_data.iterations = run_iterations;
-  mean_data.real_accumulated_time = real_accumulated_time_stat.Mean() *
-      run_iterations;
-  mean_data.cpu_accumulated_time = cpu_accumulated_time_stat.Mean() *
-      run_iterations;
+  mean_data.real_accumulated_time =
+      real_accumulated_time_stat.Mean() * run_iterations;
+  mean_data.cpu_accumulated_time =
+      cpu_accumulated_time_stat.Mean() * run_iterations;
   mean_data.bytes_per_second = bytes_per_second_stat.Mean();
   mean_data.items_per_second = items_per_second_stat.Mean();
@@ -212,10 +210,8 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
   stddev_data.benchmark_name = reports[0].benchmark_name + "_stddev";
   stddev_data.report_label = mean_data.report_label;
   stddev_data.iterations = 0;
-  stddev_data.real_accumulated_time =
-    real_accumulated_time_stat.StdDev();
-  stddev_data.cpu_accumulated_time =
-    cpu_accumulated_time_stat.StdDev();
+  stddev_data.real_accumulated_time = real_accumulated_time_stat.StdDev();
+  stddev_data.cpu_accumulated_time = cpu_accumulated_time_stat.StdDev();
   stddev_data.bytes_per_second = bytes_per_second_stat.StdDev();
   stddev_data.items_per_second = items_per_second_stat.StdDev();
@@ -225,8 +221,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
 }

 std::vector<BenchmarkReporter::Run> ComputeBigO(
-  const std::vector<BenchmarkReporter::Run>& reports)
-{
+    const std::vector<BenchmarkReporter::Run>& reports) {
   typedef BenchmarkReporter::Run Run;
   std::vector<Run> results;
@@ -240,8 +235,8 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
   // Populate the accumulators.
   for (const Run& run : reports) {
     n.push_back(run.complexity_n);
-    real_time.push_back(run.real_accumulated_time/run.iterations);
-    cpu_time.push_back(run.cpu_accumulated_time/run.iterations);
+    real_time.push_back(run.real_accumulated_time / run.iterations);
+    cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
   }

   LeastSq result_cpu;
@@ -254,7 +249,8 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
     result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
     result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
   }
-  std::string benchmark_name = reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));
+  std::string benchmark_name =
+      reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));

   // Get the data from the accumulator to BenchmarkReporter::Run's.
   Run big_o;
diff --git a/src/console_reporter.cc b/src/console_reporter.cc
index 27830974..080c324a 100644
--- a/src/console_reporter.cc
+++ b/src/console_reporter.cc
@@ -15,9 +15,9 @@
 #include "benchmark/reporter.h"
 #include "complexity.h"

+#include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -62,8 +62,8 @@ void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
 void ConsoleReporter::PrintRunData(const Run& result) {
   auto& Out = GetOutputStream();

-  auto name_color = (result.report_big_o || result.report_rms)
-      ? COLOR_BLUE : COLOR_GREEN;
+  auto name_color =
+      (result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
   ColorPrintf(Out, name_color, "%-*s ", name_field_width_,
               result.benchmark_name.c_str());
@@ -84,25 +84,25 @@ void ConsoleReporter::PrintRunData(const Run& result) {
   if (result.items_per_second > 0) {
     items = StrCat(" ", HumanReadableNumber(result.items_per_second),
                    " items/s");
-  } 
+  }

   const double real_time = result.GetAdjustedRealTime();
   const double cpu_time = result.GetAdjustedCPUTime();

-  if(result.report_big_o) {
+  if (result.report_big_o) {
     std::string big_o = GetBigOString(result.complexity);
-    ColorPrintf(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ",
-                real_time, big_o.c_str(), cpu_time, big_o.c_str());
-  } else if(result.report_rms) {
-    ColorPrintf(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ",
-                real_time * 100, cpu_time * 100);
+    ColorPrintf(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ", real_time,
+                big_o.c_str(), cpu_time, big_o.c_str());
+  } else if (result.report_rms) {
+    ColorPrintf(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ", real_time * 100,
+                cpu_time * 100);
   } else {
     const char* timeLabel = GetTimeUnitString(result.time_unit);
-    ColorPrintf(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ",
-                real_time, timeLabel, cpu_time, timeLabel);
+    ColorPrintf(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ", real_time, timeLabel,
+                cpu_time, timeLabel);
   }

-  if(!result.report_big_o && !result.report_rms) {
+  if (!result.report_big_o && !result.report_rms) {
     ColorPrintf(Out, COLOR_CYAN, "%10lld", result.iterations);
   }
diff --git a/src/csv_reporter.cc b/src/csv_reporter.cc
index 775a46cb..7bc7ef3d 100644
--- a/src/csv_reporter.cc
+++ b/src/csv_reporter.cc
@@ -15,8 +15,8 @@
 #include "benchmark/reporter.h"
 #include "complexity.h"

-#include
 #include
+#include
 #include
 #include
 #include
@@ -80,7 +80,7 @@ void CSVReporter::PrintRunData(const Run & run) {
   }

   // Do not print iteration on bigO and RMS report
-  if(!run.report_big_o && !run.report_rms) {
+  if (!run.report_big_o && !run.report_rms) {
     Out << run.iterations;
   }
   Out << ",";

   Out << run.GetAdjustedRealTime() << ",";
   Out << run.GetAdjustedCPUTime() << ",";

   // Do not print timeLabel on bigO and RMS report
-  if(run.report_big_o) {
+  if (run.report_big_o) {
     Out << GetBigOString(run.complexity);
-  } else if(!run.report_rms){
+  } else if (!run.report_rms) {
     Out << GetTimeUnitString(run.time_unit);
   }
   Out << ",";
@@ -111,7 +111,7 @@ void CSVReporter::PrintRunData(const Run & run) {
     ReplaceAll(&label, "\"", "\"\"");
     Out << "\"" << label << "\"";
   }
-  Out << ",,"; // for error_occurred and error_message
+  Out << ",,";  // for error_occurred and error_message

   Out << '\n';
 }
diff --git a/src/json_reporter.cc b/src/json_reporter.cc
index da883559..485d3052 100644
--- a/src/json_reporter.cc
+++ b/src/json_reporter.cc
@@ -15,8 +15,8 @@
 #include "benchmark/reporter.h"
 #include "complexity.h"

-#include
 #include
+#include
 #include
 #include
 #include
@@ -100,24 +100,24 @@ void JSONReporter::ReportRuns(std::vector<Run> const& reports) {
   first_report_ = false;

   for (auto it = reports.begin(); it != reports.end(); ++it) {
-      out << indent << "{\n";
-      PrintRunData(*it);
-      out << indent << '}';
-      auto it_cp = it;
-      if (++it_cp != reports.end()) {
-          out << ",\n";
-      }
+    out << indent << "{\n";
+    PrintRunData(*it);
+    out << indent << '}';
+    auto it_cp = it;
+    if (++it_cp != reports.end()) {
+      out << ",\n";
+    }
   }
 }

 void JSONReporter::Finalize() {
-    // Close the list of benchmarks and the top level object.
-    GetOutputStream() << "\n  ]\n}\n";
+  // Close the list of benchmarks and the top level object.
+  GetOutputStream() << "\n  ]\n}\n";
 }

 void JSONReporter::PrintRunData(Run const& run) {
-    std::string indent(6, ' ');
-    std::ostream& out = GetOutputStream();
+  std::string indent(6, ' ');
+  std::ostream& out = GetOutputStream();

   out << indent
       << FormatKV("name", run.benchmark_name)
       << ",\n";
@@ -129,7 +129,7 @@ void JSONReporter::PrintRunData(Run const& run) {
         << FormatKV("error_message", run.error_message)
         << ",\n";
   }

-  if(!run.report_big_o && !run.report_rms) {
+  if (!run.report_big_o && !run.report_rms) {
     out << indent
         << FormatKV("iterations", run.iterations)
         << ",\n";
@@ -140,14 +140,14 @@ void JSONReporter::PrintRunData(Run const& run) {
         << FormatKV("cpu_time", RoundDouble(run.GetAdjustedCPUTime()));
     out << ",\n"
         << indent
         << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
-  } else if(run.report_big_o) {
-    out << indent
-        << FormatKV("cpu_coefficient", RoundDouble(run.GetAdjustedCPUTime()))
-        << ",\n";
-    out << indent
-        << FormatKV("real_coefficient", RoundDouble(run.GetAdjustedRealTime()))
-        << ",\n";
-    out << indent
+  } else if (run.report_big_o) {
+    out << indent
+        << FormatKV("cpu_coefficient", RoundDouble(run.GetAdjustedCPUTime()))
+        << ",\n";
+    out << indent
+        << FormatKV("real_coefficient", RoundDouble(run.GetAdjustedRealTime()))
+        << ",\n";
+    out << indent
         << FormatKV("big_o", GetBigOString(run.complexity))
         << ",\n";
     out << indent
@@ -156,20 +156,23 @@ void JSONReporter::PrintRunData(Run const& run) {
     out << indent
         << FormatKV("rms", RoundDouble(run.GetAdjustedCPUTime()*100))
        << '%';
-  }
-  if (run.bytes_per_second > 0.0) {
-    out << ",\n" << indent
-        << FormatKV("bytes_per_second", RoundDouble(run.bytes_per_second));
-  }
-  if (run.items_per_second > 0.0) {
-    out << ",\n" << indent
-        << FormatKV("items_per_second", RoundDouble(run.items_per_second));
-  }
-  if (!run.report_label.empty()) {
-    out << ",\n" << indent
-        << FormatKV("label", run.report_label);
-  }
-  out << '\n';
+  }
+  if (run.bytes_per_second > 0.0) {
+    out << ",\n"
+        << indent
+        << FormatKV("bytes_per_second", RoundDouble(run.bytes_per_second));
+  }
+  if (run.items_per_second > 0.0) {
+    out << ",\n"
+        << indent
+        << FormatKV("items_per_second", RoundDouble(run.items_per_second));
+  }
+  if (!run.report_label.empty()) {
+    out << ",\n"
+        << indent
+        << FormatKV("label", run.report_label);
+  }
+  out << '\n';
 }

-} // end namespace benchmark
+}  // end namespace benchmark

From 240ba4e64eb46e1f5acbafaadae34c2b2ca701eb Mon Sep 17 00:00:00 2001
From: Ismael
Date: Thu, 2 Jun 2016 22:21:52 +0200
Subject: [PATCH 08/12] changed BigOFunc argument from size_t to int

---
 README.md                         |  2 +-
 include/benchmark/benchmark_api.h |  2 +-
 src/complexity.cc                 | 12 ++++++------
 test/complexity_test.cc           |  6 +++---
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index f34e8870..e30052dc 100644
--- a/README.md
+++ b/README.md
@@ -147,7 +147,7 @@ which can be used to customize the high-order term calculation.

 ```c++
 BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
-    ->Range(1<<10, 1<<18)->Complexity([](size_t n)->double{return n; });
+    ->Range(1<<10, 1<<18)->Complexity([](int n)->double{return n; });
 ```

diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h
index 0cb43488..34cd0b5c 100644
--- a/include/benchmark/benchmark_api.h
+++ b/include/benchmark/benchmark_api.h
@@ -253,7 +253,7 @@ enum BigO {

 // BigOFunc is passed to a benchmark in order to specify the asymptotic
 // computational complexity for the benchmark.
-typedef double(BigOFunc)(size_t);
+typedef double(BigOFunc)(int);

 // State is passed to a running Benchmark and contains state for the
 // benchmark to use.
diff --git a/src/complexity.cc b/src/complexity.cc
index 0d53ade0..e25aa3c4 100644
--- a/src/complexity.cc
+++ b/src/complexity.cc
@@ -29,18 +29,18 @@ namespace benchmark {
 BigOFunc* FittingCurve(BigO complexity) {
   switch (complexity) {
     case oN:
-      return [](size_t n) -> double { return n; };
+      return [](int n) -> double { return n; };
     case oNSquared:
-      return [](size_t n) -> double { return n * n; };
+      return [](int n) -> double { return n * n; };
     case oNCubed:
-      return [](size_t n) -> double { return n * n * n; };
+      return [](int n) -> double { return n * n * n; };
     case oLogN:
-      return [](size_t n) { return log2(n); };
+      return [](int n) { return log2(n); };
     case oNLogN:
-      return [](size_t n) { return n * log2(n); };
+      return [](int n) { return n * log2(n); };
     case o1:
     default:
-      return [](size_t) { return 1.0; };
+      return [](int) { return 1.0; };
   }
 }
diff --git a/test/complexity_test.cc b/test/complexity_test.cc
index 03bd2dcc..662b627a 100644
--- a/test/complexity_test.cc
+++ b/test/complexity_test.cc
@@ -154,7 +154,7 @@ void BM_Complexity_O1(benchmark::State& state) {
   state.SetComplexityN(state.range_x());
 }
 BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity(benchmark::o1);
-BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity([](size_t){return 1.0; });
+BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity([](int){return 1.0; });
 BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity();
@@ -192,7 +192,7 @@ void BM_Complexity_O_N(benchmark::State& state) {
   state.SetComplexityN(state.range_x());
 }
 BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oN);
-BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](size_t n) -> double{return n; });
+BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](int n) -> double{return n; });
 BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity();
@@ -220,7 +220,7 @@ static void BM_Complexity_O_N_log_N(benchmark::State& state) {
   state.SetComplexityN(state.range_x());
 }
 BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oNLogN);
-BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](size_t n) {return n * log2(n); });
+BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](int n) {return n * log2(n); });
 BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity();

From 1a633969b31b2a486bdfae80576c53f40293281e Mon Sep 17 00:00:00 2001
From: Ismael
Date: Thu, 2 Jun 2016 22:23:39 +0200
Subject: [PATCH 09/12] changed BigO logic order

---
 src/complexity.cc | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/complexity.cc b/src/complexity.cc
index e25aa3c4..24f1cf4e 100644
--- a/src/complexity.cc
+++ b/src/complexity.cc
@@ -242,12 +242,12 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
   LeastSq result_cpu;
   LeastSq result_real;

-  if (reports[0].complexity != oLambda) {
-    result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
-    result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
-  } else {
+  if (reports[0].complexity == oLambda) {
     result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
     result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
+  } else {
+    result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
+    result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
   }

From 8ba94b4c1842d9424c592258a6dfc9beea4912c8 Mon Sep 17 00:00:00 2001
From: Ismael
Date: Thu, 2 Jun 2016 22:40:21 +0200
Subject: [PATCH 10/12] changed global string to const char *

---
 test/complexity_test.cc | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/test/complexity_test.cc b/test/complexity_test.cc
index 662b627a..ee242021 100644
--- a/test/complexity_test.cc
+++ b/test/complexity_test.cc
@@ -157,10 +157,10 @@ BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity(benchmark::o1);
 BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity([](int){return 1.0; });
 BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity();

-std::string big_o_1_test_name = "BM_Complexity_O1_BigO";
-std::string rms_o_1_test_name = "BM_Complexity_O1_RMS";
-std::string enum_auto_big_o_1 = "\\([0-9]+\\)";
-std::string lambda_big_o_1 = "f\\(N\\)";
+const char* big_o_1_test_name = "BM_Complexity_O1_BigO";
+const char* rms_o_1_test_name = "BM_Complexity_O1_RMS";
+const char* enum_auto_big_o_1 = "\\([0-9]+\\)";
+const char* lambda_big_o_1 = "f\\(N\\)";

 // Add enum tests
 ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
@@ -195,10 +195,10 @@ BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oN);
 BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](int n) -> double{return n; });
 BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity();

-std::string big_o_n_test_name = "BM_Complexity_O_N_BigO";
-std::string rms_o_n_test_name = "BM_Complexity_O_N_RMS";
-std::string enum_auto_big_o_n = "N";
-std::string lambda_big_o_n = "f\\(N\\)";
+const char* big_o_n_test_name = "BM_Complexity_O_N_BigO";
+const char* rms_o_n_test_name = "BM_Complexity_O_N_RMS";
+const char* enum_auto_big_o_n = "N";
+const char* lambda_big_o_n = "f\\(N\\)";

 // Add enum tests
 ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
@@ -223,10 +223,10 @@ BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oNLogN);
 BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](int n) {return n * log2(n); });
 BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity();

-std::string big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
-std::string rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
-std::string enum_auto_big_o_n_lg_n = "NlgN";
-std::string lambda_big_o_n_lg_n = "f\\(N\\)";
+const char* big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
+const char* rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
+const char* enum_auto_big_o_n_lg_n = "NlgN";
+const char* lambda_big_o_n_lg_n = "f\\(N\\)";

 // Add enum tests
 ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,

From 2859ae93949a7a3415082e65001f25e8e5e78284 Mon Sep 17 00:00:00 2001
From: Ismael
Date: Thu, 2 Jun 2016 23:27:29 +0200
Subject: [PATCH 11/12] changed complexity_n to int and fix some whitespaces

---
 include/benchmark/benchmark_api.h | 4 ++--
 include/benchmark/reporter.h      | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h
index 34cd0b5c..f38dc974 100644
--- a/include/benchmark/benchmark_api.h
+++ b/include/benchmark/benchmark_api.h
@@ -363,7 +363,7 @@ public:
   // family benchmark, then current benchmark will be part of the computation and complexity_n will
   // represent the length of N.
   BENCHMARK_ALWAYS_INLINE
-  void SetComplexityN(size_t complexity_n) {
+  void SetComplexityN(int complexity_n) {
     complexity_n_ = complexity_n;
   }

@@ -444,7 +444,7 @@ private:
   size_t bytes_processed_;
   size_t items_processed_;

-  size_t complexity_n_;
+  int complexity_n_;

 public:
   // FIXME: Make this private somehow.
diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h
index e3a8f573..22c97a01 100644
--- a/include/benchmark/reporter.h
+++ b/include/benchmark/reporter.h
@@ -87,7 +87,7 @@ class BenchmarkReporter {
   // Keep track of arguments to compute asymptotic complexity
   BigO complexity;
   BigOFunc* complexity_lambda;
-  size_t complexity_n;
+  int complexity_n;

   // Inform print function whether the current run is a complexity report
   bool report_big_o;
   bool report_rms;
@@ -133,14 +133,14 @@ class BenchmarkReporter {
     error_stream_ = err;
   }

-  std::ostream& GetOutputStream() const { 
+  std::ostream& GetOutputStream() const {
     return *output_stream_;
   }

   std::ostream& GetErrorStream() const {
     return *error_stream_;
   }
- 
+
   virtual ~BenchmarkReporter();

   // Write a human readable string to 'out' representing the specified

From e49814316891c8d6e125da5d3f72c7bc45f31bf4 Mon Sep 17 00:00:00 2001
From: Dominic Hamon
Date: Thu, 2 Jun 2016 14:37:14 -0700
Subject: [PATCH 12/12] fix warning on loss of integer precision

---
 src/benchmark.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/benchmark.cc b/src/benchmark.cc
index 00654334..f6c4fc2b 100644
--- a/src/benchmark.cc
+++ b/src/benchmark.cc
@@ -130,7 +130,7 @@ struct ThreadStats {
   ThreadStats() : bytes_processed(0), items_processed(0), complexity_n(0) {}
   int64_t bytes_processed;
   int64_t items_processed;
-  size_t complexity_n;
+  int complexity_n;
 };

 // Timer management class
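
Taken together, the series leaves `Complexity()` accepting a `BigO` enum, no argument (auto-fit over lgN, N, NlgN, N^2 and N^3, as listed in `fit_curves` above), or a user-supplied `BigOFunc`, which after PATCH 08 takes an `int`. The sketch below shows how the resulting API would be exercised end to end; the benchmark name and vector workload are illustrative only, not taken from the patches:

```c++
#include "benchmark/benchmark_api.h"

#include <vector>

// Hypothetical O(N) workload, used only to exercise the new overloads.
static void BM_VectorFill(benchmark::State& state) {
  while (state.KeepRunning()) {
    std::vector<int> v;
    for (int i = 0; i < state.range_x(); ++i) v.push_back(i);
  }
  // Record N so ComputeBigO() can fit time = coef * f(N) across the range.
  state.SetComplexityN(state.range_x());
}

// Fit against a caller-provided curve (BigOFunc is double(int) after PATCH 08).
BENCHMARK(BM_VectorFill)->RangeMultiplier(2)->Range(1<<10, 1<<16)
    ->Complexity([](int n) -> double { return n; });

// Or let oAuto pick the best-fitting curve among lgN, N, NlgN, N^2 and N^3.
BENCHMARK(BM_VectorFill)->RangeMultiplier(2)->Range(1<<10, 1<<16)
    ->Complexity();

BENCHMARK_MAIN()
```

For a single curve f(N), `MinimalLeastSq` reduces to a one-parameter least-squares fit of time against f(N); the standard closed form is coef = sum(t_i * f(n_i)) / sum(f(n_i)^2) when minimizing absolute squared error (the in-tree comment mentions relative error, which only changes the per-point weighting). The `_RMS` rows then report the scatter of the measurements around that fit relative to its scale, which is why the console, CSV and JSON reporters all print them as percentages.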