mirror of
https://github.com/google/benchmark.git
synced 2025-01-14 05:40:14 +08:00
added lambdas to complexity report
This commit is contained in:
parent
74a278e206
commit
867f9145a0
include/benchmark/benchmark_api.h
@@ -152,6 +152,7 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
 #include <assert.h>
 #include <stddef.h>
 #include <stdint.h>
+#include <functional>

 #include "macros.h"

@@ -247,15 +248,20 @@ enum BigO {
   oNCubed,
   oLogN,
   oNLogN,
-  oAuto
+  oAuto,
+  oLambda
 };

+// BigOFunc is passed to a benchmark in order to specify the asymptotic
+// computational complexity for the benchmark.
+typedef double(BigOFunc)(size_t);
+
 // State is passed to a running Benchmark and contains state for the
 // benchmark to use.
 class State {
 public:
   State(size_t max_iters, bool has_x, int x, bool has_y, int y,
         int thread_i, int n_threads);

 // Returns true iff the benchmark should continue through another iteration.
 // NOTE: A benchmark may not return from the test until KeepRunning() has
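Aside, not part of the commit: the new BigOFunc typedef accepts any capture-free callable with the shape double(size_t). A minimal sketch follows; the names NCubedCurve and kNLogNCurve are invented for illustration only.

#include <cmath>
#include "benchmark/benchmark_api.h"

// Illustration only: both of these match the BigOFunc shape double(size_t)
// and therefore convert to the BigOFunc* that Complexity() can store.
static double NCubedCurve(size_t n) { return static_cast<double>(n) * n * n; }
static benchmark::BigOFunc* kNLogNCurve = [](size_t n) { return n * std::log2(n); };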
@@ -268,13 +274,13 @@ public:
     }
     bool const res = total_iterations_++ < max_iterations;
     if (BENCHMARK_BUILTIN_EXPECT(!res, false)) {
       assert(started_ && (!finished_ || error_occurred_));
       if (!error_occurred_) {
         PauseTiming();
       }
       // Total iterations now is one greater than max iterations. Fix this.
       total_iterations_ = max_iterations;
       finished_ = true;
     }
     return res;
   }

@@ -359,7 +365,7 @@ public:
   // represent the length of N.
   BENCHMARK_ALWAYS_INLINE
   void SetComplexityN(size_t complexity_n) {
     complexity_n_ = complexity_n;
   }

   BENCHMARK_ALWAYS_INLINE

@@ -533,10 +539,14 @@ public:
   // to control how many iterations are run, and in the printing of items/second
   // or MB/second values.
   Benchmark* UseManualTime();

   // Set the asymptotic computational complexity for the benchmark. If called
   // the asymptotic computational complexity will be shown on the output.
   Benchmark* Complexity(BigO complexity = benchmark::oAuto);

+  // Set the asymptotic computational complexity for the benchmark. If called
+  // the asymptotic computational complexity will be shown on the output.
+  Benchmark* Complexity(BigOFunc* complexity);
+
   // Support for running multiple copies of the same benchmark concurrently
   // in multiple threads. This may be useful when measuring the scaling
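Usage sketch, not from the diff (the benchmark body and the name BM_VectorPushBack are placeholders). It mirrors the registrations added to test/complexity_test.cc further down and shows the three ways Complexity() can now be called: an explicit enumerator, oAuto via the default argument, or the new lambda overload.

#include <vector>
#include "benchmark/benchmark_api.h"

static void BM_VectorPushBack(benchmark::State& state) {
  while (state.KeepRunning()) {
    std::vector<int> v;
    for (int i = 0; i < state.range_x(); ++i) v.push_back(i);
  }
  state.SetComplexityN(state.range_x());  // N used for the complexity fit
}
// Fit against a fixed curve, auto-detect the best fit, or supply the curve directly.
BENCHMARK(BM_VectorPushBack)->RangeMultiplier(2)->Range(1<<10, 1<<16)->Complexity(benchmark::oN);
BENCHMARK(BM_VectorPushBack)->RangeMultiplier(2)->Range(1<<10, 1<<16)->Complexity();
BENCHMARK(BM_VectorPushBack)->RangeMultiplier(2)->Range(1<<10, 1<<16)->Complexity([](size_t n) -> double { return n; });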
include/benchmark/reporter.h
@@ -83,11 +83,12 @@ class BenchmarkReporter {

     // This is set to 0.0 if memory tracing is not enabled.
     double max_heapbytes_used;

     // Keep track of arguments to compute asymptotic complexity
     BigO complexity;
-    int complexity_n;
+    BigOFunc* complexity_lambda;
+    size_t complexity_n;

     // Inform print function whether the current run is a complexity report
     bool report_big_o;
     bool report_rms;

@@ -113,7 +114,7 @@ class BenchmarkReporter {
   // 'reports' contains additional entries representing the asymptotic
   // complexity and RMS of that benchmark family.
   virtual void ReportRuns(const std::vector<Run>& report) = 0;

   // Called once and only once after ever group of benchmarks is run and
   // reported.
   virtual void Finalize() {}

@@ -159,7 +160,7 @@ class ConsoleReporter : public BenchmarkReporter {
   virtual bool ReportContext(const Context& context);
   virtual void ReportRuns(const std::vector<Run>& reports);

 protected:
   virtual void PrintRunData(const Run& report);

   size_t name_field_width_;

@@ -189,25 +190,25 @@ private:

 inline const char* GetTimeUnitString(TimeUnit unit) {
   switch (unit) {
     case kMillisecond:
       return "ms";
     case kMicrosecond:
       return "us";
     case kNanosecond:
     default:
       return "ns";
   }
 }

 inline double GetTimeUnitMultiplier(TimeUnit unit) {
   switch (unit) {
     case kMillisecond:
       return 1e3;
     case kMicrosecond:
       return 1e6;
     case kNanosecond:
     default:
       return 1e9;
   }
 }
src/benchmark.cc
@@ -124,7 +124,7 @@ struct ThreadStats {
   ThreadStats() : bytes_processed(0), items_processed(0), complexity_n(0) {}
   int64_t bytes_processed;
   int64_t items_processed;
-  int complexity_n;
+  size_t complexity_n;
 };

 // Timer management class

@@ -140,7 +140,7 @@ class TimerManager {
       manual_time_used_(0),
       num_finalized_(0),
       phase_number_(0),
       entered_(0)
   {
   }

@@ -277,11 +277,11 @@ class TimerManager {
     int phase_number_cp = phase_number_;
     auto cb = [this, phase_number_cp]() {
       return this->phase_number_ > phase_number_cp ||
              entered_ == running_threads_; // A thread has aborted in error
     };
     phase_condition_.wait(ml.native_handle(), cb);
     if (phase_number_ > phase_number_cp)
       return false;
     // else (running_threads_ == entered_) and we are the last thread.
   }
   // Last thread has reached the barrier

@@ -311,6 +311,7 @@ struct Benchmark::Instance {
   bool use_real_time;
   bool use_manual_time;
   BigO complexity;
+  BigOFunc* complexity_lambda;
   bool last_benchmark_instance;
   int repetitions;
   double min_time;

@@ -356,6 +357,7 @@ public:
   void UseRealTime();
   void UseManualTime();
   void Complexity(BigO complexity);
+  void ComplexityLambda(BigOFunc* complexity);
   void Threads(int t);
   void ThreadRange(int min_threads, int max_threads);
   void ThreadPerCpu();

@@ -376,6 +378,7 @@ private:
   bool use_real_time_;
   bool use_manual_time_;
   BigO complexity_;
+  BigOFunc* complexity_lambda_;
   std::vector<int> thread_counts_;

   BenchmarkImp& operator=(BenchmarkImp const&);

@@ -440,6 +443,7 @@ bool BenchmarkFamilies::FindBenchmarks(
     instance.use_real_time = family->use_real_time_;
     instance.use_manual_time = family->use_manual_time_;
     instance.complexity = family->complexity_;
+    instance.complexity_lambda = family->complexity_lambda_;
     instance.threads = num_threads;
     instance.multithreaded = !(family->thread_counts_.empty());
@@ -567,6 +571,10 @@ void BenchmarkImp::Complexity(BigO complexity){
   complexity_ = complexity;
 }

+void BenchmarkImp::ComplexityLambda(BigOFunc* complexity) {
+  complexity_lambda_ = complexity;
+}
+
 void BenchmarkImp::Threads(int t) {
   CHECK_GT(t, 0);
   thread_counts_.push_back(t);

@@ -691,6 +699,12 @@ Benchmark* Benchmark::Complexity(BigO complexity) {
   return this;
 }

+Benchmark* Benchmark::Complexity(BigOFunc* complexity) {
+  imp_->Complexity(oLambda);
+  imp_->ComplexityLambda(complexity);
+  return this;
+}
+
 Benchmark* Benchmark::Threads(int t) {
   imp_->Threads(t);
   return this;

@@ -717,7 +731,7 @@ void FunctionBenchmark::Run(State& st) {
 } // end namespace internal

 namespace {

 // Execute one thread of benchmark b for the specified number of iterations.
 // Adds the stats collected for the thread into *total.
 void RunInThread(const benchmark::internal::Benchmark::Instance* b,
@@ -731,15 +745,15 @@ void RunInThread(const benchmark::internal::Benchmark::Instance* b,
     MutexLock l(GetBenchmarkLock());
     total->bytes_processed += st.bytes_processed();
     total->items_processed += st.items_processed();
     total->complexity_n += st.complexity_length_n();
   }

   timer_manager->Finalize();
 }

 void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
                   BenchmarkReporter* br,
                   std::vector<BenchmarkReporter::Run>& complexity_reports)
     EXCLUDES(GetBenchmarkLock()) {
   size_t iters = 1;

@@ -750,7 +764,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
   pool.resize(b.threads);

   const int repeats = b.repetitions != 0 ? b.repetitions
                                          : FLAGS_benchmark_repetitions;
   for (int i = 0; i < repeats; i++) {
     std::string mem;
     for (;;) {

@@ -830,27 +844,28 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
     report.time_unit = b.time_unit;

     if (!report.error_occurred) {
       double bytes_per_second = 0;
       if (total.bytes_processed > 0 && seconds > 0.0) {
         bytes_per_second = (total.bytes_processed / seconds);
       }
       double items_per_second = 0;
       if (total.items_processed > 0 && seconds > 0.0) {
         items_per_second = (total.items_processed / seconds);
       }

       if (b.use_manual_time) {
         report.real_accumulated_time = manual_accumulated_time;
       } else {
         report.real_accumulated_time = real_accumulated_time;
       }
       report.cpu_accumulated_time = cpu_accumulated_time;
       report.bytes_per_second = bytes_per_second;
       report.items_per_second = items_per_second;
       report.complexity_n = total.complexity_n;
       report.complexity = b.complexity;
+      report.complexity_lambda = b.complexity_lambda;
       if(report.complexity != oNone)
         complexity_reports.push_back(report);
     }

     reports.push_back(report);
@@ -878,17 +893,17 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
   }
   std::vector<BenchmarkReporter::Run> additional_run_stats = ComputeStats(reports);
   reports.insert(reports.end(), additional_run_stats.begin(),
                  additional_run_stats.end());

   if((b.complexity != oNone) && b.last_benchmark_instance) {
     additional_run_stats = ComputeBigO(complexity_reports);
     reports.insert(reports.end(), additional_run_stats.begin(),
                    additional_run_stats.end());
     complexity_reports.clear();
   }

   br->ReportRuns(reports);

   if (b.multithreaded) {
     for (std::thread& thread : pool)
       thread.join();
@@ -949,56 +964,56 @@ void State::SetLabel(const char* label) {
 }

 namespace internal {
 namespace {

 void RunMatchingBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
                            BenchmarkReporter* reporter) {
   CHECK(reporter != nullptr);

   // Determine the width of the name field using a minimum width of 10.
   bool has_repetitions = FLAGS_benchmark_repetitions > 1;
   size_t name_field_width = 10;
   for (const Benchmark::Instance& benchmark : benchmarks) {
     name_field_width =
         std::max<size_t>(name_field_width, benchmark.name.size());
     has_repetitions |= benchmark.repetitions > 1;
   }
   if (has_repetitions)
     name_field_width += std::strlen("_stddev");

   // Print header here
   BenchmarkReporter::Context context;
   context.num_cpus = NumCPUs();
   context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f;

   context.cpu_scaling_enabled = CpuScalingEnabled();
   context.name_field_width = name_field_width;

   // Keep track of runing times of all instances of current benchmark
   std::vector<BenchmarkReporter::Run> complexity_reports;

   if (reporter->ReportContext(context)) {
     for (const auto& benchmark : benchmarks) {
       RunBenchmark(benchmark, reporter, complexity_reports);
     }
   }
 }

 std::unique_ptr<BenchmarkReporter> GetDefaultReporter() {
   typedef std::unique_ptr<BenchmarkReporter> PtrType;
   if (FLAGS_benchmark_format == "console") {
     return PtrType(new ConsoleReporter);
   } else if (FLAGS_benchmark_format == "json") {
     return PtrType(new JSONReporter);
   } else if (FLAGS_benchmark_format == "csv") {
     return PtrType(new CSVReporter);
   } else {
     std::cerr << "Unexpected format: '" << FLAGS_benchmark_format << "'\n";
     std::exit(1);
   }
 }

 } // end namespace
 } // end namespace internal

 size_t RunSpecifiedBenchmarks() {
src/complexity.cc
@@ -25,43 +25,43 @@
 #include <functional>

 namespace benchmark {

 // Internal function to calculate the different scalability forms
-std::function<double(int)> FittingCurve(BigO complexity) {
+BigOFunc* FittingCurve(BigO complexity) {
   switch (complexity) {
-    case oN:
-      return [](int n) {return n; };
-    case oNSquared:
-      return [](int n) {return n*n; };
-    case oNCubed:
-      return [](int n) {return n*n*n; };
-    case oLogN:
-      return [](int n) {return log2(n); };
-    case oNLogN:
-      return [](int n) {return n * log2(n); };
-    case o1:
-    default:
-      return [](int) {return 1; };
+    case oN:
+      return [](size_t n) -> double {return n; };
+    case oNSquared:
+      return [](size_t n) -> double {return n * n; };
+    case oNCubed:
+      return [](size_t n) -> double {return n * n * n; };
+    case oLogN:
+      return [](size_t n) {return log2(n); };
+    case oNLogN:
+      return [](size_t n) {return n * log2(n); };
+    case o1:
+    default:
+      return [](size_t) {return 1.0; };
   }
 }

 // Function to return an string for the calculated complexity
 std::string GetBigOString(BigO complexity) {
   switch (complexity) {
-    case oN:
-      return "* N";
-    case oNSquared:
-      return "* N**2";
-    case oNCubed:
-      return "* N**3";
-    case oLogN:
-      return "* lgN";
-    case oNLogN:
-      return "* NlgN";
-    case o1:
-      return "* 1";
-    default:
-      return "";
+    case oN:
+      return "N";
+    case oNSquared:
+      return "N^2";
+    case oNCubed:
+      return "N^3";
+    case oLogN:
+      return "lgN";
+    case oNLogN:
+      return "NlgN";
+    case o1:
+      return "(1)";
+    default:
+      return "f(N)";
   }
 }
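Side note, not from the diff: the built-in curves and user-supplied lambdas now share the BigOFunc* type, so inside src/complexity.cc a fitted curve can be stored and evaluated uniformly. A hypothetical two-liner:

// Illustration only: same pointer type whether the curve came from FittingCurve()
// or from a user lambda passed to Complexity().
BigOFunc* curve = FittingCurve(oNLogN);
double predicted = curve(1024);  // evaluate the curve at N = 1024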
@@ -75,21 +75,9 @@ std::string GetBigOString(BigO complexity) {
 // For a deeper explanation on the algorithm logic, look the README file at
 // http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit

-// This interface is currently not used from the oustide, but it has been
-// provided for future upgrades. If in the future it is not needed to support
-// Cxx03, then all the calculations could be upgraded to use lambdas because
-// they are more powerful and provide a cleaner inferface than enumerators,
-// but complete implementation with lambdas will not work for Cxx03
-// (e.g. lack of std::function).
-// In case lambdas are implemented, the interface would be like :
-// -> Complexity([](int n) {return n;};)
-// and any arbitrary and valid equation would be allowed, but the option to
-// calculate the best fit to the most common scalability curves will still
-// be kept.
-
-LeastSq CalculateLeastSq(const std::vector<int>& n,
-                         const std::vector<double>& time,
-                         std::function<double(int)> fitting_curve) {
+LeastSq MinimalLeastSq(const std::vector<int>& n,
+                       const std::vector<double>& time,
+                       BigOFunc* fitting_curve) {
   double sigma_gn = 0.0;
   double sigma_gn_squared = 0.0;
   double sigma_time = 0.0;

@@ -105,6 +93,7 @@ LeastSq CalculateLeastSq(const std::vector<int>& n,
   }

   LeastSq result;
+  result.complexity = oLambda;

   // Calculate complexity.
   result.coef = sigma_time_gn / sigma_gn_squared;
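Aside, not part of the diff: MinimalLeastSq models time(n) ≈ coef · fitting_curve(n) and chooses coef to minimise the squared error sum_i (time_i − coef·g(n_i))^2. Setting the derivative with respect to coef to zero gives coef = sum_i time_i·g(n_i) / sum_i g(n_i)^2, which is exactly the sigma_time_gn / sigma_gn_squared quotient computed above. The oLambda tag set on the result is what later makes the reporters fall back to the generic "f(N)" label for user-supplied curves.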
@@ -144,19 +133,19 @@ LeastSq MinimalLeastSq(const std::vector<int>& n,
     oLogN, oN, oNLogN, oNSquared, oNCubed };

   // Take o1 as default best fitting curve
-  best_fit = CalculateLeastSq(n, time, FittingCurve(o1));
+  best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
   best_fit.complexity = o1;

   // Compute all possible fitting curves and stick to the best one
   for (const auto& fit : fit_curves) {
-    LeastSq current_fit = CalculateLeastSq(n, time, FittingCurve(fit));
+    LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
     if (current_fit.rms < best_fit.rms) {
       best_fit = current_fit;
       best_fit.complexity = fit;
     }
   }
 } else {
-  best_fit = CalculateLeastSq(n, time, FittingCurve(complexity));
+  best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
   best_fit.complexity = complexity;
 }

@@ -164,14 +153,14 @@ LeastSq MinimalLeastSq(const std::vector<int>& n,
 }

 std::vector<BenchmarkReporter::Run> ComputeStats(
     const std::vector<BenchmarkReporter::Run>& reports)
 {
   typedef BenchmarkReporter::Run Run;
   std::vector<Run> results;

   auto error_count = std::count_if(
       reports.begin(), reports.end(),
       [](Run const& run) {return run.error_occurred;});

   if (reports.size() - error_count < 2) {
     // We don't report aggregated data if there was a single run.

@@ -193,9 +182,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
     if (run.error_occurred)
       continue;
     real_accumulated_time_stat +=
        Stat1_d(run.real_accumulated_time/run.iterations, run.iterations);
     cpu_accumulated_time_stat +=
        Stat1_d(run.cpu_accumulated_time/run.iterations, run.iterations);
     items_per_second_stat += Stat1_d(run.items_per_second, run.iterations);
     bytes_per_second_stat += Stat1_d(run.bytes_per_second, run.iterations);
   }

@@ -205,9 +194,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
   mean_data.benchmark_name = reports[0].benchmark_name + "_mean";
   mean_data.iterations = run_iterations;
   mean_data.real_accumulated_time = real_accumulated_time_stat.Mean() *
                                     run_iterations;
   mean_data.cpu_accumulated_time = cpu_accumulated_time_stat.Mean() *
                                    run_iterations;
   mean_data.bytes_per_second = bytes_per_second_stat.Mean();
   mean_data.items_per_second = items_per_second_stat.Mean();

@@ -225,9 +214,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
   stddev_data.report_label = mean_data.report_label;
   stddev_data.iterations = 0;
   stddev_data.real_accumulated_time =
       real_accumulated_time_stat.StdDev();
   stddev_data.cpu_accumulated_time =
       cpu_accumulated_time_stat.StdDev();
   stddev_data.bytes_per_second = bytes_per_second_stat.StdDev();
   stddev_data.items_per_second = items_per_second_stat.StdDev();

@@ -237,7 +226,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
 }

 std::vector<BenchmarkReporter::Run> ComputeBigO(
     const std::vector<BenchmarkReporter::Run>& reports)
 {
   typedef BenchmarkReporter::Run Run;
   std::vector<Run> results;

@@ -256,14 +245,16 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
     cpu_time.push_back(run.cpu_accumulated_time/run.iterations);
   }

-  LeastSq result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
-
-  // result_cpu.complexity is passed as parameter to result_real because in case
-  // reports[0].complexity is oAuto, the noise on the measured data could make
-  // the best fit function of Cpu and Real differ. In order to solve this, we
-  // take the best fitting function for the Cpu, and apply it to Real data.
-  LeastSq result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
+  LeastSq result_cpu;
+  LeastSq result_real;
+
+  if (reports[0].complexity != oLambda) {
+    result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
+    result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
+  } else {
+    result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
+    result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
+  }
   std::string benchmark_name = reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));

   // Get the data from the accumulator to BenchmarkReporter::Run's.
src/complexity.h
@@ -26,15 +26,15 @@

 namespace benchmark {

 // Return a vector containing the mean and standard devation information for
 // the specified list of reports. If 'reports' contains less than two
 // non-errored runs an empty vector is returned
 std::vector<BenchmarkReporter::Run> ComputeStats(
     const std::vector<BenchmarkReporter::Run>& reports);

 // Return a vector containing the bigO and RMS information for the specified
 // list of reports. If 'reports.size() < 2' an empty vector is returned.
 std::vector<BenchmarkReporter::Run> ComputeBigO(
     const std::vector<BenchmarkReporter::Run>& reports);

 // This data structure will contain the result returned by MinimalLeastSq

@@ -60,11 +60,5 @@ struct LeastSq {
 // Function to return an string for the calculated complexity
 std::string GetBigOString(BigO complexity);

-// Find the coefficient for the high-order term in the running time, by
-// minimizing the sum of squares of relative error.
-LeastSq MinimalLeastSq(const std::vector<int>& n,
-                       const std::vector<double>& time,
-                       const BigO complexity = oAuto);
-
 } // end namespace benchmark
 #endif // COMPLEXITY_H_
src/console_reporter.cc
@@ -90,8 +90,8 @@ void ConsoleReporter::PrintRunData(const Run& result) {
   const double cpu_time = result.GetAdjustedCPUTime();

   if(result.report_big_o) {
-    std::string big_o = result.report_big_o ? GetBigOString(result.complexity) : "";
-    ColorPrintf(Out, COLOR_YELLOW, "%10.4f %s %10.4f %s ",
+    std::string big_o = GetBigOString(result.complexity);
+    ColorPrintf(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ",
                 real_time, big_o.c_str(), cpu_time, big_o.c_str());
   } else if(result.report_rms) {
     ColorPrintf(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ",
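For orientation only: the shape below is derived from the format strings above and the names from the tests added further down; the numbers are invented, not captured from a real run. A complexity summary in the console reporter now looks roughly like

BM_Complexity_O_N_BigO          2.12 N          2.10 N
BM_Complexity_O_N_RMS              3 %             3 %

where the two values on the BigO line are the fitted real-time and CPU-time coefficients and the trailing label comes from GetBigOString(result.complexity): "N", "NlgN", "(1)", or "f(N)" for a user-supplied lambda.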
src/csv_reporter.cc
@@ -13,6 +13,7 @@
 // limitations under the License.

 #include "benchmark/reporter.h"
+#include "complexity.h"

 #include <cstdint>
 #include <algorithm>

@@ -87,8 +88,10 @@ void CSVReporter::PrintRunData(const Run & run) {
   Out << run.GetAdjustedRealTime() << ",";
   Out << run.GetAdjustedCPUTime() << ",";

-  // Do not print timeLabel on RMS report
-  if(!run.report_rms) {
+  // Do not print timeLabel on bigO and RMS report
+  if(run.report_big_o) {
+    Out << GetBigOString(run.complexity);
+  } else if(!run.report_rms){
     Out << GetTimeUnitString(run.time_unit);
   }
   Out << ",";
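Illustrative only: the column layout below is taken from the CSV regexes in the test added further down, and the values are made up. A BigO row now carries the curve label where the time unit normally goes, and the RMS row leaves that column empty:

"BM_Complexity_O_N_BigO",,1.23,1.21,N,,,,,
"BM_Complexity_O_N_RMS",,0.05,0.04,,,,,,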
src/json_reporter.cc
@@ -13,6 +13,7 @@
 // limitations under the License.

 #include "benchmark/reporter.h"
+#include "complexity.h"

 #include <cstdint>
 #include <algorithm>

@@ -132,15 +133,29 @@ void JSONReporter::PrintRunData(Run const& run) {
   out << indent
       << FormatKV("iterations", run.iterations)
       << ",\n";
 }
 out << indent
     << FormatKV("real_time", RoundDouble(run.GetAdjustedRealTime()))
     << ",\n";
 out << indent
     << FormatKV("cpu_time", RoundDouble(run.GetAdjustedCPUTime()));
 if(!run.report_rms) {
   out << indent
       << FormatKV("real_time", RoundDouble(run.GetAdjustedRealTime()))
       << ",\n";
   out << indent
       << FormatKV("cpu_time", RoundDouble(run.GetAdjustedCPUTime()));
   out << ",\n" << indent
       << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
 } else if(run.report_big_o) {
   out << indent
       << FormatKV("cpu_coefficient", RoundDouble(run.GetAdjustedCPUTime()))
       << ",\n";
   out << indent
       << FormatKV("real_coefficient", RoundDouble(run.GetAdjustedRealTime()))
       << ",\n";
   out << indent
       << FormatKV("big_o", GetBigOString(run.complexity))
       << ",\n";
   out << indent
       << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
 } else if(run.report_rms) {
   out << indent
       << FormatKV("rms", RoundDouble(run.GetAdjustedCPUTime()*100))
       << "%";
 }
 if (run.bytes_per_second > 0.0) {
   out << ",\n" << indent
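Illustrative shape only: the field names follow the FormatKV calls above and the JSON regexes in the test below; the values are invented. A complexity-report entry and its RMS companion look roughly like

    "name": "BM_Complexity_O_N_BigO",
    "cpu_coefficient": 5,
    "real_coefficient": 5,
    "big_o": "N",
    "time_unit": "ns"
  },
  {
    "name": "BM_Complexity_O_N_RMS",
    "rms": 3%
  }

Note that the rms value is emitted as a bare percentage (RoundDouble(...*100) followed by "%"), which is exactly what the "\"rms\": [0-9]+%$" test regex checks for.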
test/complexity_test.cc
@@ -1,12 +1,183 @@
-#include "benchmark/benchmark_api.h"
-
-#include <cstdlib>
-#include <string>
+#undef NDEBUG
+#include "benchmark/benchmark.h"
+#include "../src/check.h" // NOTE: check.h is for internal use only!
+#include "../src/re.h"    // NOTE: re.h is for internal use only
+#include <cassert>
+#include <cstring>
+#include <iostream>
+#include <sstream>
+#include <vector>
+#include <map>
+#include <utility>
+#include <algorithm>
+
+namespace {
+
+// ========================================================================= //
+// -------------------------- Testing Case --------------------------------- //
+// ========================================================================= //
+
+enum MatchRules {
+  MR_Default, // Skip non-matching lines until a match is found.
+  MR_Next // Match must occur on the next line.
+};
+
+struct TestCase {
+  std::string regex;
+  int match_rule;
+
+  TestCase(std::string re, int rule = MR_Default) : regex(re), match_rule(rule) {}
+
+  void Check(std::stringstream& remaining_output) const {
+    benchmark::Regex r;
+    std::string err_str;
+    r.Init(regex, &err_str);
+    CHECK(err_str.empty()) << "Could not construct regex \"" << regex << "\""
+                           << " got Error: " << err_str;
+
+    std::string line;
+    while (remaining_output.eof() == false) {
+      CHECK(remaining_output.good());
+      std::getline(remaining_output, line);
+      if (r.Match(line)) return;
+      CHECK(match_rule != MR_Next) << "Expected line \"" << line
+                                   << "\" to match regex \"" << regex << "\"";
+    }
+
+    CHECK(remaining_output.eof() == false)
+        << "End of output reached before match for regex \"" << regex
+        << "\" was found";
+  }
+};
+
+std::vector<TestCase> ConsoleOutputTests;
+std::vector<TestCase> JSONOutputTests;
+std::vector<TestCase> CSVOutputTests;
+
+// ========================================================================= //
+// -------------------------- Test Helpers --------------------------------- //
+// ========================================================================= //
+
+class TestReporter : public benchmark::BenchmarkReporter {
+public:
+  TestReporter(std::vector<benchmark::BenchmarkReporter*> reps)
+      : reporters_(reps) {}
+
+  virtual bool ReportContext(const Context& context) {
+    bool last_ret = false;
+    bool first = true;
+    for (auto rep : reporters_) {
+      bool new_ret = rep->ReportContext(context);
+      CHECK(first || new_ret == last_ret)
+          << "Reports return different values for ReportContext";
+      first = false;
+      last_ret = new_ret;
+    }
+    return last_ret;
+  }
+
+  virtual void ReportRuns(const std::vector<Run>& report) {
+    for (auto rep : reporters_)
+      rep->ReportRuns(report);
+  }
+
+  virtual void Finalize() {
+    for (auto rep : reporters_)
+      rep->Finalize();
+  }
+
+private:
+  std::vector<benchmark::BenchmarkReporter*> reporters_;
+};
+
+#define CONCAT2(x, y) x##y
+#define CONCAT(x, y) CONCAT2(x, y)
+
+#define ADD_CASES(...) \
+  int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__)
+
+int AddCases(std::vector<TestCase>* out, std::initializer_list<TestCase> const& v) {
+  for (auto const& TC : v)
+    out->push_back(TC);
+  return 0;
+}
+
+template <class First>
+std::string join(First f) { return f; }
+
+template <class First, class ...Args>
+std::string join(First f, Args&&... args) {
+  return std::string(std::move(f)) + "[ ]+" + join(std::forward<Args>(args)...);
+}
+
+std::string dec_re = "[0-9]+\\.[0-9]+";
+
+#define ADD_COMPLEXITY_CASES(...) \
+  int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
+
+int AddComplexityTest(std::vector<TestCase>* console_out, std::vector<TestCase>* json_out,
+                      std::vector<TestCase>* csv_out, std::string big_o_test_name,
+                      std::string rms_test_name, std::string big_o) {
+  std::string big_o_str = dec_re + " " + big_o;
+  AddCases(console_out, {
+    {join("^" + big_o_test_name + "", big_o_str, big_o_str) + "[ ]*$"},
+    {join("^" + rms_test_name + "", "[0-9]+ %", "[0-9]+ %") + "[ ]*$"}
+  });
+  AddCases(json_out, {
+    {"\"name\": \"" + big_o_test_name + "\",$"},
+    {"\"cpu_coefficient\": [0-9]+,$", MR_Next},
+    {"\"real_coefficient\": [0-9]{1,5},$", MR_Next},
+    {"\"big_o\": \"" + big_o + "\",$", MR_Next},
+    {"\"time_unit\": \"ns\"$", MR_Next},
+    {"}", MR_Next},
+    {"\"name\": \"" + rms_test_name + "\",$"},
+    {"\"rms\": [0-9]+%$", MR_Next},
+    {"}", MR_Next}
+  });
+  AddCases(csv_out, {
+    {"^\"" + big_o_test_name + "\",," + dec_re + "," + dec_re + "," + big_o + ",,,,,$"},
+    {"^\"" + rms_test_name + "\",," + dec_re + "," + dec_re + ",,,,,,$"}
+  });
+  return 0;
+}
+
+} // end namespace
+
+// ========================================================================= //
+// --------------------------- Testing BigO O(1) --------------------------- //
+// ========================================================================= //
+
+void BM_Complexity_O1(benchmark::State& state) {
+  while (state.KeepRunning()) {
+  }
+  state.SetComplexityN(state.range_x());
+}
+BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity();
+BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity(benchmark::o1);
+BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity([](size_t){return 1.0; });
+
+std::string big_o_1_test_name = "BM_Complexity_O1_BigO";
+std::string rms_o_1_test_name = "BM_Complexity_O1_RMS";
+std::string enum_auto_big_o_1 = "\\([0-9]+\\)";
+std::string lambda_big_o_1 = "f\\(N\\)";
+
+// Add automatic tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+                     big_o_1_test_name, rms_o_1_test_name, enum_auto_big_o_1);
+
+// Add enum tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+                     big_o_1_test_name, rms_o_1_test_name, enum_auto_big_o_1);
+
+// Add lambda tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+                     big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1);
+
+// ========================================================================= //
+// --------------------------- Testing BigO O(N) --------------------------- //
+// ========================================================================= //
+
 std::vector<int> ConstructRandomVector(int size) {
   std::vector<int> v;
   v.reserve(size);

@@ -16,22 +187,7 @@ std::vector<int> ConstructRandomVector(int size) {
   return v;
 }

-std::map<int, int> ConstructRandomMap(int size) {
-  std::map<int, int> m;
-  for (int i = 0; i < size; ++i) {
-    m.insert(std::make_pair(rand() % size, rand() % size));
-  }
-  return m;
-}
-
-void BM_Complexity_O1(benchmark::State& state) {
-  while (state.KeepRunning()) {
-  }
-  state.SetComplexityN(state.range_x());
-}
-BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity(benchmark::o1);
-
-static void BM_Complexity_O_N(benchmark::State& state) {
+void BM_Complexity_O_N(benchmark::State& state) {
   auto v = ConstructRandomVector(state.range_x());
   const int item_not_in_vector = state.range_x()*2; // Test worst case scenario (item not in vector)
   while (state.KeepRunning()) {

@@ -39,51 +195,30 @@ static void BM_Complexity_O_N(benchmark::State& state) {
   }
   state.SetComplexityN(state.range_x());
 }
-BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oN);
+BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity();
+BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oN);
+BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](size_t n) -> double{return n; });

-static void BM_Complexity_O_N_Squared(benchmark::State& state) {
-  std::string s1(state.range_x(), '-');
-  std::string s2(state.range_x(), '-');
-  state.SetComplexityN(state.range_x());
-  while (state.KeepRunning())
-    for(char& c1 : s1) {
-      for(char& c2 : s2) {
-        benchmark::DoNotOptimize(c1 = 'a');
-        benchmark::DoNotOptimize(c2 = 'b');
-      }
-    }
-}
-BENCHMARK(BM_Complexity_O_N_Squared) -> Range(1, 1<<8) -> Complexity(benchmark::oNSquared);
+std::string big_o_n_test_name = "BM_Complexity_O_N_BigO";
+std::string rms_o_n_test_name = "BM_Complexity_O_N_RMS";
+std::string enum_auto_big_o_n = "N";
+std::string lambda_big_o_n = "f\\(N\\)";

-static void BM_Complexity_O_N_Cubed(benchmark::State& state) {
-  std::string s1(state.range_x(), '-');
-  std::string s2(state.range_x(), '-');
-  std::string s3(state.range_x(), '-');
-  state.SetComplexityN(state.range_x());
-  while (state.KeepRunning())
-    for(char& c1 : s1) {
-      for(char& c2 : s2) {
-        for(char& c3 : s3) {
-          benchmark::DoNotOptimize(c1 = 'a');
-          benchmark::DoNotOptimize(c2 = 'b');
-          benchmark::DoNotOptimize(c3 = 'c');
-        }
-      }
-    }
-}
-BENCHMARK(BM_Complexity_O_N_Cubed) -> DenseRange(1, 8) -> Complexity(benchmark::oNCubed);
+// Add automatic tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+                     big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n);

-static void BM_Complexity_O_log_N(benchmark::State& state) {
-  auto m = ConstructRandomMap(state.range_x());
-  const int item_not_in_vector = state.range_x()*2; // Test worst case scenario (item not in vector)
-  while (state.KeepRunning()) {
-    benchmark::DoNotOptimize(m.find(item_not_in_vector));
-  }
-  state.SetComplexityN(state.range_x());
-}
-BENCHMARK(BM_Complexity_O_log_N)
-  -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oLogN);
+// Add enum tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+                     big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n);

+// Add lambda tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+                     big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n);
+
+// ========================================================================= //
+// ------------------------- Testing BigO O(N*lgN) ------------------------- //
+// ========================================================================= //

 static void BM_Complexity_O_N_log_N(benchmark::State& state) {
   auto v = ConstructRandomVector(state.range_x());

@@ -92,15 +227,82 @@ static void BM_Complexity_O_N_log_N(benchmark::State& state) {
   }
   state.SetComplexityN(state.range_x());
 }
-BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oNLogN);
+BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity();
+BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oNLogN);
+BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](size_t n) {return n * log2(n); });

-// Test benchmark with no range and check no complexity is calculated.
-void BM_Extreme_Cases(benchmark::State& state) {
-  while (state.KeepRunning()) {
+std::string big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
+std::string rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
+std::string enum_auto_big_o_n_lg_n = "NlgN";
+std::string lambda_big_o_n_lg_n = "f\\(N\\)";
+
+// Add automatic tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+                     big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n);
+
+// Add enum tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+                     big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n);
+
+// Add lambda tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+                     big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n);
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) {
+  // Add --color_print=false to argv since we don't want to match color codes.
+  char new_arg[64];
+  char* new_argv[64];
+  std::copy(argv, argv + argc, new_argv);
+  new_argv[argc++] = std::strcpy(new_arg, "--color_print=false");
+  benchmark::Initialize(&argc, new_argv);
+
+  benchmark::ConsoleReporter CR;
+  benchmark::JSONReporter JR;
+  benchmark::CSVReporter CSVR;
+  struct ReporterTest {
+    const char* name;
+    std::vector<TestCase>& output_cases;
+    benchmark::BenchmarkReporter& reporter;
+    std::stringstream out_stream;
+    std::stringstream err_stream;
+
+    ReporterTest(const char* n,
+                 std::vector<TestCase>& out_tc,
+                 benchmark::BenchmarkReporter& br)
+        : name(n), output_cases(out_tc), reporter(br) {
+      reporter.SetOutputStream(&out_stream);
+      reporter.SetErrorStream(&err_stream);
+    }
+  } TestCases[] = {
+    {"ConsoleReporter", ConsoleOutputTests, CR},
+    {"JSONReporter", JSONOutputTests, JR},
+    {"CSVReporter", CSVOutputTests, CSVR}
+  };
+
+  // Create the test reporter and run the benchmarks.
+  std::cout << "Running benchmarks...\n";
+  TestReporter test_rep({&CR, &JR, &CSVR});
+  benchmark::RunSpecifiedBenchmarks(&test_rep);
+
+  for (auto& rep_test : TestCases) {
+    std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n";
+    std::string banner(msg.size() - 1, '-');
+    std::cout << banner << msg << banner << "\n";
+
+    std::cerr << rep_test.err_stream.str();
+    std::cout << rep_test.out_stream.str();
+
+    for (const auto& TC : rep_test.output_cases)
+      TC.Check(rep_test.out_stream);
+
+    std::cout << "\n";
+  }
+  return 0;
+}
-BENCHMARK(BM_Extreme_Cases) -> Complexity(benchmark::oNLogN);
-BENCHMARK(BM_Extreme_Cases) -> Arg(42) -> Complexity();
-
-BENCHMARK_MAIN()