Cleanup reporters (#226)

* Move ComputeStats call out of the reporters

* Cleanup adjusted time calculations in reporters

* Move ComputeBigO call out of reporters

* Remove ReportComplexity interface, using ReportRuns instead

* Factor out reporting of basic context information

* Attempt to fix GCC 4.6 build errors

* Move ComputeStats to complexity.cc
Eric 2016-05-27 16:45:25 -06:00
parent 238e558fdb
commit 1b263fe6d9
10 changed files with 266 additions and 356 deletions
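
The net effect of the changes below is that aggregate computation (mean/stddev and Big-O/RMS) moves out of the individual reporters and into the benchmark runner, so a custom reporter now only implements ReportContext and ReportRuns. A minimal sketch against the post-change interface (the class name and output format are illustrative, not part of this commit):

    #include "benchmark/reporter.h"

    class MinimalReporter : public benchmark::BenchmarkReporter {
     public:
      virtual bool ReportContext(const Context& context) {
        PrintBasicContext(&GetErrorStream(), context);  // helper factored out by this commit
        return true;
      }
      virtual void ReportRuns(const std::vector<Run>& reports) {
        // The runner appends any "_mean"/"_stddev" entries, plus "_BigO"/"_RMS"
        // for the last member of a family, before making this single call.
        for (const Run& run : reports)
          GetOutputStream() << run.benchmark_name << " "
                            << run.GetAdjustedRealTime() << " "
                            << GetTimeUnitString(run.time_unit) << "\n";
      }
    };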

View File

@@ -236,6 +236,30 @@ enum TimeUnit {
kMillisecond
};
inline const char* GetTimeUnitString(TimeUnit unit) {
switch (unit) {
case kMillisecond:
return "ms";
case kMicrosecond:
return "us";
case kNanosecond:
default:
return "ns";
}
}
inline double GetTimeUnitMultiplier(TimeUnit unit) {
switch (unit) {
case kMillisecond:
return 1e3;
case kMicrosecond:
return 1e6;
case kNanosecond:
default:
return 1e9;
}
}
// BigO is passed to a benchmark in order to specify the asymptotic computational
// complexity for the benchmark. In case oAuto is selected, complexity will be
// calculated automatically to the best fit.
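
For orientation, the two new inline helpers compose like this (a minimal sketch; the value is hypothetical):

    double seconds = 0.000042;  // an accumulated time, always stored in seconds
    double scaled = seconds * benchmark::GetTimeUnitMultiplier(benchmark::kMicrosecond);  // 42.0
    const char* label = benchmark::GetTimeUnitString(benchmark::kMicrosecond);            // "us"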

View File

@@ -24,8 +24,6 @@
namespace benchmark {
typedef std::pair<const char*,double> TimeUnitMultiplier;
// Interface for custom benchmark result printers.
// By default, benchmark reports are printed to stdout. However, an application
// can control the destination of the reports by calling
@@ -67,6 +65,18 @@ class BenchmarkReporter {
double real_accumulated_time;
double cpu_accumulated_time;
// Return a value representing the real time per iteration in the unit
// specified by 'time_unit'.
// NOTE: If 'iterations' is zero the returned value represents the
// accumulated time.
double GetAdjustedRealTime() const;
// Return a value representing the cpu time per iteration in the unit
// specified by 'time_unit'.
// NOTE: If 'iterations' is zero the returned value represents the
// accumulated time.
double GetAdjustedCPUTime() const;
// Zero if not set by benchmark.
double bytes_per_second;
double items_per_second;
@@ -96,20 +106,17 @@ class BenchmarkReporter {
virtual bool ReportContext(const Context& context) = 0;
// Called once for each group of benchmark runs, gives information about
// cpu-time and heap memory usage during the benchmark run.
// Note that all the grouped benchmark runs should refer to the same
// benchmark, thus have the same name.
// cpu-time and heap memory usage during the benchmark run. If the group
// of runs contained two or more entries then 'report' contains additional
// elements representing the mean and standard deviation of those runs.
// Additionally if this group of runs was the last in a family of benchmarks
// then 'report' contains additional entries representing the asymptotic
// complexity and RMS of that benchmark family.
virtual void ReportRuns(const std::vector<Run>& report) = 0;
// Called once at the last benchmark in a family of benchmarks, gives information
// about asymptotic complexity and RMS.
// Note that all the benchmark runs in a range should refer to the same benchmark,
// thus have the same name.
virtual void ReportComplexity(const std::vector<Run>& complexity_reports) = 0;
// Called once and only once after every group of benchmarks is run and
// reported.
virtual void Finalize();
virtual void Finalize() {}
// REQUIRES: The object referenced by 'out' is valid for the lifetime
// of the reporter.
@@ -134,11 +141,11 @@ class BenchmarkReporter {
}
virtual ~BenchmarkReporter();
protected:
static void ComputeStats(const std::vector<Run>& reports,
Run* mean, Run* stddev);
static void ComputeBigO(const std::vector<Run>& reports, Run* bigO, Run* rms);
static TimeUnitMultiplier GetTimeUnitAndMultiplier(TimeUnit unit);
// Write a human readable string to 'out' representing the specified
// 'context'.
// REQUIRES: 'out' is non-null.
static void PrintBasicContext(std::ostream* out, Context const& context);
private:
std::ostream* output_stream_;
@@ -151,9 +158,8 @@ class ConsoleReporter : public BenchmarkReporter {
public:
virtual bool ReportContext(const Context& context);
virtual void ReportRuns(const std::vector<Run>& reports);
virtual void ReportComplexity(const std::vector<Run>& complexity_reports);
protected:
protected:
virtual void PrintRunData(const Run& report);
size_t name_field_width_;
@@ -164,7 +170,6 @@ public:
JSONReporter() : first_report_(true) {}
virtual bool ReportContext(const Context& context);
virtual void ReportRuns(const std::vector<Run>& reports);
virtual void ReportComplexity(const std::vector<Run>& complexity_reports);
virtual void Finalize();
private:
@@ -177,7 +182,6 @@ class CSVReporter : public BenchmarkReporter {
public:
virtual bool ReportContext(const Context& context);
virtual void ReportRuns(const std::vector<Run>& reports);
virtual void ReportComplexity(const std::vector<Run>& complexity_reports);
private:
void PrintRunData(const Run& report);
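
To make the new accessors concrete, this is the arithmetic they encapsulate (hypothetical numbers; the default time_unit of kNanosecond is assumed):

    benchmark::BenchmarkReporter::Run run;
    run.iterations = 1000;
    run.real_accumulated_time = 2e-3;                // seconds, summed over all iterations
    double ns_per_iter = run.GetAdjustedRealTime();  // 2e-3 * 1e9 / 1000 == 2000.0
    // An aggregate entry with iterations == 0 (e.g. "_stddev") returns the
    // scaled accumulated value unchanged.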

View File

@@ -33,6 +33,7 @@
#include "check.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
@@ -717,7 +718,6 @@ void FunctionBenchmark::Run(State& st) {
namespace {
// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into *total.
void RunInThread(const benchmark::internal::Benchmark::Instance* b,
@@ -876,13 +876,19 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
iters = static_cast<int>(next_iters + 0.5);
}
}
br->ReportRuns(reports);
std::vector<BenchmarkReporter::Run> additional_run_stats = ComputeStats(reports);
reports.insert(reports.end(), additional_run_stats.begin(),
additional_run_stats.end());
if((b.complexity != oNone) && b.last_benchmark_instance) {
br->ReportComplexity(complexity_reports);
additional_run_stats = ComputeBigO(complexity_reports);
reports.insert(reports.end(), additional_run_stats.begin(),
additional_run_stats.end());
complexity_reports.clear();
}
br->ReportRuns(reports);
if (b.multithreaded) {
for (std::thread& thread : pool)
thread.join();
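
With the calls reordered this way, a reporter receives exactly one ReportRuns call per group, and the vector is laid out as sketched below ("BM_Foo" is a hypothetical benchmark; the suffixes follow the naming used in complexity.cc):

    // reports[0..R-1] : the R repetitions of "BM_Foo/64"
    // reports[R]      : "BM_Foo/64_mean"    -- present when two or more runs succeeded
    // reports[R+1]    : "BM_Foo/64_stddev"
    // reports[R+2]    : "BM_Foo_BigO"       -- present for the family's last instance
    // reports[R+3]    : "BM_Foo_RMS"           when a complexity was requested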

View File

@@ -19,7 +19,9 @@
#include "complexity.h"
#include "check.h"
#include <math.h>
#include "stat.h"
#include <cmath>
#include <algorithm>
#include <functional>
namespace benchmark {
@@ -161,4 +163,134 @@ LeastSq MinimalLeastSq(const std::vector<int>& n,
return best_fit;
}
std::vector<BenchmarkReporter::Run> ComputeStats(
const std::vector<BenchmarkReporter::Run>& reports)
{
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
auto error_count = std::count_if(
reports.begin(), reports.end(),
[](Run const& run) {return run.error_occurred;});
if (reports.size() - error_count < 2) {
// We don't report aggregated data if there was a single run.
return results;
}
// Accumulators.
Stat1_d real_accumulated_time_stat;
Stat1_d cpu_accumulated_time_stat;
Stat1_d bytes_per_second_stat;
Stat1_d items_per_second_stat;
// All repetitions should be run with the same number of iterations so we
// can take this information from the first benchmark.
int64_t const run_iterations = reports.front().iterations;
// Populate the accumulators.
for (Run const& run : reports) {
CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
CHECK_EQ(run_iterations, run.iterations);
if (run.error_occurred)
continue;
real_accumulated_time_stat +=
Stat1_d(run.real_accumulated_time/run.iterations, run.iterations);
cpu_accumulated_time_stat +=
Stat1_d(run.cpu_accumulated_time/run.iterations, run.iterations);
items_per_second_stat += Stat1_d(run.items_per_second, run.iterations);
bytes_per_second_stat += Stat1_d(run.bytes_per_second, run.iterations);
}
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run mean_data;
mean_data.benchmark_name = reports[0].benchmark_name + "_mean";
mean_data.iterations = run_iterations;
mean_data.real_accumulated_time = real_accumulated_time_stat.Mean() *
run_iterations;
mean_data.cpu_accumulated_time = cpu_accumulated_time_stat.Mean() *
run_iterations;
mean_data.bytes_per_second = bytes_per_second_stat.Mean();
mean_data.items_per_second = items_per_second_stat.Mean();
// Only add the label to mean/stddev if it is the same for all runs.
mean_data.report_label = reports[0].report_label;
for (std::size_t i = 1; i < reports.size(); i++) {
if (reports[i].report_label != reports[0].report_label) {
mean_data.report_label = "";
break;
}
}
Run stddev_data;
stddev_data.benchmark_name = reports[0].benchmark_name + "_stddev";
stddev_data.report_label = mean_data.report_label;
stddev_data.iterations = 0;
stddev_data.real_accumulated_time =
real_accumulated_time_stat.StdDev();
stddev_data.cpu_accumulated_time =
cpu_accumulated_time_stat.StdDev();
stddev_data.bytes_per_second = bytes_per_second_stat.StdDev();
stddev_data.items_per_second = items_per_second_stat.StdDev();
results.push_back(mean_data);
results.push_back(stddev_data);
return results;
}
std::vector<BenchmarkReporter::Run> ComputeBigO(
const std::vector<BenchmarkReporter::Run>& reports)
{
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
if (reports.size() < 2) return results;
// Accumulators.
std::vector<int> n;
std::vector<double> real_time;
std::vector<double> cpu_time;
// Populate the accumulators.
for (const Run& run : reports) {
n.push_back(run.complexity_n);
real_time.push_back(run.real_accumulated_time/run.iterations);
cpu_time.push_back(run.cpu_accumulated_time/run.iterations);
}
LeastSq result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
// result_cpu.complexity is passed as a parameter to result_real because, when
// reports[0].complexity is oAuto, the noise in the measured data could make
// the best-fit functions for the CPU and real times differ. To avoid this, we
// take the best-fitting function for the CPU time and apply it to the real-time data.
LeastSq result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
std::string benchmark_name = reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run big_o;
big_o.benchmark_name = benchmark_name + "_BigO";
big_o.iterations = 0;
big_o.real_accumulated_time = result_real.coef;
big_o.cpu_accumulated_time = result_cpu.coef;
big_o.report_big_o = true;
big_o.complexity = result_cpu.complexity;
double multiplier = GetTimeUnitMultiplier(reports[0].time_unit);
// Carry the label of the first run over to the Big-O and RMS entries.
Run rms;
big_o.report_label = reports[0].report_label;
rms.benchmark_name = benchmark_name + "_RMS";
rms.report_label = big_o.report_label;
rms.iterations = 0;
rms.real_accumulated_time = result_real.rms / multiplier;
rms.cpu_accumulated_time = result_cpu.rms / multiplier;
rms.report_rms = true;
rms.complexity = result_cpu.complexity;
results.push_back(big_o);
results.push_back(rms);
return results;
}
} // end namespace benchmark
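
A quick numeric check of the statistics path (hypothetical numbers):

    // Three repetitions, each run for 1000 iterations, with accumulated real
    // times of 1.0 s, 1.2 s and 1.4 s yield per-iteration samples of
    // 1.0e-3, 1.2e-3 and 1.4e-3 seconds.
    //   "_mean"  : iterations == 1000, real_accumulated_time == 1.2e-3 * 1000,
    //              so GetAdjustedRealTime() divides by 1000 again and reports
    //              the per-iteration mean.
    //   "_stddev": iterations == 0, real_accumulated_time == the per-iteration
    //              StdDev, so GetAdjustedRealTime() only applies the unit
    //              multiplier.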

View File

@@ -22,9 +22,21 @@
#include <vector>
#include "benchmark/benchmark_api.h"
#include "benchmark/reporter.h"
namespace benchmark {
// Return a vector containing the mean and standard deviation information for
// the specified list of reports. If 'reports' contains fewer than two
// non-errored runs, an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeStats(
const std::vector<BenchmarkReporter::Run>& reports);
// Return a vector containing the bigO and RMS information for the specified
// list of reports. If 'reports.size() < 2', an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeBigO(
const std::vector<BenchmarkReporter::Run>& reports);
// This data structure will contain the result returned by MinimalLeastSq
// - coef : Estimated coefficient for the high-order term as
// interpolated from data.

View File

@@ -35,81 +35,26 @@ namespace benchmark {
bool ConsoleReporter::ReportContext(const Context& context) {
name_field_width_ = context.name_field_width;
auto& Out = GetOutputStream();
auto& Err = GetErrorStream();
PrintBasicContext(&GetErrorStream(), context);
#ifdef BENCHMARK_OS_WINDOWS
if (FLAGS_color_print && &Out != &std::cout) {
Err << "Color printing is only supported for stdout on windows. "
"Disabling color printing\n";
if (FLAGS_color_print && &std::cout != &GetOutputStream()) {
GetErrorStream() << "Color printing is only supported for stdout on windows."
" Disabling color printing\n";
FLAGS_color_print = false;
}
#endif
Err << "Run on (" << context.num_cpus << " X " << context.mhz_per_cpu
<< " MHz CPU " << ((context.num_cpus > 1) ? "s" : "") << ")\n";
Err << LocalDateTimeString() << "\n";
if (context.cpu_scaling_enabled) {
Err << "***WARNING*** CPU scaling is enabled, the benchmark "
"real time measurements may be noisy and will incur extra "
"overhead.\n";
}
#ifndef NDEBUG
Err << "***WARNING*** Library was built as DEBUG. Timings may be "
"affected.\n";
#endif
std::string str = FormatString("%-*s %13s %13s %10s\n",
static_cast<int>(name_field_width_), "Benchmark",
"Time", "CPU", "Iterations");
Out << str << std::string(str.length() - 1, '-') << "\n";
GetOutputStream() << str << std::string(str.length() - 1, '-') << "\n";
return true;
}
void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
if (reports.empty()) {
return;
}
for (Run const& run : reports) {
CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
for (const auto& run : reports)
PrintRunData(run);
}
auto error_count = std::count_if(
reports.begin(), reports.end(),
[](Run const& run) {return run.error_occurred;});
if (reports.size() - error_count < 2) {
// We don't report aggregated data if there was a single run.
return;
}
Run mean_data;
Run stddev_data;
BenchmarkReporter::ComputeStats(reports, &mean_data, &stddev_data);
// Output using PrintRun.
PrintRunData(mean_data);
PrintRunData(stddev_data);
}
void ConsoleReporter::ReportComplexity(const std::vector<Run> & complexity_reports) {
if (complexity_reports.size() < 2) {
// We don't report asymptotic complexity data if there was a single run.
return;
}
Run big_o_data;
Run rms_data;
BenchmarkReporter::ComputeBigO(complexity_reports, &big_o_data, &rms_data);
// Output using PrintRun.
PrintRunData(big_o_data);
PrintRunData(rms_data);
}
void ConsoleReporter::PrintRunData(const Run& result) {
@@ -139,36 +84,20 @@ void ConsoleReporter::PrintRunData(const Run& result) {
" items/s");
}
double multiplier;
const char* timeLabel;
std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(result.time_unit);
const double real_time = result.GetAdjustedRealTime();
const double cpu_time = result.GetAdjustedCPUTime();
if(result.report_big_o) {
std::string big_o = result.report_big_o ? GetBigOString(result.complexity) : "";
ColorPrintf(Out, COLOR_YELLOW, "%10.4f %s %10.4f %s ",
result.real_accumulated_time * multiplier,
big_o.c_str(),
result.cpu_accumulated_time * multiplier,
big_o.c_str());
real_time, big_o.c_str(), cpu_time, big_o.c_str());
} else if(result.report_rms) {
ColorPrintf(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ",
result.real_accumulated_time * multiplier * 100,
result.cpu_accumulated_time * multiplier * 100);
} else if (result.iterations == 0) {
ColorPrintf(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ",
result.real_accumulated_time * multiplier,
timeLabel,
result.cpu_accumulated_time * multiplier,
timeLabel);
real_time * 100, cpu_time * 100);
} else {
const char* timeLabel = GetTimeUnitString(result.time_unit);
ColorPrintf(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ",
(result.real_accumulated_time * multiplier) /
(static_cast<double>(result.iterations)),
timeLabel,
(result.cpu_accumulated_time * multiplier) /
(static_cast<double>(result.iterations)),
timeLabel);
real_time, timeLabel, cpu_time, timeLabel);
}
if(!result.report_big_o && !result.report_rms) {
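
The three ColorPrintf branches produce rows shaped roughly like this (hypothetical values; note the %10.4f format for the Big-O coefficients and the percent rendering for RMS):

    BM_Vector/64                 105 ns        104 ns    6432000
    BM_Vector/64_stddev            2 ns          2 ns          0
    BM_Vector_BigO            1.6420 N      1.6365 N
    BM_Vector_RMS                  3 %           3 %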

View File

@@ -44,24 +44,9 @@ std::vector<std::string> elements = {
}
bool CSVReporter::ReportContext(const Context& context) {
std::ostream& Err = GetErrorStream();
PrintBasicContext(&GetErrorStream(), context);
std::ostream& Out = GetOutputStream();
Err << "Run on (" << context.num_cpus << " X " << context.mhz_per_cpu
<< " MHz CPU " << ((context.num_cpus > 1) ? "s" : "") << ")\n";
Err << LocalDateTimeString() << "\n";
if (context.cpu_scaling_enabled) {
Err << "***WARNING*** CPU scaling is enabled, the benchmark "
"real time measurements may be noisy and will incur extra "
"overhead.\n";
}
#ifndef NDEBUG
Err << "***WARNING*** Library was built as DEBUG. Timings may be "
"affected.\n";
#endif
for (auto B = elements.begin(); B != elements.end(); ) {
Out << *B++;
if (B != elements.end())
@@ -72,40 +57,8 @@ bool CSVReporter::ReportContext(const Context& context) {
}
void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
if (reports.empty()) {
return;
}
auto error_count = std::count_if(
reports.begin(), reports.end(),
[](Run const& run) {return run.error_occurred;});
std::vector<Run> reports_cp = reports;
if (reports.size() - error_count >= 2) {
Run mean_data;
Run stddev_data;
BenchmarkReporter::ComputeStats(reports, &mean_data, &stddev_data);
reports_cp.push_back(mean_data);
reports_cp.push_back(stddev_data);
}
for (auto it = reports_cp.begin(); it != reports_cp.end(); ++it) {
PrintRunData(*it);
}
}
void CSVReporter::ReportComplexity(const std::vector<Run>& complexity_reports) {
if (complexity_reports.size() < 2) {
// We don't report asymptotic complexity data if there was a single run.
return;
}
Run big_o_data;
Run rms_data;
BenchmarkReporter::ComputeBigO(complexity_reports, &big_o_data, &rms_data);
// Output using PrintRun.
PrintRunData(big_o_data);
PrintRunData(rms_data);
for (const auto& run : reports)
PrintRunData(run);
}
void CSVReporter::PrintRunData(const Run & run) {
@@ -125,29 +78,18 @@ void CSVReporter::PrintRunData(const Run & run) {
return;
}
double multiplier;
const char* timeLabel;
std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(run.time_unit);
double cpu_time = run.cpu_accumulated_time * multiplier;
double real_time = run.real_accumulated_time * multiplier;
if (run.iterations != 0) {
real_time = real_time / static_cast<double>(run.iterations);
cpu_time = cpu_time / static_cast<double>(run.iterations);
}
// Do not print the iteration count on bigO and RMS reports
if(!run.report_big_o && !run.report_rms) {
Out << run.iterations;
}
Out << ",";
Out << real_time << ",";
Out << cpu_time << ",";
Out << run.GetAdjustedRealTime() << ",";
Out << run.GetAdjustedCPUTime() << ",";
// Do not print timeLabel on RMS report
if(!run.report_rms) {
Out << timeLabel;
Out << GetTimeUnitString(run.time_unit);
}
Out << ",";

View File

@@ -98,71 +98,23 @@ void JSONReporter::ReportRuns(std::vector<Run> const& reports) {
}
first_report_ = false;
auto error_count = std::count_if(
reports.begin(), reports.end(),
[](Run const& run) {return run.error_occurred;});
std::vector<Run> reports_cp = reports;
if (reports.size() - error_count >= 2) {
Run mean_data;
Run stddev_data;
BenchmarkReporter::ComputeStats(reports, &mean_data, &stddev_data);
reports_cp.push_back(mean_data);
reports_cp.push_back(stddev_data);
}
for (auto it = reports_cp.begin(); it != reports_cp.end(); ++it) {
for (auto it = reports.begin(); it != reports.end(); ++it) {
out << indent << "{\n";
PrintRunData(*it);
out << indent << '}';
auto it_cp = it;
if (++it_cp != reports_cp.end()) {
if (++it_cp != reports.end()) {
out << ",\n";
}
}
}
void JSONReporter::ReportComplexity(const std::vector<Run> & complexity_reports) {
if (complexity_reports.size() < 2) {
// We don't report asymptotic complexity data if there was a single run.
return;
}
std::string indent(4, ' ');
std::ostream& out = GetOutputStream();
if (!first_report_) {
out << ",\n";
}
Run big_o_data;
Run rms_data;
BenchmarkReporter::ComputeBigO(complexity_reports, &big_o_data, &rms_data);
// Output using PrintRun.
out << indent << "{\n";
PrintRunData(big_o_data);
out << indent << "},\n";
out << indent << "{\n";
PrintRunData(rms_data);
out << indent << '}';
}
void JSONReporter::Finalize() {
// Close the list of benchmarks and the top level object.
GetOutputStream() << "\n ]\n}\n";
}
void JSONReporter::PrintRunData(Run const& run) {
double multiplier;
const char* timeLabel;
std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(run.time_unit);
double cpu_time = run.cpu_accumulated_time * multiplier;
double real_time = run.real_accumulated_time * multiplier;
if (run.iterations != 0) {
real_time = real_time / static_cast<double>(run.iterations);
cpu_time = cpu_time / static_cast<double>(run.iterations);
}
std::string indent(6, ' ');
std::ostream& out = GetOutputStream();
out << indent
@@ -182,13 +134,13 @@ void JSONReporter::PrintRunData(Run const& run) {
<< ",\n";
}
out << indent
<< FormatKV("real_time", RoundDouble(real_time))
<< FormatKV("real_time", RoundDouble(run.GetAdjustedRealTime()))
<< ",\n";
out << indent
<< FormatKV("cpu_time", RoundDouble(cpu_time));
<< FormatKV("cpu_time", RoundDouble(run.GetAdjustedCPUTime()));
if(!run.report_rms) {
out << ",\n" << indent
<< FormatKV("time_unit", timeLabel);
<< FormatKV("time_unit", GetTimeUnitString(run.time_unit));
}
if (run.bytes_per_second > 0.0) {
out << ",\n" << indent

View File

@@ -13,7 +13,7 @@
// limitations under the License.
#include "benchmark/reporter.h"
#include "complexity.h"
#include "walltime.h"
#include <cstdlib>
@@ -31,131 +31,45 @@
{
}
void BenchmarkReporter::ComputeStats(
const std::vector<Run>& reports,
Run* mean_data, Run* stddev_data) {
CHECK(reports.size() >= 2) << "Cannot compute stats for less than 2 reports";
// Accumulators.
Stat1_d real_accumulated_time_stat;
Stat1_d cpu_accumulated_time_stat;
Stat1_d bytes_per_second_stat;
Stat1_d items_per_second_stat;
// All repetitions should be run with the same number of iterations so we
// can take this information from the first benchmark.
int64_t const run_iterations = reports.front().iterations;
// Populate the accumulators.
for (Run const& run : reports) {
CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
CHECK_EQ(run_iterations, run.iterations);
if (run.error_occurred)
continue;
real_accumulated_time_stat +=
Stat1_d(run.real_accumulated_time/run.iterations, run.iterations);
cpu_accumulated_time_stat +=
Stat1_d(run.cpu_accumulated_time/run.iterations, run.iterations);
items_per_second_stat += Stat1_d(run.items_per_second, run.iterations);
bytes_per_second_stat += Stat1_d(run.bytes_per_second, run.iterations);
}
// Get the data from the accumulator to BenchmarkReporter::Run's.
mean_data->benchmark_name = reports[0].benchmark_name + "_mean";
mean_data->iterations = run_iterations;
mean_data->real_accumulated_time = real_accumulated_time_stat.Mean() *
run_iterations;
mean_data->cpu_accumulated_time = cpu_accumulated_time_stat.Mean() *
run_iterations;
mean_data->bytes_per_second = bytes_per_second_stat.Mean();
mean_data->items_per_second = items_per_second_stat.Mean();
// Only add label to mean/stddev if it is same for all runs
mean_data->report_label = reports[0].report_label;
for (std::size_t i = 1; i < reports.size(); i++) {
if (reports[i].report_label != reports[0].report_label) {
mean_data->report_label = "";
break;
}
}
stddev_data->benchmark_name = reports[0].benchmark_name + "_stddev";
stddev_data->report_label = mean_data->report_label;
stddev_data->iterations = 0;
stddev_data->real_accumulated_time =
real_accumulated_time_stat.StdDev();
stddev_data->cpu_accumulated_time =
cpu_accumulated_time_stat.StdDev();
stddev_data->bytes_per_second = bytes_per_second_stat.StdDev();
stddev_data->items_per_second = items_per_second_stat.StdDev();
}
void BenchmarkReporter::ComputeBigO(
const std::vector<Run>& reports,
Run* big_o, Run* rms) {
CHECK(reports.size() >= 2)
<< "Cannot compute asymptotic complexity for fewer than 2 reports";
// Accumulators.
std::vector<int> n;
std::vector<double> real_time;
std::vector<double> cpu_time;
// Populate the accumulators.
for (const Run& run : reports) {
n.push_back(run.complexity_n);
real_time.push_back(run.real_accumulated_time/run.iterations);
cpu_time.push_back(run.cpu_accumulated_time/run.iterations);
}
LeastSq result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
// result_cpu.complexity is passed as parameter to result_real because in case
// reports[0].complexity is oAuto, the noise on the measured data could make
// the best fit function of Cpu and Real differ. In order to solve this, we
// take the best fitting function for the Cpu, and apply it to Real data.
LeastSq result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
std::string benchmark_name = reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));
// Get the data from the accumulator to BenchmarkReporter::Run's.
big_o->benchmark_name = benchmark_name + "_BigO";
big_o->iterations = 0;
big_o->real_accumulated_time = result_real.coef;
big_o->cpu_accumulated_time = result_cpu.coef;
big_o->report_big_o = true;
big_o->complexity = result_cpu.complexity;
double multiplier;
const char* time_label;
std::tie(time_label, multiplier) =
GetTimeUnitAndMultiplier(reports[0].time_unit);
// Only add label to mean/stddev if it is same for all runs
big_o->report_label = reports[0].report_label;
rms->benchmark_name = benchmark_name + "_RMS";
rms->report_label = big_o->report_label;
rms->iterations = 0;
rms->real_accumulated_time = result_real.rms / multiplier;
rms->cpu_accumulated_time = result_cpu.rms / multiplier;
rms->report_rms = true;
rms->complexity = result_cpu.complexity;
}
TimeUnitMultiplier BenchmarkReporter::GetTimeUnitAndMultiplier(TimeUnit unit) {
switch (unit) {
case kMillisecond:
return std::make_pair("ms", 1e3);
case kMicrosecond:
return std::make_pair("us", 1e6);
case kNanosecond:
default:
return std::make_pair("ns", 1e9);
}
}
void BenchmarkReporter::Finalize() {
}
BenchmarkReporter::~BenchmarkReporter() {
}
void BenchmarkReporter::PrintBasicContext(std::ostream *out_ptr,
Context const &context) {
CHECK(out_ptr) << "cannot be null";
auto& Out = *out_ptr;
Out << "Run on (" << context.num_cpus << " X " << context.mhz_per_cpu
<< " MHz CPU " << ((context.num_cpus > 1) ? "s" : "") << ")\n";
Out << LocalDateTimeString() << "\n";
if (context.cpu_scaling_enabled) {
Out << "***WARNING*** CPU scaling is enabled, the benchmark "
"real time measurements may be noisy and will incur extra "
"overhead.\n";
}
#ifndef NDEBUG
Out << "***WARNING*** Library was built as DEBUG. Timings may be "
"affected.\n";
#endif
}
double BenchmarkReporter::Run::GetAdjustedRealTime() const {
double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0)
new_time /= static_cast<double>(iterations);
return new_time;
}
double BenchmarkReporter::Run::GetAdjustedCPUTime() const {
double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0)
new_time /= static_cast<double>(iterations);
return new_time;
}
} // end namespace benchmark
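
For reference, the factored-out PrintBasicContext emits a header roughly like (illustrative):

    Run on (8 X 2600 MHz CPUs)
    2016-05-27 16:45:25
    ***WARNING*** CPU scaling is enabled, the benchmark real time measurements
    may be noisy and will incur extra overhead.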

View File

@@ -84,11 +84,6 @@ public:
rep->ReportRuns(report);
}
virtual void ReportComplexity(const std::vector<Run>& complexity_reports) {
for (auto rep : reporters_)
rep->ReportComplexity(complexity_reports);
}
virtual void Finalize() {
for (auto rep : reporters_)
rep->Finalize();
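
With the ReportComplexity override gone, a fan-out reporter in this style reduces to the three remaining hooks; filled out, it would read roughly as below (a sketch, assuming reporters_ is a std::vector<BenchmarkReporter*> owned elsewhere):

    virtual bool ReportContext(const Context& context) {
      bool success = true;
      for (auto rep : reporters_)
        success = rep->ReportContext(context) && success;
      return success;
    }
    virtual void ReportRuns(const std::vector<Run>& report) {
      for (auto rep : reporters_)
        rep->ReportRuns(report);
    }
    virtual void Finalize() {
      for (auto rep : reporters_)
        rep->Finalize();
    }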