Make 'complexity reports' cache per-family, not global (#1166)

While the current variant works, it assumes that all instances of
a single family will be run together, with nothing in between them.
Naturally, that assumption breaks once the runs may be interleaved.
Roman Lebedev, 2021-06-03 11:46:34 +03:00 (committed by GitHub)
parent 80a62618e8
commit 0c1da0a713
2 changed files with 22 additions and 13 deletions
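
To see why the per-family split matters: a complexity report (the Big-O fit) must be computed from the runs of exactly one family, and with a single global cache any interleaved run from another family would land in the same vector and corrupt the fit. Below is a minimal standalone sketch of the keyed-cache idea, using toy types rather than the library's actual Run/reporter classes:

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy stand-in; the real code caches BenchmarkReporter::Run objects.
struct Run {
  int family_index;
  std::string name;
};

int main() {
  // Runs of two families arriving interleaved -- the situation the old
  // single global std::vector could not handle.
  std::vector<Run> runs = {
      {0, "BM_A/8"}, {1, "BM_B/8"}, {0, "BM_A/64"}, {1, "BM_B/64"}};

  // Per-family cache, keyed by family index, as in this patch.
  std::map<int, std::vector<Run>> complexity_reports;
  for (const Run& r : runs)
    complexity_reports[r.family_index].push_back(r);

  // Each family's complexity fit now sees only its own runs.
  for (const auto& kv : complexity_reports)
    std::cout << "family " << kv.first << ": " << kv.second.size()
              << " runs\n";  // prints 2 runs for each family
}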

src/benchmark.cc

@@ -265,8 +265,9 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
   BenchmarkReporter::Context context;
   context.name_field_width = name_field_width;
 
-  // Keep track of running times of all instances of current benchmark
-  std::vector<BenchmarkReporter::Run> complexity_reports;
+  // Keep track of running times of all instances of each benchmark family.
+  std::map<int /*family_index*/, std::vector<BenchmarkReporter::Run>>
+      complexity_reports;
 
   // We flush streams after invoking reporter methods that write to them. This
   // ensures users get timely updates even when streams are not line-buffered.
@@ -281,8 +282,15 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
   flushStreams(display_reporter);
   flushStreams(file_reporter);
 
-  for (const auto& benchmark : benchmarks) {
-    RunResults run_results = RunBenchmark(benchmark, &complexity_reports);
+  for (const BenchmarkInstance& benchmark : benchmarks) {
+    std::vector<BenchmarkReporter::Run>* complexity_reports_for_family =
+        nullptr;
+    if (benchmark.complexity() != oNone)
+      complexity_reports_for_family =
+          &complexity_reports[benchmark.family_index()];
+
+    RunResults run_results =
+        RunBenchmark(benchmark, complexity_reports_for_family);
 
     auto report = [&run_results](BenchmarkReporter* reporter,
                                  bool report_aggregates_only) {
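
Two details of the loop above are worth noting. First, complexity_reports[benchmark.family_index()] relies on std::map::operator[] default-constructing an empty vector on first access, so no explicit "is this family known yet?" check is needed. Second, instances whose complexity is oNone get a null pointer, opting them out of caching entirely. A standalone illustration of the operator[] behaviour (synthetic data, not benchmark code):

#include <cassert>
#include <map>
#include <vector>

int main() {
  std::map<int, std::vector<int>> cache;

  // First access for a key default-constructs an empty vector...
  cache[7].push_back(1);
  // ...and later accesses for the same key reuse it, even with other
  // keys touched in between.
  cache[3].push_back(2);
  cache[7].push_back(3);

  assert(cache.size() == 2);     // two families seen
  assert(cache[7].size() == 2);  // family 7 accumulated both its runs
  assert(cache[3].size() == 1);
}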

src/benchmark_runner.cc

@@ -142,10 +142,11 @@ class BenchmarkRunner {
   BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
                   std::vector<BenchmarkReporter::Run>* complexity_reports_)
       : b(b_),
-        complexity_reports(*complexity_reports_),
-        min_time(!IsZero(b.min_time()) ? b.min_time() : FLAGS_benchmark_min_time),
+        complexity_reports(complexity_reports_),
+        min_time(!IsZero(b.min_time()) ? b.min_time()
+                                       : FLAGS_benchmark_min_time),
         repeats(b.repetitions() != 0 ? b.repetitions()
-                                       : FLAGS_benchmark_repetitions),
+                                     : FLAGS_benchmark_repetitions),
         has_explicit_iteration_count(b.iterations() != 0),
         pool(b.threads() - 1),
         iters(has_explicit_iteration_count ? b.iterations() : 1),
@@ -178,12 +179,12 @@ class BenchmarkRunner {
       run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
 
     // Maybe calculate complexity report
-    if ((b.complexity() != oNone) && b.last_benchmark_instance) {
-      auto additional_run_stats = ComputeBigO(complexity_reports);
+    if (complexity_reports && b.last_benchmark_instance) {
+      auto additional_run_stats = ComputeBigO(*complexity_reports);
       run_results.aggregates_only.insert(run_results.aggregates_only.end(),
                                          additional_run_stats.begin(),
                                          additional_run_stats.end());
-      complexity_reports.clear();
+      complexity_reports->clear();
     }
   }
@@ -193,7 +194,7 @@ class BenchmarkRunner {
   RunResults run_results;
 
   const benchmark::internal::BenchmarkInstance& b;
-  std::vector<BenchmarkReporter::Run>& complexity_reports;
+  std::vector<BenchmarkReporter::Run>* complexity_reports;
 
   const double min_time;
   const int repeats;
@@ -360,8 +361,8 @@ class BenchmarkRunner {
         CreateRunReport(b, i.results, memory_iterations, memory_result,
                         i.seconds, repetition_index, repeats);
 
-    if (!report.error_occurred && b.complexity() != oNone)
-      complexity_reports.push_back(report);
+    if (complexity_reports && !report.error_occurred)
+      complexity_reports->push_back(report);
 
     run_results.non_aggregates.push_back(report);
   }
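
In src/benchmark_runner.cc the member changes from a reference to a pointer so that "no complexity reporting" can be encoded as nullptr, and every former b.complexity() != oNone test becomes a null check on the cache itself. A reduced sketch of that pointer-as-optional pattern, with hypothetical stand-in types rather than the library's own:

#include <iostream>
#include <vector>

struct Run {  // hypothetical stand-in for BenchmarkReporter::Run
  double seconds;
};

// nullptr means "complexity reporting disabled for this instance",
// mirroring how BenchmarkRunner now treats its complexity_reports member.
void RunOne(double seconds, std::vector<Run>* complexity_reports) {
  Run report{seconds};
  if (complexity_reports)  // replaces the old b.complexity() != oNone test
    complexity_reports->push_back(report);
}

int main() {
  std::vector<Run> family_cache;
  RunOne(1.5, &family_cache);  // complexity enabled: report cached
  RunOne(2.5, nullptr);        // complexity oNone: nothing cached
  std::cout << family_cache.size() << "\n";  // prints 1
}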