mirror of
https://github.com/google/benchmark.git
synced 2025-03-23 07:30:07 +08:00
Make 'complexity reports' cache per-family, not global (#1166)
While the current variant works, it assumes that all the instances of a single family will be run together, with nothing in between them. Naturally, that won't work once the runs may be interleaved.
This commit is contained in:
parent
80a62618e8
commit
0c1da0a713
@ -265,8 +265,9 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
|
|||||||
BenchmarkReporter::Context context;
|
BenchmarkReporter::Context context;
|
||||||
context.name_field_width = name_field_width;
|
context.name_field_width = name_field_width;
|
||||||
|
|
||||||
// Keep track of running times of all instances of current benchmark
|
// Keep track of running times of all instances of each benchmark family.
|
||||||
std::vector<BenchmarkReporter::Run> complexity_reports;
|
std::map<int /*family_index*/, std::vector<BenchmarkReporter::Run>>
|
||||||
|
complexity_reports;
|
||||||
|
|
||||||
// We flush streams after invoking reporter methods that write to them. This
|
// We flush streams after invoking reporter methods that write to them. This
|
||||||
// ensures users get timely updates even when streams are not line-buffered.
|
// ensures users get timely updates even when streams are not line-buffered.
|
||||||
@ -281,8 +282,15 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
|
|||||||
flushStreams(display_reporter);
|
flushStreams(display_reporter);
|
||||||
flushStreams(file_reporter);
|
flushStreams(file_reporter);
|
||||||
|
|
||||||
for (const auto& benchmark : benchmarks) {
|
for (const BenchmarkInstance& benchmark : benchmarks) {
|
||||||
RunResults run_results = RunBenchmark(benchmark, &complexity_reports);
|
std::vector<BenchmarkReporter::Run>* complexity_reports_for_family =
|
||||||
|
nullptr;
|
||||||
|
if (benchmark.complexity() != oNone)
|
||||||
|
complexity_reports_for_family =
|
||||||
|
&complexity_reports[benchmark.family_index()];
|
||||||
|
|
||||||
|
RunResults run_results =
|
||||||
|
RunBenchmark(benchmark, complexity_reports_for_family);
|
||||||
|
|
||||||
auto report = [&run_results](BenchmarkReporter* reporter,
|
auto report = [&run_results](BenchmarkReporter* reporter,
|
||||||
bool report_aggregates_only) {
|
bool report_aggregates_only) {
|
||||||
|
@ -142,10 +142,11 @@ class BenchmarkRunner {
|
|||||||
BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
|
BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
|
||||||
std::vector<BenchmarkReporter::Run>* complexity_reports_)
|
std::vector<BenchmarkReporter::Run>* complexity_reports_)
|
||||||
: b(b_),
|
: b(b_),
|
||||||
complexity_reports(*complexity_reports_),
|
complexity_reports(complexity_reports_),
|
||||||
min_time(!IsZero(b.min_time()) ? b.min_time() : FLAGS_benchmark_min_time),
|
min_time(!IsZero(b.min_time()) ? b.min_time()
|
||||||
|
: FLAGS_benchmark_min_time),
|
||||||
repeats(b.repetitions() != 0 ? b.repetitions()
|
repeats(b.repetitions() != 0 ? b.repetitions()
|
||||||
: FLAGS_benchmark_repetitions),
|
: FLAGS_benchmark_repetitions),
|
||||||
has_explicit_iteration_count(b.iterations() != 0),
|
has_explicit_iteration_count(b.iterations() != 0),
|
||||||
pool(b.threads() - 1),
|
pool(b.threads() - 1),
|
||||||
iters(has_explicit_iteration_count ? b.iterations() : 1),
|
iters(has_explicit_iteration_count ? b.iterations() : 1),
|
||||||
@ -178,12 +179,12 @@ class BenchmarkRunner {
|
|||||||
run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
|
run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
|
||||||
|
|
||||||
// Maybe calculate complexity report
|
// Maybe calculate complexity report
|
||||||
if ((b.complexity() != oNone) && b.last_benchmark_instance) {
|
if (complexity_reports && b.last_benchmark_instance) {
|
||||||
auto additional_run_stats = ComputeBigO(complexity_reports);
|
auto additional_run_stats = ComputeBigO(*complexity_reports);
|
||||||
run_results.aggregates_only.insert(run_results.aggregates_only.end(),
|
run_results.aggregates_only.insert(run_results.aggregates_only.end(),
|
||||||
additional_run_stats.begin(),
|
additional_run_stats.begin(),
|
||||||
additional_run_stats.end());
|
additional_run_stats.end());
|
||||||
complexity_reports.clear();
|
complexity_reports->clear();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -193,7 +194,7 @@ class BenchmarkRunner {
|
|||||||
RunResults run_results;
|
RunResults run_results;
|
||||||
|
|
||||||
const benchmark::internal::BenchmarkInstance& b;
|
const benchmark::internal::BenchmarkInstance& b;
|
||||||
std::vector<BenchmarkReporter::Run>& complexity_reports;
|
std::vector<BenchmarkReporter::Run>* complexity_reports;
|
||||||
|
|
||||||
const double min_time;
|
const double min_time;
|
||||||
const int repeats;
|
const int repeats;
|
||||||
@ -360,8 +361,8 @@ class BenchmarkRunner {
|
|||||||
CreateRunReport(b, i.results, memory_iterations, memory_result,
|
CreateRunReport(b, i.results, memory_iterations, memory_result,
|
||||||
i.seconds, repetition_index, repeats);
|
i.seconds, repetition_index, repeats);
|
||||||
|
|
||||||
if (!report.error_occurred && b.complexity() != oNone)
|
if (complexity_reports && !report.error_occurred)
|
||||||
complexity_reports.push_back(report);
|
complexity_reports->push_back(report);
|
||||||
|
|
||||||
run_results.non_aggregates.push_back(report);
|
run_results.non_aggregates.push_back(report);
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user