Mirror of https://github.com/google/benchmark.git
[NFCI] Make BenchmarkRunner non-internal to its .cpp file
Currently the lifetime of a single BenchmarkRunner is constrained to one RunBenchmark() call, but that will have to change for interleaved benchmark execution: we'll need to keep the runner around so we don't forget how many repetitions of an instance we've already done.
parent 520573fecb
commit 32cc607107
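To make the motivation concrete, here is a minimal sketch of the interleaved execution this prepares for. The driver loop is an assumption about the planned follow-up, not code from this commit, and DoOneRepetition() is still private in this patch; only the names (BenchmarkRunner, DoOneRepetition(), get_results()) come from the code below.

// Hypothetical interleaving driver (assumption; not part of this commit).
// instance0/instance1 stand in for two BenchmarkInstance objects.
BenchmarkRunner r0(instance0, &complexity_reports);
BenchmarkRunner r1(instance1, &complexity_reports);
for (int rep = 0; rep < repeats; ++rep) {
  r0.DoOneRepetition(rep);  // one repetition of benchmark 0,
  r1.DoOneRepetition(rep);  // then one of benchmark 1, interleaved.
}
// Each runner keeps its own `iters` member across calls, so only
// repetition 0 has to search for a good iteration count.
RunResults results0 = r0.get_results();  // moves the accumulated results out
RunResults results1 = r1.get_results();

Note that in this commit the constructor still drives every repetition itself, so this shape only becomes possible once that loop moves out of the constructor.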
src/benchmark_runner.cc

@@ -13,6 +13,7 @@
 // limitations under the License.
 
 #include "benchmark_runner.h"
+
 #include "benchmark/benchmark.h"
 #include "benchmark_api_internal.h"
 #include "internal_macros.h"
@@ -106,7 +107,8 @@ BenchmarkReporter::Run CreateRunReport(
       report.max_bytes_used = memory_result.max_bytes_used;
     }
 
-    internal::Finish(&report.counters, results.iterations, seconds, b.threads());
+    internal::Finish(&report.counters, results.iterations, seconds,
+                     b.threads());
   }
   return report;
 }
@@ -137,14 +139,14 @@ void RunInThread(const BenchmarkInstance* b, IterationCount iters,
   manager->NotifyThreadComplete();
 }
 
-class BenchmarkRunner {
- public:
-  BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
-                  std::vector<BenchmarkReporter::Run>* complexity_reports_)
+}  // end namespace
+
+BenchmarkRunner::BenchmarkRunner(
+    const benchmark::internal::BenchmarkInstance& b_,
+    std::vector<BenchmarkReporter::Run>* complexity_reports_)
     : b(b_),
       complexity_reports(complexity_reports_),
-      min_time(!IsZero(b.min_time()) ? b.min_time()
-                                     : FLAGS_benchmark_min_time),
+      min_time(!IsZero(b.min_time()) ? b.min_time() : FLAGS_benchmark_min_time),
       repeats(b.repetitions() != 0 ? b.repetitions()
                                    : FLAGS_benchmark_repetitions),
       has_explicit_iteration_count(b.iterations() != 0),
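The min_time and repeats initializers implement per-benchmark overrides with a global-flag fallback. For example, with the public builder API (unchanged by this patch; BM_Foo is a placeholder):

// A benchmark that sets its own values wins over the command-line flags:
BENCHMARK(BM_Foo)->MinTime(2.0)->Repetitions(3);
// => min_time == 2.0 and repeats == 3 for this benchmark, regardless of
//    --benchmark_min_time / --benchmark_repetitions; benchmarks that leave
//    them unset fall back to those flags.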
@@ -186,35 +188,9 @@ class BenchmarkRunner {
                                            additional_run_stats.end());
       complexity_reports->clear();
     }
   }
 }
 
-  RunResults&& get_results() { return std::move(run_results); }
-
- private:
-  RunResults run_results;
-
-  const benchmark::internal::BenchmarkInstance& b;
-  std::vector<BenchmarkReporter::Run>* complexity_reports;
-
-  const double min_time;
-  const int repeats;
-  const bool has_explicit_iteration_count;
-
-  std::vector<std::thread> pool;
-
-  IterationCount iters;  // preserved between repetitions!
-  // So only the first repetition has to find/calculate it,
-  // the other repetitions will just use that precomputed iteration count.
-
-  PerfCountersMeasurement perf_counters_measurement;
-  PerfCountersMeasurement* const perf_counters_measurement_ptr;
-
-  struct IterationResults {
-    internal::ThreadManager::Result results;
-    IterationCount iters;
-    double seconds;
-  };
-  IterationResults DoNIterations() {
+BenchmarkRunner::IterationResults BenchmarkRunner::DoNIterations() {
   VLOG(2) << "Running " << b.name().str() << " for " << iters << "\n";
 
   std::unique_ptr<internal::ThreadManager> manager;
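For orientation, DoNIterations() fans the measured iterations out over the pool member and coordinates through the ThreadManager. Its body is not shown in this hunk, so the sketch below is a reconstruction from the RunInThread() signature above; treat the thread-id convention and the trailing join as assumptions.

// Sketch of the fan-out inside DoNIterations() (assumption; not in the hunk).
// Worker threads take ids 1..N-1; the main thread doubles as thread 0.
for (std::size_t ti = 0; ti < pool.size(); ++ti)
  pool[ti] = std::thread(&RunInThread, &b, iters,
                         static_cast<int>(ti + 1), manager.get());
RunInThread(&b, iters, /*thread_id=*/0, manager.get());
for (std::thread& thread : pool) thread.join();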
@@ -266,9 +242,10 @@ class BenchmarkRunner {
     }
 
     return i;
-  }
-
-  IterationCount PredictNumItersNeeded(const IterationResults& i) const {
+}
+
+IterationCount BenchmarkRunner::PredictNumItersNeeded(
+    const IterationResults& i) const {
   // See how much iterations should be increased by.
   // Note: Avoid division by zero with max(seconds, 1ns).
   double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
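To see what this formula does with concrete numbers (illustrative values only):

// Worked example for PredictNumItersNeeded():
//   min_time  = 0.5 s     (target runtime for one repetition)
//   i.seconds = 0.05 s    (what the last batch actually took)
//   i.iters   = 10000     (iterations in that batch)
// multiplier = 0.5 * 1.4 / std::max(0.05, 1e-9) = 14.0
// => next batch ~ 14 * 10000 = 140000 iterations.
// The 1.4 overshoot hedges against per-iteration cost shrinking as the
// count grows; clamping later in the function (outside this hunk) may
// still adjust the result.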
@@ -290,9 +267,10 @@ class BenchmarkRunner {
 
   VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
   return next_iters;  // round up before conversion to integer.
-  }
-
-  bool ShouldReportIterationResults(const IterationResults& i) const {
+}
+
+bool BenchmarkRunner::ShouldReportIterationResults(
+    const IterationResults& i) const {
   // Determine if this run should be reported;
   // Either it has run for a sufficient amount of time
   // or because an error was reported.
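Reading these comment lines together with the single clause visible at the end of the next hunk, the full predicate is roughly the following. This reconstruction is an assumption, since only fragments of it appear in the diff.

// Rough paraphrase of ShouldReportIterationResults() (assumption; only
// fragments of it appear in these hunks). Results are accepted when:
return i.results.has_error_ ||          // an error was reported, or
       has_explicit_iteration_count ||  // the user fixed the iteration count, or
       i.seconds >= min_time ||         // we ran for the requested time, or
       ((i.results.real_time_used >= 5 * min_time) &&
        !b.use_manual_time());          // wall time far exceeded the target.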
@@ -303,9 +281,9 @@ class BenchmarkRunner {
       // the minimum time.
       // Note that user provided timers are except from this sanity check.
       ((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time());
-  }
-
-  void DoOneRepetition(int64_t repetition_index) {
+}
+
+void BenchmarkRunner::DoOneRepetition(int64_t repetition_index) {
   const bool is_the_first_repetition = repetition_index == 0;
   IterationResults i;
 
@@ -358,17 +336,14 @@ class BenchmarkRunner {
 
   // Ok, now actualy report.
-  BenchmarkReporter::Run report =
-      CreateRunReport(b, i.results, memory_iterations, memory_result,
-                      i.seconds, repetition_index, repeats);
+  BenchmarkReporter::Run report =
+      CreateRunReport(b, i.results, memory_iterations, memory_result, i.seconds,
+                      repetition_index, repeats);
 
   if (complexity_reports && !report.error_occurred)
     complexity_reports->push_back(report);
 
   run_results.non_aggregates.push_back(report);
-  }
-};
-
-}  // end namespace
+}
 
 RunResults RunBenchmark(
     const benchmark::internal::BenchmarkInstance& b,
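The hunk cuts off before the new RunBenchmark() body, but given the NFCI intent it presumably shrinks to a thin wrapper over the runner, along these lines (a sketch, not the verbatim patch):

RunResults RunBenchmark(
    const benchmark::internal::BenchmarkInstance& b,
    std::vector<BenchmarkReporter::Run>* complexity_reports) {
  // Assumption: the constructor still performs every repetition,
  // so all that remains is to move the results out.
  BenchmarkRunner r(b, complexity_reports);
  return r.get_results();
}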
src/benchmark_runner.h

@@ -15,8 +15,13 @@
 #ifndef BENCHMARK_RUNNER_H_
 #define BENCHMARK_RUNNER_H_
 
+#include <thread>
+#include <vector>
+
 #include "benchmark_api_internal.h"
 #include "internal_macros.h"
+#include "perf_counters.h"
+#include "thread_manager.h"
 
 DECLARE_double(benchmark_min_time);
 
@@ -42,6 +47,46 @@ struct RunResults {
   bool file_report_aggregates_only = false;
 };
 
+class BenchmarkRunner {
+ public:
+  BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
+                  std::vector<BenchmarkReporter::Run>* complexity_reports_);
+
+  RunResults&& get_results() { return std::move(run_results); }
+
+ private:
+  RunResults run_results;
+
+  const benchmark::internal::BenchmarkInstance& b;
+  std::vector<BenchmarkReporter::Run>* complexity_reports;
+
+  const double min_time;
+  const int repeats;
+  const bool has_explicit_iteration_count;
+
+  std::vector<std::thread> pool;
+
+  IterationCount iters;  // preserved between repetitions!
+  // So only the first repetition has to find/calculate it,
+  // the other repetitions will just use that precomputed iteration count.
+
+  PerfCountersMeasurement perf_counters_measurement;
+  PerfCountersMeasurement* const perf_counters_measurement_ptr;
+
+  struct IterationResults {
+    internal::ThreadManager::Result results;
+    IterationCount iters;
+    double seconds;
+  };
+  IterationResults DoNIterations();
+
+  IterationCount PredictNumItersNeeded(const IterationResults& i) const;
+
+  bool ShouldReportIterationResults(const IterationResults& i) const;
+
+  void DoOneRepetition(int64_t repetition_index);
+};
+
 RunResults RunBenchmark(
     const benchmark::internal::BenchmarkInstance& b,
     std::vector<BenchmarkReporter::Run>* complexity_reports);
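With the class declared in the header, other translation units can now drive a runner directly. Two things worth noting from the declaration: in this commit the constructor still performs all repetitions, and get_results() returns RunResults&&, i.e. it moves the accumulated results out of the runner, so it should be called exactly once. A minimal usage sketch, assuming a valid BenchmarkInstance named instance:

// Null complexity_reports is fine; the runner checks the pointer before
// use (see the `if (complexity_reports && ...)` guard in the .cc diff).
BenchmarkRunner runner(instance, /*complexity_reports_=*/nullptr);
RunResults results = runner.get_results();  // moves; call only once
for (const BenchmarkReporter::Run& run : results.non_aggregates) {
  // consume one report per repetition...
}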