[FR] state.SkipWithMessage #963 (#1564)

* Add `SkipWithMessage`

* Added `enum Skipped`

* Fix: error at end of enumerator list

* Fix lint errors

---------

Co-authored-by: dominic <510002+dmah42@users.noreply.github.com>
This commit is contained in:
Mike Apodaca 2023-03-08 10:24:48 -08:00 committed by GitHub
parent 4050b4bda5
commit adb0d3d0bf
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 105 additions and 50 deletions

View File

@@ -667,6 +667,16 @@ enum AggregationReportMode
   ARM_FileReportAggregatesOnly | ARM_DisplayReportAggregatesOnly
 };
+enum Skipped
+#if defined(BENCHMARK_HAS_CXX11)
+    : unsigned
+#endif
+{
+  NotSkipped = 0,
+  SkippedWithMessage,
+  SkippedWithError
+};
 }  // namespace internal
 // State is passed to a running Benchmark and contains state for the
@@ -703,8 +713,8 @@ class BENCHMARK_EXPORT State {
   // }
   bool KeepRunningBatch(IterationCount n);
-  // REQUIRES: timer is running and 'SkipWithError(...)' has not been called
-  // by the current thread.
+  // REQUIRES: timer is running and 'SkipWithMessage(...)' or
+  // 'SkipWithError(...)' has not been called by the current thread.
   // Stop the benchmark timer. If not called, the timer will be
   // automatically stopped after the last iteration of the benchmark loop.
   //
@@ -719,8 +729,8 @@ class BENCHMARK_EXPORT State {
   // within each benchmark iteration, if possible.
   void PauseTiming();
-  // REQUIRES: timer is not running and 'SkipWithError(...)' has not been called
-  // by the current thread.
+  // REQUIRES: timer is not running and 'SkipWithMessage(...)' or
+  // 'SkipWithError(...)' has not been called by the current thread.
   // Start the benchmark timer. The timer is NOT running on entrance to the
   // benchmark function. It begins running after control flow enters the
   // benchmark loop.
@@ -730,8 +740,30 @@ class BENCHMARK_EXPORT State {
   // within each benchmark iteration, if possible.
   void ResumeTiming();
-  // REQUIRES: 'SkipWithError(...)' has not been called previously by the
-  // current thread.
+  // REQUIRES: 'SkipWithMessage(...)' or 'SkipWithError(...)' has not been
+  // called previously by the current thread.
+  // Report the benchmark as resulting in being skipped with the specified
+  // 'msg'.
+  // After this call the user may explicitly 'return' from the benchmark.
+  //
+  // If the ranged-for style of benchmark loop is used, the user must explicitly
+  // break from the loop, otherwise all future iterations will be run.
+  // If the 'KeepRunning()' loop is used the current thread will automatically
+  // exit the loop at the end of the current iteration.
+  //
+  // For threaded benchmarks only the current thread stops executing and future
+  // calls to `KeepRunning()` will block until all threads have completed
+  // the `KeepRunning()` loop. If multiple threads report being skipped only the
+  // first skip message is used.
+  //
+  // NOTE: Calling 'SkipWithMessage(...)' does not cause the benchmark to exit
+  // the current scope immediately. If the function is called from within
+  // the 'KeepRunning()' loop the current iteration will finish. It is the users
+  // responsibility to exit the scope as needed.
+  void SkipWithMessage(const char* msg);
+  // REQUIRES: 'SkipWithMessage(...)' or 'SkipWithError(...)' has not been
+  // called previously by the current thread.
   // Report the benchmark as resulting in an error with the specified 'msg'.
   // After this call the user may explicitly 'return' from the benchmark.
   //
@@ -751,8 +783,11 @@ class BENCHMARK_EXPORT State {
   // responsibility to exit the scope as needed.
   void SkipWithError(const char* msg);
+  // Returns true if 'SkipWithMessage(...)' or 'SkipWithError(...)' was called.
+  bool skipped() const { return internal::NotSkipped != skipped_; }
   // Returns true if an error has been reported with 'SkipWithError(...)'.
-  bool error_occurred() const { return error_occurred_; }
+  bool error_occurred() const { return internal::SkippedWithError == skipped_; }
   // REQUIRES: called exactly once per iteration of the benchmarking loop.
   // Set the manually measured time for this benchmark iteration, which
@@ -878,7 +913,7 @@ class BENCHMARK_EXPORT State {
  private:
   bool started_;
   bool finished_;
-  bool error_occurred_;
+  internal::Skipped skipped_;
   // items we don't need on the first cache line
   std::vector<int64_t> range_;
@@ -933,7 +968,7 @@ inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(IterationCount n,
   }
   if (!started_) {
     StartKeepRunning();
-    if (!error_occurred_ && total_iterations_ >= n) {
+    if (!skipped() && total_iterations_ >= n) {
       total_iterations_ -= n;
       return true;
     }
@@ -963,7 +998,7 @@ struct State::StateIterator {
   BENCHMARK_ALWAYS_INLINE
   explicit StateIterator(State* st)
-      : cached_(st->error_occurred_ ? 0 : st->max_iterations), parent_(st) {}
+      : cached_(st->skipped() ? 0 : st->max_iterations), parent_(st) {}
  public:
   BENCHMARK_ALWAYS_INLINE
@@ -1662,7 +1697,7 @@ class BENCHMARK_EXPORT BenchmarkReporter {
   Run()
       : run_type(RT_Iteration),
         aggregate_unit(kTime),
-        error_occurred(false),
+        skipped(internal::NotSkipped),
         iterations(1),
         threads(1),
         time_unit(GetDefaultTimeUnit()),
@@ -1685,8 +1720,8 @@ class BENCHMARK_EXPORT BenchmarkReporter {
   std::string aggregate_name;
   StatisticUnit aggregate_unit;
   std::string report_label;  // Empty if not set by benchmark.
-  bool error_occurred;
-  std::string error_message;
+  internal::Skipped skipped;
+  std::string skip_message;
   IterationCount iterations;
   int64_t threads;

View File

@@ -166,7 +166,7 @@ State::State(std::string name, IterationCount max_iters,
       max_iterations(max_iters),
       started_(false),
       finished_(false),
-      error_occurred_(false),
+      skipped_(internal::NotSkipped),
       range_(ranges),
       complexity_n_(0),
       name_(std::move(name)),
@@ -198,9 +198,8 @@ State::State(std::string name, IterationCount max_iters,
 #endif
   // Offset tests to ensure commonly accessed data is on the first cache line.
   const int cache_line_size = 64;
-  static_assert(offsetof(State, error_occurred_) <=
-                    (cache_line_size - sizeof(error_occurred_)),
-                "");
+  static_assert(
+      offsetof(State, skipped_) <= (cache_line_size - sizeof(skipped_)), "");
 #if defined(__INTEL_COMPILER)
 #pragma warning pop
 #elif defined(__GNUC__)
@@ -213,7 +212,7 @@ State::State(std::string name, IterationCount max_iters,
 void State::PauseTiming() {
   // Add in time accumulated so far
-  BM_CHECK(started_ && !finished_ && !error_occurred_);
+  BM_CHECK(started_ && !finished_ && !skipped());
   timer_->StopTimer();
   if (perf_counters_measurement_) {
     std::vector<std::pair<std::string, double>> measurements;
@@ -230,21 +229,35 @@ void State::PauseTiming() {
 }
 void State::ResumeTiming() {
-  BM_CHECK(started_ && !finished_ && !error_occurred_);
+  BM_CHECK(started_ && !finished_ && !skipped());
   timer_->StartTimer();
   if (perf_counters_measurement_) {
     perf_counters_measurement_->Start();
   }
 }
-void State::SkipWithError(const char* msg) {
+void State::SkipWithMessage(const char* msg) {
   BM_CHECK(msg);
-  error_occurred_ = true;
+  skipped_ = internal::SkippedWithMessage;
   {
     MutexLock l(manager_->GetBenchmarkMutex());
-    if (manager_->results.has_error_ == false) {
-      manager_->results.error_message_ = msg;
-      manager_->results.has_error_ = true;
+    if (internal::NotSkipped == manager_->results.skipped_) {
+      manager_->results.skip_message_ = msg;
+      manager_->results.skipped_ = skipped_;
+    }
+  }
+  total_iterations_ = 0;
+  if (timer_->running()) timer_->StopTimer();
+}
+void State::SkipWithError(const char* msg) {
+  BM_CHECK(msg);
+  skipped_ = internal::SkippedWithError;
+  {
+    MutexLock l(manager_->GetBenchmarkMutex());
+    if (internal::NotSkipped == manager_->results.skipped_) {
+      manager_->results.skip_message_ = msg;
+      manager_->results.skipped_ = skipped_;
     }
   }
   total_iterations_ = 0;
@@ -263,14 +276,14 @@ void State::SetLabel(const char* label) {
 void State::StartKeepRunning() {
   BM_CHECK(!started_ && !finished_);
   started_ = true;
-  total_iterations_ = error_occurred_ ? 0 : max_iterations;
+  total_iterations_ = skipped() ? 0 : max_iterations;
   manager_->StartStopBarrier();
-  if (!error_occurred_) ResumeTiming();
+  if (!skipped()) ResumeTiming();
 }
 void State::FinishKeepRunning() {
-  BM_CHECK(started_ && (!finished_ || error_occurred_));
-  if (!error_occurred_) {
+  BM_CHECK(started_ && (!finished_ || skipped()));
+  if (!skipped()) {
     PauseTiming();
   }
   // Total iterations has now wrapped around past 0. Fix this.

View File

@@ -80,8 +80,8 @@ BenchmarkReporter::Run CreateRunReport(
   report.run_name = b.name();
   report.family_index = b.family_index();
   report.per_family_instance_index = b.per_family_instance_index();
-  report.error_occurred = results.has_error_;
-  report.error_message = results.error_message_;
+  report.skipped = results.skipped_;
+  report.skip_message = results.skip_message_;
   report.report_label = results.report_label_;
   // This is the total iterations across all threads.
   report.iterations = results.iterations;
@@ -90,7 +90,7 @@ BenchmarkReporter::Run CreateRunReport(
   report.repetition_index = repetition_index;
   report.repetitions = repeats;
-  if (!report.error_occurred) {
+  if (!report.skipped) {
     if (b.use_manual_time()) {
       report.real_accumulated_time = results.manual_time_used;
     } else {
@@ -130,7 +130,7 @@ void RunInThread(const BenchmarkInstance* b, IterationCount iters,
   State st =
       b->Run(iters, thread_id, &timer, manager, perf_counters_measurement);
-  BM_CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
+  BM_CHECK(st.skipped() || st.iterations() >= st.max_iterations)
       << "Benchmark returned before State::KeepRunning() returned false!";
   {
     MutexLock l(manager->GetBenchmarkMutex());
@@ -341,7 +341,7 @@ bool BenchmarkRunner::ShouldReportIterationResults(
   // Determine if this run should be reported;
   // Either it has run for a sufficient amount of time
   // or because an error was reported.
-  return i.results.has_error_ ||
+  return i.results.skipped_ ||
          i.iters >= kMaxIterations ||  // Too many iterations already.
          i.seconds >=
              GetMinTimeToApply() ||  // The elapsed time is large enough.
@@ -477,7 +477,7 @@ void BenchmarkRunner::DoOneRepetition() {
   if (reports_for_family) {
     ++reports_for_family->num_runs_done;
-    if (!report.error_occurred) reports_for_family->Runs.push_back(report);
+    if (!report.skipped) reports_for_family->Runs.push_back(report);
   }
   run_results.non_aggregates.push_back(report);

View File

@@ -135,9 +135,13 @@ void ConsoleReporter::PrintRunData(const Run& result) {
   printer(Out, name_color, "%-*s ", name_field_width_,
           result.benchmark_name().c_str());
-  if (result.error_occurred) {
+  if (internal::SkippedWithError == result.skipped) {
     printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
-            result.error_message.c_str());
+            result.skip_message.c_str());
+    printer(Out, COLOR_DEFAULT, "\n");
+    return;
+  } else if (internal::SkippedWithMessage == result.skipped) {
+    printer(Out, COLOR_WHITE, "SKIPPED: \'%s\'", result.skip_message.c_str());
     printer(Out, COLOR_DEFAULT, "\n");
     return;
   }

View File

@@ -109,10 +109,10 @@ BENCHMARK_EXPORT
 void CSVReporter::PrintRunData(const Run& run) {
   std::ostream& Out = GetOutputStream();
   Out << CsvEscape(run.benchmark_name()) << ",";
-  if (run.error_occurred) {
+  if (run.skipped) {
     Out << std::string(elements.size() - 3, ',');
-    Out << "true,";
-    Out << CsvEscape(run.error_message) << "\n";
+    Out << std::boolalpha << (internal::SkippedWithError == run.skipped) << ",";
+    Out << CsvEscape(run.skip_message) << "\n";
     return;
   }

View File

@@ -254,9 +254,12 @@ void JSONReporter::PrintRunData(Run const& run) {
       BENCHMARK_UNREACHABLE();
     }()) << ",\n";
   }
-  if (run.error_occurred) {
-    out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
-    out << indent << FormatKV("error_message", run.error_message) << ",\n";
+  if (internal::SkippedWithError == run.skipped) {
+    out << indent << FormatKV("error_occurred", true) << ",\n";
+    out << indent << FormatKV("error_message", run.skip_message) << ",\n";
+  } else if (internal::SkippedWithMessage == run.skipped) {
+    out << indent << FormatKV("skipped", true) << ",\n";
+    out << indent << FormatKV("skip_message", run.skip_message) << ",\n";
   }
   if (!run.report_big_o && !run.report_rms) {
     out << indent << FormatKV("iterations", run.iterations) << ",\n";

View File

@@ -89,9 +89,8 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
   typedef BenchmarkReporter::Run Run;
   std::vector<Run> results;
-  auto error_count =
-      std::count_if(reports.begin(), reports.end(),
-                    [](Run const& run) { return run.error_occurred; });
+  auto error_count = std::count_if(reports.begin(), reports.end(),
+                                   [](Run const& run) { return run.skipped; });
   if (reports.size() - error_count < 2) {
     // We don't report aggregated data if there was a single run.
@@ -133,7 +132,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
   for (Run const& run : reports) {
     BM_CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
     BM_CHECK_EQ(run_iterations, run.iterations);
-    if (run.error_occurred) continue;
+    if (run.skipped) continue;
     real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
     cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
     // user counters

View File

@@ -43,8 +43,8 @@ class ThreadManager {
     double manual_time_used = 0;
     int64_t complexity_n = 0;
     std::string report_label_;
-    std::string error_message_;
-    bool has_error_ = false;
+    std::string skip_message_;
+    internal::Skipped skipped_ = internal::NotSkipped;
     UserCounters counters;
   };
   GUARDED_BY(GetBenchmarkMutex()) Result results;

View File

@@ -35,8 +35,9 @@ struct TestCase {
   void CheckRun(Run const& run) const {
     BM_CHECK(name == run.benchmark_name())
         << "expected " << name << " got " << run.benchmark_name();
-    BM_CHECK(error_occurred == run.error_occurred);
-    BM_CHECK(error_message == run.error_message);
+    BM_CHECK_EQ(error_occurred,
+                benchmark::internal::SkippedWithError == run.skipped);
+    BM_CHECK(error_message == run.skip_message);
     if (error_occurred) {
       // BM_CHECK(run.iterations == 0);
     } else {