Mirror of https://github.com/google/benchmark.git
commit 9686f1769e: Merge branch 'skip_with_error' of git://github.com/efcs/benchmark into efcs-skip_with_error

README.md (32 lines changed)
@@ -320,6 +320,38 @@ BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
 /* BarTest is now registered */
 ```
 
+## Exiting Benchmarks in Error
+
+When errors caused by external influences, such as file I/O and network
+communication, occur within a benchmark, the
+`State::SkipWithError(const char* msg)` function can be used to skip that run
+of the benchmark and report the error. Note that only future iterations of
+the `KeepRunning()` loop are skipped. Users may explicitly `return` to exit
+the benchmark immediately.
+
+The `SkipWithError(...)` function may be used at any point within the benchmark,
+including before and after the `KeepRunning()` loop.
+
+For example:
+
+```c++
+static void BM_test(benchmark::State& state) {
+  auto resource = GetResource();
+  if (!resource.good()) {
+    state.SkipWithError("Resource is not good!");
+    // KeepRunning() loop will not be entered.
+  }
+  while (state.KeepRunning()) {
+    auto data = resource.read_data();
+    if (!resource.good()) {
+      state.SkipWithError("Failed to read data!");
+      break; // Needed to skip the rest of the iteration.
+    }
+    do_stuff(data);
+  }
+}
+```
+
 ## Output Formats
 The library supports multiple output formats. Use the
 `--benchmark_format=<tabular|json>` flag to set the format type. `tabular` is
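Not part of the diff above, but a further illustration of the other case the README mentions: `SkipWithError(...)` called after the `KeepRunning()` loop, for an error that only becomes detectable once the measured work is done. A minimal sketch, assuming a hypothetical `VerifyResults()` helper:

```c++
static void BM_test_with_check(benchmark::State& state) {
  while (state.KeepRunning()) {
    do_stuff();
  }
  if (!VerifyResults()) {  // hypothetical post-run validation
    state.SkipWithError("Results failed validation!");
  }
  // All iterations have already run, so no explicit return is needed here.
}
```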
@@ -257,8 +257,10 @@ public:
     }
     bool const res = total_iterations_++ < max_iterations;
     if (BENCHMARK_BUILTIN_EXPECT(!res, false)) {
-      assert(started_ && !finished_);
-      PauseTiming();
+      assert(started_ && (!finished_ || error_occurred_));
+      if (!error_occurred_) {
+        PauseTiming();
+      }
       // Total iterations now is one greater than max iterations. Fix this.
       total_iterations_ = max_iterations;
       finished_ = true;
@@ -266,13 +268,14 @@ public:
     return res;
   }
 
-  // REQUIRES: timer is running
+  // REQUIRES: timer is running and 'SkipWithError(...)' has not been called
+  // by the current thread.
   // Stop the benchmark timer. If not called, the timer will be
   // automatically stopped after KeepRunning() returns false for the first time.
   //
   // For threaded benchmarks the PauseTiming() function acts
   // like a barrier. I.e., the ith call by a particular thread to this
-  // function will block until all threads have made their ith call.
+  // function will block until all active threads have made their ith call.
   // The timer will stop when the last thread has called this function.
   //
   // NOTE: PauseTiming()/ResumeTiming() are relatively
@@ -280,13 +283,14 @@ public:
   // within each benchmark iteration, if possible.
   void PauseTiming();
 
-  // REQUIRES: timer is not running
+  // REQUIRES: timer is not running and 'SkipWithError(...)' has not been called
+  // by the current thread.
   // Start the benchmark timer. The timer is NOT running on entrance to the
   // benchmark function. It begins running after the first call to KeepRunning()
   //
   // For threaded benchmarks the ResumeTiming() function acts
   // like a barrier. I.e., the ith call by a particular thread to this
-  // function will block until all threads have made their ith call.
+  // function will block until all active threads have made their ith call.
   // The timer will start when the last thread has called this function.
   //
   // NOTE: PauseTiming()/ResumeTiming() are relatively
@@ -294,6 +298,23 @@ public:
   // within each benchmark iteration, if possible.
   void ResumeTiming();
 
+  // REQUIRES: 'SkipWithError(...)' has not been called previously by the
+  // current thread.
+  // Skip any future iterations of the 'KeepRunning()' loop in the current
+  // thread and report an error with the specified 'msg'. After this call
+  // the user may explicitly 'return' from the benchmark.
+  //
+  // For threaded benchmarks only the current thread stops executing. If
+  // multiple threads report an error only the first error message is used.
+  // The current thread is no longer considered 'active' by
+  // 'PauseTiming()' and 'ResumeTiming()'.
+  //
+  // NOTE: Calling 'SkipWithError(...)' does not cause the benchmark to exit
+  // the current scope immediately. If the function is called from within
+  // the 'KeepRunning()' loop the current iteration will finish. It is the user's
+  // responsibility to exit the scope as needed.
+  void SkipWithError(const char* msg);
+
   // REQUIRES: called exactly once per iteration of the KeepRunning loop.
   // Set the manually measured time for this benchmark iteration, which
   // is used instead of automatically measured time if UseManualTime() was
@@ -409,6 +430,9 @@ private:
 
   size_t complexity_n_;
 
+ public:
+  // FIXME: Make this private somehow.
+  bool error_occurred_;
  public:
   // Index of the executing thread. Values from [0, threads).
   const int thread_index;
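The hunks above extend the public `State` API: the `KeepRunning()` fast path, the `PauseTiming()`/`ResumeTiming()` preconditions, and the new `SkipWithError()` declaration. As the comments note, in a threaded benchmark only the calling thread stops executing and it may simply `return`. A minimal sketch of that pattern (not from this commit; `SetupSharedResource()` and `DoWork()` are hypothetical helpers):

```c++
static void BM_threaded(benchmark::State& state) {
  // Hypothetical setup performed by thread 0 only; on failure, only this
  // thread is skipped, and the remaining threads keep running.
  if (state.thread_index == 0 && !SetupSharedResource()) {
    state.SkipWithError("setup failed on thread 0");
    return;  // exit this thread's benchmark function immediately
  }
  while (state.KeepRunning()) {
    DoWork();  // hypothetical per-iteration work
  }
}
BENCHMARK(BM_threaded)->Threads(4);
```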
@@ -42,6 +42,7 @@ class BenchmarkReporter {
 
   struct Run {
     Run() :
+      error_occurred(false),
       iterations(1),
       time_unit(kNanosecond),
       real_accumulated_time(0),
@@ -56,6 +57,9 @@ class BenchmarkReporter {
 
     std::string benchmark_name;
     std::string report_label;  // Empty if not set by benchmark.
+    bool error_occurred;
+    std::string error_message;
+
     int64_t iterations;
     TimeUnit time_unit;
     double real_accumulated_time;
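With `error_occurred` and `error_message` now part of `BenchmarkReporter::Run`, custom reporters can surface skipped runs. A small sketch of one way to do that, deriving from `ConsoleReporter` the same way the test added at the end of this commit does (an illustration, not code from the commit):

```c++
#include "benchmark/reporter.h"
#include <cstdio>
#include <vector>

// Hypothetical reporter: prints a one-line notice for runs that were skipped
// via SkipWithError() and defers everything else to ConsoleReporter.
class ErrorAwareReporter : public benchmark::ConsoleReporter {
 public:
  virtual void ReportRuns(const std::vector<Run>& reports) {
    for (const Run& run : reports) {
      if (run.error_occurred) {
        std::fprintf(stderr, "skipped %s: %s\n", run.benchmark_name.c_str(),
                     run.error_message.c_str());
      }
    }
    ConsoleReporter::ReportRuns(reports);
  }
};
```

It would be installed the same way the test does, e.g. `benchmark::RunSpecifiedBenchmarks(&my_reporter);`.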
src/benchmark.cc (104 lines changed)

@@ -112,6 +112,10 @@ std::string* GetReportLabel() {
   return &label;
 }
 
+// Global variable so that a benchmark can report an error as a human readable
+// string. If error_message is null no error occurred.
+static std::atomic<const char*> error_message = ATOMIC_VAR_INIT(nullptr);
+
 // TODO(ericwf): support MallocCounter.
 //static benchmark::MallocCounter *benchmark_mc;
 
@@ -127,6 +131,7 @@ class TimerManager {
  public:
   TimerManager(int num_threads, Notification* done)
       : num_threads_(num_threads),
+        running_threads_(num_threads),
         done_(done),
         running_(false),
         real_time_used_(0),
@@ -134,7 +139,8 @@ class TimerManager {
         manual_time_used_(0),
         num_finalized_(0),
         phase_number_(0),
-        entered_(0) {
+        entered_(0)
+  {
   }
 
   // Called by each thread
@@ -197,6 +203,15 @@ class TimerManager {
     }
   }
 
+  void RemoveErroredThread() EXCLUDES(lock_) {
+    MutexLock ml(lock_);
+    int last_thread = --running_threads_ == 0;
+    if (last_thread && running_)
+      InternalStop();
+    else if (!last_thread)
+      phase_condition_.notify_all();
+  }
+
   // REQUIRES: timer is not running
   double real_time_used() EXCLUDES(lock_) {
     MutexLock l(lock_);
@@ -222,6 +237,7 @@ class TimerManager {
   Mutex lock_;
   Condition phase_condition_;
   int num_threads_;
+  int running_threads_;
   Notification* done_;
 
   bool running_; // Is the timer running
@@ -253,22 +269,24 @@ class TimerManager {
   // entered the barrier. Returns iff this is the last thread to
   // enter the barrier.
   bool Barrier(MutexLock& ml) REQUIRES(lock_) {
-    CHECK_LT(entered_, num_threads_);
+    CHECK_LT(entered_, running_threads_);
     entered_++;
-    if (entered_ < num_threads_) {
+    if (entered_ < running_threads_) {
       // Wait for all threads to enter
       int phase_number_cp = phase_number_;
       auto cb = [this, phase_number_cp]() {
-        return this->phase_number_ > phase_number_cp;
+        return this->phase_number_ > phase_number_cp ||
+               entered_ == running_threads_; // A thread has aborted in error
       };
       phase_condition_.wait(ml.native_handle(), cb);
-      return false; // I was not the last one
-    } else {
-      // Last thread has reached the barrier
-      phase_number_++;
-      entered_ = 0;
-      return true;
+      if (phase_number_ > phase_number_cp)
+        return false;
+      // else (running_threads_ == entered_) and we are the last thread.
     }
+    // Last thread has reached the barrier
+    phase_number_++;
+    entered_ = 0;
+    return true;
   }
 };
 
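The `Barrier()` change above makes the timer phase barrier tolerant of threads that drop out: a waiter is also released once the number of arrivals equals the (possibly reduced) count of active threads, and `RemoveErroredThread()` wakes the waiters after decrementing that count. A toy sketch of the same idea using plain `std::condition_variable` instead of the library's internal `Mutex`/`Condition` wrappers (illustration only, not code from this commit):

```c++
#include <condition_variable>
#include <mutex>

class AbortableBarrier {
 public:
  explicit AbortableBarrier(int n) : active_(n) {}

  // Returns true for the thread that completes the current phase.
  bool Arrive() {
    std::unique_lock<std::mutex> l(mu_);
    int phase = phase_;
    if (++entered_ < active_) {
      // Wake when the phase advances, or when an Abort() makes us the
      // effective last arrival for this phase.
      cv_.wait(l, [&] { return phase_ > phase || entered_ == active_; });
      if (phase_ > phase) return false;  // someone else completed the phase
    }
    // We are the last active thread: complete the phase.
    ++phase_;
    entered_ = 0;
    cv_.notify_all();
    return true;
  }

  // Called by a thread that gives up (e.g. after reporting an error).
  void Abort() {
    std::lock_guard<std::mutex> l(mu_);
    --active_;
    cv_.notify_all();
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int active_;
  int entered_ = 0;
  int phase_ = 0;
};
```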
@@ -743,6 +761,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
     MutexLock l(GetBenchmarkLock());
     GetReportLabel()->clear();
   }
+  error_message = nullptr;
 
   Notification done;
   timer_manager = std::unique_ptr<TimerManager>(new TimerManager(b.threads, &done));
@@ -788,47 +807,53 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
         MutexLock l(GetBenchmarkLock());
         label = *GetReportLabel();
       }
+      const char* error_msg = error_message;
 
       const double min_time = !IsZero(b.min_time) ? b.min_time
                                                   : FLAGS_benchmark_min_time;
 
       // If this was the first run, was elapsed time or cpu time large enough?
       // If this is not the first run, go with the current value of iter.
-      if ((i > 0) ||
+      if ((i > 0) || (error_msg != nullptr) ||
          (iters >= kMaxIterations) ||
          (seconds >= min_time) ||
          (real_accumulated_time >= 5*min_time)) {
-        double bytes_per_second = 0;
-        if (total.bytes_processed > 0 && seconds > 0.0) {
-          bytes_per_second = (total.bytes_processed / seconds);
-        }
-        double items_per_second = 0;
-        if (total.items_processed > 0 && seconds > 0.0) {
-          items_per_second = (total.items_processed / seconds);
-        }
 
         // Create report about this benchmark run.
         BenchmarkReporter::Run report;
         report.benchmark_name = b.name;
+        report.error_occurred = error_msg != nullptr;
+        report.error_message = error_msg != nullptr ? error_msg : "";
         report.report_label = label;
         // Report the total iterations across all threads.
         report.iterations = static_cast<int64_t>(iters) * b.threads;
         report.time_unit = b.time_unit;
-        if (b.use_manual_time) {
-          report.real_accumulated_time = manual_accumulated_time;
-        } else {
-          report.real_accumulated_time = real_accumulated_time;
+
+        if (!report.error_occurred) {
+          double bytes_per_second = 0;
+          if (total.bytes_processed > 0 && seconds > 0.0) {
+            bytes_per_second = (total.bytes_processed / seconds);
+          }
+          double items_per_second = 0;
+          if (total.items_processed > 0 && seconds > 0.0) {
+            items_per_second = (total.items_processed / seconds);
+          }
+
+          if (b.use_manual_time) {
+            report.real_accumulated_time = manual_accumulated_time;
+          } else {
+            report.real_accumulated_time = real_accumulated_time;
+          }
+          report.cpu_accumulated_time = cpu_accumulated_time;
+          report.bytes_per_second = bytes_per_second;
+          report.items_per_second = items_per_second;
+          report.complexity_n = total.complexity_n;
+          report.complexity = b.complexity;
+          if(report.complexity != oNone)
+            complexity_reports.push_back(report);
         }
-        report.cpu_accumulated_time = cpu_accumulated_time;
-        report.bytes_per_second = bytes_per_second;
-        report.items_per_second = items_per_second;
-        report.complexity_n = total.complexity_n;
-        report.complexity = b.complexity;
 
         reports.push_back(report);
 
-        if(report.complexity != oNone)
-          complexity_reports.push_back(report);
-
         break;
       }
 
@@ -873,6 +898,7 @@ State::State(size_t max_iters, bool has_x, int x, bool has_y, int y,
       has_range_y_(has_y), range_y_(y),
       bytes_processed_(0), items_processed_(0),
       complexity_n_(0),
+      error_occurred_(false),
       thread_index(thread_i),
       threads(n_threads),
       max_iterations(max_iters)
@@ -884,16 +910,26 @@ State::State(size_t max_iters, bool has_x, int x, bool has_y, int y,
 void State::PauseTiming() {
   // Add in time accumulated so far
   CHECK(running_benchmark);
-  CHECK(started_ && !finished_);
+  CHECK(started_ && !finished_ && !error_occurred_);
   timer_manager->StopTimer();
 }
 
 void State::ResumeTiming() {
   CHECK(running_benchmark);
-  CHECK(started_ && !finished_);
+  CHECK(started_ && !finished_ && !error_occurred_);
   timer_manager->StartTimer();
 }
 
+void State::SkipWithError(const char* msg) {
+  CHECK(msg);
+  error_occurred_ = true;
+  const char* expected_no_error_msg = nullptr;
+  error_message.compare_exchange_weak(expected_no_error_msg, msg);
+  started_ = finished_ = true;
+  total_iterations_ = max_iterations;
+  timer_manager->RemoveErroredThread();
+}
+
 void State::SetIterationTime(double seconds)
 {
   CHECK(running_benchmark);
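`State::SkipWithError()` above publishes its message through the process-wide atomic declared earlier in this file, so when several threads fail only the first message is kept, as the header comment promises. A self-contained sketch of that first-error-wins idiom (illustration only; it uses `compare_exchange_strong` for simplicity where the library code uses `compare_exchange_weak`):

```c++
#include <atomic>
#include <cstdio>

// One shared slot; nullptr means "no error recorded yet".
static std::atomic<const char*> g_error{nullptr};

void ReportError(const char* msg) {
  const char* expected = nullptr;
  // Succeeds only for the first caller; later calls leave the slot untouched.
  g_error.compare_exchange_strong(expected, msg);
}

int main() {
  ReportError("first failure");
  ReportError("second failure");         // ignored: an error is already recorded
  std::printf("%s\n", g_error.load());   // prints "first failure"
}
```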
@@ -16,6 +16,7 @@
 
 #include <cstdint>
 #include <cstdio>
+#include <algorithm>
 #include <iostream>
 #include <string>
 #include <tuple>
@@ -65,7 +66,11 @@ void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
     PrintRunData(run);
   }
 
-  if (reports.size() < 2) {
+  auto error_count = std::count_if(
+      reports.begin(), reports.end(),
+      [](Run const& run) {return run.error_occurred;});
+
+  if (reports.size() - error_count < 2) {
     // We don't report aggregated data if there was a single run.
     return;
   }
@@ -95,6 +100,17 @@ void ConsoleReporter::ReportComplexity(const std::vector<Run> & complexity_reports) {
 }
 
 void ConsoleReporter::PrintRunData(const Run& result) {
+  auto name_color = (result.report_big_o || result.report_rms)
+      ? COLOR_BLUE : COLOR_GREEN;
+  ColorPrintf(name_color, "%-*s ", name_field_width_,
+              result.benchmark_name.c_str());
+
+  if (result.error_occurred) {
+    ColorPrintf(COLOR_RED, "ERROR OCCURRED: \'%s\'",
+                result.error_message.c_str());
+    ColorPrintf(COLOR_DEFAULT, "\n");
+    return;
+  }
   // Format bytes per second
   std::string rate;
   if (result.bytes_per_second > 0) {
@@ -112,10 +128,6 @@ void ConsoleReporter::PrintRunData(const Run& result) {
   const char* timeLabel;
   std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(result.time_unit);
 
-  ColorPrintf((result.report_big_o ||result.report_rms) ? COLOR_BLUE :
-              COLOR_GREEN, "%-*s ",
-              name_field_width_, result.benchmark_name.c_str());
-
   if(result.report_big_o) {
     std::string big_o = result.report_big_o ? GetBigO(result.complexity) : "";
     ColorPrintf(COLOR_YELLOW, "%10.4f %s %10.4f %s ",
@@ -128,6 +140,7 @@ void ConsoleReporter::PrintRunData(const Run& result) {
                 result.real_accumulated_time * multiplier * 100,
                 result.cpu_accumulated_time * multiplier * 100);
+  } else if (result.iterations == 0) {
 
     ColorPrintf(COLOR_YELLOW, "%10.0f %s %10.0f %s ",
                 result.real_accumulated_time * multiplier,
                 timeLabel,
@@ -15,6 +15,7 @@
 #include "benchmark/reporter.h"
 
 #include <cstdint>
+#include <algorithm>
 #include <iostream>
 #include <string>
 #include <tuple>
@@ -27,6 +28,21 @@
 
 namespace benchmark {
 
+namespace {
+std::vector<std::string> elements = {
+  "name",
+  "iterations",
+  "real_time",
+  "cpu_time",
+  "time_unit",
+  "bytes_per_second",
+  "items_per_second",
+  "label",
+  "error_occurred",
+  "error_message"
+};
+}
+
 bool CSVReporter::ReportContext(const Context& context) {
   std::cerr << "Run on (" << context.num_cpus << " X " << context.mhz_per_cpu
             << " MHz CPU " << ((context.num_cpus > 1) ? "s" : "") << ")\n";
@@ -43,8 +59,12 @@ bool CSVReporter::ReportContext(const Context& context) {
   std::cerr << "***WARNING*** Library was built as DEBUG. Timings may be "
                "affected.\n";
 #endif
-  std::cout << "name,iterations,real_time,cpu_time,time_unit,bytes_per_second,"
-               "items_per_second,label\n";
+  for (auto B = elements.begin(); B != elements.end(); ) {
+    std::cout << *B++;
+    if (B != elements.end())
+      std::cout << ",";
+  }
+  std::cout << "\n";
   return true;
 }
 
@@ -53,8 +73,12 @@ void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
     return;
   }
 
+  auto error_count = std::count_if(
+      reports.begin(), reports.end(),
+      [](Run const& run) {return run.error_occurred;});
+
   std::vector<Run> reports_cp = reports;
-  if (reports.size() >= 2) {
+  if (reports.size() - error_count >= 2) {
     Run mean_data;
     Run stddev_data;
     BenchmarkReporter::ComputeStats(reports, &mean_data, &stddev_data);
@@ -82,6 +106,22 @@ void CSVReporter::ReportComplexity(const std::vector<Run>& complexity_reports) {
 }
 
 void CSVReporter::PrintRunData(const Run & run) {
+
+  // Field with embedded double-quote characters must be doubled and the field
+  // delimited with double-quotes.
+  std::string name = run.benchmark_name;
+  ReplaceAll(&name, "\"", "\"\"");
+  std::cout << '"' << name << "\",";
+  if (run.error_occurred) {
+    std::cout << std::string(elements.size() - 3, ',');
+    std::cout << "true,";
+    std::string msg = run.error_message;
+    ReplaceAll(&msg, "\"", "\"\"");
+    std::cout << '"' << msg << "\"\n";
+    return;
+  }
+
   double multiplier;
   const char* timeLabel;
   std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(run.time_unit);
@@ -93,12 +133,6 @@ void CSVReporter::PrintRunData(const Run & run) {
     cpu_time = cpu_time / static_cast<double>(run.iterations);
   }
 
-  // Field with embedded double-quote characters must be doubled and the field
-  // delimited with double-quotes.
-  std::string name = run.benchmark_name;
-  ReplaceAll(&name, "\"", "\"\"");
-  std::cout << "\"" << name << "\",";
-
   // Do not print iteration on bigO and RMS report
   if(!run.report_big_o && !run.report_rms) {
     std::cout << run.iterations;
@@ -129,6 +163,7 @@ void CSVReporter::PrintRunData(const Run & run) {
     ReplaceAll(&label, "\"", "\"\"");
     std::cout << "\"" << label << "\"";
   }
+  std::cout << ",,"; // for error_occurred and error_message
   std::cout << '\n';
 }
 
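For illustration only (not output captured from this commit): with the header row built from `elements` above, a run of a hypothetical benchmark `BM_foo` that called `SkipWithError("could not open file")` would be emitted roughly as the following two CSV lines, with the measurement columns left empty:

```
name,iterations,real_time,cpu_time,time_unit,bytes_per_second,items_per_second,label,error_occurred,error_message
"BM_foo",,,,,,,,true,"could not open file"
```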
@@ -15,6 +15,7 @@
 #include "benchmark/reporter.h"
 
 #include <cstdint>
+#include <algorithm>
 #include <iostream>
 #include <string>
 #include <tuple>
@@ -96,8 +97,13 @@ void JSONReporter::ReportRuns(std::vector<Run> const& reports) {
     out << ",\n";
   }
   first_report_ = false;
+
+  auto error_count = std::count_if(
+      reports.begin(), reports.end(),
+      [](Run const& run) {return run.error_occurred;});
+
   std::vector<Run> reports_cp = reports;
-  if (reports.size() >= 2) {
+  if (reports.size() - error_count >= 2) {
     Run mean_data;
     Run stddev_data;
     BenchmarkReporter::ComputeStats(reports, &mean_data, &stddev_data);
@@ -162,6 +168,14 @@ void JSONReporter::PrintRunData(Run const& run) {
   out << indent
       << FormatKV("name", run.benchmark_name)
       << ",\n";
+  if (run.error_occurred) {
+    out << indent
+        << FormatKV("error_occurred", run.error_occurred)
+        << ",\n";
+    out << indent
+        << FormatKV("error_message", run.error_message)
+        << ",\n";
+  }
   if(!run.report_big_o && !run.report_rms) {
     out << indent
         << FormatKV("iterations", run.iterations)
@@ -41,6 +41,8 @@ void BenchmarkReporter::ComputeStats(
   for (Run const& run : reports) {
     CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
     CHECK_EQ(run_iterations, run.iterations);
+    if (run.error_occurred)
+      continue;
     real_accumulated_time_stat +=
         Stat1_d(run.real_accumulated_time/run.iterations, run.iterations);
     cpu_accumulated_time_stat +=
@@ -36,6 +36,9 @@ add_test(basic_benchmark basic_test --benchmark_min_time=0.01)
 compile_benchmark_test(diagnostics_test)
 add_test(diagnostics_test diagnostics_test --benchmark_min_time=0.01)
 
+compile_benchmark_test(skip_with_error_test)
+add_test(skip_with_error_test skip_with_error_test --benchmark_min_time=0.01)
+
 compile_benchmark_test(fixture_test)
 add_test(fixture_test fixture_test --benchmark_min_time=0.01)
 
test/skip_with_error_test.cc (new file, 161 lines)

@@ -0,0 +1,161 @@
#undef NDEBUG
#include "benchmark/benchmark.h"
#include "../src/check.h"  // NOTE: check.h is for internal use only!
#include <cassert>
#include <vector>

namespace {

class TestReporter : public benchmark::ConsoleReporter {
 public:
  virtual bool ReportContext(const Context& context) {
    return ConsoleReporter::ReportContext(context);
  };

  virtual void ReportRuns(const std::vector<Run>& report) {
    all_runs_.insert(all_runs_.end(), begin(report), end(report));
    ConsoleReporter::ReportRuns(report);
  }

  TestReporter() {}
  virtual ~TestReporter() {}

  mutable std::vector<Run> all_runs_;
};

struct TestCase {
  std::string name;
  bool error_occurred;
  std::string error_message;

  typedef benchmark::BenchmarkReporter::Run Run;

  void CheckRun(Run const& run) const {
    CHECK(name == run.benchmark_name) << "expected " << name << " got " << run.benchmark_name;
    CHECK(error_occurred == run.error_occurred);
    CHECK(error_message == run.error_message);
    if (error_occurred) {
      //CHECK(run.iterations == 0);
    } else {
      CHECK(run.iterations != 0);
    }
  }
};

std::vector<TestCase> ExpectedResults;

int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) {
  for (auto TC : v) {
    TC.name = base_name + TC.name;
    ExpectedResults.push_back(std::move(TC));
  }
  return 0;
}

#define CONCAT(x, y) CONCAT2(x, y)
#define CONCAT2(x, y) x##y
#define ADD_CASES(...) \
  int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__)

}  // end namespace


void BM_error_before_running(benchmark::State& state) {
  state.SkipWithError("error message");
  while (state.KeepRunning()) {
    assert(false);
  }
}
BENCHMARK(BM_error_before_running);
ADD_CASES("BM_error_before_running",
          {{"", true, "error message"}});

void BM_error_during_running(benchmark::State& state) {
  int first_iter = true;
  while (state.KeepRunning()) {
    if (state.range_x() == 1 && state.thread_index <= (state.threads / 2)) {
      assert(first_iter);
      first_iter = false;
      state.SkipWithError("error message");
    } else {
      state.PauseTiming();
      state.ResumeTiming();
    }
  }
}
BENCHMARK(BM_error_during_running)->Arg(1)->Arg(2)->ThreadRange(1, 8);
ADD_CASES(
    "BM_error_during_running",
    {{"/1/threads:1", true, "error message"},
     {"/1/threads:2", true, "error message"},
     {"/1/threads:4", true, "error message"},
     {"/1/threads:8", true, "error message"},
     {"/2/threads:1", false, ""},
     {"/2/threads:2", false, ""},
     {"/2/threads:4", false, ""},
     {"/2/threads:8", false, ""}}
);

void BM_error_after_running(benchmark::State& state) {
  while (state.KeepRunning()) {
    benchmark::DoNotOptimize(state.iterations());
  }
  if (state.thread_index <= (state.threads / 2))
    state.SkipWithError("error message");
}
BENCHMARK(BM_error_after_running)->ThreadRange(1, 8);
ADD_CASES(
    "BM_error_after_running",
    {{"/threads:1", true, "error message"},
     {"/threads:2", true, "error message"},
     {"/threads:4", true, "error message"},
     {"/threads:8", true, "error message"}}
);

void BM_error_while_paused(benchmark::State& state) {
  bool first_iter = true;
  while (state.KeepRunning()) {
    if (state.range_x() == 1 && state.thread_index <= (state.threads / 2)) {
      assert(first_iter);
      first_iter = false;
      state.PauseTiming();
      state.SkipWithError("error message");
    } else {
      state.PauseTiming();
      state.ResumeTiming();
    }
  }
}
BENCHMARK(BM_error_while_paused)->Arg(1)->Arg(2)->ThreadRange(1, 8);
ADD_CASES(
    "BM_error_while_paused",
    {{"/1/threads:1", true, "error message"},
     {"/1/threads:2", true, "error message"},
     {"/1/threads:4", true, "error message"},
     {"/1/threads:8", true, "error message"},
     {"/2/threads:1", false, ""},
     {"/2/threads:2", false, ""},
     {"/2/threads:4", false, ""},
     {"/2/threads:8", false, ""}}
);


int main(int argc, char* argv[]) {
  benchmark::Initialize(&argc, argv);

  TestReporter test_reporter;
  benchmark::RunSpecifiedBenchmarks(&test_reporter);

  typedef benchmark::BenchmarkReporter::Run Run;
  auto EB = ExpectedResults.begin();

  for (Run const& run : test_reporter.all_runs_) {
    assert(EB != ExpectedResults.end());
    EB->CheckRun(run);
    ++EB;
  }
  assert(EB == ExpectedResults.end());

  return 0;
}