From 43017f8b1510a50855e6b0ea145a534d3d754068 Mon Sep 17 00:00:00 2001
From: Eric Fiselier
Date: Mon, 23 May 2016 19:24:56 -0600
Subject: [PATCH 1/8] Add SkipWithError(...) function.

---
 include/benchmark/benchmark_api.h |  19 +++-
 include/benchmark/reporter.h      |   4 +
 src/benchmark.cc                  | 100 +++++++++++++------
 src/check.h                       |  20 +++-
 src/console_reporter.cc           |  18 +++-
 src/json_reporter.cc              |   8 ++
 src/reporter.cc                   |   2 +
 test/CMakeLists.txt               |   6 ++
 test/diagnostics_test.cc          |  61 +++++++++++
 test/skip_with_error_test.cc      | 161 ++++++++++++++++++++++++++++++
 10 files changed, 359 insertions(+), 40 deletions(-)
 create mode 100644 test/diagnostics_test.cc
 create mode 100644 test/skip_with_error_test.cc

diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h
index 2ded4814..654986d4 100644
--- a/include/benchmark/benchmark_api.h
+++ b/include/benchmark/benchmark_api.h
@@ -235,22 +235,27 @@ enum TimeUnit {
 // benchmark to use.
 class State {
 public:
-  State(size_t max_iters, bool has_x, int x, bool has_y, int y, int thread_i, int n_threads);
+  State(size_t max_iters, bool has_x, int x, bool has_y, int y,
+        int thread_i, int n_threads);
 
   // Returns true iff the benchmark should continue through another iteration.
   // NOTE: A benchmark may not return from the test until KeepRunning() has
   // returned false.
   bool KeepRunning() {
     if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
-      ResumeTiming();
+      assert(!finished_);
       started_ = true;
+      ResumeTiming();
     }
     bool const res = total_iterations_++ < max_iterations;
     if (BENCHMARK_BUILTIN_EXPECT(!res, false)) {
-      assert(started_);
-      PauseTiming();
+      assert(started_ && (!finished_ || error_occurred_));
+      if (!error_occurred_) {
+        PauseTiming();
+      }
       // Total iterations now is one greater than max iterations. Fix this.
       total_iterations_ = max_iterations;
+      finished_ = true;
     }
     return res;
   }
@@ -283,6 +288,8 @@ public:
   // within each benchmark iteration, if possible.
   void ResumeTiming();
 
+  void SkipWithError(const char* msg);
+
   // REQUIRES: called exactly once per iteration of the KeepRunning loop.
   // Set the manually measured time for this benchmark iteration, which
   // is used instead of automatically measured time if UseManualTime() was
@@ -371,6 +378,7 @@ public:
 
 private:
   bool started_;
+  bool finished_;
   size_t total_iterations_;
 
   bool has_range_x_;
@@ -382,6 +390,9 @@ private:
 
   size_t bytes_processed_;
   size_t items_processed_;
 
+public:
+  // FIXME: Make this private somehow.
+  bool error_occurred_;
 public:
   // Index of the executing thread. Values from [0, threads).
   const int thread_index;
diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h
index aaf5fbff..ac222e45 100644
--- a/include/benchmark/reporter.h
+++ b/include/benchmark/reporter.h
@@ -42,6 +42,7 @@ class BenchmarkReporter {
   struct Run {
     Run() :
+      error_occurred(false),
       iterations(1),
       time_unit(kNanosecond),
       real_accumulated_time(0),
@@ -52,6 +53,9 @@ class BenchmarkReporter {
     std::string benchmark_name;
     std::string report_label;  // Empty if not set by benchmark.
+    bool error_occurred;
+    std::string error_message;
+
     int64_t iterations;
     TimeUnit time_unit;
     double real_accumulated_time;
diff --git a/src/benchmark.cc b/src/benchmark.cc
index 3d12d283..5d8f279e 100644
--- a/src/benchmark.cc
+++ b/src/benchmark.cc
@@ -112,6 +112,10 @@ std::string* GetReportLabel() {
   return &label;
 }
 
+// Global variable so that a benchmark can report an error as a human readable
+// string. If error_message is null no error occurred.
+static std::atomic<const char*> error_message = ATOMIC_VAR_INIT(nullptr);
+
 // TODO(ericwf): support MallocCounter.
 //static benchmark::MallocCounter *benchmark_mc;
 
@@ -126,6 +130,7 @@ class TimerManager {
  public:
   TimerManager(int num_threads, Notification* done)
       : num_threads_(num_threads),
+        running_threads_(num_threads),
        done_(done),
        running_(false),
        real_time_used_(0),
@@ -133,7 +138,8 @@ class TimerManager {
        manual_time_used_(0),
        num_finalized_(0),
        phase_number_(0),
-       entered_(0) {
+       entered_(0)
+  {
   }
 
   // Called by each thread
@@ -196,6 +202,16 @@ class TimerManager {
     }
   }
 
+  void RemoveErroredThread() EXCLUDES(lock_) {
+    MutexLock ml(lock_);
+    bool last_thread = --running_threads_ == 0;
+    if (last_thread && running_) {
+      InternalStop();
+    } else if (!last_thread) {
+      phase_condition_.notify_all();
+    }
+  }
+
   // REQUIRES: timer is not running
   double real_time_used() EXCLUDES(lock_) {
     MutexLock l(lock_);
@@ -221,6 +237,7 @@ class TimerManager {
   Mutex lock_;
   Condition phase_condition_;
   int num_threads_;
+  int running_threads_;
   Notification* done_;
 
   bool running_;                // Is the timer running
@@ -252,22 +269,27 @@ class TimerManager {
   // entered the barrier.  Returns iff this is the last thread to
   // enter the barrier.
   bool Barrier(MutexLock& ml) REQUIRES(lock_) {
-    CHECK_LT(entered_, num_threads_);
+    CHECK_LT(entered_, running_threads_);
     entered_++;
-    if (entered_ < num_threads_) {
+    if (entered_ < running_threads_) {
       // Wait for all threads to enter
       int phase_number_cp = phase_number_;
       auto cb = [this, phase_number_cp]() {
-        return this->phase_number_ > phase_number_cp;
+        return this->phase_number_ > phase_number_cp ||
+               entered_ == running_threads_; // A thread has aborted in error
       };
-      phase_condition_.wait(ml.native_handle(), cb);
-      return false;  // I was not the last one
-    } else {
-      // Last thread has reached the barrier
-      phase_number_++;
-      entered_ = 0;
-      return true;
+      while (true) {
+        phase_condition_.wait(ml.native_handle(), cb);
+        if (phase_number_ > phase_number_cp)
+          return false;
+        else if (running_threads_ == entered_)
+          break;
+      }
     }
+    // Last thread has reached the barrier
+    phase_number_++;
+    entered_ = 0;
+    return true;
   }
 };
 
@@ -702,6 +724,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
     MutexLock l(GetBenchmarkLock());
     GetReportLabel()->clear();
   }
+  error_message = nullptr;
 
   Notification done;
   timer_manager = std::unique_ptr<TimerManager>(new TimerManager(b.threads, &done));
@@ -747,40 +770,48 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
       MutexLock l(GetBenchmarkLock());
       label = *GetReportLabel();
     }
+    const char* error_msg = error_message;
 
     const double min_time = !IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time;
 
     // If this was the first run, was elapsed time or cpu time large enough?
     // If this is not the first run, go with the current value of iter.
-    if ((i > 0) ||
+    if ((i > 0) || (error_msg != nullptr) ||
        (iters >= kMaxIterations) ||
        (seconds >= min_time) ||
        (real_accumulated_time >= 5*min_time)) {
-      double bytes_per_second = 0;
-      if (total.bytes_processed > 0 && seconds > 0.0) {
-        bytes_per_second = (total.bytes_processed / seconds);
-      }
-      double items_per_second = 0;
-      if (total.items_processed > 0 && seconds > 0.0) {
-        items_per_second = (total.items_processed / seconds);
-      }
 
       // Create report about this benchmark run.
       BenchmarkReporter::Run report;
       report.benchmark_name = b.name;
+      report.error_occurred = error_msg != nullptr;
+      report.error_message = error_msg != nullptr ? error_msg : "";
       report.report_label = label;
       // Report the total iterations across all threads.
       report.iterations = static_cast<int64_t>(iters) * b.threads;
       report.time_unit = b.time_unit;
-      if (b.use_manual_time) {
-        report.real_accumulated_time = manual_accumulated_time;
-      } else {
-        report.real_accumulated_time = real_accumulated_time;
+
+      if (!report.error_occurred) {
+
+        double bytes_per_second = 0;
+        if (total.bytes_processed > 0 && seconds > 0.0) {
+          bytes_per_second = (total.bytes_processed / seconds);
+        }
+        double items_per_second = 0;
+        if (total.items_processed > 0 && seconds > 0.0) {
+          items_per_second = (total.items_processed / seconds);
+        }
+
+        if (b.use_manual_time) {
+          report.real_accumulated_time = manual_accumulated_time;
+        } else {
+          report.real_accumulated_time = real_accumulated_time;
+        }
+        report.cpu_accumulated_time = cpu_accumulated_time;
+        report.bytes_per_second = bytes_per_second;
+        report.items_per_second = items_per_second;
       }
-      report.cpu_accumulated_time = cpu_accumulated_time;
-      report.bytes_per_second = bytes_per_second;
-      report.items_per_second = items_per_second;
       reports.push_back(report);
       break;
     }
@@ -815,10 +846,11 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
 
 State::State(size_t max_iters, bool has_x, int x, bool has_y, int y,
              int thread_i, int n_threads)
-    : started_(false), total_iterations_(0),
+    : started_(false), finished_(false), total_iterations_(0),
      has_range_x_(has_x), range_x_(x),
      has_range_y_(has_y), range_y_(y),
      bytes_processed_(0), items_processed_(0),
+      error_occurred_(false),
      thread_index(thread_i),
      threads(n_threads),
      max_iterations(max_iters)
@@ -830,14 +862,26 @@ State::State(size_t max_iters, bool has_x, int x, bool has_y, int y,
 void State::PauseTiming() {
   // Add in time accumulated so far
   CHECK(running_benchmark);
+  CHECK(started_ && !finished_ && !error_occurred_);
   timer_manager->StopTimer();
 }
 
 void State::ResumeTiming() {
   CHECK(running_benchmark);
+  CHECK(started_ && !finished_);
   timer_manager->StartTimer();
 }
 
+void State::SkipWithError(const char* msg) {
+  CHECK(msg);
+  error_occurred_ = true;
+  const char* expected_no_error_msg = nullptr;
+  error_message.compare_exchange_weak(expected_no_error_msg, msg);
+  started_ = finished_ = true;
+  total_iterations_ = max_iterations;
+  timer_manager->RemoveErroredThread();
+}
+
 void State::SetIterationTime(double seconds) {
   CHECK(running_benchmark);
diff --git a/src/check.h b/src/check.h
index d2c1fda9..cb49c496 100644
--- a/src/check.h
+++ b/src/check.h
@@ -10,6 +10,18 @@
 namespace benchmark {
 namespace internal {
 
+typedef void(AbortHandlerT)();
+
+inline AbortHandlerT*& GetAbortHandler() {
+  static AbortHandlerT* handler = &std::abort;
+  return handler;
+}
+
+BENCHMARK_NORETURN inline void CallAbortHandler() {
+  GetAbortHandler()();
+  std::abort(); // fallback to enforce noreturn
+}
+
 // CheckHandler is the class constructed by failing CHECK macros. CheckHandler
 // will log information about the failures and abort when it is destructed.
 class CheckHandler {
@@ -25,13 +37,13 @@ public:
     return log_;
   }
 
-  BENCHMARK_NORETURN ~CheckHandler() {
+  BENCHMARK_NORETURN ~CheckHandler() noexcept(false) {
     log_ << std::endl;
-    std::abort();
+    CallAbortHandler();
   }
 
-    CheckHandler & operator=(const CheckHandler&) = delete;
-    CheckHandler(const CheckHandler&) = delete;
+  CheckHandler & operator=(const CheckHandler&) = delete;
+  CheckHandler(const CheckHandler&) = delete;
   CheckHandler() = delete;
 private:
   std::ostream& log_;
diff --git a/src/console_reporter.cc b/src/console_reporter.cc
index 56bd3ced..786d0304 100644
--- a/src/console_reporter.cc
+++ b/src/console_reporter.cc
@@ -16,6 +16,7 @@
 #include <cstdint>
 #include <cstdio>
+#include <algorithm>
 #include <iostream>
 #include <string>
 #include <tuple>
@@ -65,7 +66,11 @@ void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
     PrintRunData(run);
   }
 
-  if (reports.size() < 2) {
+  auto error_count = std::count_if(
+      reports.begin(), reports.end(),
+      [](Run const& run) {return run.error_occurred;});
+
+  if (reports.size() - error_count < 2) {
     // We don't report aggregated data if there was a single run.
     return;
   }
@@ -80,6 +85,14 @@ void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
 }
 
 void ConsoleReporter::PrintRunData(const Run& result) {
+  ColorPrintf(COLOR_GREEN, "%-*s ",
+              name_field_width_, result.benchmark_name.c_str());
+
+  if (result.error_occurred) {
+    ColorPrintf(COLOR_RED, "ERROR OCCURRED: \'%s\'", result.error_message.c_str());
+    ColorPrintf(COLOR_DEFAULT, "\n");
+    return;
+  }
   // Format bytes per second
   std::string rate;
   if (result.bytes_per_second > 0) {
@@ -97,9 +110,6 @@ void ConsoleReporter::PrintRunData(const Run& result) {
   const char* timeLabel;
   std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(result.time_unit);
 
-  ColorPrintf(COLOR_GREEN, "%-*s ",
-              name_field_width_, result.benchmark_name.c_str());
-
   if (result.iterations == 0) {
     ColorPrintf(COLOR_YELLOW, "%10.0f %s %10.0f %s ",
                 result.real_accumulated_time * multiplier,
diff --git a/src/json_reporter.cc b/src/json_reporter.cc
index 7ed141fc..84e2e28a 100644
--- a/src/json_reporter.cc
+++ b/src/json_reporter.cc
@@ -137,6 +137,14 @@ void JSONReporter::PrintRunData(Run const& run) {
   out << indent
       << FormatKV("name", run.benchmark_name)
       << ",\n";
+  if (run.error_occurred) {
+    out << indent
+        << FormatKV("error_occurred", run.error_occurred)
+        << ",\n";
+    out << indent
+        << FormatKV("error_message", run.error_message)
+        << ",\n";
+  }
   out << indent
       << FormatKV("iterations", run.iterations)
       << ",\n";
diff --git a/src/reporter.cc b/src/reporter.cc
index 036546e7..a23c756e 100644
--- a/src/reporter.cc
+++ b/src/reporter.cc
@@ -39,6 +39,8 @@ void BenchmarkReporter::ComputeStats(
   for (Run const& run : reports) {
     CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
     CHECK_EQ(run_iterations, run.iterations);
+    if (run.error_occurred)
+      continue;
     real_accumulated_time_stat +=
         Stat1_d(run.real_accumulated_time/run.iterations, run.iterations);
     cpu_accumulated_time_stat +=
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index a10a53a9..efbe7bbe 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -36,6 +36,12 @@ add_test(options_benchmarks options_test --benchmark_min_time=0.01)
 compile_benchmark_test(basic_test)
 add_test(basic_benchmark basic_test --benchmark_min_time=0.01)
 
+compile_benchmark_test(diagnostics_test)
+add_test(diagnostics_test diagnostics_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(skip_with_error_test)
+add_test(skip_with_error_test skip_with_error_test --benchmark_min_time=0.01)
+
 compile_benchmark_test(fixture_test)
 add_test(fixture_test fixture_test --benchmark_min_time=0.01)
diff --git a/test/diagnostics_test.cc b/test/diagnostics_test.cc
new file mode 100644
index 00000000..60fa3b16
--- /dev/null
+++ b/test/diagnostics_test.cc
@@ -0,0 +1,61 @@
+// Testing:
+//   State::PauseTiming()
+//   State::ResumeTiming()
+// Test that the CHECKs within these functions diagnose when they are called
+// outside of the KeepRunning() loop.
+//
+// NOTE: Users should NOT include or use src/check.h. This is only done in
+// order to test library internals.
+
+#include "benchmark/benchmark_api.h"
+#include "../src/check.h"
+#include <stdexcept>
+#include <cstdlib>
+
+#if defined(__GNUC__) && !defined(__EXCEPTIONS)
+#define TEST_HAS_NO_EXCEPTIONS
+#endif
+
+void TestHandler() {
+#ifndef TEST_HAS_NO_EXCEPTIONS
+  throw std::logic_error("");
+#else
+  std::abort();
+#endif
+}
+
+void try_invalid_pause_resume(benchmark::State& state) {
+#if !defined(NDEBUG) && !defined(TEST_HAS_NO_EXCEPTIONS)
+  try {
+    state.PauseTiming();
+    std::abort();
+  } catch (std::logic_error const&) {}
+  try {
+    state.ResumeTiming();
+    std::abort();
+  } catch (std::logic_error const&) {}
+#else
+  (void)state; // avoid unused warning
+#endif
+}
+
+void BM_diagnostic_test(benchmark::State& state) {
+  static bool called_once = false;
+
+  if (called_once == false) try_invalid_pause_resume(state);
+
+  while (state.KeepRunning()) {
+    benchmark::DoNotOptimize(state.iterations());
+  }
+
+  if (called_once == false) try_invalid_pause_resume(state);
+
+  called_once = true;
+}
+BENCHMARK(BM_diagnostic_test);
+
+int main(int argc, char** argv) {
+  benchmark::internal::GetAbortHandler() = &TestHandler;
+  benchmark::Initialize(&argc, argv);
+  benchmark::RunSpecifiedBenchmarks();
+}
diff --git a/test/skip_with_error_test.cc b/test/skip_with_error_test.cc
new file mode 100644
index 00000000..dafbd64d
--- /dev/null
+++ b/test/skip_with_error_test.cc
@@ -0,0 +1,161 @@
+
+#undef NDEBUG
+#include "benchmark/benchmark.h"
+#include "../src/check.h" // NOTE: check.h is for internal use only!
+#include <cassert>
+#include <vector>
+
+namespace {
+
+class TestReporter : public benchmark::ConsoleReporter {
+ public:
+  virtual bool ReportContext(const Context& context) {
+    return ConsoleReporter::ReportContext(context);
+  }
+
+  virtual void ReportRuns(const std::vector<Run>& report) {
+    all_runs_.insert(all_runs_.end(), begin(report), end(report));
+    ConsoleReporter::ReportRuns(report);
+  }
+
+  TestReporter() {}
+  virtual ~TestReporter() {}
+
+  mutable std::vector<Run> all_runs_;
+};
+
+struct TestCase {
+  std::string name;
+  bool error_occurred;
+  std::string error_message;
+
+  typedef benchmark::BenchmarkReporter::Run Run;
+
+  void CheckRun(Run const& run) const {
+    CHECK(name == run.benchmark_name) << "expected " << name << " got "
+                                      << run.benchmark_name;
+    CHECK(error_occurred == run.error_occurred);
+    CHECK(error_message == run.error_message);
+    if (error_occurred) {
+      //CHECK(run.iterations == 0);
+    } else {
+      CHECK(run.iterations != 0);
+    }
+  }
+};
+
+std::vector<TestCase> ExpectedResults;
+
+int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) {
+  for (auto TC : v) {
+    TC.name = base_name + TC.name;
+    ExpectedResults.push_back(std::move(TC));
+  }
+  return 0;
+}
+
+#define CONCAT(x, y) CONCAT2(x, y)
+#define CONCAT2(x, y) x##y
+#define ADD_CASES(...) \
+int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__)
+
+}  // end namespace
+
+
+void BM_error_before_running(benchmark::State& state) {
+  state.SkipWithError("error message");
+  while (state.KeepRunning()) {
+    assert(false);
+  }
+}
+BENCHMARK(BM_error_before_running);
+ADD_CASES("BM_error_before_running",
+          {{"", true, "error message"}});
+
+void BM_error_during_running(benchmark::State& state) {
+  bool first_iter = true;
+  while (state.KeepRunning()) {
+    if (state.range_x() == 1 && state.thread_index <= (state.threads / 2)) {
+      assert(first_iter);
+      first_iter = false;
+      state.SkipWithError("error message");
+    } else {
+      state.PauseTiming();
+      state.ResumeTiming();
+    }
+  }
+}
+BENCHMARK(BM_error_during_running)->Arg(1)->Arg(2)->ThreadRange(1, 8);
+ADD_CASES(
+    "BM_error_during_running",
+    {{"/1/threads:1", true, "error message"},
+     {"/1/threads:2", true, "error message"},
+     {"/1/threads:4", true, "error message"},
+     {"/1/threads:8", true, "error message"},
+     {"/2/threads:1", false, ""},
+     {"/2/threads:2", false, ""},
+     {"/2/threads:4", false, ""},
+     {"/2/threads:8", false, ""}}
+);
+
+void BM_error_after_running(benchmark::State& state) {
+  while (state.KeepRunning()) {
+    benchmark::DoNotOptimize(state.iterations());
+  }
+  if (state.thread_index <= (state.threads / 2))
+    state.SkipWithError("error message");
+}
+BENCHMARK(BM_error_after_running)->ThreadRange(1, 8);
+ADD_CASES(
+    "BM_error_after_running",
+    {{"/threads:1", true, "error message"},
+     {"/threads:2", true, "error message"},
+     {"/threads:4", true, "error message"},
+     {"/threads:8", true, "error message"}}
+);
+
+void BM_error_while_paused(benchmark::State& state) {
+  bool first_iter = true;
+  while (state.KeepRunning()) {
+    if (state.range_x() == 1 && state.thread_index <= (state.threads / 2)) {
+      assert(first_iter);
+      first_iter = false;
+      state.PauseTiming();
+      state.SkipWithError("error message");
+    } else {
+      state.PauseTiming();
+      state.ResumeTiming();
+    }
+  }
+}
+BENCHMARK(BM_error_while_paused)->Arg(1)->Arg(2)->ThreadRange(1, 8);
+ADD_CASES(
+    "BM_error_while_paused",
+    {{"/1/threads:1", true, "error message"},
+     {"/1/threads:2", true, "error message"},
+     {"/1/threads:4", true, "error message"},
+     {"/1/threads:8", true, "error message"},
+     {"/2/threads:1", false, ""},
+     {"/2/threads:2", false, ""},
+     {"/2/threads:4", false, ""},
+     {"/2/threads:8", false, ""}}
+);
+
+
+int main(int argc, char* argv[]) {
+  benchmark::Initialize(&argc, argv);
+
+  TestReporter test_reporter;
+  benchmark::RunSpecifiedBenchmarks(&test_reporter);
+
+  typedef benchmark::BenchmarkReporter::Run Run;
+  auto EB = ExpectedResults.begin();
+
+  for (Run const& run : test_reporter.all_runs_) {
+    assert(EB != ExpectedResults.end());
+    EB->CheckRun(run);
+    ++EB;
+  }
+  assert(EB == ExpectedResults.end());
+
+  return 0;
+}

From 90c9ab1d8e0a44d229dae6b4f5f6355161de761a Mon Sep 17 00:00:00 2001
From: Eric Fiselier
Date: Mon, 23 May 2016 20:35:09 -0600
Subject: [PATCH 2/8] add doc

---
 README.md                         | 31 +++++++++++++++++++++++++++++++
 include/benchmark/benchmark_api.h | 25 +++++++++++++++++++++----
 2 files changed, 52 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 051b3011..f9f3bf4c 100644
--- a/README.md
+++ b/README.md
@@ -276,6 +276,37 @@ BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
 /* BarTest is now registered */
 ```
 
+## Exiting Benchmarks in Error
+
+When external influences such as file I/O and network errors occur within
+a benchmark the `State::SkipWithError(const char* msg)` function can be used
+to skip that run of the benchmark and report the error. Note that only future
+iterations of the `KeepRunning()` loop are skipped. Users may explicitly return
+to exit the benchmark immediately.
+
+The `SkipWithError(...)` function may be used at any point within the benchmark,
+including before and after the `KeepRunning()` loop.
+
+For example:
+
+```c++
+static void BM_test(benchmark::State& state) {
+  auto resource = GetResource();
+  if (!resource.good()) {
+    state.SkipWithError("Resource is not good!");
+    // KeepRunning() loop will not be entered.
+  }
+  while (state.KeepRunning()) {
+    auto data = resource.read_data();
+    if (!resource.good()) {
+      state.SkipWithError("Failed to read data!");
+      break; // Needed to skip the rest of the iteration.
+    }
+    do_stuff(data);
+  }
+}
+```
+
 ## Output Formats
 
 The library supports multiple output formats. Use the
 `--benchmark_format=<tabular|json|csv>` flag to set the format type. `tabular` is
diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h
index 654986d4..7100bf5e 100644
--- a/include/benchmark/benchmark_api.h
+++ b/include/benchmark/benchmark_api.h
@@ -260,13 +260,14 @@ public:
     return res;
   }
 
-  // REQUIRES: timer is running
+  // REQUIRES: timer is running and 'SkipWithError(...)' has not been called
+  //           in the current thread.
   // Stop the benchmark timer.  If not called, the timer will be
   // automatically stopped after KeepRunning() returns false for the first time.
   //
   // For threaded benchmarks the PauseTiming() function acts
   // like a barrier.  I.e., the ith call by a particular thread to this
-  // function will block until all threads have made their ith call.
+  // function will block until all active threads have made their ith call.
   // The timer will stop when the last thread has called this function.
   //
   // NOTE: PauseTiming()/ResumeTiming() are relatively
@@ -274,13 +275,14 @@ public:
   // within each benchmark iteration, if possible.
   void PauseTiming();
 
-  // REQUIRES: timer is not running
+  // REQUIRES: timer is not running and 'SkipWithError(...)' has not been called
+  //           in the current thread.
   // Start the benchmark timer.  The timer is NOT running on entrance to the
   // benchmark function. It begins running after the first call to KeepRunning()
   //
   // For threaded benchmarks the ResumeTiming() function acts
   // like a barrier.  I.e., the ith call by a particular thread to this
-  // function will block until all threads have made their ith call.
+  // function will block until all active threads have made their ith call.
   // The timer will start when the last thread has called this function.
   //
   // NOTE: PauseTiming()/ResumeTiming() are relatively
@@ -288,6 +290,21 @@ public:
   // within each benchmark iteration, if possible.
   void ResumeTiming();
 
+  // REQUIRES: 'SkipWithError(...)' has not been called previously in the
+  //           current thread.
+  // Skip any future iterations of the 'KeepRunning()' loop in the current
+  // thread and report an error with the specified 'msg'. After this call
+  // the user may explicitly 'return' from the benchmark.
+  //
+  // For threaded benchmarks only the current thread stops executing. If
+  // multiple threads report an error only the first error message will be used.
+  // The current thread is no longer considered 'active' thread by
+  // 'PauseTiming()' and 'ResumeTiming()'.
+  //
+  // NOTE: Calling 'SkipWithError(...)' does not cause the benchmark to exit
+  // the current scope immediately. If the function is called from within
+  // the 'KeepRunning()' loop the current iteration will finish. It is the
+  // user's responsibility to exit the scope as needed.
   void SkipWithError(const char* msg);
 
   // REQUIRES: called exactly once per iteration of the KeepRunning loop.

From 924b8cee7af0a302ecb07b3429760c4a906460c7 Mon Sep 17 00:00:00 2001
From: Eric Fiselier
Date: Tue, 24 May 2016 15:21:41 -0600
Subject: [PATCH 3/8] Reflow some wording. NFC

---
 README.md                         | 11 ++++++-----
 include/benchmark/benchmark_api.h | 10 +++++-----
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/README.md b/README.md
index 15997ab5..c9f08f27 100644
--- a/README.md
+++ b/README.md
@@ -311,11 +311,12 @@ BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
 
 ## Exiting Benchmarks in Error
 
-When external influences such as file I/O and network errors occur within
-a benchmark the `State::SkipWithError(const char* msg)` function can be used
-to skip that run of the benchmark and report the error. Note that only future
-iterations of the `KeepRunning()` loop are skipped. Users may explicitly return
-to exit the benchmark immediately.
+When errors caused by external influences, such as file I/O and network
+communication, occur within a benchmark the
+`State::SkipWithError(const char* msg)` function can be used to skip that run
+of the benchmark and report the error. Note that only future iterations of the
+`KeepRunning()` loop are skipped. Users may explicitly return to exit the
+benchmark immediately.
 
 The `SkipWithError(...)` function may be used at any point within the benchmark,
 including before and after the `KeepRunning()` loop.
diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h
index d7718d36..77dfdd13 100644
--- a/include/benchmark/benchmark_api.h
+++ b/include/benchmark/benchmark_api.h
@@ -269,7 +269,7 @@ public:
   }
 
   // REQUIRES: timer is running and 'SkipWithError(...)' has not been called
-  //           in the current thread.
+  //           by the current thread.
   // Stop the benchmark timer.  If not called, the timer will be
   // automatically stopped after KeepRunning() returns false for the first time.
 
@@ -284,7 +284,7 @@ public:
   void PauseTiming();
 
   // REQUIRES: timer is not running and 'SkipWithError(...)' has not been called
-  //           in the current thread.
+  //           by the current thread.
   // Start the benchmark timer.  The timer is NOT running on entrance to the
   // benchmark function. It begins running after the first call to KeepRunning()
 
@@ -298,15 +298,15 @@ public:
   // within each benchmark iteration, if possible.
   void ResumeTiming();
 
-  // REQUIRES: 'SkipWithError(...)' has not been called previously in the
+  // REQUIRES: 'SkipWithError(...)' has not been called previously by the
   //           current thread.
   // Skip any future iterations of the 'KeepRunning()' loop in the current
   // thread and report an error with the specified 'msg'. After this call
   // the user may explicitly 'return' from the benchmark.
   //
   // For threaded benchmarks only the current thread stops executing. If
-  // multiple threads report an error only the first error message will be used.
-  // The current thread is no longer considered 'active' thread by
+  // multiple threads report an error only the first error message is used.
+  // The current thread is no longer considered 'active' by
   // 'PauseTiming()' and 'ResumeTiming()'.
   //
   // NOTE: Calling 'SkipWithError(...)' does not cause the benchmark to exit

From 525858e68797d053ab4c859528164974978162ba Mon Sep 17 00:00:00 2001
From: Eric Fiselier
Date: Tue, 24 May 2016 15:44:58 -0600
Subject: [PATCH 4/8] Fix error-handling in reporters

---
 src/csv_reporter.cc  | 27 ++++++++++++++++++++-------
 src/json_reporter.cc |  8 +++++++-
 2 files changed, 27 insertions(+), 8 deletions(-)

diff --git a/src/csv_reporter.cc b/src/csv_reporter.cc
index 9ac74b4b..16e13dc7 100644
--- a/src/csv_reporter.cc
+++ b/src/csv_reporter.cc
@@ -15,6 +15,7 @@
 #include "benchmark/reporter.h"
 
 #include <cstdint>
+#include <algorithm>
 #include <iostream>
 #include <string>
 #include <tuple>
@@ -53,8 +54,12 @@ void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
     return;
   }
 
+  auto error_count = std::count_if(
+      reports.begin(), reports.end(),
+      [](Run const& run) {return run.error_occurred;});
+
   std::vector<Run> reports_cp = reports;
-  if (reports.size() >= 2) {
+  if (reports.size() - error_count >= 2) {
     Run mean_data;
     Run stddev_data;
     BenchmarkReporter::ComputeStats(reports, &mean_data, &stddev_data);
@@ -82,6 +87,20 @@ void CSVReporter::ReportComplexity(const std::vector<Run>& complexity_reports) {
 }
 
 void CSVReporter::PrintRunData(const Run & run) {
+
+  // Field with embedded double-quote characters must be doubled and the field
+  // delimited with double-quotes.
+  std::string name = run.benchmark_name;
+  ReplaceAll(&name, "\"", "\"\"");
+  std::cout << '"' << name << "\",";
+  if (run.error_occurred) {
+    std::cout << "error_occurred,";
+    std::string msg = run.error_message;
+    ReplaceAll(&msg, "\"", "\"\"");
+    std::cout << '"' << msg << "\",";
+  }
+
   double multiplier;
   const char* timeLabel;
   std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(run.time_unit);
@@ -93,12 +112,6 @@ void CSVReporter::PrintRunData(const Run & run) {
     cpu_time = cpu_time / static_cast<double>(run.iterations);
   }
 
-  // Field with embedded double-quote characters must be doubled and the field
-  // delimited with double-quotes.
-  std::string name = run.benchmark_name;
-  ReplaceAll(&name, "\"", "\"\"");
-  std::cout << "\"" << name << "\",";
-
   // Do not print iteration on bigO and RMS report
   if(!run.report_big_o && !run.report_rms) {
     std::cout << run.iterations;
diff --git a/src/json_reporter.cc b/src/json_reporter.cc
index 49796437..82487539 100644
--- a/src/json_reporter.cc
+++ b/src/json_reporter.cc
@@ -15,6 +15,7 @@
 #include "benchmark/reporter.h"
 
 #include <cstdint>
+#include <algorithm>
 #include <iostream>
 #include <string>
 #include <tuple>
@@ -96,8 +97,13 @@ void JSONReporter::ReportRuns(std::vector<Run> const& reports) {
     out << ",\n";
   }
   first_report_ = false;
+
+  auto error_count = std::count_if(
+      reports.begin(), reports.end(),
+      [](Run const& run) {return run.error_occurred;});
+
   std::vector<Run> reports_cp = reports;
-  if (reports.size() >= 2) {
+  if (reports.size() - error_count >= 2) {
     Run mean_data;
     Run stddev_data;
     BenchmarkReporter::ComputeStats(reports, &mean_data, &stddev_data);

From 6a45324e1d1c1f2c7e56a2df970e38a62122fc4f Mon Sep 17 00:00:00 2001
From: Eric Fiselier
Date: Tue, 24 May 2016 16:12:30 -0600
Subject: [PATCH 5/8] Address review comments.

---
 src/benchmark.cc | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/src/benchmark.cc b/src/benchmark.cc
index 7532c9c4..1b966d4e 100644
--- a/src/benchmark.cc
+++ b/src/benchmark.cc
@@ -206,11 +206,10 @@ class TimerManager {
   void RemoveErroredThread() EXCLUDES(lock_) {
     MutexLock ml(lock_);
     bool last_thread = --running_threads_ == 0;
-    if (last_thread && running_) {
-      InternalStop();
-    } else if (!last_thread) {
-      phase_condition_.notify_all();
-    }
+    if (last_thread && running_)
+      InternalStop();
+    else if (!last_thread)
+      phase_condition_.notify_all();
   }
 
   // REQUIRES: timer is not running
@@ -279,13 +278,10 @@ class TimerManager {
         return this->phase_number_ > phase_number_cp ||
               entered_ == running_threads_; // A thread has aborted in error
       };
-      while (true) {
-        phase_condition_.wait(ml.native_handle(), cb);
-        if (phase_number_ > phase_number_cp)
+      phase_condition_.wait(ml.native_handle(), cb);
+      if (phase_number_ > phase_number_cp)
         return false;
-        else if (running_threads_ == entered_)
-          break;
-      }
+      // else (running_threads_ == entered_) and we are the last thread.
     }
     // Last thread has reached the barrier
     phase_number_++;

From ee8e37c67db9a717059f9f7bdfb83549208d65e9 Mon Sep 17 00:00:00 2001
From: Eric Fiselier
Date: Tue, 24 May 2016 16:19:04 -0600
Subject: [PATCH 6/8] Fix bad merge which caused the benchmark name to be
 printed twice

---
 src/console_reporter.cc | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/src/console_reporter.cc b/src/console_reporter.cc
index 2b1f281c..39446668 100644
--- a/src/console_reporter.cc
+++ b/src/console_reporter.cc
@@ -100,11 +100,14 @@ void ConsoleReporter::ReportComplexity(const std::vector<Run> & complexity_repor
 }
 
 void ConsoleReporter::PrintRunData(const Run& result) {
-  ColorPrintf(COLOR_GREEN, "%-*s ",
-              name_field_width_, result.benchmark_name.c_str());
+  auto name_color = (result.report_big_o || result.report_rms)
+      ? COLOR_BLUE : COLOR_GREEN;
+  ColorPrintf(name_color, "%-*s ", name_field_width_,
+              result.benchmark_name.c_str());
 
   if (result.error_occurred) {
-    ColorPrintf(COLOR_RED, "ERROR OCCURRED: \'%s\'", result.error_message.c_str());
+    ColorPrintf(COLOR_RED, "ERROR OCCURRED: \'%s\'",
+                result.error_message.c_str());
     ColorPrintf(COLOR_DEFAULT, "\n");
     return;
   }
@@ -125,10 +128,6 @@ void ConsoleReporter::PrintRunData(const Run& result) {
   const char* timeLabel;
   std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(result.time_unit);
 
-  ColorPrintf((result.report_big_o ||result.report_rms) ? COLOR_BLUE :
-              COLOR_GREEN, "%-*s ",
-              name_field_width_, result.benchmark_name.c_str());
-
   if(result.report_big_o) {
     std::string big_o = result.report_big_o ? GetBigO(result.complexity) : "";
     ColorPrintf(COLOR_YELLOW, "%10.4f %s %10.4f %s ",

From 6f84ffcd8b53b035fe4430070dbca19641892def Mon Sep 17 00:00:00 2001
From: Eric Fiselier
Date: Tue, 24 May 2016 18:25:44 -0600
Subject: [PATCH 7/8] fix another bad merge

---
 src/json_reporter.cc | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/src/json_reporter.cc b/src/json_reporter.cc
index 82487539..cab527c0 100644
--- a/src/json_reporter.cc
+++ b/src/json_reporter.cc
@@ -176,9 +176,6 @@ void JSONReporter::PrintRunData(Run const& run) {
         << FormatKV("error_message", run.error_message)
         << ",\n";
   }
-  out << indent
-      << FormatKV("iterations", run.iterations)
-      << ",\n";
   if(!run.report_big_o && !run.report_rms) {
     out << indent
         << FormatKV("iterations", run.iterations)
         << ",\n";

From 1003a70e5fe80967fe465da54c2f924944ad9fa6 Mon Sep 17 00:00:00 2001
From: Eric Fiselier
Date: Tue, 24 May 2016 19:45:18 -0600
Subject: [PATCH 8/8] Fix csv_reporter when reporting errors

---
 src/csv_reporter.cc | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)

diff --git a/src/csv_reporter.cc b/src/csv_reporter.cc
index 16e13dc7..ac4bd3db 100644
--- a/src/csv_reporter.cc
+++ b/src/csv_reporter.cc
@@ -28,6 +28,21 @@
 
 namespace benchmark {
 
+namespace {
+std::vector<std::string> elements = {
+  "name",
+  "iterations",
+  "real_time",
+  "cpu_time",
+  "time_unit",
+  "bytes_per_second",
+  "items_per_second",
+  "label",
+  "error_occurred",
+  "error_message"
+};
+}
+
 bool CSVReporter::ReportContext(const Context& context) {
   std::cerr << "Run on (" << context.num_cpus << " X "
             << context.mhz_per_cpu << " MHz CPU "
             << ((context.num_cpus > 1) ? "s" : "") << ")\n";
@@ -44,8 +59,12 @@ bool CSVReporter::ReportContext(const Context& context) {
   std::cerr << "***WARNING*** Library was built as DEBUG. Timings may be "
                "affected.\n";
 #endif
-  std::cout << "name,iterations,real_time,cpu_time,time_unit,bytes_per_second,"
-               "items_per_second,label\n";
+  for (auto B = elements.begin(); B != elements.end(); ) {
+    std::cout << *B++;
+    if (B != elements.end())
+      std::cout << ",";
+  }
+  std::cout << "\n";
   return true;
 }
@@ -95,10 +114,12 @@ void CSVReporter::PrintRunData(const Run & run) {
   ReplaceAll(&name, "\"", "\"\"");
   std::cout << '"' << name << "\",";
   if (run.error_occurred) {
-    std::cout << "error_occurred,";
+    std::cout << std::string(elements.size() - 3, ',');
+    std::cout << "true,";
     std::string msg = run.error_message;
     ReplaceAll(&msg, "\"", "\"\"");
-    std::cout << '"' << msg << "\",";
+    std::cout << '"' << msg << "\"\n";
+    return;
   }
@@ -142,6 +163,7 @@ void CSVReporter::PrintRunData(const Run & run) {
     ReplaceAll(&label, "\"", "\"\"");
     std::cout << "\"" << label << "\"";
   }
+  std::cout << ",,"; // for error_occurred and error_message
  std::cout << '\n';
 }
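
For a quick end-to-end check of the API added by this series, a minimal sketch of a consumer benchmark follows. It assumes the series above is applied; `BM_parse` and `LoadInput` are hypothetical names used only for illustration, and the skip-before-the-loop behavior relies on the patch 1 semantics, where `KeepRunning()` reports false immediately after an error.

```c++
#include <string>

#include "benchmark/benchmark.h"

// Hypothetical setup helper; returning false here simulates an I/O failure.
static bool LoadInput(std::string* out) {
  *out = "example payload";
  return true;
}

static void BM_parse(benchmark::State& state) {
  std::string input;
  if (!LoadInput(&input)) {
    state.SkipWithError("failed to load input!");
    // No early return is required: after SkipWithError() the next call to
    // KeepRunning() reports false, so the loop body is never entered.
  }
  while (state.KeepRunning()) {
    benchmark::DoNotOptimize(input.size());
  }
}
BENCHMARK(BM_parse)->ThreadRange(1, 4);

BENCHMARK_MAIN();
```

The same call works from inside the `KeepRunning()` loop, but there an explicit `break` (or `return`) is needed to skip the remainder of the current iteration, as the README section added in patch 2 notes.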