mirror of
https://github.com/google/benchmark.git
synced 2024-12-26 20:40:21 +08:00
3d85343d65
* Rewrite complexity_test to use (hardcoded) manual time This test is fundamentally flaky, because it tried to read tea leaves, and is inherently misbehaving in CI environments, since there are unmitigated sources of noise. That being said, the computed Big-O also depends on the `--benchmark_min_time=` Fixes https://github.com/google/benchmark/issues/272 * Correctly compute Big-O for manual timings. Fixes #1758. * complexity_test: do more stuff in empty loop * Make all empty loops be a bit longer empty Looks like on windows, some of these tests still fail, i guess clock precision is too small.
92 lines
2.3 KiB
C++
92 lines
2.3 KiB
C++
// Testing:
//   State::PauseTiming()
//   State::ResumeTiming()
// Test that CHECK's within these functions diagnose when they are called
// outside of the KeepRunning() loop.
//
// NOTE: Users should NOT include or use src/check.h. This is only done in
// order to test library internals.
|
|
#include <cstdlib>
#include <iostream>
#include <stdexcept>

#include "../src/check.h"
#include "benchmark/benchmark.h"
|
|
|
|
#if defined(__GNUC__) && !defined(__EXCEPTIONS)
|
|
#define TEST_HAS_NO_EXCEPTIONS
|
|
#endif
|
|
|
|
// Replacement abort handler installed via benchmark::internal::GetAbortHandler()
// in main(). When a library CHECK fails, this throws instead of terminating so
// the test can observe the diagnostic; with exceptions disabled it falls back
// to std::abort(), which the test treats as the expected diagnostic as well.
void TestHandler() {
#ifndef TEST_HAS_NO_EXCEPTIONS
  throw std::logic_error("");
#else
  std::abort();
#endif
}
|
|
|
|
// Calls State::PauseTiming()/State::ResumeTiming() while the state is NOT
// inside its KeepRunning()/range-for loop. Each call must trip the library's
// CHECK, whose handler (TestHandler) throws std::logic_error; reaching
// std::abort() after the call means the diagnostic did not fire.
// Compiled out when library assertions or exceptions are unavailable.
void try_invalid_pause_resume(benchmark::State& state) {
#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && \
    !defined(TEST_HAS_NO_EXCEPTIONS)
  // Runs `op` and requires it to throw std::logic_error.
  const auto expect_logic_error = [](auto&& op) {
    try {
      op();
      std::abort();  // not reached if the diagnostic fired
    } catch (std::logic_error const&) {
    }
  };
  expect_logic_error([&] { state.PauseTiming(); });
  expect_logic_error([&] { state.ResumeTiming(); });
#else
  (void)state;  // avoid unused warning
#endif
}
|
|
|
|
void BM_diagnostic_test(benchmark::State& state) {
|
|
static bool called_once = false;
|
|
|
|
if (called_once == false) try_invalid_pause_resume(state);
|
|
|
|
for (auto _ : state) {
|
|
auto iterations = double(state.iterations()) * double(state.iterations());
|
|
benchmark::DoNotOptimize(iterations);
|
|
}
|
|
|
|
if (called_once == false) try_invalid_pause_resume(state);
|
|
|
|
called_once = true;
|
|
}
|
|
BENCHMARK(BM_diagnostic_test);
|
|
|
|
void BM_diagnostic_test_keep_running(benchmark::State& state) {
|
|
static bool called_once = false;
|
|
|
|
if (called_once == false) try_invalid_pause_resume(state);
|
|
|
|
while (state.KeepRunning()) {
|
|
auto iterations = double(state.iterations()) * double(state.iterations());
|
|
benchmark::DoNotOptimize(iterations);
|
|
}
|
|
|
|
if (called_once == false) try_invalid_pause_resume(state);
|
|
|
|
called_once = true;
|
|
}
|
|
BENCHMARK(BM_diagnostic_test_keep_running);
|
|
|
|
// Entry point: installs the throwing abort handler, then runs the registered
// diagnostic benchmarks. NOTE(review): this function uses std::cout/std::endl,
// which requires <iostream> — the file's include block must provide it.
int main(int argc, char* argv[]) {
#ifdef NDEBUG
  // This test is exercising functionality for debug builds, which are not
  // available in release builds. Skip the test if we are in that environment
  // to avoid a test failure.
  std::cout << "Diagnostic test disabled in release build" << std::endl;
  (void)argc;
  (void)argv;
#else
  // Route library CHECK failures to TestHandler so try_invalid_pause_resume
  // can catch them as std::logic_error instead of the process terminating.
  benchmark::internal::GetAbortHandler() = &TestHandler;
  benchmark::Initialize(&argc, argv);
  benchmark::RunSpecifiedBenchmarks();
#endif
  return 0;
}
|