mirror of https://github.com/google/benchmark.git (synced 2024-12-25 20:10:13 +08:00)

get rid of warnings in tests (#1562)

commit 9885aefb96 (parent 2d5012275a)
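The pattern of changes below suggests the warnings come from calls that hand benchmark::DoNotOptimize() a temporary or a const object (for example the value returned by state.iterations(), a pointer expression like &x, or a const buffer), which falls through to a DoNotOptimize overload that the library flags (such as the deprecated const-reference one). The recurring fix is to materialize the value in a named, mutable local and pass that lvalue instead. A minimal sketch of the before/after pattern, using a hypothetical BM_example benchmark that is not part of this commit:

#include <benchmark/benchmark.h>

// Hypothetical benchmark illustrating the fix applied throughout this commit.
void BM_example(benchmark::State& state) {
  for (auto _ : state) {
    // Before: passing the temporary directly can select a warned-about
    // DoNotOptimize overload.
    //   benchmark::DoNotOptimize(state.iterations());

    // After: store the value in a mutable local, then pass the lvalue.
    auto iterations = state.iterations();
    benchmark::DoNotOptimize(iterations);
  }
}
BENCHMARK(BM_example);

BENCHMARK_MAIN();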
@@ -5,7 +5,8 @@
 
 void BM_empty(benchmark::State& state) {
   for (auto _ : state) {
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
 }
 BENCHMARK(BM_empty);
@@ -70,7 +70,7 @@ int AddComplexityTest(const std::string &test_name,
 void BM_Complexity_O1(benchmark::State &state) {
   for (auto _ : state) {
     for (int i = 0; i < 1024; ++i) {
-      benchmark::DoNotOptimize(&i);
+      benchmark::DoNotOptimize(i);
     }
   }
   state.SetComplexityN(state.range(0));
@@ -121,7 +121,8 @@ void BM_Complexity_O_N(benchmark::State &state) {
   // Test worst case scenario (item not in vector)
   const int64_t item_not_in_vector = state.range(0) * 2;
   for (auto _ : state) {
-    benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
+    auto it = std::find(v.begin(), v.end(), item_not_in_vector);
+    benchmark::DoNotOptimize(it);
   }
   state.SetComplexityN(state.range(0));
 }
@@ -204,7 +205,8 @@ ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
 void BM_ComplexityCaptureArgs(benchmark::State &state, int n) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   state.SetComplexityN(n);
 }
@@ -49,7 +49,8 @@ void BM_diagnostic_test(benchmark::State& state) {
   if (called_once == false) try_invalid_pause_resume(state);
 
   for (auto _ : state) {
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
 
   if (called_once == false) try_invalid_pause_resume(state);
@@ -64,7 +65,8 @@ void BM_diagnostic_test_keep_running(benchmark::State& state) {
   if (called_once == false) try_invalid_pause_resume(state);
 
   while (state.KeepRunning()) {
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
 
   if (called_once == false) try_invalid_pause_resume(state);
@@ -46,36 +46,19 @@ int main(int, char*[]) {
 
   char buffer1024[1024] = "";
   benchmark::DoNotOptimize(buffer1024);
-  benchmark::DoNotOptimize(&buffer1024[0]);
-
-  const char const_buffer1[1] = "";
-  benchmark::DoNotOptimize(const_buffer1);
-
-  const char const_buffer2[2] = "";
-  benchmark::DoNotOptimize(const_buffer2);
-
-  const char const_buffer3[3] = "";
-  benchmark::DoNotOptimize(const_buffer3);
-
-  const char const_buffer8[8] = "";
-  benchmark::DoNotOptimize(const_buffer8);
-
-  const char const_buffer20[20] = "";
-  benchmark::DoNotOptimize(const_buffer20);
-
-  const char const_buffer1024[1024] = "";
-  benchmark::DoNotOptimize(const_buffer1024);
-  benchmark::DoNotOptimize(&const_buffer1024[0]);
+  char* bptr = &buffer1024[0];
+  benchmark::DoNotOptimize(bptr);
 
   int x = 123;
   benchmark::DoNotOptimize(x);
-  benchmark::DoNotOptimize(&x);
+  int* xp = &x;
+  benchmark::DoNotOptimize(xp);
   benchmark::DoNotOptimize(x += 42);
 
-  benchmark::DoNotOptimize(double_up(x));
+  std::int64_t y = double_up(x);
+  benchmark::DoNotOptimize(y);
 
   // These tests are to e
-  benchmark::DoNotOptimize(BitRef::Make());
   BitRef lval = BitRef::Make();
   benchmark::DoNotOptimize(lval);
 }
@@ -2,7 +2,8 @@
 
 void BM_empty(benchmark::State& state) {
   for (auto _ : state) {
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
 }
 BENCHMARK(BM_empty);
@@ -24,7 +24,8 @@ static void BM_MapLookup(benchmark::State& state) {
     m = ConstructRandomMap(size);
     state.ResumeTiming();
     for (int i = 0; i < size; ++i) {
-      benchmark::DoNotOptimize(m.find(std::rand() % size));
+      auto it = m.find(std::rand() % size);
+      benchmark::DoNotOptimize(it);
     }
   }
   state.SetItemsProcessed(state.iterations() * size);
@@ -47,7 +48,8 @@ BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) {
   const int size = static_cast<int>(state.range(0));
   for (auto _ : state) {
     for (int i = 0; i < size; ++i) {
-      benchmark::DoNotOptimize(m.find(std::rand() % size));
+      auto it = m.find(std::rand() % size);
+      benchmark::DoNotOptimize(it);
     }
   }
   state.SetItemsProcessed(state.iterations() * size);
@@ -14,7 +14,8 @@ class TestMemoryManager : public benchmark::MemoryManager {
 
 void BM_empty(benchmark::State& state) {
   for (auto _ : state) {
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
 }
 BENCHMARK(BM_empty);
@@ -7,7 +7,8 @@
 
 static void BM_Simple(benchmark::State& state) {
   for (auto _ : state) {
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
 }
 BENCHMARK(BM_Simple);
@@ -93,7 +93,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
 void BM_bytes_per_second(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   state.SetBytesProcessed(1);
 }
@@ -124,7 +125,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
 void BM_items_per_second(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   state.SetItemsProcessed(1);
 }
@@ -404,7 +406,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"},
 void BM_Complexity_O1(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   state.SetComplexityN(state.range(0));
 }
@@ -141,7 +141,8 @@ ADD_CASES("BM_error_during_running_ranged_for",
 
 void BM_error_after_running(benchmark::State& state) {
   for (auto _ : state) {
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   if (state.thread_index() <= (state.threads() / 2))
     state.SkipWithError("error message");
@@ -372,7 +372,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:2$",
 void BM_CounterRates_Tabular(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   namespace bm = benchmark;
   state.counters.insert({
@@ -67,7 +67,8 @@ int num_calls1 = 0;
 void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   state.counters["foo"] = 1;
   state.counters["bar"] = ++num_calls1;
@@ -118,7 +119,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
 void BM_Counters_Rate(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   namespace bm = benchmark;
   state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
@@ -161,7 +163,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
 void BM_Invert(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   namespace bm = benchmark;
   state.counters["foo"] = bm::Counter{0.0001, bm::Counter::kInvert};
@@ -201,7 +204,8 @@ CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);
 void BM_Counters_InvertedRate(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   namespace bm = benchmark;
   state.counters["foo"] =
@@ -329,7 +333,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
 void BM_Counters_AvgThreadsRate(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
  }
   namespace bm = benchmark;
   state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};
@@ -416,7 +421,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant",
 void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   namespace bm = benchmark;
   state.counters["foo"] =
@@ -507,7 +513,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);
 void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   namespace bm = benchmark;
   state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate};