#undef NDEBUG

#include "benchmark/benchmark.h"
#include "output_test.h"

// ========================================================================= //
// ---------------------- Testing Prologue Output -------------------------- //
// ========================================================================= //

// clang-format off

ADD_CASES(TC_ConsoleOut,
          {{"^[-]+$", MR_Next},
           {"^Benchmark %s Time %s CPU %s Iterations UserCounters...$", MR_Next},
           {"^[-]+$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}});

// clang-format on

// ========================================================================= //
// ------------------------- Simple Counters Output ------------------------ //
// ========================================================================= //
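
// A counter with no flags is reported verbatim: "foo" should come back as 1,
// and "bar" as twice the iteration count, since it is assigned
// 2 * state.iterations() after the timing loop (see CheckSimple below).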
void BM_Counters_Simple(benchmark::State& state) {
  for (auto _ : state) {
  }
  state.counters["foo"] = 1;
  state.counters["bar"] = 2 * static_cast<double>(state.iterations());
}
BENCHMARK(BM_Counters_Simple);
ADD_CASES(TC_ConsoleOut,
          {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
                       {"\"family_index\": 0,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_Counters_Simple\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 1,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSimple(Results const& e) {
  double its = e.NumIterations();
  CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
  // check that the value of bar is within 0.1% of the expected value
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);

// ========================================================================= //
// --------------------- Counters+Items+Bytes/s Output --------------------- //
// ========================================================================= //
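
// SetBytesProcessed()/SetItemsProcessed() report throughput: the totals are
// divided by the CPU time of the run, so CheckBytesAndItemsPSec() below
// expects 364/t bytes/s and 150/t items/s. num_calls1 counts how many times
// the benchmark function itself was invoked.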
namespace {
int num_calls1 = 0;
}

void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    benchmark::DoNotOptimize(state.iterations());
  }
  state.counters["foo"] = 1;
  state.counters["bar"] = ++num_calls1;
  state.SetBytesProcessed(364);
  state.SetItemsProcessed(150);
}
BENCHMARK(BM_Counters_WithBytesAndItemsPSec);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
                           "bar=%hrfloat bytes_per_second=%hrfloat/s "
                           "foo=%hrfloat items_per_second=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
           {"\"family_index\": 1,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"bytes_per_second\": %float,$", MR_Next},
           {"\"foo\": %float,$", MR_Next},
           {"\"items_per_second\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\","
                       "%csv_bytes_items_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckBytesAndItemsPSec(Results const& e) {
  double t = e.DurationCPUTime();  // this (and not real time) is the time used
  CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
  CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1);
  // check that the values are within 0.1% of the expected values
  CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001);
  CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
                        &CheckBytesAndItemsPSec);

// ========================================================================= //
// ------------------------- Rate Counters Output -------------------------- //
// ========================================================================= //
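
// Counter::kIsRate divides the counter by the CPU time of the run, so the
// reported values should be 1/t and 2/t (see CheckRate below).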
void BM_Counters_Rate(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    benchmark::DoNotOptimize(state.iterations());
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
  state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate};
}
BENCHMARK(BM_Counters_Rate);
ADD_CASES(
    TC_ConsoleOut,
    {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
                       {"\"family_index\": 2,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_Counters_Rate\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 1,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckRate(Results const& e) {
  double t = e.DurationCPUTime();  // this (and not real time) is the time used
  // check that the values are within 0.1% of the expected values
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);

// ========================================================================= //
// ----------------------- Inverted Counters Output ------------------------ //
// ========================================================================= //
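
// Counter::kInvert reports the reciprocal: foo = 0.0001 should be shown as
// 10000, and bar = 10000 as 0.0001 (see CheckInvert below).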
void BM_Invert(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    benchmark::DoNotOptimize(state.iterations());
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{0.0001, bm::Counter::kInvert};
  state.counters["bar"] = bm::Counter{10000, bm::Counter::kInvert};
}
BENCHMARK(BM_Invert);
ADD_CASES(TC_ConsoleOut,
          {{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"},
                       {"\"family_index\": 3,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_Invert\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 1,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Invert\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckInvert(Results const& e) {
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 10000, 0.0001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 0.0001, 0.0001);
}
CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);

// ========================================================================= //
// --------------------- InvertedRate Counters Output ---------------------- //
// ========================================================================= //
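
// Combining kIsRate with kInvert yields time per unit instead of units per
// time: foo = 1 should be reported as t, and bar = 8192 as t/8192, where t
// is the CPU time of the run (see CheckInvertedRate below).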
void BM_Counters_InvertedRate(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    benchmark::DoNotOptimize(state.iterations());
  }
  namespace bm = benchmark;
  state.counters["foo"] =
      bm::Counter{1, bm::Counter::kIsRate | bm::Counter::kInvert};
  state.counters["bar"] =
      bm::Counter{8192, bm::Counter::kIsRate | bm::Counter::kInvert};
}
BENCHMARK(BM_Counters_InvertedRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_InvertedRate %console_report "
                           "bar=%hrfloats foo=%hrfloats$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_InvertedRate\",$"},
           {"\"family_index\": 4,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_InvertedRate\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_InvertedRate\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckInvertedRate(Results const& e) {
  double t = e.DurationCPUTime();  // this (and not real time) is the time used
  // check that the values are within 0.1% of the expected values
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, t / 8192.0, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_InvertedRate", &CheckInvertedRate);

// ========================================================================= //
// ------------------------- Thread Counters Output ------------------------ //
// ========================================================================= //
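
// Without an averaging flag, per-thread counter values are summed across
// threads: with each thread contributing foo = 1 and bar = 2, the totals
// should equal NumThreads() and 2 * NumThreads() (see CheckThreads below).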
void BM_Counters_Threads(benchmark::State& state) {
  for (auto _ : state) {
  }
  state.counters["foo"] = 1;
  state.counters["bar"] = 2;
}
BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report "
                           "bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
           {"\"family_index\": 5,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(
    TC_CSVOut,
    {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckThreads(Results const& e) {
  CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads());
  CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads());
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);

// ========================================================================= //
// ---------------------- ThreadAvg Counters Output ------------------------ //
// ========================================================================= //
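
// Counter::kAvgThreads averages the summed counter over the number of
// threads, so foo and bar should stay at 1 and 2 regardless of the thread
// count (see CheckAvgThreads below).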
void BM_Counters_AvgThreads(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads};
  state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads};
}
BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int "
                           "%console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
           {"\"family_index\": 6,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(
    TC_CSVOut,
    {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgThreads(Results const& e) {
  CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
  CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
                        &CheckAvgThreads);

// ========================================================================= //
// -------------------- ThreadAvgRate Counters Output ---------------------- //
// ========================================================================= //
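
// Counter::kAvgThreadsRate combines kAvgThreads with kIsRate: the per-thread
// average is further divided by the CPU time, giving 1/t and 2/t (see
// CheckAvgThreadsRate below).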
void BM_Counters_AvgThreadsRate(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    benchmark::DoNotOptimize(state.iterations());
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};
  state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate};
}
BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int "
                           "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
           {"\"family_index\": 7,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/"
                       "threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgThreadsRate(Results const& e) {
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int",
                        &CheckAvgThreadsRate);

// ========================================================================= //
// ------------------- IterationInvariant Counters Output ------------------ //
// ========================================================================= //
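
// Counter::kIsIterationInvariant marks a value that holds for every
// iteration; the reported total is the value multiplied by the iteration
// count, i.e. its and 2 * its (see CheckIterationInvariant below).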
void BM_Counters_IterationInvariant(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant};
  state.counters["bar"] = bm::Counter{2, bm::Counter::kIsIterationInvariant};
}
BENCHMARK(BM_Counters_IterationInvariant);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report "
                           "bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_IterationInvariant\",$"},
           {"\"family_index\": 8,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckIterationInvariant(Results const& e) {
  double its = e.NumIterations();
  // check that the values are within 0.1% of the expected value
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant",
                        &CheckIterationInvariant);

// ========================================================================= //
// ----------------- IterationInvariantRate Counters Output ---------------- //
// ========================================================================= //
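
// kIsIterationInvariantRate (equivalently kIsRate | kIsIterationInvariant)
// multiplies by the iteration count and divides by the CPU time, so the
// expected values are its * 1 / t and its * 2 / t (see
// CheckIsIterationInvariantRate below).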
void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    benchmark::DoNotOptimize(state.iterations());
  }
  namespace bm = benchmark;
  state.counters["foo"] =
      bm::Counter{1, bm::Counter::kIsIterationInvariantRate};
  state.counters["bar"] =
      bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant};
}
BENCHMARK(BM_Counters_kIsIterationInvariantRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate "
                           "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
           {"\"family_index\": 9,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kIsIterationInvariantRate\",%csv_report,"
                       "%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckIsIterationInvariantRate(Results const& e) {
  double its = e.NumIterations();
  double t = e.DurationCPUTime();  // this (and not real time) is the time used
  // check that the values are within 0.1% of the expected values
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate",
                        &CheckIsIterationInvariantRate);

// ========================================================================= //
// --------------------- AvgIterations Counters Output --------------------- //
// ========================================================================= //
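
// Counter::kAvgIterations divides the counter by the iteration count,
// reporting a per-iteration average: 1/its and 2/its (see CheckAvgIterations
// below).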
void BM_Counters_AvgIterations(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations};
  state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgIterations};
}
BENCHMARK(BM_Counters_AvgIterations);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report "
                           "bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_AvgIterations\",$"},
           {"\"family_index\": 10,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgIterations(Results const& e) {
  double its = e.NumIterations();
  // check that the values are within 0.1% of the expected value
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);

// ========================================================================= //
// ------------------- AvgIterationsRate Counters Output ------------------- //
// ========================================================================= //
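
// kAvgIterationsRate (equivalently kIsRate | kAvgIterations) divides by both
// the iteration count and the CPU time, giving 1/(its*t) and 2/(its*t) (see
// CheckAvgIterationsRate below).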
void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    benchmark::DoNotOptimize(state.iterations());
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate};
  state.counters["bar"] =
      bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations};
}
BENCHMARK(BM_Counters_kAvgIterationsRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate "
                           "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
           {"\"family_index\": 11,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report,"
                       "%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgIterationsRate(Results const& e) {
  double its = e.NumIterations();
  double t = e.DurationCPUTime();  // this (and not real time) is the time used
  // check that the values are within 0.1% of the expected values
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate",
                        &CheckAvgIterationsRate);

// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //

int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }