#undef NDEBUG

#include "benchmark/benchmark.h"
#include "output_test.h"
|
2017-05-03 06:00:45 +08:00
|
|
|
// @todo: <jpmag> this checks the full output at once; the rule for
|
|
|
|
// CounterSet1 was failing because it was not matching "^[-]+$".
|
|
|
|
// @todo: <jpmag> check that the counters are vertically aligned.
|
|
|
|
ADD_CASES(TC_ConsoleOut, {
|
|
|
|
// keeping these lines long improves readability, so:
|
|
|
|
// clang-format off
|
|
|
|
{"^[-]+$", MR_Next},
|
|
|
|
{"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next},
|
|
|
|
{"^[-]+$", MR_Next},
|
|
|
|
{"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
|
|
|
|
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
|
|
|
|
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
|
|
|
|
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
|
|
|
|
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
|
|
|
|
{"^[-]+$", MR_Next},
|
|
|
|
{"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Baz %s Foo$", MR_Next},
|
|
|
|
{"^[-]+$", MR_Next},
|
|
|
|
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^[-]+$", MR_Next},
|
|
|
|
{"^Benchmark %s Time %s CPU %s Iterations %s Bat %s Baz %s Foo$", MR_Next},
|
|
|
|
{"^[-]+$", MR_Next},
|
|
|
|
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"},
|
|
|
|
// clang-format on
|
|
|
|
});
|
|
|
|
ADD_CASES(TC_CSVOut, {{"%csv_header,"
|
|
|
|
"\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}});
|
2017-05-03 03:33:28 +08:00
|
|
|
|
|
|
|
// ========================================================================= //
|
|
|
|
// ------------------------- Tabular Counters Output ----------------------- //
|
|
|
|
// ========================================================================= //
|
|
|
|
|
|
|
|
void BM_Counters_Tabular(benchmark::State& state) {
|
2017-10-18 02:17:02 +08:00
|
|
|
for (auto _ : state) {
|
2017-05-03 03:33:28 +08:00
|
|
|
}
|
2017-05-03 06:00:45 +08:00
|
|
|
namespace bm = benchmark;
|
2017-05-03 03:33:28 +08:00
|
|
|
state.counters.insert({
|
2017-05-03 06:00:45 +08:00
|
|
|
{"Foo", { 1, bm::Counter::kAvgThreads}},
|
|
|
|
{"Bar", { 2, bm::Counter::kAvgThreads}},
|
|
|
|
{"Baz", { 4, bm::Counter::kAvgThreads}},
|
|
|
|
{"Bat", { 8, bm::Counter::kAvgThreads}},
|
|
|
|
{"Frob", {16, bm::Counter::kAvgThreads}},
|
|
|
|
{"Lob", {32, bm::Counter::kAvgThreads}},
|
2017-05-03 03:33:28 +08:00
|
|
|
});
|
|
|
|
}
|
2017-05-03 06:00:45 +08:00
|
|
|
BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16);
|
|
|
|
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
|
2017-08-01 09:04:02 +08:00
|
|
|
{"\"iterations\": %int,$", MR_Next},
|
Json reporter: don't cast floating-point to int; adjust tooling (#426)
* Json reporter: passthrough fp, don't cast it to int; adjust tooling
Json output format is generally meant for further processing
using some automated tools. Thus, it makes sense not to
intentionally limit the precision of the values contained
in the report.
As it can be seen, FormatKV() for doubles, used %.2f format,
which was meant to preserve at least some of the precision.
However, before that function is ever called, the doubles
were already cast to the integer via RoundDouble()...
This is also the case for console reporter, where it makes
sense because the screen space is limited, and this reporter,
however the CSV reporter does output some( decimal digits.
Thus i can only conclude that the loss of the precision
was not really considered, so i have decided to adjust the
code of the json reporter to output the full fp precision.
There can be several reasons why that is the right thing
to do, the bigger the time_unit used, the greater the
precision loss, so i'd say any sort of further processing
(like e.g. tools/compare_bench.py does) is best done
on the values with most precision.
Also, that cast skewed the data away from zero, which
i think may or may not result in false- positives/negatives
in the output of tools/compare_bench.py
* Json reporter: FormatKV(double): address review note
* tools/gbench/report.py: skip benchmarks with different time units
While it may be useful to teach it to operate on the
measurements with different time units, which is now
possible since floats are stored, and not the integers,
but for now at least doing such a sanity-checking
is better than providing misinformation.
2017-07-25 07:13:55 +08:00
|
|
|
{"\"real_time\": %float,$", MR_Next},
|
|
|
|
{"\"cpu_time\": %float,$", MR_Next},
|
2017-05-03 03:33:28 +08:00
|
|
|
{"\"time_unit\": \"ns\",$", MR_Next},
|
|
|
|
{"\"Bar\": %float,$", MR_Next},
|
|
|
|
{"\"Bat\": %float,$", MR_Next},
|
|
|
|
{"\"Baz\": %float,$", MR_Next},
|
|
|
|
{"\"Foo\": %float,$", MR_Next},
|
|
|
|
{"\"Frob\": %float,$", MR_Next},
|
|
|
|
{"\"Lob\": %float$", MR_Next},
|
|
|
|
{"}", MR_Next}});
|
2017-05-03 06:00:45 +08:00
|
|
|
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report,"
|
2017-05-03 03:33:28 +08:00
|
|
|
"%float,%float,%float,%float,%float,%float$"}});
|
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
|
|
|
void CheckTabular(Results const& e) {
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1);
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2);
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4);
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 8);
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16);
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32);
|
|
|
|
}
|
2017-05-03 06:00:45 +08:00
|
|
|
CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular);
|
2017-05-03 03:33:28 +08:00
|
|
|
|
2017-05-03 03:47:41 +08:00
|
|
|
// ========================================================================= //
|
|
|
|
// -------------------- Tabular+Rate Counters Output ----------------------- //
|
|
|
|
// ========================================================================= //
|
|
|
|
|
|
|
|
void BM_CounterRates_Tabular(benchmark::State& state) {
|
2017-10-18 02:17:02 +08:00
|
|
|
for (auto _ : state) {
|
2017-05-03 03:47:41 +08:00
|
|
|
}
|
|
|
|
namespace bm = benchmark;
|
|
|
|
state.counters.insert({
|
2017-05-03 06:00:45 +08:00
|
|
|
{"Foo", { 1, bm::Counter::kAvgThreadsRate}},
|
|
|
|
{"Bar", { 2, bm::Counter::kAvgThreadsRate}},
|
|
|
|
{"Baz", { 4, bm::Counter::kAvgThreadsRate}},
|
|
|
|
{"Bat", { 8, bm::Counter::kAvgThreadsRate}},
|
|
|
|
{"Frob", {16, bm::Counter::kAvgThreadsRate}},
|
|
|
|
{"Lob", {32, bm::Counter::kAvgThreadsRate}},
|
2017-05-03 03:47:41 +08:00
|
|
|
});
|
|
|
|
}
|
2017-05-03 06:00:45 +08:00
|
|
|
BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
|
|
|
|
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
|
2017-08-01 09:04:02 +08:00
|
|
|
{"\"iterations\": %int,$", MR_Next},
|
Json reporter: don't cast floating-point to int; adjust tooling (#426)
* Json reporter: passthrough fp, don't cast it to int; adjust tooling
Json output format is generally meant for further processing
using some automated tools. Thus, it makes sense not to
intentionally limit the precision of the values contained
in the report.
As it can be seen, FormatKV() for doubles, used %.2f format,
which was meant to preserve at least some of the precision.
However, before that function is ever called, the doubles
were already cast to the integer via RoundDouble()...
This is also the case for console reporter, where it makes
sense because the screen space is limited, and this reporter,
however the CSV reporter does output some( decimal digits.
Thus i can only conclude that the loss of the precision
was not really considered, so i have decided to adjust the
code of the json reporter to output the full fp precision.
There can be several reasons why that is the right thing
to do, the bigger the time_unit used, the greater the
precision loss, so i'd say any sort of further processing
(like e.g. tools/compare_bench.py does) is best done
on the values with most precision.
Also, that cast skewed the data away from zero, which
i think may or may not result in false- positives/negatives
in the output of tools/compare_bench.py
* Json reporter: FormatKV(double): address review note
* tools/gbench/report.py: skip benchmarks with different time units
While it may be useful to teach it to operate on the
measurements with different time units, which is now
possible since floats are stored, and not the integers,
but for now at least doing such a sanity-checking
is better than providing misinformation.
2017-07-25 07:13:55 +08:00
|
|
|
{"\"real_time\": %float,$", MR_Next},
|
|
|
|
{"\"cpu_time\": %float,$", MR_Next},
|
2017-05-03 03:47:41 +08:00
|
|
|
{"\"time_unit\": \"ns\",$", MR_Next},
|
|
|
|
{"\"Bar\": %float,$", MR_Next},
|
|
|
|
{"\"Bat\": %float,$", MR_Next},
|
|
|
|
{"\"Baz\": %float,$", MR_Next},
|
|
|
|
{"\"Foo\": %float,$", MR_Next},
|
|
|
|
{"\"Frob\": %float,$", MR_Next},
|
|
|
|
{"\"Lob\": %float$", MR_Next},
|
|
|
|
{"}", MR_Next}});
|
2017-05-03 06:00:45 +08:00
|
|
|
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report,"
|
2017-05-03 03:47:41 +08:00
|
|
|
"%float,%float,%float,%float,%float,%float$"}});
|
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
|
|
|
void CheckTabularRate(Results const& e) {
|
|
|
|
double t = e.DurationCPUTime();
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1./t, 0.001);
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2./t, 0.001);
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4./t, 0.001);
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8./t, 0.001);
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16./t, 0.001);
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32./t, 0.001);
|
|
|
|
}
|
2017-05-03 06:00:45 +08:00
|
|
|
CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int",
|
|
|
|
&CheckTabularRate);
|
2017-05-03 03:47:41 +08:00
|
|
|
|
2017-05-03 03:33:28 +08:00
|
|
|
// ========================================================================= //
|
2017-05-03 05:14:49 +08:00
|
|
|
// ------------------------- Tabular Counters Output ----------------------- //
|
2017-05-03 03:33:28 +08:00
|
|
|
// ========================================================================= //
|
|
|
|
|
2017-05-03 05:14:49 +08:00
|
|
|
// set only some of the counters
|
2017-05-03 06:00:45 +08:00
|
|
|
void BM_CounterSet0_Tabular(benchmark::State& state) {
|
2017-10-18 02:17:02 +08:00
|
|
|
for (auto _ : state) {
|
2017-05-03 05:14:49 +08:00
|
|
|
}
|
2017-05-03 06:00:45 +08:00
|
|
|
namespace bm = benchmark;
|
2017-05-03 05:14:49 +08:00
|
|
|
state.counters.insert({
|
2017-05-03 06:00:45 +08:00
|
|
|
{"Foo", {10, bm::Counter::kAvgThreads}},
|
|
|
|
{"Bar", {20, bm::Counter::kAvgThreads}},
|
|
|
|
{"Baz", {40, bm::Counter::kAvgThreads}},
|
2017-05-03 05:14:49 +08:00
|
|
|
});
|
2017-05-03 03:47:41 +08:00
|
|
|
}
|
2017-05-03 06:00:45 +08:00
|
|
|
BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
|
|
|
|
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
|
2017-08-01 09:04:02 +08:00
|
|
|
{"\"iterations\": %int,$", MR_Next},
|
Json reporter: don't cast floating-point to int; adjust tooling (#426)
* Json reporter: passthrough fp, don't cast it to int; adjust tooling
Json output format is generally meant for further processing
using some automated tools. Thus, it makes sense not to
intentionally limit the precision of the values contained
in the report.
As it can be seen, FormatKV() for doubles, used %.2f format,
which was meant to preserve at least some of the precision.
However, before that function is ever called, the doubles
were already cast to the integer via RoundDouble()...
This is also the case for console reporter, where it makes
sense because the screen space is limited, and this reporter,
however the CSV reporter does output some( decimal digits.
Thus i can only conclude that the loss of the precision
was not really considered, so i have decided to adjust the
code of the json reporter to output the full fp precision.
There can be several reasons why that is the right thing
to do, the bigger the time_unit used, the greater the
precision loss, so i'd say any sort of further processing
(like e.g. tools/compare_bench.py does) is best done
on the values with most precision.
Also, that cast skewed the data away from zero, which
i think may or may not result in false- positives/negatives
in the output of tools/compare_bench.py
* Json reporter: FormatKV(double): address review note
* tools/gbench/report.py: skip benchmarks with different time units
While it may be useful to teach it to operate on the
measurements with different time units, which is now
possible since floats are stored, and not the integers,
but for now at least doing such a sanity-checking
is better than providing misinformation.
2017-07-25 07:13:55 +08:00
|
|
|
{"\"real_time\": %float,$", MR_Next},
|
|
|
|
{"\"cpu_time\": %float,$", MR_Next},
|
2017-05-03 05:14:49 +08:00
|
|
|
{"\"time_unit\": \"ns\",$", MR_Next},
|
|
|
|
{"\"Bar\": %float,$", MR_Next},
|
|
|
|
{"\"Baz\": %float,$", MR_Next},
|
|
|
|
{"\"Foo\": %float$", MR_Next},
|
|
|
|
{"}", MR_Next}});
|
2017-05-03 06:00:45 +08:00
|
|
|
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report,"
|
2017-05-03 05:14:49 +08:00
|
|
|
"%float,,%float,%float,,"}});
|
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
2017-05-03 06:00:45 +08:00
|
|
|
void CheckSet0(Results const& e) {
|
2017-05-03 05:14:49 +08:00
|
|
|
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20);
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
|
|
|
|
}
|
2017-05-03 06:00:45 +08:00
|
|
|
CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0);
|
|
|
|
|
|
|
|
// again.
|
|
|
|
void BM_CounterSet1_Tabular(benchmark::State& state) {
|
2017-10-18 02:17:02 +08:00
|
|
|
for (auto _ : state) {
|
2017-05-03 06:00:45 +08:00
|
|
|
}
|
|
|
|
namespace bm = benchmark;
|
|
|
|
state.counters.insert({
|
|
|
|
{"Foo", {15, bm::Counter::kAvgThreads}},
|
|
|
|
{"Bar", {25, bm::Counter::kAvgThreads}},
|
|
|
|
{"Baz", {45, bm::Counter::kAvgThreads}},
|
|
|
|
});
|
|
|
|
}
|
|
|
|
BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
|
|
|
|
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
|
2017-08-01 09:04:02 +08:00
|
|
|
{"\"iterations\": %int,$", MR_Next},
|
Json reporter: don't cast floating-point to int; adjust tooling (#426)
* Json reporter: passthrough fp, don't cast it to int; adjust tooling
Json output format is generally meant for further processing
using some automated tools. Thus, it makes sense not to
intentionally limit the precision of the values contained
in the report.
As it can be seen, FormatKV() for doubles, used %.2f format,
which was meant to preserve at least some of the precision.
However, before that function is ever called, the doubles
were already cast to the integer via RoundDouble()...
This is also the case for console reporter, where it makes
sense because the screen space is limited, and this reporter,
however the CSV reporter does output some( decimal digits.
Thus i can only conclude that the loss of the precision
was not really considered, so i have decided to adjust the
code of the json reporter to output the full fp precision.
There can be several reasons why that is the right thing
to do, the bigger the time_unit used, the greater the
precision loss, so i'd say any sort of further processing
(like e.g. tools/compare_bench.py does) is best done
on the values with most precision.
Also, that cast skewed the data away from zero, which
i think may or may not result in false- positives/negatives
in the output of tools/compare_bench.py
* Json reporter: FormatKV(double): address review note
* tools/gbench/report.py: skip benchmarks with different time units
While it may be useful to teach it to operate on the
measurements with different time units, which is now
possible since floats are stored, and not the integers,
but for now at least doing such a sanity-checking
is better than providing misinformation.
2017-07-25 07:13:55 +08:00
|
|
|
{"\"real_time\": %float,$", MR_Next},
|
|
|
|
{"\"cpu_time\": %float,$", MR_Next},
|
2017-05-03 06:00:45 +08:00
|
|
|
{"\"time_unit\": \"ns\",$", MR_Next},
|
|
|
|
{"\"Bar\": %float,$", MR_Next},
|
|
|
|
{"\"Baz\": %float,$", MR_Next},
|
|
|
|
{"\"Foo\": %float$", MR_Next},
|
|
|
|
{"}", MR_Next}});
|
|
|
|
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report,"
|
|
|
|
"%float,,%float,%float,,"}});
|
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
|
|
|
void CheckSet1(Results const& e) {
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15);
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25);
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45);
|
|
|
|
}
|
|
|
|
CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);
|
|
|
|
|
|
|
|
// ========================================================================= //
|
|
|
|
// ------------------------- Tabular Counters Output ----------------------- //
|
|
|
|
// ========================================================================= //
|
|
|
|
|
|
|
|
// set only some of the counters, different set now.
|
|
|
|
void BM_CounterSet2_Tabular(benchmark::State& state) {
|
2017-10-18 02:17:02 +08:00
|
|
|
for (auto _ : state) {
|
2017-05-03 06:00:45 +08:00
|
|
|
}
|
|
|
|
namespace bm = benchmark;
|
|
|
|
state.counters.insert({
|
|
|
|
{"Foo", {10, bm::Counter::kAvgThreads}},
|
|
|
|
{"Bat", {30, bm::Counter::kAvgThreads}},
|
|
|
|
{"Baz", {40, bm::Counter::kAvgThreads}},
|
|
|
|
});
|
|
|
|
}
|
|
|
|
BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
|
|
|
|
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
|
2017-08-01 09:04:02 +08:00
|
|
|
{"\"iterations\": %int,$", MR_Next},
|
Json reporter: don't cast floating-point to int; adjust tooling (#426)
* Json reporter: passthrough fp, don't cast it to int; adjust tooling
Json output format is generally meant for further processing
using some automated tools. Thus, it makes sense not to
intentionally limit the precision of the values contained
in the report.
As it can be seen, FormatKV() for doubles, used %.2f format,
which was meant to preserve at least some of the precision.
However, before that function is ever called, the doubles
were already cast to the integer via RoundDouble()...
This is also the case for console reporter, where it makes
sense because the screen space is limited, and this reporter,
however the CSV reporter does output some( decimal digits.
Thus i can only conclude that the loss of the precision
was not really considered, so i have decided to adjust the
code of the json reporter to output the full fp precision.
There can be several reasons why that is the right thing
to do, the bigger the time_unit used, the greater the
precision loss, so i'd say any sort of further processing
(like e.g. tools/compare_bench.py does) is best done
on the values with most precision.
Also, that cast skewed the data away from zero, which
i think may or may not result in false- positives/negatives
in the output of tools/compare_bench.py
* Json reporter: FormatKV(double): address review note
* tools/gbench/report.py: skip benchmarks with different time units
While it may be useful to teach it to operate on the
measurements with different time units, which is now
possible since floats are stored, and not the integers,
but for now at least doing such a sanity-checking
is better than providing misinformation.
2017-07-25 07:13:55 +08:00
|
|
|
{"\"real_time\": %float,$", MR_Next},
|
|
|
|
{"\"cpu_time\": %float,$", MR_Next},
|
2017-05-03 06:00:45 +08:00
|
|
|
{"\"time_unit\": \"ns\",$", MR_Next},
|
|
|
|
{"\"Bat\": %float,$", MR_Next},
|
|
|
|
{"\"Baz\": %float,$", MR_Next},
|
|
|
|
{"\"Foo\": %float$", MR_Next},
|
|
|
|
{"}", MR_Next}});
|
|
|
|
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report,"
|
|
|
|
",%float,%float,%float,,"}});
|
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
|
|
|
void CheckSet2(Results const& e) {
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30);
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
|
|
|
|
}
|
|
|
|
CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2);
|
2017-05-03 05:14:49 +08:00
|
|
|
|
|
|
|
// ========================================================================= //
|
|
|
|
// --------------------------- TEST CASES END ------------------------------ //
|
|
|
|
// ========================================================================= //
|
|
|
|
|
|
|
|
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
|