#undef NDEBUG

#include "benchmark/benchmark.h"
#include "output_test.h"

// ========================================================================= //
// ------------------------ Thousands Customisation ------------------------ //
// ========================================================================= //
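
// The five counters below all hold million-scale values but request
// different "one thousand" bases via Counter::OneK: kIs1000 scales by SI
// thousands (1'000'000 -> 1000k on the console), while kIs1024 scales by
// binary thousands (1'000'000 / 1024 ~= 976.562k, and 1'048'576 =
// 1024 * 1024 -> exactly 1024k).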

void BM_Counters_Thousands(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"t0_1000000DefaultBase",
       bm::Counter(1000 * 1000, bm::Counter::kDefaults)},
      {"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
                                         benchmark::Counter::OneK::kIs1000)},
      {"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
                                         benchmark::Counter::OneK::kIs1024)},
      {"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
                                         benchmark::Counter::OneK::kIs1000)},
      {"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
                                         benchmark::Counter::OneK::kIs1024)},
  });
}
BENCHMARK(BM_Counters_Thousands)->Repetitions(2);
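
// Repetitions(2) makes the reporters emit each repetition separately and
// then the mean/median/stddev aggregates, so each reporter below is checked
// against five result lines.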
ADD_CASES(
    TC_ConsoleOut,
    {
        {"^BM_Counters_Thousands/repeats:2 %console_report "
         "t0_1000000DefaultBase=1000k "
         "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
         "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
        {"^BM_Counters_Thousands/repeats:2 %console_report "
         "t0_1000000DefaultBase=1000k "
         "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
         "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
        {"^BM_Counters_Thousands/repeats:2_mean %console_report "
         "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
         "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
         "t4_1048576Base1024=1024k$"},
        {"^BM_Counters_Thousands/repeats:2_median %console_report "
         "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
         "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
         "t4_1048576Base1024=1024k$"},
        {"^BM_Counters_Thousands/repeats:2_stddev %console_time_only_report [ "
         "]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 "
         "t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"},
    });
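
// As the expectations below show, the JSON reporter writes the raw counter
// values (1e6 or 1.048576e6) for every OneK base; the 1000-vs-1024
// distinction only affects the human-readable console output above.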
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
           {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
           {"}", MR_Next}});
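
// The second repetition reports the same fields; only repetition_index
// advances from 0 to 1.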
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"repetition_index\": 1,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
           {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
           {"}", MR_Next}});
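
// Aggregate entries (mean/median/stddev) drop repetition_index and instead
// carry aggregate_name and aggregate_unit.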
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"mean\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": 2,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
           {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"median\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": 2,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
           {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
           {"}", MR_Next}});
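
// Both repetitions report identical counter values, so the stddev of every
// counter collapses to exactly zero, matching the 0 values on the console.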
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"stddev\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": 2,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 0\\.(0)*e\\+(0)*,$", MR_Next},
           {"\"t1_1000000Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
           {"\"t2_1000000Base1024\": 0\\.(0)*e\\+(0)*,$", MR_Next},
           {"\"t3_1048576Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
           {"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next},
           {"}", MR_Next}});
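
// CSV flattens the counters into one column per counter, again using the
// raw (unscaled) values; the stddev row is all zeros.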

ADD_CASES(
    TC_CSVOut,
    {{"^\"BM_Counters_Thousands/"
      "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
      "0)*6,1\\.04858e\\+(0)*6$"},
     {"^\"BM_Counters_Thousands/"
      "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
      "0)*6,1\\.04858e\\+(0)*6$"},
     {"^\"BM_Counters_Thousands/"
      "repeats:2_mean\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
      "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
     {"^\"BM_Counters_Thousands/"
      "repeats:2_median\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
      "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
     {"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}});

// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckThousands(Results const& e) {
  if (e.name != "BM_Counters_Thousands/repeats:2")
    return;  // Do not check the aggregates!

  // check that the values are within 0.01% of the expected values
  CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000,
                            0.0001);
  CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001);
  CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001);
  CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001);
  CHECK_FLOAT_COUNTER_VALUE(e, "t4_1048576Base1024", EQ, 1024 * 1024, 0.0001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands);
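
// CHECK_BENCHMARK_RESULTS applies CheckThousands to every result whose name
// matches "BM_Counters_Thousands", including the aggregates; the early
// return above restricts the float checks (0.0001 relative tolerance, i.e.
// the 0.01% mentioned in the comment) to the two per-repetition runs.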

// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //

int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }