2016-06-02 05:08:01 +08:00
|
|
|
#undef NDEBUG
|
2016-05-19 01:59:34 +08:00
|
|
|
#include <algorithm>
|
2016-10-08 02:04:50 +08:00
|
|
|
#include <cassert>
|
2016-06-04 00:33:17 +08:00
|
|
|
#include <cmath>
|
2016-10-08 02:04:50 +08:00
|
|
|
#include <cstdlib>
|
|
|
|
#include <vector>
|
2021-11-11 00:22:31 +08:00
|
|
|
|
2016-10-08 02:04:50 +08:00
|
|
|
#include "benchmark/benchmark.h"
|
|
|
|
#include "output_test.h"
|
2016-05-19 01:59:34 +08:00
|
|
|
|
2016-06-02 05:08:01 +08:00
|
|
|
namespace {

// Expands to a dummy namespace-scope int whose initializer registers the
// output expectations for one complexity-report family.  CONCAT(dummy,
// __LINE__) keeps each registration's variable name unique.
#define ADD_COMPLEXITY_CASES(...) \
  int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)

// Registers console/JSON/CSV output expectations for the BigO and RMS
// aggregate rows produced by one complexity benchmark family.
//
//   test_name:        run name of the underlying benchmark
//                     (e.g. "BM_Complexity_O1/manual_time")
//   big_o_test_name:  expected name of the BigO aggregate row
//   rms_test_name:    expected name of the RMS aggregate row
//   big_o:            regex for the reported complexity term
//                     (e.g. "N", "NlgN", "f\\(N\\)")
//   family_index:     expected "family_index" value in the JSON output
//
// Returns 0 so the result can initialize the macro's dummy int.
int AddComplexityTest(const std::string &test_name,
                      const std::string &big_o_test_name,
                      const std::string &rms_test_name,
                      const std::string &big_o, int family_index) {
  // Substitution placeholders referenced by the patterns below.
  SetSubstitutions({{"%name", test_name},
                    {"%bigo_name", big_o_test_name},
                    {"%rms_name", rms_test_name},
                    {"%bigo_str", "[ ]* %float " + big_o},
                    {"%bigo", big_o},
                    {"%rms", "[ ]*[0-9]+ %"}});
  AddCases(
      TC_ConsoleOut,
      {{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
       {"^%bigo_name", MR_Not},  // Assert we didn't only match a name.
       {"^%rms_name %rms %rms[ ]*$", MR_Next}});
  // JSON: BigO aggregate row (reported in time units) immediately followed
  // by the RMS aggregate row (reported as a percentage).
  AddCases(
      TC_JSONOut,
      {{"\"name\": \"%bigo_name\",$"},
       {"\"family_index\": " + std::to_string(family_index) + ",$", MR_Next},
       {"\"per_family_instance_index\": 0,$", MR_Next},
       {"\"run_name\": \"%name\",$", MR_Next},
       {"\"run_type\": \"aggregate\",$", MR_Next},
       {"\"repetitions\": %int,$", MR_Next},
       {"\"threads\": 1,$", MR_Next},
       {"\"aggregate_name\": \"BigO\",$", MR_Next},
       {"\"aggregate_unit\": \"time\",$", MR_Next},
       {"\"cpu_coefficient\": %float,$", MR_Next},
       {"\"real_coefficient\": %float,$", MR_Next},
       {"\"big_o\": \"%bigo\",$", MR_Next},
       {"\"time_unit\": \"ns\"$", MR_Next},
       {"}", MR_Next},
       {"\"name\": \"%rms_name\",$"},
       {"\"family_index\": " + std::to_string(family_index) + ",$", MR_Next},
       {"\"per_family_instance_index\": 0,$", MR_Next},
       {"\"run_name\": \"%name\",$", MR_Next},
       {"\"run_type\": \"aggregate\",$", MR_Next},
       {"\"repetitions\": %int,$", MR_Next},
       {"\"threads\": 1,$", MR_Next},
       {"\"aggregate_name\": \"RMS\",$", MR_Next},
       {"\"aggregate_unit\": \"percentage\",$", MR_Next},
       {"\"rms\": %float$", MR_Next},
       {"}", MR_Next}});
  AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"},
                       {"^\"%bigo_name\"", MR_Not},
                       {"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}});
  return 0;
}

}  // end namespace
|
|
|
|
|
|
|
|
// ========================================================================= //
|
|
|
|
// --------------------------- Testing BigO O(1) --------------------------- //
|
|
|
|
// ========================================================================= //
|
|
|
|
|
2021-11-11 00:22:31 +08:00
|
|
|
// O(1) complexity fixture: performs a small amount of real CPU work each
// iteration, then reports a constant manual time that is independent of the
// range argument.
void BM_Complexity_O1(benchmark::State &state) {
  for (auto _ : state) {
    // The busy-work below keeps per-iteration CPU time non-zero (avoiding a
    // divide-by-zero downstream) and DoNotOptimize keeps it from being elided.
    benchmark::DoNotOptimize(state.iterations());
    double scratch = static_cast<double>(state.iterations());
    benchmark::DoNotOptimize(scratch);
    for (benchmark::IterationCount rep = 0; rep < state.iterations(); ++rep) {
      benchmark::DoNotOptimize(state.iterations());
      scratch *= static_cast<double>(state.iterations());
      benchmark::DoNotOptimize(scratch);
    }

    // Constant 42 ns per iteration, regardless of the range argument.
    state.SetIterationTime(42 * 1e-9);
  }
  state.SetComplexityN(state.range(0));
}
|
Iteration counts should be `uint64_t` globally. (#817)
This is a shameless rip-off of https://github.com/google/benchmark/pull/646
I did promise to look into why that proposed PR was producing
so much worse assembly, and so i finally did.
The reason is - that diff changes `size_t` (unsigned) to `int64_t` (signed).
There is this nice little `assert`:
https://github.com/google/benchmark/blob/7a1c37028359ca9d386d719a6ad527743cf1b753/include/benchmark/benchmark.h#L744
It ensures that we didn't magically decide to advance our iterator
when we should have finished benchmarking.
When `cached_` was unsigned, the `assert` was `cached_ UGT 0`.
But we only ever get to that `assert` if `cached_ NE 0`,
and naturally if `cached_` is not `0`, then it is bigger than `0`,
so the `assert` is tautological, and gets folded away.
But now that `cached_` became signed, the assert became `cached_ SGT 0`.
And we still only know that `cached_ NE 0`, so the assert can't be
optimized out, or at least it doesn't currently.
Regardless of whether or not that is a bug in itself,
that particular diff would have regressed the normal 64-bit systems,
by halving the maximal iteration space (since we go from unsigned counter
to signed one, of the same bit-width), which seems like a bug.
And just so it happens, fixing *this* bug, fixes the other bug.
This produces fully (bit-by-bit) identical state_assembly_test.s
The filecheck change is actually needed regardless of this patch,
else this test does not pass for me even without this diff.
2019-05-13 17:33:11 +08:00
|
|
|
// The same benchmark registered three ways so each reporting mode is
// exercised: explicit O(1), auto-deduced fit, and a user-supplied lambda.
// Registration order determines the family indices (0, 1, 2) used below.
BENCHMARK(BM_Complexity_O1)
    ->Range(1, 1 << 18)
    ->UseManualTime()
    ->Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->UseManualTime()->Complexity();
BENCHMARK(BM_Complexity_O1)
    ->Range(1, 1 << 18)
    ->UseManualTime()
    ->Complexity([](benchmark::IterationCount) { return 1.0; });
|
2016-10-08 02:04:50 +08:00
|
|
|
|
2024-02-19 23:22:35 +08:00
|
|
|
// Expected run names for the O(1) family: manual-time benchmarks get a
// "/manual_time" suffix, and aggregate rows append "_BigO" / "_RMS".
const char *one_test_name = "BM_Complexity_O1/manual_time";
const char *big_o_1_test_name = "BM_Complexity_O1/manual_time_BigO";
const char *rms_o_1_test_name = "BM_Complexity_O1/manual_time_RMS";
// Reported O(1) term: a parenthesized integer, e.g. "(1)".
const char *enum_auto_big_o_1 = "\\([0-9]+\\)";
// Reported term when a user lambda supplies the fit.
const char *lambda_big_o_1 = "f\\(N\\)";

// Add enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
                     enum_auto_big_o_1, /*family_index=*/0);

// Add auto tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
                     enum_auto_big_o_1, /*family_index=*/1);

// Add lambda tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
                     lambda_big_o_1, /*family_index=*/2);
|
2016-06-02 05:08:01 +08:00
|
|
|
|
|
|
|
// ========================================================================= //
|
|
|
|
// --------------------------- Testing BigO O(N) --------------------------- //
|
|
|
|
// ========================================================================= //
|
|
|
|
|
2021-11-11 00:22:31 +08:00
|
|
|
// O(N) complexity fixture: does a little real CPU work each iteration, then
// reports a manual time that grows linearly with the range argument.
void BM_Complexity_O_N(benchmark::State &state) {
  for (auto _ : state) {
    // Non-zero CPU work (guarded by DoNotOptimize so it is not elided)
    // prevents a divide-by-zero in the complexity computation downstream.
    benchmark::DoNotOptimize(state.iterations());
    double scratch = static_cast<double>(state.iterations());
    benchmark::DoNotOptimize(scratch);
    for (benchmark::IterationCount rep = 0; rep < state.iterations(); ++rep) {
      benchmark::DoNotOptimize(state.iterations());
      scratch *= static_cast<double>(state.iterations());
      benchmark::DoNotOptimize(scratch);
    }

    // Report 42 ns per element: linear in the range argument.
    state.SetIterationTime(static_cast<double>(state.range(0)) * 42 * 1e-9);
  }
  state.SetComplexityN(state.range(0));
}
|
2016-10-08 02:04:50 +08:00
|
|
|
// Same O(N) benchmark registered three ways: explicit benchmark::oN,
// auto-deduced complexity, and a user-provided linear fit lambda.
// Registration order determines the family indices (3, 4, 5) used below.
BENCHMARK(BM_Complexity_O_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 20)
    ->UseManualTime()
    ->Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 20)
    ->UseManualTime()
    ->Complexity();
BENCHMARK(BM_Complexity_O_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 20)
    ->UseManualTime()
    ->Complexity([](benchmark::IterationCount n) -> double {
      return static_cast<double>(n);
    });
|
2016-10-08 02:04:50 +08:00
|
|
|
|
2024-02-19 23:22:35 +08:00
|
|
|
// Expected run names for the O(N) family.
const char *n_test_name = "BM_Complexity_O_N/manual_time";
const char *big_o_n_test_name = "BM_Complexity_O_N/manual_time_BigO";
const char *rms_o_n_test_name = "BM_Complexity_O_N/manual_time_RMS";
// Reported complexity terms: "N" for the enum/auto fits, "f(N)" for a lambda.
const char *enum_auto_big_o_n = "N";
const char *lambda_big_o_n = "f\\(N\\)";

// Add enum tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
                     enum_auto_big_o_n, /*family_index=*/3);

// Add auto tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
                     enum_auto_big_o_n, /*family_index=*/4);

// Add lambda tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
                     lambda_big_o_n, /*family_index=*/5);
|
2016-06-02 05:08:01 +08:00
|
|
|
|
|
|
|
// ========================================================================= //
|
2024-02-19 23:22:35 +08:00
|
|
|
// ------------------------- Testing BigO O(NlgN) ------------------------- //
|
2016-06-02 05:08:01 +08:00
|
|
|
// ========================================================================= //
|
2016-05-19 01:59:34 +08:00
|
|
|
|
2024-02-19 23:22:35 +08:00
|
|
|
static const double kLog2E = 1.44269504088896340736;
|
2021-11-11 00:22:31 +08:00
|
|
|
static void BM_Complexity_O_N_log_N(benchmark::State &state) {
|
2017-10-18 02:17:02 +08:00
|
|
|
for (auto _ : state) {
|
2024-02-19 23:22:35 +08:00
|
|
|
// This test requires a non-zero CPU time to avoid divide-by-zero
|
|
|
|
benchmark::DoNotOptimize(state.iterations());
|
2024-03-07 20:19:56 +08:00
|
|
|
double tmp = static_cast<double>(state.iterations());
|
2024-02-19 23:22:35 +08:00
|
|
|
benchmark::DoNotOptimize(tmp);
|
|
|
|
for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
|
|
|
|
benchmark::DoNotOptimize(state.iterations());
|
2024-03-07 20:19:56 +08:00
|
|
|
tmp *= static_cast<double>(state.iterations());
|
2024-02-19 23:22:35 +08:00
|
|
|
benchmark::DoNotOptimize(tmp);
|
|
|
|
}
|
|
|
|
|
2024-03-07 20:19:56 +08:00
|
|
|
state.SetIterationTime(static_cast<double>(state.range(0)) * kLog2E *
|
|
|
|
std::log(state.range(0)) * 42 * 1e-9);
|
2016-05-19 01:59:34 +08:00
|
|
|
}
|
2016-08-05 03:30:14 +08:00
|
|
|
state.SetComplexityN(state.range(0));
|
2016-05-19 01:59:34 +08:00
|
|
|
}
|
2016-10-08 02:04:50 +08:00
|
|
|
// Same O(N log N) benchmark registered three ways: explicit
// benchmark::oNLogN, auto-deduced complexity, and a user-provided
// N * lg(N) lambda.  Registration order fixes family indices 6, 7, 8.
BENCHMARK(BM_Complexity_O_N_log_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1U << 24)
    ->UseManualTime()
    ->Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1U << 24)
    ->UseManualTime()
    ->Complexity();
BENCHMARK(BM_Complexity_O_N_log_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1U << 24)
    ->UseManualTime()
    ->Complexity([](benchmark::IterationCount n) {
      return kLog2E * static_cast<double>(n) * std::log(static_cast<double>(n));
    });
|
2016-10-08 02:04:50 +08:00
|
|
|
|
2024-02-19 23:22:35 +08:00
|
|
|
// Expected run names for the O(N log N) family.
const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time";
const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time_BigO";
const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time_RMS";
// Reported complexity terms: "NlgN" for the enum/auto fits, "f(N)" for a lambda.
const char *enum_auto_big_o_n_lg_n = "NlgN";
const char *lambda_big_o_n_lg_n = "f\\(N\\)";

// Add enum tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
                     rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n,
                     /*family_index=*/6);

// NOTE: auto big-o is wrong.
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
                     rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n,
                     /*family_index=*/7);

// Add lambda tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
                     rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n,
                     /*family_index=*/8);
|
|
|
|
|
2019-03-17 21:38:51 +08:00
|
|
|
// ========================================================================= //
|
|
|
|
// -------- Testing formatting of Complexity with captured args ------------ //
|
|
|
|
// ========================================================================= //
|
|
|
|
|
2021-11-11 00:22:31 +08:00
|
|
|
// Complexity fixture used with BENCHMARK_CAPTURE: complexity N comes from the
// captured argument rather than the range, so the BigO/RMS name formatting
// with captured args can be verified.
void BM_ComplexityCaptureArgs(benchmark::State &state, int n) {
  for (auto _ : state) {
    // Non-zero CPU work (kept alive by DoNotOptimize) avoids a
    // divide-by-zero in the complexity computation downstream.
    benchmark::DoNotOptimize(state.iterations());
    double scratch = static_cast<double>(state.iterations());
    benchmark::DoNotOptimize(scratch);
    for (benchmark::IterationCount rep = 0; rep < state.iterations(); ++rep) {
      benchmark::DoNotOptimize(state.iterations());
      scratch *= static_cast<double>(state.iterations());
      benchmark::DoNotOptimize(scratch);
    }

    state.SetIterationTime(static_cast<double>(state.range(0)) * 42 * 1e-9);
  }
  // Complexity is keyed off the captured argument, not state.range(0).
  state.SetComplexityN(n);
}
|
|
|
|
|
|
|
|
// Check BigO/RMS name formatting when the benchmark has captured args:
// complexity N is fixed at 100 via the capture, independent of the ranges.
BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)
    ->UseManualTime()
    ->Complexity(benchmark::oN)
    ->Ranges({{1, 2}, {3, 4}});

const std::string complexity_capture_name =
    "BM_ComplexityCaptureArgs/capture_test/manual_time";

ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO",
                     complexity_capture_name + "_RMS", "N",
                     /*family_index=*/9);
|
2019-03-17 21:38:51 +08:00
|
|
|
|
2016-06-02 05:08:01 +08:00
|
|
|
// ========================================================================= //
|
|
|
|
// --------------------------- TEST CASES END ------------------------------ //
|
|
|
|
// ========================================================================= //
|
|
|
|
|
2016-10-08 02:04:50 +08:00
|
|
|
// Entry point: runs every output expectation registered above against the
// console/JSON/CSV reporters.
int main(int argc, char *argv[]) {
  RunOutputTests(argc, argv);
  return 0;
}
|