#undef NDEBUG
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <vector>

#include "benchmark/benchmark.h"
#include "output_test.h"

namespace {
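
// Each ADD_COMPLEXITY_CASES invocation expands to a dummy int initialized by
// AddComplexityTest, which registers the substitutions and the expected
// console/JSON/CSV output patterns for one pair of BigO and RMS result rows.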
#define ADD_COMPLEXITY_CASES(...) \
  int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)

int AddComplexityTest(std::string test_name, std::string big_o_test_name,
                      std::string rms_test_name, std::string big_o) {
  SetSubstitutions({{"%name", test_name},
                    {"%bigo_name", big_o_test_name},
                    {"%rms_name", rms_test_name},
                    {"%bigo_str", "[ ]* %float " + big_o},
                    {"%bigo", big_o},
                    {"%rms", "[ ]*[0-9]+ %"}});
  AddCases(
      TC_ConsoleOut,
      {{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
       {"^%bigo_name", MR_Not},  // Assert we didn't only match a name.
       {"^%rms_name %rms %rms[ ]*$", MR_Next}});
  AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"},
                        {"\"run_name\": \"%name\",$", MR_Next},
                        {"\"run_type\": \"aggregate\",$", MR_Next},
                        {"\"repetitions\": %int,$", MR_Next},
                        {"\"threads\": 1,$", MR_Next},
                        {"\"aggregate_name\": \"BigO\",$", MR_Next},
                        {"\"cpu_coefficient\": %float,$", MR_Next},
                        {"\"real_coefficient\": %float,$", MR_Next},
                        {"\"big_o\": \"%bigo\",$", MR_Next},
                        {"\"time_unit\": \"ns\"$", MR_Next},
                        {"}", MR_Next},
                        {"\"name\": \"%rms_name\",$"},
                        {"\"run_name\": \"%name\",$", MR_Next},
                        {"\"run_type\": \"aggregate\",$", MR_Next},
                        {"\"repetitions\": %int,$", MR_Next},
                        {"\"threads\": 1,$", MR_Next},
                        {"\"aggregate_name\": \"RMS\",$", MR_Next},
                        {"\"rms\": %float$", MR_Next},
                        {"}", MR_Next}});
  AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"},
                       {"^\"%bigo_name\"", MR_Not},
                       {"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}});
  return 0;
}

}  // end namespace

// ========================================================================= //
// --------------------------- Testing BigO O(1) --------------------------- //
// ========================================================================= //
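
// The loop body does a fixed amount of work per iteration regardless of the
// range argument, so the fitted complexity is expected to be constant;
// DoNotOptimize keeps the inner loop from being optimized away.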
void BM_Complexity_O1(benchmark::State& state) {
  for (auto _ : state) {
    for (int i = 0; i < 1024; ++i) {
      benchmark::DoNotOptimize(&i);
    }
  }
  state.SetComplexityN(state.range(0));
}
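
// The same benchmark is registered three ways: with the o1 enum, with
// auto-deduced complexity, and with a user-provided lambda.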
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
BENCHMARK(BM_Complexity_O1)
    ->Range(1, 1 << 18)
    ->Complexity([](benchmark::IterationCount) { return 1.0; });
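
// Expected big-O column strings for the O(1) benchmarks (regex fragments
// substituted into the output patterns).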
const char *one_test_name = "BM_Complexity_O1";
const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
const char *rms_o_1_test_name = "BM_Complexity_O1_RMS";
const char *enum_big_o_1 = "\\([0-9]+\\)";
// FIXME: Tolerate both '(1)' and 'lgN' as output when the complexity is auto
// deduced.
// See https://github.com/google/benchmark/issues/272
const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)";
const char *lambda_big_o_1 = "f\\(N\\)";

// Add enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
                     enum_big_o_1);

// Add auto enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
                     auto_big_o_1);

// Add lambda tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
                     lambda_big_o_1);

// ========================================================================= //
// --------------------------- Testing BigO O(N) --------------------------- //
// ========================================================================= //
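
// Fills a vector with `size` pseudo-random values in [0, size).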
std::vector<int> ConstructRandomVector(int64_t size) {
  std::vector<int> v;
  v.reserve(static_cast<int>(size));
  for (int i = 0; i < size; ++i) {
    v.push_back(static_cast<int>(std::rand() % size));
  }
  return v;
}
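
// Searching for a value guaranteed to be absent forces std::find to scan the
// whole vector, so the time per iteration should grow linearly with N.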
void BM_Complexity_O_N(benchmark::State& state) {
  auto v = ConstructRandomVector(state.range(0));
  // Test worst case scenario (item not in vector)
  const int64_t item_not_in_vector = state.range(0) * 2;
  for (auto _ : state) {
    benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
  }
  state.SetComplexityN(state.range(0));
}
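
// Registered with the oN enum, with a user-provided lambda, and with
// auto-deduced complexity.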
BENCHMARK(BM_Complexity_O_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity([](benchmark::IterationCount n) -> double {
      return static_cast<double>(n);
    });
BENCHMARK(BM_Complexity_O_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity();

const char *n_test_name = "BM_Complexity_O_N";
const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";
const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS";
const char *enum_auto_big_o_n = "N";
const char *lambda_big_o_n = "f\\(N\\)";

// Add enum tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
                     enum_auto_big_o_n);

// Add lambda tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
                     lambda_big_o_n);

// ========================================================================= //
// ------------------------- Testing BigO O(N*lgN) ------------------------- //
// ========================================================================= //
static void BM_Complexity_O_N_log_N(benchmark::State& state) {
  auto v = ConstructRandomVector(state.range(0));
  for (auto _ : state) {
    std::sort(v.begin(), v.end());
  }
  state.SetComplexityN(state.range(0));
}
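
// log2(e); multiplying by it converts the natural log used below into log2,
// so the lambda reports N * lg(N).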
static const double kLog2E = 1.44269504088896340736;
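// Registered with the oNLogN enum, with a lambda computing N * lg(N), and
// with auto-deduced complexity.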
BENCHMARK(BM_Complexity_O_N_log_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity([](benchmark::IterationCount n) {
      return kLog2E * n * log(static_cast<double>(n));
    });
BENCHMARK(BM_Complexity_O_N_log_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity();

const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N";
const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
const char *enum_auto_big_o_n_lg_n = "NlgN";
const char *lambda_big_o_n_lg_n = "f\\(N\\)";

// Add enum tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
                     rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n);

// Add lambda tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
                     rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n);

// ========================================================================= //
// -------- Testing formatting of Complexity with captured args ------------ //
// ========================================================================= //
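
// The captured value (100), not the Ranges arguments, is what is passed to
// SetComplexityN; the cases below check that the BigO/RMS rows appear under
// the captured benchmark name.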
void BM_ComplexityCaptureArgs(benchmark::State& state, int n) {
  for (auto _ : state) {
  }
  state.SetComplexityN(n);
}

BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)
    ->Complexity(benchmark::oN)
    ->Ranges({{1, 2}, {3, 4}});

const std::string complexity_capture_name =
    "BM_ComplexityCaptureArgs/capture_test";

ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO",
                     complexity_capture_name + "_RMS", "N");

// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //

int main(int argc, char *argv[]) { RunOutputTests(argc, argv); }