// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <algorithm>
#include <cassert>  // for assert() in PrintRunData
#include <cmath>
#include <cstdint>
#include <iomanip>  // for setprecision
#include <iostream>
#include <limits>
#include <map>      // for the global_context declaration below
#include <sstream>  // for std::stringstream in FormatKV
#include <string>
#include <tuple>
#include <vector>

#include "benchmark/benchmark.h"
#include "complexity.h"
#include "string_util.h"
#include "timers.h"

namespace benchmark {
namespace internal {
// Key/value pairs registered by the user (e.g. via AddCustomContext);
// they are appended to the "context" block of the JSON output below.
extern std::map<std::string, std::string>* global_context;
}  // namespace internal

namespace {

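// Escapes the characters that are significant inside a JSON string literal
// (control characters, backslash, double quote) so that arbitrary benchmark
// names, labels and context values can be embedded safely. For example, a
// label containing a tab character is emitted as the two characters \t.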
std::string StrEscape(const std::string& s) {
  std::string tmp;
  tmp.reserve(s.size());
  for (char c : s) {
    switch (c) {
      case '\b':
        tmp += "\\b";
        break;
      case '\f':
        tmp += "\\f";
        break;
      case '\n':
        tmp += "\\n";
        break;
      case '\r':
        tmp += "\\r";
        break;
      case '\t':
        tmp += "\\t";
        break;
      case '\\':
        tmp += "\\\\";
        break;
      case '"':
        tmp += "\\\"";
        break;
      default:
        tmp += c;
        break;
    }
  }
  return tmp;
}

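// Each FormatKV overload renders a single `"key": value` pair without a
// trailing comma or newline; the callers below take care of separators.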
std::string FormatKV(std::string const& key, std::string const& value) {
  return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(),
                   StrEscape(value).c_str());
}

std::string FormatKV(std::string const& key, const char* value) {
  return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(),
                   StrEscape(value).c_str());
}

std::string FormatKV(std::string const& key, bool value) {
  return StrFormat("\"%s\": %s", StrEscape(key).c_str(),
                   value ? "true" : "false");
}

std::string FormatKV(std::string const& key, int64_t value) {
  std::stringstream ss;
  ss << '"' << StrEscape(key) << "\": " << value;
  return ss.str();
}

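// Doubles need special handling. NaN and infinity have no representation in
// strict JSON, so they are emitted as the non-standard tokens NaN/Infinity
// that many JSON consumers still accept. Finite values are printed in
// scientific notation with max_digits10 significant digits, enough for the
// value to round-trip exactly through text; e.g. (illustrative, 17
// significant digits for IEEE doubles):
//   FormatKV("real_time", 1.0 / 3.0)  ->  "real_time": 3.3333333333333331e-01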
std::string FormatKV(std::string const& key, double value) {
  std::stringstream ss;
  ss << '"' << StrEscape(key) << "\": ";
  if (std::isnan(value))
    // value < 0 is always false for NaN; std::signbit also sees NaN's sign.
    ss << (std::signbit(value) ? "-" : "") << "NaN";
  else if (std::isinf(value))
    ss << (value < 0 ? "-" : "") << "Infinity";
  else {
    const auto max_digits10 =
        std::numeric_limits<decltype(value)>::max_digits10;
    const auto max_fractional_digits10 = max_digits10 - 1;
    ss << std::scientific << std::setprecision(max_fractional_digits10)
       << value;
  }
  return ss.str();
}

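// Rounds to the nearest integer. Used by ReportContext() below, which
// historically reports "mhz_per_cpu" as an integral value.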
int64_t RoundDouble(double v) { return std::lround(v); }

}  // end namespace

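// Emits the opening of the top-level JSON object and its "context" block.
// The complete document produced by this reporter has the shape (sketch):
//
//   {
//     "context": {
//       "date": "...", "host_name": "...", "num_cpus": 8, ...,
//       "caches": [ ... ], "load_avg": [ ... ], "library_build_type": "..."
//     },
//     "benchmarks": [
//       { ... one object per run, written by PrintRunData() ... }
//     ]
//   }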
bool JSONReporter::ReportContext(const Context& context) {
  std::ostream& out = GetOutputStream();

  out << "{\n";
  std::string inner_indent(2, ' ');

  // Open context block and print context information.
  out << inner_indent << "\"context\": {\n";
  std::string indent(4, ' ');

  std::string walltime_value = LocalDateTimeString();
  out << indent << FormatKV("date", walltime_value) << ",\n";

  out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";

  if (Context::executable_name) {
    out << indent << FormatKV("executable", Context::executable_name) << ",\n";
  }

  CPUInfo const& info = context.cpu_info;
  out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus))
      << ",\n";
  out << indent
      << FormatKV("mhz_per_cpu",
                  RoundDouble(info.cycles_per_second / 1000000.0))
      << ",\n";
  if (CPUInfo::Scaling::UNKNOWN != info.scaling) {
    out << indent
        << FormatKV("cpu_scaling_enabled",
                    info.scaling == CPUInfo::Scaling::ENABLED)
        << ",\n";
  }

  out << indent << "\"caches\": [\n";
  indent = std::string(6, ' ');
  std::string cache_indent(8, ' ');
  for (size_t i = 0; i < info.caches.size(); ++i) {
    auto& CI = info.caches[i];
    out << indent << "{\n";
    out << cache_indent << FormatKV("type", CI.type) << ",\n";
    out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level))
        << ",\n";
    out << cache_indent << FormatKV("size", static_cast<int64_t>(CI.size))
        << ",\n";
    out << cache_indent
        << FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing))
        << "\n";
    out << indent << "}";
    if (i != info.caches.size() - 1) out << ",";
    out << "\n";
  }
  indent = std::string(4, ' ');
  out << indent << "],\n";
  out << indent << "\"load_avg\": [";
  for (auto it = info.load_avg.begin(); it != info.load_avg.end();) {
    out << *it++;
    if (it != info.load_avg.end()) out << ",";
  }
  out << "],\n";

#if defined(NDEBUG)
  const char build_type[] = "release";
#else
  const char build_type[] = "debug";
#endif
  out << indent << FormatKV("library_build_type", build_type);

  if (internal::global_context != nullptr) {
    for (const auto& kv : *internal::global_context) {
      out << ",\n";
      out << indent << FormatKV(kv.first, kv.second);
    }
  }
  out << "\n";

  // Close context block and open the list of benchmarks.
  out << inner_indent << "},\n";
  out << inner_indent << "\"benchmarks\": [\n";
  return true;
}

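// Writes one JSON object per run. A comma is emitted before every batch
// except the first (tracked by first_report_), so that consecutive calls
// keep the "benchmarks" array well-formed.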
void JSONReporter::ReportRuns(std::vector<Run> const& reports) {
  if (reports.empty()) {
    return;
  }
  std::string indent(4, ' ');
  std::ostream& out = GetOutputStream();
  if (!first_report_) {
    out << ",\n";
  }
  first_report_ = false;

  for (auto it = reports.begin(); it != reports.end(); ++it) {
    out << indent << "{\n";
    PrintRunData(*it);
    out << indent << '}';
    auto it_cp = it;
    if (++it_cp != reports.end()) {
      out << ",\n";
    }
  }
}

void JSONReporter::Finalize() {
  // Close the list of benchmarks and the top level object.
  GetOutputStream() << "\n  ]\n}\n";
}

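// Prints the body of a single benchmark entry (without the surrounding
// braces, which ReportRuns() emits). A typical iteration run looks like
// this (sketch; the field set varies with run type, counters and flags):
//
//   "name": "BM_Foo/8",
//   "family_index": 0,
//   "per_family_instance_index": 0,
//   "run_name": "BM_Foo/8",
//   "run_type": "iteration",
//   "repetitions": 1,
//   "repetition_index": 0,
//   "threads": 1,
//   "iterations": 1000000,
//   "real_time": 2.1234e+00,
//   "cpu_time": 2.0987e+00,
//   "time_unit": "ns"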
void JSONReporter::PrintRunData(Run const& run) {
  std::string indent(6, ' ');
  std::ostream& out = GetOutputStream();
  out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
  out << indent << FormatKV("family_index", run.family_index) << ",\n";
  out << indent
      << FormatKV("per_family_instance_index", run.per_family_instance_index)
      << ",\n";
  out << indent << FormatKV("run_name", run.run_name.str()) << ",\n";
  out << indent << FormatKV("run_type", [&run]() -> const char* {
    switch (run.run_type) {
      case BenchmarkReporter::Run::RT_Iteration:
        return "iteration";
      case BenchmarkReporter::Run::RT_Aggregate:
        return "aggregate";
    }
    BENCHMARK_UNREACHABLE();
  }()) << ",\n";
  out << indent << FormatKV("repetitions", run.repetitions) << ",\n";
  if (run.run_type != BenchmarkReporter::Run::RT_Aggregate) {
    out << indent << FormatKV("repetition_index", run.repetition_index)
        << ",\n";
  }
  out << indent << FormatKV("threads", run.threads) << ",\n";
  if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) {
    out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
    out << indent << FormatKV("aggregate_unit", [&run]() -> const char* {
      switch (run.aggregate_unit) {
        case StatisticUnit::kTime:
          return "time";
        case StatisticUnit::kPercentage:
          return "percentage";
      }
      BENCHMARK_UNREACHABLE();
    }()) << ",\n";
  }
  if (run.error_occurred) {
    out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
    out << indent << FormatKV("error_message", run.error_message) << ",\n";
  }
  if (!run.report_big_o && !run.report_rms) {
    out << indent << FormatKV("iterations", run.iterations) << ",\n";
    if (run.run_type != Run::RT_Aggregate ||
        run.aggregate_unit == StatisticUnit::kTime) {
      out << indent << FormatKV("real_time", run.GetAdjustedRealTime())
          << ",\n";
      out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
    } else {
      assert(run.aggregate_unit == StatisticUnit::kPercentage);
      out << indent << FormatKV("real_time", run.real_accumulated_time)
          << ",\n";
      out << indent << FormatKV("cpu_time", run.cpu_accumulated_time);
    }
    out << ",\n"
        << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
  } else if (run.report_big_o) {
    out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime())
        << ",\n";
    out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime())
        << ",\n";
    out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
    out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
  } else if (run.report_rms) {
    out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
  }

  for (auto& c : run.counters) {
    out << ",\n" << indent << FormatKV(c.first, c.second);
  }

  if (run.memory_result) {
    const MemoryManager::Result memory_result = *run.memory_result;
    out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
    out << ",\n"
        << indent << FormatKV("max_bytes_used", memory_result.max_bytes_used);

    auto report_if_present = [&out, &indent](const char* label, int64_t val) {
      if (val != MemoryManager::TombstoneValue)
        out << ",\n" << indent << FormatKV(label, val);
    };

    report_if_present("total_allocated_bytes",
                      memory_result.total_allocated_bytes);
    report_if_present("net_heap_growth", memory_result.net_heap_growth);
  }

  if (!run.report_label.empty()) {
    out << ",\n" << indent << FormatKV("label", run.report_label);
  }
  out << '\n';
}

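// Sentinel stored in MemoryManager::Result fields that a memory manager did
// not fill in; report_if_present() in PrintRunData() skips such fields.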
const int64_t MemoryManager::TombstoneValue =
    std::numeric_limits<int64_t>::max();

}  // end namespace benchmark