[clang-tidy] autofix readability issues (#1931)

* [clang-tidy] autofix readability issues

* more modern clang format
Author: dominic, 2025-02-12 11:40:49 -08:00 (committed by GitHub)
Parent: 2d4c8dd21a
Commit: adbda82db3
37 changed files with 253 additions and 225 deletions
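Most of the hunks below are mechanical rewrites produced by clang-tidy's readability checks: implicit pointer- and integer-to-bool conversions become explicit comparisons against nullptr or 0, C-style casts become static_cast, auto deducing a pointer becomes auto*, file-scope statics inside anonymous namespaces drop the redundant static, and unused parameters gain /*unused*/ comments. A minimal sketch of the before/after pattern, assuming checks such as readability-implicit-bool-conversion and google-readability-casting; the Reporter type and the function names are invented for illustration, not taken from the commit:

// Minimal sketch; names are hypothetical, not from the benchmark sources.
struct Reporter {};

// Before the autofix:
double Ratio(Reporter* reporter, long iters) {
  if (reporter) {              // implicit pointer-to-bool conversion
    return double(iters) / 2;  // C-style cast
  }
  return 0;
}

// After running clang-tidy --fix with the readability checks enabled:
double RatioFixed(Reporter* reporter, long iters) {
  if (reporter != nullptr) {                // explicit comparison against nullptr
    return static_cast<double>(iters) / 2;  // named cast
  }
  return 0;
}

The first file's hunk is separate from the clang-tidy pass: it bumps DoozyX/clang-format-lint-action to v0.18.2 and clangFormatVersion to 18, which is what the "more modern clang format" bullet refers to and likely accounts for the purely formatting-level changes elsewhere in the diff (for example, "double const &" tightening to "double const&").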

@ -10,9 +10,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: DoozyX/clang-format-lint-action@v0.15
- uses: DoozyX/clang-format-lint-action@v0.18.2
with:
source: './include/benchmark ./src ./test'
extensions: 'h,cc'
clangFormatVersion: 12
style: Google
clangFormatVersion: 18

@ -313,7 +313,7 @@ BENCHMARK_EXPORT std::string GetBenchmarkVersion();
BENCHMARK_EXPORT void PrintDefaultHelp();
BENCHMARK_EXPORT void Initialize(int* argc, char** argv,
void (*HelperPrinterf)() = PrintDefaultHelp);
void (*HelperPrintf)() = PrintDefaultHelp);
BENCHMARK_EXPORT void Shutdown();
// Report to stdout all arguments in 'argv' as unrecognized except the first.
@ -631,7 +631,7 @@ class Counter {
Counter(double v = 0., Flags f = kDefaults, OneK k = kIs1000)
: value(v), flags(f), oneK(k) {}
BENCHMARK_ALWAYS_INLINE operator double const &() const { return value; }
BENCHMARK_ALWAYS_INLINE operator double const&() const { return value; }
BENCHMARK_ALWAYS_INLINE operator double&() { return value; }
};
@ -1165,7 +1165,7 @@ class BENCHMARK_EXPORT Benchmark {
// Pass this benchmark object to *func, which can customize
// the benchmark by calling various methods like Arg, Args,
// Threads, etc.
Benchmark* Apply(void (*func)(Benchmark* benchmark));
Benchmark* Apply(void (*custom_arguments)(Benchmark* benchmark));
// Set the range multiplier for non-dense range. If not called, the range
// multiplier kRangeMultiplier will be used.
@ -1869,8 +1869,8 @@ class BENCHMARK_EXPORT ConsoleReporter : public BenchmarkReporter {
void ReportRuns(const std::vector<Run>& reports) override;
protected:
virtual void PrintRunData(const Run& report);
virtual void PrintHeader(const Run& report);
virtual void PrintRunData(const Run& result);
virtual void PrintHeader(const Run& run);
OutputOptions output_options_;
size_t name_field_width_;
@ -1886,7 +1886,7 @@ class BENCHMARK_EXPORT JSONReporter : public BenchmarkReporter {
void Finalize() override;
private:
void PrintRunData(const Run& report);
void PrintRunData(const Run& run);
bool first_report_;
};
@ -1900,7 +1900,7 @@ class BENCHMARK_EXPORT BENCHMARK_DEPRECATED_MSG(
void ReportRuns(const std::vector<Run>& reports) override;
private:
void PrintRunData(const Run& report);
void PrintRunData(const Run& run);
bool printed_header_;
std::set<std::string> user_counter_names_;

@ -46,7 +46,6 @@
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
#include "perf_counters.h"
@ -198,7 +197,7 @@ State::State(std::string name, IterationCount max_iters,
// `PauseTiming`, a new `Counter` will be inserted the first time, which
// won't have the flag. Inserting them now also reduces the allocations
// during the benchmark.
if (perf_counters_measurement_) {
if (perf_counters_measurement_ != nullptr) {
for (const std::string& counter_name :
perf_counters_measurement_->names()) {
counters[counter_name] = Counter(0.0, Counter::kAvgIterations);
@ -247,7 +246,7 @@ void State::PauseTiming() {
// Add in time accumulated so far
BM_CHECK(started_ && !finished_ && !skipped());
timer_->StopTimer();
if (perf_counters_measurement_) {
if (perf_counters_measurement_ != nullptr) {
std::vector<std::pair<std::string, double>> measurements;
if (!perf_counters_measurement_->Stop(measurements)) {
BM_CHECK(false) << "Perf counters read the value failed.";
@ -265,7 +264,7 @@ void State::PauseTiming() {
void State::ResumeTiming() {
BM_CHECK(started_ && !finished_ && !skipped());
timer_->StartTimer();
if (perf_counters_measurement_) {
if (perf_counters_measurement_ != nullptr) {
perf_counters_measurement_->Start();
}
}
@ -342,7 +341,7 @@ namespace {
// Flushes streams after invoking reporter methods that write to them. This
// ensures users get timely updates even when streams are not line-buffered.
void FlushStreams(BenchmarkReporter* reporter) {
if (!reporter) {
if (reporter == nullptr) {
return;
}
std::flush(reporter->GetOutputStream());
@ -367,7 +366,7 @@ void Report(BenchmarkReporter* display_reporter,
report_one(display_reporter, run_results.display_report_aggregates_only,
run_results);
if (file_reporter) {
if (file_reporter != nullptr) {
report_one(file_reporter, run_results.file_report_aggregates_only,
run_results);
}
@ -408,7 +407,7 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
per_family_reports;
if (display_reporter->ReportContext(context) &&
(!file_reporter || file_reporter->ReportContext(context))) {
((file_reporter == nullptr) || file_reporter->ReportContext(context))) {
FlushStreams(display_reporter);
FlushStreams(file_reporter);
@ -433,12 +432,12 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
if (benchmark.complexity() != oNone) {
reports_for_family = &per_family_reports[benchmark.family_index()];
}
benchmarks_with_threads += (benchmark.threads() > 1);
benchmarks_with_threads += static_cast<int>(benchmark.threads() > 1);
runners.emplace_back(benchmark, &perfcounters, reports_for_family);
int num_repeats_of_this_instance = runners.back().GetNumRepeats();
num_repetitions_total +=
static_cast<size_t>(num_repeats_of_this_instance);
if (reports_for_family) {
if (reports_for_family != nullptr) {
reports_for_family->num_runs_total += num_repeats_of_this_instance;
}
}
@ -482,7 +481,7 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
display_reporter->ReportRunsConfig(
runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters());
if (file_reporter) {
if (file_reporter != nullptr) {
file_reporter->ReportRunsConfig(
runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters());
}
@ -506,7 +505,7 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
}
}
display_reporter->Finalize();
if (file_reporter) {
if (file_reporter != nullptr) {
file_reporter->Finalize();
}
FlushStreams(display_reporter);
@ -569,7 +568,7 @@ ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
} // end namespace internal
BenchmarkReporter* CreateDefaultDisplayReporter() {
static auto default_display_reporter =
static auto* default_display_reporter =
internal::CreateReporter(FLAGS_benchmark_format,
internal::GetOutputOptions())
.release();
@ -611,7 +610,7 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
std::ofstream output_file;
std::unique_ptr<BenchmarkReporter> default_display_reporter;
std::unique_ptr<BenchmarkReporter> default_file_reporter;
if (!display_reporter) {
if (display_reporter == nullptr) {
default_display_reporter.reset(CreateDefaultDisplayReporter());
display_reporter = default_display_reporter.get();
}
@ -619,7 +618,7 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
auto& Err = display_reporter->GetErrorStream();
std::string const& fname = FLAGS_benchmark_out;
if (fname.empty() && file_reporter) {
if (fname.empty() && (file_reporter != nullptr)) {
Err << "A custom file reporter was provided but "
"--benchmark_out=<file> was not specified.\n";
Out.flush();
@ -634,7 +633,7 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
Err.flush();
std::exit(1);
}
if (!file_reporter) {
if (file_reporter == nullptr) {
default_file_reporter = internal::CreateReporter(
FLAGS_benchmark_out_format, FLAGS_benchmark_counters_tabular
? ConsoleReporter::OO_Tabular
@ -743,8 +742,8 @@ void SetDefaultTimeUnitFromFlag(const std::string& time_unit_flag) {
void ParseCommandLineFlags(int* argc, char** argv) {
using namespace benchmark;
BenchmarkReporter::Context::executable_name =
(argc && *argc > 0) ? argv[0] : "unknown";
for (int i = 1; argc && i < *argc; ++i) {
((argc != nullptr) && *argc > 0) ? argv[0] : "unknown";
for (int i = 1; (argc != nullptr) && i < *argc; ++i) {
if (ParseBoolFlag(argv[i], "benchmark_list_tests",
&FLAGS_benchmark_list_tests) ||
ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||

@ -101,7 +101,7 @@ State BenchmarkInstance::Run(
}
void BenchmarkInstance::Setup() const {
if (setup_) {
if (setup_ != nullptr) {
State st(name_.function_name, /*iters*/ 1, args_, /*thread_id*/ 0, threads_,
nullptr, nullptr, nullptr, nullptr);
setup_(st);
@ -109,7 +109,7 @@ void BenchmarkInstance::Setup() const {
}
void BenchmarkInstance::Teardown() const {
if (teardown_) {
if (teardown_ != nullptr) {
State st(name_.function_name, /*iters*/ 1, args_, /*thread_id*/ 0, threads_,
nullptr, nullptr, nullptr, nullptr);
teardown_(st);

@ -17,9 +17,9 @@ namespace internal {
// Information kept per benchmark we may want to run
class BenchmarkInstance {
public:
BenchmarkInstance(Benchmark* benchmark, int family_index,
int per_family_instance_index,
const std::vector<int64_t>& args, int threads);
BenchmarkInstance(Benchmark* benchmark, int family_idx,
int per_family_instance_idx,
const std::vector<int64_t>& args, int thread_count);
const BenchmarkName& name() const { return name_; }
int family_index() const { return family_index_; }

@ -14,5 +14,5 @@
#include "benchmark/benchmark.h"
BENCHMARK_EXPORT int main(int, char**);
BENCHMARK_EXPORT int main(int /*argc*/, char** /*argv*/);
BENCHMARK_MAIN();

@ -27,8 +27,8 @@ size_t size_impl(const Head& head, const Tail&... tail) {
}
// Join a pack of std::strings using a delimiter
// TODO: use absl::StrJoin
void join_impl(std::string&, char) {}
// TODO(dominic): use absl::StrJoin
void join_impl(std::string& /*unused*/, char /*unused*/) {}
template <typename Head, typename... Tail>
void join_impl(std::string& s, const char delimiter, const Head& head,

@ -53,13 +53,13 @@ namespace benchmark {
namespace {
// For non-dense Range, intermediate values are powers of kRangeMultiplier.
static constexpr int kRangeMultiplier = 8;
constexpr int kRangeMultiplier = 8;
// The size of a benchmark family determines is the number of inputs to repeat
// the benchmark on. If this is "large" then warn the user during configuration.
static constexpr size_t kMaxFamilySize = 100;
constexpr size_t kMaxFamilySize = 100;
static constexpr char kDisabledPrefix[] = "DISABLED_";
constexpr char kDisabledPrefix[] = "DISABLED_";
} // end namespace
namespace internal {
@ -82,7 +82,7 @@ class BenchmarkFamilies {
// Extract the list of benchmark instances that match the specified
// regular expression.
bool FindBenchmarks(std::string re,
bool FindBenchmarks(std::string spec,
std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err);

@ -46,7 +46,6 @@
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
#include "perf_counters.h"
@ -74,7 +73,7 @@ ProfilerManager* profiler_manager = nullptr;
namespace {
static constexpr IterationCount kMaxIterations = 1000000000000;
constexpr IterationCount kMaxIterations = 1000000000000;
const double kDefaultMinTime =
std::strtod(::benchmark::kDefaultMinTimeStr, /*p_end*/ nullptr);
@ -100,7 +99,7 @@ BenchmarkReporter::Run CreateRunReport(
report.repetition_index = repetition_index;
report.repetitions = repeats;
if (!report.skipped) {
if (report.skipped == 0u) {
if (b.use_manual_time()) {
report.real_accumulated_time = results.manual_time_used;
} else {
@ -118,9 +117,10 @@ BenchmarkReporter::Run CreateRunReport(
assert(memory_result != nullptr);
report.memory_result = memory_result;
report.allocs_per_iter =
memory_iterations ? static_cast<double>(memory_result->num_allocs) /
static_cast<double>(memory_iterations)
: 0;
memory_iterations != 0
? static_cast<double>(memory_result->num_allocs) /
static_cast<double>(memory_iterations)
: 0;
}
internal::Finish(&report.counters, results.iterations, seconds,
@ -273,10 +273,11 @@ BenchmarkRunner::BenchmarkRunner(
FLAGS_benchmark_report_aggregates_only;
if (b.aggregation_report_mode() != internal::ARM_Unspecified) {
run_results.display_report_aggregates_only =
(b.aggregation_report_mode() &
internal::ARM_DisplayReportAggregatesOnly);
((b.aggregation_report_mode() &
internal::ARM_DisplayReportAggregatesOnly) != 0u);
run_results.file_report_aggregates_only =
(b.aggregation_report_mode() & internal::ARM_FileReportAggregatesOnly);
((b.aggregation_report_mode() &
internal::ARM_FileReportAggregatesOnly) != 0u);
BM_CHECK(FLAGS_benchmark_perf_counters.empty() ||
(perf_counters_measurement_ptr->num_counters() == 0))
<< "Perf counters were requested but could not be set up.";
@ -364,7 +365,7 @@ bool BenchmarkRunner::ShouldReportIterationResults(
// Determine if this run should be reported;
// Either it has run for a sufficient amount of time
// or because an error was reported.
return i.results.skipped_ || FLAGS_benchmark_dry_run ||
return (i.results.skipped_ != 0u) || FLAGS_benchmark_dry_run ||
i.iters >= kMaxIterations || // Too many iterations already.
i.seconds >=
GetMinTimeToApply() || // The elapsed time is large enough.
@ -528,9 +529,9 @@ void BenchmarkRunner::DoOneRepetition() {
CreateRunReport(b, i.results, memory_iterations, memory_result, i.seconds,
num_repetitions_done, repeats);
if (reports_for_family) {
if (reports_for_family != nullptr) {
++reports_for_family->num_runs_done;
if (!report.skipped) {
if (report.skipped == 0u) {
reports_for_family->Runs.push_back(report);
}
}

@ -51,7 +51,7 @@ BenchTimeType ParseBenchMinTime(const std::string& value);
class BenchmarkRunner {
public:
BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
benchmark::internal::PerfCountersMeasurement* pmc_,
benchmark::internal::PerfCountersMeasurement* pcm_,
BenchmarkReporter::PerFamilyRunReports* reports_for_family);
int GetNumRepeats() const { return repeats; }

@ -156,7 +156,7 @@ void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
SetConsoleTextAttribute(stdout_handle, original_color_attrs);
#else
const char* color_code = GetPlatformColorCode(color);
if (color_code) {
if (color_code != nullptr) {
out << FormatString("\033[0;3%sm", color_code);
}
out << FormatString(fmt, args) << "\033[m";
@ -195,7 +195,7 @@ bool IsColorTerminal() {
bool term_supports_color = false;
for (const char* candidate : SUPPORTED_TERM_VALUES) {
if (term && 0 == strcmp(term, candidate)) {
if ((term != nullptr) && 0 == strcmp(term, candidate)) {
term_supports_color = true;
break;
}

@ -109,7 +109,7 @@ bool ParseKvPairs(const std::string& src_text, const char* str,
// Returns the name of the environment variable corresponding to the
// given flag. For example, FlagToEnvVar("foo") will return
// "BENCHMARK_FOO" in the open-source version.
static std::string FlagToEnvVar(const char* flag) {
std::string FlagToEnvVar(const char* flag) {
const std::string flag_str(flag);
std::string env_var;

@ -63,7 +63,7 @@ void ConsoleReporter::PrintHeader(const Run& run) {
FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_),
"Benchmark", "Time", "CPU", "Iterations");
if (!run.counters.empty()) {
if (output_options_ & OO_Tabular) {
if ((output_options_ & OO_Tabular) != 0) {
for (auto const& c : run.counters) {
str += FormatString(" %10s", c.first.c_str());
}
@ -83,7 +83,7 @@ void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
bool print_header = !printed_header_;
// --- or if the format is tabular and this run
// has different fields from the prev header
print_header |= (output_options_ & OO_Tabular) &&
print_header |= ((output_options_ & OO_Tabular) != 0) &&
(!internal::SameNames(run.counters, prev_counters_));
if (print_header) {
printed_header_ = true;
@ -97,8 +97,8 @@ void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
}
}
static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt,
...) {
static void IgnoreColorPrint(std::ostream& out, LogColor /*unused*/,
const char* fmt, ...) {
va_list args;
va_start(args, fmt);
out << FormatString(fmt, args);
@ -131,7 +131,7 @@ BENCHMARK_EXPORT
void ConsoleReporter::PrintRunData(const Run& result) {
typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...);
auto& Out = GetOutputStream();
PrinterFn* printer = (output_options_ & OO_Color)
PrinterFn* printer = (output_options_ & OO_Color) != 0
? static_cast<PrinterFn*>(ColorPrintf)
: IgnoreColorPrint;
auto name_color =
@ -144,7 +144,8 @@ void ConsoleReporter::PrintRunData(const Run& result) {
result.skip_message.c_str());
printer(Out, COLOR_DEFAULT, "\n");
return;
} else if (internal::SkippedWithMessage == result.skipped) {
}
if (internal::SkippedWithMessage == result.skipped) {
printer(Out, COLOR_WHITE, "SKIPPED: \'%s\'", result.skip_message.c_str());
printer(Out, COLOR_DEFAULT, "\n");
return;
@ -178,9 +179,9 @@ void ConsoleReporter::PrintRunData(const Run& result) {
printer(Out, COLOR_CYAN, "%10lld", result.iterations);
}
for (auto& c : result.counters) {
for (const auto& c : result.counters) {
const std::size_t cNameLen =
std::max(std::string::size_type(10), c.first.length());
std::max(static_cast<std::size_t>(10), c.first.length());
std::string s;
const char* unit = "";
if (result.run_type == Run::RT_Aggregate &&
@ -189,11 +190,11 @@ void ConsoleReporter::PrintRunData(const Run& result) {
unit = "%";
} else {
s = HumanReadableNumber(c.second.value, c.second.oneK);
if (c.second.flags & Counter::kIsRate) {
unit = (c.second.flags & Counter::kInvert) ? "s" : "/s";
if ((c.second.flags & Counter::kIsRate) != 0) {
unit = (c.second.flags & Counter::kInvert) != 0 ? "s" : "/s";
}
}
if (output_options_ & OO_Tabular) {
if ((output_options_ & OO_Tabular) != 0) {
printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(),
unit);
} else {

@ -20,20 +20,20 @@ namespace internal {
double Finish(Counter const& c, IterationCount iterations, double cpu_time,
double num_threads) {
double v = c.value;
if (c.flags & Counter::kIsRate) {
if ((c.flags & Counter::kIsRate) != 0) {
v /= cpu_time;
}
if (c.flags & Counter::kAvgThreads) {
if ((c.flags & Counter::kAvgThreads) != 0) {
v /= num_threads;
}
if (c.flags & Counter::kIsIterationInvariant) {
if ((c.flags & Counter::kIsIterationInvariant) != 0) {
v *= static_cast<double>(iterations);
}
if (c.flags & Counter::kAvgIterations) {
if ((c.flags & Counter::kAvgIterations) != 0) {
v /= static_cast<double>(iterations);
}
if (c.flags & Counter::kInvert) { // Invert is *always* last.
if ((c.flags & Counter::kInvert) != 0) { // Invert is *always* last.
v = 1.0 / v;
}
return v;

@ -115,7 +115,7 @@ BENCHMARK_EXPORT
void CSVReporter::PrintRunData(const Run& run) {
std::ostream& Out = GetOutputStream();
Out << CsvEscape(run.benchmark_name()) << ",";
if (run.skipped) {
if (run.skipped != 0u) {
Out << std::string(elements.size() - 3, ',');
Out << std::boolalpha << (internal::SkippedWithError == run.skipped) << ",";
Out << CsvEscape(run.skip_message) << "\n";

@ -85,6 +85,10 @@ std::string FormatKV(std::string const& key, int64_t value) {
return ss.str();
}
std::string FormatKV(std::string const& key, int value) {
return FormatKV(key, static_cast<int64_t>(value));
}
std::string FormatKV(std::string const& key, double value) {
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": ";
@ -122,7 +126,7 @@ bool JSONReporter::ReportContext(const Context& context) {
out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
if (Context::executable_name) {
if (Context::executable_name != nullptr) {
out << indent << FormatKV("executable", Context::executable_name) << ",\n";
}
@ -136,7 +140,7 @@ bool JSONReporter::ReportContext(const Context& context) {
if (CPUInfo::Scaling::UNKNOWN != info.scaling) {
out << indent
<< FormatKV("cpu_scaling_enabled",
info.scaling == CPUInfo::Scaling::ENABLED ? true : false)
info.scaling == CPUInfo::Scaling::ENABLED)
<< ",\n";
}
@ -144,7 +148,7 @@ bool JSONReporter::ReportContext(const Context& context) {
indent = std::string(6, ' ');
std::string cache_indent(8, ' ');
for (size_t i = 0; i < info.caches.size(); ++i) {
auto& CI = info.caches[i];
const auto& CI = info.caches[i];
out << indent << "{\n";
out << cache_indent << FormatKV("type", CI.type) << ",\n";
out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level))
@ -183,7 +187,7 @@ bool JSONReporter::ReportContext(const Context& context) {
out << ",\n";
// NOTE: our json schema is not strictly tied to the library version!
out << indent << FormatKV("json_schema_version", int64_t(1));
out << indent << FormatKV("json_schema_version", 1);
std::map<std::string, std::string>* global_context =
internal::GetGlobalContext();
@ -298,11 +302,11 @@ void JSONReporter::PrintRunData(Run const& run) {
out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
}
for (auto& c : run.counters) {
for (const auto& c : run.counters) {
out << ",\n" << indent << FormatKV(c.first, c.second);
}
if (run.memory_result) {
if (run.memory_result != nullptr) {
const MemoryManager::Result memory_result = *run.memory_result;
out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
out << ",\n"

@ -42,17 +42,18 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
Out << LocalDateTimeString() << "\n";
#endif
if (context.executable_name) {
Out << "Running " << context.executable_name << "\n";
if (benchmark::BenchmarkReporter::Context::executable_name != nullptr) {
Out << "Running " << benchmark::BenchmarkReporter::Context::executable_name
<< "\n";
}
const CPUInfo &info = context.cpu_info;
Out << "Run on (" << info.num_cpus << " X "
<< (info.cycles_per_second / 1000000.0) << " MHz CPU "
<< ((info.num_cpus > 1) ? "s" : "") << ")\n";
if (info.caches.size() != 0) {
if (!info.caches.empty()) {
Out << "CPU Caches:\n";
for (auto &CInfo : info.caches) {
for (const auto &CInfo : info.caches) {
Out << " L" << CInfo.level << " " << CInfo.type << " "
<< (CInfo.size / 1024) << " KiB";
if (CInfo.num_sharing != 0) {

@ -153,7 +153,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
for (Run const& run : reports) {
BM_CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
BM_CHECK_EQ(run_iterations, run.iterations);
if (run.skipped) {
if (run.skipped != 0u) {
continue;
}
real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
@ -176,7 +176,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
}
const double iteration_rescale_factor =
double(reports.size()) / double(run_iterations);
static_cast<double>(reports.size()) / static_cast<double>(run_iterations);
for (const auto& Stat : *reports[0].statistics) {
// Get the data from the accumulator to BenchmarkReporter::Run's.

@ -29,7 +29,7 @@ static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits),
static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits),
"Small SI and Big SI unit arrays must be the same size");
static const int64_t kUnitsSize = arraysize(kBigSIUnits);
const int64_t kUnitsSize = arraysize(kBigSIUnits);
void ToExponentAndMantissa(double val, int precision, double one_k,
std::string* mantissa, int64_t* exponent) {

@ -76,7 +76,6 @@
#include "benchmark/benchmark.h"
#include "check.h"
#include "cycleclock.h"
#include "internal_macros.h"
#include "log.h"
#include "string_util.h"
#include "timers.h"
@ -121,7 +120,7 @@ struct ValueUnion {
explicit ValueUnion(std::size_t buff_size)
: size(sizeof(DataT) + buff_size),
buff(::new (std::malloc(size)) DataT(), &std::free) {}
buff(::new(std::malloc(size)) DataT(), &std::free) {}
ValueUnion(ValueUnion&& other) = default;

@ -107,7 +107,7 @@ double MakeTime(struct timespec const& ts) {
}
#endif
BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) {
BENCHMARK_NORETURN void DiagnoseAndExit(const char* msg) {
std::cerr << "ERROR: " << msg << '\n';
std::flush(std::cerr);
std::exit(EXIT_FAILURE);

@ -5,7 +5,8 @@
void BM_empty(benchmark::State& state) {
for (auto _ : state) {
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
}

@ -48,7 +48,7 @@ class BenchmarkTest : public testing::Test {
static void TeardownHook(int /* num_threads */) { queue->push("Teardown"); }
void Execute(const std::string& pattern) {
static void Execute(const std::string& pattern) {
queue->Clear();
std::unique_ptr<BenchmarkReporter> reporter(new NullReporter());

@ -80,7 +80,7 @@ int fixture_setup = 0;
class FIXTURE_BECHMARK_NAME : public ::benchmark::Fixture {
public:
void SetUp(const ::benchmark::State&) override {
void SetUp(const ::benchmark::State& /*unused*/) override {
fixture_interaction::fixture_setup++;
}
@ -92,7 +92,7 @@ BENCHMARK_F(FIXTURE_BECHMARK_NAME, BM_WithFixture)(benchmark::State& st) {
}
}
static void DoSetupWithFixture(const benchmark::State&) {
static void DoSetupWithFixture(const benchmark::State& /*unused*/) {
fixture_interaction::setup++;
}
@ -110,7 +110,7 @@ namespace repetitions {
int setup = 0;
}
static void DoSetupWithRepetitions(const benchmark::State&) {
static void DoSetupWithRepetitions(const benchmark::State& /*unused*/) {
repetitions::setup++;
}
static void BM_WithRep(benchmark::State& state) {

@ -46,16 +46,17 @@ void try_invalid_pause_resume(benchmark::State& state) {
void BM_diagnostic_test(benchmark::State& state) {
static bool called_once = false;
if (called_once == false) {
if (!called_once) {
try_invalid_pause_resume(state);
}
for (auto _ : state) {
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
if (called_once == false) {
if (!called_once) {
try_invalid_pause_resume(state);
}
@ -66,16 +67,17 @@ BENCHMARK(BM_diagnostic_test);
void BM_diagnostic_test_keep_running(benchmark::State& state) {
static bool called_once = false;
if (called_once == false) {
if (!called_once) {
try_invalid_pause_resume(state);
}
while (state.KeepRunning()) {
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
if (called_once == false) {
if (!called_once) {
try_invalid_pause_resume(state);
}

@ -4,7 +4,7 @@
namespace {
#if defined(__GNUC__)
std::int64_t double_up(const std::int64_t x) __attribute__((const));
std::int64_t double_up(std::int64_t x) __attribute__((const));
#endif
std::int64_t double_up(const std::int64_t x) { return x * 2; }
} // namespace
@ -26,7 +26,7 @@ struct BitRef {
BitRef(int i, unsigned char& b) : index(i), byte(b) {}
};
int main(int, char*[]) {
int main(int /*unused*/, char* /*unused*/[]) {
// this test verifies compilation of DoNotOptimize() for some types
char buffer1[1] = "";

@ -2,7 +2,8 @@
void BM_empty(benchmark::State& state) {
for (auto _ : state) {
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
}

@ -39,7 +39,7 @@ class MapFixture : public ::benchmark::Fixture {
m = ConstructRandomMap(static_cast<int>(st.range(0)));
}
void TearDown(const ::benchmark::State&) override { m.clear(); }
void TearDown(const ::benchmark::State& /*unused*/) override { m.clear(); }
std::map<int, int> m;
};

@ -14,7 +14,8 @@ class TestMemoryManager : public benchmark::MemoryManager {
void BM_empty(benchmark::State& state) {
for (auto _ : state) {
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
}

@ -98,7 +98,7 @@ void CheckCase(std::stringstream& remaining_output, TestCase const& TC,
std::string first_line;
bool on_first = true;
std::string line;
while (remaining_output.eof() == false) {
while (!remaining_output.eof()) {
BM_CHECK(remaining_output.good());
std::getline(remaining_output, line);
if (on_first) {
@ -149,7 +149,7 @@ class TestReporter : public benchmark::BenchmarkReporter {
bool ReportContext(const Context& context) override {
bool last_ret = false;
bool first = true;
for (auto rep : reporters_) {
for (auto* rep : reporters_) {
bool new_ret = rep->ReportContext(context);
BM_CHECK(first || new_ret == last_ret)
<< "Reports return different values for ReportContext";
@ -161,12 +161,12 @@ class TestReporter : public benchmark::BenchmarkReporter {
}
void ReportRuns(const std::vector<Run>& report) override {
for (auto rep : reporters_) {
for (auto* rep : reporters_) {
rep->ReportRuns(report);
}
}
void Finalize() override {
for (auto rep : reporters_) {
for (auto* rep : reporters_) {
rep->Finalize();
}
}
@ -206,7 +206,7 @@ class ResultsChecker {
void SetHeader_(const std::string& csv_header);
void SetValues_(const std::string& entry_csv_line);
std::vector<std::string> SplitCsv_(const std::string& line);
std::vector<std::string> SplitCsv_(const std::string& line) const;
};
// store the static ResultsChecker in a function to prevent initialization
@ -239,7 +239,7 @@ void ResultsChecker::CheckResults(std::stringstream& output) {
// now go over every line and publish it to the ResultsChecker
std::string line;
bool on_first = true;
while (output.eof() == false) {
while (!output.eof()) {
BM_CHECK(output.good());
std::getline(output, line);
if (on_first) {
@ -287,7 +287,8 @@ void ResultsChecker::SetValues_(const std::string& entry_csv_line) {
}
// a quick'n'dirty csv splitter (eliminating quotes)
std::vector<std::string> ResultsChecker::SplitCsv_(const std::string& line) {
std::vector<std::string> ResultsChecker::SplitCsv_(
const std::string& line) const {
std::vector<std::string> out;
if (line.empty()) {
return out;
@ -295,8 +296,10 @@ std::vector<std::string> ResultsChecker::SplitCsv_(const std::string& line) {
if (!field_names.empty()) {
out.reserve(field_names.size());
}
size_t prev = 0, pos = line.find_first_of(','), curr = pos;
while (pos != line.npos) {
size_t prev = 0;
size_t pos = line.find_first_of(',');
size_t curr = pos;
while (pos != std::string::npos) {
BM_CHECK(curr > 0);
if (line[prev] == '"') {
++prev;
@ -330,7 +333,7 @@ size_t AddChecker(const std::string& bm_name, const ResultsCheckFn& fn) {
int Results::NumThreads() const {
auto pos = name.find("/threads:");
if (pos == name.npos) {
if (pos == std::string::npos) {
return 1;
}
auto end = name.find('/', pos + 9);
@ -348,7 +351,7 @@ double Results::GetTime(BenchmarkTime which) const {
BM_CHECK(which == kCpuTime || which == kRealTime);
const char* which_str = which == kCpuTime ? "cpu_time" : "real_time";
double val = GetAs<double>(which_str);
auto unit = Get("time_unit");
const auto* unit = Get("time_unit");
BM_CHECK(unit);
if (*unit == "ns") {
return val * 1.e-9;
@ -517,7 +520,7 @@ static std::string GetTempFileName() {
// create the same file at the same time. However, it still introduces races
// similar to tmpnam.
int retries = 3;
while (--retries) {
while (--retries != 0) {
std::string name = GetRandomFileName();
if (!FileExists(name)) {
return name;
@ -539,7 +542,7 @@ std::string GetFileReporterOutput(int argc, char* argv[]) {
tmp += tmp_file_name;
new_argv.emplace_back(const_cast<char*>(tmp.c_str()));
argc = int(new_argv.size());
argc = static_cast<int>(new_argv.size());
benchmark::Initialize(&argc, new_argv.data());
benchmark::RunSpecifiedBenchmarks();

@ -164,7 +164,7 @@ void RunTestOne() {
// benchmarks.
// Also test that new benchmarks can be registered and ran afterwards.
void RunTestTwo() {
assert(ExpectedResults.size() != 0 &&
assert(!ExpectedResults.empty() &&
"must have at least one registered benchmark");
ExpectedResults.clear();
benchmark::ClearRegisteredBenchmarks();

@ -96,7 +96,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
void BM_bytes_per_second(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
state.SetBytesProcessed(1);
@ -128,7 +129,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
void BM_items_per_second(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
state.SetItemsProcessed(1);
@ -409,7 +411,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"},
void BM_Complexity_O1(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
state.SetComplexityN(state.range(0));

@ -97,11 +97,11 @@ BENCHMARK(BM_error_before_running_range_for);
ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}});
void BM_error_during_running(benchmark::State& state) {
int first_iter = true;
int first_iter = 1;
while (state.KeepRunning()) {
if (state.range(0) == 1 && state.thread_index() <= (state.threads() / 2)) {
assert(first_iter);
first_iter = false;
first_iter = 0;
state.SkipWithError("error message");
} else {
state.PauseTiming();
@ -143,7 +143,8 @@ ADD_CASES("BM_error_during_running_ranged_for",
void BM_error_after_running(benchmark::State& state) {
for (auto _ : state) {
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
if (state.thread_index() <= (state.threads() / 2)) {

@ -13,18 +13,18 @@ namespace {
TEST(StringUtilTest, stoul) {
{
size_t pos = 0;
EXPECT_EQ(0ul, benchmark::stoul("0", &pos));
EXPECT_EQ(1ul, pos);
EXPECT_EQ(0UL, benchmark::stoul("0", &pos));
EXPECT_EQ(1UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(7ul, benchmark::stoul("7", &pos));
EXPECT_EQ(1ul, pos);
EXPECT_EQ(7UL, benchmark::stoul("7", &pos));
EXPECT_EQ(1UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(135ul, benchmark::stoul("135", &pos));
EXPECT_EQ(3ul, pos);
EXPECT_EQ(135UL, benchmark::stoul("135", &pos));
EXPECT_EQ(3UL, pos);
}
#if ULONG_MAX == 0xFFFFFFFFul
{
@ -35,35 +35,35 @@ TEST(StringUtilTest, stoul) {
#elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul
{
size_t pos = 0;
EXPECT_EQ(0xFFFFFFFFFFFFFFFFul,
EXPECT_EQ(0xFFFFFFFFFFFFFFFFUL,
benchmark::stoul("18446744073709551615", &pos));
EXPECT_EQ(20ul, pos);
EXPECT_EQ(20UL, pos);
}
#endif
{
size_t pos = 0;
EXPECT_EQ(10ul, benchmark::stoul("1010", &pos, 2));
EXPECT_EQ(4ul, pos);
EXPECT_EQ(10UL, benchmark::stoul("1010", &pos, 2));
EXPECT_EQ(4UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(520ul, benchmark::stoul("1010", &pos, 8));
EXPECT_EQ(4ul, pos);
EXPECT_EQ(520UL, benchmark::stoul("1010", &pos, 8));
EXPECT_EQ(4UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1010ul, benchmark::stoul("1010", &pos, 10));
EXPECT_EQ(4ul, pos);
EXPECT_EQ(1010UL, benchmark::stoul("1010", &pos, 10));
EXPECT_EQ(4UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(4112ul, benchmark::stoul("1010", &pos, 16));
EXPECT_EQ(4ul, pos);
EXPECT_EQ(4112UL, benchmark::stoul("1010", &pos, 16));
EXPECT_EQ(4UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(0xBEEFul, benchmark::stoul("BEEF", &pos, 16));
EXPECT_EQ(4ul, pos);
EXPECT_EQ(0xBEEFUL, benchmark::stoul("BEEF", &pos, 16));
EXPECT_EQ(4UL, pos);
}
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
{
@ -73,83 +73,87 @@ TEST(StringUtilTest, stoul) {
#endif
}
TEST(StringUtilTest, stoi){{size_t pos = 0;
EXPECT_EQ(0, benchmark::stoi("0", &pos));
EXPECT_EQ(1ul, pos);
} // namespace
{
size_t pos = 0;
EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
EXPECT_EQ(3ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1357, benchmark::stoi("1357", &pos));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
EXPECT_EQ(4ul, pos);
}
TEST(StringUtilTest, stoi) {
{
size_t pos = 0;
EXPECT_EQ(0, benchmark::stoi("0", &pos));
EXPECT_EQ(1UL, pos);
} // namespace
{
size_t pos = 0;
EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
EXPECT_EQ(3UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1357, benchmark::stoi("1357", &pos));
EXPECT_EQ(4UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2));
EXPECT_EQ(4UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8));
EXPECT_EQ(4UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10));
EXPECT_EQ(4UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16));
EXPECT_EQ(4UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
EXPECT_EQ(4UL, pos);
}
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
{
ASSERT_THROW(std::ignore = benchmark::stoi("this is a test"),
std::invalid_argument);
}
{
ASSERT_THROW(std::ignore = benchmark::stoi("this is a test"),
std::invalid_argument);
}
#endif
}
TEST(StringUtilTest, stod){{size_t pos = 0;
EXPECT_EQ(0.0, benchmark::stod("0", &pos));
EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(-84.0, benchmark::stod("-84", &pos));
EXPECT_EQ(3ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1234.0, benchmark::stod("1234", &pos));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1.5, benchmark::stod("1.5", &pos));
EXPECT_EQ(3ul, pos);
}
{
size_t pos = 0;
/* Note: exactly representable as double */
EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
EXPECT_EQ(8ul, pos);
}
TEST(StringUtilTest, stod) {
{
size_t pos = 0;
EXPECT_EQ(0.0, benchmark::stod("0", &pos));
EXPECT_EQ(1UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(-84.0, benchmark::stod("-84", &pos));
EXPECT_EQ(3UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1234.0, benchmark::stod("1234", &pos));
EXPECT_EQ(4UL, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1.5, benchmark::stod("1.5", &pos));
EXPECT_EQ(3UL, pos);
}
{
size_t pos = 0;
/* Note: exactly representable as double */
EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
EXPECT_EQ(8UL, pos);
}
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
{
ASSERT_THROW(std::ignore = benchmark::stod("this is a test"),
std::invalid_argument);
}
{
ASSERT_THROW(std::ignore = benchmark::stod("this is a test"),
std::invalid_argument);
}
#endif
}

@ -9,7 +9,7 @@ namespace {
class DummyBenchmark : public Benchmark {
public:
DummyBenchmark() : Benchmark("dummy") {}
void Run(State&) override {}
void Run(State& /*state*/) override {}
};
TEST(DefaultTimeUnitTest, TimeUnitIsNotSet) {

@ -64,7 +64,8 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,"
void BM_Counters_Tabular(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;
@ -375,7 +376,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:2$",
void BM_CounterRates_Tabular(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;

@ -67,7 +67,8 @@ int num_calls1 = 0;
void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
state.counters["foo"] = 1;
@ -119,7 +120,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
void BM_Counters_Rate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;
@ -163,7 +165,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
void BM_Invert(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;
@ -204,7 +207,8 @@ CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);
void BM_Counters_InvertedRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;
@ -333,7 +337,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
void BM_Counters_AvgThreadsRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;
@ -421,7 +426,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant",
void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;
@ -513,7 +519,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);
void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
auto iterations = static_cast<double>(state.iterations()) *
static_cast<double>(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;