Mirror of https://github.com/google/benchmark.git (synced 2025-02-26 19:30:12 +08:00)
[clang-tidy] fix missing braces (#1928)

* [clang-tidy] fix missing braces

parent c68e308b4f
commit f8db7f6c07
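The change below is purely mechanical: every single-statement `if`/`for`/`while` body gains braces. A minimal before/after sketch of the pattern, taken from the first hunk (the clang-tidy check involved is presumably readability-braces-around-statements, though the commit message does not name it):

    // Before: unbraced single-statement body, flagged by clang-tidy.
    if (timer_->running()) timer_->StopTimer();

    // After: the body is wrapped in braces; behavior is unchanged.
    if (timer_->running()) {
      timer_->StopTimer();
    }

Fixes like this can usually be applied in bulk with clang-tidy's -fix option; whether this commit was generated that way or edited by hand is not stated.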
@@ -280,7 +280,9 @@ void State::SkipWithMessage(const std::string& msg) {
     }
   }
   total_iterations_ = 0;
-  if (timer_->running()) timer_->StopTimer();
+  if (timer_->running()) {
+    timer_->StopTimer();
+  }
 }
 
 void State::SkipWithError(const std::string& msg) {
@@ -293,7 +295,9 @@ void State::SkipWithError(const std::string& msg) {
     }
   }
   total_iterations_ = 0;
-  if (timer_->running()) timer_->StopTimer();
+  if (timer_->running()) {
+    timer_->StopTimer();
+  }
 }
 
 void State::SetIterationTime(double seconds) {
@@ -309,10 +313,13 @@ void State::StartKeepRunning() {
   BM_CHECK(!started_ && !finished_);
   started_ = true;
   total_iterations_ = skipped() ? 0 : max_iterations;
-  if (BENCHMARK_BUILTIN_EXPECT(profiler_manager_ != nullptr, false))
+  if (BENCHMARK_BUILTIN_EXPECT(profiler_manager_ != nullptr, false)) {
     profiler_manager_->AfterSetupStart();
+  }
   manager_->StartStopBarrier();
-  if (!skipped()) ResumeTiming();
+  if (!skipped()) {
+    ResumeTiming();
+  }
 }
 
 void State::FinishKeepRunning() {
@@ -324,8 +331,9 @@ void State::FinishKeepRunning() {
   total_iterations_ = 0;
   finished_ = true;
   manager_->StartStopBarrier();
-  if (BENCHMARK_BUILTIN_EXPECT(profiler_manager_ != nullptr, false))
+  if (BENCHMARK_BUILTIN_EXPECT(profiler_manager_ != nullptr, false)) {
     profiler_manager_->BeforeTeardownStop();
+  }
 }
 
 namespace internal {
@@ -334,7 +342,9 @@ namespace {
 // Flushes streams after invoking reporter methods that write to them. This
 // ensures users get timely updates even when streams are not line-buffered.
 void FlushStreams(BenchmarkReporter* reporter) {
-  if (!reporter) return;
+  if (!reporter) {
+    return;
+  }
   std::flush(reporter->GetOutputStream());
   std::flush(reporter->GetErrorStream());
 }
@@ -347,16 +357,20 @@ void Report(BenchmarkReporter* display_reporter,
     assert(reporter);
     // If there are no aggregates, do output non-aggregates.
     aggregates_only &= !results.aggregates_only.empty();
-    if (!aggregates_only) reporter->ReportRuns(results.non_aggregates);
-    if (!results.aggregates_only.empty())
+    if (!aggregates_only) {
+      reporter->ReportRuns(results.non_aggregates);
+    }
+    if (!results.aggregates_only.empty()) {
       reporter->ReportRuns(results.aggregates_only);
+    }
   };
 
   report_one(display_reporter, run_results.display_report_aggregates_only,
              run_results);
-  if (file_reporter)
+  if (file_reporter) {
     report_one(file_reporter, run_results.file_report_aggregates_only,
                run_results);
+  }
 
   FlushStreams(display_reporter);
   FlushStreams(file_reporter);
@@ -377,10 +391,13 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
         std::max<size_t>(name_field_width, benchmark.name().str().size());
     might_have_aggregates |= benchmark.repetitions() > 1;
 
-    for (const auto& Stat : benchmark.statistics())
+    for (const auto& Stat : benchmark.statistics()) {
       stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
+    }
   }
-  if (might_have_aggregates) name_field_width += 1 + stat_field_width;
+  if (might_have_aggregates) {
+    name_field_width += 1 + stat_field_width;
+  }
 
   // Print header here
   BenchmarkReporter::Context context;
@@ -413,15 +430,17 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
   // Loop through all benchmarks
   for (const BenchmarkInstance& benchmark : benchmarks) {
     BenchmarkReporter::PerFamilyRunReports* reports_for_family = nullptr;
-    if (benchmark.complexity() != oNone)
+    if (benchmark.complexity() != oNone) {
       reports_for_family = &per_family_reports[benchmark.family_index()];
+    }
     benchmarks_with_threads += (benchmark.threads() > 1);
     runners.emplace_back(benchmark, &perfcounters, reports_for_family);
     int num_repeats_of_this_instance = runners.back().GetNumRepeats();
     num_repetitions_total +=
         static_cast<size_t>(num_repeats_of_this_instance);
-    if (reports_for_family)
+    if (reports_for_family) {
       reports_for_family->num_runs_total += num_repeats_of_this_instance;
+    }
   }
   assert(runners.size() == benchmarks.size() && "Unexpected runner count.");
 
@@ -456,14 +475,17 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
   for (size_t repetition_index : repetition_indices) {
     internal::BenchmarkRunner& runner = runners[repetition_index];
     runner.DoOneRepetition();
-    if (runner.HasRepeatsRemaining()) continue;
+    if (runner.HasRepeatsRemaining()) {
+      continue;
+    }
     // FIXME: report each repetition separately, not all of them in bulk.
 
     display_reporter->ReportRunsConfig(
         runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters());
-    if (file_reporter)
+    if (file_reporter) {
       file_reporter->ReportRunsConfig(
           runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters());
+    }
 
     RunResults run_results = runner.GetResults();
 
@@ -484,7 +506,9 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
     }
   }
   display_reporter->Finalize();
-  if (file_reporter) file_reporter->Finalize();
+  if (file_reporter) {
+    file_reporter->Finalize();
+  }
   FlushStreams(display_reporter);
   FlushStreams(file_reporter);
 }
@@ -579,8 +603,9 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
 size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
                               BenchmarkReporter* file_reporter,
                               std::string spec) {
-  if (spec.empty() || spec == "all")
+  if (spec.empty() || spec == "all") {
     spec = ".";  // Regexp that matches all benchmarks
+  }
 
   // Setup the reporters
   std::ofstream output_file;
@@ -635,8 +660,9 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
   }
 
   if (FLAGS_benchmark_list_tests) {
-    for (auto const& benchmark : benchmarks)
+    for (auto const& benchmark : benchmarks) {
       Out << benchmark.name().str() << "\n";
+    }
   } else {
     internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
   }
@@ -749,7 +775,9 @@ void ParseCommandLineFlags(int* argc, char** argv) {
         ParseStringFlag(argv[i], "benchmark_time_unit",
                         &FLAGS_benchmark_time_unit) ||
         ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
-      for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
+      for (int j = i; j != *argc - 1; ++j) {
+        argv[j] = argv[j + 1];
+      }
 
       --(*argc);
       --i;
@@ -140,7 +140,9 @@ bool BenchmarkFamilies::FindBenchmarks(
     int per_family_instance_index = 0;
 
     // Family was deleted or benchmark doesn't match
-    if (!family) continue;
+    if (!family) {
+      continue;
+    }
 
     if (family->ArgsCnt() == -1) {
       family->Args({});
@@ -159,7 +161,9 @@ bool BenchmarkFamilies::FindBenchmarks(
     // reserve in the special case the regex ".", since we know the final
     // family size. this doesn't take into account any disabled benchmarks
     // so worst case we reserve more than we need.
-    if (spec == ".") benchmarks->reserve(benchmarks->size() + family_size);
+    if (spec == ".") {
+      benchmarks->reserve(benchmarks->size() + family_size);
+    }
 
     for (auto const& args : family->args_) {
       for (int num_threads : *thread_counts) {
@@ -177,7 +181,9 @@ bool BenchmarkFamilies::FindBenchmarks(
 
         // Only bump the next family index once we've estabilished that
         // at least one instance of this family will be run.
-        if (next_family_index == family_index) ++next_family_index;
+        if (next_family_index == family_index) {
+          ++next_family_index;
+        }
       }
     }
   }
@@ -474,7 +480,9 @@ const char* Benchmark::GetName() const { return name_.c_str(); }
 
 int Benchmark::ArgsCnt() const {
   if (args_.empty()) {
-    if (arg_names_.empty()) return -1;
+    if (arg_names_.empty()) {
+      return -1;
+    }
     return static_cast<int>(arg_names_.size());
   }
   return static_cast<int>(args_.front().size());
@@ -159,17 +159,23 @@ void RunInThread(const BenchmarkInstance* b, IterationCount iters,
 
 double ComputeMinTime(const benchmark::internal::BenchmarkInstance& b,
                       const BenchTimeType& iters_or_time) {
-  if (!IsZero(b.min_time())) return b.min_time();
+  if (!IsZero(b.min_time())) {
+    return b.min_time();
+  }
   // If the flag was used to specify number of iters, then return the default
   // min_time.
-  if (iters_or_time.tag == BenchTimeType::ITERS) return kDefaultMinTime;
+  if (iters_or_time.tag == BenchTimeType::ITERS) {
+    return kDefaultMinTime;
+  }
 
   return iters_or_time.time;
 }
 
 IterationCount ComputeIters(const benchmark::internal::BenchmarkInstance& b,
                             const BenchTimeType& iters_or_time) {
-  if (b.iterations() != 0) return b.iterations();
+  if (b.iterations() != 0) {
+    return b.iterations();
+  }
 
   // We've already concluded that this flag is currently used to pass
   // iters but do a check here again anyway.
@@ -297,7 +303,9 @@ BenchmarkRunner::IterationResults BenchmarkRunner::DoNIterations() {
 
   // The main thread has finished. Now let's wait for the other threads.
   manager->WaitForAllThreads();
-  for (std::thread& thread : pool) thread.join();
+  for (std::thread& thread : pool) {
+    thread.join();
+  }
 
   IterationResults i;
   // Acquire the measurements/counters from the manager, UNDER THE LOCK!
@@ -460,7 +468,9 @@ void BenchmarkRunner::DoOneRepetition() {
   // this warmup never happened except the fact that warmup_done is set. Every
   // other manipulation of the BenchmarkRunner instance would be a bug! Please
   // fix it.
-  if (!warmup_done) RunWarmUp();
+  if (!warmup_done) {
+    RunWarmUp();
+  }
 
   IterationResults i;
   // We *may* be gradually increasing the length (iteration count)
@@ -482,8 +492,10 @@ void BenchmarkRunner::DoOneRepetition() {
     const bool results_are_significant = !is_the_first_repetition ||
                                          has_explicit_iteration_count ||
                                          ShouldReportIterationResults(i);
 
-    if (results_are_significant) break;  // Good, let's report them!
+    // Good, let's report them!
+    if (results_are_significant) {
+      break;
+    }
 
     // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
     // iteration count, and run the benchmark again...
@@ -518,7 +530,9 @@ void BenchmarkRunner::DoOneRepetition() {
 
   if (reports_for_family) {
     ++reports_for_family->num_runs_done;
-    if (!report.skipped) reports_for_family->Runs.push_back(report);
+    if (!report.skipped) {
+      reports_for_family->Runs.push_back(report);
+    }
   }
 
   run_results.non_aggregates.push_back(report);
@@ -156,7 +156,9 @@ void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
   SetConsoleTextAttribute(stdout_handle, original_color_attrs);
 #else
   const char* color_code = GetPlatformColorCode(color);
-  if (color_code) out << FormatString("\033[0;3%sm", color_code);
+  if (color_code) {
+    out << FormatString("\033[0;3%sm", color_code);
+  }
   out << FormatString(fmt, args) << "\033[m";
 #endif
 }
@@ -113,8 +113,9 @@ static std::string FlagToEnvVar(const char* flag) {
   const std::string flag_str(flag);
 
   std::string env_var;
-  for (size_t i = 0; i != flag_str.length(); ++i)
+  for (size_t i = 0; i != flag_str.length(); ++i) {
     env_var += static_cast<char>(::toupper(flag_str.c_str()[i]));
+  }
 
   return env_var;
 }
@@ -167,7 +168,9 @@ std::map<std::string, std::string> KvPairsFromEnv(
   const std::string env_var = FlagToEnvVar(flag);
   const char* const value_str = getenv(env_var.c_str());
 
-  if (value_str == nullptr) return default_val;
+  if (value_str == nullptr) {
+    return default_val;
+  }
 
   std::map<std::string, std::string> value;
   if (!ParseKvPairs("Environment variable " + env_var, value_str, &value)) {
@@ -184,23 +187,31 @@ std::map<std::string, std::string> KvPairsFromEnv(
 const char* ParseFlagValue(const char* str, const char* flag,
                            bool def_optional) {
   // str and flag must not be nullptr.
-  if (str == nullptr || flag == nullptr) return nullptr;
+  if (str == nullptr || flag == nullptr) {
+    return nullptr;
+  }
 
   // The flag must start with "--".
   const std::string flag_str = std::string("--") + std::string(flag);
   const size_t flag_len = flag_str.length();
-  if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr;
+  if (strncmp(str, flag_str.c_str(), flag_len) != 0) {
+    return nullptr;
+  }
 
   // Skips the flag name.
   const char* flag_end = str + flag_len;
 
   // When def_optional is true, it's OK to not have a "=value" part.
-  if (def_optional && (flag_end[0] == '\0')) return flag_end;
+  if (def_optional && (flag_end[0] == '\0')) {
+    return flag_end;
+  }
 
   // If def_optional is true and there are more characters after the
   // flag name, or if def_optional is false, there must be a '=' after
   // the flag name.
-  if (flag_end[0] != '=') return nullptr;
+  if (flag_end[0] != '=') {
+    return nullptr;
+  }
 
   // Returns the string after "=".
   return flag_end + 1;
@@ -212,7 +223,9 @@ bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
   const char* const value_str = ParseFlagValue(str, flag, true);
 
   // Aborts if the parsing failed.
-  if (value_str == nullptr) return false;
+  if (value_str == nullptr) {
+    return false;
+  }
 
   // Converts the string value to a bool.
   *value = IsTruthyFlagValue(value_str);
@@ -225,7 +238,9 @@ bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) {
   const char* const value_str = ParseFlagValue(str, flag, false);
 
   // Aborts if the parsing failed.
-  if (value_str == nullptr) return false;
+  if (value_str == nullptr) {
+    return false;
+  }
 
   // Sets *value to the value of the flag.
   return ParseInt32(std::string("The value of flag --") + flag, value_str,
@@ -238,7 +253,9 @@ bool ParseDoubleFlag(const char* str, const char* flag, double* value) {
   const char* const value_str = ParseFlagValue(str, flag, false);
 
   // Aborts if the parsing failed.
-  if (value_str == nullptr) return false;
+  if (value_str == nullptr) {
+    return false;
+  }
 
   // Sets *value to the value of the flag.
   return ParseDouble(std::string("The value of flag --") + flag, value_str,
@@ -251,7 +268,9 @@ bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
   const char* const value_str = ParseFlagValue(str, flag, false);
 
   // Aborts if the parsing failed.
-  if (value_str == nullptr) return false;
+  if (value_str == nullptr) {
+    return false;
+  }
 
   *value = value_str;
   return true;
@@ -262,11 +281,15 @@ bool ParseKeyValueFlag(const char* str, const char* flag,
                        std::map<std::string, std::string>* value) {
   const char* const value_str = ParseFlagValue(str, flag, false);
 
-  if (value_str == nullptr) return false;
+  if (value_str == nullptr) {
+    return false;
+  }
 
   for (const auto& kvpair : StrSplit(value_str, ',')) {
     const auto kv = StrSplit(kvpair, '=');
-    if (kv.size() != 2) return false;
+    if (kv.size() != 2) {
+      return false;
+    }
     value->emplace(kv[0], kv[1]);
   }
 
@@ -157,7 +157,9 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
   typedef BenchmarkReporter::Run Run;
   std::vector<Run> results;
 
-  if (reports.size() < 2) return results;
+  if (reports.size() < 2) {
+    return results;
+  }
 
   // Accumulators.
   std::vector<ComplexityN> n;
@@ -189,8 +189,9 @@ void ConsoleReporter::PrintRunData(const Run& result) {
       unit = "%";
     } else {
       s = HumanReadableNumber(c.second.value, c.second.oneK);
-      if (c.second.flags & Counter::kIsRate)
+      if (c.second.flags & Counter::kIsRate) {
         unit = (c.second.flags & Counter::kInvert) ? "s" : "/s";
+      }
     }
     if (output_options_ & OO_Tabular) {
       printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(),
@@ -64,7 +64,9 @@ void Increment(UserCounters* l, UserCounters const& r) {
 }
 
 bool SameNames(UserCounters const& l, UserCounters const& r) {
-  if (&l == &r) return true;
+  if (&l == &r) {
+    return true;
+  }
   if (l.size() != r.size()) {
     return false;
   }
@@ -66,8 +66,10 @@ void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
   // save the names of all the user counters
   for (const auto& run : reports) {
     for (const auto& cnt : run.counters) {
-      if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
+      if (cnt.first == "bytes_per_second" ||
+          cnt.first == "items_per_second") {
         continue;
+      }
       user_counter_names_.insert(cnt.first);
     }
   }
@@ -75,7 +77,9 @@ void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
   // print the header
   for (auto B = elements.begin(); B != elements.end();) {
     Out << *B++;
-    if (B != elements.end()) Out << ",";
+    if (B != elements.end()) {
+      Out << ",";
+    }
   }
   for (auto B = user_counter_names_.begin();
        B != user_counter_names_.end();) {
@@ -88,8 +92,10 @@ void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
   // check that all the current counters are saved in the name set
   for (const auto& run : reports) {
     for (const auto& cnt : run.counters) {
-      if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
+      if (cnt.first == "bytes_per_second" ||
+          cnt.first == "items_per_second") {
         continue;
+      }
       BM_CHECK(user_counter_names_.find(cnt.first) !=
                user_counter_names_.end())
           << "All counters must be present in each run. "
@@ -89,11 +89,11 @@ std::string FormatKV(std::string const& key, double value) {
   std::stringstream ss;
   ss << '"' << StrEscape(key) << "\": ";
 
-  if (std::isnan(value))
+  if (std::isnan(value)) {
     ss << (value < 0 ? "-" : "") << "NaN";
-  else if (std::isinf(value))
+  } else if (std::isinf(value)) {
     ss << (value < 0 ? "-" : "") << "Infinity";
-  else {
+  } else {
     const auto max_digits10 =
         std::numeric_limits<decltype(value)>::max_digits10;
     const auto max_fractional_digits10 = max_digits10 - 1;
@@ -155,7 +155,9 @@ bool JSONReporter::ReportContext(const Context& context) {
         << FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing))
         << "\n";
     out << indent << "}";
-    if (i != info.caches.size() - 1) out << ",";
+    if (i != info.caches.size() - 1) {
+      out << ",";
+    }
     out << "\n";
   }
   indent = std::string(4, ' ');
@@ -163,7 +165,9 @@ bool JSONReporter::ReportContext(const Context& context) {
   out << indent << "\"load_avg\": [";
   for (auto it = info.load_avg.begin(); it != info.load_avg.end();) {
     out << *it++;
-    if (it != info.load_avg.end()) out << ",";
+    if (it != info.load_avg.end()) {
+      out << ",";
+    }
   }
   out << "],\n";
 
@@ -306,8 +310,9 @@ void JSONReporter::PrintRunData(Run const& run) {
 
   auto report_if_present = [&out, &indent](const std::string& label,
                                            int64_t val) {
-    if (val != MemoryManager::TombstoneValue)
+    if (val != MemoryManager::TombstoneValue) {
       out << ",\n" << indent << FormatKV(label, val);
+    }
   };
 
   report_if_present("total_allocated_bytes",
@@ -42,8 +42,9 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
   Out << LocalDateTimeString() << "\n";
 #endif
 
-  if (context.executable_name)
+  if (context.executable_name) {
     Out << "Running " << context.executable_name << "\n";
+  }
 
   const CPUInfo &info = context.cpu_info;
   Out << "Run on (" << info.num_cpus << " X "
@@ -54,8 +55,9 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
     for (auto &CInfo : info.caches) {
       Out << "  L" << CInfo.level << " " << CInfo.type << " "
          << (CInfo.size / 1024) << " KiB";
-      if (CInfo.num_sharing != 0)
+      if (CInfo.num_sharing != 0) {
        Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")";
+      }
      Out << "\n";
    }
  }
@@ -63,7 +65,9 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
    Out << "Load Average: ";
    for (auto It = info.load_avg.begin(); It != info.load_avg.end();) {
      Out << StrFormat("%.2f", *It++);
-      if (It != info.load_avg.end()) Out << ", ";
+      if (It != info.load_avg.end()) {
+        Out << ", ";
+      }
    }
    Out << "\n";
  }
@@ -105,13 +109,17 @@ std::string BenchmarkReporter::Run::benchmark_name() const {
 
 double BenchmarkReporter::Run::GetAdjustedRealTime() const {
   double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
-  if (iterations != 0) new_time /= static_cast<double>(iterations);
+  if (iterations != 0) {
+    new_time /= static_cast<double>(iterations);
+  }
   return new_time;
 }
 
 double BenchmarkReporter::Run::GetAdjustedCPUTime() const {
   double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit);
-  if (iterations != 0) new_time /= static_cast<double>(iterations);
+  if (iterations != 0) {
+    new_time /= static_cast<double>(iterations);
+  }
   return new_time;
 }
 
@@ -31,12 +31,16 @@ auto StatisticsSum = [](const std::vector<double>& v) {
 };
 
 double StatisticsMean(const std::vector<double>& v) {
-  if (v.empty()) return 0.0;
+  if (v.empty()) {
+    return 0.0;
+  }
   return StatisticsSum(v) * (1.0 / static_cast<double>(v.size()));
 }
 
 double StatisticsMedian(const std::vector<double>& v) {
-  if (v.size() < 3) return StatisticsMean(v);
+  if (v.size() < 3) {
+    return StatisticsMean(v);
+  }
   std::vector<double> copy(v);
 
   auto center = copy.begin() + v.size() / 2;
@@ -47,7 +51,9 @@ double StatisticsMedian(const std::vector<double>& v) {
   // before. Instead of resorting, we just look for the max value before it,
   // which is not necessarily the element immediately preceding `center` Since
   // `copy` is only partially sorted by `nth_element`.
-  if (v.size() % 2 == 1) return *center;
+  if (v.size() % 2 == 1) {
+    return *center;
+  }
   auto center2 = std::max_element(copy.begin(), center);
   return (*center + *center2) / 2.0;
 }
@@ -60,16 +66,22 @@ auto SumSquares = [](const std::vector<double>& v) {
 auto Sqr = [](const double dat) { return dat * dat; };
 auto Sqrt = [](const double dat) {
   // Avoid NaN due to imprecision in the calculations
-  if (dat < 0.0) return 0.0;
+  if (dat < 0.0) {
+    return 0.0;
+  }
   return std::sqrt(dat);
 };
 
 double StatisticsStdDev(const std::vector<double>& v) {
   const auto mean = StatisticsMean(v);
-  if (v.empty()) return mean;
+  if (v.empty()) {
+    return mean;
+  }
 
   // Sample standard deviation is undefined for n = 1
-  if (v.size() == 1) return 0.0;
+  if (v.size() == 1) {
+    return 0.0;
+  }
 
   const double avg_squares =
       SumSquares(v) * (1.0 / static_cast<double>(v.size()));
@@ -79,12 +91,16 @@ double StatisticsStdDev(const std::vector<double>& v) {
 }
 
 double StatisticsCV(const std::vector<double>& v) {
-  if (v.size() < 2) return 0.0;
+  if (v.size() < 2) {
+    return 0.0;
+  }
 
   const auto stddev = StatisticsStdDev(v);
   const auto mean = StatisticsMean(v);
 
-  if (std::fpclassify(mean) == FP_ZERO) return 0.0;
+  if (std::fpclassify(mean) == FP_ZERO) {
+    return 0.0;
+  }
 
   return stddev / mean;
 }
@@ -137,7 +153,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
   for (Run const& run : reports) {
     BM_CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
     BM_CHECK_EQ(run_iterations, run.iterations);
-    if (run.skipped) continue;
+    if (run.skipped) {
+      continue;
+    }
     real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
     cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
     // user counters
@@ -87,10 +87,14 @@ void ToExponentAndMantissa(double val, int precision, double one_k,
 }
 
 std::string ExponentToPrefix(int64_t exponent, bool iec) {
-  if (exponent == 0) return "";
+  if (exponent == 0) {
+    return {};
+  }
 
   const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1);
-  if (index >= kUnitsSize) return "";
+  if (index >= kUnitsSize) {
+    return {};
+  }
 
   const char* const* array =
       (exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
@@ -124,9 +128,12 @@ std::string StrFormatImp(const char* msg, va_list args) {
   va_end(args_cp);
 
   // handle empty expansion
-  if (ret == 0) return std::string{};
-  if (static_cast<std::size_t>(ret) < local_buff.size())
+  if (ret == 0) {
+    return {};
+  }
+  if (static_cast<std::size_t>(ret) < local_buff.size()) {
     return std::string(local_buff.data());
+  }
 
   // we did not provide a long enough buffer on our first attempt.
   // add 1 to size to account for null-byte in size cast to prevent overflow
@@ -153,7 +160,9 @@ std::string StrFormat(const char* format, ...) {
 }
 
 std::vector<std::string> StrSplit(const std::string& str, char delim) {
-  if (str.empty()) return {};
+  if (str.empty()) {
+    return {};
+  }
   std::vector<std::string> ret;
   size_t first = 0;
   size_t next = str.find(delim);
src/sysinfo.cc (110 changed lines)
@@ -213,14 +213,18 @@ template <class ArgT>
 bool ReadFromFile(std::string const& fname, ArgT* arg) {
   *arg = ArgT();
   std::ifstream f(fname.c_str());
-  if (!f.is_open()) return false;
+  if (!f.is_open()) {
+    return false;
+  }
   f >> *arg;
   return f.good();
 }
 
 CPUInfo::Scaling CpuScaling(int num_cpus) {
   // We don't have a valid CPU count, so don't even bother.
-  if (num_cpus <= 0) return CPUInfo::Scaling::UNKNOWN;
+  if (num_cpus <= 0) {
+    return CPUInfo::Scaling::UNKNOWN;
+  }
 #if defined(BENCHMARK_OS_QNX)
   return CPUInfo::Scaling::UNKNOWN;
 #elif !defined(BENCHMARK_OS_WINDOWS)
@@ -231,8 +235,9 @@ CPUInfo::Scaling CpuScaling(int num_cpus) {
   for (int cpu = 0; cpu < num_cpus; ++cpu) {
     std::string governor_file =
         StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
-    if (ReadFromFile(governor_file, &res) && res != "performance")
+    if (ReadFromFile(governor_file, &res) && res != "performance") {
       return CPUInfo::Scaling::ENABLED;
+    }
   }
   return CPUInfo::Scaling::DISABLED;
 #else
@@ -268,28 +273,35 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS() {
     CPUInfo::CacheInfo info;
     std::string fpath = StrCat(dir, "index", idx++, "/");
     std::ifstream f(StrCat(fpath, "size").c_str());
-    if (!f.is_open()) break;
+    if (!f.is_open()) {
+      break;
+    }
     std::string suffix;
     f >> info.size;
-    if (f.fail())
+    if (f.fail()) {
      PrintErrorAndDie("Failed while reading file '", fpath, "size'");
+    }
    if (f.good()) {
      f >> suffix;
-      if (f.bad())
+      if (f.bad()) {
        PrintErrorAndDie(
            "Invalid cache size format: failed to read size suffix");
-      else if (f && suffix != "K")
+      } else if (f && suffix != "K") {
        PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix);
-      else if (suffix == "K")
+      } else if (suffix == "K") {
        info.size *= 1024;
+      }
    }
-    if (!ReadFromFile(StrCat(fpath, "type"), &info.type))
+    if (!ReadFromFile(StrCat(fpath, "type"), &info.type)) {
      PrintErrorAndDie("Failed to read from file ", fpath, "type");
-    if (!ReadFromFile(StrCat(fpath, "level"), &info.level))
+    }
+    if (!ReadFromFile(StrCat(fpath, "level"), &info.level)) {
      PrintErrorAndDie("Failed to read from file ", fpath, "level");
+    }
    std::string map_str;
-    if (!ReadFromFile(StrCat(fpath, "shared_cpu_map"), &map_str))
+    if (!ReadFromFile(StrCat(fpath, "shared_cpu_map"), &map_str)) {
      PrintErrorAndDie("Failed to read from file ", fpath, "shared_cpu_map");
+    }
    info.num_sharing = CountSetBitsInCPUMap(map_str);
    res.push_back(info);
  }
@@ -334,15 +346,18 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
   using UPtr = std::unique_ptr<PInfo, decltype(&std::free)>;
   GetLogicalProcessorInformation(nullptr, &buffer_size);
   UPtr buff(static_cast<PInfo*>(std::malloc(buffer_size)), &std::free);
-  if (!GetLogicalProcessorInformation(buff.get(), &buffer_size))
+  if (!GetLogicalProcessorInformation(buff.get(), &buffer_size)) {
     PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ",
                      GetLastError());
+  }
 
   PInfo* it = buff.get();
   PInfo* end = buff.get() + (buffer_size / sizeof(PInfo));
 
   for (; it != end; ++it) {
-    if (it->Relationship != RelationCache) continue;
+    if (it->Relationship != RelationCache) {
+      continue;
+    }
     using BitSet = std::bitset<sizeof(ULONG_PTR) * CHAR_BIT>;
     BitSet b(it->ProcessorMask);
     // To prevent duplicates, only consider caches where CPU 0 is specified
@@ -475,8 +490,7 @@ std::string GetSystemName() {
 #endif  // def HOST_NAME_MAX
   char hostname[HOST_NAME_MAX];
   int retVal = gethostname(hostname, HOST_NAME_MAX);
-  if (retVal != 0) return std::string("");
-  return std::string(hostname);
+  return retVal != 0 ? std::string() : std::string(hostname);
 #endif  // Catch-all POSIX block.
 }
 
@@ -539,21 +553,28 @@ int GetNumCPUs() {
 class ThreadAffinityGuard final {
  public:
   ThreadAffinityGuard() : reset_affinity(SetAffinity()) {
-    if (!reset_affinity)
+    if (!reset_affinity) {
      std::cerr << "***WARNING*** Failed to set thread affinity. Estimated CPU "
                   "frequency may be incorrect.\n";
+    }
  }
 
  ~ThreadAffinityGuard() {
-    if (!reset_affinity) return;
+    if (!reset_affinity) {
+      return;
+    }
 
 #if defined(BENCHMARK_HAS_PTHREAD_AFFINITY)
    int ret = pthread_setaffinity_np(self, sizeof(previous_affinity),
                                     &previous_affinity);
-    if (ret == 0) return;
+    if (ret == 0) {
+      return;
+    }
 #elif defined(BENCHMARK_OS_WINDOWS_WIN32)
    DWORD_PTR ret = SetThreadAffinityMask(self, previous_affinity);
-    if (ret != 0) return;
+    if (ret != 0) {
+      return;
+    }
 #endif  // def BENCHMARK_HAS_PTHREAD_AFFINITY
    PrintErrorAndDie("Failed to reset thread affinity");
  }
@@ -570,22 +591,28 @@ class ThreadAffinityGuard final {
    self = pthread_self();
    ret = pthread_getaffinity_np(self, sizeof(previous_affinity),
                                 &previous_affinity);
-    if (ret != 0) return false;
+    if (ret != 0) {
+      return false;
+    }
 
    cpu_set_t affinity;
    memcpy(&affinity, &previous_affinity, sizeof(affinity));
 
    bool is_first_cpu = true;
 
-    for (int i = 0; i < CPU_SETSIZE; ++i)
+    for (int i = 0; i < CPU_SETSIZE; ++i) {
      if (CPU_ISSET(i, &affinity)) {
-        if (is_first_cpu)
+        if (is_first_cpu) {
          is_first_cpu = false;
-        else
+        } else {
          CPU_CLR(i, &affinity);
+        }
      }
+    }
 
-    if (is_first_cpu) return false;
+    if (is_first_cpu) {
+      return false;
+    }
 
    ret = pthread_setaffinity_np(self, sizeof(affinity), &affinity);
    return ret == 0;
@@ -650,7 +677,9 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
   }
 
   auto StartsWithKey = [](std::string const& Value, std::string const& Key) {
-    if (Key.size() > Value.size()) return false;
+    if (Key.size() > Value.size()) {
+      return false;
+    }
     auto Cmp = [&](char X, char Y) {
       return std::tolower(X) == std::tolower(Y);
     };
@@ -659,22 +688,30 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
 
   std::string ln;
   while (std::getline(f, ln)) {
-    if (ln.empty()) continue;
+    if (ln.empty()) {
+      continue;
+    }
    std::size_t split_idx = ln.find(':');
    std::string value;
-    if (split_idx != std::string::npos) value = ln.substr(split_idx + 1);
+    if (split_idx != std::string::npos) {
+      value = ln.substr(split_idx + 1);
+    }
    // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
    // accept positive values. Some environments (virtual machines) report zero,
    // which would cause infinite looping in WallTime_Init.
    if (StartsWithKey(ln, "cpu MHz")) {
      if (!value.empty()) {
        double cycles_per_second = benchmark::stod(value) * 1000000.0;
-        if (cycles_per_second > 0) return cycles_per_second;
+        if (cycles_per_second > 0) {
+          return cycles_per_second;
+        }
      }
    } else if (StartsWithKey(ln, "bogomips")) {
      if (!value.empty()) {
        bogo_clock = benchmark::stod(value) * 1000000.0;
-        if (bogo_clock < 0.0) bogo_clock = error_value;
+        if (bogo_clock < 0.0) {
+          bogo_clock = error_value;
+        }
      }
    }
  }
@@ -690,7 +727,9 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
   // If we found the bogomips clock, but nothing better, we'll use it (but
   // we're not happy about it); otherwise, fallback to the rough estimation
   // below.
-  if (bogo_clock >= 0.0) return bogo_clock;
+  if (bogo_clock >= 0.0) {
+    return bogo_clock;
+  }
 
 #elif defined BENCHMARK_HAS_SYSCTL
   constexpr auto* freqStr =
@@ -705,9 +744,13 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
 #endif
   unsigned long long hz = 0;
 #if defined BENCHMARK_OS_OPENBSD
-  if (GetSysctl(freqStr, &hz)) return static_cast<double>(hz * 1000000);
+  if (GetSysctl(freqStr, &hz)) {
+    return static_cast<double>(hz * 1000000);
+  }
 #else
-  if (GetSysctl(freqStr, &hz)) return static_cast<double>(hz);
+  if (GetSysctl(freqStr, &hz)) {
+    return static_cast<double>(hz);
+  }
 #endif
   fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
           freqStr, strerror(errno));
@@ -723,9 +766,10 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
       SUCCEEDED(
           SHGetValueA(HKEY_LOCAL_MACHINE,
                       "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
-                      "~MHz", nullptr, &data, &data_size)))
+                      "~MHz", nullptr, &data, &data_size))) {
     return static_cast<double>(static_cast<int64_t>(data) *
                                static_cast<int64_t>(1000 * 1000));  // was mhz
+  }
 #elif defined(BENCHMARK_OS_SOLARIS)
   kstat_ctl_t* kc = kstat_open();
   if (!kc) {
@@ -144,8 +144,9 @@ double ProcessCPUUsage() {
   // FIXME We want to use clock_gettime, but its not available in MacOS 10.11.
   // See https://github.com/google/benchmark/pull/292
   struct timespec spec;
-  if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0)
+  if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0) {
     return MakeTime(spec);
+  }
   DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed");
 #else
   struct rusage ru;
@@ -200,7 +201,9 @@ double ThreadCPUUsage() {
   DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed");
 #elif defined(CLOCK_THREAD_CPUTIME_ID)
   struct timespec ts;
-  if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts);
+  if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) {
+    return MakeTime(ts);
+  }
   DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed");
 #else
 #error Per-thread timing is not available on your system.
@@ -47,7 +47,9 @@ int main(int argc, char** argv) {
   // Make a fake argv and append the new --benchmark_min_time=<foo> to it.
   int fake_argc = argc + 1;
   const char** fake_argv = new const char*[static_cast<size_t>(fake_argc)];
-  for (int i = 0; i < argc; ++i) fake_argv[i] = argv[i];
+  for (int i = 0; i < argc; ++i) {
+    fake_argv[i] = argv[i];
+  }
   fake_argv[argc] = "--benchmark_min_time=4x";
 
   benchmark::Initialize(&fake_argc, const_cast<char**>(fake_argv));
@@ -73,7 +73,9 @@ int main(int argc, char** argv) {
   int fake_argc = argc + 1;
   const char** fake_argv = new const char*[static_cast<size_t>(fake_argc)];
 
-  for (int i = 0; i < argc; ++i) fake_argv[i] = argv[i];
+  for (int i = 0; i < argc; ++i) {
+    fake_argv[i] = argv[i];
+  }
 
   const char* no_suffix = "--benchmark_min_time=4";
   const char* with_suffix = "--benchmark_min_time=4.0s";
@@ -44,7 +44,9 @@ double CalculatePi(int depth) {
 
 std::set<int64_t> ConstructRandomSet(int64_t size) {
   std::set<int64_t> s;
-  for (int i = 0; i < size; ++i) s.insert(s.end(), i);
+  for (int i = 0; i < size; ++i) {
+    s.insert(s.end(), i);
+  }
   return s;
 }
 
@@ -55,7 +57,9 @@ std::vector<int>* test_vector = nullptr;
 
 static void BM_Factorial(benchmark::State& state) {
   int fac_42 = 0;
-  for (auto _ : state) fac_42 = Factorial(8);
+  for (auto _ : state) {
+    fac_42 = Factorial(8);
+  }
   // Prevent compiler optimizations
   std::stringstream ss;
   ss << fac_42;
@@ -66,7 +70,9 @@ BENCHMARK(BM_Factorial)->UseRealTime();
 
 static void BM_CalculatePiRange(benchmark::State& state) {
   double pi = 0.0;
-  for (auto _ : state) pi = CalculatePi(static_cast<int>(state.range(0)));
+  for (auto _ : state) {
+    pi = CalculatePi(static_cast<int>(state.range(0)));
+  }
   std::stringstream ss;
   ss << pi;
   state.SetLabel(ss.str());
@@ -90,7 +96,9 @@ static void BM_SetInsert(benchmark::State& state) {
     state.PauseTiming();
     data = ConstructRandomSet(state.range(0));
     state.ResumeTiming();
-    for (int j = 0; j < state.range(1); ++j) data.insert(rand());
+    for (int j = 0; j < state.range(1); ++j) {
+      data.insert(rand());
+    }
   }
   state.SetItemsProcessed(state.iterations() * state.range(1));
   state.SetBytesProcessed(state.iterations() * state.range(1) *
@@ -108,7 +116,9 @@ static void BM_Sequential(benchmark::State& state) {
   ValueType v = 42;
   for (auto _ : state) {
     Container c;
-    for (int64_t i = state.range(0); --i;) c.push_back(v);
+    for (int64_t i = state.range(0); --i;) {
+      c.push_back(v);
+    }
   }
   const int64_t items_processed = state.iterations() * state.range(0);
   state.SetItemsProcessed(items_processed);
@@ -141,10 +151,11 @@ static void BM_SetupTeardown(benchmark::State& state) {
   int i = 0;
   for (auto _ : state) {
     std::lock_guard<std::mutex> l(test_vector_mu);
-    if (i % 2 == 0)
+    if (i % 2 == 0) {
      test_vector->push_back(i);
-    else
+    } else {
      test_vector->pop_back();
+    }
    ++i;
  }
  if (state.thread_index() == 0) {
@@ -156,8 +167,9 @@ BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
 static void BM_LongTest(benchmark::State& state) {
   double tracker = 0.0;
   for (auto _ : state) {
-    for (int i = 0; i < state.range(0); ++i)
+    for (int i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(tracker += i);
+    }
   }
 }
 BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);
@@ -46,14 +46,18 @@ void try_invalid_pause_resume(benchmark::State& state) {
 void BM_diagnostic_test(benchmark::State& state) {
   static bool called_once = false;
 
-  if (called_once == false) try_invalid_pause_resume(state);
+  if (called_once == false) {
+    try_invalid_pause_resume(state);
+  }
 
   for (auto _ : state) {
     auto iterations = double(state.iterations()) * double(state.iterations());
     benchmark::DoNotOptimize(iterations);
   }
 
-  if (called_once == false) try_invalid_pause_resume(state);
+  if (called_once == false) {
+    try_invalid_pause_resume(state);
+  }
 
   called_once = true;
 }
@@ -62,14 +66,18 @@ BENCHMARK(BM_diagnostic_test);
 void BM_diagnostic_test_keep_running(benchmark::State& state) {
   static bool called_once = false;
 
-  if (called_once == false) try_invalid_pause_resume(state);
+  if (called_once == false) {
+    try_invalid_pause_resume(state);
+  }
 
   while (state.KeepRunning()) {
     auto iterations = double(state.iterations()) * double(state.iterations());
     benchmark::DoNotOptimize(iterations);
   }
 
-  if (called_once == false) try_invalid_pause_resume(state);
+  if (called_once == false) {
+    try_invalid_pause_resume(state);
+  }
 
   called_once = true;
 }
@@ -71,9 +71,10 @@ BENCHMARK(BM_FooBa);
 
 int main(int argc, char** argv) {
   bool list_only = false;
-  for (int i = 0; i < argc; ++i)
+  for (int i = 0; i < argc; ++i) {
     list_only |= std::string(argv[i]).find("--benchmark_list_tests") !=
                  std::string::npos;
+  }
 
   benchmark::Initialize(&argc, argv);
 
@@ -22,8 +22,9 @@ void MyBusySpinwait() {
     const auto elapsed = now - start;
 
     if (std::chrono::duration<double, std::chrono::seconds::period>(elapsed) >=
-        time_frame)
+        time_frame) {
       return;
+    }
   }
 }
 
@@ -112,7 +112,9 @@ void CheckCase(std::stringstream& remaining_output, TestCase const& TC,
         << "\n    actual regex string \"" << TC.substituted_regex << "\""
         << "\n    started matching near: " << first_line;
   }
-  if (TC.regex->Match(line)) return;
+  if (TC.regex->Match(line)) {
+    return;
+  }
   BM_CHECK(TC.match_rule != MR_Next)
       << "Expected line \"" << line << "\" to match regex \"" << TC.regex_str
       << "\""
@@ -159,10 +161,14 @@ class TestReporter : public benchmark::BenchmarkReporter {
   }
 
   void ReportRuns(const std::vector<Run>& report) override {
-    for (auto rep : reporters_) rep->ReportRuns(report);
+    for (auto rep : reporters_) {
+      rep->ReportRuns(report);
+    }
   }
   void Finalize() override {
-    for (auto rep : reporters_) rep->Finalize();
+    for (auto rep : reporters_) {
+      rep->Finalize();
+    }
   }
 
  private:
@@ -224,7 +230,9 @@ void ResultsChecker::CheckResults(std::stringstream& output) {
     // clear before calling tellg()
     output.clear();
     // seek to zero only when needed
-    if (output.tellg() > start) output.seekg(start);
+    if (output.tellg() > start) {
+      output.seekg(start);
+    }
     // and just in case
     output.clear();
   }
@@ -265,7 +273,9 @@ void ResultsChecker::SetHeader_(const std::string& csv_header) {
 
 // set the values for a benchmark
 void ResultsChecker::SetValues_(const std::string& entry_csv_line) {
-  if (entry_csv_line.empty()) return;  // some lines are empty
+  if (entry_csv_line.empty()) {
+    return;
+  }  // some lines are empty
   BM_CHECK(!field_names.empty());
   auto vals = SplitCsv_(entry_csv_line);
   BM_CHECK_EQ(vals.size(), field_names.size());
@@ -279,21 +289,33 @@ void ResultsChecker::SetValues_(const std::string& entry_csv_line) {
 // a quick'n'dirty csv splitter (eliminating quotes)
 std::vector<std::string> ResultsChecker::SplitCsv_(const std::string& line) {
   std::vector<std::string> out;
-  if (line.empty()) return out;
-  if (!field_names.empty()) out.reserve(field_names.size());
+  if (line.empty()) {
+    return out;
+  }
+  if (!field_names.empty()) {
+    out.reserve(field_names.size());
+  }
   size_t prev = 0, pos = line.find_first_of(','), curr = pos;
   while (pos != line.npos) {
     BM_CHECK(curr > 0);
-    if (line[prev] == '"') ++prev;
-    if (line[curr - 1] == '"') --curr;
+    if (line[prev] == '"') {
+      ++prev;
+    }
+    if (line[curr - 1] == '"') {
+      --curr;
+    }
     out.push_back(line.substr(prev, curr - prev));
     prev = pos + 1;
     pos = line.find_first_of(',', pos + 1);
     curr = pos;
   }
   curr = line.size();
-  if (line[prev] == '"') ++prev;
-  if (line[curr - 1] == '"') --curr;
+  if (line[prev] == '"') {
+    ++prev;
+  }
+  if (line[curr - 1] == '"') {
+    --curr;
+  }
   out.push_back(line.substr(prev, curr - prev));
   return out;
 }
@@ -308,7 +330,9 @@ size_t AddChecker(const std::string& bm_name, const ResultsCheckFn& fn) {
 
 int Results::NumThreads() const {
   auto pos = name.find("/threads:");
-  if (pos == name.npos) return 1;
+  if (pos == name.npos) {
+    return 1;
+  }
   auto end = name.find('/', pos + 9);
   std::stringstream ss;
   ss << name.substr(pos + 9, end);
@@ -378,7 +402,9 @@ int SetSubstitutions(
        break;
      }
    }
-    if (!exists) subs.push_back(std::move(KV));
+    if (!exists) {
+      subs.push_back(std::move(KV));
+    }
  }
  return 0;
 }
@@ -449,11 +475,14 @@ void RunOutputTests(int argc, char* argv[]) {
 BENCHMARK_RESTORE_DEPRECATED_WARNING
 
 int SubstrCnt(const std::string& haystack, const std::string& pat) {
-  if (pat.length() == 0) return 0;
+  if (pat.length() == 0) {
+    return 0;
+  }
   int count = 0;
   for (size_t offset = haystack.find(pat); offset != std::string::npos;
-       offset = haystack.find(pat, offset + pat.length()))
+       offset = haystack.find(pat, offset + pat.length())) {
     ++count;
+  }
   return count;
 }
 
@@ -471,7 +500,9 @@ static char RandomHexChar() {
 static std::string GetRandomFileName() {
   std::string model = "test.%%%%%%";
   for (auto& ch : model) {
-    if (ch == '%') ch = RandomHexChar();
+    if (ch == '%') {
+      ch = RandomHexChar();
+    }
   }
   return model;
 }
@@ -488,7 +519,9 @@ static std::string GetTempFileName() {
   int retries = 3;
   while (--retries) {
     std::string name = GetRandomFileName();
-    if (!FileExists(name)) return name;
+    if (!FileExists(name)) {
+      return name;
+    }
   }
   std::cerr << "Failed to create unique temporary file name\n";
   std::flush(std::cerr);
@@ -226,9 +226,13 @@ void measure(size_t threadcount, PerfCounterValues* before,
   // threadpool.
   auto counters =
       PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2});
-  for (auto& t : threads) t = std::thread(work);
+  for (auto& t : threads) {
+    t = std::thread(work);
+  }
   counters.Snapshot(before);
-  for (auto& t : threads) t.join();
+  for (auto& t : threads) {
+    t.join();
+  }
   counters.Snapshot(after);
 }
 
@@ -39,7 +39,9 @@ int main(int argc, char** argv) {
   // to it.
   int fake_argc = argc + 1;
   const char** fake_argv = new const char*[static_cast<size_t>(fake_argc)];
-  for (int i = 0; i < argc; ++i) fake_argv[i] = argv[i];
+  for (int i = 0; i < argc; ++i) {
+    fake_argv[i] = argv[i];
+  }
   fake_argv[argc] = "--benchmark_min_time=4x";
 
   std::unique_ptr<benchmark::ProfilerManager> pm(new TestProfilerManager());
@@ -86,8 +86,9 @@ void BM_extra_args(benchmark::State& st, const char* label) {
 int RegisterFromFunction() {
   std::pair<const char*, const char*> cases[] = {
       {"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}};
-  for (auto const& c : cases)
+  for (auto const& c : cases) {
     benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second);
+  }
   return 0;
 }
 int dummy2 = RegisterFromFunction();
@@ -146,8 +146,9 @@ void BM_error_after_running(benchmark::State& state) {
     auto iterations = double(state.iterations()) * double(state.iterations());
     benchmark::DoNotOptimize(iterations);
   }
-  if (state.thread_index() <= (state.threads() / 2))
+  if (state.thread_index() <= (state.threads() / 2)) {
     state.SkipWithError("error message");
+  }
 }
 BENCHMARK(BM_error_after_running)->ThreadRange(1, 8);
 ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"},
@@ -166,8 +166,9 @@ ADD_CASES(
 // VS2013 does not allow this function to be passed as a lambda argument
 // to CHECK_BENCHMARK_RESULTS()
 void CheckThousands(Results const& e) {
-  if (e.name != "BM_Counters_Thousands/repeats:2")
+  if (e.name != "BM_Counters_Thousands/repeats:2") {
     return;  // Do not check the aggregates!
+  }
 
   // check that the values are within 0.01% of the expected values
   CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000,