checked format before pull request

Ismael 2016-06-02 19:42:08 +02:00
parent 212cfe1c2e
commit 11e3043554
6 changed files with 95 additions and 95 deletions


@@ -261,16 +261,16 @@ typedef double(BigOFunc)(size_t);
class State {
public:
State(size_t max_iters, bool has_x, int x, bool has_y, int y,
int thread_i, int n_threads);
-// Returns true iff the benchmark should continue through another iteration.
+// Returns true if the benchmark should continue through another iteration.
// NOTE: A benchmark may not return from the test until KeepRunning() has
// returned false.
bool KeepRunning() {
if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
assert(!finished_);
started_ = true;
ResumeTiming();
}
bool const res = total_iterations_++ < max_iterations;
if (BENCHMARK_BUILTIN_EXPECT(!res, false)) {
@@ -365,7 +365,7 @@ public:
// represent the length of N.
BENCHMARK_ALWAYS_INLINE
void SetComplexityN(size_t complexity_n) {
complexity_n_ = complexity_n;
}
BENCHMARK_ALWAYS_INLINE
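These two hunks touch State::KeepRunning() and State::SetComplexityN(). As a rough sketch of how they are driven from user code (the benchmark name and ranges below are illustrative, not part of this commit), a complexity benchmark typically looks like:

#include <string>
#include "benchmark/benchmark.h"

static void BM_StringCompare(benchmark::State& state) {
  std::string s1(state.range_x(), '-');
  std::string s2(state.range_x(), '-');
  // The benchmark must keep looping until KeepRunning() returns false.
  while (state.KeepRunning()) {
    benchmark::DoNotOptimize(s1.compare(s2));
  }
  // Records N so ComputeBigO() can fit a curve over all instances later.
  state.SetComplexityN(state.range_x());
}
BENCHMARK(BM_StringCompare)->Range(1 << 10, 1 << 18)->Complexity(benchmark::oN);
BENCHMARK_MAIN();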


@@ -156,11 +156,11 @@ private:
// Simple reporter that outputs benchmark data to the console. This is the
// default reporter used by RunSpecifiedBenchmarks().
class ConsoleReporter : public BenchmarkReporter {
public:
virtual bool ReportContext(const Context& context);
virtual void ReportRuns(const std::vector<Run>& reports);
protected:
virtual void PrintRunData(const Run& report);
size_t name_field_width_;
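As the comment notes, ConsoleReporter is what RunSpecifiedBenchmarks() falls back to, so a caller only constructs a reporter to override that choice. Roughly (a sketch against the API as of this revision; Initialize's exact signature is assumed):

#include "benchmark/benchmark.h"

int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv);
  benchmark::ConsoleReporter reporter;           // same class the default path picks
  benchmark::RunSpecifiedBenchmarks(&reporter);  // omit the argument to get the default
  return 0;
}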


@@ -277,7 +277,7 @@ class TimerManager {
int phase_number_cp = phase_number_;
auto cb = [this, phase_number_cp]() {
return this->phase_number_ > phase_number_cp ||
entered_ == running_threads_; // A thread has aborted in error
};
phase_condition_.wait(ml.native_handle(), cb);
if (phase_number_ > phase_number_cp)
@@ -745,15 +745,15 @@ void RunInThread(const benchmark::internal::Benchmark::Instance* b,
MutexLock l(GetBenchmarkLock());
total->bytes_processed += st.bytes_processed();
total->items_processed += st.items_processed();
total->complexity_n += st.complexity_length_n();
}
timer_manager->Finalize();
}
void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
BenchmarkReporter* br,
std::vector<BenchmarkReporter::Run>& complexity_reports)
EXCLUDES(GetBenchmarkLock()) {
size_t iters = 1;
@@ -764,7 +764,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
pool.resize(b.threads);
const int repeats = b.repetitions != 0 ? b.repetitions
: FLAGS_benchmark_repetitions;
for (int i = 0; i < repeats; i++) {
std::string mem;
for (;;) {
@@ -844,28 +844,28 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
report.time_unit = b.time_unit;
if (!report.error_occurred) {
double bytes_per_second = 0;
if (total.bytes_processed > 0 && seconds > 0.0) {
bytes_per_second = (total.bytes_processed / seconds);
}
double items_per_second = 0;
if (total.items_processed > 0 && seconds > 0.0) {
items_per_second = (total.items_processed / seconds);
}
if (b.use_manual_time) {
report.real_accumulated_time = manual_accumulated_time;
} else {
report.real_accumulated_time = real_accumulated_time;
}
report.cpu_accumulated_time = cpu_accumulated_time;
report.bytes_per_second = bytes_per_second;
report.items_per_second = items_per_second;
report.complexity_n = total.complexity_n;
report.complexity = b.complexity;
report.complexity_lambda = b.complexity_lambda;
if(report.complexity != oNone)
complexity_reports.push_back(report);
}
reports.push_back(report);
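bytes_per_second and items_per_second stay at zero unless the benchmark fed totals into State, which is what the accumulation in RunInThread() above consumes. A benchmark that populates both might look like this (illustrative only; BM_Copy and its range are not from this patch):

#include <cstdint>
#include <cstring>
#include <vector>
#include "benchmark/benchmark.h"

static void BM_Copy(benchmark::State& state) {
  std::vector<char> src(state.range_x()), dst(state.range_x());
  while (state.KeepRunning()) {
    std::memcpy(dst.data(), src.data(), static_cast<size_t>(state.range_x()));
  }
  // These feed total.bytes_processed / total.items_processed above.
  state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * state.range_x());
  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_Copy)->Range(8, 8 << 10);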
@@ -893,12 +893,12 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
}
std::vector<BenchmarkReporter::Run> additional_run_stats = ComputeStats(reports);
reports.insert(reports.end(), additional_run_stats.begin(),
additional_run_stats.end());
if((b.complexity != oNone) && b.last_benchmark_instance) {
additional_run_stats = ComputeBigO(complexity_reports);
reports.insert(reports.end(), additional_run_stats.begin(),
additional_run_stats.end());
complexity_reports.clear();
}
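ComputeBigO() therefore runs once per benchmark family, on the reports every Range() instance pushed into complexity_reports, and only when a complexity was requested. The BigOFunc typedef from the first file also allows a user-supplied curve; for instance (hypothetical registration, assuming the Complexity(BigOFunc*) overload added in this patch series):

// Fit against a user-provided curve instead of a predefined BigO enum value.
BENCHMARK(BM_StringCompare)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 18)
    ->Complexity([](size_t n) -> double { return static_cast<double>(n); });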
@@ -964,56 +964,56 @@ void State::SetLabel(const char* label) {
}
namespace internal {
namespace {
void RunMatchingBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
BenchmarkReporter* reporter) {
CHECK(reporter != nullptr);
// Determine the width of the name field using a minimum width of 10.
bool has_repetitions = FLAGS_benchmark_repetitions > 1;
size_t name_field_width = 10;
for (const Benchmark::Instance& benchmark : benchmarks) {
name_field_width =
std::max<size_t>(name_field_width, benchmark.name.size());
has_repetitions |= benchmark.repetitions > 1;
}
if (has_repetitions)
name_field_width += std::strlen("_stddev");
// Print header here
BenchmarkReporter::Context context;
context.num_cpus = NumCPUs();
context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f;
context.cpu_scaling_enabled = CpuScalingEnabled();
context.name_field_width = name_field_width;
// Keep track of running times of all instances of current benchmark
std::vector<BenchmarkReporter::Run> complexity_reports;
if (reporter->ReportContext(context)) {
for (const auto& benchmark : benchmarks) {
RunBenchmark(benchmark, reporter, complexity_reports);
}
}
}
std::unique_ptr<BenchmarkReporter> GetDefaultReporter() {
typedef std::unique_ptr<BenchmarkReporter> PtrType;
if (FLAGS_benchmark_format == "console") {
return PtrType(new ConsoleReporter);
} else if (FLAGS_benchmark_format == "json") {
return PtrType(new JSONReporter);
} else if (FLAGS_benchmark_format == "csv") {
return PtrType(new CSVReporter);
} else {
std::cerr << "Unexpected format: '" << FLAGS_benchmark_format << "'\n";
std::exit(1);
}
}
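GetDefaultReporter() is the single place where the --benchmark_format flag is interpreted, so the three accepted values map one-to-one onto the reporter classes; constructing one directly and handing it to RunSpecifiedBenchmarks() bypasses the flag entirely. Inside main(), after Initialize(), that is roughly (sketch):

std::unique_ptr<benchmark::BenchmarkReporter> r(new benchmark::JSONReporter);
benchmark::RunSpecifiedBenchmarks(r.get());  // same effect as --benchmark_format=json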
} // end namespace
} // end namespace internal
size_t RunSpecifiedBenchmarks() {


@@ -194,9 +194,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
mean_data.benchmark_name = reports[0].benchmark_name + "_mean";
mean_data.iterations = run_iterations;
mean_data.real_accumulated_time = real_accumulated_time_stat.Mean() *
run_iterations;
mean_data.cpu_accumulated_time = cpu_accumulated_time_stat.Mean() *
run_iterations;
mean_data.bytes_per_second = bytes_per_second_stat.Mean();
mean_data.items_per_second = items_per_second_stat.Mean();
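These "_mean"/"_stddev" rows only appear when a benchmark family produced more than one run, i.e. when repetitions were requested; for example (sketch, reusing the hypothetical BM_StringCompare and assuming the Repetitions() setter matches the b.repetitions field used above):

BENCHMARK(BM_StringCompare)->Range(1 << 10, 1 << 18)->Repetitions(5);
// or, for every benchmark in the binary: --benchmark_repetitions=5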


@@ -26,15 +26,15 @@
namespace benchmark {
// Return a vector containing the mean and standard deviation information for
// the specified list of reports. If 'reports' contains fewer than two
// non-errored runs an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeStats(
const std::vector<BenchmarkReporter::Run>& reports);
// Return a vector containing the bigO and RMS information for the specified
// list of reports. If 'reports.size() < 2' an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeBigO(
const std::vector<BenchmarkReporter::Run>& reports);
// This data structure will contain the result returned by MinimalLeastSq
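Both helpers take the per-run reports the runner collected and hand back synthetic rows; a minimal direct call looks roughly like this (sketch only; complexity.h is an internal header of the library, and the field initialization is abbreviated to what the two functions actually need):

#include <vector>
#include "complexity.h"

int main() {
  std::vector<benchmark::BenchmarkReporter::Run> runs(2);
  runs[0].benchmark_name = runs[1].benchmark_name = "BM_demo/64";
  runs[0].iterations = runs[1].iterations = 1000;
  runs[0].real_accumulated_time = 1.0;
  runs[1].real_accumulated_time = 1.2;
  runs[0].complexity_n = runs[1].complexity_n = 64;
  runs[0].complexity = runs[1].complexity = benchmark::oN;
  auto stats = benchmark::ComputeStats(runs);  // "_mean" and "_stddev" rows
  auto fit = benchmark::ComputeBigO(runs);     // BigO and RMS rows
  return (stats.empty() || fit.empty()) ? 1 : 0;
}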


@@ -155,7 +155,7 @@ void JSONReporter::PrintRunData(Run const& run) {
} else if(run.report_rms) {
out << indent
<< FormatKV("rms", RoundDouble(run.GetAdjustedCPUTime()*100))
-<< "%";
+<< '%';
}
if (run.bytes_per_second > 0.0) {
out << ",\n" << indent