Merge branch 'main' into fixsan

dominic 2025-01-30 11:52:41 +00:00 committed by GitHub
commit 20fcc9187a
17 changed files with 223 additions and 124 deletions

View File

@ -17,7 +17,7 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [ubuntu-22.04, ubuntu-20.04, macos-latest]
os: [ubuntu-22.04, ubuntu-20.04, ubuntu-22.04-arm, macos-latest]
build_type: ['Release', 'Debug']
compiler: ['g++', 'clang++']
lib: ['shared', 'static']

View File

@ -11,7 +11,7 @@ repos:
types_or: [ python, pyi ]
args: [ "--ignore-missing-imports", "--scripts-are-modules" ]
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.9.1
rev: v0.9.2
hooks:
- id: ruff
args: [ --fix, --exit-non-zero-on-fix ]

View File

@ -83,10 +83,10 @@ def IsHeaderFile(filename):
def GetCompilationInfoForFile(filename):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
# The compilation_commands.json file generated by CMake does not have
# entries for header files. So we do our best by asking the db for flags for
# a corresponding source file, if any. If one exists, the flags for that
# file should be good enough.
if IsHeaderFile(filename):
basename = os.path.splitext(filename)[0]
for extension in SOURCE_EXTENSIONS:
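
The lookup described in the comment continues past this excerpt; a minimal sketch of the idea, assuming the usual .ycm_extra_conf.py helpers (`database`, `SOURCE_EXTENSIONS`) are in scope, with illustrative names rather than the file's exact body:

# Sketch: for a header, try sibling source files and reuse their flags.
if IsHeaderFile(filename):
    basename = os.path.splitext(filename)[0]
    for extension in SOURCE_EXTENSIONS:
        replacement_file = basename + extension
        if os.path.exists(replacement_file):
            compilation_info = database.GetCompilationInfoForFile(replacement_file)
            if compilation_info.compiler_flags_:
                return compilation_info
    return None
return database.GetCompilationInfoForFile(filename)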

View File

@ -60,7 +60,8 @@ class __OptionMaker:
"""
class Options:
"""Pure data class to store options calls, along with the benchmarked function."""
"""Pure data class to store options calls, along with the benchmarked
function."""
def __init__(self, func):
self.func = func
@ -83,8 +84,8 @@ class __OptionMaker:
def __decorator(func_or_options):
options = self.make(func_or_options)
options.builder_calls.append((builder_name, args, kwargs))
# The decorator returns Options so it is not technically a decorator
# and needs a final call to @register
# The decorator returns Options so it is not technically a
# decorator and needs a final call to @register
return options
return __decorator
@ -93,8 +94,8 @@ class __OptionMaker:
# Alias for nicer API.
# We have to instantiate an object, even if stateless, to be able to use __getattr__
# on option.range
# We have to instantiate an object, even if stateless, to be able to use
# __getattr__ on option.range
option = __OptionMaker()
@ -104,8 +105,8 @@ def register(undefined=None, *, name=None):
# Decorator is called without parenthesis so we return a decorator
return lambda f: register(f, name=name)
# We have either the function to benchmark (simple case) or an instance of Options
# (@option._ case).
# We have either the function to benchmark (simple case) or an instance of
# Options (@option._ case).
options = __OptionMaker.make(undefined)
if name is None:
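
A short usage sketch of the two cases the comment distinguishes, assuming the bindings are imported as `benchmark`; `option.range` is the builder call named in the comments above, and the argument values are made up:

import google_benchmark as benchmark

@benchmark.register          # simple case: register(func)
def plain(state):
    while state:
        pass

@benchmark.register          # @option._ case: register(Options instance)
@benchmark.option.range(8, 64)
def ranged(state):
    while state:
        pass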

View File

@ -13,7 +13,8 @@
# limitations under the License.
"""Example of Python using C++ benchmark framework.
To run this example, you must first install the `google_benchmark` Python package.
To run this example, you must first install the `google_benchmark` Python
package.
To install using `setup.py`, download and extract the `google_benchmark` source.
In the extracted directory, execute:
@ -57,7 +58,7 @@ def skipped(state):
state.skip_with_error("some error")
return # NOTE: You must explicitly return, or benchmark will continue.
... # Benchmark code would be here.
# Benchmark code would be here.
@benchmark.register
@ -78,7 +79,6 @@ def custom_counters(state):
num_foo = 0.0
while state:
# Benchmark some code here
pass
# Collect some custom metric named foo
num_foo += 0.13

View File

@ -68,9 +68,10 @@ target-version = "py311"
[tool.ruff.lint]
# Enable pycodestyle (`E`, `W`), Pyflakes (`F`), and isort (`I`) codes by default.
select = ["E", "F", "I", "W"]
select = ["ASYNC", "B", "C4", "C90", "E", "F", "I", "PERF", "PIE", "PT018", "RUF", "SIM", "UP", "W"]
ignore = [
"E501", # line too long
"PLW2901", # redefined-loop-name
"UP031", # printf-string-formatting
]
[tool.ruff.lint.isort]

View File

@ -4,8 +4,9 @@ import platform
import re
import shutil
import sys
from collections.abc import Generator
from pathlib import Path
from typing import Any, Generator
from typing import Any
import setuptools
from setuptools.command import build_ext
@ -86,15 +87,14 @@ class BuildBazelExtension(build_ext.build_ext):
This is done in the ``bazel_build`` method, so it's not necessary to
do again in the `build_ext` base class.
"""
pass
def bazel_build(self, ext: BazelExtension) -> None:
def bazel_build(self, ext: BazelExtension) -> None: # noqa: C901
"""Runs the bazel build to create the package."""
temp_path = Path(self.build_temp)
# We round to the minor version, which makes rules_python
# look up the latest available patch version internally.
python_version = "{0}.{1}".format(*sys.version_info[:2])
python_version = "{}.{}".format(*sys.version_info[:2])
bazel_argv = [
"bazel",
@ -142,9 +142,7 @@ class BuildBazelExtension(build_ext.build_ext):
# we do not want the bare .so file included
# when building for ABI3, so we require a
# full and exact match on the file extension.
if "".join(fp.suffixes) == suffix:
should_copy = True
elif fp.suffix == ".pyi":
if "".join(fp.suffixes) == suffix or fp.suffix == ".pyi":
should_copy = True
elif Path(root) == srcdir and f == "py.typed":
# copy py.typed, but only at the package root.
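
To make the "full and exact match" point concrete, here is what pathlib reports for an ABI3 extension; the file name is only an example:

from pathlib import Path

fp = Path("google_benchmark/_benchmark.abi3.so")
print("".join(fp.suffixes))  # '.abi3.so' -> compared against the full suffix
print(fp.suffix)             # '.so'      -> a bare .so alone must not match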
@ -155,7 +153,7 @@ class BuildBazelExtension(build_ext.build_ext):
setuptools.setup(
cmdclass=dict(build_ext=BuildBazelExtension),
cmdclass={"build_ext": BuildBazelExtension},
package_data={"google_benchmark": ["py.typed", "*.pyi"]},
ext_modules=[
BazelExtension(

View File

@ -438,9 +438,7 @@ MemoryManager::Result* BenchmarkRunner::RunMemoryManager(
return memory_result;
}
void BenchmarkRunner::RunProfilerManager() {
// TODO: Provide a way to specify the number of iterations.
IterationCount profile_iterations = 1;
void BenchmarkRunner::RunProfilerManager(IterationCount profile_iterations) {
std::unique_ptr<internal::ThreadManager> manager;
manager.reset(new internal::ThreadManager(1));
b.Setup();
@ -507,7 +505,10 @@ void BenchmarkRunner::DoOneRepetition() {
}
if (profiler_manager != nullptr) {
RunProfilerManager();
// We want to externally profile the benchmark for the same number of
// iterations because, for example, if we're tracing the benchmark then we
// want trace data to reasonably match PMU data.
RunProfilerManager(iters);
}
// Ok, now actually report.

View File

@ -19,7 +19,6 @@
#include <vector>
#include "benchmark_api_internal.h"
#include "internal_macros.h"
#include "perf_counters.h"
#include "thread_manager.h"
@ -109,7 +108,7 @@ class BenchmarkRunner {
MemoryManager::Result* RunMemoryManager(IterationCount memory_iterations);
void RunProfilerManager();
void RunProfilerManager(IterationCount profile_iterations);
IterationCount PredictNumItersNeeded(const IterationResults& i) const;

View File

@ -9,7 +9,6 @@
#include "benchmark/benchmark.h"
#include "benchmark/export.h"
#include "check.h"
#include "internal_macros.h"
namespace benchmark {

View File

@ -195,6 +195,9 @@ benchmark_add_test(NAME memory_manager_test COMMAND memory_manager_test --benchm
compile_output_test(profiler_manager_test)
benchmark_add_test(NAME profiler_manager_test COMMAND profiler_manager_test --benchmark_min_time=0.01s)
compile_benchmark_test(profiler_manager_iterations_test)
benchmark_add_test(NAME profiler_manager_iterations COMMAND profiler_manager_iterations_test)
# MSVC does not allow to set the language standard to C++98/03.
if(NOT (MSVC OR CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC"))
compile_benchmark_test(cxx03_test)

View File

@ -0,0 +1,61 @@
#include <cassert>
#include <cstdlib>
#include <memory>
#include <vector>
#include "benchmark/benchmark.h"
// Tests that we can specify the number of profiler iterations with
// --benchmark_min_time=<NUM>x.
namespace {
int iteration_count = 0;
int end_profiler_iteration_count = 0;
class TestProfilerManager : public benchmark::ProfilerManager {
void AfterSetupStart() override { iteration_count = 0; }
void BeforeTeardownStop() override {
end_profiler_iteration_count = iteration_count;
}
};
class NullReporter : public benchmark::BenchmarkReporter {
public:
bool ReportContext(const Context& /*context*/) override { return true; }
void ReportRuns(const std::vector<Run>& /* report */) override {}
};
} // end namespace
static void BM_MyBench(benchmark::State& state) {
for (auto s : state) {
++iteration_count;
}
}
BENCHMARK(BM_MyBench);
int main(int argc, char** argv) {
// Make a fake argv and append the new --benchmark_profiler_iterations=<foo>
// to it.
int fake_argc = argc + 1;
const char** fake_argv = new const char*[static_cast<size_t>(fake_argc)];
for (int i = 0; i < argc; ++i) fake_argv[i] = argv[i];
fake_argv[argc] = "--benchmark_min_time=4x";
std::unique_ptr<benchmark::ProfilerManager> pm(new TestProfilerManager());
benchmark::RegisterProfilerManager(pm.get());
benchmark::Initialize(&fake_argc, const_cast<char**>(fake_argv));
NullReporter null_reporter;
const size_t returned_count =
benchmark::RunSpecifiedBenchmarks(&null_reporter, "BM_MyBench");
assert(returned_count == 1);
// Check the executed iters.
assert(end_profiler_iteration_count == 4);
benchmark::RegisterProfilerManager(nullptr);
delete[] fake_argv;
return 0;
}

View File

@ -1,5 +1,6 @@
// FIXME: WIP
#include <cassert>
#include <memory>
#include "benchmark/benchmark.h"

View File

@ -85,7 +85,10 @@ def create_parser():
"-d",
"--dump_to_json",
dest="dump_to_json",
help="Additionally, dump benchmark comparison output to this file in JSON format.",
help=(
"Additionally, dump benchmark comparison output to this file in"
" JSON format."
),
)
utest = parser.add_argument_group()
@ -94,8 +97,15 @@ def create_parser():
dest="utest",
default=True,
action="store_false",
help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(
report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS
help=(
"The tool can do a two-tailed Mann-Whitney U test with the null"
" hypothesis that it is equally likely that a randomly selected"
" value from one sample will be less than or greater than a"
" randomly selected value from a second sample.\nWARNING: requires"
f" **LARGE** (no less than {report.UTEST_OPTIMAL_REPETITIONS})"
" number of repetitions to be meaningful!\nThe test is being done"
f" by default, if at least {report.UTEST_MIN_REPETITIONS}"
" repetitions were done.\nThis option can disable the U Test."
),
)
alpha_default = 0.05
@ -105,7 +115,9 @@ def create_parser():
default=alpha_default,
type=float,
help=(
"significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)"
"significance level alpha. if the calculated p-value is below this"
" value, then the result is said to be statistically significant"
" and the null hypothesis is rejected.\n(default: %0.4f)"
)
% alpha_default,
)
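
For readers unfamiliar with the test these two options describe, a minimal sketch of the underlying check using scipy's mannwhitneyu (the same function report.py imports); the timing samples are made up:

from scipy.stats import mannwhitneyu

baseline  = [10.1, 10.3, 9.9, 10.2, 10.0, 10.4, 10.1, 10.2, 10.3]
contender = [9.1, 9.4, 9.2, 9.0, 9.3, 9.5, 9.2, 9.1, 9.4]

# Two-tailed test, as described in the U test help text above.
_, p_value = mannwhitneyu(baseline, contender, alternative="two-sided")

alpha = 0.05  # matches alpha_default above
if p_value < alpha:
    print(f"p={p_value:.4f}: statistically significant, reject the null hypothesis")
else:
    print(f"p={p_value:.4f}: cannot reject the null hypothesis")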
@ -116,7 +128,10 @@ def create_parser():
parser_a = subparsers.add_parser(
"benchmarks",
help="The most simple use-case, compare all the output of these two benchmarks",
help=(
"The most simple use-case, compare all the output of these two"
" benchmarks"
),
)
baseline = parser_a.add_argument_group("baseline", "The benchmark baseline")
baseline.add_argument(
@ -180,7 +195,10 @@ def create_parser():
parser_c = subparsers.add_parser(
"benchmarksfiltered",
help="Compare filter one of first benchmark with filter two of the second benchmark",
help=(
"Compare filter one of first benchmark with filter two of the"
" second benchmark"
),
)
baseline = parser_c.add_argument_group("baseline", "The benchmark baseline")
baseline.add_argument(
@ -205,7 +223,10 @@ def create_parser():
metavar="test_contender",
type=argparse.FileType("r"),
nargs=1,
help="The second benchmark executable or JSON output file, that will be compared against the baseline",
help=(
"The second benchmark executable or JSON output file, that will be"
" compared against the baseline"
),
)
contender.add_argument(
"filter_contender",

View File

@ -14,7 +14,7 @@ from numpy import array
from scipy.stats import gmean, mannwhitneyu
class BenchmarkColor(object):
class BenchmarkColor:
def __init__(self, name, code):
self.name = name
self.code = code
@ -249,8 +249,9 @@ def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True):
# We still got some results to show but issue a warning about it.
if not utest["have_optimal_repetitions"]:
dsc_color = BC_WARNING
dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
UTEST_OPTIMAL_REPETITIONS
dsc += (
f". WARNING: Results unreliable! {UTEST_OPTIMAL_REPETITIONS}+"
" repetitions recommended."
)
special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"
@ -260,7 +261,7 @@ def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True):
use_color,
special_str,
BC_HEADER,
"{}{}".format(bc_name, UTEST_COL_NAME),
f"{bc_name}{UTEST_COL_NAME}",
first_col_width,
get_utest_color(utest["time_pvalue"]),
utest["time_pvalue"],
@ -285,7 +286,7 @@ def get_difference_report(json1, json2, utest=False):
partitions = partition_benchmarks(json1, json2)
for partition in partitions:
benchmark_name = partition[0][0]["name"]
label = partition[0][0]["label"] if "label" in partition[0][0] else ""
label = partition[0][0].get("label", "")
time_unit = partition[0][0]["time_unit"]
measurements = []
utest_results = {}
@ -329,11 +330,7 @@ def get_difference_report(json1, json2, utest=False):
# time units which are not compatible with other time units in the
# benchmark suite.
if measurements:
run_type = (
partition[0][0]["run_type"]
if "run_type" in partition[0][0]
else ""
)
run_type = partition[0][0].get("run_type", "")
aggregate_name = (
partition[0][0]["aggregate_name"]
if run_type == "aggregate"
@ -403,12 +400,17 @@ def print_difference_report(
first_col_width = find_longest_name(json_diff_report)
first_col_width = max(first_col_width, len("Benchmark"))
first_col_width += len(UTEST_COL_NAME)
first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
"Benchmark", 12 + first_col_width
fmt_str = (
"{:<{}s}Time CPU Time Old Time New CPU Old"
" CPU New"
)
first_line = fmt_str.format("Benchmark", 12 + first_col_width)
output_strs = [first_line, "-" * len(first_line)]
fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
fmt_str = (
"{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}"
"{endc}{:14.0f}{:14.0f}"
)
for benchmark in json_diff_report:
# *If* we were asked to only include aggregates,
# and if it is non-aggregate, then don't print it.
@ -464,7 +466,7 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
os.path.dirname(os.path.realpath(__file__)), "Inputs"
)
testOutput = os.path.join(testInputs, "test3_run0.json")
with open(testOutput, "r") as f:
with open(testOutput) as f:
json = json.load(f)
return json
@ -480,8 +482,8 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
print("\n")
print("\n".join(output_lines))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
self.assertEqual(expect_lines[i], output_lines[i])
for i, output_line in enumerate(output_lines):
self.assertEqual(expect_lines[i], output_line)
class TestReportDifference(unittest.TestCase):
@ -495,9 +497,9 @@ class TestReportDifference(unittest.TestCase):
)
testOutput1 = os.path.join(testInputs, "test1_run1.json")
testOutput2 = os.path.join(testInputs, "test1_run2.json")
with open(testOutput1, "r") as f:
with open(testOutput1) as f:
json1 = json.load(f)
with open(testOutput2, "r") as f:
with open(testOutput2) as f:
json2 = json.load(f)
return json1, json2
@ -584,8 +586,8 @@ class TestReportDifference(unittest.TestCase):
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(" ") if x]
for i, output_line in enumerate(output_lines):
parts = [x for x in output_line.split(" ") if x]
self.assertEqual(len(parts), 7)
self.assertEqual(expect_lines[i], parts)
@ -819,7 +821,9 @@ class TestReportDifference(unittest.TestCase):
},
]
self.assertEqual(len(self.json_diff_report), len(expected_output))
for out, expected in zip(self.json_diff_report, expected_output):
for out, expected in zip(
self.json_diff_report, expected_output, strict=True
):
self.assertEqual(out["name"], expected["name"])
self.assertEqual(out["label"], expected["label"])
self.assertEqual(out["time_unit"], expected["time_unit"])
@ -837,7 +841,7 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
os.path.dirname(os.path.realpath(__file__)), "Inputs"
)
testOutput = os.path.join(testInputs, "test2_run.json")
with open(testOutput, "r") as f:
with open(testOutput) as f:
json = json.load(f)
return json
@ -861,8 +865,8 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(" ") if x]
for i, output_line in enumerate(output_lines):
parts = [x for x in output_line.split(" ") if x]
self.assertEqual(len(parts), 7)
self.assertEqual(expect_lines[i], parts)
@ -947,7 +951,9 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
},
]
self.assertEqual(len(self.json_diff_report), len(expected_output))
for out, expected in zip(self.json_diff_report, expected_output):
for out, expected in zip(
self.json_diff_report, expected_output, strict=True
):
self.assertEqual(out["name"], expected["name"])
self.assertEqual(out["time_unit"], expected["time_unit"])
assert_utest(self, out, expected)
@ -965,9 +971,9 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
)
testOutput1 = os.path.join(testInputs, "test3_run0.json")
testOutput2 = os.path.join(testInputs, "test3_run1.json")
with open(testOutput1, "r") as f:
with open(testOutput1) as f:
json1 = json.load(f)
with open(testOutput2, "r") as f:
with open(testOutput2) as f:
json2 = json.load(f)
return json1, json2
@ -1025,8 +1031,8 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(" ") if x]
for i, output_line in enumerate(output_lines):
parts = [x for x in output_line.split(" ") if x]
self.assertEqual(expect_lines[i], parts)
def test_json_diff_report_pretty_printing_aggregates_only(self):
@ -1081,8 +1087,8 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(" ") if x]
for i, output_line in enumerate(output_lines):
parts = [x for x in output_line.split(" ") if x]
self.assertEqual(expect_lines[i], parts)
def test_json_diff_report(self):
@ -1190,7 +1196,9 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
},
]
self.assertEqual(len(self.json_diff_report), len(expected_output))
for out, expected in zip(self.json_diff_report, expected_output):
for out, expected in zip(
self.json_diff_report, expected_output, strict=True
):
self.assertEqual(out["name"], expected["name"])
self.assertEqual(out["time_unit"], expected["time_unit"])
assert_utest(self, out, expected)
@ -1210,9 +1218,9 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
)
testOutput1 = os.path.join(testInputs, "test3_run0.json")
testOutput2 = os.path.join(testInputs, "test3_run1.json")
with open(testOutput1, "r") as f:
with open(testOutput1) as f:
json1 = json.load(f)
with open(testOutput2, "r") as f:
with open(testOutput2) as f:
json2 = json.load(f)
return json1, json2
@ -1270,8 +1278,8 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(" ") if x]
for i, output_line in enumerate(output_lines):
parts = [x for x in output_line.split(" ") if x]
self.assertEqual(expect_lines[i], parts)
def test_json_diff_report(self):
@ -1380,7 +1388,9 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
},
]
self.assertEqual(len(self.json_diff_report), len(expected_output))
for out, expected in zip(self.json_diff_report, expected_output):
for out, expected in zip(
self.json_diff_report, expected_output, strict=True
):
self.assertEqual(out["name"], expected["name"])
self.assertEqual(out["time_unit"], expected["time_unit"])
assert_utest(self, out, expected)
@ -1398,9 +1408,9 @@ class TestReportDifferenceForPercentageAggregates(unittest.TestCase):
)
testOutput1 = os.path.join(testInputs, "test4_run0.json")
testOutput2 = os.path.join(testInputs, "test4_run1.json")
with open(testOutput1, "r") as f:
with open(testOutput1) as f:
json1 = json.load(f)
with open(testOutput2, "r") as f:
with open(testOutput2) as f:
json2 = json.load(f)
return json1, json2
@ -1416,8 +1426,8 @@ class TestReportDifferenceForPercentageAggregates(unittest.TestCase):
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(" ") if x]
for i, output_line in enumerate(output_lines):
parts = [x for x in output_line.split(" ") if x]
self.assertEqual(expect_lines[i], parts)
def test_json_diff_report(self):
@ -1439,7 +1449,9 @@ class TestReportDifferenceForPercentageAggregates(unittest.TestCase):
}
]
self.assertEqual(len(self.json_diff_report), len(expected_output))
for out, expected in zip(self.json_diff_report, expected_output):
for out, expected in zip(
self.json_diff_report, expected_output, strict=True
):
self.assertEqual(out["name"], expected["name"])
self.assertEqual(out["time_unit"], expected["time_unit"])
assert_utest(self, out, expected)
@ -1456,7 +1468,7 @@ class TestReportSorting(unittest.TestCase):
os.path.dirname(os.path.realpath(__file__)), "Inputs"
)
testOutput = os.path.join(testInputs, "test4_run.json")
with open(testOutput, "r") as f:
with open(testOutput) as f:
json = json.load(f)
return json
@ -1480,13 +1492,15 @@ class TestReportSorting(unittest.TestCase):
"88 family 1 instance 1 aggregate",
]
for n in range(len(self.json["benchmarks"]) ** 2):
for _n in range(len(self.json["benchmarks"]) ** 2):
random.shuffle(self.json["benchmarks"])
sorted_benchmarks = util.sort_benchmark_results(self.json)[
"benchmarks"
]
self.assertEqual(len(expected_names), len(sorted_benchmarks))
for out, expected in zip(sorted_benchmarks, expected_names):
for out, expected in zip(
sorted_benchmarks, expected_names, strict=True
):
self.assertEqual(out["name"], expected)
@ -1503,12 +1517,12 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly2(
)
testOutput1 = os.path.join(testInputs, "test5_run0.json")
testOutput2 = os.path.join(testInputs, "test5_run1.json")
with open(testOutput1, "r") as f:
with open(testOutput1) as f:
json1 = json.load(f)
json1["benchmarks"] = [
json1["benchmarks"][0] for i in range(1000)
]
with open(testOutput2, "r") as f:
with open(testOutput2) as f:
json2 = json.load(f)
json2["benchmarks"] = [
json2["benchmarks"][0] for i in range(1000)
@ -1535,8 +1549,8 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly2(
)
output_lines = output_lines_with_header[2:]
found = False
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(" ") if x]
for output_line in output_lines:
parts = [x for x in output_line.split(" ") if x]
found = expect_line == parts
if found:
break
@ -1578,7 +1592,9 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly2(
},
]
self.assertEqual(len(self.json_diff_report), len(expected_output))
for out, expected in zip(self.json_diff_report, expected_output):
for out, expected in zip(
self.json_diff_report, expected_output, strict=True
):
self.assertEqual(out["name"], expected["name"])
self.assertEqual(out["time_unit"], expected["time_unit"])
assert_utest(self, out, expected)
@ -1602,7 +1618,7 @@ def assert_utest(unittest_instance, lhs, rhs):
def assert_measurements(unittest_instance, lhs, rhs):
for m1, m2 in zip(lhs["measurements"], rhs["measurements"]):
for m1, m2 in zip(lhs["measurements"], rhs["measurements"], strict=False):
unittest_instance.assertEqual(m1["real_time"], m2["real_time"])
unittest_instance.assertEqual(m1["cpu_time"], m2["cpu_time"])
# m1['time'] and m1['cpu'] hold values which are being calculated,

View File

@ -1,4 +1,6 @@
"""util.py - General utilities for running, loading, and processing benchmarks"""
"""util.py - General utilities for running, loading, and processing
benchmarks
"""
import json
import os
@ -46,7 +48,7 @@ def is_json_file(filename):
'False' otherwise.
"""
try:
with open(filename, "r") as f:
with open(filename) as f:
json.load(f)
return True
except BaseException:
@ -97,7 +99,8 @@ def find_benchmark_flag(prefix, benchmark_flags):
if it is found return the arg it specifies. If specified more than once the
last value is returned. If the flag is not found None is returned.
"""
assert prefix.startswith("--") and prefix.endswith("=")
assert prefix.startswith("--")
assert prefix.endswith("=")
result = None
for f in benchmark_flags:
if f.startswith(prefix):
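
The loop is truncated in this hunk; a hedged sketch of the behaviour the docstring promises (last occurrence wins, None if absent), not necessarily the file's exact body:

def find_benchmark_flag(prefix, benchmark_flags):
    assert prefix.startswith("--")
    assert prefix.endswith("=")
    result = None
    for f in benchmark_flags:
        if f.startswith(prefix):
            # A later occurrence overwrites an earlier one, so the last value wins.
            result = f[len(prefix):]
    return result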
@ -110,7 +113,8 @@ def remove_benchmark_flags(prefix, benchmark_flags):
Return a new list containing the specified benchmark_flags except those
with the specified prefix.
"""
assert prefix.startswith("--") and prefix.endswith("=")
assert prefix.startswith("--")
assert prefix.endswith("=")
return [f for f in benchmark_flags if not f.startswith(prefix)]
@ -133,17 +137,16 @@ def load_benchmark_results(fname, benchmark_filter):
name = benchmark.get("run_name", None) or benchmark["name"]
return re.search(benchmark_filter, name) is not None
with open(fname, "r") as f:
with open(fname) as f:
results = json.load(f)
if "context" in results:
if "json_schema_version" in results["context"]:
json_schema_version = results["context"]["json_schema_version"]
if json_schema_version != 1:
print(
"In %s, got unnsupported JSON schema version: %i, expected 1"
% (fname, json_schema_version)
)
sys.exit(1)
if "json_schema_version" in results.get("context", {}):
json_schema_version = results["context"]["json_schema_version"]
if json_schema_version != 1:
print(
f"In {fname}, got unnsupported JSON schema version:"
f" {json_schema_version}, expected 1"
)
sys.exit(1)
if "benchmarks" in results:
results["benchmarks"] = list(
filter(benchmark_wanted, results["benchmarks"])
@ -157,9 +160,7 @@ def sort_benchmark_results(result):
# From inner key to the outer key!
benchmarks = sorted(
benchmarks,
key=lambda benchmark: benchmark["repetition_index"]
if "repetition_index" in benchmark
else -1,
key=lambda benchmark: benchmark.get("repetition_index", -1),
)
benchmarks = sorted(
benchmarks,
@ -169,15 +170,11 @@ def sort_benchmark_results(result):
)
benchmarks = sorted(
benchmarks,
key=lambda benchmark: benchmark["per_family_instance_index"]
if "per_family_instance_index" in benchmark
else -1,
key=lambda benchmark: benchmark.get("per_family_instance_index", -1),
)
benchmarks = sorted(
benchmarks,
key=lambda benchmark: benchmark["family_index"]
if "family_index" in benchmark
else -1,
key=lambda benchmark: benchmark.get("family_index", -1),
)
result["benchmarks"] = benchmarks
@ -197,11 +194,12 @@ def run_benchmark(exe_name, benchmark_flags):
is_temp_output = True
thandle, output_name = tempfile.mkstemp()
os.close(thandle)
benchmark_flags = list(benchmark_flags) + [
"--benchmark_out=%s" % output_name
benchmark_flags = [
*list(benchmark_flags),
"--benchmark_out=%s" % output_name,
]
cmd = [exe_name] + benchmark_flags
cmd = [exe_name, *benchmark_flags]
print("RUNNING: %s" % " ".join(cmd))
exitCode = subprocess.call(cmd)
if exitCode != 0:

View File

@ -73,16 +73,16 @@ def process_identifiers(line):
parts = re.split(r"([a-zA-Z0-9_]+)", line)
new_line = ""
for tk in parts:
if is_identifier(tk):
if tk.startswith("__Z"):
tk = tk[1:]
elif (
if is_identifier(tk) and (
tk.startswith("__Z")
or (
tk.startswith("_")
and len(tk) > 1
and tk[1].isalpha()
and tk[1] != "Z"
):
tk = tk[1:]
)
):
tk = tk[1:]
new_line += tk
return new_line
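
A quick illustration of what the rewritten condition strips, using made-up symbol names (on Mach-O targets every symbol carries an extra leading underscore, which is what the script removes):

# '__ZN9benchmark5StateC1Ev' -> '_ZN9benchmark5StateC1Ev'  (extra underscore on a mangled name)
# '_main'                    -> 'main'                      (leading underscore on a C symbol)
# '_Z3foov'                  -> unchanged, since tk[1] == 'Z' is excluded on purpose
print(process_identifiers("callq __ZN9benchmark5StateC1Ev"))
# -> 'callq _ZN9benchmark5StateC1Ev'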
@ -148,7 +148,7 @@ def main():
print("ERROR: input file '%s' does not exist" % input)
sys.exit(1)
with open(input, "r") as f:
with open(input) as f:
contents = f.read()
new_contents = process_asm(contents)
with open(output, "w") as f: