From ecb5df647341456596928d72c4c56b3f438a005e Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 22 Jan 2025 12:50:00 +0100 Subject: [PATCH 1/6] Lint Python: Add more ruff rules (#1909) * Lint Python: Add more ruff rules * range(len()) --> enumerate() * zip(strict=True) --- .pre-commit-config.yaml | 2 +- bindings/python/google_benchmark/example.py | 3 +- pyproject.toml | 6 +- setup.py | 14 ++- tools/compare.py | 4 +- tools/gbench/report.py | 106 +++++++++++--------- tools/gbench/util.py | 46 ++++----- tools/strip_asm.py | 14 +-- 8 files changed, 98 insertions(+), 97 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 78a45807..c16928a9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,7 +11,7 @@ repos: types_or: [ python, pyi ] args: [ "--ignore-missing-imports", "--scripts-are-modules" ] - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.1 + rev: v0.9.2 hooks: - id: ruff args: [ --fix, --exit-non-zero-on-fix ] diff --git a/bindings/python/google_benchmark/example.py b/bindings/python/google_benchmark/example.py index b92245ea..5909c0fc 100644 --- a/bindings/python/google_benchmark/example.py +++ b/bindings/python/google_benchmark/example.py @@ -57,7 +57,7 @@ def skipped(state): state.skip_with_error("some error") return # NOTE: You must explicitly return, or benchmark will continue. - ... # Benchmark code would be here. + # Benchmark code would be here. @benchmark.register @@ -78,7 +78,6 @@ def custom_counters(state): num_foo = 0.0 while state: # Benchmark some code here - pass # Collect some custom metric named foo num_foo += 0.13 diff --git a/pyproject.toml b/pyproject.toml index 338b0b90..761473c2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -68,9 +68,11 @@ target-version = "py311" [tool.ruff.lint] # Enable pycodestyle (`E`, `W`), Pyflakes (`F`), and isort (`I`) codes by default. -select = ["E", "F", "I", "W"] +select = ["ASYNC", "B", "C4", "C90", "E", "F", "I", "PERF", "PIE", "PT018", "RUF", "SIM", "UP", "W"] ignore = [ - "E501", # line too long + "E501", # line too long + "PLW2901", # redefined-loop-name + "UP031", # printf-string-formatting ] [tool.ruff.lint.isort] diff --git a/setup.py b/setup.py index 53933508..3c0269b1 100644 --- a/setup.py +++ b/setup.py @@ -4,8 +4,9 @@ import platform import re import shutil import sys +from collections.abc import Generator from pathlib import Path -from typing import Any, Generator +from typing import Any import setuptools from setuptools.command import build_ext @@ -86,15 +87,14 @@ class BuildBazelExtension(build_ext.build_ext): This is done in the ``bazel_build`` method, so it's not necessary to do again in the `build_ext` base class. """ - pass - def bazel_build(self, ext: BazelExtension) -> None: + def bazel_build(self, ext: BazelExtension) -> None: # noqa: C901 """Runs the bazel build to create the package.""" temp_path = Path(self.build_temp) # We round to the minor version, which makes rules_python # look up the latest available patch version internally. - python_version = "{0}.{1}".format(*sys.version_info[:2]) + python_version = "{}.{}".format(*sys.version_info[:2]) bazel_argv = [ "bazel", @@ -142,9 +142,7 @@ class BuildBazelExtension(build_ext.build_ext): # we do not want the bare .so file included # when building for ABI3, so we require a # full and exact match on the file extension. 
- if "".join(fp.suffixes) == suffix: - should_copy = True - elif fp.suffix == ".pyi": + if "".join(fp.suffixes) == suffix or fp.suffix == ".pyi": should_copy = True elif Path(root) == srcdir and f == "py.typed": # copy py.typed, but only at the package root. @@ -155,7 +153,7 @@ class BuildBazelExtension(build_ext.build_ext): setuptools.setup( - cmdclass=dict(build_ext=BuildBazelExtension), + cmdclass={"build_ext": BuildBazelExtension}, package_data={"google_benchmark": ["py.typed", "*.pyi"]}, ext_modules=[ BazelExtension( diff --git a/tools/compare.py b/tools/compare.py index 7572520c..36cbe075 100755 --- a/tools/compare.py +++ b/tools/compare.py @@ -94,9 +94,7 @@ def create_parser(): dest="utest", default=True, action="store_false", - help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format( - report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS - ), + help=f"The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {report.UTEST_OPTIMAL_REPETITIONS}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {report.UTEST_MIN_REPETITIONS} repetitions were done.\nThis option can disable the U Test.", ) alpha_default = 0.05 utest.add_argument( diff --git a/tools/gbench/report.py b/tools/gbench/report.py index 7158fd16..6b58918b 100644 --- a/tools/gbench/report.py +++ b/tools/gbench/report.py @@ -14,7 +14,7 @@ from numpy import array from scipy.stats import gmean, mannwhitneyu -class BenchmarkColor(object): +class BenchmarkColor: def __init__(self, name, code): self.name = name self.code = code @@ -249,9 +249,7 @@ def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True): # We still got some results to show but issue a warning about it. if not utest["have_optimal_repetitions"]: dsc_color = BC_WARNING - dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format( - UTEST_OPTIMAL_REPETITIONS - ) + dsc += f". WARNING: Results unreliable! {UTEST_OPTIMAL_REPETITIONS}+ repetitions recommended." special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}" @@ -260,7 +258,7 @@ def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True): use_color, special_str, BC_HEADER, - "{}{}".format(bc_name, UTEST_COL_NAME), + f"{bc_name}{UTEST_COL_NAME}", first_col_width, get_utest_color(utest["time_pvalue"]), utest["time_pvalue"], @@ -285,7 +283,7 @@ def get_difference_report(json1, json2, utest=False): partitions = partition_benchmarks(json1, json2) for partition in partitions: benchmark_name = partition[0][0]["name"] - label = partition[0][0]["label"] if "label" in partition[0][0] else "" + label = partition[0][0].get("label", "") time_unit = partition[0][0]["time_unit"] measurements = [] utest_results = {} @@ -329,11 +327,7 @@ def get_difference_report(json1, json2, utest=False): # time units which are not compatible with other time units in the # benchmark suite. 
if measurements: - run_type = ( - partition[0][0]["run_type"] - if "run_type" in partition[0][0] - else "" - ) + run_type = partition[0][0].get("run_type", "") aggregate_name = ( partition[0][0]["aggregate_name"] if run_type == "aggregate" @@ -464,7 +458,7 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase): os.path.dirname(os.path.realpath(__file__)), "Inputs" ) testOutput = os.path.join(testInputs, "test3_run0.json") - with open(testOutput, "r") as f: + with open(testOutput) as f: json = json.load(f) return json @@ -480,8 +474,8 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase): print("\n") print("\n".join(output_lines)) self.assertEqual(len(output_lines), len(expect_lines)) - for i in range(0, len(output_lines)): - self.assertEqual(expect_lines[i], output_lines[i]) + for i, output_line in enumerate(output_lines): + self.assertEqual(expect_lines[i], output_line) class TestReportDifference(unittest.TestCase): @@ -495,9 +489,9 @@ class TestReportDifference(unittest.TestCase): ) testOutput1 = os.path.join(testInputs, "test1_run1.json") testOutput2 = os.path.join(testInputs, "test1_run2.json") - with open(testOutput1, "r") as f: + with open(testOutput1) as f: json1 = json.load(f) - with open(testOutput2, "r") as f: + with open(testOutput2) as f: json2 = json.load(f) return json1, json2 @@ -584,8 +578,8 @@ class TestReportDifference(unittest.TestCase): print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) - for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(" ") if x] + for i, output_line in enumerate(output_lines): + parts = [x for x in output_line.split(" ") if x] self.assertEqual(len(parts), 7) self.assertEqual(expect_lines[i], parts) @@ -819,7 +813,9 @@ class TestReportDifference(unittest.TestCase): }, ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip(self.json_diff_report, expected_output): + for out, expected in zip( + self.json_diff_report, expected_output, strict=True + ): self.assertEqual(out["name"], expected["name"]) self.assertEqual(out["label"], expected["label"]) self.assertEqual(out["time_unit"], expected["time_unit"]) @@ -837,7 +833,7 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase): os.path.dirname(os.path.realpath(__file__)), "Inputs" ) testOutput = os.path.join(testInputs, "test2_run.json") - with open(testOutput, "r") as f: + with open(testOutput) as f: json = json.load(f) return json @@ -861,8 +857,8 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase): print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) - for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(" ") if x] + for i, output_line in enumerate(output_lines): + parts = [x for x in output_line.split(" ") if x] self.assertEqual(len(parts), 7) self.assertEqual(expect_lines[i], parts) @@ -947,7 +943,9 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase): }, ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip(self.json_diff_report, expected_output): + for out, expected in zip( + self.json_diff_report, expected_output, strict=True + ): self.assertEqual(out["name"], expected["name"]) self.assertEqual(out["time_unit"], expected["time_unit"]) assert_utest(self, out, expected) @@ -965,9 +963,9 @@ class TestReportDifferenceWithUTest(unittest.TestCase): ) testOutput1 = os.path.join(testInputs, "test3_run0.json") 
testOutput2 = os.path.join(testInputs, "test3_run1.json") - with open(testOutput1, "r") as f: + with open(testOutput1) as f: json1 = json.load(f) - with open(testOutput2, "r") as f: + with open(testOutput2) as f: json2 = json.load(f) return json1, json2 @@ -1025,8 +1023,8 @@ class TestReportDifferenceWithUTest(unittest.TestCase): print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) - for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(" ") if x] + for i, output_line in enumerate(output_lines): + parts = [x for x in output_line.split(" ") if x] self.assertEqual(expect_lines[i], parts) def test_json_diff_report_pretty_printing_aggregates_only(self): @@ -1081,8 +1079,8 @@ class TestReportDifferenceWithUTest(unittest.TestCase): print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) - for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(" ") if x] + for i, output_line in enumerate(output_lines): + parts = [x for x in output_line.split(" ") if x] self.assertEqual(expect_lines[i], parts) def test_json_diff_report(self): @@ -1190,7 +1188,9 @@ class TestReportDifferenceWithUTest(unittest.TestCase): }, ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip(self.json_diff_report, expected_output): + for out, expected in zip( + self.json_diff_report, expected_output, strict=True + ): self.assertEqual(out["name"], expected["name"]) self.assertEqual(out["time_unit"], expected["time_unit"]) assert_utest(self, out, expected) @@ -1210,9 +1210,9 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly( ) testOutput1 = os.path.join(testInputs, "test3_run0.json") testOutput2 = os.path.join(testInputs, "test3_run1.json") - with open(testOutput1, "r") as f: + with open(testOutput1) as f: json1 = json.load(f) - with open(testOutput2, "r") as f: + with open(testOutput2) as f: json2 = json.load(f) return json1, json2 @@ -1270,8 +1270,8 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly( print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) - for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(" ") if x] + for i, output_line in enumerate(output_lines): + parts = [x for x in output_line.split(" ") if x] self.assertEqual(expect_lines[i], parts) def test_json_diff_report(self): @@ -1380,7 +1380,9 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly( }, ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip(self.json_diff_report, expected_output): + for out, expected in zip( + self.json_diff_report, expected_output, strict=True + ): self.assertEqual(out["name"], expected["name"]) self.assertEqual(out["time_unit"], expected["time_unit"]) assert_utest(self, out, expected) @@ -1398,9 +1400,9 @@ class TestReportDifferenceForPercentageAggregates(unittest.TestCase): ) testOutput1 = os.path.join(testInputs, "test4_run0.json") testOutput2 = os.path.join(testInputs, "test4_run1.json") - with open(testOutput1, "r") as f: + with open(testOutput1) as f: json1 = json.load(f) - with open(testOutput2, "r") as f: + with open(testOutput2) as f: json2 = json.load(f) return json1, json2 @@ -1416,8 +1418,8 @@ class TestReportDifferenceForPercentageAggregates(unittest.TestCase): print("\n") print("\n".join(output_lines_with_header)) 
self.assertEqual(len(output_lines), len(expect_lines)) - for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(" ") if x] + for i, output_line in enumerate(output_lines): + parts = [x for x in output_line.split(" ") if x] self.assertEqual(expect_lines[i], parts) def test_json_diff_report(self): @@ -1439,7 +1441,9 @@ class TestReportDifferenceForPercentageAggregates(unittest.TestCase): } ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip(self.json_diff_report, expected_output): + for out, expected in zip( + self.json_diff_report, expected_output, strict=True + ): self.assertEqual(out["name"], expected["name"]) self.assertEqual(out["time_unit"], expected["time_unit"]) assert_utest(self, out, expected) @@ -1456,7 +1460,7 @@ class TestReportSorting(unittest.TestCase): os.path.dirname(os.path.realpath(__file__)), "Inputs" ) testOutput = os.path.join(testInputs, "test4_run.json") - with open(testOutput, "r") as f: + with open(testOutput) as f: json = json.load(f) return json @@ -1480,13 +1484,15 @@ class TestReportSorting(unittest.TestCase): "88 family 1 instance 1 aggregate", ] - for n in range(len(self.json["benchmarks"]) ** 2): + for _n in range(len(self.json["benchmarks"]) ** 2): random.shuffle(self.json["benchmarks"]) sorted_benchmarks = util.sort_benchmark_results(self.json)[ "benchmarks" ] self.assertEqual(len(expected_names), len(sorted_benchmarks)) - for out, expected in zip(sorted_benchmarks, expected_names): + for out, expected in zip( + sorted_benchmarks, expected_names, strict=True + ): self.assertEqual(out["name"], expected) @@ -1503,12 +1509,12 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly2( ) testOutput1 = os.path.join(testInputs, "test5_run0.json") testOutput2 = os.path.join(testInputs, "test5_run1.json") - with open(testOutput1, "r") as f: + with open(testOutput1) as f: json1 = json.load(f) json1["benchmarks"] = [ json1["benchmarks"][0] for i in range(1000) ] - with open(testOutput2, "r") as f: + with open(testOutput2) as f: json2 = json.load(f) json2["benchmarks"] = [ json2["benchmarks"][0] for i in range(1000) @@ -1535,8 +1541,8 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly2( ) output_lines = output_lines_with_header[2:] found = False - for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(" ") if x] + for output_line in output_lines: + parts = [x for x in output_line.split(" ") if x] found = expect_line == parts if found: break @@ -1578,7 +1584,9 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly2( }, ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip(self.json_diff_report, expected_output): + for out, expected in zip( + self.json_diff_report, expected_output, strict=True + ): self.assertEqual(out["name"], expected["name"]) self.assertEqual(out["time_unit"], expected["time_unit"]) assert_utest(self, out, expected) @@ -1602,7 +1610,7 @@ def assert_utest(unittest_instance, lhs, rhs): def assert_measurements(unittest_instance, lhs, rhs): - for m1, m2 in zip(lhs["measurements"], rhs["measurements"]): + for m1, m2 in zip(lhs["measurements"], rhs["measurements"], strict=False): unittest_instance.assertEqual(m1["real_time"], m2["real_time"]) unittest_instance.assertEqual(m1["cpu_time"], m2["cpu_time"]) # m1['time'] and m1['cpu'] hold values which are being calculated, diff --git a/tools/gbench/util.py b/tools/gbench/util.py index 1119a1a2..596b51a0 100644 --- 
a/tools/gbench/util.py +++ b/tools/gbench/util.py @@ -46,7 +46,7 @@ def is_json_file(filename): 'False' otherwise. """ try: - with open(filename, "r") as f: + with open(filename) as f: json.load(f) return True except BaseException: @@ -97,7 +97,8 @@ def find_benchmark_flag(prefix, benchmark_flags): if it is found return the arg it specifies. If specified more than once the last value is returned. If the flag is not found None is returned. """ - assert prefix.startswith("--") and prefix.endswith("=") + assert prefix.startswith("--") + assert prefix.endswith("=") result = None for f in benchmark_flags: if f.startswith(prefix): @@ -110,7 +111,8 @@ def remove_benchmark_flags(prefix, benchmark_flags): Return a new list containing the specified benchmark_flags except those with the specified prefix. """ - assert prefix.startswith("--") and prefix.endswith("=") + assert prefix.startswith("--") + assert prefix.endswith("=") return [f for f in benchmark_flags if not f.startswith(prefix)] @@ -133,17 +135,16 @@ def load_benchmark_results(fname, benchmark_filter): name = benchmark.get("run_name", None) or benchmark["name"] return re.search(benchmark_filter, name) is not None - with open(fname, "r") as f: + with open(fname) as f: results = json.load(f) - if "context" in results: - if "json_schema_version" in results["context"]: - json_schema_version = results["context"]["json_schema_version"] - if json_schema_version != 1: - print( - "In %s, got unnsupported JSON schema version: %i, expected 1" - % (fname, json_schema_version) - ) - sys.exit(1) + if "json_schema_version" in results.get("context", {}): + json_schema_version = results["context"]["json_schema_version"] + if json_schema_version != 1: + print( + "In %s, got unnsupported JSON schema version: %i, expected 1" + % (fname, json_schema_version) + ) + sys.exit(1) if "benchmarks" in results: results["benchmarks"] = list( filter(benchmark_wanted, results["benchmarks"]) @@ -157,9 +158,7 @@ def sort_benchmark_results(result): # From inner key to the outer key! 
benchmarks = sorted( benchmarks, - key=lambda benchmark: benchmark["repetition_index"] - if "repetition_index" in benchmark - else -1, + key=lambda benchmark: benchmark.get("repetition_index", -1), ) benchmarks = sorted( benchmarks, @@ -169,15 +168,11 @@ def sort_benchmark_results(result): ) benchmarks = sorted( benchmarks, - key=lambda benchmark: benchmark["per_family_instance_index"] - if "per_family_instance_index" in benchmark - else -1, + key=lambda benchmark: benchmark.get("per_family_instance_index", -1), ) benchmarks = sorted( benchmarks, - key=lambda benchmark: benchmark["family_index"] - if "family_index" in benchmark - else -1, + key=lambda benchmark: benchmark.get("family_index", -1), ) result["benchmarks"] = benchmarks @@ -197,11 +192,12 @@ def run_benchmark(exe_name, benchmark_flags): is_temp_output = True thandle, output_name = tempfile.mkstemp() os.close(thandle) - benchmark_flags = list(benchmark_flags) + [ - "--benchmark_out=%s" % output_name + benchmark_flags = [ + *list(benchmark_flags), + "--benchmark_out=%s" % output_name, ] - cmd = [exe_name] + benchmark_flags + cmd = [exe_name, *benchmark_flags] print("RUNNING: %s" % " ".join(cmd)) exitCode = subprocess.call(cmd) if exitCode != 0: diff --git a/tools/strip_asm.py b/tools/strip_asm.py index bc3a774a..14d80ed4 100755 --- a/tools/strip_asm.py +++ b/tools/strip_asm.py @@ -73,16 +73,16 @@ def process_identifiers(line): parts = re.split(r"([a-zA-Z0-9_]+)", line) new_line = "" for tk in parts: - if is_identifier(tk): - if tk.startswith("__Z"): - tk = tk[1:] - elif ( + if is_identifier(tk) and ( + tk.startswith("__Z") + or ( tk.startswith("_") and len(tk) > 1 and tk[1].isalpha() and tk[1] != "Z" - ): - tk = tk[1:] + ) + ): + tk = tk[1:] new_line += tk return new_line @@ -148,7 +148,7 @@ def main(): print("ERROR: input file '%s' does not exist" % input) sys.exit(1) - with open(input, "r") as f: + with open(input) as f: contents = f.read() new_contents = process_asm(contents) with open(output, "w") as f: From 6f21075d9cc3e6664152851f4a13ae240847b3bd Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 22 Jan 2025 15:24:22 +0100 Subject: [PATCH 2/6] GitHub Actions: build-and-test on an ARM processor (#1911) [Standard GitHub-hosted runners for public repositories](https://docs.github.com/en/actions/using-github-hosted-runners/using-github-hosted-runners/about-github-hosted-runners#standard-github-hosted-runners-for-public-repositories) --> `ubuntu-22.04-arm`, `ubuntu-24.04-arm` --- .github/workflows/build-and-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index d05300db..c32e799d 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -17,7 +17,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-22.04, ubuntu-20.04, macos-latest] + os: [ubuntu-22.04, ubuntu-20.04, ubuntu-22.04-arm, macos-latest] build_type: ['Release', 'Debug'] compiler: ['g++', 'clang++'] lib: ['shared', 'static'] From 3d027d7e3817ec265872434378306f1e92fda9bb Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 22 Jan 2025 17:43:07 +0100 Subject: [PATCH 3/6] ruff rule E501: Fix long lines in Python code (#1910) * ruff rule E501: Fix long lines in Python code * Add missing space --------- Co-authored-by: dominic <510002+dmah42@users.noreply.github.com> --- .ycm_extra_conf.py | 8 ++--- bindings/python/google_benchmark/__init__.py | 15 +++++---- bindings/python/google_benchmark/example.py | 3 +- 
pyproject.toml | 1 - tools/compare.py | 35 ++++++++++++++++---- tools/gbench/report.py | 16 ++++++--- tools/gbench/util.py | 8 +++-- 7 files changed, 60 insertions(+), 26 deletions(-) diff --git a/.ycm_extra_conf.py b/.ycm_extra_conf.py index caf257f0..ffef1b4d 100644 --- a/.ycm_extra_conf.py +++ b/.ycm_extra_conf.py @@ -83,10 +83,10 @@ def IsHeaderFile(filename): def GetCompilationInfoForFile(filename): - # The compilation_commands.json file generated by CMake does not have entries - # for header files. So we do our best by asking the db for flags for a - # corresponding source file, if any. If one exists, the flags for that file - # should be good enough. + # The compilation_commands.json file generated by CMake does not have + # entries for header files. So we do our best by asking the db for flags for + # a corresponding source file, if any. If one exists, the flags for that + # file should be good enough. if IsHeaderFile(filename): basename = os.path.splitext(filename)[0] for extension in SOURCE_EXTENSIONS: diff --git a/bindings/python/google_benchmark/__init__.py b/bindings/python/google_benchmark/__init__.py index 70063526..3685928f 100644 --- a/bindings/python/google_benchmark/__init__.py +++ b/bindings/python/google_benchmark/__init__.py @@ -60,7 +60,8 @@ class __OptionMaker: """ class Options: - """Pure data class to store options calls, along with the benchmarked function.""" + """Pure data class to store options calls, along with the benchmarked + function.""" def __init__(self, func): self.func = func @@ -83,8 +84,8 @@ class __OptionMaker: def __decorator(func_or_options): options = self.make(func_or_options) options.builder_calls.append((builder_name, args, kwargs)) - # The decorator returns Options so it is not technically a decorator - # and needs a final call to @register + # The decorator returns Options so it is not technically a + # decorator and needs a final call to @register return options return __decorator @@ -93,8 +94,8 @@ class __OptionMaker: # Alias for nicer API. -# We have to instantiate an object, even if stateless, to be able to use __getattr__ -# on option.range +# We have to instantiate an object, even if stateless, to be able to use +# __getattr__ on option.range option = __OptionMaker() @@ -104,8 +105,8 @@ def register(undefined=None, *, name=None): # Decorator is called without parenthesis so we return a decorator return lambda f: register(f, name=name) - # We have either the function to benchmark (simple case) or an instance of Options - # (@option._ case). + # We have either the function to benchmark (simple case) or an instance of + # Options (@option._ case). options = __OptionMaker.make(undefined) if name is None: diff --git a/bindings/python/google_benchmark/example.py b/bindings/python/google_benchmark/example.py index 5909c0fc..5635c418 100644 --- a/bindings/python/google_benchmark/example.py +++ b/bindings/python/google_benchmark/example.py @@ -13,7 +13,8 @@ # limitations under the License. """Example of Python using C++ benchmark framework. -To run this example, you must first install the `google_benchmark` Python package. +To run this example, you must first install the `google_benchmark` Python +package. To install using `setup.py`, download and extract the `google_benchmark` source. 
In the extracted directory, execute: diff --git a/pyproject.toml b/pyproject.toml index 761473c2..4595b6dd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -70,7 +70,6 @@ target-version = "py311" # Enable pycodestyle (`E`, `W`), Pyflakes (`F`), and isort (`I`) codes by default. select = ["ASYNC", "B", "C4", "C90", "E", "F", "I", "PERF", "PIE", "PT018", "RUF", "SIM", "UP", "W"] ignore = [ - "E501", # line too long "PLW2901", # redefined-loop-name "UP031", # printf-string-formatting ] diff --git a/tools/compare.py b/tools/compare.py index 36cbe075..1dd9de23 100755 --- a/tools/compare.py +++ b/tools/compare.py @@ -85,7 +85,10 @@ def create_parser(): "-d", "--dump_to_json", dest="dump_to_json", - help="Additionally, dump benchmark comparison output to this file in JSON format.", + help=( + "Additionally, dump benchmark comparison output to this file in" + " JSON format." + ), ) utest = parser.add_argument_group() @@ -94,7 +97,16 @@ def create_parser(): dest="utest", default=True, action="store_false", - help=f"The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {report.UTEST_OPTIMAL_REPETITIONS}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {report.UTEST_MIN_REPETITIONS} repetitions were done.\nThis option can disable the U Test.", + help=( + "The tool can do a two-tailed Mann-Whitney U test with the null" + " hypothesis that it is equally likely that a randomly selected" + " value from one sample will be less than or greater than a" + " randomly selected value from a second sample.\nWARNING: requires" + f" **LARGE** (no less than {report.UTEST_OPTIMAL_REPETITIONS})" + " number of repetitions to be meaningful!\nThe test is being done" + f" by default, if at least {report.UTEST_MIN_REPETITIONS}" + " repetitions were done.\nThis option can disable the U Test." + ), ) alpha_default = 0.05 utest.add_argument( @@ -103,7 +115,9 @@ def create_parser(): default=alpha_default, type=float, help=( - "significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)" + "significance level alpha. 
if the calculated p-value is below this" + " value, then the result is said to be statistically significant" + " and the null hypothesis is rejected.\n(default: %0.4f)" ) % alpha_default, ) @@ -114,7 +128,10 @@ def create_parser(): parser_a = subparsers.add_parser( "benchmarks", - help="The most simple use-case, compare all the output of these two benchmarks", + help=( + "The most simple use-case, compare all the output of these two" + " benchmarks" + ), ) baseline = parser_a.add_argument_group("baseline", "The benchmark baseline") baseline.add_argument( @@ -178,7 +195,10 @@ def create_parser(): parser_c = subparsers.add_parser( "benchmarksfiltered", - help="Compare filter one of first benchmark with filter two of the second benchmark", + help=( + "Compare filter one of first benchmark with filter two of the" + " second benchmark" + ), ) baseline = parser_c.add_argument_group("baseline", "The benchmark baseline") baseline.add_argument( @@ -203,7 +223,10 @@ def create_parser(): metavar="test_contender", type=argparse.FileType("r"), nargs=1, - help="The second benchmark executable or JSON output file, that will be compared against the baseline", + help=( + "The second benchmark executable or JSON output file, that will be" + " compared against the baseline" + ), ) contender.add_argument( "filter_contender", diff --git a/tools/gbench/report.py b/tools/gbench/report.py index 6b58918b..e143e45a 100644 --- a/tools/gbench/report.py +++ b/tools/gbench/report.py @@ -249,7 +249,10 @@ def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True): # We still got some results to show but issue a warning about it. if not utest["have_optimal_repetitions"]: dsc_color = BC_WARNING - dsc += f". WARNING: Results unreliable! {UTEST_OPTIMAL_REPETITIONS}+ repetitions recommended." + dsc += ( + f". WARNING: Results unreliable! {UTEST_OPTIMAL_REPETITIONS}+" + " repetitions recommended." + ) special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}" @@ -397,12 +400,17 @@ def print_difference_report( first_col_width = find_longest_name(json_diff_report) first_col_width = max(first_col_width, len("Benchmark")) first_col_width += len(UTEST_COL_NAME) - first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format( - "Benchmark", 12 + first_col_width + fmt_str = ( + "{:<{}s}Time CPU Time Old Time New CPU Old" + " CPU New" ) + first_line = fmt_str.format("Benchmark", 12 + first_col_width) output_strs = [first_line, "-" * len(first_line)] - fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" + fmt_str = ( + "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}" + "{endc}{:14.0f}{:14.0f}" + ) for benchmark in json_diff_report: # *If* we were asked to only include aggregates, # and if it is non-aggregate, then don't print it. 
diff --git a/tools/gbench/util.py b/tools/gbench/util.py index 596b51a0..2e91006b 100644 --- a/tools/gbench/util.py +++ b/tools/gbench/util.py @@ -1,4 +1,6 @@ -"""util.py - General utilities for running, loading, and processing benchmarks""" +"""util.py - General utilities for running, loading, and processing +benchmarks +""" import json import os @@ -141,8 +143,8 @@ def load_benchmark_results(fname, benchmark_filter): json_schema_version = results["context"]["json_schema_version"] if json_schema_version != 1: print( - "In %s, got unnsupported JSON schema version: %i, expected 1" - % (fname, json_schema_version) + f"In {fname}, got unnsupported JSON schema version:" + f" {json_schema_version}, expected 1" ) sys.exit(1) if "benchmarks" in results: From 049f6e79cc3e8636cec21bbd94ed185b4a5f2653 Mon Sep 17 00:00:00 2001 From: xdje42 Date: Wed, 29 Jan 2025 01:53:56 -0800 Subject: [PATCH 4/6] [BUG] Run external profiler (ProfilerManager) same number of iterations #1913 (#1914) Run the external profiler the same number of iterations as the benchmark was run normally. This makes, for example, a trace collected via ProfilerManager consistent with collected PMU data. --- src/benchmark_runner.cc | 9 ++-- src/benchmark_runner.h | 2 +- test/CMakeLists.txt | 3 ++ test/profiler_manager_iterations_test.cc | 61 ++++++++++++++++++++++++ 4 files changed, 70 insertions(+), 5 deletions(-) create mode 100644 test/profiler_manager_iterations_test.cc diff --git a/src/benchmark_runner.cc b/src/benchmark_runner.cc index 463f69fc..3e8aea73 100644 --- a/src/benchmark_runner.cc +++ b/src/benchmark_runner.cc @@ -438,9 +438,7 @@ MemoryManager::Result* BenchmarkRunner::RunMemoryManager( return memory_result; } -void BenchmarkRunner::RunProfilerManager() { - // TODO: Provide a way to specify the number of iterations. - IterationCount profile_iterations = 1; +void BenchmarkRunner::RunProfilerManager(IterationCount profile_iterations) { std::unique_ptr manager; manager.reset(new internal::ThreadManager(1)); b.Setup(); @@ -507,7 +505,10 @@ void BenchmarkRunner::DoOneRepetition() { } if (profiler_manager != nullptr) { - RunProfilerManager(); + // We want to externally profile the benchmark for the same number of + // iterations because, for example, if we're tracing the benchmark then we + // want trace data to reasonably match PMU data. + RunProfilerManager(iters); } // Ok, now actually report. diff --git a/src/benchmark_runner.h b/src/benchmark_runner.h index 6e5ceb31..332bbb51 100644 --- a/src/benchmark_runner.h +++ b/src/benchmark_runner.h @@ -109,7 +109,7 @@ class BenchmarkRunner { MemoryManager::Result* RunMemoryManager(IterationCount memory_iterations); - void RunProfilerManager(); + void RunProfilerManager(IterationCount profile_iterations); IterationCount PredictNumItersNeeded(const IterationResults& i) const; diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 321e24d9..3e2c6518 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -195,6 +195,9 @@ benchmark_add_test(NAME memory_manager_test COMMAND memory_manager_test --benchm compile_output_test(profiler_manager_test) benchmark_add_test(NAME profiler_manager_test COMMAND profiler_manager_test --benchmark_min_time=0.01s) +compile_benchmark_test(profiler_manager_iterations_test) +benchmark_add_test(NAME profiler_manager_iterations COMMAND profiler_manager_iterations_test) + # MSVC does not allow to set the language standard to C++98/03. 
if(NOT (MSVC OR CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC")) compile_benchmark_test(cxx03_test) diff --git a/test/profiler_manager_iterations_test.cc b/test/profiler_manager_iterations_test.cc new file mode 100644 index 00000000..e727929d --- /dev/null +++ b/test/profiler_manager_iterations_test.cc @@ -0,0 +1,61 @@ +#include +#include +#include +#include + +#include "benchmark/benchmark.h" + +// Tests that we can specify the number of profiler iterations with +// --benchmark_min_time=x. +namespace { + +int iteration_count = 0; +int end_profiler_iteration_count = 0; + +class TestProfilerManager : public benchmark::ProfilerManager { + void AfterSetupStart() override { iteration_count = 0; } + void BeforeTeardownStop() override { + end_profiler_iteration_count = iteration_count; + } +}; + +class NullReporter : public benchmark::BenchmarkReporter { + public: + bool ReportContext(const Context& /*context*/) override { return true; } + void ReportRuns(const std::vector& /* report */) override {} +}; + +} // end namespace + +static void BM_MyBench(benchmark::State& state) { + for (auto s : state) { + ++iteration_count; + } +} +BENCHMARK(BM_MyBench); + +int main(int argc, char** argv) { + // Make a fake argv and append the new --benchmark_profiler_iterations= + // to it. + int fake_argc = argc + 1; + const char** fake_argv = new const char*[static_cast(fake_argc)]; + for (int i = 0; i < argc; ++i) fake_argv[i] = argv[i]; + fake_argv[argc] = "--benchmark_min_time=4x"; + + std::unique_ptr pm(new TestProfilerManager()); + benchmark::RegisterProfilerManager(pm.get()); + + benchmark::Initialize(&fake_argc, const_cast(fake_argv)); + + NullReporter null_reporter; + const size_t returned_count = + benchmark::RunSpecifiedBenchmarks(&null_reporter, "BM_MyBench"); + assert(returned_count == 1); + + // Check the executed iters. + assert(end_profiler_iteration_count == 4); + + benchmark::RegisterProfilerManager(nullptr); + delete[] fake_argv; + return 0; +} From 4642758438659e01fed407b28062d88b70d31331 Mon Sep 17 00:00:00 2001 From: Dominic Hamon Date: Thu, 30 Jan 2025 09:52:07 +0000 Subject: [PATCH 5/6] fix some clang-tidy issues --- src/benchmark_runner.h | 1 - test/profiler_manager_test.cc | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/src/benchmark_runner.h b/src/benchmark_runner.h index 332bbb51..20a37c1a 100644 --- a/src/benchmark_runner.h +++ b/src/benchmark_runner.h @@ -19,7 +19,6 @@ #include #include "benchmark_api_internal.h" -#include "internal_macros.h" #include "perf_counters.h" #include "thread_manager.h" diff --git a/test/profiler_manager_test.cc b/test/profiler_manager_test.cc index 3b08a60d..21f2f1dc 100644 --- a/test/profiler_manager_test.cc +++ b/test/profiler_manager_test.cc @@ -1,5 +1,6 @@ // FIXME: WIP +#include #include #include "benchmark/benchmark.h" From 4a805f9f0f468bd4d499d060a1a1c6bd5d6b6b73 Mon Sep 17 00:00:00 2001 From: Dominic Hamon Date: Thu, 30 Jan 2025 10:00:04 +0000 Subject: [PATCH 6/6] clang-tidy warning --- src/string_util.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/string_util.h b/src/string_util.h index 731aa2c0..f1e50be4 100644 --- a/src/string_util.h +++ b/src/string_util.h @@ -9,7 +9,6 @@ #include "benchmark/benchmark.h" #include "benchmark/export.h" #include "check.h" -#include "internal_macros.h" namespace benchmark {
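
Illustration (not part of the patches above): a minimal sketch of how an external profiler plugged in through benchmark::ProfilerManager — the hook whose iteration count PATCH 4/6 adjusts — might observe a run. It only uses the API surface visible in the patches (RegisterProfilerManager, AfterSetupStart, BeforeTeardownStop, Initialize, RunSpecifiedBenchmarks); the names CountingProfiler, BM_Spin, and profiled_iterations are hypothetical and exist only for this example.

#include <cstdio>
#include <memory>

#include "benchmark/benchmark.h"

namespace {

// Incremented by the benchmark body; reset and reported by the profiler hooks.
long profiled_iterations = 0;

class CountingProfiler : public benchmark::ProfilerManager {
  // Called right after Setup() at the start of the separate profiling pass.
  void AfterSetupStart() override { profiled_iterations = 0; }
  // Called just before Teardown(); report whatever was gathered.
  void BeforeTeardownStop() override {
    std::printf("profiled %ld iterations\n", profiled_iterations);
  }
};

}  // namespace

static void BM_Spin(benchmark::State& state) {
  for (auto _ : state) {
    ++profiled_iterations;  // stands in for the real work being profiled
  }
}
BENCHMARK(BM_Spin);

int main(int argc, char** argv) {
  std::unique_ptr<CountingProfiler> profiler(new CountingProfiler());
  benchmark::RegisterProfilerManager(profiler.get());

  benchmark::Initialize(&argc, argv);
  benchmark::RunSpecifiedBenchmarks();

  benchmark::RegisterProfilerManager(nullptr);
  return 0;
}

Before PATCH 4/6, the profiling pass ran a single hard-coded iteration; with the change, it reuses the iteration count the measured run converged on, so the count reported in BeforeTeardownStop lines up with the benchmark's own reported iterations (and, per the commit message, with any PMU data collected alongside it).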