Mirror of https://github.com/google/benchmark.git (synced 2025-01-27 20:30:15 +08:00)
Lint Python: Add more ruff rules (#1909)

* Lint Python: Add more ruff rules
* range(len()) --> enumerate()
* zip(strict=True)

Commit: ecb5df6473
Parent: d6536acfe8
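The two headline rewrites from the commit message, sketched below on made-up sample lists (a minimal illustration, not part of the commit):

    items = ["a", "b", "c"]  # made-up sample data
    extra = ["x", "y", "z"]

    # range(len()) --> enumerate(): the index and the element come out together.
    for i in range(len(items)):
        print(i, items[i])
    for i, item in enumerate(items):
        print(i, item)

    # zip(strict=True) (Python 3.10+) raises ValueError on a length mismatch
    # instead of silently truncating to the shorter iterable.
    for left, right in zip(items, extra, strict=True):
        print(left, right)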
@@ -11,7 +11,7 @@ repos:
         types_or: [ python, pyi ]
         args: [ "--ignore-missing-imports", "--scripts-are-modules" ]
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.9.1
+    rev: v0.9.2
     hooks:
       - id: ruff
         args: [ --fix, --exit-non-zero-on-fix ]
@@ -57,7 +57,7 @@ def skipped(state):
         state.skip_with_error("some error")
         return  # NOTE: You must explicitly return, or benchmark will continue.
 
-    ...  # Benchmark code would be here.
+    # Benchmark code would be here.
 
 
 @benchmark.register
@@ -78,7 +78,6 @@ def custom_counters(state):
     num_foo = 0.0
     while state:
         # Benchmark some code here
-        pass
         # Collect some custom metric named foo
         num_foo += 0.13
 
@@ -68,9 +68,11 @@ target-version = "py311"
 
 [tool.ruff.lint]
 # Enable pycodestyle (`E`, `W`), Pyflakes (`F`), and isort (`I`) codes by default.
-select = ["E", "F", "I", "W"]
+select = ["ASYNC", "B", "C4", "C90", "E", "F", "I", "PERF", "PIE", "PT018", "RUF", "SIM", "UP", "W"]
 ignore = [
     "E501", # line too long
+    "PLW2901", # redefined-loop-name
+    "UP031", # printf-string-formatting
 ]
 
 [tool.ruff.lint.isort]
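The expanded `select` list above is what drives most of the mechanical rewrites in the rest of this commit (pyupgrade-, comprehension- and simplify-style checks, plus bugbear and a couple of targeted rules). A minimal sketch, with made-up values, of the kind of behavior-preserving equivalences involved:

    name, col = "BM_Foo", "_pvalue"  # made-up sample values

    # str.format --> f-string: identical result for simple interpolation.
    assert "{}{}".format(name, col) == f"{name}{col}"

    # dict(key=value) call --> dict literal: the literal builds the same mapping.
    assert dict(build_ext="x") == {"build_ext": "x"}

    # open(path, "r") --> open(path): text-read mode is already the default,
    # which is why the many 'with open(..., "r")' lines below lose the argument.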
setup.py (14 lines changed)
@@ -4,8 +4,9 @@ import platform
 import re
 import shutil
 import sys
+from collections.abc import Generator
 from pathlib import Path
-from typing import Any, Generator
+from typing import Any
 
 import setuptools
 from setuptools.command import build_ext
@@ -86,15 +87,14 @@ class BuildBazelExtension(build_ext.build_ext):
         This is done in the ``bazel_build`` method, so it's not necessary to
         do again in the `build_ext` base class.
         """
-        pass
 
-    def bazel_build(self, ext: BazelExtension) -> None:
+    def bazel_build(self, ext: BazelExtension) -> None:  # noqa: C901
         """Runs the bazel build to create the package."""
         temp_path = Path(self.build_temp)
 
         # We round to the minor version, which makes rules_python
         # look up the latest available patch version internally.
-        python_version = "{0}.{1}".format(*sys.version_info[:2])
+        python_version = "{}.{}".format(*sys.version_info[:2])
 
         bazel_argv = [
             "bazel",
@@ -142,9 +142,7 @@ class BuildBazelExtension(build_ext.build_ext):
                     # we do not want the bare .so file included
                     # when building for ABI3, so we require a
                     # full and exact match on the file extension.
-                    if "".join(fp.suffixes) == suffix:
-                        should_copy = True
-                    elif fp.suffix == ".pyi":
+                    if "".join(fp.suffixes) == suffix or fp.suffix == ".pyi":
                         should_copy = True
                     elif Path(root) == srcdir and f == "py.typed":
                         # copy py.typed, but only at the package root.
@@ -155,7 +153,7 @@ class BuildBazelExtension(build_ext.build_ext):
 
 
 setuptools.setup(
-    cmdclass=dict(build_ext=BuildBazelExtension),
+    cmdclass={"build_ext": BuildBazelExtension},
     package_data={"google_benchmark": ["py.typed", "*.pyi"]},
     ext_modules=[
         BazelExtension(
@@ -94,9 +94,7 @@ def create_parser():
         dest="utest",
         default=True,
         action="store_false",
-        help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(
-            report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS
-        ),
+        help=f"The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {report.UTEST_OPTIMAL_REPETITIONS}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {report.UTEST_MIN_REPETITIONS} repetitions were done.\nThis option can disable the U Test.",
     )
     alpha_default = 0.05
     utest.add_argument(
@@ -14,7 +14,7 @@ from numpy import array
 from scipy.stats import gmean, mannwhitneyu
 
 
-class BenchmarkColor(object):
+class BenchmarkColor:
     def __init__(self, name, code):
         self.name = name
         self.code = code
@@ -249,9 +249,7 @@ def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True):
     # We still got some results to show but issue a warning about it.
     if not utest["have_optimal_repetitions"]:
         dsc_color = BC_WARNING
-        dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
-            UTEST_OPTIMAL_REPETITIONS
-        )
+        dsc += f". WARNING: Results unreliable! {UTEST_OPTIMAL_REPETITIONS}+ repetitions recommended."
 
     special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"
 
@@ -260,7 +258,7 @@ def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True):
             use_color,
             special_str,
             BC_HEADER,
-            "{}{}".format(bc_name, UTEST_COL_NAME),
+            f"{bc_name}{UTEST_COL_NAME}",
             first_col_width,
             get_utest_color(utest["time_pvalue"]),
             utest["time_pvalue"],
@@ -285,7 +283,7 @@ def get_difference_report(json1, json2, utest=False):
     partitions = partition_benchmarks(json1, json2)
     for partition in partitions:
         benchmark_name = partition[0][0]["name"]
-        label = partition[0][0]["label"] if "label" in partition[0][0] else ""
+        label = partition[0][0].get("label", "")
         time_unit = partition[0][0]["time_unit"]
         measurements = []
         utest_results = {}
@@ -329,11 +327,7 @@ def get_difference_report(json1, json2, utest=False):
         # time units which are not compatible with other time units in the
         # benchmark suite.
         if measurements:
-            run_type = (
-                partition[0][0]["run_type"]
-                if "run_type" in partition[0][0]
-                else ""
-            )
+            run_type = partition[0][0].get("run_type", "")
             aggregate_name = (
                 partition[0][0]["aggregate_name"]
                 if run_type == "aggregate"
@@ -464,7 +458,7 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
             os.path.dirname(os.path.realpath(__file__)), "Inputs"
         )
         testOutput = os.path.join(testInputs, "test3_run0.json")
-        with open(testOutput, "r") as f:
+        with open(testOutput) as f:
             json = json.load(f)
         return json
 
@@ -480,8 +474,8 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
         print("\n")
         print("\n".join(output_lines))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in range(0, len(output_lines)):
-            self.assertEqual(expect_lines[i], output_lines[i])
+        for i, output_line in enumerate(output_lines):
+            self.assertEqual(expect_lines[i], output_line)
 
 
 class TestReportDifference(unittest.TestCase):
@@ -495,9 +489,9 @@ class TestReportDifference(unittest.TestCase):
         )
         testOutput1 = os.path.join(testInputs, "test1_run1.json")
         testOutput2 = os.path.join(testInputs, "test1_run2.json")
-        with open(testOutput1, "r") as f:
+        with open(testOutput1) as f:
             json1 = json.load(f)
-        with open(testOutput2, "r") as f:
+        with open(testOutput2) as f:
             json2 = json.load(f)
         return json1, json2
 
@@ -584,8 +578,8 @@ class TestReportDifference(unittest.TestCase):
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(" ") if x]
+        for i, output_line in enumerate(output_lines):
+            parts = [x for x in output_line.split(" ") if x]
             self.assertEqual(len(parts), 7)
             self.assertEqual(expect_lines[i], parts)
 
@@ -819,7 +813,9 @@ class TestReportDifference(unittest.TestCase):
             },
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(self.json_diff_report, expected_output):
+        for out, expected in zip(
+            self.json_diff_report, expected_output, strict=True
+        ):
             self.assertEqual(out["name"], expected["name"])
             self.assertEqual(out["label"], expected["label"])
             self.assertEqual(out["time_unit"], expected["time_unit"])
@@ -837,7 +833,7 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
             os.path.dirname(os.path.realpath(__file__)), "Inputs"
         )
         testOutput = os.path.join(testInputs, "test2_run.json")
-        with open(testOutput, "r") as f:
+        with open(testOutput) as f:
             json = json.load(f)
         return json
 
@@ -861,8 +857,8 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(" ") if x]
+        for i, output_line in enumerate(output_lines):
+            parts = [x for x in output_line.split(" ") if x]
             self.assertEqual(len(parts), 7)
             self.assertEqual(expect_lines[i], parts)
 
@@ -947,7 +943,9 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
             },
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(self.json_diff_report, expected_output):
+        for out, expected in zip(
+            self.json_diff_report, expected_output, strict=True
+        ):
             self.assertEqual(out["name"], expected["name"])
             self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
@@ -965,9 +963,9 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
         )
         testOutput1 = os.path.join(testInputs, "test3_run0.json")
         testOutput2 = os.path.join(testInputs, "test3_run1.json")
-        with open(testOutput1, "r") as f:
+        with open(testOutput1) as f:
             json1 = json.load(f)
-        with open(testOutput2, "r") as f:
+        with open(testOutput2) as f:
             json2 = json.load(f)
         return json1, json2
 
@@ -1025,8 +1023,8 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(" ") if x]
+        for i, output_line in enumerate(output_lines):
+            parts = [x for x in output_line.split(" ") if x]
             self.assertEqual(expect_lines[i], parts)
 
     def test_json_diff_report_pretty_printing_aggregates_only(self):
@@ -1081,8 +1079,8 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(" ") if x]
+        for i, output_line in enumerate(output_lines):
+            parts = [x for x in output_line.split(" ") if x]
             self.assertEqual(expect_lines[i], parts)
 
     def test_json_diff_report(self):
@@ -1190,7 +1188,9 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
             },
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(self.json_diff_report, expected_output):
+        for out, expected in zip(
+            self.json_diff_report, expected_output, strict=True
+        ):
             self.assertEqual(out["name"], expected["name"])
             self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
@@ -1210,9 +1210,9 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
         )
         testOutput1 = os.path.join(testInputs, "test3_run0.json")
         testOutput2 = os.path.join(testInputs, "test3_run1.json")
-        with open(testOutput1, "r") as f:
+        with open(testOutput1) as f:
             json1 = json.load(f)
-        with open(testOutput2, "r") as f:
+        with open(testOutput2) as f:
             json2 = json.load(f)
         return json1, json2
 
@@ -1270,8 +1270,8 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(" ") if x]
+        for i, output_line in enumerate(output_lines):
+            parts = [x for x in output_line.split(" ") if x]
             self.assertEqual(expect_lines[i], parts)
 
     def test_json_diff_report(self):
@@ -1380,7 +1380,9 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
             },
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(self.json_diff_report, expected_output):
+        for out, expected in zip(
+            self.json_diff_report, expected_output, strict=True
+        ):
             self.assertEqual(out["name"], expected["name"])
             self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
@@ -1398,9 +1400,9 @@ class TestReportDifferenceForPercentageAggregates(unittest.TestCase):
         )
         testOutput1 = os.path.join(testInputs, "test4_run0.json")
         testOutput2 = os.path.join(testInputs, "test4_run1.json")
-        with open(testOutput1, "r") as f:
+        with open(testOutput1) as f:
             json1 = json.load(f)
-        with open(testOutput2, "r") as f:
+        with open(testOutput2) as f:
             json2 = json.load(f)
         return json1, json2
 
@@ -1416,8 +1418,8 @@ class TestReportDifferenceForPercentageAggregates(unittest.TestCase):
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(" ") if x]
+        for i, output_line in enumerate(output_lines):
+            parts = [x for x in output_line.split(" ") if x]
             self.assertEqual(expect_lines[i], parts)
 
     def test_json_diff_report(self):
@@ -1439,7 +1441,9 @@ class TestReportDifferenceForPercentageAggregates(unittest.TestCase):
             }
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(self.json_diff_report, expected_output):
+        for out, expected in zip(
+            self.json_diff_report, expected_output, strict=True
+        ):
             self.assertEqual(out["name"], expected["name"])
             self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
@@ -1456,7 +1460,7 @@ class TestReportSorting(unittest.TestCase):
             os.path.dirname(os.path.realpath(__file__)), "Inputs"
         )
         testOutput = os.path.join(testInputs, "test4_run.json")
-        with open(testOutput, "r") as f:
+        with open(testOutput) as f:
             json = json.load(f)
         return json
 
@@ -1480,13 +1484,15 @@ class TestReportSorting(unittest.TestCase):
             "88 family 1 instance 1 aggregate",
         ]
 
-        for n in range(len(self.json["benchmarks"]) ** 2):
+        for _n in range(len(self.json["benchmarks"]) ** 2):
             random.shuffle(self.json["benchmarks"])
             sorted_benchmarks = util.sort_benchmark_results(self.json)[
                 "benchmarks"
             ]
             self.assertEqual(len(expected_names), len(sorted_benchmarks))
-            for out, expected in zip(sorted_benchmarks, expected_names):
+            for out, expected in zip(
+                sorted_benchmarks, expected_names, strict=True
+            ):
                 self.assertEqual(out["name"], expected)
 
 
@@ -1503,12 +1509,12 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly2(
         )
         testOutput1 = os.path.join(testInputs, "test5_run0.json")
         testOutput2 = os.path.join(testInputs, "test5_run1.json")
-        with open(testOutput1, "r") as f:
+        with open(testOutput1) as f:
             json1 = json.load(f)
             json1["benchmarks"] = [
                 json1["benchmarks"][0] for i in range(1000)
             ]
-        with open(testOutput2, "r") as f:
+        with open(testOutput2) as f:
             json2 = json.load(f)
             json2["benchmarks"] = [
                 json2["benchmarks"][0] for i in range(1000)
@@ -1535,8 +1541,8 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly2(
         )
         output_lines = output_lines_with_header[2:]
         found = False
-        for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(" ") if x]
+        for output_line in output_lines:
+            parts = [x for x in output_line.split(" ") if x]
             found = expect_line == parts
             if found:
                 break
@@ -1578,7 +1584,9 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly2(
             },
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(self.json_diff_report, expected_output):
+        for out, expected in zip(
+            self.json_diff_report, expected_output, strict=True
+        ):
             self.assertEqual(out["name"], expected["name"])
             self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
@@ -1602,7 +1610,7 @@ def assert_utest(unittest_instance, lhs, rhs):
 
 
 def assert_measurements(unittest_instance, lhs, rhs):
-    for m1, m2 in zip(lhs["measurements"], rhs["measurements"]):
+    for m1, m2 in zip(lhs["measurements"], rhs["measurements"], strict=False):
         unittest_instance.assertEqual(m1["real_time"], m2["real_time"])
         unittest_instance.assertEqual(m1["cpu_time"], m2["cpu_time"])
         # m1['time'] and m1['cpu'] hold values which are being calculated,
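Note that the test loops above pass strict=True while assert_measurements passes strict=False, presumably so that two runs with different numbers of measurements keep the old truncating behavior. A minimal sketch of the difference, on made-up lists:

    a = [1, 2, 3]  # made-up data
    b = [10, 20]

    assert list(zip(a, b, strict=False)) == [(1, 10), (2, 20)]  # truncates silently

    try:
        list(zip(a, b, strict=True))
    except ValueError:
        print("length mismatch detected")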
@@ -46,7 +46,7 @@ def is_json_file(filename):
     'False' otherwise.
     """
     try:
-        with open(filename, "r") as f:
+        with open(filename) as f:
             json.load(f)
             return True
     except BaseException:
@@ -97,7 +97,8 @@ def find_benchmark_flag(prefix, benchmark_flags):
     if it is found return the arg it specifies. If specified more than once the
     last value is returned. If the flag is not found None is returned.
     """
-    assert prefix.startswith("--") and prefix.endswith("=")
+    assert prefix.startswith("--")
+    assert prefix.endswith("=")
     result = None
     for f in benchmark_flags:
         if f.startswith(prefix):
@@ -110,7 +111,8 @@ def remove_benchmark_flags(prefix, benchmark_flags):
     Return a new list containing the specified benchmark_flags except those
     with the specified prefix.
     """
-    assert prefix.startswith("--") and prefix.endswith("=")
+    assert prefix.startswith("--")
+    assert prefix.endswith("=")
     return [f for f in benchmark_flags if not f.startswith(prefix)]
 
 
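Splitting the compound assertions in the two hunks above means a failing run points at the exact condition that was violated. A small sketch using a hypothetical check_prefix helper (not part of the repository):

    def check_prefix(prefix: str) -> None:
        # Two separate asserts: the traceback now names the failing condition.
        assert prefix.startswith("--")
        assert prefix.endswith("=")

    check_prefix("--benchmark_out=")   # passes both checks
    # check_prefix("--benchmark_out")  # would fail on the endswith("=") line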
@@ -133,17 +135,16 @@ def load_benchmark_results(fname, benchmark_filter):
         name = benchmark.get("run_name", None) or benchmark["name"]
         return re.search(benchmark_filter, name) is not None
 
-    with open(fname, "r") as f:
+    with open(fname) as f:
         results = json.load(f)
-        if "context" in results:
-            if "json_schema_version" in results["context"]:
-                json_schema_version = results["context"]["json_schema_version"]
-                if json_schema_version != 1:
-                    print(
-                        "In %s, got unnsupported JSON schema version: %i, expected 1"
-                        % (fname, json_schema_version)
-                    )
-                    sys.exit(1)
+        if "json_schema_version" in results.get("context", {}):
+            json_schema_version = results["context"]["json_schema_version"]
+            if json_schema_version != 1:
+                print(
+                    "In %s, got unnsupported JSON schema version: %i, expected 1"
+                    % (fname, json_schema_version)
+                )
+                sys.exit(1)
         if "benchmarks" in results:
             results["benchmarks"] = list(
                 filter(benchmark_wanted, results["benchmarks"])
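The nested membership checks above collapse into one test by reading the optional "context" object with a dict.get default. A minimal sketch with a made-up results dict (not taken from the repository's test data):

    results = {"context": {"json_schema_version": 1}}  # made-up input

    # Before: two nested membership tests.
    if "context" in results:
        if "json_schema_version" in results["context"]:
            old_style = results["context"]["json_schema_version"]

    # After: .get() with an empty-dict default folds both tests into one.
    if "json_schema_version" in results.get("context", {}):
        new_style = results["context"]["json_schema_version"]

    assert old_style == new_style == 1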
@@ -157,9 +158,7 @@ def sort_benchmark_results(result):
     # From inner key to the outer key!
     benchmarks = sorted(
         benchmarks,
-        key=lambda benchmark: benchmark["repetition_index"]
-        if "repetition_index" in benchmark
-        else -1,
+        key=lambda benchmark: benchmark.get("repetition_index", -1),
     )
     benchmarks = sorted(
         benchmarks,
@@ -169,15 +168,11 @@ def sort_benchmark_results(result):
     )
     benchmarks = sorted(
         benchmarks,
-        key=lambda benchmark: benchmark["per_family_instance_index"]
-        if "per_family_instance_index" in benchmark
-        else -1,
+        key=lambda benchmark: benchmark.get("per_family_instance_index", -1),
     )
     benchmarks = sorted(
         benchmarks,
-        key=lambda benchmark: benchmark["family_index"]
-        if "family_index" in benchmark
-        else -1,
+        key=lambda benchmark: benchmark.get("family_index", -1),
     )
 
     result["benchmarks"] = benchmarks
@@ -197,11 +192,12 @@ def run_benchmark(exe_name, benchmark_flags):
         is_temp_output = True
         thandle, output_name = tempfile.mkstemp()
         os.close(thandle)
-        benchmark_flags = list(benchmark_flags) + [
-            "--benchmark_out=%s" % output_name
+        benchmark_flags = [
+            *list(benchmark_flags),
+            "--benchmark_out=%s" % output_name,
         ]
 
-    cmd = [exe_name] + benchmark_flags
+    cmd = [exe_name, *benchmark_flags]
     print("RUNNING: %s" % " ".join(cmd))
     exitCode = subprocess.call(cmd)
     if exitCode != 0:
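Building the command line by iterable unpacking instead of list concatenation, as in the hunk above, produces the same list. A one-line sketch with made-up values (the binary path and flags are hypothetical):

    exe_name = "./benchmark_binary"  # hypothetical path
    benchmark_flags = ["--benchmark_filter=BM_Foo", "--benchmark_out=out.json"]

    assert [exe_name] + benchmark_flags == [exe_name, *benchmark_flags]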
@@ -73,16 +73,16 @@ def process_identifiers(line):
     parts = re.split(r"([a-zA-Z0-9_]+)", line)
     new_line = ""
     for tk in parts:
-        if is_identifier(tk):
-            if tk.startswith("__Z"):
-                tk = tk[1:]
-            elif (
+        if is_identifier(tk) and (
+            tk.startswith("__Z")
+            or (
                 tk.startswith("_")
                 and len(tk) > 1
                 and tk[1].isalpha()
                 and tk[1] != "Z"
-            ):
-                tk = tk[1:]
+            )
+        ):
+            tk = tk[1:]
         new_line += tk
     return new_line
 
@@ -148,7 +148,7 @@ def main():
         print("ERROR: input file '%s' does not exist" % input)
         sys.exit(1)
 
-    with open(input, "r") as f:
+    with open(input) as f:
         contents = f.read()
     new_contents = process_asm(contents)
     with open(output, "w") as f: