
ruff rule E501: Fix long lines in Python code ()

* ruff rule E501: Fix long lines in Python code

* Add missing space

---------

Co-authored-by: dominic <510002+dmah42@users.noreply.github.com>
Christian Clauss 2025-01-22 17:43:07 +01:00 committed by GitHub
parent 6f21075d9c
commit 3d027d7e38
GPG Key ID: B5690EEEBB952194
7 changed files with 60 additions and 26 deletions
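
Background on the fix pattern: ruff's E501 rule flags source lines longer than the configured line-length limit. Throughout this commit the fix is to re-wrap long comments and docstrings, and to split long string literals into adjacent literals inside parentheses, which Python concatenates at compile time, so the runtime strings are unchanged. Violations of this kind can usually be surfaced with something like `ruff check --select E501 .`. A minimal illustration of the splitting pattern (not taken from the repository):

    # Adjacent string literals inside parentheses are joined into one string
    # object at compile time; the two spellings below are identical at runtime.
    one_line = "a fairly long message that would normally exceed the line limit"
    wrapped = (
        "a fairly long message that would normally"
        " exceed the line limit"
    )
    assert one_line == wrapped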


@@ -83,10 +83,10 @@ def IsHeaderFile(filename):
 def GetCompilationInfoForFile(filename):
-    # The compilation_commands.json file generated by CMake does not have entries
-    # for header files. So we do our best by asking the db for flags for a
-    # corresponding source file, if any. If one exists, the flags for that file
-    # should be good enough.
+    # The compilation_commands.json file generated by CMake does not have
+    # entries for header files. So we do our best by asking the db for flags for
+    # a corresponding source file, if any. If one exists, the flags for that
+    # file should be good enough.
     if IsHeaderFile(filename):
         basename = os.path.splitext(filename)[0]
         for extension in SOURCE_EXTENSIONS:


@@ -60,7 +60,8 @@ class __OptionMaker:
     """

     class Options:
-        """Pure data class to store options calls, along with the benchmarked function."""
+        """Pure data class to store options calls, along with the benchmarked
+        function."""

         def __init__(self, func):
             self.func = func

@@ -83,8 +84,8 @@ class __OptionMaker:
             def __decorator(func_or_options):
                 options = self.make(func_or_options)
                 options.builder_calls.append((builder_name, args, kwargs))
-                # The decorator returns Options so it is not technically a decorator
-                # and needs a final call to @register
+                # The decorator returns Options so it is not technically a
+                # decorator and needs a final call to @register
                 return options

             return __decorator

@@ -93,8 +94,8 @@ class __OptionMaker:

 # Alias for nicer API.
-# We have to instantiate an object, even if stateless, to be able to use __getattr__
-# on option.range
+# We have to instantiate an object, even if stateless, to be able to use
+# __getattr__ on option.range
 option = __OptionMaker()

@@ -104,8 +105,8 @@ def register(undefined=None, *, name=None):
         # Decorator is called without parenthesis so we return a decorator
         return lambda f: register(f, name=name)

-    # We have either the function to benchmark (simple case) or an instance of Options
-    # (@option._ case).
+    # We have either the function to benchmark (simple case) or an instance of
+    # Options (@option._ case).
     options = __OptionMaker.make(undefined)

     if name is None:
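
For context on what `__OptionMaker` and `register` back, the decorator-based API of these Python bindings is typically used roughly as follows. This is only a minimal sketch modeled on the bindings' example; the benchmark name `empty_loop` is made up:

    import google_benchmark as benchmark

    @benchmark.register
    def empty_loop(state):
        # The framework drives the loop and times each iteration of the body.
        while state:
            pass

    if __name__ == "__main__":
        benchmark.main()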


@@ -13,7 +13,8 @@
 # limitations under the License.
 """Example of Python using C++ benchmark framework.

-To run this example, you must first install the `google_benchmark` Python package.
+To run this example, you must first install the `google_benchmark` Python
+package.

 To install using `setup.py`, download and extract the `google_benchmark` source.
 In the extracted directory, execute:


@@ -70,7 +70,6 @@ target-version = "py311"
 # Enable pycodestyle (`E`, `W`), Pyflakes (`F`), and isort (`I`) codes by default.
 select = ["ASYNC", "B", "C4", "C90", "E", "F", "I", "PERF", "PIE", "PT018", "RUF", "SIM", "UP", "W"]
 ignore = [
-    "E501", # line too long
     "PLW2901", # redefined-loop-name
     "UP031", # printf-string-formatting
 ]
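
With "E501" deleted from the ignore list above, the line-length check that is already part of the enabled pycodestyle (`E`) rules takes effect again, so ruff will report any future over-long lines instead of silently accepting them.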


@@ -85,7 +85,10 @@ def create_parser():
         "-d",
         "--dump_to_json",
         dest="dump_to_json",
-        help="Additionally, dump benchmark comparison output to this file in JSON format.",
+        help=(
+            "Additionally, dump benchmark comparison output to this file in"
+            " JSON format."
+        ),
     )

     utest = parser.add_argument_group()
@@ -94,7 +97,16 @@ def create_parser():
         dest="utest",
         default=True,
         action="store_false",
-        help=f"The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {report.UTEST_OPTIMAL_REPETITIONS}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {report.UTEST_MIN_REPETITIONS} repetitions were done.\nThis option can disable the U Test.",
+        help=(
+            "The tool can do a two-tailed Mann-Whitney U test with the null"
+            " hypothesis that it is equally likely that a randomly selected"
+            " value from one sample will be less than or greater than a"
+            " randomly selected value from a second sample.\nWARNING: requires"
+            f" **LARGE** (no less than {report.UTEST_OPTIMAL_REPETITIONS})"
+            " number of repetitions to be meaningful!\nThe test is being done"
+            f" by default, if at least {report.UTEST_MIN_REPETITIONS}"
+            " repetitions were done.\nThis option can disable the U Test."
+        ),
     )
     alpha_default = 0.05
     utest.add_argument(
@@ -103,7 +115,9 @@ def create_parser():
         default=alpha_default,
         type=float,
         help=(
-            "significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)"
+            "significance level alpha. if the calculated p-value is below this"
+            " value, then the result is said to be statistically significant"
+            " and the null hypothesis is rejected.\n(default: %0.4f)"
         )
         % alpha_default,
     )
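
One detail in the re-wrapped U test help string above: only the pieces that interpolate values keep the `f` prefix; adjacent plain strings and f-strings are concatenated just the same. A small standalone illustration, where the repetition count is a made-up value rather than the one from report.py:

    UTEST_MIN_REPETITIONS = 2  # hypothetical value, for illustration only
    msg = (
        "The test is being done by default, if at least"
        f" {UTEST_MIN_REPETITIONS}"
        " repetitions were done."
    )
    # -> "The test is being done by default, if at least 2 repetitions were done."
    print(msg)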
@@ -114,7 +128,10 @@ def create_parser():
     parser_a = subparsers.add_parser(
         "benchmarks",
-        help="The most simple use-case, compare all the output of these two benchmarks",
+        help=(
+            "The most simple use-case, compare all the output of these two"
+            " benchmarks"
+        ),
     )

     baseline = parser_a.add_argument_group("baseline", "The benchmark baseline")
     baseline.add_argument(
@@ -178,7 +195,10 @@ def create_parser():
     parser_c = subparsers.add_parser(
         "benchmarksfiltered",
-        help="Compare filter one of first benchmark with filter two of the second benchmark",
+        help=(
+            "Compare filter one of first benchmark with filter two of the"
+            " second benchmark"
+        ),
     )

     baseline = parser_c.add_argument_group("baseline", "The benchmark baseline")
     baseline.add_argument(
@@ -203,7 +223,10 @@ def create_parser():
         metavar="test_contender",
         type=argparse.FileType("r"),
         nargs=1,
-        help="The second benchmark executable or JSON output file, that will be compared against the baseline",
+        help=(
+            "The second benchmark executable or JSON output file, that will be"
+            " compared against the baseline"
+        ),
     )
     contender.add_argument(
         "filter_contender",


@@ -249,7 +249,10 @@ def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True):
     # We still got some results to show but issue a warning about it.
     if not utest["have_optimal_repetitions"]:
         dsc_color = BC_WARNING
-        dsc += f". WARNING: Results unreliable! {UTEST_OPTIMAL_REPETITIONS}+ repetitions recommended."
+        dsc += (
+            f". WARNING: Results unreliable! {UTEST_OPTIMAL_REPETITIONS}+"
+            " repetitions recommended."
+        )

     special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"
@@ -397,12 +400,17 @@ def print_difference_report(
     first_col_width = find_longest_name(json_diff_report)
     first_col_width = max(first_col_width, len("Benchmark"))
     first_col_width += len(UTEST_COL_NAME)
-    first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
-        "Benchmark", 12 + first_col_width
+    fmt_str = (
+        "{:<{}s}Time CPU Time Old Time New CPU Old"
+        " CPU New"
     )
+    first_line = fmt_str.format("Benchmark", 12 + first_col_width)
     output_strs = [first_line, "-" * len(first_line)]

-    fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
+    fmt_str = (
+        "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}"
+        "{endc}{:14.0f}{:14.0f}"
+    )
     for benchmark in json_diff_report:
         # *If* we were asked to only include aggregates,
         # and if it is non-aggregate, then don't print it.
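
Because the split is purely syntactic, the rebuilt `fmt_str` is byte-for-byte the same string as the old single-line literal. A quick check, copying the two spellings from the hunk above:

    single_line = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
    split = (
        "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}"
        "{endc}{:14.0f}{:14.0f}"
    )
    assert single_line == split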


@@ -1,4 +1,6 @@
-"""util.py - General utilities for running, loading, and processing benchmarks"""
+"""util.py - General utilities for running, loading, and processing
+benchmarks
+"""

 import json
 import os
@@ -141,8 +143,8 @@ def load_benchmark_results(fname, benchmark_filter):
     json_schema_version = results["context"]["json_schema_version"]
     if json_schema_version != 1:
         print(
-            "In %s, got unnsupported JSON schema version: %i, expected 1"
-            % (fname, json_schema_version)
+            f"In {fname}, got unnsupported JSON schema version:"
+            f" {json_schema_version}, expected 1"
        )
         sys.exit(1)
     if "benchmarks" in results: