Merge branch 'main' into fixsan

dominic 2025-01-30 11:52:41 +00:00 committed by GitHub
commit 20fcc9187a
17 changed files with 223 additions and 124 deletions

View File

@@ -17,7 +17,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-22.04, ubuntu-20.04, macos-latest]
+        os: [ubuntu-22.04, ubuntu-20.04, ubuntu-22.04-arm, macos-latest]
         build_type: ['Release', 'Debug']
         compiler: ['g++', 'clang++']
         lib: ['shared', 'static']

View File

@@ -11,7 +11,7 @@ repos:
         types_or: [ python, pyi ]
         args: [ "--ignore-missing-imports", "--scripts-are-modules" ]
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.9.1
+    rev: v0.9.2
     hooks:
       - id: ruff
         args: [ --fix, --exit-non-zero-on-fix ]

View File

@@ -83,10 +83,10 @@ def IsHeaderFile(filename):
 def GetCompilationInfoForFile(filename):
-    # The compilation_commands.json file generated by CMake does not have entries
-    # for header files. So we do our best by asking the db for flags for a
-    # corresponding source file, if any. If one exists, the flags for that file
-    # should be good enough.
+    # The compilation_commands.json file generated by CMake does not have
+    # entries for header files. So we do our best by asking the db for flags for
+    # a corresponding source file, if any. If one exists, the flags for that
+    # file should be good enough.
     if IsHeaderFile(filename):
         basename = os.path.splitext(filename)[0]
         for extension in SOURCE_EXTENSIONS:

View File

@@ -60,7 +60,8 @@ class __OptionMaker:
     """
     class Options:
-        """Pure data class to store options calls, along with the benchmarked function."""
+        """Pure data class to store options calls, along with the benchmarked
+        function."""
         def __init__(self, func):
             self.func = func
@@ -83,8 +84,8 @@
         def __decorator(func_or_options):
             options = self.make(func_or_options)
             options.builder_calls.append((builder_name, args, kwargs))
-            # The decorator returns Options so it is not technically a decorator
-            # and needs a final call to @register
+            # The decorator returns Options so it is not technically a
+            # decorator and needs a final call to @register
             return options
         return __decorator
@@ -93,8 +94,8 @@
 # Alias for nicer API.
-# We have to instantiate an object, even if stateless, to be able to use __getattr__
-# on option.range
+# We have to instantiate an object, even if stateless, to be able to use
+# __getattr__ on option.range
 option = __OptionMaker()
@@ -104,8 +105,8 @@ def register(undefined=None, *, name=None):
         # Decorator is called without parenthesis so we return a decorator
         return lambda f: register(f, name=name)
-    # We have either the function to benchmark (simple case) or an instance of Options
-    # (@option._ case).
+    # We have either the function to benchmark (simple case) or an instance of
+    # Options (@option._ case).
     options = __OptionMaker.make(undefined)
     if name is None:

View File

@@ -13,7 +13,8 @@
 # limitations under the License.
 """Example of Python using C++ benchmark framework.
-To run this example, you must first install the `google_benchmark` Python package.
+To run this example, you must first install the `google_benchmark` Python
+package.
 To install using `setup.py`, download and extract the `google_benchmark` source.
 In the extracted directory, execute:
@@ -57,7 +58,7 @@ def skipped(state):
         state.skip_with_error("some error")
         return  # NOTE: You must explicitly return, or benchmark will continue.
-    ...  # Benchmark code would be here.
+    # Benchmark code would be here.
@@ -78,7 +79,6 @@ def custom_counters(state):
     num_foo = 0.0
     while state:
         # Benchmark some code here
-        pass
         # Collect some custom metric named foo
         num_foo += 0.13

View File

@@ -68,9 +68,10 @@ target-version = "py311"
 [tool.ruff.lint]
 # Enable pycodestyle (`E`, `W`), Pyflakes (`F`), and isort (`I`) codes by default.
-select = ["E", "F", "I", "W"]
+select = ["ASYNC", "B", "C4", "C90", "E", "F", "I", "PERF", "PIE", "PT018", "RUF", "SIM", "UP", "W"]
 ignore = [
-    "E501", # line too long
+    "PLW2901", # redefined-loop-name
+    "UP031", # printf-string-formatting
 ]
 [tool.ruff.lint.isort]
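The wider `select` list above is what drives most of the mechanical Python rewrites in the tools/ and bindings/ files later in this diff (f-strings, dict.get(), underscore loop variables, explicit zip strictness). As a rough illustration only — the snippet below is not from the repository and its names are placeholders — these are the kinds of patterns the newly enabled rule families rewrite:

# Illustrative only: patterns flagged by the newly enabled ruff families.
record = {"name": "BM_demo", "time_unit": "ns"}

# SIM401: prefer dict.get() over an if/else membership test.
label = record.get("label", "")  # was: record["label"] if "label" in record else ""

# UP032: prefer f-strings over str.format().
banner = f"{record['name']} [{record['time_unit']}]"  # was: "{} [{}]".format(...)

# B007: underscore-prefix loop variables the body never reads.
for _n in range(3):  # was: for n in range(3):
    record["runs"] = record.get("runs", 0) + 1

# B905: make zip() truncation behaviour explicit (Python 3.10+).
pairs = list(zip([1, 2, 3], [4, 5, 6], strict=True))

print(label, banner, record["runs"], pairs)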

View File

@@ -4,8 +4,9 @@ import platform
 import re
 import shutil
 import sys
+from collections.abc import Generator
 from pathlib import Path
-from typing import Any, Generator
+from typing import Any
 import setuptools
 from setuptools.command import build_ext
@@ -86,15 +87,14 @@ class BuildBazelExtension(build_ext.build_ext):
         This is done in the ``bazel_build`` method, so it's not necessary to
         do again in the `build_ext` base class.
         """
-        pass
-    def bazel_build(self, ext: BazelExtension) -> None:
+    def bazel_build(self, ext: BazelExtension) -> None:  # noqa: C901
         """Runs the bazel build to create the package."""
         temp_path = Path(self.build_temp)
         # We round to the minor version, which makes rules_python
         # look up the latest available patch version internally.
-        python_version = "{0}.{1}".format(*sys.version_info[:2])
+        python_version = "{}.{}".format(*sys.version_info[:2])
         bazel_argv = [
             "bazel",
@@ -142,9 +142,7 @@
                     # we do not want the bare .so file included
                     # when building for ABI3, so we require a
                     # full and exact match on the file extension.
-                    if "".join(fp.suffixes) == suffix:
-                        should_copy = True
-                    elif fp.suffix == ".pyi":
+                    if "".join(fp.suffixes) == suffix or fp.suffix == ".pyi":
                         should_copy = True
                     elif Path(root) == srcdir and f == "py.typed":
                         # copy py.typed, but only at the package root.
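A quick aside on the merged suffix check above: Path.suffix only looks at the last extension component, while "".join(Path.suffixes) keeps the full chain, which is what lets the build copy the ABI3 artifact and skip a bare .so. A minimal sketch (the file names are made up for illustration):

from pathlib import Path

abi3_lib = Path("_benchmark.abi3.so")   # ABI3 extension artifact
plain_lib = Path("_benchmark.so")       # bare .so we do not want to copy

print(abi3_lib.suffix)                   # ".so" -- would also match plain_lib
print("".join(abi3_lib.suffixes))        # ".abi3.so" -- exact match on the full extension
print("".join(plain_lib.suffixes) == ".abi3.so")  # False, so the bare .so is skipped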
@@ -155,7 +153,7 @@
 setuptools.setup(
-    cmdclass=dict(build_ext=BuildBazelExtension),
+    cmdclass={"build_ext": BuildBazelExtension},
     package_data={"google_benchmark": ["py.typed", "*.pyi"]},
     ext_modules=[
         BazelExtension(

View File

@@ -438,9 +438,7 @@ MemoryManager::Result* BenchmarkRunner::RunMemoryManager(
   return memory_result;
 }
-void BenchmarkRunner::RunProfilerManager() {
-  // TODO: Provide a way to specify the number of iterations.
-  IterationCount profile_iterations = 1;
+void BenchmarkRunner::RunProfilerManager(IterationCount profile_iterations) {
   std::unique_ptr<internal::ThreadManager> manager;
   manager.reset(new internal::ThreadManager(1));
   b.Setup();
@@ -507,7 +505,10 @@ void BenchmarkRunner::DoOneRepetition() {
   }
   if (profiler_manager != nullptr) {
-    RunProfilerManager();
+    // We want to externally profile the benchmark for the same number of
+    // iterations because, for example, if we're tracing the benchmark then we
+    // want trace data to reasonably match PMU data.
+    RunProfilerManager(iters);
   }
   // Ok, now actually report.

View File

@@ -19,7 +19,6 @@
 #include <vector>
 #include "benchmark_api_internal.h"
-#include "internal_macros.h"
 #include "perf_counters.h"
 #include "thread_manager.h"
@@ -109,7 +108,7 @@ class BenchmarkRunner {
   MemoryManager::Result* RunMemoryManager(IterationCount memory_iterations);
-  void RunProfilerManager();
+  void RunProfilerManager(IterationCount profile_iterations);
   IterationCount PredictNumItersNeeded(const IterationResults& i) const;

View File

@@ -9,7 +9,6 @@
 #include "benchmark/benchmark.h"
 #include "benchmark/export.h"
 #include "check.h"
-#include "internal_macros.h"
 namespace benchmark {

View File

@@ -195,6 +195,9 @@ benchmark_add_test(NAME memory_manager_test COMMAND memory_manager_test --benchm
 compile_output_test(profiler_manager_test)
 benchmark_add_test(NAME profiler_manager_test COMMAND profiler_manager_test --benchmark_min_time=0.01s)
+compile_benchmark_test(profiler_manager_iterations_test)
+benchmark_add_test(NAME profiler_manager_iterations COMMAND profiler_manager_iterations_test)
 # MSVC does not allow to set the language standard to C++98/03.
 if(NOT (MSVC OR CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC"))
   compile_benchmark_test(cxx03_test)

View File

@@ -0,0 +1,61 @@
#include <cassert>
#include <cstdlib>
#include <memory>
#include <vector>
#include "benchmark/benchmark.h"
// Tests that we can specify the number of profiler iterations with
// --benchmark_min_time=<NUM>x.
namespace {
int iteration_count = 0;
int end_profiler_iteration_count = 0;
class TestProfilerManager : public benchmark::ProfilerManager {
void AfterSetupStart() override { iteration_count = 0; }
void BeforeTeardownStop() override {
end_profiler_iteration_count = iteration_count;
}
};
class NullReporter : public benchmark::BenchmarkReporter {
public:
bool ReportContext(const Context& /*context*/) override { return true; }
void ReportRuns(const std::vector<Run>& /* report */) override {}
};
} // end namespace
static void BM_MyBench(benchmark::State& state) {
for (auto s : state) {
++iteration_count;
}
}
BENCHMARK(BM_MyBench);
int main(int argc, char** argv) {
// Make a fake argv and append the new --benchmark_profiler_iterations=<foo>
// to it.
int fake_argc = argc + 1;
const char** fake_argv = new const char*[static_cast<size_t>(fake_argc)];
for (int i = 0; i < argc; ++i) fake_argv[i] = argv[i];
fake_argv[argc] = "--benchmark_min_time=4x";
std::unique_ptr<benchmark::ProfilerManager> pm(new TestProfilerManager());
benchmark::RegisterProfilerManager(pm.get());
benchmark::Initialize(&fake_argc, const_cast<char**>(fake_argv));
NullReporter null_reporter;
const size_t returned_count =
benchmark::RunSpecifiedBenchmarks(&null_reporter, "BM_MyBench");
assert(returned_count == 1);
// Check the executed iters.
assert(end_profiler_iteration_count == 4);
benchmark::RegisterProfilerManager(nullptr);
delete[] fake_argv;
return 0;
}

View File

@@ -1,5 +1,6 @@
 // FIXME: WIP
+#include <cassert>
 #include <memory>
 #include "benchmark/benchmark.h"

View File

@@ -85,7 +85,10 @@ def create_parser():
         "-d",
         "--dump_to_json",
         dest="dump_to_json",
-        help="Additionally, dump benchmark comparison output to this file in JSON format.",
+        help=(
+            "Additionally, dump benchmark comparison output to this file in"
+            " JSON format."
+        ),
     )
     utest = parser.add_argument_group()
@@ -94,8 +97,15 @@
         dest="utest",
         default=True,
         action="store_false",
-        help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(
-            report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS
+        help=(
+            "The tool can do a two-tailed Mann-Whitney U test with the null"
+            " hypothesis that it is equally likely that a randomly selected"
+            " value from one sample will be less than or greater than a"
+            " randomly selected value from a second sample.\nWARNING: requires"
+            f" **LARGE** (no less than {report.UTEST_OPTIMAL_REPETITIONS})"
+            " number of repetitions to be meaningful!\nThe test is being done"
+            f" by default, if at least {report.UTEST_MIN_REPETITIONS}"
+            " repetitions were done.\nThis option can disable the U Test."
         ),
     )
     alpha_default = 0.05
@@ -105,7 +115,9 @@
         default=alpha_default,
         type=float,
         help=(
-            "significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)"
+            "significance level alpha. if the calculated p-value is below this"
+            " value, then the result is said to be statistically significant"
+            " and the null hypothesis is rejected.\n(default: %0.4f)"
         )
         % alpha_default,
     )
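For readers who land on these reworked help strings: the comparison they describe is a two-sided Mann-Whitney U test over the per-repetition timings, with the p-value checked against the significance level alpha. A minimal, self-contained sketch with synthetic numbers (not repository code; report.py performs the equivalent check on real benchmark timings):

from scipy.stats import mannwhitneyu

# Made-up per-repetition timings for a baseline and a contender run.
baseline_times = [102.1, 99.8, 101.5, 100.9, 103.2, 98.7, 100.4, 101.1, 99.9]
contender_times = [96.3, 95.1, 97.4, 94.8, 96.9, 95.7, 96.0, 94.5, 97.1]

alpha = 0.05  # same default the --alpha flag above exposes
result = mannwhitneyu(baseline_times, contender_times, alternative="two-sided")
if result.pvalue < alpha:
    print(f"statistically significant difference (p = {result.pvalue:.4f})")
else:
    print(f"cannot reject the null hypothesis (p = {result.pvalue:.4f})")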
@@ -116,7 +128,10 @@
     parser_a = subparsers.add_parser(
         "benchmarks",
-        help="The most simple use-case, compare all the output of these two benchmarks",
+        help=(
+            "The most simple use-case, compare all the output of these two"
+            " benchmarks"
+        ),
     )
     baseline = parser_a.add_argument_group("baseline", "The benchmark baseline")
     baseline.add_argument(
@@ -180,7 +195,10 @@
     parser_c = subparsers.add_parser(
         "benchmarksfiltered",
-        help="Compare filter one of first benchmark with filter two of the second benchmark",
+        help=(
+            "Compare filter one of first benchmark with filter two of the"
+            " second benchmark"
+        ),
     )
     baseline = parser_c.add_argument_group("baseline", "The benchmark baseline")
     baseline.add_argument(
@@ -205,7 +223,10 @@
         metavar="test_contender",
         type=argparse.FileType("r"),
         nargs=1,
-        help="The second benchmark executable or JSON output file, that will be compared against the baseline",
+        help=(
+            "The second benchmark executable or JSON output file, that will be"
+            " compared against the baseline"
+        ),
     )
     contender.add_argument(
         "filter_contender",

View File

@@ -14,7 +14,7 @@ from numpy import array
 from scipy.stats import gmean, mannwhitneyu
-class BenchmarkColor(object):
+class BenchmarkColor:
     def __init__(self, name, code):
         self.name = name
         self.code = code
@@ -249,8 +249,9 @@ def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True):
     # We still got some results to show but issue a warning about it.
     if not utest["have_optimal_repetitions"]:
         dsc_color = BC_WARNING
-        dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
-            UTEST_OPTIMAL_REPETITIONS
+        dsc += (
+            f". WARNING: Results unreliable! {UTEST_OPTIMAL_REPETITIONS}+"
+            " repetitions recommended."
         )
     special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"
@@ -260,7 +261,7 @@
         use_color,
         special_str,
         BC_HEADER,
-        "{}{}".format(bc_name, UTEST_COL_NAME),
+        f"{bc_name}{UTEST_COL_NAME}",
         first_col_width,
         get_utest_color(utest["time_pvalue"]),
         utest["time_pvalue"],
@@ -285,7 +286,7 @@ def get_difference_report(json1, json2, utest=False):
     partitions = partition_benchmarks(json1, json2)
     for partition in partitions:
         benchmark_name = partition[0][0]["name"]
-        label = partition[0][0]["label"] if "label" in partition[0][0] else ""
+        label = partition[0][0].get("label", "")
         time_unit = partition[0][0]["time_unit"]
         measurements = []
         utest_results = {}
@@ -329,11 +330,7 @@
         # time units which are not compatible with other time units in the
         # benchmark suite.
         if measurements:
-            run_type = (
-                partition[0][0]["run_type"]
-                if "run_type" in partition[0][0]
-                else ""
-            )
+            run_type = partition[0][0].get("run_type", "")
             aggregate_name = (
                 partition[0][0]["aggregate_name"]
                 if run_type == "aggregate"
@@ -403,12 +400,17 @@ def print_difference_report(
     first_col_width = find_longest_name(json_diff_report)
     first_col_width = max(first_col_width, len("Benchmark"))
     first_col_width += len(UTEST_COL_NAME)
-    first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
-        "Benchmark", 12 + first_col_width
+    fmt_str = (
+        "{:<{}s}Time CPU Time Old Time New CPU Old"
+        " CPU New"
     )
+    first_line = fmt_str.format("Benchmark", 12 + first_col_width)
     output_strs = [first_line, "-" * len(first_line)]
-    fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
+    fmt_str = (
+        "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}"
+        "{endc}{:14.0f}{:14.0f}"
+    )
     for benchmark in json_diff_report:
         # *If* we were asked to only include aggregates,
         # and if it is non-aggregate, then don't print it.
@@ -464,7 +466,7 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
                 os.path.dirname(os.path.realpath(__file__)), "Inputs"
             )
             testOutput = os.path.join(testInputs, "test3_run0.json")
-            with open(testOutput, "r") as f:
+            with open(testOutput) as f:
                 json = json.load(f)
             return json
@@ -480,8 +482,8 @@
         print("\n")
         print("\n".join(output_lines))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in range(0, len(output_lines)):
-            self.assertEqual(expect_lines[i], output_lines[i])
+        for i, output_line in enumerate(output_lines):
+            self.assertEqual(expect_lines[i], output_line)
 class TestReportDifference(unittest.TestCase):
@@ -495,9 +497,9 @@
             )
             testOutput1 = os.path.join(testInputs, "test1_run1.json")
             testOutput2 = os.path.join(testInputs, "test1_run2.json")
-            with open(testOutput1, "r") as f:
+            with open(testOutput1) as f:
                 json1 = json.load(f)
-            with open(testOutput2, "r") as f:
+            with open(testOutput2) as f:
                 json2 = json.load(f)
             return json1, json2
@@ -584,8 +586,8 @@
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(" ") if x]
+        for i, output_line in enumerate(output_lines):
+            parts = [x for x in output_line.split(" ") if x]
             self.assertEqual(len(parts), 7)
             self.assertEqual(expect_lines[i], parts)
@@ -819,7 +821,9 @@
             },
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(self.json_diff_report, expected_output):
+        for out, expected in zip(
+            self.json_diff_report, expected_output, strict=True
+        ):
             self.assertEqual(out["name"], expected["name"])
             self.assertEqual(out["label"], expected["label"])
             self.assertEqual(out["time_unit"], expected["time_unit"])
@@ -837,7 +841,7 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
                 os.path.dirname(os.path.realpath(__file__)), "Inputs"
             )
             testOutput = os.path.join(testInputs, "test2_run.json")
-            with open(testOutput, "r") as f:
+            with open(testOutput) as f:
                 json = json.load(f)
             return json
@@ -861,8 +865,8 @@
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(" ") if x]
+        for i, output_line in enumerate(output_lines):
+            parts = [x for x in output_line.split(" ") if x]
             self.assertEqual(len(parts), 7)
             self.assertEqual(expect_lines[i], parts)
@@ -947,7 +951,9 @@
             },
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(self.json_diff_report, expected_output):
+        for out, expected in zip(
+            self.json_diff_report, expected_output, strict=True
+        ):
             self.assertEqual(out["name"], expected["name"])
             self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
@@ -965,9 +971,9 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
             )
             testOutput1 = os.path.join(testInputs, "test3_run0.json")
             testOutput2 = os.path.join(testInputs, "test3_run1.json")
-            with open(testOutput1, "r") as f:
+            with open(testOutput1) as f:
                 json1 = json.load(f)
-            with open(testOutput2, "r") as f:
+            with open(testOutput2) as f:
                 json2 = json.load(f)
             return json1, json2
@@ -1025,8 +1031,8 @@
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(" ") if x]
+        for i, output_line in enumerate(output_lines):
+            parts = [x for x in output_line.split(" ") if x]
             self.assertEqual(expect_lines[i], parts)
     def test_json_diff_report_pretty_printing_aggregates_only(self):
@@ -1081,8 +1087,8 @@
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(" ") if x]
+        for i, output_line in enumerate(output_lines):
+            parts = [x for x in output_line.split(" ") if x]
             self.assertEqual(expect_lines[i], parts)
     def test_json_diff_report(self):
@@ -1190,7 +1196,9 @@
             },
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(self.json_diff_report, expected_output):
+        for out, expected in zip(
+            self.json_diff_report, expected_output, strict=True
+        ):
             self.assertEqual(out["name"], expected["name"])
             self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
@@ -1210,9 +1218,9 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
             )
             testOutput1 = os.path.join(testInputs, "test3_run0.json")
             testOutput2 = os.path.join(testInputs, "test3_run1.json")
-            with open(testOutput1, "r") as f:
+            with open(testOutput1) as f:
                 json1 = json.load(f)
-            with open(testOutput2, "r") as f:
+            with open(testOutput2) as f:
                 json2 = json.load(f)
             return json1, json2
@@ -1270,8 +1278,8 @@
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(" ") if x]
+        for i, output_line in enumerate(output_lines):
+            parts = [x for x in output_line.split(" ") if x]
             self.assertEqual(expect_lines[i], parts)
     def test_json_diff_report(self):
@@ -1380,7 +1388,9 @@
             },
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(self.json_diff_report, expected_output):
+        for out, expected in zip(
+            self.json_diff_report, expected_output, strict=True
+        ):
             self.assertEqual(out["name"], expected["name"])
             self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
@@ -1398,9 +1408,9 @@ class TestReportDifferenceForPercentageAggregates(unittest.TestCase):
             )
             testOutput1 = os.path.join(testInputs, "test4_run0.json")
             testOutput2 = os.path.join(testInputs, "test4_run1.json")
-            with open(testOutput1, "r") as f:
+            with open(testOutput1) as f:
                 json1 = json.load(f)
-            with open(testOutput2, "r") as f:
+            with open(testOutput2) as f:
                 json2 = json.load(f)
             return json1, json2
@@ -1416,8 +1426,8 @@
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(" ") if x]
+        for i, output_line in enumerate(output_lines):
+            parts = [x for x in output_line.split(" ") if x]
             self.assertEqual(expect_lines[i], parts)
     def test_json_diff_report(self):
@@ -1439,7 +1449,9 @@
             }
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(self.json_diff_report, expected_output):
+        for out, expected in zip(
+            self.json_diff_report, expected_output, strict=True
+        ):
             self.assertEqual(out["name"], expected["name"])
             self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
@@ -1456,7 +1468,7 @@ class TestReportSorting(unittest.TestCase):
             os.path.dirname(os.path.realpath(__file__)), "Inputs"
         )
         testOutput = os.path.join(testInputs, "test4_run.json")
-        with open(testOutput, "r") as f:
+        with open(testOutput) as f:
            json = json.load(f)
        return json
@@ -1480,13 +1492,15 @@
            "88 family 1 instance 1 aggregate",
        ]
-        for n in range(len(self.json["benchmarks"]) ** 2):
+        for _n in range(len(self.json["benchmarks"]) ** 2):
            random.shuffle(self.json["benchmarks"])
            sorted_benchmarks = util.sort_benchmark_results(self.json)[
                "benchmarks"
            ]
            self.assertEqual(len(expected_names), len(sorted_benchmarks))
-            for out, expected in zip(sorted_benchmarks, expected_names):
+            for out, expected in zip(
+                sorted_benchmarks, expected_names, strict=True
+            ):
                self.assertEqual(out["name"], expected)
@@ -1503,12 +1517,12 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly2(
             )
             testOutput1 = os.path.join(testInputs, "test5_run0.json")
             testOutput2 = os.path.join(testInputs, "test5_run1.json")
-            with open(testOutput1, "r") as f:
+            with open(testOutput1) as f:
                 json1 = json.load(f)
                 json1["benchmarks"] = [
                     json1["benchmarks"][0] for i in range(1000)
                 ]
-            with open(testOutput2, "r") as f:
+            with open(testOutput2) as f:
                 json2 = json.load(f)
                 json2["benchmarks"] = [
                     json2["benchmarks"][0] for i in range(1000)
@@ -1535,8 +1549,8 @@
         )
         output_lines = output_lines_with_header[2:]
         found = False
-        for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(" ") if x]
+        for output_line in output_lines:
+            parts = [x for x in output_line.split(" ") if x]
             found = expect_line == parts
             if found:
                 break
@@ -1578,7 +1592,9 @@
             },
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(self.json_diff_report, expected_output):
+        for out, expected in zip(
+            self.json_diff_report, expected_output, strict=True
+        ):
             self.assertEqual(out["name"], expected["name"])
             self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
@@ -1602,7 +1618,7 @@ def assert_utest(unittest_instance, lhs, rhs):
 def assert_measurements(unittest_instance, lhs, rhs):
-    for m1, m2 in zip(lhs["measurements"], rhs["measurements"]):
+    for m1, m2 in zip(lhs["measurements"], rhs["measurements"], strict=False):
         unittest_instance.assertEqual(m1["real_time"], m2["real_time"])
         unittest_instance.assertEqual(m1["cpu_time"], m2["cpu_time"])
         # m1['time'] and m1['cpu'] hold values which are being calculated,

View File

@@ -1,4 +1,6 @@
-"""util.py - General utilities for running, loading, and processing benchmarks"""
+"""util.py - General utilities for running, loading, and processing
+benchmarks
+"""
 import json
 import os
@@ -46,7 +48,7 @@ def is_json_file(filename):
     'False' otherwise.
     """
     try:
-        with open(filename, "r") as f:
+        with open(filename) as f:
             json.load(f)
         return True
     except BaseException:
@@ -97,7 +99,8 @@ def find_benchmark_flag(prefix, benchmark_flags):
     if it is found return the arg it specifies. If specified more than once the
     last value is returned. If the flag is not found None is returned.
     """
-    assert prefix.startswith("--") and prefix.endswith("=")
+    assert prefix.startswith("--")
+    assert prefix.endswith("=")
     result = None
     for f in benchmark_flags:
         if f.startswith(prefix):
@@ -110,7 +113,8 @@ def remove_benchmark_flags(prefix, benchmark_flags):
     Return a new list containing the specified benchmark_flags except those
     with the specified prefix.
     """
-    assert prefix.startswith("--") and prefix.endswith("=")
+    assert prefix.startswith("--")
+    assert prefix.endswith("=")
     return [f for f in benchmark_flags if not f.startswith(prefix)]
@@ -133,17 +137,16 @@ def load_benchmark_results(fname, benchmark_filter):
         name = benchmark.get("run_name", None) or benchmark["name"]
         return re.search(benchmark_filter, name) is not None
-    with open(fname, "r") as f:
+    with open(fname) as f:
         results = json.load(f)
-        if "context" in results:
-            if "json_schema_version" in results["context"]:
-                json_schema_version = results["context"]["json_schema_version"]
-                if json_schema_version != 1:
-                    print(
-                        "In %s, got unnsupported JSON schema version: %i, expected 1"
-                        % (fname, json_schema_version)
-                    )
-                    sys.exit(1)
+        if "json_schema_version" in results.get("context", {}):
+            json_schema_version = results["context"]["json_schema_version"]
+            if json_schema_version != 1:
+                print(
+                    f"In {fname}, got unnsupported JSON schema version:"
+                    f" {json_schema_version}, expected 1"
+                )
+                sys.exit(1)
         if "benchmarks" in results:
             results["benchmarks"] = list(
                 filter(benchmark_wanted, results["benchmarks"])
@@ -157,9 +160,7 @@ def sort_benchmark_results(result):
     # From inner key to the outer key!
     benchmarks = sorted(
         benchmarks,
-        key=lambda benchmark: benchmark["repetition_index"]
-        if "repetition_index" in benchmark
-        else -1,
+        key=lambda benchmark: benchmark.get("repetition_index", -1),
     )
     benchmarks = sorted(
         benchmarks,
@@ -169,15 +170,11 @@
     )
     benchmarks = sorted(
         benchmarks,
-        key=lambda benchmark: benchmark["per_family_instance_index"]
-        if "per_family_instance_index" in benchmark
-        else -1,
+        key=lambda benchmark: benchmark.get("per_family_instance_index", -1),
    )
    benchmarks = sorted(
        benchmarks,
-        key=lambda benchmark: benchmark["family_index"]
-        if "family_index" in benchmark
-        else -1,
+        key=lambda benchmark: benchmark.get("family_index", -1),
    )
    result["benchmarks"] = benchmarks
@@ -197,11 +194,12 @@ def run_benchmark(exe_name, benchmark_flags):
         is_temp_output = True
         thandle, output_name = tempfile.mkstemp()
         os.close(thandle)
-        benchmark_flags = list(benchmark_flags) + [
-            "--benchmark_out=%s" % output_name
+        benchmark_flags = [
+            *list(benchmark_flags),
+            "--benchmark_out=%s" % output_name,
         ]
-    cmd = [exe_name] + benchmark_flags
+    cmd = [exe_name, *benchmark_flags]
     print("RUNNING: %s" % " ".join(cmd))
     exitCode = subprocess.call(cmd)
     if exitCode != 0:

View File

@@ -73,16 +73,16 @@ def process_identifiers(line):
     parts = re.split(r"([a-zA-Z0-9_]+)", line)
     new_line = ""
     for tk in parts:
-        if is_identifier(tk):
-            if tk.startswith("__Z"):
-                tk = tk[1:]
-            elif (
-                tk.startswith("_")
-                and len(tk) > 1
-                and tk[1].isalpha()
-                and tk[1] != "Z"
-            ):
-                tk = tk[1:]
+        if is_identifier(tk) and (
+            tk.startswith("__Z")
+            or (
+                tk.startswith("_")
+                and len(tk) > 1
+                and tk[1].isalpha()
+                and tk[1] != "Z"
+            )
+        ):
+            tk = tk[1:]
         new_line += tk
     return new_line
@@ -148,7 +148,7 @@ def main():
         print("ERROR: input file '%s' does not exist" % input)
         sys.exit(1)
-    with open(input, "r") as f:
+    with open(input) as f:
         contents = f.read()
     new_contents = process_asm(contents)
     with open(output, "w") as f: