Bind benchmark builder to Python (#1040)
* Fix setup.py and reformat
* Bind benchmark
* Add benchmark option to Python
* Add Python examples for range, complexity, and thread
* Remove invalid multithreading in Python
* Bump Python bindings version to 0.2.0

Co-authored-by: Dominic Hamon <dominichamon@users.noreply.github.com>
Parent: df9e2948fa
Commit: 73d4d5e8d6
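In practice, the change means a Python benchmark can now carry the same builder options as a C++ one, via stacked decorators. A minimal sketch assembled from the examples added in this commit (the import alias and the function name sum_range are illustrative; the decorators and constants appear in the diffs below):

    import google_benchmark as benchmark

    @benchmark.register
    @benchmark.option.unit(benchmark.kMicrosecond)
    @benchmark.option.range(8, limit=8 << 10)
    def sum_range(state):
        while state:
            sum(range(state.range(0)))

    if __name__ == "__main__":
        benchmark.main()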
bindings/python/google_benchmark/__init__.py:

@@ -29,24 +29,111 @@ Example usage:
 
 from absl import app
 from google_benchmark import _benchmark
-from google_benchmark._benchmark import Counter
+from google_benchmark._benchmark import (
+    Counter,
+    kNanosecond,
+    kMicrosecond,
+    kMillisecond,
+    oNone,
+    o1,
+    oN,
+    oNSquared,
+    oNCubed,
+    oLogN,
+    oNLogN,
+    oAuto,
+    oLambda,
+)
+
 
 __all__ = [
     "register",
     "main",
     "Counter",
+    "kNanosecond",
+    "kMicrosecond",
+    "kMillisecond",
+    "oNone",
+    "o1",
+    "oN",
+    "oNSquared",
+    "oNCubed",
+    "oLogN",
+    "oNLogN",
+    "oAuto",
+    "oLambda",
 ]
 
-__version__ = "0.1.0"
+__version__ = "0.2.0"
 
 
-def register(f=None, *, name=None):
-    if f is None:
+class __OptionMaker:
+    """A stateless class to collect benchmark options.
+
+    Collect all decorator calls like @option.range(start=0, limit=1<<5).
+    """
+
+    class Options:
+        """Pure data class to store option calls, along with the benchmarked function."""
+
+        def __init__(self, func):
+            self.func = func
+            self.builder_calls = []
+
+    @classmethod
+    def make(cls, func_or_options):
+        """Make Options from Options or the benchmarked function."""
+        if isinstance(func_or_options, cls.Options):
+            return func_or_options
+        return cls.Options(func_or_options)
+
+    def __getattr__(self, builder_name):
+        """Append an option call to the Options."""
+
+        # The function that gets returned on @option.range(start=0, limit=1<<5).
+        def __builder_method(*args, **kwargs):
+
+            # The decorator that gets called, either with the benchmarked function
+            # or the previous Options.
+            def __decorator(func_or_options):
+                options = self.make(func_or_options)
+                options.builder_calls.append((builder_name, args, kwargs))
+                # The decorator returns Options so it is not technically a decorator
+                # and needs a final call to @register.
+                return options
+
+            return __decorator
+
+        return __builder_method
+
+
+# Alias for a nicer API.
+# We have to instantiate an object, even if stateless, to be able to use __getattr__
+# on option.range.
+option = __OptionMaker()
+
+
+def register(undefined=None, *, name=None):
+    """Register a function for benchmarking."""
+    if undefined is None:
+        # Decorator is called without parentheses, so we return a decorator.
         return lambda f: register(f, name=name)
+
+    # We have either the function to benchmark (simple case) or an instance of Options
+    # (@option._ case).
+    options = __OptionMaker.make(undefined)
+
     if name is None:
-        name = f.__name__
-    _benchmark.RegisterBenchmark(name, f)
-    return f
+        name = options.func.__name__
+
+    # We register the benchmark and reproduce all the @option._ calls onto the
+    # benchmark builder pattern.
+    benchmark = _benchmark.RegisterBenchmark(name, options.func)
+    for name, args, kwargs in options.builder_calls[::-1]:
+        getattr(benchmark, name)(*args, **kwargs)
+
+    # Return the benchmarked function because the decorator does not modify it.
+    return options.func
 
 
 def _flags_parser(argv):
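To spell out the mechanics reconstructed above: each @benchmark.option.<name>(...) decorator does not modify the function; it wraps it in an Options record and appends (builder_name, args, kwargs) to builder_calls. The final @benchmark.register then registers the function and replays those calls onto the Benchmark builder returned by _benchmark.RegisterBenchmark, iterating builder_calls[::-1] so they apply in source order (decorators evaluate bottom-up). A rough sketch of the equivalence, with the illustrative names my_bench and my_bench_manual:

    import google_benchmark as benchmark
    from google_benchmark import _benchmark

    @benchmark.register
    @benchmark.option.range_multiplier(2)
    @benchmark.option.range(1 << 10, 1 << 18)
    def my_bench(state):
        while state:
            sum(range(state.range(0)))

    # The stacked decorators behave roughly like driving the builder by hand
    # (a second name is used here only to avoid registering "my_bench" twice):
    b = _benchmark.RegisterBenchmark("my_bench_manual", my_bench)
    b.range_multiplier(2)
    b.range(1 << 10, 1 << 18)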
The pybind11 extension module (C++ source of google_benchmark._benchmark):

@@ -1,7 +1,5 @@
 // Benchmark for Python.
 
-#include "benchmark/benchmark.h"
-
 #include <map>
 #include <string>
 #include <vector>
@@ -11,6 +9,8 @@
 #include "pybind11/stl.h"
 #include "pybind11/stl_bind.h"
 
+#include "benchmark/benchmark.h"
+
 PYBIND11_MAKE_OPAQUE(benchmark::UserCounters);
 
 namespace {
@@ -37,16 +37,82 @@ std::vector<std::string> Initialize(const std::vector<std::string>& argv) {
   return remaining_argv;
 }
 
-void RegisterBenchmark(const char* name, py::function f) {
-  benchmark::RegisterBenchmark(name,
-                               [f](benchmark::State& state) { f(&state); });
+benchmark::internal::Benchmark* RegisterBenchmark(const char* name,
+                                                  py::function f) {
+  return benchmark::RegisterBenchmark(
+      name, [f](benchmark::State& state) { f(&state); });
 }
 
 PYBIND11_MODULE(_benchmark, m) {
-  m.def("Initialize", Initialize);
-  m.def("RegisterBenchmark", RegisterBenchmark);
-  m.def("RunSpecifiedBenchmarks",
-        []() { benchmark::RunSpecifiedBenchmarks(); });
+  using benchmark::TimeUnit;
+  py::enum_<TimeUnit>(m, "TimeUnit")
+      .value("kNanosecond", TimeUnit::kNanosecond)
+      .value("kMicrosecond", TimeUnit::kMicrosecond)
+      .value("kMillisecond", TimeUnit::kMillisecond)
+      .export_values();
+
+  using benchmark::BigO;
+  py::enum_<BigO>(m, "BigO")
+      .value("oNone", BigO::oNone)
+      .value("o1", BigO::o1)
+      .value("oN", BigO::oN)
+      .value("oNSquared", BigO::oNSquared)
+      .value("oNCubed", BigO::oNCubed)
+      .value("oLogN", BigO::oLogN)
+      .value("oNLogN", BigO::oNLogN)
+      .value("oAuto", BigO::oAuto)
+      .value("oLambda", BigO::oLambda)
+      .export_values();
+
+  using benchmark::internal::Benchmark;
+  py::class_<Benchmark>(m, "Benchmark")
+      // For methods returning a pointer to the current object, the reference
+      // return policy is used to ask pybind not to take ownership of the
+      // returned object and to avoid calling delete on it.
+      // https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies
+      //
+      // For methods taking a const std::vector<...>&, a copy is created
+      // because it is bound to a Python list.
+      // https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
+      .def("unit", &Benchmark::Unit, py::return_value_policy::reference)
+      .def("arg", &Benchmark::Arg, py::return_value_policy::reference)
+      .def("args", &Benchmark::Args, py::return_value_policy::reference)
+      .def("range", &Benchmark::Range, py::return_value_policy::reference,
+           py::arg("start"), py::arg("limit"))
+      .def("dense_range", &Benchmark::DenseRange,
+           py::return_value_policy::reference, py::arg("start"),
+           py::arg("limit"), py::arg("step") = 1)
+      .def("ranges", &Benchmark::Ranges, py::return_value_policy::reference)
+      .def("args_product", &Benchmark::ArgsProduct,
+           py::return_value_policy::reference)
+      .def("arg_name", &Benchmark::ArgName, py::return_value_policy::reference)
+      .def("arg_names", &Benchmark::ArgNames,
+           py::return_value_policy::reference)
+      .def("range_pair", &Benchmark::RangePair,
+           py::return_value_policy::reference, py::arg("lo1"), py::arg("hi1"),
+           py::arg("lo2"), py::arg("hi2"))
+      .def("range_multiplier", &Benchmark::RangeMultiplier,
+           py::return_value_policy::reference)
+      .def("min_time", &Benchmark::MinTime, py::return_value_policy::reference)
+      .def("iterations", &Benchmark::Iterations,
+           py::return_value_policy::reference)
+      .def("repetitions", &Benchmark::Repetitions,
+           py::return_value_policy::reference)
+      .def("report_aggregates_only", &Benchmark::ReportAggregatesOnly,
+           py::return_value_policy::reference, py::arg("value") = true)
+      .def("display_aggregates_only", &Benchmark::DisplayAggregatesOnly,
+           py::return_value_policy::reference, py::arg("value") = true)
+      .def("measure_process_cpu_time", &Benchmark::MeasureProcessCPUTime,
+           py::return_value_policy::reference)
+      .def("use_real_time", &Benchmark::UseRealTime,
+           py::return_value_policy::reference)
+      .def("use_manual_time", &Benchmark::UseManualTime,
+           py::return_value_policy::reference)
+      .def(
+          "complexity",
+          (Benchmark * (Benchmark::*)(benchmark::BigO)) & Benchmark::Complexity,
+          py::return_value_policy::reference,
+          py::arg("complexity") = benchmark::oAuto);
 
   using benchmark::Counter;
   py::class_<Counter> py_counter(m, "Counter");
@@ -104,5 +170,11 @@ PYBIND11_MODULE(_benchmark, m) {
       .def_readwrite("counters", &State::counters)
       .def_readonly("thread_index", &State::thread_index)
       .def_readonly("threads", &State::threads);
+
+  m.def("Initialize", Initialize);
+  m.def("RegisterBenchmark", RegisterBenchmark,
+        py::return_value_policy::reference);
+  m.def("RunSpecifiedBenchmarks",
+        []() { benchmark::RunSpecifiedBenchmarks(); });
 };
 }  // namespace
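The comments in the binding above explain the choice of py::return_value_policy::reference: each builder method hands back the same underlying C++ Benchmark object without transferring ownership to Python. One consequence is that the low-level handle returned by _benchmark.RegisterBenchmark can be driven directly, including by chaining, although the decorator API is the intended surface. A small illustrative sketch (the function name noop is hypothetical):

    import sys
    from google_benchmark import _benchmark

    def noop(state):          # hypothetical benchmark body
        while state:
            pass

    _benchmark.Initialize(sys.argv)
    b = _benchmark.RegisterBenchmark("noop", noop)
    # Each builder method returns the same Benchmark by reference, so the
    # calls can be chained as well as issued one by one.
    b.unit(_benchmark.kMicrosecond).arg(100).arg(1000)
    _benchmark.RunSpecifiedBenchmarks()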
The Python usage examples:

@@ -64,7 +64,7 @@ def manual_timing(state):
     while state:
         # Manually count Python CPU time
         start = time.perf_counter()  # perf_counter_ns() in Python 3.7+
-        # Somehting to benchmark
+        # Something to benchmark
         time.sleep(0.01)
         end = time.perf_counter()
         state.set_iteration_time(end - start)
@@ -92,5 +92,45 @@ def custom_counters(state):
     state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate)
 
 
+@benchmark.register
+@benchmark.option.measure_process_cpu_time()
+@benchmark.option.use_real_time()
+def with_options(state):
+    while state:
+        sum(range(1_000_000))
+
+
+@benchmark.register(name="sum_million_microseconds")
+@benchmark.option.unit(benchmark.kMicrosecond)
+def with_options(state):
+    while state:
+        sum(range(1_000_000))
+
+
+@benchmark.register
+@benchmark.option.arg(100)
+@benchmark.option.arg(1000)
+def passing_argument(state):
+    while state:
+        sum(range(state.range(0)))
+
+
+@benchmark.register
+@benchmark.option.range(8, limit=8 << 10)
+def using_range(state):
+    while state:
+        sum(range(state.range(0)))
+
+
+@benchmark.register
+@benchmark.option.range_multiplier(2)
+@benchmark.option.range(1 << 10, 1 << 18)
+@benchmark.option.complexity(benchmark.oN)
+def computing_complexity(state):
+    while state:
+        sum(range(state.range(0)))
+    state.complexity_n = state.range(0)
+
+
 if __name__ == "__main__":
     benchmark.main()
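The examples above fix a typo in the manual-timing benchmark and exercise the new options. One combination the examples do not show is pairing manual timing with the newly bound use_manual_time option; a hedged sketch of how the two could be combined, assuming set_iteration_time keeps its existing behaviour (manual_sleep is an illustrative name):

    import time
    import google_benchmark as benchmark

    @benchmark.register
    @benchmark.option.use_manual_time()
    def manual_sleep(state):
        while state:
            start = time.perf_counter()
            time.sleep(0.01)  # the operation being measured
            state.set_iteration_time(time.perf_counter() - start)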
setup.py (96 lines changed):

@@ -12,29 +12,32 @@ from setuptools.command import build_ext
 HERE = os.path.dirname(os.path.abspath(__file__))
 
 
-IS_WINDOWS = sys.platform.startswith('win')
+IS_WINDOWS = sys.platform.startswith("win")
 
 
 def _get_version():
     """Parse the version string from __init__.py."""
-    with open(os.path.join(
-        HERE, 'bindings', 'python', 'google_benchmark', '__init__.py')) as init_file:
+    with open(
+        os.path.join(HERE, "bindings", "python", "google_benchmark", "__init__.py")
+    ) as init_file:
         try:
             version_line = next(
-                line for line in init_file if line.startswith('__version__'))
+                line for line in init_file if line.startswith("__version__")
+            )
         except StopIteration:
-            raise ValueError('__version__ not defined in __init__.py')
+            raise ValueError("__version__ not defined in __init__.py")
         else:
             namespace = {}
             exec(version_line, namespace)  # pylint: disable=exec-used
-            return namespace['__version__']
+            return namespace["__version__"]
 
 
 def _parse_requirements(path):
     with open(os.path.join(HERE, path)) as requirements:
         return [
-            line.rstrip() for line in requirements
-            if not (line.isspace() or line.startswith('#'))
+            line.rstrip()
+            for line in requirements
+            if not (line.isspace() or line.startswith("#"))
         ]
 
 
@@ -43,8 +46,9 @@ class BazelExtension(setuptools.Extension):
 
     def __init__(self, name, bazel_target):
         self.bazel_target = bazel_target
-        self.relpath, self.target_name = (
-            posixpath.relpath(bazel_target, '//').split(':'))
+        self.relpath, self.target_name = posixpath.relpath(bazel_target, "//").split(
+            ":"
+        )
         setuptools.Extension.__init__(self, name, sources=[])
 
 
@@ -58,30 +62,33 @@ class BuildBazelExtension(build_ext.build_ext):
 
     def bazel_build(self, ext):
         """Runs the bazel build to create the package."""
-        with open('WORKSPACE', 'r') as workspace:
+        with open("WORKSPACE", "r") as workspace:
             workspace_contents = workspace.read()
 
-        with open('WORKSPACE', 'w') as workspace:
-            workspace.write(re.sub(
-                r'(?<=path = ").*(?=", # May be overwritten by setup\.py\.)',
-                sysconfig.get_python_inc().replace(os.path.sep, posixpath.sep),
-                workspace_contents))
+        with open("WORKSPACE", "w") as workspace:
+            workspace.write(
+                re.sub(
+                    r'(?<=path = ").*(?=", # May be overwritten by setup\.py\.)',
+                    sysconfig.get_python_inc().replace(os.path.sep, posixpath.sep),
+                    workspace_contents,
+                )
+            )
 
         if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
 
         bazel_argv = [
-            'bazel',
-            'build',
+            "bazel",
+            "build",
             ext.bazel_target,
-            '--symlink_prefix=' + os.path.join(self.build_temp, 'bazel-'),
-            '--compilation_mode=' + ('dbg' if self.debug else 'opt'),
+            "--symlink_prefix=" + os.path.join(self.build_temp, "bazel-"),
+            "--compilation_mode=" + ("dbg" if self.debug else "opt"),
         ]
 
         if IS_WINDOWS:
             # Link with python*.lib.
             for library_dir in self.library_dirs:
-                bazel_argv.append('--linkopt=/LIBPATH:' + library_dir)
+                bazel_argv.append("--linkopt=/LIBPATH:" + library_dir)
 
         self.spawn(bazel_argv)
 
@@ -89,6 +96,7 @@
         ext_bazel_bin_path = os.path.join(
             self.build_temp, 'bazel-bin',
             ext.relpath, ext.target_name + shared_lib_suffix)
+
         ext_dest_path = self.get_ext_fullpath(ext.name)
         ext_dest_dir = os.path.dirname(ext_dest_path)
         if not os.path.exists(ext_dest_dir):
@@ -97,32 +105,36 @@
 
 
 setuptools.setup(
-    name='google_benchmark',
+    name="google_benchmark",
     version=_get_version(),
-    url='https://github.com/google/benchmark',
-    description='A library to benchmark code snippets.',
-    author='Google',
-    author_email='benchmark-py@google.com',
+    url="https://github.com/google/benchmark",
+    description="A library to benchmark code snippets.",
+    author="Google",
+    author_email="benchmark-py@google.com",
     # Contained modules and scripts.
-    package_dir={'': 'bindings/python'},
-    packages=setuptools.find_packages('bindings/python'),
-    install_requires=_parse_requirements('bindings/python/requirements.txt'),
+    package_dir={"": "bindings/python"},
+    packages=setuptools.find_packages("bindings/python"),
+    install_requires=_parse_requirements("bindings/python/requirements.txt"),
     cmdclass=dict(build_ext=BuildBazelExtension),
-    ext_modules=[BazelExtension(
-        'google_benchmark._benchmark', '//bindings/python/google_benchmark:_benchmark')],
+    ext_modules=[
+        BazelExtension(
+            "google_benchmark._benchmark",
+            "//bindings/python/google_benchmark:_benchmark",
+        )
+    ],
     zip_safe=False,
     # PyPI package information.
     classifiers=[
-        'Development Status :: 4 - Beta',
-        'Intended Audience :: Developers',
-        'Intended Audience :: Science/Research',
-        'License :: OSI Approved :: Apache Software License',
-        'Programming Language :: Python :: 3.6',
-        'Programming Language :: Python :: 3.7',
-        'Programming Language :: Python :: 3.8',
-        'Topic :: Software Development :: Testing',
-        'Topic :: System :: Benchmark',
+        "Development Status :: 4 - Beta",
+        "Intended Audience :: Developers",
+        "Intended Audience :: Science/Research",
+        "License :: OSI Approved :: Apache Software License",
+        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.8",
+        "Topic :: Software Development :: Testing",
+        "Topic :: System :: Benchmark",
     ],
-    license='Apache 2.0',
-    keywords='benchmark',
+    license="Apache 2.0",
+    keywords="benchmark",
 )
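For reference, the command that BuildBazelExtension.bazel_build spawns is unchanged in substance by this reformatting. On a non-Windows optimized build, bazel_argv works out to roughly the following; the temp-directory path is illustrative, and on Windows one "--linkopt=/LIBPATH:<dir>" entry is appended per Python library directory:

    bazel_argv = [
        "bazel",
        "build",
        "//bindings/python/google_benchmark:_benchmark",        # ext.bazel_target
        "--symlink_prefix=build/temp.linux-x86_64-3.8/bazel-",   # self.build_temp + "bazel-"
        "--compilation_mode=opt",                                # "dbg" when build_ext runs with --debug
    ]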