Mirror of https://github.com/google/benchmark.git
[tools] Run autopep8 and apply fixes found. (#739)
Commit eee8b05c97 (parent eafa34a5e8)
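The diff below is the result of running autopep8 over the Python tooling. The exact command line and options used for the commit are not recorded here, so the following Python sketch of an equivalent pass, driving autopep8's fix_code() API over the three files touched by this change, is an assumption rather than a reproduction of the author's invocation:

# Hypothetical re-run of the formatting pass; the paths follow the files
# touched by this commit, but the original command and options are unknown.
import autopep8

for path in ('tools/compare.py', 'tools/gbench/report.py', 'tools/gbench/util.py'):
    with open(path) as f:
        source = f.read()
    # fix_code() applies PEP 8 fixes such as E261 (comment spacing),
    # E302 (blank lines before definitions) and E722 (no bare except).
    fixed = autopep8.fix_code(source)
    if fixed != source:
        with open(path, 'w') as f:
            f.write(fixed)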
--- a/tools/compare.py
+++ b/tools/compare.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 
+import unittest
 """
 compare.py - versatile benchmark output compare tool
 """
@@ -244,9 +245,6 @@ def main():
         print(ln)
 
 
-import unittest
-
-
 class TestParser(unittest.TestCase):
     def setUp(self):
         self.parser = create_parser()
@@ -402,7 +400,7 @@ class TestParser(unittest.TestCase):
 
 
 if __name__ == '__main__':
-    #unittest.main()
+    # unittest.main()
     main()
 
 # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
--- a/tools/gbench/report.py
+++ b/tools/gbench/report.py
@@ -1,3 +1,4 @@
+import unittest
 """report.py - Utilities for reporting statistics about benchmark results
 """
 import os
@@ -270,9 +271,6 @@ def generate_difference_report(
 # Unit tests
 
 
-import unittest
-
-
 class TestGetUniqueBenchmarkNames(unittest.TestCase):
     def load_results(self):
         import json
@@ -290,7 +288,7 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
             'BM_One',
             'BM_Two',
-            'short', # These two are not sorted
-            'medium', # These two are not sorted
+            'short',  # These two are not sorted
+            'medium',  # These two are not sorted
         ]
         json = self.load_results()
         output_lines = get_unique_benchmark_names(json)
@@ -300,6 +298,7 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
         for i in range(0, len(output_lines)):
             self.assertEqual(expect_lines[i], output_lines[i])
 
+
 class TestReportDifference(unittest.TestCase):
     def load_results(self):
         import json
--- a/tools/gbench/util.py
+++ b/tools/gbench/util.py
@@ -7,11 +7,13 @@ import subprocess
 import sys
 
 # Input file type enumeration
 IT_Invalid = 0
 IT_JSON = 1
 IT_Executable = 2
 
 _num_magic_bytes = 2 if sys.platform.startswith('win') else 4
+
+
 def is_executable_file(filename):
     """
     Return 'True' if 'filename' names a valid file which is likely
@@ -46,7 +48,7 @@ def is_json_file(filename):
         with open(filename, 'r') as f:
             json.load(f)
             return True
-    except:
+    except BaseException:
         pass
     return False
 
@@ -84,6 +86,7 @@ def check_input_file(filename):
         sys.exit(1)
     return ftype
 
+
 def find_benchmark_flag(prefix, benchmark_flags):
     """
     Search the specified list of flags for a flag matching `<prefix><arg>` and
@@ -97,6 +100,7 @@ def find_benchmark_flag(prefix, benchmark_flags):
             result = f[len(prefix):]
     return result
 
+
 def remove_benchmark_flags(prefix, benchmark_flags):
     """
     Return a new list containing the specified benchmark_flags except those
@@ -105,6 +109,7 @@ def remove_benchmark_flags(prefix, benchmark_flags):
     assert prefix.startswith('--') and prefix.endswith('=')
     return [f for f in benchmark_flags if not f.startswith(prefix)]
 
+
 def load_benchmark_results(fname):
     """
     Read benchmark output from a file and return the JSON object.
@@ -129,7 +134,7 @@ def run_benchmark(exe_name, benchmark_flags):
         thandle, output_name = tempfile.mkstemp()
         os.close(thandle)
-        benchmark_flags = list(benchmark_flags) + \
-                          ['--benchmark_out=%s' % output_name]
+        benchmark_flags = list(benchmark_flags) + \
+            ['--benchmark_out=%s' % output_name]
 
     cmd = [exe_name] + benchmark_flags
     print("RUNNING: %s" % ' '.join(cmd))
@@ -156,4 +161,4 @@ def run_or_load_benchmark(filename, benchmark_flags):
     elif ftype == IT_Executable:
         return run_benchmark(filename, benchmark_flags)
     else:
-        assert False # This branch is unreachable
+        assert False  # This branch is unreachable
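Most hunks above only adjust whitespace or move imports; the one rewrite that looks behavioral is the E722 fix in util.py, where the bare `except:` becomes `except BaseException:`. The sketch below (an illustrative helper, not code from the repository) shows why behavior is unchanged: both spellings catch every exception, since BaseException is the root of Python's exception hierarchy.

# Illustrative only -- not part of tools/gbench/util.py.
def swallows_everything(action):
    """Run 'action' and return None if it raises anything at all."""
    try:
        return action()
    except BaseException:  # catches the same set as a bare `except:`
        return None


def raise_system_exit():
    raise SystemExit(1)


# SystemExit derives from BaseException (not Exception), so both the bare
# and the explicit form intercept it; autopep8's rewrite only makes that
# catch-all behavior visible in the source.
assert swallows_everything(raise_system_exit) is None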
|