From eee8b05c97d7b832bf67d6e000958d012ab30165 Mon Sep 17 00:00:00 2001
From: Jusufadis Bakamovic
Date: Fri, 7 Dec 2018 14:34:00 +0100
Subject: [PATCH] [tools] Run autopep8 and apply fixes found. (#739)

---
 tools/compare.py       |  6 ++----
 tools/gbench/report.py |  7 +++----
 tools/gbench/util.py   | 15 ++++++++++-----
 3 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/tools/compare.py b/tools/compare.py
index 9ff5c142..539ace6f 100755
--- a/tools/compare.py
+++ b/tools/compare.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 
+import unittest
 """
 compare.py - versatile benchmark output compare tool
 """
@@ -244,9 +245,6 @@ def main():
         print(ln)
 
 
-import unittest
-
-
 class TestParser(unittest.TestCase):
     def setUp(self):
         self.parser = create_parser()
@@ -402,7 +400,7 @@ class TestParser(unittest.TestCase):
 
 
 if __name__ == '__main__':
-    #unittest.main()
+    # unittest.main()
     main()
 
 # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
diff --git a/tools/gbench/report.py b/tools/gbench/report.py
index 28bab34a..5085b931 100644
--- a/tools/gbench/report.py
+++ b/tools/gbench/report.py
@@ -1,3 +1,4 @@
+import unittest
 """report.py - Utilities for reporting statistics about benchmark results
 """
 import os
@@ -270,9 +271,6 @@ def generate_difference_report(
 # Unit tests
 
 
-import unittest
-
-
 class TestGetUniqueBenchmarkNames(unittest.TestCase):
     def load_results(self):
         import json
@@ -290,7 +288,7 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
             'BM_One',
             'BM_Two',
             'short',  # These two are not sorted
-            'medium', # These two are not sorted
+            'medium',  # These two are not sorted
         ]
         json = self.load_results()
         output_lines = get_unique_benchmark_names(json)
@@ -300,6 +298,7 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
         for i in range(0, len(output_lines)):
             self.assertEqual(expect_lines[i], output_lines[i])
 
+
 class TestReportDifference(unittest.TestCase):
     def load_results(self):
         import json
diff --git a/tools/gbench/util.py b/tools/gbench/util.py
index 07c23772..1f8e8e2c 100644
--- a/tools/gbench/util.py
+++ b/tools/gbench/util.py
@@ -7,11 +7,13 @@ import subprocess
 import sys
 
 # Input file type enumeration
-IT_Invalid    = 0
-IT_JSON       = 1
+IT_Invalid = 0
+IT_JSON = 1
 IT_Executable = 2
 
 _num_magic_bytes = 2 if sys.platform.startswith('win') else 4
+
+
 def is_executable_file(filename):
     """
     Return 'True' if 'filename' names a valid file which is likely
@@ -46,7 +48,7 @@ def is_json_file(filename):
         with open(filename, 'r') as f:
             json.load(f)
         return True
-    except:
+    except BaseException:
         pass
     return False
 
@@ -84,6 +86,7 @@ def check_input_file(filename):
         sys.exit(1)
     return ftype
 
+
 def find_benchmark_flag(prefix, benchmark_flags):
     """
     Search the specified list of flags for a flag matching `<prefix>` and
@@ -97,6 +100,7 @@ def find_benchmark_flag(prefix, benchmark_flags):
             result = f[len(prefix):]
     return result
 
+
 def remove_benchmark_flags(prefix, benchmark_flags):
     """
     Return a new list containing the specified benchmark_flags except those
@@ -105,6 +109,7 @@ def remove_benchmark_flags(prefix, benchmark_flags):
     assert prefix.startswith('--') and prefix.endswith('=')
     return [f for f in benchmark_flags if not f.startswith(prefix)]
 
+
 def load_benchmark_results(fname):
     """
     Read benchmark output from a file and return the JSON object.
@@ -129,7 +134,7 @@ def run_benchmark(exe_name, benchmark_flags):
         thandle, output_name = tempfile.mkstemp()
         os.close(thandle)
         benchmark_flags = list(benchmark_flags) + \
-                          ['--benchmark_out=%s' % output_name]
+            ['--benchmark_out=%s' % output_name]
 
     cmd = [exe_name] + benchmark_flags
     print("RUNNING: %s" % ' '.join(cmd))
@@ -156,4 +161,4 @@ def run_or_load_benchmark(filename, benchmark_flags):
     elif ftype == IT_Executable:
         return run_benchmark(filename, benchmark_flags)
     else:
-        assert False # This branch is unreachable
\ No newline at end of file
+        assert False  # This branch is unreachable