From c7192c8a9af0a1cb4d013c589af92d6dceedef60 Mon Sep 17 00:00:00 2001
From: Roman Lebedev
Date: Fri, 18 Aug 2017 20:55:27 +0300
Subject: [PATCH] compare_bench.py: fixup benchmark_options. (#435)

https://github.com/google/benchmark/commit/2373382284918fda13f726aefd6e2f700784797f
reworked parsing, and introduced a regression in the handling of the
optional arguments that should be passed through to both of the benchmarks.

Now, unless the *first* optional argument starts with '-', it would just
complain about that argument:
  Unrecognized positional argument arguments: '['q']'
which is wrong. However, if some dummy arg like '-q' was passed first,
it would then happily pass them all through...

This commit fixes the benchmark_options behavior by restoring the
original passthrough behavior for all the optional positional arguments.
---
 tools/compare_bench.py | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/tools/compare_bench.py b/tools/compare_bench.py
index d54baaa0..7bbf0d01 100755
--- a/tools/compare_bench.py
+++ b/tools/compare_bench.py
@@ -39,21 +39,20 @@ def main():
     parser.add_argument(
         'test2', metavar='test2', type=str, nargs=1,
         help='A benchmark executable or JSON output file')
-    # FIXME this is a dummy argument which will never actually match
-    # any --benchmark flags but it helps generate a better usage message
     parser.add_argument(
-        'benchmark_options', metavar='benchmark_option', nargs='*',
+        'benchmark_options', metavar='benchmark_options', nargs=argparse.REMAINDER,
         help='Arguments to pass when running benchmark executables'
         )
     args, unknown_args = parser.parse_known_args()
     # Parse the command line flags
     test1 = args.test1[0]
     test2 = args.test2[0]
-    if args.benchmark_options:
+    if unknown_args:
+        # should never happen
         print("Unrecognized positional argument arguments: '%s'"
-              % args.benchmark_options)
+              % unknown_args)
         exit(1)
-    benchmark_options = unknown_args
+    benchmark_options = args.benchmark_options
     check_inputs(test1, test2, benchmark_options)
     # Run the benchmarks and report the results
     json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options)
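
For illustration, a minimal sketch of the new parsing behaviour (the executable
names and option values below are made-up examples, not taken from the patch).
With nargs=argparse.REMAINDER, everything left over after the two positional
test arguments is collected into benchmark_options, whether or not it starts
with '-', so unknown_args stays empty and the options are passed through to
both benchmarks:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('test1', type=str, nargs=1)
    parser.add_argument('test2', type=str, nargs=1)
    # REMAINDER collects every remaining argument, dashed or not
    parser.add_argument('benchmark_options', nargs=argparse.REMAINDER)

    # hypothetical invocation: two executables plus passthrough options
    args, unknown_args = parser.parse_known_args(
        ['./old_bench', './new_bench', 'q', '--benchmark_filter=BM_memcpy'])

    print(args.benchmark_options)  # ['q', '--benchmark_filter=BM_memcpy']
    print(unknown_args)            # []

With the previous nargs='*' declaration, a leading non-dashed argument such as
'q' was captured by benchmark_options and rejected with the "Unrecognized
positional argument arguments" error, while dashed flags only ever reached
unknown_args.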