memgraph/tools/apollo/macro_benchmark_summary
Matej Ferencevic 859641cb0c Changed macro benchmark summary format
Reviewers: buda, mtomic, mislav.bradac

Reviewed By: mislav.bradac

Subscribers: florijan, pullbot

Differential Revision: https://phabricator.memgraph.io/D972
2017-12-28 16:35:45 +01:00

#!/usr/bin/env python3
import argparse
import json
import sys


def load_file(fname):
    with open(fname) as f:
        data = f.read()
    try:
        return json.loads(data)
    except json.decoder.JSONDecodeError:
        # A file with malformed JSON is treated as an empty summary.
        return {"results": [], "headers": []}


def strip_integers(row):
    # Keep only the string-valued fields; they identify a measurement row,
    # while the non-string fields carry the measured statistics.
    return {k: v for k, v in row.items() if isinstance(v, str)}


def find_item(results_prev, header_cur, row_cur):
    # Find the previous-run row whose identifying (string) fields match the
    # current row and return its entry for the given header, if any.
    row_cur = strip_integers(row_cur)
    row_prev = None
    for result in results_prev:
        s = strip_integers(result)
        if s == row_cur:
            row_prev = result
            break
    if row_prev is None:
        return None
    if header_cur not in row_prev:
        return None
    return row_prev[header_cur]


def compare_values(headers_cur, results_cur, headers_prev, results_prev):
    # The first row is the header: "max_memory" -> "Max memory", etc.
    ret = [list(map(lambda x: " ".join(x.split("_")).capitalize(),
                    headers_cur))]
    for row_cur in results_cur:
        ret.append([])
        performance_change = False
        for header in headers_cur:
            item_cur = row_cur[header]
            if isinstance(item_cur, str):
                item = " ".join(item_cur.split("_")).capitalize()
            else:
                value_cur = item_cur["median"]
                item_prev = find_item(results_prev, header, row_cur)
                if header != "max_memory":
                    # Timings appear to be stored in seconds; show them in
                    # milliseconds and flag changes larger than 5%.
                    fmt = "{:.3f}ms"
                    scale = 1000.0
                    threshold = 0.050
                else:
                    # Memory appears to be stored in KiB; show it in MiB
                    # and flag changes larger than 2.5%.
                    fmt = "{:.2f}MiB"
                    scale = 1.0 / 1024.0
                    threshold = 0.025
                # TODO: add statistics check
                if item_prev is not None:
                    value_prev = item_prev["median"]
                    if value_prev != 0.0:
                        diff = (value_cur - value_prev) / value_prev
                    else:
                        diff = 0.0
                    # Tiny absolute values are never flagged as changed
                    # (for timings this means values at or below 0.5ms).
                    if diff < -threshold and value_cur > 0.0005:
                        performance_change = True
                        sign = " {icon arrow-down color=green}"
                    elif diff > threshold and value_cur > 0.0005:
                        performance_change = True
                        sign = " {icon arrow-up color=red}"
                    else:
                        sign = ""
                    fmt += " //({:+.2%})//{}"
                    item = fmt.format(value_cur * scale, diff, sign)
                else:
                    # No previous measurement, mark the value as new.
                    fmt += " //(new)// {{icon plus color=blue}}"
                    item = fmt.format(value_cur * scale)
                    performance_change = True
            ret[-1].append(item)
        # Drop the row if none of its values changed noticeably.
        if not performance_change:
            ret.pop()
    return ret
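
# The cells built above use Phabricator Remarkup inline markup: "//...//"
# is italics and "{icon ...}" renders an icon. A regressed timing therefore
# comes out as, for example:
#
#   12.345ms //(+6.78%)// {icon arrow-up color=red}
#
# and a measurement with no previous counterpart as:
#
#   12.345ms //(new)// {icon plus color=blue}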


def generate_remarkup(data):
    ret = "==== Macro benchmark summary: ====\n\n"
    # data always contains the header row, so more than one row means at
    # least one benchmark changed.
    if len(data) > 1:
        ret += "<table>\n"
        for row in data:
            ret += " <tr>\n"
            for item in row:
                if row == data[0]:
                    fmt = " <th>{}</th>\n"
                else:
                    fmt = " <td>{}</td>\n"
                ret += fmt.format(item)
            ret += " </tr>\n"
        ret += "</table>\n"
    else:
        ret += "No performance change detected.\n"
    return ret
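
# Assuming a single changed row, the generated document looks roughly like
# this (cell contents abbreviated):
#
#   ==== Macro benchmark summary: ====
#
#   <table>
#    <tr>
#    <th>Group name</th>
#    ...
#    </tr>
#    <tr>
#    <td>12.345ms //(+6.78%)// {icon arrow-up color=red}</td>
#    ...
#    </tr>
#   </table>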


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Process macro benchmark summary.")
    parser.add_argument("--current", nargs="+", required=True,
                        help="current summary files")
    parser.add_argument("--previous", nargs="+", required=True,
                        help="previous summary files")
    parser.add_argument("--output", default="",
                        help="output file, if not specified the script "
                             "outputs to stdout")
    args = parser.parse_args()

    # Concatenate the results from all given files; the headers are taken
    # from the first file of each set.
    headers_cur, headers_prev = None, None
    results_cur, results_prev = [], []
    for current in args.current:
        data = load_file(current)
        if headers_cur is None:
            headers_cur = data["headers"]
        results_cur += data["results"]
    for previous in args.previous:
        data = load_file(previous)
        if headers_prev is None:
            headers_prev = data["headers"]
        results_prev += data["results"]

    markup = generate_remarkup(compare_values(headers_cur, results_cur,
                                              headers_prev, results_prev))

    if args.output == "":
        sys.stdout.write(markup)
        sys.exit(0)

    with open(args.output, "w") as f:
        f.write(markup)
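
# Example invocation (the file names are hypothetical):
#
#   ./macro_benchmark_summary \
#       --current new/summary.json \
#       --previous old/summary.json \
#       --output macro_benchmark_summary.remarkup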