memgraph/tools/apollo/macro_benchmark_summary

#!/usr/bin/python3
import argparse
import sys


def convert2float(val):
    # Convert a token to float where possible; non-numeric tokens (e.g. group
    # and query names) are kept as strings.
    try:
        return float(val)
    except ValueError:
        return val
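
# For illustration: convert2float("0.0025") yields 0.0025, while a
# non-numeric token such as a query name is returned unchanged.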


def parse_file(fname):
    # Parse a whitespace-separated summary file into a list of rows.
    with open(fname) as f:
        data = f.readlines()
    ret = []
    for row in data:
        row = row.strip()
        if row == "":
            continue
        ret.append(list(map(convert2float, row.split())))
    return ret
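
# A minimal sketch of parse_file's output, assuming a whitespace-separated
# summary file (the column names here are hypothetical; the real set comes
# from the benchmark harness):
#
#     group_name query_name wall_time max_memory
#     match      simple     0.002500  102400
#
# parses to [["group_name", "query_name", "wall_time", "max_memory"],
#            ["match", "simple", 0.0025, 102400.0]].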


def strip_integers(row):
    # Keep only the string cells of a row; these identify the benchmark.
    return list(filter(lambda x: isinstance(x, str), row))


def find_item(data, header, row):
    # Find the value stored under `header` in the row of `data` whose string
    # cells match those of `row`; return None if no such row or column exists.
    headers = data[0]
    row = strip_integers(row)
    pos_x = -1
    for i in range(len(data)):
        s = strip_integers(data[i])
        if s != row:
            continue
        pos_x = i
        break
    if pos_x == -1:
        return None
    pos_y = -1
    for j in range(len(headers)):
        if headers[j] != header:
            continue
        pos_y = j
        break
    if pos_y == -1:
        return None
    return data[pos_x][pos_y]


def compare_values(data_cur, data_prev):
    # Build a table of Remarkup-formatted cells comparing the current summary
    # against the previous one; rows without a significant change are dropped.
    ret = []
    headers = data_cur[0]
    for i in range(len(data_cur)):
        ret.append([])
        row_cur = data_cur[i]
        performance_change = False
        for j in range(len(row_cur)):
            item_cur = row_cur[j]
            if isinstance(item_cur, str):
                item = " ".join(item_cur.split("_")).capitalize()
            else:
                item_prev = find_item(data_prev, headers[j], row_cur)
                if j != len(row_cur) - 1:
                    # All but the last column are timings in seconds; report
                    # them in milliseconds with a 5% change threshold.
                    fmt = "{:.3f}ms"
                    scale = 1000.0
                    threshold = 0.050
                else:
                    # The last column is memory usage, scaled from KiB to MiB,
                    # with a tighter 2.5% change threshold.
                    fmt = "{:.2f}MiB"
                    scale = 1.0 / 1024.0
                    threshold = 0.025
                if item_prev is not None:
                    if item_prev != 0.0:
                        diff = (item_cur - item_prev) / item_prev
                    else:
                        diff = 0.0
                    if diff < -threshold and item_cur > 0.0005:
                        performance_change = True
                        sign = " {icon arrow-down color=green}"
                    elif diff > threshold and item_cur > 0.0005:
                        performance_change = True
                        sign = " {icon arrow-up color=red}"
                    else:
                        sign = ""
                    fmt += " //({:+.2%})//{}"
                    item = fmt.format(item_cur * scale, diff, sign)
                else:
                    # No previous measurement exists: mark the value as new.
                    fmt += " //(new)// {{icon plus color=blue}}"
                    item = fmt.format(item_cur * scale)
                    performance_change = True
            ret[-1].append(item)
        # Keep the header row (i == 0) unconditionally.
        if not performance_change and i > 0:
            ret.pop()
    return ret
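
# A hedged example of the cell format: a timing that went from a previous
# 0.100s to a current 0.120s (a +20% regression, above the 5% threshold)
# would render as:
#
#     120.000ms //(+20.00%)// {icon arrow-up color=red}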


def generate_remarkup(data):
    ret = "==== Macro benchmark summary: ====\n\n"
    if len(data) > 1:
        ret += "<table>\n"
        for row in data:
            ret += " <tr>\n"
            for item in row:
                if row == data[0]:
                    fmt = " <th>{}</th>\n"
                else:
                    fmt = " <td>{}</td>\n"
                ret += fmt.format(item)
            ret += " </tr>\n"
        ret += "</table>\n"
    else:
        ret += "No performance change detected.\n"
    return ret


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Process macro benchmark summary.")
    parser.add_argument("--current", nargs="+", required=True,
                        help="current summary files")
    parser.add_argument("--previous", nargs="+", required=True,
                        help="previous summary files")
    parser.add_argument("--output", default="",
                        help="output file; if not specified the script "
                             "outputs to stdout")
    args = parser.parse_args()

    data_cur, data_prev = [], []
    # Concatenate all summary files, keeping the header row only from the
    # first file of each set.
    for i, current in enumerate(args.current):
        off = 0 if i == 0 else 1
        data_cur += parse_file(current)[off:]
    for i, previous in enumerate(args.previous):
        off = 0 if i == 0 else 1
        data_prev += parse_file(previous)[off:]

    markup = generate_remarkup(compare_values(data_cur, data_prev))
    if args.output == "":
        sys.stdout.write(markup)
        sys.exit(0)
    with open(args.output, "w") as f:
        f.write(markup)
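
# Example invocation (file names are hypothetical; the summaries are the
# whitespace-separated tables written by the macro benchmark runs):
#
#     ./macro_benchmark_summary \
#         --current current_run_1 current_run_2 \
#         --previous previous_run_1 previous_run_2 \
#         --output macro_benchmark_summary.remarkup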