measuring latency

Reviewers: buda

Reviewed By: buda

Differential Revision: https://phabricator.memgraph.io/D494
Matej Gradiček 2017-06-20 11:04:30 +00:00
parent 187c831eb7
commit 83d8673afb
4 changed files with 162 additions and 13 deletions

plot_latency Executable file

@@ -0,0 +1,78 @@
#!/usr/bin/env python3
"""
Plots a graph of memgraph and neo4j latencies. Takes paths to the
JSON latency files as arguments.
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import json
from argparse import ArgumentParser


def main():
    argp = ArgumentParser(description=__doc__)
    argp.add_argument('--memgraph-latency',
                      help='Path to the json of memgraph latency')
    argp.add_argument('--neo4j-latency',
                      help='Path to the json of neo4j latency')
    args = argp.parse_args()

    fig = plt.gcf()
    fig.set_size_inches(10, 16)

    with open(args.neo4j_latency) as json_file:
        json_neo = json.load(json_file)
    with open(args.memgraph_latency) as json_file:
        json_mem = json.load(json_file)

    tests_num = 0
    time_list_neo = []
    time_list_mem = []

    # First pass: find the longest execution time among tests that
    # passed on both databases, so the label offset can be computed
    # relative to it.
    max_time = 0
    for key in json_mem['data']:
        if json_neo['data'][key]['status'] == 'passed' and \
                json_mem['data'][key]['status'] == 'passed':
            time_neo = json_neo['data'][key]['execution_time']
            time_mem = json_mem['data'][key]['execution_time']
            max_time = max(max_time, time_neo, time_mem)
    offset = 0.01 * max_time

    # Second pass: draw one horizontal segment per test, red when
    # memgraph is slower than neo4j and green otherwise, labeled with
    # the slower-to-faster ratio.
    for key in json_mem['data']:
        if json_neo['data'][key]['status'] == 'passed' and \
                json_mem['data'][key]['status'] == 'passed':
            time_neo = json_neo['data'][key]['execution_time']
            time_mem = json_mem['data'][key]['execution_time']
            time_list_neo.append(time_neo)
            time_list_mem.append(time_mem)
            tests_num += 1
            if time_neo < time_mem:
                plt.plot((time_mem, time_neo), (tests_num, tests_num),
                         color='red', label=key, lw=0.3)
            else:
                plt.plot((time_mem, time_neo), (tests_num, tests_num),
                         color='green', label=key, lw=0.3)
            ratio = '%.2f' % (max(time_neo, time_mem) /
                              min(time_neo, time_mem))
            plt.text(max(time_mem, time_neo) + offset, tests_num,
                     key + " ---> " + ratio + "x", size=1)

    # Mark the individual measurements: orange for memgraph, blue
    # for neo4j.
    x = range(1, tests_num + 1)
    plt.plot(time_list_mem, x, marker='o', markerfacecolor='orange',
             color='orange', linestyle='', markersize=0.5)
    plt.plot(time_list_neo, x, marker='o', markerfacecolor='blue',
             color='blue', linestyle='', markersize=0.5)
    plt.margins(0.1, 0.01)
    plt.savefig("latency_graph.png", dpi=2000)


if __name__ == '__main__':
    main()
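For reference, a sketch of how the script might be invoked and of the input shape it assumes, derived from the fields it reads above (data, execution_time, status); the file names and the scenario entry are hypothetical:

# Hypothetical invocation; the JSON paths are placeholders:
#   ./plot_latency --memgraph-latency mem.json --neo4j-latency neo.json
#
# Minimal content each file is expected to hold, mirroring the keys
# the script reads; the scenario name and time are invented:
example_latency = {
    "metadata": {"execution_time_unit": "seconds"},
    "data": {
        "Keys test:": {"execution_time": 0.0042, "status": "passed"}
    }
}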


@@ -3,6 +3,7 @@ import datetime
import time
import json
import sys
import os
from steps.test_parameters import TestParameters
from neo4j.v1 import GraphDatabase, basic_auth
from steps.graph_properties import GraphProperties
@@ -10,13 +11,31 @@ from test_results import TestResults
test_results = TestResults()
"""
Executes before every step. Checks if step is execution
step and sets context variable to true if it is.
"""
def before_step(context, step):
context.execution_step = False
if step.name == "executing query":
context.execution_step = True
def before_scenario(context, step):
"""
Executes before every scenario. Initializes test parameters,
graph properties, exception and test execution time.
"""
def before_scenario(context, scenario):
context.test_parameters = TestParameters()
context.graph_properties = GraphProperties()
context.exception = None
context.execution_time = None
"""
Executes after every scenario. Pauses execution if flags are set.
Adds execution time to latency dict if it is not None.
"""
def after_scenario(context, scenario):
test_results.add_test(scenario.status)
if context.config.single_scenario or \
@@ -24,32 +43,50 @@ def after_scenario(context, scenario):
        print("Press enter to continue")
        sys.stdin.readline()
    if context.execution_time is not None:
        context.js['data'][scenario.name] = {
            "execution_time": context.execution_time,
            "status": scenario.status
        }


def after_feature(context, feature):
    """
    Executes after every feature. If the single-feature flag is set,
    pauses before executing the next feature.
    """
    if context.config.single_feature:
        print("Press enter to continue")
        sys.stdin.readline()
"""
Executes before running tests. Initializes driver and latency
dict and creates needed directories.
"""
def before_all(context):
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime("%Y_%m_%d__%H_%M_%S")
latency_file = "latency/" + context.config.database + "/" + \
get_test_suite(context) + "/" + timestamp + ".json"
if not os.path.exists(os.path.dirname(latency_file)):
os.makedirs(os.path.dirname(latency_file))
context.driver = create_db_driver(context)
context.latency_file = latency_file
context.js = dict()
context.js["metadata"] = dict()
context.js["metadata"]["execution_time_unit"] = "seconds"
context.js["data"] = dict()
set_logging(context)
"""
Executes when testing is finished. Creates JSON files of test latency
and test results.
"""
def after_all(context):
context.driver.close()
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime("%Y_%m_%d__%H_%M")
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime("%Y_%m_%d__%H_%M")
root = context.config.root
if root.endswith("/"):
root = root[0:len(root) - 1]
if root.endswith("features"):
root = root[0: len(root) - len("features") - 1]
test_suite = root.split('/')[-1]
test_suite = get_test_suite(context)
file_name = context.config.output_folder + timestamp + \
"-" + context.config.database + "-" + test_suite + ".json"
@@ -59,13 +96,40 @@ def after_all(context):
    with open(file_name, 'w') as f:
        json.dump(js, f)
    with open(context.latency_file, "a") as f:
        json.dump(context.js, f)


def get_test_suite(context):
    """
    Returns the test suite name derived from the test root folder.
    If the test root is a feature file, the file name is returned
    without the .feature extension.
    """
    root = context.config.root
    if root.endswith("/"):
        root = root[:-1]
    if root.endswith("features"):
        root = root[:-(len("features") + 1)]
    test_suite = root.split('/')[-1]
    return test_suite


def set_logging(context):
    """
    Initializes the log and sets the logging level to debug.
    """
    logging.basicConfig(level="DEBUG")
    log = logging.getLogger(__name__)
    context.log = log


def create_db_driver(context):
    """
    Creates the database driver and returns it.
    """
    uri = context.config.database_uri
    auth_token = basic_auth(

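The pieces above fit together as follows; a sketch assuming the database is "memgraph" and the test root ends in a "features" directory (the paths are hypothetical):

# Hypothetical walk-through of the path construction above:
#   context.config.root  = "/home/user/tests/openCypher/features"
#   get_test_suite(...)  -> "openCypher"
#   latency_file         -> "latency/memgraph/openCypher/"
#                           "2017_06_20__11_04_30.json"
# Each scenario then adds one entry to context.js["data"], e.g.:
#   {"Keys test:": {"execution_time": 0.0042, "status": "passed"}}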

@@ -1,3 +1,5 @@
import time


def query(q, context, params={}):
    """
    Function used to execute a query on the database. Query results are
@@ -16,6 +18,7 @@ def query(q, context, params={}):
    if (context.config.database == "neo4j" or
            context.config.database == "memgraph"):
        session = context.driver.session()
        start = time.time()
        try:
            # executing query
            results = session.run(q, params)
@@ -40,6 +43,10 @@ def query(q, context, params={}):
            context.exception = e
            context.log.info('%s', str(e))
        finally:
            # Record the latency even when the query raised, but only
            # for execution steps.
            end = time.time()
            if context.execution_step is not None and \
                    context.execution_step:
                context.execution_time = end - start
            session.close()
    return results_list
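The timing pattern above, isolated into a standalone sketch: the clock starts just before session.run and the delta is taken in finally, so failed queries are timed as well. This helper is illustrative, not part of the diff; time.time() is kept to match the code above, though time.monotonic() would be immune to wall-clock adjustments.

import time
from contextlib import contextmanager


@contextmanager
def measure(result):
    # Illustrative sketch of the pattern above: record the elapsed
    # wall-clock time even when the body raises, mirroring the
    # try/finally around session.run(). 'result' is a plain dict
    # used as an out-parameter.
    start = time.time()
    try:
        yield
    finally:
        result["execution_time"] = time.time() - start

# Hypothetical usage:
#   timing = {}
#   with measure(timing):
#       session.run(q, params)
#   print(timing["execution_time"])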


@@ -598,7 +598,7 @@ Feature: Functions
    Scenario: Keys test:
        Given an empty graph
        When executing query:
            """
            CREATE (n{true: 123, a: null, b: 'x', null: 1}) RETURN KEYS(n) AS a
            """