Add support for distributed TCK tests
Reviewers: mferencevic, ipaljak, mculinovic
Reviewed By: mculinovic
Subscribers: teon.banek, msantl, pullbot
Differential Revision: https://phabricator.memgraph.io/D1395

parent e89fe953b3
commit 4fd5b1ebc4
@@ -1,8 +1,13 @@
- name: quality_assurance
commands: TIMEOUT=300 ./continuous_integration
infiles:
infiles: &INFILES
- . # current directory
- ../../build_debug/memgraph # memgraph debug binary
- ../../config # directory with config files
outfile_paths:
outfile_paths: &OUTFILE_PATHS
- \./memgraph/tests/qa/\.quality_assurance_status

- name: quality_assurance_distributed
commands: TIMEOUT=300 ./continuous_integration --distributed
infiles: *INFILES
outfile_paths: *OUTFILE_PATHS

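The &INFILES / *INFILES pair above is plain YAML anchor/alias reuse: the new distributed job inherits exactly the same infiles and outfile_paths lists as the existing job without repeating them. A minimal sketch of that behaviour (assumes PyYAML, which the test tooling already uses; the snippet is illustrative, not the real config):

    import yaml

    snippet = (
        "- name: quality_assurance\n"
        "  infiles: &INFILES\n"
        "    - .\n"
        "    - ../../build_debug/memgraph\n"
        "- name: quality_assurance_distributed\n"
        "  infiles: *INFILES\n"
    )
    jobs = yaml.safe_load(snippet)
    # the alias resolves to the same list as the anchor
    assert jobs[0]["infiles"] == jobs[1]["infiles"]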
@@ -18,7 +18,11 @@ import json
import logging
import subprocess

from argparse import ArgumentParser

log = logging.getLogger(__name__)
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))


class Test:
"""
@@ -43,7 +47,7 @@ class Test:
self.memgraph_params = memgraph_params
self.mandatory = mandatory

# constants
# Constants
suites = [
Test(
name="memgraph_V1",
@@ -70,6 +74,16 @@ suite_suffix = "memgraph-{}.json"
qa_status_path = ".quality_assurance_status"
measurements_path = ".apollo_measurements"


def parse_args():
"""
Parse command line arguments
"""
argp = ArgumentParser(description=__doc__)
argp.add_argument("--distributed", action="store_true")
return argp.parse_args()


def get_newest_path(folder, suffix):
"""
:param folder: Scanned folder.
@@ -95,12 +109,12 @@ def generate_measurements(suite, result_path):
with open(result_path) as f:
result = json.load(f)
ret = ""
for i in ["total", "passed"]:
for i in ["total", "passed", "restarts"]:
ret += "{}.{} {}\n".format(suite, i, result[i])
return ret

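For reference, a quick sketch (not part of the diff) of the Apollo measurement lines this loop now emits, using a made-up result file for the memgraph_V1 suite:

    result = {"total": 120, "passed": 118, "restarts": 1}  # hypothetical report
    out = ""
    for i in ["total", "passed", "restarts"]:
        out += "{}.{} {}\n".format("memgraph_V1", i, result[i])
    # out:
    #   memgraph_V1.total 120
    #   memgraph_V1.passed 118
    #   memgraph_V1.restarts 1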
def generate_status(suite, result_path, required = False):
def generate_status(suite, result_path, required=False):
"""
:param suite: Test suite name.
:param result_path: File path with json status report.
@@ -112,6 +126,7 @@ def generate_status(suite, result_path, required = False):
result = json.load(f)
total = result["total"]
passed = result["passed"]
restarts = result["restarts"]
ratio = passed / total
msg = "{} / {} //({:.2%})//".format(passed, total, ratio)
if required:
@@ -119,16 +134,17 @@ def generate_status(suite, result_path, required = False):
msg += " {icon check color=green}"
else:
msg += " {icon times color=red}"
return (msg, passed, total)
return (msg, passed, total, restarts)


def generate_remarkup(data):
def generate_remarkup(data, distributed=False):
"""
:param data: Tabular data to convert to remarkup.

:return: Remarkup formatted status string.
"""
ret = "==== Quality assurance status: ====\n\n"
extra_desc = "distributed " if distributed else ""
ret = "==== Quality assurance {}status: ====\n\n".format(extra_desc)
ret += "<table>\n"
for row in data:
ret += " <tr>\n"
@@ -144,52 +160,68 @@ def generate_remarkup(data):

if __name__ == "__main__":
# logger config
args = parse_args()
distributed = []
# Tests are not mandatory for distributed
if args.distributed:
distributed = ["--distributed"]
for suite in suites:
suite.mandatory = False

# Logger config
logging.basicConfig(level=logging.INFO)

# run suites
venv_python = os.path.join(SCRIPT_DIR, "ve3", "bin", "python3")
exec_dir = os.path.realpath(os.path.join(SCRIPT_DIR, "tck_engine"))
tests_dir = os.path.realpath(os.path.join(exec_dir, "tests"))
# Run suites
for suite in suites:
log.info("Starting suite '{}' scenarios.".format(suite.name))
subprocess.run(["./run",
"--test-suite", suite.test_suite,
"--name", suite.name,
# "--verbose",
"--memgraph-params", suite.memgraph_params],
check = False)
test = os.path.realpath(os.path.join(tests_dir, suite.test_suite))
cmd = [venv_python, "-u",
os.path.join(exec_dir, "test_executor.py"),
"--root", test,
"--test-name", "{}".format(suite.name),
"--db", "memgraph",
"--memgraph-params",
"\"{}\"".format(suite.memgraph_params)] + distributed
subprocess.run(cmd, check=False)

# measurements
# Measurements
measurements = ""

# status table headers
status_data = [["Suite", "Scenarios"]]
# Status table headers
status_data = [["Suite", "Scenarios", "Restarts"]]

# List of mandatory suites that have failed
mandatory_fails = []

for suite in suites:
# get data files for test suite
# Get data files for test suite
suite_result_path = get_newest_path(results_folder,
suite_suffix.format(suite.name))
suite_suffix.format(suite.name))
log.info("Memgraph result path is {}".format(suite_result_path))

# read scenarios
suite_status, suite_passed, suite_total = generate_status(
suite.name, suite_result_path, required = suite.mandatory)
# Read scenarios
suite_status, suite_passed, suite_total, suite_restarts = \
generate_status(suite.name, suite_result_path,
required=suite.mandatory)

if suite.mandatory and suite_passed != suite_total:
if suite.mandatory and suite_passed != suite_total or \
not args.distributed and suite_restarts > 0:
mandatory_fails.append(suite.name)

status_data.append([suite.name, suite_status])
status_data.append([suite.name, suite_status, suite_restarts])
measurements += generate_measurements(suite.name, suite_result_path)

# create status message
qa_status_message = generate_remarkup(status_data)
# Create status message
qa_status_message = generate_remarkup(status_data, args.distributed)

# create the report file
# Create the report file
with open(qa_status_path, "w") as f:
f.write(qa_status_message)

#create the measurements file
# Create the measurements file
with open(measurements_path, "w") as f:
f.write(measurements)

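To make the suite loop above concrete, here is roughly the value of cmd for one suite when the driver itself is run with --distributed; the suite name and relative paths are illustrative, not taken from the diff:

    # hypothetical expansion of cmd for the memgraph_V1 suite
    cmd = ["ve3/bin/python3", "-u", "tck_engine/test_executor.py",
           "--root", "tck_engine/tests/memgraph_V1",
           "--test-name", "memgraph_V1",
           "--db", "memgraph",
           "--memgraph-params", '""'] + ["--distributed"]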
@@ -1,10 +1,11 @@
#!/usr/bin/env python3

'''
Filters failing scenarios from a tck test run and prints them to stdout.
'''

from argparse import ArgumentParser
import sys


def main():
argp = ArgumentParser(description=__doc__)

@@ -1,13 +1,14 @@
#!/bin/bash

function print_usage_and_exit {
echo "./run --test-suite test_suite [--unstable]"
echo "./local_runner --test-suite test_suite [--distributed] [--num-machines num_machines]"
echo "Required arguments:"
echo -e " --test-suite test_suite\trun test_suite scenarios, test_suite must be test folder in tck_engine/tests."
echo -e " --name name\tunique identifer of test_suite and its parameters"
echo "Optional arguments:"
echo -e " --unstable\trun unstable scenarios"
echo -e " --memgraph-params \"param1=value1 param2=value2\"\tcommand line arguments for memgraph"
echo -e " --distributed\trun memgraph in distributed"
echo -e " --num-machines num-machines\tnumber of machines for distributed, default is 3"
exit 1
}

@@ -15,18 +16,25 @@ function print_usage_and_exit {
set -e

# read arguments
unstable=false
distributed=false
num_machines=3
memgraph_params=""

loglevel=2
while [[ $# -gt 0 ]]; do
case $1 in
--unstable)
unstable=true
--distributed)
distributed=true
shift
;;
--verbose)
loglevel=0
--num-machines)
if [ $# -eq 1 ]; then
print_usage_and_exit
fi
num_machines=$2
re='^[0-9]+$'
if ! [[ $num_machines =~ $re ]] ; then
print_usage_and_exit
fi
shift
shift
;;
--memgraph-params)
@@ -66,45 +74,24 @@ fi

# save the path where this script is
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
memgraph_src_dir=${script_dir}/../..

memgraph_build_dir=${memgraph_src_dir}/build
if [ ! -d ${memgraph_build_dir} ]; then
memgraph_build_dir=${memgraph_src_dir}/build_debug
fi
memgraph_binary=${memgraph_build_dir}/memgraph

# activate virtualenv
source $script_dir/ve3/bin/activate

# get full path to memgraph config for interpreted queries
config_path="${memgraph_src_dir}/config/testing.conf"

# run scenarios
cd ${script_dir}
tck_flags="--root tck_engine/tests/$test_suite
--test-name $name
--no-side-effects --db memgraph"
--db memgraph"

if [[ $unstable = true ]]; then
tck_flags="$tck_flags --unstable"
if [[ $distributed = true ]]; then
tck_flags="$tck_flags --distributed"
tck_flags="$tck_flags --num-machines $num_machines"
fi

# the script has to be careful because memgraph process will be detached
set +e
if [ -n "$memgraph_params" ]; then
python3 tck_engine/test_executor.py $tck_flags --memgraph-params \"$memgraph_params\"
else
python3 tck_engine/test_executor.py $tck_flags
fi

# run memgraph
MEMGRAPH_CONFIG="$config_path"
"$memgraph_binary" --min-log-level=$loglevel "$memgraph_params" 1>&2 &
background_pid=$!

function cleanup_and_exit {
kill -9 $background_pid
sleep 2
exit $1
}

sleep 2

python3 tck_engine/test_executor.py $tck_flags
cleanup_and_exit $?
@@ -1,111 +1,108 @@
import logging
# -*- coding: utf-8 -*-

import atexit
import datetime
import time
import json
import sys
import logging
import os
import subprocess
import sys
import time
from fcntl import fcntl, F_GETFL, F_SETFL
from steps.test_parameters import TestParameters
from neo4j.v1 import GraphDatabase, basic_auth
from steps.graph_properties import GraphProperties
from test_results import TestResults

# Constants - Memgraph flags
COMMON_FLAGS = ["--durability-enabled=false",
"--snapshot-on-exit=false",
"--db-recover-on-startup=false"]
DISTRIBUTED_FLAGS = ["--num-workers", str(6),
"--rpc-num-workers", str(6)]
MASTER_FLAGS = ["--master",
"--master-port", "10000"]
MEMGRAPH_PORT = 7687

# Module-scoped variables
test_results = TestResults()

"""
|
||||
Executes before every step. Checks if step is execution
|
||||
step and sets context variable to true if it is.
|
||||
"""
|
||||
def before_step(context, step):
|
||||
context.execution_step = False
|
||||
if step.name == "executing query":
|
||||
context.execution_step = True
|
||||
|
||||
# Helper functions
|
||||
def get_script_path():
|
||||
return os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
|
||||
"""
|
||||
Executes before every scenario. Initializes test parameters,
|
||||
graph properties, exception and test execution time.
|
||||
"""
|
||||
def before_scenario(context, scenario):
|
||||
context.test_parameters = TestParameters()
|
||||
context.graph_properties = GraphProperties()
|
||||
context.exception = None
|
||||
context.execution_time = None
|
||||
def start_process(cmd, stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.PIPE, **kwargs):
|
||||
ret = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, **kwargs)
|
||||
# set the O_NONBLOCK flag of process stderr file descriptor
|
||||
if stderr == subprocess.PIPE:
|
||||
flags = fcntl(ret.stderr, F_GETFL) # get current stderr flags
|
||||
fcntl(ret.stderr, F_SETFL, flags | os.O_NONBLOCK)
|
||||
return ret
|
||||
|
||||
|
||||
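An aside on start_process above (not part of the diff): switching the child's stderr pipe into O_NONBLOCK mode is what lets the hooks further down call stderr.read() opportunistically; with nothing buffered the read typically returns None (or an empty byte string) instead of hanging the test run. A minimal sketch, using a hypothetical long-running command:

    proc = start_process(["sleep", "60"])
    chunk = proc.stderr.read()  # no data written yet, so this does not block
    proc.kill()
    proc.wait()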
"""
|
||||
Executes after every scenario. Pauses execution if flags are set.
|
||||
Adds execution time to latency dict if it is not None.
|
||||
"""
|
||||
def after_scenario(context, scenario):
|
||||
test_results.add_test(scenario.status)
|
||||
if context.config.single_scenario or \
|
||||
(context.config.single_fail and scenario.status == "failed"):
|
||||
print("Press enter to continue")
|
||||
sys.stdin.readline()
|
||||
|
||||
if context.execution_time is not None:
|
||||
context.js['data'][scenario.name] = {
|
||||
"execution_time": context.execution_time, "status": scenario.status
|
||||
}
|
||||
def is_tested_system_active(context):
|
||||
return all(proc.poll() is None for proc in context.memgraph_processes)
|
||||
|
||||
|
||||
"""
|
||||
Executes after every feature. If flag is set, pauses before
|
||||
executing next scenario.
|
||||
"""
|
||||
def after_feature(context, feature):
|
||||
if context.config.single_feature:
|
||||
print("Press enter to continue")
|
||||
sys.stdin.readline()
|
||||
def is_tested_system_inactive(context):
|
||||
return not any(proc.poll() is None for proc in context.memgraph_processes)
|
||||
|
||||
|
||||
"""
|
||||
Executes before running tests. Initializes driver and latency
|
||||
dict and creates needed directories.
|
||||
"""
|
||||
def before_all(context):
|
||||
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime("%Y_%m_%d__%H_%M_%S")
|
||||
latency_file = "latency/" + context.config.database + "/" + \
|
||||
get_test_suite(context) + "/" + timestamp + ".json"
|
||||
if not os.path.exists(os.path.dirname(latency_file)):
|
||||
os.makedirs(os.path.dirname(latency_file))
|
||||
context.driver = create_db_driver(context)
|
||||
context.latency_file = latency_file
|
||||
context.js = dict()
|
||||
context.js["metadata"] = dict()
|
||||
context.js["metadata"]["execution_time_unit"] = "seconds"
|
||||
context.js["data"] = dict()
|
||||
set_logging(context)
|
||||
def worker_flags(worker_id):
|
||||
flags = ["--worker",
|
||||
"--worker-id", str(worker_id),
|
||||
"--worker-port", str(10000 + worker_id),
|
||||
"--master-port", str(10000)]
|
||||
return flags
|
||||
|
||||
|
||||
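To visualize how the constants and worker_flags above combine, here is a sketch of the flag lists a hypothetical 3-machine distributed run would pass to the master and each worker (values follow the constants in this file; the actual memgraph invocation is assembled later by run_memgraph/start_memgraph):

    # master: COMMON_FLAGS + DISTRIBUTED_FLAGS + MASTER_FLAGS
    ["--durability-enabled=false", "--snapshot-on-exit=false",
     "--db-recover-on-startup=false", "--num-workers", "6",
     "--rpc-num-workers", "6", "--master", "--master-port", "10000"]
    # worker 1: COMMON_FLAGS + DISTRIBUTED_FLAGS + worker_flags(1)
    ["--durability-enabled=false", "--snapshot-on-exit=false",
     "--db-recover-on-startup=false", "--num-workers", "6",
     "--rpc-num-workers", "6", "--worker", "--worker-id", "1",
     "--worker-port", "10001", "--master-port", "10000"]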
"""
|
||||
Executes when testing is finished. Creates JSON files of test latency
|
||||
and test results.
|
||||
"""
|
||||
def after_all(context):
|
||||
context.driver.close()
|
||||
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime("%Y_%m_%d__%H_%M")
|
||||
|
||||
test_suite = get_test_suite(context)
|
||||
file_name = context.config.output_folder + timestamp + \
|
||||
"-" + context.config.database + "-" + context.config.test_name + ".json"
|
||||
|
||||
js = {
|
||||
"total": test_results.num_total(), "passed": test_results.num_passed(),
|
||||
"test_suite": test_suite, "timestamp": timestamp, "db": context.config.database}
|
||||
with open(file_name, 'w') as f:
|
||||
json.dump(js, f)
|
||||
|
||||
with open(context.latency_file, "a") as f:
|
||||
json.dump(context.js, f)
|
||||
def wait_for_server(port, delay=0.01):
|
||||
cmd = ["nc", "-z", "-w", "1", "127.0.0.1", port]
|
||||
while subprocess.call(cmd) != 0:
|
||||
time.sleep(0.01)
|
||||
time.sleep(delay)
|
||||
|
||||
|
||||
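wait_for_server above shells out to nc to poll the Bolt port until memgraph accepts connections. A dependency-free sketch of the same idea using only the standard library (an alternative, not what the diff uses):

    import socket
    import time

    def wait_for_port(port, host="127.0.0.1", delay=0.01):
        # poll until a TCP connection succeeds
        while True:
            try:
                with socket.create_connection((host, port), timeout=1):
                    break
            except OSError:
                time.sleep(delay)
        time.sleep(delay)  # give the server a moment after the port opens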
def run_memgraph(context, flags):
memgraph_cmd = [os.path.join(context.memgraph_dir, "memgraph")]
memgraph_subprocess = start_process(memgraph_cmd + flags)
context.memgraph_processes.append(memgraph_subprocess)


def start_memgraph(context):
if context.config.distributed: # Run distributed
flags = COMMON_FLAGS.copy()
if context.config.memgraph_params:
flags += context.extra_flags
run_memgraph(context, flags + DISTRIBUTED_FLAGS + MASTER_FLAGS)
for i in range(1, int(context.config.num_machines)):
run_memgraph(context, flags + DISTRIBUTED_FLAGS + worker_flags(i))
else: # Run single machine memgraph
flags = COMMON_FLAGS.copy()
if context.config.memgraph_params:
flags += context.extra_flags
run_memgraph(context, flags)
assert is_tested_system_active(context), "Failed to start memgraph"
wait_for_server(str(MEMGRAPH_PORT)) # wait for memgraph to start


def cleanup(context):
if context.config.database == "memgraph":
list(map(lambda p: p.kill(), context.memgraph_processes))
list(map(lambda p: p.wait(), context.memgraph_processes))
assert is_tested_system_inactive(context), "Failed to stop memgraph"
context.memgraph_processes.clear()


"""
Returns test suite from a test root folder.
If test root is a feature file, name of file is returned without
.feature extension.
"""
def get_test_suite(context):
"""
Returns test suite from a test root folder.
If test root is a feature file, name of file is returned without
.feature extension.
"""
root = context.config.root

if root.endswith("/"):
@@ -118,24 +115,153 @@ def get_test_suite(context):
return test_suite


"""
Initializes log and sets logging level to debug.
"""
def set_logging(context):
"""
Initializes log and sets logging level to debug.
"""
logging.basicConfig(level="DEBUG")
log = logging.getLogger(__name__)
context.log = log


"""
Creates database driver and returns it.
"""
def create_db_driver(context):
"""
Creates database driver and returns it.
"""
uri = context.config.database_uri
auth_token = basic_auth(
context.config.database_username, context.config.database_password)
if context.config.database == "neo4j" or context.config.database == "memgraph":
if context.config.database == "neo4j" or \
context.config.database == "memgraph":
driver = GraphDatabase.driver(uri, auth=auth_token, encrypted=0)
else:
raise "Unsupported database type"
return driver


# Behave specific functions
def before_step(context, step):
"""
Executes before every step. Checks if step is execution
step and sets context variable to true if it is.
"""
context.execution_step = False
if step.name == "executing query":
context.execution_step = True


def before_scenario(context, scenario):
"""
Executes before every scenario. Initializes test parameters,
graph properties, exception and test execution time.
"""
if context.config.database == "memgraph":
# Check if memgraph is up and running
if is_tested_system_active(context):
context.is_tested_system_restarted = False
else:
cleanup(context)
start_memgraph(context)
context.is_tested_system_restarted = True
context.test_parameters = TestParameters()
context.graph_properties = GraphProperties()
context.exception = None
context.execution_time = None

def before_all(context):
"""
Executes before running tests. Initializes driver and latency
dict and creates needed directories.
"""
timestamp = datetime.datetime.fromtimestamp(
time.time()).strftime("%Y_%m_%d__%H_%M_%S")
latency_file = "latency/" + context.config.database + "/" + \
get_test_suite(context) + "/" + timestamp + ".json"
if not os.path.exists(os.path.dirname(latency_file)):
os.makedirs(os.path.dirname(latency_file))
context.latency_file = latency_file
context.js = dict()
context.js["metadata"] = dict()
context.js["metadata"]["execution_time_unit"] = "seconds"
context.js["data"] = dict()
set_logging(context)
# set config for memgraph
context.memgraph_processes = []
script_path = get_script_path()
context.memgraph_dir = os.path.realpath(
os.path.join(script_path, "../../../build"))
if not os.path.exists(context.memgraph_dir):
context.memgraph_dir = os.path.realpath(
os.path.join(script_path, "../../../build_debug"))
if context.config.memgraph_params:
params = context.config.memgraph_params.strip("\"")
context.extra_flags = params.split()
atexit.register(cleanup, context)
if context.config.database == "memgraph":
start_memgraph(context)
context.driver = create_db_driver(context)


def after_scenario(context, scenario):
"""
Executes after every scenario. Pauses execution if flags are set.
Adds execution time to latency dict if it is not None.
"""
err_output = [p.stderr.read() # noqa unused variable
for p in context.memgraph_processes]
# print error output for each subprocess if scenario failed
if scenario.status == "failed":
for i, err in enumerate(err_output):
if err:
err = err.decode("utf-8")
print("\n", "-" * 5, "Machine {}".format(i), "-" * 5)
list(map(print, [line for line in err.splitlines()]))
test_results.add_test(scenario.status, context.is_tested_system_restarted)
if context.config.single_scenario or \
(context.config.single_fail and scenario.status == "failed"):
print("Press enter to continue")
sys.stdin.readline()

if context.execution_time is not None:
context.js['data'][scenario.name] = {
"execution_time": context.execution_time, "status": scenario.status
}


def after_feature(context, feature):
"""
Executes after every feature. If flag is set, pauses before
executing next scenario.
"""
if context.config.single_feature:
print("Press enter to continue")
sys.stdin.readline()


def after_all(context):
"""
Executes when testing is finished. Creates JSON files of test latency
and test results.
"""
context.driver.close()
timestamp = datetime.datetime.fromtimestamp(
time.time()).strftime("%Y_%m_%d__%H_%M")

test_suite = get_test_suite(context)
file_name = context.config.output_folder + timestamp + \
"-" + context.config.database + "-" + context.config.test_name + ".json"

js = {
"total": test_results.num_total(),
"passed": test_results.num_passed(),
"restarts": test_results.num_restarts(),
"test_suite": test_suite,
"timestamp": timestamp,
"db": context.config.database
}
with open(file_name, 'w') as f:
json.dump(js, f)

with open(context.latency_file, "a") as f:
json.dump(context.js, f)

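For orientation, the per-suite report that after_all writes here (and that continuous_integration's generate_status/generate_measurements read back) would look roughly like this for a hypothetical run; the field names follow the js dict above, the numbers are made up:

    {"total": 120, "passed": 118, "restarts": 1,
     "test_suite": "memgraph_V1", "timestamp": "2018_05_15__12_30",
     "db": "memgraph"}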
@@ -1,4 +1,6 @@
from behave import *
# -*- coding: utf-8 -*-

from behave import given
import graph



@@ -1,5 +1,8 @@
# -*- coding: utf-8 -*-

import time


def query(q, context, params={}):
"""
Function used to execute query on database. Query results are
@@ -16,13 +19,13 @@ def query(q, context, params={}):
results_list = []

if (context.config.database == "neo4j" or
context.config.database == "memgraph"):
context.config.database == "memgraph"):
session = context.driver.session()
start = time.time()
try:
# executing query
results = session.run(q, params)
if not context.config.no_side_effects:
if context.config.side_effects:
summary = results.summary()
add_side_effects(context, summary.counters)
results_list = list(results)
@@ -33,7 +36,7 @@ def query(q, context, params={}):
with session.begin_transaction() as tx:
results = tx.run(q, params)
summary = results.summary()
if not context.config.no_side_effects:
if context.config.side_effects:
add_side_effects(context, summary.counters)
results_list = list(results)
tx.success = True

@@ -1,4 +1,6 @@
from behave import *
# -*- coding: utf-8 -*-

from behave import then

# TODO check for exact error?

@@ -1,11 +1,13 @@
# -*- coding: utf-8 -*-

import database
import os
from behave import *
from behave import given


def clear_graph(context):
database.query("MATCH (n) DETACH DELETE n", context)
if context.exception != None:
if context.exception is not None:
context.exception = None
database.query("MATCH (n) DETACH DELETE n", context)

@@ -46,7 +48,8 @@ def create_graph(name, context):
i = 0
while i < len(content):
ch = content[i]
if ch == '\\' and i != len(content) - 1 and content[i + 1] in q_marks:
if ch == '\\' and i != len(content) - 1 and \
content[i + 1] in q_marks:
single_query += ch + content[i + 1]
i += 2
else:

@@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-


class GraphProperties:

"""

@@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-


def parse(el, ignore_order):
"""
Function used to parse result element. Result element can be

@@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-

import database
import parser
from behave import *
from behave import given, then, step, when
from neo4j.v1.types import Node, Path, Relationship


@@ -52,7 +53,7 @@ def parse_props(prop_json):
elif isinstance(prop_json[prop], str):
properties += prop + ": " + "'" + prop_json[prop] + "', "
elif isinstance(prop_json[prop], bool):
if prop_json[prop] == True:
if prop_json[prop]:
properties += prop + ": true, "
else:
properties += prop + ": false, "
@@ -192,7 +193,8 @@ def get_result_rows(context, ignore_order):
values = result.values()
for i in range(0, len(keys)):
result_rows.append(keys[i] + ":" + parser.parse(
to_string(values[i]).replace("\n", "\\n").replace(" ", ""), ignore_order))
to_string(values[i]).replace("\n", "\\n").replace(" ", ""),
ignore_order))
return result_rows


@@ -211,7 +213,8 @@ def get_expected_rows(context, ignore_order):
for row in context.table:
for col in context.table.headings:
expected_rows.append(
col + ":" + parser.parse(row[col].replace(" ", ""), ignore_order))
col + ":" + parser.parse(row[col].replace(" ", ""),
ignore_order))
return expected_rows


@@ -324,7 +327,7 @@ def side_effects_number(prop, table):

@then('the side effects should be')
def side_effects_step(context):
if context.config.no_side_effects:
if not context.config.side_effects:
return
table = context.table
# get side effects from db queries
@@ -334,12 +337,12 @@ def side_effects_step(context):
properties_dif = side_effects_number("properties", table)
# compare side effects
assert(context.graph_properties.compare(nodes_dif,
relationships_dif, labels_dif, properties_dif) == True)
relationships_dif, labels_dif, properties_dif))


@then('no side effects')
def side_effects_step(context):
if context.config.no_side_effects:
if not context.config.side_effects:
return
# check if side effects are non existing
assert(context.graph_properties.compare([], [], [], []) == True)
assert(context.graph_properties.compare([], [], [], []))

@@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-

import yaml


@@ -1,3 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from behave.__main__ import main as behave_main
from behave import configuration
from argparse import ArgumentParser
@@ -5,16 +8,18 @@ import os
import sys


def parse_args():
def parse_args(argv):
argp = ArgumentParser(description=__doc__)
argp.add_argument("--root", default="tck_engine/tests/openCypher_M05",
help="Path to folder where tests are located, default is tck_engine/tests/openCypher_M05")
argp.add_argument("--root", default="tck_engine/tests/memgraph_V1",
help="Path to folder where tests are located, default "
"is tck_engine/tests/memgraph_V1")
argp.add_argument(
"--stop", action="store_true", help="Stop testing after first fail.")
argp.add_argument("--no-side-effects", action="store_true",
argp.add_argument("--side-effects", action="store_false",
help="Check for side effects in tests.")
argp.add_argument("--db", default="neo4j", choices=[
"neo4j", "memgraph"], help="Default is neo4j.")
argp.add_argument("--db", default="memgraph",
choices=["neo4j", "memgraph"],
help="Default is memgraph.")
argp.add_argument("--db-user", default="neo4j", help="Default is neo4j.")
argp.add_argument(
"--db-pass", default="1234", help="Default is 1234.")
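One subtlety worth spelling out about the new --side-effects option above: with action="store_false" the parsed attribute defaults to True, and passing the flag turns the side-effect checks off. A minimal standalone sketch of that behaviour (not the executor itself):

    from argparse import ArgumentParser

    argp = ArgumentParser()
    argp.add_argument("--side-effects", action="store_false")
    print(argp.parse_args([]).side_effects)                  # True  (checks enabled)
    print(argp.parse_args(["--side-effects"]).side_effects)  # False (checks disabled)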
@@ -22,8 +27,8 @@ def parse_args():
help="Default is bolt://127.0.0.1:7687.")
argp.add_argument("--output-folder", default="tck_engine/results/",
help="Test result output folder, default is results/.")
argp.add_argument("--logging", default="DEBUG", choices=[
"INFO", "DEBUG"], help="Logging level, default is DEBUG.")
argp.add_argument("--logging", default="DEBUG", choices=["INFO", "DEBUG"],
help="Logging level, default is DEBUG.")
argp.add_argument("--unstable", action="store_true",
help="Include unstable feature from features.")
argp.add_argument("--single-fail", action="store_true",
@@ -34,7 +39,13 @@ def parse_args():
help="Pause after every feature.")
argp.add_argument("--test-name", default="",
help="Name of the test")
return argp.parse_args()
argp.add_argument("--distributed", action="store_true",
help="Run memgraph in distributed")
argp.add_argument("--num-machines", type=int, default=3,
help="Number of machines for distributed run")
argp.add_argument("--memgraph-params", default="",
help="Additional params for memgraph run")
return argp.parse_args(argv)


def add_config(option, dictionary):
@@ -43,18 +54,18 @@ def add_config(option, dictionary):
)


def main():
def main(argv):
"""
Script used to run behave tests with given options. List of
options is available when running python test_executor.py -help.
"""
args = parse_args()
args = parse_args(argv)

tests_root = os.path.abspath(args.root)

# adds options to cucumber configuration
add_config("--no-side-effects",
dict(action="store_true", help="Exclude side effects."))
add_config("--side-effects",
dict(action="store_false", help="Exclude side effects."))
add_config("--database", dict(help="Choose database(memgraph/neo4j)."))
add_config("--database-password", dict(help="Database password."))
add_config("--database-username", dict(help="Database username."))
@@ -69,14 +80,19 @@ def main():
add_config("--single-feature",
dict(action="store_true", help="Pause after every feature."))
add_config("--test-name", dict(help="Name of the test."))
add_config("--distributed",
dict(action="store_true", help="Run memgraph in distributed."))
add_config("--num-machines",
dict(help="Number of machines for distributed run."))
add_config("--memgraph-params", dict(help="Additional memgraph params."))

# list with all options
# options will be passed to the cucumber engine
behave_options = [tests_root]
if args.stop:
behave_options.append("--stop")
if args.no_side_effects:
behave_options.append("--no-side-effects")
if args.side_effects:
behave_options.append("--side-effects")
if args.db != "memgraph":
behave_options.append("-e")
behave_options.append("memgraph*")
@@ -99,13 +115,21 @@ def main():
behave_options.append("--single-scenario")
if (args.single_feature):
behave_options.append("--single-feature")
if (args.distributed):
behave_options.append("--distributed")
behave_options.append("--num-machines")
behave_options.append(str(args.num_machines))
behave_options.append("--output-folder")
behave_options.append(args.output_folder)
behave_options.append("--test-name")
behave_options.append(args.test_name)
if (args.memgraph_params):
behave_options.append("--memgraph-params")
behave_options.append(args.memgraph_params)

# runs tests with options
return behave_main(behave_options)


if __name__ == '__main__':
sys.exit(main())
sys.exit(main(sys.argv[1:]))

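As a rough illustration of what main() hands to behave_main for a hypothetical distributed invocation (python3 test_executor.py --root tck_engine/tests/memgraph_V1 --test-name memgraph_V1 --db memgraph --distributed), the assembled list would look something like:

    # hypothetical contents of behave_options; only options visible in this
    # diff are spelled out, the rest of main() is elided here
    ["tck_engine/tests/memgraph_V1",
     "--side-effects",
     # ... options appended in the elided middle of main() ...
     "--distributed", "--num-machines", "3",
     "--output-folder", "tck_engine/results/",
     "--test-name", "memgraph_V1"]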
@@ -1,18 +1,22 @@
class TestResults:
# -*- coding: utf-8 -*-


class TestResults:
"""
Class used to store test results. It has parameters total
and passed.
Class used to store test results.

@attribute total:
int, total number of scenarios.
@attribute passed:
int, number of passed scenarios.
@attribute restarts:
int, number of restarts of underlying tested system.
"""

def __init__(self):
self.total = 0
self.passed = 0
self.restarts = 0

def num_passed(self):
"""
@@ -26,7 +30,13 @@ class TestResults:
"""
return self.total

def add_test(self, status):
def num_restarts(self):
"""
Getter for param restarts.
"""
return self.restarts

def add_test(self, status, is_tested_system_restarted):
"""
Method adds one scenario to current results. If
scenario passed, number of passed scenarios increases.
@@ -37,3 +47,5 @@ class TestResults:
if status == "passed":
self.passed += 1
self.total += 1
if is_tested_system_restarted:
self.restarts += 1
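A small usage sketch of the extended TestResults API (hypothetical statuses, mirroring how the behave hooks call it):

    results = TestResults()
    results.add_test("passed", False)   # clean scenario
    results.add_test("failed", True)    # scenario that forced a memgraph restart
    print(results.num_total(), results.num_passed(), results.num_restarts())
    # -> 2 1 1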