#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Continuous integration toolkit. The purpose of this script is to generate
everything that is needed for the CI environment.

List of responsibilities:
    * execute the default suites
    * terminate execution if any of the internal scenarios fails
    * create the report file that is needed by the Apollo plugin to post
      the status on Phabricator (.quality_assurance_status)
"""

import argparse
import atexit
import copy
import json
import os
import subprocess
import sys
import tempfile
import time

import yaml

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
TESTS_DIR = os.path.join(SCRIPT_DIR, "tests")
BASE_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", ".."))
BUILD_DIR = os.path.join(BASE_DIR, "build")


def wait_for_server(port, delay=0.01):
    """Poll with `nc -z` until something accepts connections on `port`.

    Gives up (and terminates the script) after roughly 20 seconds of polling.
    """
    cmd = ["nc", "-z", "-w", "1", "127.0.0.1", str(port)]
    count = 0
    while subprocess.call(cmd) != 0:
        time.sleep(0.01)
        if count > 20 / 0.01:
            print("Could not wait for server on port", port, "to startup!")
            sys.exit(1)
        count += 1
    time.sleep(delay)
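
# For orientation: the stats file produced by run.py is JSON; the helper
# functions below rely only on its "total" and "passed" counters, e.g.
# (illustrative values):
#
#   {"total": 120, "passed": 118}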


def generate_result_csv(suite, result_path):
    if not os.path.exists(result_path):
        return ""
    with open(result_path) as f:
        result = json.load(f)
    ret = ""
    for i in ["total", "passed"]:
        ret += "{},{},{}\n".format(suite, i, result[i])
    return ret


def generate_status(suite, result_path, required):
    if not os.path.exists(result_path):
        return ("Internal error!", 0, 1)
    with open(result_path) as f:
        result = json.load(f)
    total = result["total"]
    passed = result["passed"]
    ratio = passed / total
    msg = "{} / {} ({:.2%})".format(passed, total, ratio)
    if required:
        if passed == total:
            msg += " 👍"  # +1 emoji
        else:
            msg += " ⛔"  # no entry emoji
    return (msg, passed, total)


def generate_result_html(data):
    ret = "<html>\n"
    ret += "<h1>Memgraph GQL Behave Tests Status</h1>\n"
    ret += "<table>\n"
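    # The first row of `data` carries the column headers (["Suite", "Status"]),
    # so it is rendered with <th> cells; every following row becomes a regular
    # <td> data row.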
    for row in data:
        ret += "  <tr>\n"
        for item in row:
            if row == data[0]:
                fmt = "    <th>{}</th>\n"
            else:
                fmt = "    <td>{}</td>\n"
            ret += fmt.format(item)
        ret += "  </tr>\n"
    ret += "</table>\n"
    return ret


class MemgraphRunner:
    def __init__(self, build_directory):
        self.build_directory = build_directory
        self.proc_mg = None
        self.args = []

    def start(self, args=None):
        if args is None:
            args = []
        if args == self.args and self.is_running():
            return
        self.stop()
        self.args = copy.deepcopy(args)
        self.data_directory = tempfile.TemporaryDirectory()
        memgraph_binary = os.path.join(self.build_directory, "memgraph")
        args_mg = [
            memgraph_binary,
            "--storage-properties-on-edges=true",
            "--data-directory",
            self.data_directory.name,
            "--log-file",
            str(os.path.join(BASE_DIR, "tests", "gql_behave", "memgraph.log")),
        ]
        self.proc_mg = subprocess.Popen(args_mg + self.args)
        wait_for_server(7687, 1)
        assert self.is_running(), "The Memgraph process died!"

    def is_running(self):
        if self.proc_mg is None:
            return False
        if self.proc_mg.poll() is not None:
            return False
        return True

    def stop(self):
        if not self.is_running():
            return
        self.proc_mg.terminate()
        code = self.proc_mg.wait()
        assert code == 0, "The Memgraph process exited with a non-zero status!"


def main():
    # Parse args.
    argp = argparse.ArgumentParser()
    argp.add_argument("--build-directory", default=BUILD_DIR)
    argp.add_argument("--cluster-size", default=3, type=int)
    args = argp.parse_args()

    # Load tests from the config file.
    with open(os.path.join(TESTS_DIR, "config.yaml")) as f:
        suites = yaml.safe_load(f)

    # venv used to run the qa engine.
    venv_python = os.path.join(BASE_DIR, "tests", "ve3", "bin", "python3")

    # Temporary directory for suite results.
    output_dir = tempfile.TemporaryDirectory()

    # Memgraph runner.
    memgraph = MemgraphRunner(args.build_directory)

    @atexit.register
    def cleanup():
        memgraph.stop()

    # Results storage.
    result_csv = "suite,status,quantity\n"
    status_data = [["Suite", "Status"]]
    mandatory_fails = []

    # Run suites.
    old_storage_mode = ""
    for suite in suites:
        print("Starting suite '{}' scenarios.".format(suite["name"]))

        # Restart Memgraph only when the suite requires a different storage
        # mode than the currently running instance.
        suite_storage_mode = suite["storage_mode"]
        if old_storage_mode != suite_storage_mode:
            memgraph.stop()
            memgraph.start(["--storage-mode", suite_storage_mode])
            old_storage_mode = suite_storage_mode

        suite["stats_file"] = os.path.join(output_dir.name, suite["name"] + ".json")
        cmd = [
            venv_python,
            "-u",
            os.path.join(SCRIPT_DIR, "run.py"),
            "--stats-file",
            suite["stats_file"],
            suite["test_suite"],
        ]
        # The exit code isn't checked here because the `behave` framework
        # returns a non-zero exit code when some tests fail.
        subprocess.run(cmd)

        suite_status, suite_passed, suite_total = generate_status(
            suite["name"], suite["stats_file"], suite["must_pass"]
        )
        status_data.append([suite["name"], suite_status])
        result_csv += generate_result_csv(suite["name"], suite["stats_file"])
        if suite["must_pass"] and suite_passed != suite_total:
            mandatory_fails.append(suite["name"])
            break

    # Create the status message.
    result_html = generate_result_html(status_data)

    # Create the report file.
    result_html_path = os.path.join(SCRIPT_DIR, "gql_behave_status.html")
    with open(result_html_path, "w") as f:
        f.write(result_html)

    # Create the measurements file.
    result_csv_path = os.path.join(SCRIPT_DIR, "gql_behave_status.csv")
    with open(result_csv_path, "w") as f:
        f.write(result_csv)

    print(f"CSV status is generated in {result_csv_path}")
    print(f"HTML status is generated in {result_html_path}")

    # Fail the CI run if any mandatory suite did not fully pass.
    if mandatory_fails:
        sys.exit(f"Some tests that must pass have failed: {mandatory_fails}")


if __name__ == "__main__":
    main()
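
# For reference, each suite entry in tests/config.yaml is expected to provide
# the keys read in main() above: "name", "test_suite", "storage_mode" and
# "must_pass". A hypothetical entry (illustrative values only) could look like:
#
#   - name: memgraph_V1
#     test_suite: memgraph_V1
#     storage_mode: IN_MEMORY_TRANSACTIONAL
#     must_pass: true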