Add benchmarking GHA Diff job (#147)
This commit is contained in:
parent
cc27a04139
commit
782c377f5d
61
.github/workflows/diff.yaml
vendored
61
.github/workflows/diff.yaml
vendored
@ -340,3 +340,64 @@ jobs:
|
||||
with:
|
||||
name: "Jepsen Report"
|
||||
path: tests/jepsen/Jepsen.tar.gz
|
||||
|
||||
release_benchmarks:
|
||||
name: "Release benchmarks"
|
||||
runs-on: [self-hosted, Linux, X64, Diff, Gen7]
|
||||
env:
|
||||
THREADS: 24
|
||||
|
||||
steps:
|
||||
- name: Set up repository
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# Number of commits to fetch. `0` indicates all history for all
|
||||
# branches and tags. (default: 1)
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build release binaries
|
||||
run: |
|
||||
# Activate toolchain.
|
||||
source /opt/toolchain-v2/activate
|
||||
|
||||
# Initialize dependencies.
|
||||
./init
|
||||
|
||||
# Build only the memgraph release binary.
|
||||
cd build
|
||||
cmake -DCMAKE_BUILD_TYPE=release ..
|
||||
make -j$THREADS
|
||||
|
||||
- name: Run macro benchmarks
|
||||
run: |
|
||||
cd tests/macro_benchmark
|
||||
./harness QuerySuite MemgraphRunner \
|
||||
--groups aggregation 1000_create unwind_create dense_expand match \
|
||||
--no-strict
|
||||
|
||||
- name: Upload macro benchmark results
|
||||
run: |
|
||||
cd tools/bench-graph-client
|
||||
virtualenv -p python3 ve3
|
||||
source ve3/bin/activate
|
||||
pip install -r requirements.txt
|
||||
./main.py --benchmark-name "macro_benchmark" \
|
||||
--benchmark-results-path "../../tests/macro_benchmark/.harness_summary" \
|
||||
--github-run-id "${{ github.run_id }}" \
|
||||
--github-run-number "${{ github.run_number }}"
|
||||
|
||||
- name: Run mgbench
|
||||
run: |
|
||||
cd tests/mgbench
|
||||
./benchmark.py --num-workers-for-benchmark 12 --export-results benchmark_result.json pokec/medium/*/*
|
||||
|
||||
- name: Upload mgbench results
|
||||
run: |
|
||||
cd tools/bench-graph-client
|
||||
virtualenv -p python3 ve3
|
||||
source ve3/bin/activate
|
||||
pip install -r requirements.txt
|
||||
./main.py --benchmark-name "mgbench" \
|
||||
--benchmark-results-path "../../tests/mgbench/benchmark_result.json" \
|
||||
--github-run-id "${{ github.run_id }}" \
|
||||
--github-run-number "${{ github.run_number }}"
|
||||
|
@ -9,6 +9,7 @@ import json
|
||||
import logging
|
||||
import os
|
||||
import requests
|
||||
import subprocess
|
||||
from datetime import datetime
|
||||
from argparse import ArgumentParser
|
||||
|
||||
@ -20,7 +21,7 @@ GITHUB_REF = os.getenv("GITHUB_REF", "")
|
||||
|
||||
BENCH_GRAPH_SERVER_ENDPOINT = os.getenv(
|
||||
"BENCH_GRAPH_SERVER_ENDPOINT",
|
||||
"http://mgdeps-cache:9000")
|
||||
"http://bench-graph-api:9001")
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@ -38,6 +39,10 @@ def post_measurement(args):
|
||||
with open(args.benchmark_results_path, "r") as f:
|
||||
data = json.load(f)
|
||||
timestamp = datetime.now().timestamp()
|
||||
branch = subprocess.run(
|
||||
["git", "rev-parse", "--abbrev-ref", "HEAD"],
|
||||
stdout=subprocess.PIPE,
|
||||
check=True).stdout.decode("utf-8").strip()
|
||||
req = requests.post(
|
||||
f"{BENCH_GRAPH_SERVER_ENDPOINT}/measurements",
|
||||
json={
|
||||
@ -48,8 +53,8 @@ def post_measurement(args):
|
||||
"git_sha": GITHUB_SHA,
|
||||
"github_run_id": args.github_run_id,
|
||||
"github_run_number": args.github_run_number,
|
||||
"results": data
|
||||
},
|
||||
"results": data,
|
||||
"git_branch": branch},
|
||||
timeout=1)
|
||||
assert req.status_code == 200, \
|
||||
f"Uploading {args.benchmark_name} data failed."
|
||||
|
Loading…
Reference in New Issue
Block a user