Add single_create group to harness

Reviewers: mferencevic, buda

Reviewed By: mferencevic

Subscribers: pullbot

Differential Revision: https://phabricator.memgraph.io/D714
This commit is contained in:
Mislav Bradac 2017-08-28 10:51:19 +02:00
parent 7fc821ad25
commit 3e793fb8ac
80 changed files with 30 additions and 60 deletions

View File

@ -1,12 +0,0 @@
# Emit one line of Cypher that creates EDGE_COUNT relationships between
# two matched nodes, split into ';'-terminated statements of at most
# BATCH_SIZE CREATE clauses each.
EDGE_COUNT = 100000
BATCH_SIZE = 50
query = []
while EDGE_COUNT:
    query.append("MATCH (a), (b)")
    # Size of this batch: a full BATCH_SIZE, or whatever is left.
    batch = min(BATCH_SIZE, EDGE_COUNT)
    query.append("CREATE (a)-[:Type]->(b) " * batch)
    query.append(";")
    EDGE_COUNT -= batch
print(" ".join(query))

View File

@ -1,11 +0,0 @@
# Emit one line of Cypher that creates PATTERN_COUNT ()-[:Type]->()
# patterns, split into ';'-terminated statements of at most BATCH_SIZE
# CREATE clauses each.
PATTERN_COUNT = 100000
BATCH_SIZE = 50
query = []
while PATTERN_COUNT:
    # Size of this batch: a full BATCH_SIZE, or whatever is left.
    batch = min(BATCH_SIZE, PATTERN_COUNT)
    query.append("CREATE ()-[:Type]->() " * batch)
    query.append(";")
    PATTERN_COUNT -= batch
print(" ".join(query))

View File

@ -1,11 +0,0 @@
# Emit one line of Cypher that creates VERTEX_COUNT vertices, split into
# ';'-terminated statements of at most BATCH_SIZE CREATE clauses each.
VERTEX_COUNT = 100000
BATCH_SIZE = 50
query = []
while VERTEX_COUNT > 0:
    new_vertices = min(BATCH_SIZE, VERTEX_COUNT)
    # Trailing space separates consecutive CREATE clauses; without it the
    # repetition produced "CREATE ()CREATE ()", which is not parseable
    # Cypher (the sibling edge/pattern generators already do this).
    query.append('CREATE () ' * new_vertices)
    query.append(";")
    VERTEX_COUNT -= new_vertices
print(" ".join(query))

View File

@ -1,15 +0,0 @@
# Emit one line of Cypher that creates VERTEX_COUNT vertices carrying
# seven labels and seven properties, split into ';'-terminated statements
# of at most BATCH_SIZE CREATE clauses each.
VERTEX_COUNT = 100000
BATCH_SIZE = 50
query = []
while VERTEX_COUNT > 0:
    new_vertices = min(BATCH_SIZE, VERTEX_COUNT)
    # Trailing space after '})' separates consecutive CREATE clauses;
    # without it the repetition produced "...false})CREATE (...", which is
    # not parseable Cypher (matches the sibling generators' style).
    query.append('CREATE (:L1:L2:L3:L4:L5:L6:L7 '
                 '{p1: true, p2: 42, '
                 'p3: "Here is some text that is not extremely short", '
                 'p4:"Short text", p5: 234.434, p6: 11.11, p7: false}) '
                 * new_vertices)
    query.append(";")
    VERTEX_COUNT -= new_vertices
print(" ".join(query))

View File

@ -0,0 +1 @@
# Print 1000 ';'-terminated statements that each create one relationship
# between two matched nodes.
statements = ["MATCH (a), (b) CREATE (a)-[:Type]->(b);"] * 1000
print("".join(statements))

View File

@ -0,0 +1 @@
# Print 1000 ';'-terminated statements that each create one
# ()-[:Type]->() pattern.
statements = ["CREATE ()-[:Type]->();"] * 1000
print("".join(statements))

View File

@ -0,0 +1 @@
# Print 1000 ';'-terminated statements that each create one vertex.
statements = ["CREATE ();"] * 1000
print("".join(statements))

View File

@ -0,0 +1 @@
# Print 1000 ';'-terminated statements that each create one vertex with
# seven labels and seven properties.
stmt = ('CREATE (:L1:L2:L3:L4:L5:L6:L7 {p1: true, p2: 42, '
        'p3: "Here is some text that is not extremely short", '
        'p4:"Short text", p5: 234.434, p6: 11.11, p7: false});')
print(stmt * 1000)

View File

@ -0,0 +1,3 @@
{
"iterations": 3
}

View File

@ -0,0 +1 @@
MATCH (n)-[x]->(m) DELETE x

View File

@ -0,0 +1 @@
MATCH (a), (b) WITH a, b UNWIND range(1, 100000) AS x CREATE (a)-[:Type]->(b)

View File

@ -0,0 +1 @@
CREATE ()

View File

@ -0,0 +1 @@
MATCH (n) DETACH DELETE n

View File

@ -0,0 +1 @@
UNWIND range(1, 100000) AS x CREATE ()-[:Type]->()

View File

@ -0,0 +1 @@
MATCH (n) DETACH DELETE n

View File

@ -0,0 +1 @@
MATCH (n) DETACH DELETE n

View File

@ -0,0 +1 @@
UNWIND range(1, 100000) AS x CREATE ()

View File

@ -0,0 +1 @@
MATCH (n) DETACH DELETE n

View File

@ -0,0 +1 @@
UNWIND range(1, 100000) AS x CREATE (:L1:L2:L3:L4:L5:L6:L7 {p1: true, p2: 42, p3: "Here is some text that is not extremely short", p4:"Short text", p5: 234.434, p6: 11.11, p7: false})

View File

@ -137,7 +137,7 @@ class _QuerySuite:
"""
argp = ArgumentParser("QuerySuite.scenarios argument parser")
argp.add_argument("--query-scenarios-root", default=path.join(
DIR_PATH, "..", "groups"),
DIR_PATH, "groups"),
dest="root")
args, _ = argp.parse_known_args()
log.info("Loading query scenarios from root: %s", args.root)
@ -278,8 +278,6 @@ class _QuerySuite:
def groups(self):
""" Which groups can be executed by a QuerySuite scenario """
assert False, "This is a base class, use one of derived suites"
return ["create", "match", "expression", "aggregation", "return",
"update", "delete", "hardcoded"]
class QuerySuite(_QuerySuite):
@ -290,8 +288,8 @@ class QuerySuite(_QuerySuite):
return ["MemgraphRunner", "NeoRunner"]
def groups(self):
return ["create", "match", "expression", "aggregation", "return",
"update", "delete"]
return ["1000_create", "unwind_create", "match", "expression",
"aggregation", "return", "update", "delete"]
class QueryParallelSuite(_QuerySuite):
@ -546,7 +544,7 @@ def main():
log.info("Executing %d scenarios", len(filtered_scenarios))
results = []
for (group, scenario_name), scenario in filtered_scenarios.items():
for (group, scenario_name), scenario in sorted(filtered_scenarios.items()):
log.info("Executing group.scenario '%s.%s' with elements %s",
group, scenario_name, list(scenario.keys()))
for iter_result in suite.run(scenario, group, scenario_name, runner):

View File

@ -234,5 +234,4 @@ def store_data(data):
if not i in data:
raise StorageException("Field '{}' missing in data!".format(i))
data["timestamp"] = time.time()
print("STORE DATA:", data)
_storage_file.write(json.dumps(data) + "\n")

View File

@ -4,6 +4,7 @@
#include "communication/bolt/client.hpp"
#include "io/network/network_endpoint.hpp"
#include "io/network/socket.hpp"
#include "utils/timer.hpp"
using SocketT = io::network::Socket;
using EndpointT = io::network::NetworkEndpoint;
@ -35,7 +36,10 @@ int main(int argc, char **argv) {
break;
}
try {
utils::Timer t;
auto ret = client.Execute(s, {});
auto elapsed = t.Elapsed().count();
std::cout << "Wall time:\n " << elapsed << std::endl;
std::cout << "Fields:" << std::endl;
for (auto &field : ret.fields) {
@ -43,7 +47,7 @@ int main(int argc, char **argv) {
}
std::cout << "Records:" << std::endl;
for (int i = 0; i < ret.records.size(); ++i) {
for (int i = 0; i < static_cast<int>(ret.records.size()); ++i) {
std::cout << " " << i << std::endl;
for (auto &value : ret.records[i]) {
std::cout << " " << value << std::endl;
@ -54,8 +58,7 @@ int main(int argc, char **argv) {
for (auto &data : ret.metadata) {
std::cout << " " << data.first << " : " << data.second << std::endl;
}
}
catch (const communication::bolt::ClientQueryException &e) {
} catch (const communication::bolt::ClientQueryException &e) {
std::cout << "Client received exception: " << e.what() << std::endl;
}
}

View File

@ -190,7 +190,9 @@ binary_release_path = os.path.join(BUILD_RELEASE_DIR, binary_release_name)
binary_release_link_path = os.path.join(BUILD_RELEASE_DIR, "memgraph")
# macro benchmark tests
MACRO_BENCHMARK_ARGS = "QuerySuite MemgraphRunner --groups aggregation --no-strict"
MACRO_BENCHMARK_ARGS = (
"QuerySuite MemgraphRunner " +
"--groups aggregation 1000_create unwind_create --no-strict")
macro_bench_path = os.path.join(BASE_DIR, "tests", "macro_benchmark")
harness_client_binary = os.path.join(BUILD_RELEASE_DIR, "tests",
"macro_benchmark", "harness_client")