diff --git a/experimental/distributed/CMakeLists.txt b/experimental/distributed/CMakeLists.txt
index c077671e5..c301554a1 100644
--- a/experimental/distributed/CMakeLists.txt
+++ b/experimental/distributed/CMakeLists.txt
@@ -27,8 +27,8 @@ enable_testing()
 add_subdirectory(tests)
 
 # copy test scripts into the build/ directory (for distributed tests)
-configure_file(${PROJECT_SOURCE_DIR}/tests/start_distributed.py
-               ${PROJECT_BINARY_DIR}/tests/start_distributed.py COPYONLY)
+configure_file(${PROJECT_SOURCE_DIR}/tests/start_distributed
+               ${PROJECT_BINARY_DIR}/tests/start_distributed COPYONLY)
 configure_file(${PROJECT_SOURCE_DIR}/tests/config
                ${PROJECT_BINARY_DIR}/tests/config COPYONLY)
 
diff --git a/experimental/distributed/tests/distributed_test.cpp b/experimental/distributed/tests/distributed_test.cpp
index bb20439c7..28c1ba68e 100644
--- a/experimental/distributed/tests/distributed_test.cpp
+++ b/experimental/distributed/tests/distributed_test.cpp
@@ -136,7 +136,7 @@ class Master : public Reactor {
       if (workers_seen == static_cast<int64_t>(worker_mnids_.size())) {
         subscription.Unsubscribe();
         // Sleep for a while so we can read output in the terminal.
-        // (start_distributed.py runs each process in a new tab which is
+        // (start_distributed runs each process in a new tab which is
         //  closed immediately after process has finished)
         std::this_thread::sleep_for(std::chrono::seconds(4));
         CloseChannel("main");
@@ -146,12 +146,13 @@ class Master : public Reactor {
 
     // send a TextMessage to each worker
     for (auto wmnid : worker_mnids_) {
       auto stream = memgraph.FindChannel(wmnid, "worker", "main");
-      stream->OnEventOnce().ChainOnce<ChannelResolvedMessage>([this, stream](
-          const ChannelResolvedMessage &msg, const Subscription &) {
-        msg.channelWriter()->Send<TextMessage>("master", "main",
-                                               "hi from master");
-        stream->Close();
-      });
+      stream->OnEventOnce().ChainOnce<ChannelResolvedMessage>(
+          [this, stream](const ChannelResolvedMessage &msg,
+                         const Subscription &) {
+            msg.channelWriter()->Send<TextMessage>("master", "main",
+                                                   "hi from master");
+            stream->Close();
+          });
     }
   }
diff --git a/experimental/distributed/tests/start_distributed.py b/experimental/distributed/tests/start_distributed
similarity index 92%
rename from experimental/distributed/tests/start_distributed.py
rename to experimental/distributed/tests/start_distributed
index 648db169c..bbbd0b253 100755
--- a/experimental/distributed/tests/start_distributed.py
+++ b/experimental/distributed/tests/start_distributed
@@ -1,3 +1,6 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
 # Automatically copied to the build/ directory during Makefile (configured by cmake)
 
 import os
diff --git a/tests/macro_benchmark/harness.py b/tests/macro_benchmark/harness
similarity index 100%
rename from tests/macro_benchmark/harness.py
rename to tests/macro_benchmark/harness
diff --git a/tests/public_benchmark/ldbc/README.md b/tests/public_benchmark/ldbc/README.md
index aa93139a6..ca4cc1e6e 100644
--- a/tests/public_benchmark/ldbc/README.md
+++ b/tests/public_benchmark/ldbc/README.md
@@ -33,7 +33,7 @@ cd memgraph/tests/public_benchmark/ldbc
 
     source ve3/bin/activate
 
-    ./index_creation.py ldbc-snb-impls/snb-interactive-neo4j/scripts/indexCreation.neo4j
+    ./index_creation ldbc-snb-impls/snb-interactive-neo4j/scripts/indexCreation.neo4j
 
 ## Where is and how to use LDBC plotting?
 
diff --git a/tests/public_benchmark/ldbc/index_creation.py b/tests/public_benchmark/ldbc/index_creation
similarity index 100%
rename from tests/public_benchmark/ldbc/index_creation.py
rename to tests/public_benchmark/ldbc/index_creation
diff --git a/tests/public_benchmark/ldbc/run_benchmark b/tests/public_benchmark/ldbc/run_benchmark
index 039fa6bd4..d3206e346 100755
--- a/tests/public_benchmark/ldbc/run_benchmark
+++ b/tests/public_benchmark/ldbc/run_benchmark
@@ -156,7 +156,7 @@ LDBC_DEFAULT_PROPERTIES = \
 def create_index(port, database):
     index_file = os.path.join(SCRIPT_DIR, 'ldbc-snb-impls',
             'snb-interactive-neo4j', 'scripts', 'indexCreation.neo4j')
-    subprocess.check_call(('ve3/bin/python3', 'index_creation.py', '--port',
+    subprocess.check_call(('ve3/bin/python3', 'index_creation', '--port',
             port, '--database', database, index_file), cwd=SCRIPT_DIR)
     time.sleep(1.0)
 
diff --git a/tests/stress/bipartite.py b/tests/stress/bipartite.py
old mode 100755
new mode 100644
diff --git a/tests/stress/continuous_integration b/tests/stress/continuous_integration
index 728c44147..ec103a055 100755
--- a/tests/stress/continuous_integration
+++ b/tests/stress/continuous_integration
@@ -11,8 +11,8 @@ import sys
 import time
 
 # dataset calibrated for running on Apollo (total 4min)
-# bipartite runs for approx. 30s
-# create_match runs for approx. 30s
+# bipartite.py runs for approx. 30s
+# create_match.py runs for approx. 30s
 # long_running runs for 1min
 # long_running runs for 2min
 SMALL_DATASET = [
@@ -39,7 +39,7 @@ SMALL_DATASET = [
 ]
 
 # dataset calibrated for running on daily stress instance (total 9h)
-# bipartite and create_match run for approx. 15min
+# bipartite.py and create_match.py run for approx. 15min
 # long_running runs for 5min x 6 times = 30min
 # long_running runs for 8h
 LARGE_DATASET = [
diff --git a/tests/stress/create_match.py b/tests/stress/create_match.py
old mode 100755
new mode 100644
diff --git a/tools/apollo/generate b/tools/apollo/generate
index 1e1de0df5..6a6e4cd75 100755
--- a/tools/apollo/generate
+++ b/tools/apollo/generate
@@ -211,7 +211,7 @@ postgresql_lib_dir = os.path.join(LIBS_DIR, "postgresql", "lib")
 infile = create_archive("macro_benchmark", [binary_release_path,
         binary_release_link_path, macro_bench_path, config_path,
         harness_client_binaries, postgresql_lib_dir], cwd = WORKSPACE_DIR)
-supervisor = "./memgraph/tests/macro_benchmark/harness.py"
+supervisor = "./memgraph/tests/macro_benchmark/harness"
 outfile_paths = "\./memgraph/tests/macro_benchmark/\.harness_summary"
 RUNS.append(generate_run("macro_benchmark__query_suite",
         supervisor = supervisor,
@@ -242,7 +242,7 @@ if mode == "diff":
             binary_parent_link_path, parent_macro_bench_path, parent_config_path,
             parent_harness_client_binaries, parent_postgresql_lib_dir],
             cwd = WORKSPACE_DIR)
-    supervisor = "./parent/tests/macro_benchmark/harness.py"
+    supervisor = "./parent/tests/macro_benchmark/harness"
     args = MACRO_BENCHMARK_ARGS + " --RunnerBin " + binary_parent_path
     outfile_paths = "\./parent/tests/macro_benchmark/\.harness_summary"
     RUNS.append(generate_run("macro_benchmark_parent__query_suite",
diff --git a/tools/memory_usage.py b/tools/memory_usage
similarity index 96%
rename from tools/memory_usage.py
rename to tools/memory_usage
index 8539aa563..075ae6734 100755
--- a/tools/memory_usage.py
+++ b/tools/memory_usage
@@ -1,13 +1,13 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-
 import time
 import sys
 import os
 
 # hackish way to resuse existing start code
-sys.path.append("../tests/macro_benchmark/")
+sys.path.append(os.path.dirname(os.path.realpath(__file__)) +
+                "/../tests/macro_benchmark/")
 
 from databases import *
 from clients import *
 from common import get_absolute_path
diff --git a/tools/neo_to_memgraph.py b/tools/neo_to_memgraph
similarity index 100%
rename from tools/neo_to_memgraph.py
rename to tools/neo_to_memgraph
diff --git a/tools/snapshot_recovery_speed.py b/tools/snapshot_recovery_speed
similarity index 100%
rename from tools/snapshot_recovery_speed.py
rename to tools/snapshot_recovery_speed
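Note on the tools/memory_usage hunk above (illustration only, not part of the patch): the change replaces a cwd-relative sys.path entry with one derived from the script's own location, so the helpers under tests/macro_benchmark resolve no matter which directory the tool is launched from. A minimal standalone sketch of that technique, using only the standard library (the appended path here just mirrors the layout in the patch):

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Sketch of the __file__-relative sys.path technique used in tools/memory_usage.
import os
import sys

# Directory containing this script, with any symlinks resolved.
script_dir = os.path.dirname(os.path.realpath(__file__))

# A cwd-relative append such as "../tests/macro_benchmark/" only works when
# the script is started from its own directory; anchoring on __file__ makes
# the import path correct from anywhere.
sys.path.append(os.path.join(script_dir, "..", "tests", "macro_benchmark"))

# Print the resolved entry just to demonstrate what was added.
print(sys.path[-1])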