From d4315b3242697959ff435df5371a14c21d054b90 Mon Sep 17 00:00:00 2001
From: Matej Ferencevic <matej.ferencevic@memgraph.io>
Date: Wed, 20 Jun 2018 18:23:16 +0200
Subject: [PATCH] Prepare release v0.11.0

---
 CMakeLists.txt | 8 -
 apollo_archives.yaml | 1 -
 apollo_build.yaml | 11 +-
 src/CMakeLists.txt | 100 +-
 src/communication/bolt/client.hpp | 2 +
 .../bolt/v1/states/executing.hpp | 8 -
 src/communication/raft/network_common.hpp | 23 -
 src/communication/raft/raft-inl.hpp | 699 -----
 src/communication/raft/raft.hpp | 277 --
 src/communication/raft/rpc.hpp | 120 -
 src/communication/raft/storage/file.hpp | 239 --
 src/communication/raft/storage/memory.hpp | 63 -
 src/communication/raft/test_utils.hpp | 141 -
 src/communication/rpc/client.cpp | 100 -
 src/communication/rpc/client.hpp | 101 -
 src/communication/rpc/client_pool.hpp | 68 -
 src/communication/rpc/messages.capnp | 9 -
 src/communication/rpc/messages.hpp | 54 -
 src/communication/rpc/protocol.cpp | 77 -
 src/communication/rpc/protocol.hpp | 55 -
 src/communication/rpc/server.cpp | 17 -
 src/communication/rpc/server.hpp | 86 -
 src/database/config.cpp | 47 -
 src/database/counters.cpp | 31 -
 src/database/counters.hpp | 23 -
 src/database/counters_rpc_messages.lcp | 23 -
 src/database/graph_db.cpp | 350 +--
 src/database/graph_db.hpp | 70 -
 src/database/graph_db_accessor.cpp | 138 +-
 src/database/graph_db_accessor.hpp | 8 -
 src/database/state_delta.lcp | 9 +-
 src/database/storage_gc.hpp | 1 -
 src/database/storage_gc_master.hpp | 68 -
 src/database/storage_gc_worker.hpp | 46 -
 src/distributed/bfs_rpc_clients.cpp | 178 --
 src/distributed/bfs_rpc_clients.hpp | 62 -
 src/distributed/bfs_rpc_messages.lcp | 280 --
 src/distributed/bfs_rpc_server.hpp | 126 -
 src/distributed/bfs_subcursor.cpp | 196 --
 src/distributed/bfs_subcursor.hpp | 141 -
 src/distributed/cache.cpp | 99 -
 src/distributed/cache.hpp | 62 -
 src/distributed/cluster_discovery_master.cpp | 42 -
 src/distributed/cluster_discovery_master.hpp | 27 -
 src/distributed/cluster_discovery_worker.cpp | 41 -
 src/distributed/cluster_discovery_worker.hpp | 50 -
 src/distributed/coordination.cpp | 34 -
 src/distributed/coordination.hpp | 36 -
 src/distributed/coordination_master.cpp | 92 -
 src/distributed/coordination_master.hpp | 61 -
 src/distributed/coordination_rpc_messages.lcp | 72 -
 src/distributed/coordination_worker.cpp | 46 -
 src/distributed/coordination_worker.hpp | 33 -
 src/distributed/data_manager.cpp | 54 -
 src/distributed/data_manager.hpp | 45 -
 src/distributed/data_rpc_clients.cpp | 49 -
 src/distributed/data_rpc_clients.hpp | 31 -
 src/distributed/data_rpc_messages.lcp | 76 -
 src/distributed/data_rpc_server.cpp | 43 -
 src/distributed/data_rpc_server.hpp | 17 -
 src/distributed/durability_rpc_clients.cpp | 25 -
 src/distributed/durability_rpc_clients.hpp | 28 -
 src/distributed/durability_rpc_messages.lcp | 20 -
 src/distributed/durability_rpc_server.cpp | 20 -
 src/distributed/durability_rpc_server.hpp | 21 -
 src/distributed/index_rpc_messages.lcp | 25 -
 src/distributed/index_rpc_server.cpp | 33 -
 src/distributed/index_rpc_server.hpp | 22 -
 src/distributed/plan_consumer.cpp | 41 -
 src/distributed/plan_consumer.hpp | 44 -
 src/distributed/plan_dispatcher.cpp | 35 -
 src/distributed/plan_dispatcher.hpp | 30 -
 src/distributed/plan_rpc_messages.lcp | 59 -
 src/distributed/produce_rpc_server.cpp | 176 --
 src/distributed/produce_rpc_server.hpp | 92 -
 src/distributed/pull_produce_rpc_messages.lcp | 547 ----
 src/distributed/pull_rpc_clients.cpp | 41 -
 src/distributed/pull_rpc_clients.hpp | 48 -
src/distributed/rpc_worker_clients.hpp | 154 -- src/distributed/serialization.capnp | 71 - src/distributed/serialization.cpp | 120 - src/distributed/serialization.hpp | 209 -- src/distributed/storage_gc_rpc_messages.lcp | 20 - .../token_sharing_rpc_messages.lcp | 20 - src/distributed/token_sharing_rpc_server.hpp | 100 - .../transactional_cache_cleaner.hpp | 86 - ...ansactional_cache_cleaner_rpc_messages.lcp | 17 - src/distributed/updates_rpc_clients.cpp | 116 - src/distributed/updates_rpc_clients.hpp | 76 - src/distributed/updates_rpc_messages.lcp | 187 -- src/distributed/updates_rpc_server.cpp | 385 --- src/distributed/updates_rpc_server.hpp | 104 - src/durability/recovery.capnp | 9 - src/durability/recovery.hpp | 20 - src/io/CMakeLists.txt | 24 - src/io/network/endpoint.capnp | 10 - src/io/network/endpoint.cpp | 12 - src/io/network/endpoint.hpp | 4 - src/lisp/lcp.lisp | 1 - src/memgraph_bolt.cpp | 89 +- src/query/common.capnp | 15 - src/query/common.cpp | 23 - src/query/common.hpp | 12 - src/query/exceptions.hpp | 7 - src/query/frontend/ast/ast.capnp | 396 --- src/query/frontend/ast/ast.cpp | 2395 ----------------- src/query/frontend/ast/ast.hpp | 1927 ------------- src/query/frontend/ast/ast_visitor.hpp | 6 +- .../frontend/ast/cypher_main_visitor.cpp | 64 +- .../frontend/ast/cypher_main_visitor.hpp | 13 - .../frontend/opencypher/grammar/Cypher.g4 | 24 - src/query/frontend/semantic/symbol.capnp | 31 - src/query/frontend/semantic/symbol.hpp | 70 - .../frontend/semantic/symbol_generator.cpp | 4 - .../frontend/semantic/symbol_generator.hpp | 2 - src/query/frontend/semantic/symbol_table.hpp | 35 - .../interpret/awesome_memgraph_functions.cpp | 17 - src/query/interpret/eval.hpp | 2 - src/query/interpreter.cpp | 62 +- src/query/interpreter.hpp | 9 +- src/query/plan/cost_estimator.hpp | 2 - src/query/plan/distributed.cpp | 334 --- src/query/plan/operator.cpp | 940 +------ src/query/plan/operator.lcp | 386 +-- src/query/plan/preprocess.hpp | 2 - src/query/plan/rule_based_planner.cpp | 10 - src/query/plan/rule_based_planner.hpp | 9 - src/stats/metrics.cpp | 105 - src/stats/metrics.hpp | 202 -- src/stats/stats.cpp | 113 - src/stats/stats.hpp | 33 - src/stats/stats_rpc_messages.lcp | 51 - src/storage/address.hpp | 9 - src/storage/concurrent_id_mapper_master.cpp | 54 - src/storage/concurrent_id_mapper_master.hpp | 20 - .../concurrent_id_mapper_rpc_messages.lcp | 44 - src/storage/concurrent_id_mapper_worker.cpp | 60 - src/storage/concurrent_id_mapper_worker.hpp | 34 - src/storage/record_accessor.cpp | 53 +- src/storage/serialization.capnp | 21 - src/storage/types.hpp | 40 - src/storage/vertex_accessor.cpp | 4 - src/transactions/commit_log.hpp | 9 - src/transactions/common.capnp | 12 - src/transactions/engine_master.cpp | 98 - src/transactions/engine_master.hpp | 30 - src/transactions/engine_rpc_messages.lcp | 69 - src/transactions/engine_single_node.cpp | 1 - src/transactions/engine_worker.cpp | 191 -- src/transactions/engine_worker.hpp | 73 - src/transactions/snapshot.cpp | 16 - src/transactions/snapshot.hpp | 4 - src/utils/serialization.capnp | 97 - src/utils/serialization.hpp | 489 ---- tests/CMakeLists.txt | 3 - tests/benchmark/serialization.cpp | 143 - tests/distributed/card_fraud/.gitignore | 2 - tests/distributed/card_fraud/apollo_runs.py | 47 - tests/distributed/card_fraud/card_fraud.py | 223 -- tests/distributed/card_fraud/config.json | 8 - .../card_fraud/generate_dataset.sh | 18 - tests/distributed/common.py | 1 - tests/distributed/jail_faker.py | 1 - tests/distributed/jail_service.py | 150 
-- tests/distributed/local_runner | 54 - tests/distributed/master.py | 81 - tests/distributed/raft/CMakeLists.txt | 29 - tests/distributed/raft/README.md | 13 - tests/distributed/raft/example_client.cpp | 49 - tests/distributed/raft/example_server.cpp | 77 - tests/distributed/raft/example_test.py | 61 - tests/distributed/raft/messages.hpp | 21 - .../clients/card_fraud_client.cpp | 15 +- .../macro_benchmark/clients/graph_500_bfs.cpp | 4 - .../clients/long_running_common.hpp | 21 +- tests/manual/CMakeLists.txt | 9 - tests/manual/card_fraud_local.cpp | 77 - tests/manual/distributed_common.hpp | 98 - tests/manual/distributed_repl.cpp | 61 - tests/manual/query_planner.cpp | 79 - tests/manual/raft_rpc.cpp | 50 - tests/unit/CMakeLists.txt | 60 - .../unit/concurrent_id_mapper_distributed.cpp | 52 - tests/unit/counters.cpp | 26 - tests/unit/cypher_main_visitor.cpp | 135 +- tests/unit/database_master.cpp | 11 - tests/unit/distributed_bfs.cpp | 113 - tests/unit/distributed_common.hpp | 248 -- tests/unit/distributed_coordination.cpp | 205 -- tests/unit/distributed_data_exchange.cpp | 133 - tests/unit/distributed_durability.cpp | 117 - .../distributed_dynamic_graph_partitioner.cpp | 152 -- tests/unit/distributed_gc.cpp | 78 - tests/unit/distributed_graph_db.cpp | 183 -- tests/unit/distributed_interpretation.cpp | 316 --- tests/unit/distributed_query_plan.cpp | 367 --- tests/unit/distributed_serialization.cpp | 162 -- tests/unit/distributed_token_sharing.cpp | 33 - tests/unit/distributed_updates.cpp | 562 ---- tests/unit/distributed_vertex_migrator.cpp | 181 -- tests/unit/metrics.cpp | 90 - tests/unit/query_common.hpp | 5 - tests/unit/query_plan_match_filter_return.cpp | 67 +- tests/unit/query_planner.cpp | 868 +----- tests/unit/query_semantic.cpp | 24 - tests/unit/raft.cpp | 660 ----- tests/unit/raft_storage.cpp | 71 - tests/unit/rpc.cpp | 199 -- tests/unit/rpc_worker_clients.cpp | 146 - tests/unit/serialization.cpp | 390 --- tests/unit/transaction_engine_distributed.cpp | 150 -- tools/src/CMakeLists.txt | 6 +- tools/src/mg_statsd/main.cpp | 73 - tools/tests/CMakeLists.txt | 3 - tools/tests/statsd/mg_statsd_client.cpp | 62 - 215 files changed, 144 insertions(+), 24135 deletions(-) delete mode 100644 src/communication/raft/network_common.hpp delete mode 100644 src/communication/raft/raft-inl.hpp delete mode 100644 src/communication/raft/raft.hpp delete mode 100644 src/communication/raft/rpc.hpp delete mode 100644 src/communication/raft/storage/file.hpp delete mode 100644 src/communication/raft/storage/memory.hpp delete mode 100644 src/communication/raft/test_utils.hpp delete mode 100644 src/communication/rpc/client.cpp delete mode 100644 src/communication/rpc/client.hpp delete mode 100644 src/communication/rpc/client_pool.hpp delete mode 100644 src/communication/rpc/messages.capnp delete mode 100644 src/communication/rpc/messages.hpp delete mode 100644 src/communication/rpc/protocol.cpp delete mode 100644 src/communication/rpc/protocol.hpp delete mode 100644 src/communication/rpc/server.cpp delete mode 100644 src/communication/rpc/server.hpp delete mode 100644 src/database/counters_rpc_messages.lcp delete mode 100644 src/database/storage_gc_master.hpp delete mode 100644 src/database/storage_gc_worker.hpp delete mode 100644 src/distributed/bfs_rpc_clients.cpp delete mode 100644 src/distributed/bfs_rpc_clients.hpp delete mode 100644 src/distributed/bfs_rpc_messages.lcp delete mode 100644 src/distributed/bfs_rpc_server.hpp delete mode 100644 src/distributed/bfs_subcursor.cpp delete mode 100644 
src/distributed/bfs_subcursor.hpp delete mode 100644 src/distributed/cache.cpp delete mode 100644 src/distributed/cache.hpp delete mode 100644 src/distributed/cluster_discovery_master.cpp delete mode 100644 src/distributed/cluster_discovery_master.hpp delete mode 100644 src/distributed/cluster_discovery_worker.cpp delete mode 100644 src/distributed/cluster_discovery_worker.hpp delete mode 100644 src/distributed/coordination.cpp delete mode 100644 src/distributed/coordination.hpp delete mode 100644 src/distributed/coordination_master.cpp delete mode 100644 src/distributed/coordination_master.hpp delete mode 100644 src/distributed/coordination_rpc_messages.lcp delete mode 100644 src/distributed/coordination_worker.cpp delete mode 100644 src/distributed/coordination_worker.hpp delete mode 100644 src/distributed/data_manager.cpp delete mode 100644 src/distributed/data_manager.hpp delete mode 100644 src/distributed/data_rpc_clients.cpp delete mode 100644 src/distributed/data_rpc_clients.hpp delete mode 100644 src/distributed/data_rpc_messages.lcp delete mode 100644 src/distributed/data_rpc_server.cpp delete mode 100644 src/distributed/data_rpc_server.hpp delete mode 100644 src/distributed/durability_rpc_clients.cpp delete mode 100644 src/distributed/durability_rpc_clients.hpp delete mode 100644 src/distributed/durability_rpc_messages.lcp delete mode 100644 src/distributed/durability_rpc_server.cpp delete mode 100644 src/distributed/durability_rpc_server.hpp delete mode 100644 src/distributed/index_rpc_messages.lcp delete mode 100644 src/distributed/index_rpc_server.cpp delete mode 100644 src/distributed/index_rpc_server.hpp delete mode 100644 src/distributed/plan_consumer.cpp delete mode 100644 src/distributed/plan_consumer.hpp delete mode 100644 src/distributed/plan_dispatcher.cpp delete mode 100644 src/distributed/plan_dispatcher.hpp delete mode 100644 src/distributed/plan_rpc_messages.lcp delete mode 100644 src/distributed/produce_rpc_server.cpp delete mode 100644 src/distributed/produce_rpc_server.hpp delete mode 100644 src/distributed/pull_produce_rpc_messages.lcp delete mode 100644 src/distributed/pull_rpc_clients.cpp delete mode 100644 src/distributed/pull_rpc_clients.hpp delete mode 100644 src/distributed/rpc_worker_clients.hpp delete mode 100644 src/distributed/serialization.capnp delete mode 100644 src/distributed/serialization.cpp delete mode 100644 src/distributed/serialization.hpp delete mode 100644 src/distributed/storage_gc_rpc_messages.lcp delete mode 100644 src/distributed/token_sharing_rpc_messages.lcp delete mode 100644 src/distributed/token_sharing_rpc_server.hpp delete mode 100644 src/distributed/transactional_cache_cleaner.hpp delete mode 100644 src/distributed/transactional_cache_cleaner_rpc_messages.lcp delete mode 100644 src/distributed/updates_rpc_clients.cpp delete mode 100644 src/distributed/updates_rpc_clients.hpp delete mode 100644 src/distributed/updates_rpc_messages.lcp delete mode 100644 src/distributed/updates_rpc_server.cpp delete mode 100644 src/distributed/updates_rpc_server.hpp delete mode 100644 src/durability/recovery.capnp delete mode 100644 src/io/network/endpoint.capnp delete mode 100644 src/query/common.capnp delete mode 100644 src/query/frontend/ast/ast.capnp delete mode 100644 src/query/frontend/semantic/symbol.capnp delete mode 100644 src/stats/metrics.cpp delete mode 100644 src/stats/metrics.hpp delete mode 100644 src/stats/stats.cpp delete mode 100644 src/stats/stats.hpp delete mode 100644 src/stats/stats_rpc_messages.lcp delete mode 100644 
src/storage/concurrent_id_mapper_master.cpp delete mode 100644 src/storage/concurrent_id_mapper_master.hpp delete mode 100644 src/storage/concurrent_id_mapper_rpc_messages.lcp delete mode 100644 src/storage/concurrent_id_mapper_worker.cpp delete mode 100644 src/storage/concurrent_id_mapper_worker.hpp delete mode 100644 src/storage/serialization.capnp delete mode 100644 src/transactions/common.capnp delete mode 100644 src/transactions/engine_master.cpp delete mode 100644 src/transactions/engine_master.hpp delete mode 100644 src/transactions/engine_rpc_messages.lcp delete mode 100644 src/transactions/engine_worker.cpp delete mode 100644 src/transactions/engine_worker.hpp delete mode 100644 src/transactions/snapshot.cpp delete mode 100644 src/utils/serialization.capnp delete mode 100644 src/utils/serialization.hpp delete mode 100644 tests/benchmark/serialization.cpp delete mode 100644 tests/distributed/card_fraud/.gitignore delete mode 100755 tests/distributed/card_fraud/apollo_runs.py delete mode 100644 tests/distributed/card_fraud/card_fraud.py delete mode 100644 tests/distributed/card_fraud/config.json delete mode 100755 tests/distributed/card_fraud/generate_dataset.sh delete mode 120000 tests/distributed/common.py delete mode 120000 tests/distributed/jail_faker.py delete mode 100755 tests/distributed/jail_service.py delete mode 100755 tests/distributed/local_runner delete mode 100755 tests/distributed/master.py delete mode 100644 tests/distributed/raft/CMakeLists.txt delete mode 100644 tests/distributed/raft/README.md delete mode 100644 tests/distributed/raft/example_client.cpp delete mode 100644 tests/distributed/raft/example_server.cpp delete mode 100644 tests/distributed/raft/example_test.py delete mode 100644 tests/distributed/raft/messages.hpp delete mode 100644 tests/manual/card_fraud_local.cpp delete mode 100644 tests/manual/distributed_common.hpp delete mode 100644 tests/manual/distributed_repl.cpp delete mode 100644 tests/manual/raft_rpc.cpp delete mode 100644 tests/unit/concurrent_id_mapper_distributed.cpp delete mode 100644 tests/unit/counters.cpp delete mode 100644 tests/unit/database_master.cpp delete mode 100644 tests/unit/distributed_bfs.cpp delete mode 100644 tests/unit/distributed_common.hpp delete mode 100644 tests/unit/distributed_coordination.cpp delete mode 100644 tests/unit/distributed_data_exchange.cpp delete mode 100644 tests/unit/distributed_durability.cpp delete mode 100644 tests/unit/distributed_dynamic_graph_partitioner.cpp delete mode 100644 tests/unit/distributed_gc.cpp delete mode 100644 tests/unit/distributed_graph_db.cpp delete mode 100644 tests/unit/distributed_interpretation.cpp delete mode 100644 tests/unit/distributed_query_plan.cpp delete mode 100644 tests/unit/distributed_serialization.cpp delete mode 100644 tests/unit/distributed_token_sharing.cpp delete mode 100644 tests/unit/distributed_updates.cpp delete mode 100644 tests/unit/distributed_vertex_migrator.cpp delete mode 100644 tests/unit/metrics.cpp delete mode 100644 tests/unit/raft.cpp delete mode 100644 tests/unit/raft_storage.cpp delete mode 100644 tests/unit/rpc.cpp delete mode 100644 tests/unit/rpc_worker_clients.cpp delete mode 100644 tests/unit/serialization.cpp delete mode 100644 tests/unit/transaction_engine_distributed.cpp delete mode 100644 tools/src/mg_statsd/main.cpp delete mode 100644 tools/tests/statsd/mg_statsd_client.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 827284f02..cd35aadd8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -137,9 +137,6 @@ if (USE_READLINE) 
endif() endif() -set(Boost_USE_STATIC_LIBS ON) -find_package(Boost 1.62 REQUIRED COMPONENTS iostreams serialization) - # OpenSSL find_package(OpenSSL REQUIRED) @@ -193,7 +190,6 @@ option(EXPERIMENTAL "Build experimental binaries" OFF) option(CUSTOMERS "Build customer binaries" OFF) option(TEST_COVERAGE "Generate coverage reports from running memgraph" OFF) option(TOOLS "Build tools binaries" ON) -option(MG_COMMUNITY "Build Memgraph Community Edition" OFF) option(ASAN "Build with Address Sanitizer. To get a reasonable performance option should be used only in Release or RelWithDebInfo build " OFF) option(TSAN "Build with Thread Sanitizer. To get a reasonable performance option should be used only in Release or RelWithDebInfo build " OFF) option(UBSAN "Build with Undefined Behaviour Sanitizer" OFF) @@ -208,10 +204,6 @@ if (TEST_COVERAGE) set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-instr-generate -fcoverage-mapping") endif() -if (MG_COMMUNITY) - add_definitions(-DMG_COMMUNITY) -endif() - if (ASAN) # Enable Addres sanitizer and get nicer stack traces in error messages. # NOTE: AddressSanitizer uses llvm-symbolizer binary from the Clang diff --git a/apollo_archives.yaml b/apollo_archives.yaml index 6f79721fc..5400128a0 100644 --- a/apollo_archives.yaml +++ b/apollo_archives.yaml @@ -3,7 +3,6 @@ - build_debug/memgraph - build_release/memgraph - build_release/tools/src/mg_import_csv - - build_release/tools/src/mg_statsd - config filename: binaries.tar.gz diff --git a/apollo_build.yaml b/apollo_build.yaml index 00afbba8a..b084ba360 100644 --- a/apollo_build.yaml +++ b/apollo_build.yaml @@ -33,13 +33,8 @@ cmake -DCMAKE_BUILD_TYPE=release .. TIMEOUT=1200 make -j$THREADS memgraph tools memgraph__macro_benchmark memgraph__stress memgraph__manual__card_fraud_generate_snapshot - # Generate distributed card fraud dataset. - cd ../tests/distributed/card_fraud - ./generate_dataset.sh - cd ../../.. - # Checkout to parent commit and initialize. - cd ../parent + cd ../../parent git checkout HEAD~1 TIMEOUT=1200 ./init @@ -88,7 +83,3 @@ cd ../../docs/user_technical # TODO (mferencevic): uncomment this once couscous is replaced with pandoc #./bundle_community - - # Generate distributed card fraud dataset. 
- cd ../../tests/distributed/card_fraud - ./generate_dataset.sh diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 54c51ab23..17186dd96 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -13,36 +13,12 @@ set(memgraph_src_files communication/helpers.cpp communication/init.cpp communication/bolt/v1/decoder/decoded_value.cpp - communication/rpc/client.cpp - communication/rpc/protocol.cpp - communication/rpc/server.cpp data_structures/concurrent/skiplist_gc.cpp database/config.cpp database/counters.cpp database/graph_db.cpp database/graph_db_accessor.cpp database/state_delta.cpp - distributed/bfs_rpc_clients.cpp - distributed/bfs_subcursor.cpp - distributed/cluster_discovery_master.cpp - distributed/cluster_discovery_worker.cpp - distributed/coordination.cpp - distributed/coordination_master.cpp - distributed/coordination_worker.cpp - distributed/durability_rpc_clients.cpp - distributed/durability_rpc_server.cpp - distributed/index_rpc_server.cpp - distributed/plan_consumer.cpp - distributed/plan_dispatcher.cpp - distributed/cache.cpp - distributed/data_manager.cpp - distributed/data_rpc_clients.cpp - distributed/data_rpc_server.cpp - distributed/produce_rpc_server.cpp - distributed/pull_rpc_clients.cpp - distributed/serialization.cpp - distributed/updates_rpc_clients.cpp - distributed/updates_rpc_server.cpp durability/paths.cpp durability/recovery.cpp durability/snapshooter.cpp @@ -61,41 +37,16 @@ set(memgraph_src_files query/plan/rule_based_planner.cpp query/plan/variable_start_planner.cpp query/typed_value.cpp - stats/metrics.cpp - stats/stats.cpp - storage/concurrent_id_mapper_master.cpp - storage/concurrent_id_mapper_worker.cpp - storage/dynamic_graph_partitioner/dgp.cpp - storage/dynamic_graph_partitioner/vertex_migrator.cpp storage/edge_accessor.cpp storage/locking/record_lock.cpp storage/property_value.cpp storage/property_value_store.cpp storage/record_accessor.cpp storage/vertex_accessor.cpp - transactions/engine_master.cpp transactions/engine_single_node.cpp - transactions/engine_worker.cpp - transactions/snapshot.cpp ) # ----------------------------------------------------------------------------- -# Use this function to add each capnp file to generation. This way each file is -# standalone and we avoid recompiling everything. -# NOTE: memgraph_src_files and generated_capnp_files are globally updated. 
-function(add_capnp capnp_src_file) - set(cpp_file ${CMAKE_CURRENT_SOURCE_DIR}/${capnp_src_file}.c++) - set(h_file ${CMAKE_CURRENT_SOURCE_DIR}/${capnp_src_file}.h) - add_custom_command(OUTPUT ${cpp_file} ${h_file} - COMMAND ${CAPNP_EXE} compile -o${CAPNP_CXX_EXE} ${capnp_src_file} -I ${CMAKE_CURRENT_SOURCE_DIR} - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${capnp_src_file} capnproto-proj - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) - # Update *global* generated_capnp_files - set(generated_capnp_files ${generated_capnp_files} ${cpp_file} ${h_file} PARENT_SCOPE) - # Update *global* memgraph_src_files - set(memgraph_src_files ${memgraph_src_files} ${cpp_file} PARENT_SCOPE) -endfunction(add_capnp) - # Lisp C++ Preprocessing set(lcp_exe ${CMAKE_SOURCE_DIR}/tools/lcp) @@ -135,67 +86,19 @@ function(add_lcp lcp_file) set(generated_lcp_files ${generated_lcp_files} ${h_file} ${cpp_file} ${capnp_file} PARENT_SCOPE) endfunction(add_lcp) -add_lcp(database/counters_rpc_messages.lcp CAPNP_SCHEMA @0x95a2c3ea3871e945) -add_capnp(database/counters_rpc_messages.capnp) add_lcp(database/state_delta.lcp CAPNP_SCHEMA @0xdea01657b3563887) -add_capnp(database/state_delta.capnp) -add_lcp(distributed/bfs_rpc_messages.lcp CAPNP_SCHEMA @0x8e508640b09b6d2a) -add_capnp(distributed/bfs_rpc_messages.capnp) -add_lcp(distributed/coordination_rpc_messages.lcp CAPNP_SCHEMA @0x93df0c4703cf98fb) -add_capnp(distributed/coordination_rpc_messages.capnp) -add_lcp(distributed/data_rpc_messages.lcp CAPNP_SCHEMA @0xc1c8a341ba37aaf5) -add_capnp(distributed/data_rpc_messages.capnp) -add_lcp(distributed/durability_rpc_messages.lcp CAPNP_SCHEMA @0xf5e53bc271e2163d) -add_capnp(distributed/durability_rpc_messages.capnp) -add_lcp(distributed/index_rpc_messages.lcp CAPNP_SCHEMA @0xa8aab46862945bd6) -add_capnp(distributed/index_rpc_messages.capnp) -add_lcp(distributed/plan_rpc_messages.lcp CAPNP_SCHEMA @0xfcbc48dc9f106d28) -add_capnp(distributed/plan_rpc_messages.capnp) -add_lcp(distributed/pull_produce_rpc_messages.lcp CAPNP_SCHEMA @0xa78a9254a73685bd) -add_capnp(distributed/pull_produce_rpc_messages.capnp) -add_lcp(distributed/storage_gc_rpc_messages.lcp CAPNP_SCHEMA @0xd705663dfe36cf81) -add_capnp(distributed/storage_gc_rpc_messages.capnp) -add_lcp(distributed/token_sharing_rpc_messages.lcp CAPNP_SCHEMA @0x8f295db54ec4caec) -add_capnp(distributed/token_sharing_rpc_messages.capnp) -add_lcp(distributed/transactional_cache_cleaner_rpc_messages.lcp CAPNP_SCHEMA @0xe2be6183a1ff9e11) -add_capnp(distributed/transactional_cache_cleaner_rpc_messages.capnp) -add_lcp(distributed/updates_rpc_messages.lcp CAPNP_SCHEMA @0x82d5f38d73c7b53a) -add_capnp(distributed/updates_rpc_messages.capnp) add_lcp(query/plan/operator.lcp CAPNP_SCHEMA @0xe5cae8d045d30c42) -add_capnp(query/plan/operator.capnp) -add_lcp(stats/stats_rpc_messages.lcp CAPNP_SCHEMA @0xc19a87c81b9b4512) -add_capnp(stats/stats_rpc_messages.capnp) -add_lcp(storage/concurrent_id_mapper_rpc_messages.lcp CAPNP_SCHEMA @0xa6068dae93d225dd) -add_capnp(storage/concurrent_id_mapper_rpc_messages.capnp) -add_lcp(transactions/engine_rpc_messages.lcp CAPNP_SCHEMA @0xde02b7c49180cad5) -add_capnp(transactions/engine_rpc_messages.capnp) add_custom_target(generate_lcp DEPENDS ${generated_lcp_files}) -# Registering capnp must come after registering lcp files. 
- -add_capnp(communication/rpc/messages.capnp) -add_capnp(distributed/serialization.capnp) -add_capnp(durability/recovery.capnp) -add_capnp(query/common.capnp) -add_capnp(query/frontend/ast/ast.capnp) -add_capnp(query/frontend/semantic/symbol.capnp) -add_capnp(storage/serialization.capnp) -add_capnp(transactions/common.capnp) -add_capnp(utils/serialization.capnp) - -add_custom_target(generate_capnp DEPENDS generate_lcp ${generated_capnp_files}) - # ----------------------------------------------------------------------------- string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type) # memgraph_lib depend on these libraries set(MEMGRAPH_ALL_LIBS stdc++fs Threads::Threads fmt cppitertools - antlr_opencypher_parser_lib dl glog gflags capnp kj + antlr_opencypher_parser_lib dl glog gflags ${OPENSSL_LIBRARIES} - ${Boost_IOSTREAMS_LIBRARY_RELEASE} - ${Boost_SERIALIZATION_LIBRARY_RELEASE} mg-utils mg-io) if (USE_LTALLOC) @@ -214,7 +117,6 @@ target_link_libraries(memgraph_lib ${MEMGRAPH_ALL_LIBS}) target_include_directories(memgraph_lib PRIVATE ${OPENSSL_INCLUDE_DIR}) add_dependencies(memgraph_lib generate_opencypher_parser) add_dependencies(memgraph_lib generate_lcp) -add_dependencies(memgraph_lib generate_capnp) # STATIC library used to store key-value pairs add_library(kvstore_lib STATIC storage/kvstore.cpp) diff --git a/src/communication/bolt/client.hpp b/src/communication/bolt/client.hpp index 70aa7a396..002f60995 100644 --- a/src/communication/bolt/client.hpp +++ b/src/communication/bolt/client.hpp @@ -7,6 +7,8 @@ #include "communication/bolt/v1/encoder/chunked_encoder_buffer.hpp" #include "communication/bolt/v1/encoder/client_encoder.hpp" +#include "communication/client.hpp" + #include "query/typed_value.hpp" #include "utils/exceptions.hpp" diff --git a/src/communication/bolt/v1/states/executing.hpp b/src/communication/bolt/v1/states/executing.hpp index ac10af79d..5334c2e4c 100644 --- a/src/communication/bolt/v1/states/executing.hpp +++ b/src/communication/bolt/v1/states/executing.hpp @@ -10,7 +10,6 @@ #include "communication/bolt/v1/decoder/decoded_value.hpp" #include "communication/bolt/v1/state.hpp" #include "database/graph_db.hpp" -#include "distributed/pull_rpc_clients.hpp" #include "query/exceptions.hpp" #include "query/typed_value.hpp" #include "utils/exceptions.hpp" @@ -122,13 +121,6 @@ State HandleRun(TSession &session, State state, Marker marker) { return State::Result; } session.db_accessor_->AdvanceCommand(); - if (session.db_.type() == database::GraphDb::Type::DISTRIBUTED_MASTER) { - auto tx_id = session.db_accessor_->transaction_id(); - auto futures = - session.db_.pull_clients().NotifyAllTransactionCommandAdvanced( - tx_id); - for (auto &future : futures) future.wait(); - } } auto ¶ms_map = params.ValueMap(); diff --git a/src/communication/raft/network_common.hpp b/src/communication/raft/network_common.hpp deleted file mode 100644 index 96ceeb0b0..000000000 --- a/src/communication/raft/network_common.hpp +++ /dev/null @@ -1,23 +0,0 @@ -#pragma once - -#include "communication/rpc/messages.hpp" -#include "communication/raft/raft.hpp" - -namespace communication::raft { - -enum class RpcType { REQUEST_VOTE, APPEND_ENTRIES }; - -template <class State> -struct PeerRpcRequest { - RpcType type; - RequestVoteRequest request_vote; - AppendEntriesRequest<State> append_entries; -}; - -struct PeerRpcReply { - RpcType type; - RequestVoteReply request_vote; - AppendEntriesReply append_entries; -}; - -} // namespace communication::raft diff --git a/src/communication/raft/raft-inl.hpp 
b/src/communication/raft/raft-inl.hpp deleted file mode 100644 index f5c52c67a..000000000 --- a/src/communication/raft/raft-inl.hpp +++ /dev/null @@ -1,699 +0,0 @@ -#pragma once - -#include <algorithm> - -#include "fmt/format.h" -#include "glog/logging.h" - -namespace communication::raft { - -namespace impl { - -template <class State> -RaftMemberImpl<State>::RaftMemberImpl(RaftNetworkInterface<State> &network, - RaftStorageInterface<State> &storage, - const MemberId &id, - const RaftConfig &config) - : network_(network), storage_(storage), id_(id), config_(config) { - std::lock_guard<std::mutex> lock(mutex_); - - tie(term_, voted_for_) = storage_.GetTermAndVotedFor(); - - for (const auto &peer_id : config_.members) { - peer_states_[peer_id] = std::make_unique<RaftPeerState>(); - } - - SetElectionTimer(); -} - -template <class State> -RaftMemberImpl<State>::~RaftMemberImpl() { - Stop(); -} - -template <class State> -void RaftMemberImpl<State>::Stop() { - { - std::lock_guard<std::mutex> lock(mutex_); - if (!exiting_) { - LogInfo("Stopping..."); - exiting_ = true; - } - } - state_changed_.notify_all(); -} - -template <class State> -template <class... Args> -void RaftMemberImpl<State>::LogInfo(const std::string &format, - Args &&... args) { - LOG(INFO) << fmt::format("[id = {}, term = {}] {}", id_, term_, - fmt::format(format, std::forward<Args>(args)...)) - << std::endl; -} - -template <class State> -void RaftMemberImpl<State>::TimerThreadMain() { - std::unique_lock<std::mutex> lock(mutex_); - while (!exiting_) { - if (Clock::now() >= next_election_time_) { - StartNewElection(); - } - state_changed_.wait_until(lock, next_election_time_); - } -} - -template <class State> -void RaftMemberImpl<State>::PeerThreadMain(std::string peer_id) { - RaftPeerState &peer_state = *peer_states_[peer_id]; - - LogInfo("Peer thread started for {}", peer_id); - - std::unique_lock<std::mutex> lock(mutex_); - - /* This loop will either call a function that issues an RPC or wait on the - * condition variable. It must not do both! Lock on `mutex_` is released while - * waiting for RPC response, which might cause us to miss a notification on - * `state_changed_` conditional variable and wait indefinitely. The safest - * thing to do is to assume some important part of state was modified while we - * were waiting for the response and loop around to check. 
*/ - while (!exiting_) { - TimePoint now = Clock::now(); - TimePoint wait_until; - - if (mode_ != RaftMode::FOLLOWER && peer_state.backoff_until > now) { - wait_until = peer_state.backoff_until; - } else { - switch (mode_) { - case RaftMode::FOLLOWER: - wait_until = TimePoint::max(); - break; - case RaftMode::CANDIDATE: - if (!peer_state.request_vote_done) { - RequestVote(peer_id, peer_state, lock); - continue; - } - break; - case RaftMode::LEADER: - if (peer_state.next_index <= storage_.GetLastLogIndex() || - now >= peer_state.next_heartbeat_time) { - AppendEntries(peer_id, peer_state, lock); - continue; - } else { - wait_until = peer_state.next_heartbeat_time; - } - break; - } - } - - state_changed_.wait_until(lock, wait_until); - } - - LogInfo("Peer thread exiting for {}", peer_id); -} - -template <class State> -void RaftMemberImpl<State>::CandidateOrLeaderTransitionToFollower() { - DCHECK(mode_ != RaftMode::FOLLOWER) - << "`CandidateOrLeaderTransitionToFollower` called from follower mode"; - mode_ = RaftMode::FOLLOWER; - leader_ = {}; - SetElectionTimer(); -} - -template <class State> -void RaftMemberImpl<State>::CandidateTransitionToLeader() { - DCHECK(mode_ == RaftMode::CANDIDATE) - << "`CandidateTransitionToLeader` called while not in candidate mode"; - mode_ = RaftMode::LEADER; - leader_ = id_; - - /* We don't want to trigger elections while in leader mode. */ - next_election_time_ = TimePoint::max(); - - /* [Raft thesis, Section 6.4] - * "The Leader Completeness Property guarantees that a leader has all - * committed entries, but at the start of its term, it may not know which - * those are. To find out, it needs to commit an entry from its term. Raft - * handles this by having each leader commit a blank no-op entry into the log - * at the start of its term. As soon as this no-op entry is committed, the - * leader’s commit index will be at least as large as any other servers’ - * during its term." */ - LogEntry<State> entry; - entry.term = term_; - entry.command = std::experimental::nullopt; - storage_.AppendLogEntry(entry); -} - -template <class State> -bool RaftMemberImpl<State>::CandidateOrLeaderNoteTerm(const TermId new_term) { - DCHECK(mode_ != RaftMode::FOLLOWER) - << "`CandidateOrLeaderNoteTerm` called from follower mode"; - /* [Raft thesis, Section 3.3] - * "Current terms are exchanged whenever servers communicate; if one server's - * current term is smaller than the other's, then it updates its current term - * to the larger value. If a candidate or leader discovers that its term is - * out of date, it immediately reverts to follower state." */ - if (term_ < new_term) { - UpdateTermAndVotedFor(new_term, {}); - CandidateOrLeaderTransitionToFollower(); - return true; - } - return false; -} - -template <class State> -void RaftMemberImpl<State>::UpdateTermAndVotedFor( - const TermId new_term, - const std::experimental::optional<MemberId> &new_voted_for) { - term_ = new_term; - voted_for_ = new_voted_for; - leader_ = {}; - - storage_.WriteTermAndVotedFor(term_, voted_for_); -} - -template <class State> -void RaftMemberImpl<State>::SetElectionTimer() { - /* [Raft thesis, section 3.4] - * "Raft uses randomized election timeouts to ensure that split votes are rare - * and that they are resolved quickly. To prevent split votes in the first - * place, election timeouts are chosen randomly from a fixed interval (e.g., - * 150-300 ms)." 
*/ - std::uniform_int_distribution<uint64_t> distribution( - config_.leader_timeout_min.count(), config_.leader_timeout_max.count()); - Clock::duration wait_interval = std::chrono::milliseconds(distribution(rng_)); - next_election_time_ = Clock::now() + wait_interval; -} - -template <class State> -void RaftMemberImpl<State>::StartNewElection() { - LogInfo("Starting new election"); - /* [Raft thesis, section 3.4] - * "To begin an election, a follower increments its current term and - * transitions to candidate state. It then votes for itself and issues - * RequestVote RPCs in parallel to each of the other servers in the cluster." - */ - UpdateTermAndVotedFor(term_ + 1, id_); - mode_ = RaftMode::CANDIDATE; - - /* [Raft thesis, section 3.4] - * "Each candidate restarts its randomized election timeout at the start of an - * election, and it waits for that timeout to elapse before starting the next - * election; this reduces the likelihood of another split vote in the new - * election." */ - SetElectionTimer(); - - for (const auto &peer_id : config_.members) { - if (peer_id == id_) { - continue; - } - auto &peer_state = peer_states_[peer_id]; - peer_state->request_vote_done = false; - peer_state->voted_for_me = false; - peer_state->match_index = 0; - peer_state->next_index = storage_.GetLastLogIndex() + 1; - - /* [Raft thesis, section 3.5] - * "Until the leader has discovered where it and the follower's logs match, - * the leader can send AppendEntries with no entries (like heartbeats) to - * save bandwidth. Then, once the matchIndex immediately precedes the - * nextIndex, the leader should begin to send the actual entries." */ - peer_state->suppress_log_entries = true; - - /* [Raft thesis, section 3.4] - * "Once a candidate wins an election, it becomes leader. It then sends - * heartbeat messages to all of the other servers to establish its authority - * and prevent new elections." - * - * This will make newly elected leader send heartbeats immediately. - */ - peer_state->next_heartbeat_time = TimePoint::min(); - peer_state->backoff_until = TimePoint::min(); - } - - // We already have the majority if we're in a single node cluster. - if (CountVotes()) { - LogInfo("Elected as leader."); - CandidateTransitionToLeader(); - } - - /* Notify peer threads to start issuing RequestVote RPCs. */ - state_changed_.notify_all(); -} - -template <class State> -bool RaftMemberImpl<State>::CountVotes() { - DCHECK(mode_ == RaftMode::CANDIDATE) - << "`CountVotes` should only be called from candidate mode"; - int num_votes = 0; - for (const auto &peer_id : config_.members) { - if (peer_id == id_ || peer_states_[peer_id]->voted_for_me) { - num_votes++; - } - } - - return 2 * num_votes > config_.members.size(); -} - -template <class State> -void RaftMemberImpl<State>::RequestVote(const std::string &peer_id, - RaftPeerState &peer_state, - std::unique_lock<std::mutex> &lock) { - LogInfo("Requesting vote from {}", peer_id); - - RequestVoteRequest request; - request.candidate_term = term_; - request.candidate_id = id_; - request.last_log_index = storage_.GetLastLogIndex(); - request.last_log_term = storage_.GetLogTerm(request.last_log_index); - - RequestVoteReply reply; - - /* Release lock before issuing RPC and waiting for response. */ - /* TODO(mtomic): Revise how this will work with RPC cancellation. */ - lock.unlock(); - bool ok = network_.SendRequestVote(peer_id, request, reply); - lock.lock(); - - /* TODO(mtomic): Maybe implement exponential backoff. 
*/ - if (!ok) { - peer_state.backoff_until = Clock::now() + config_.rpc_backoff; - return; - } - - if (term_ != request.candidate_term || mode_ != RaftMode::CANDIDATE || - exiting_) { - LogInfo("Ignoring RequestVote RPC reply from {}", peer_id); - return; - } - - if (CandidateOrLeaderNoteTerm(reply.term)) { - state_changed_.notify_all(); - return; - } - - DCHECK(reply.term == term_) << "Stale RequestVote RPC reply"; - - peer_state.request_vote_done = true; - - if (reply.vote_granted) { - peer_state.voted_for_me = true; - LogInfo("Got vote from {}", peer_id); - - if (CountVotes()) { - LogInfo("Elected as leader."); - CandidateTransitionToLeader(); - } - } else { - LogInfo("Vote denied from {}", peer_id); - } - - state_changed_.notify_all(); -} - -template <class State> -void RaftMemberImpl<State>::AdvanceCommitIndex() { - DCHECK(mode_ == RaftMode::LEADER) - << "`AdvanceCommitIndex` can only be called from leader mode"; - - std::vector<LogIndex> match_indices; - for (const auto &peer : peer_states_) { - match_indices.push_back(peer.second->match_index); - } - match_indices.push_back(storage_.GetLastLogIndex()); - std::sort(match_indices.begin(), match_indices.end(), - std::greater<LogIndex>()); - LogIndex new_commit_index_ = match_indices[(config_.members.size() - 1) / 2]; - - LogInfo("Trying to advance commit index {} to {}", commit_index_, - new_commit_index_); - - /* This can happen because we reset `match_index` to 0 for every peer when - * elected. */ - if (commit_index_ >= new_commit_index_) { - return; - } - - /* [Raft thesis, section 3.6.2] - * (...) Raft never commits log entries from previous terms by counting - * replicas. Only log entries from the leader's current term are committed by - * counting replicas; once an entry from the current term has been committed - * in this way, then all prior entries are committed indirectly because of the - * Log Matching Property." */ - if (storage_.GetLogTerm(new_commit_index_) != term_) { - LogInfo("Cannot commit log entry from previous term"); - return; - } - - commit_index_ = std::max(commit_index_, new_commit_index_); -} - -template <class State> -void RaftMemberImpl<State>::AppendEntries(const std::string &peer_id, - RaftPeerState &peer_state, - std::unique_lock<std::mutex> &lock) { - LogInfo("Appending entries to {}", peer_id); - - AppendEntriesRequest<State> request; - request.leader_term = term_; - request.leader_id = id_; - - request.prev_log_index = peer_state.next_index - 1; - request.prev_log_term = storage_.GetLogTerm(peer_state.next_index - 1); - - if (!peer_state.suppress_log_entries && - peer_state.next_index <= storage_.GetLastLogIndex()) { - request.entries = storage_.GetLogSuffix(peer_state.next_index); - } else { - request.entries = {}; - } - - request.leader_commit = commit_index_; - - AppendEntriesReply reply; - - /* Release lock before issuing RPC and waiting for response. */ - /* TODO(mtomic): Revise how this will work with RPC cancellation. */ - lock.unlock(); - bool ok = network_.SendAppendEntries(peer_id, request, reply); - lock.lock(); - - /* TODO(mtomic): Maybe implement exponential backoff. */ - if (!ok) { - /* There is probably something wrong with this peer, let's avoid sending log - * entries. 
*/ - peer_state.suppress_log_entries = true; - peer_state.backoff_until = Clock::now() + config_.rpc_backoff; - return; - } - - if (term_ != request.leader_term || exiting_) { - return; - } - - if (CandidateOrLeaderNoteTerm(reply.term)) { - state_changed_.notify_all(); - return; - } - - DCHECK(mode_ == RaftMode::LEADER) - << "Elected leader for term should never change"; - DCHECK(reply.term == term_) << "Got stale AppendEntries reply"; - - if (reply.success) { - /* We've found a match, we can start sending log entries. */ - peer_state.suppress_log_entries = false; - - LogIndex new_match_index = request.prev_log_index + request.entries.size(); - DCHECK(peer_state.match_index <= new_match_index) - << "`match_index` should increase monotonically within a term"; - peer_state.match_index = new_match_index; - AdvanceCommitIndex(); - peer_state.next_index = peer_state.match_index + 1; - peer_state.next_heartbeat_time = Clock::now() + config_.heartbeat_interval; - } else { - DCHECK(peer_state.next_index > 1) - << "Log replication should not fail for first log entry."; - --peer_state.next_index; - } - - state_changed_.notify_all(); -} - -template <class State> -RequestVoteReply RaftMemberImpl<State>::OnRequestVote( - const RequestVoteRequest &request) { - std::lock_guard<std::mutex> lock(mutex_); - LogInfo("RequestVote RPC request from {}", request.candidate_id); - - RequestVoteReply reply; - - /* [Raft thesis, Section 3.3] - * "If a server receives a request with a stale term number, it rejects the - * request." */ - if (request.candidate_term < term_) { - reply.term = term_; - reply.vote_granted = false; - return reply; - } - - /* [Raft thesis, Section 3.3] - * "Current terms are exchanged whenever servers communicate; if one server's - * current term is smaller than the other's, then it updates its current term - * to the larger value. If a candidate or leader discovers that its term is - * out of date, it immediately reverts to follower state." */ - if (request.candidate_term > term_) { - if (mode_ != RaftMode::FOLLOWER) { - CandidateOrLeaderTransitionToFollower(); - } - UpdateTermAndVotedFor(request.candidate_term, {}); - } - - /* [Raft thesis, Section 3.6.1] - * "Raft uses the voting process to prevent a candidate from winning an - * election unless its log contains all committed entries. (...) The - * RequestVote RPC implements this restriction: the RPC includes information - * about the candidate's log, and the voter denies its vote if its own log is - * more up-to-date than that of the candidate. Raft determines which of two - * logs is more up-to-date by comparing the index and term of the last entries - * in the logs. If the logs have last entries with different terms, then the - * log with the later term is more up-to-date. If the logs end with the same - * term, then whichever log is longer is more up-to-date." */ - LogIndex my_last_log_index = storage_.GetLastLogIndex(); - TermId my_last_log_term = storage_.GetLogTerm(my_last_log_index); - if (my_last_log_term > request.last_log_term || - (my_last_log_term == request.last_log_term && - my_last_log_index > request.last_log_index)) { - reply.term = term_; - reply.vote_granted = false; - return reply; - } - - /* [Raft thesis, Section 3.4] - * "Each server will vote for at most one candidate in a given term, on a - * firstcome-first-served basis." - */ - - /* We voted for someone else in this term. 
*/ - if (request.candidate_term == term_ && voted_for_ && - *voted_for_ != request.candidate_id) { - reply.term = term_; - reply.vote_granted = false; - return reply; - } - - /* Now we know we will vote for this candidate, because it's term is at least - * as big as ours and we haven't voted for anyone else. */ - UpdateTermAndVotedFor(request.candidate_term, request.candidate_id); - - /* [Raft thesis, Section 3.4] - * A server remains in follower state as long as it receives valid RPCs from a - * leader or candidate. */ - SetElectionTimer(); - state_changed_.notify_all(); - - reply.term = request.candidate_term; - reply.vote_granted = true; - return reply; -} - -template <class State> -AppendEntriesReply RaftMemberImpl<State>::OnAppendEntries( - const AppendEntriesRequest<State> &request) { - std::lock_guard<std::mutex> lock(mutex_); - LogInfo("AppendEntries RPC request from {}", request.leader_id); - - AppendEntriesReply reply; - - /* [Raft thesis, Section 3.3] - * "If a server receives a request with a stale term number, it rejects the - * request." */ - if (request.leader_term < term_) { - reply.term = term_; - reply.success = false; - return reply; - } - - /* [Raft thesis, Section 3.3] - * "Current terms are exchanged whenever servers communicate; if one server's - * current term is smaller than the other's, then it updates its current term - * to the larger value. If a candidate or leader discovers that its term is - * out of date, it immediately reverts to follower state." */ - if (request.leader_term > term_) { - if (mode_ != RaftMode::FOLLOWER) { - CandidateOrLeaderTransitionToFollower(); - } - UpdateTermAndVotedFor(request.leader_term, {}); - } - - /* [Raft thesis, Section 3.4] - * "While waiting for votes, a candidate may receive an AppendEntries RPC from - * another server claiming to be leader. If the leader's term (included in its - * RPC) is at least as large as the candidate's current term, then the - * candidate recognizes the leader as legitimate and returns to follower - * state." */ - if (mode_ == RaftMode::CANDIDATE && request.leader_term == term_) { - CandidateOrLeaderTransitionToFollower(); - } - - DCHECK(mode_ != RaftMode::LEADER) - << "Leader cannot accept `AppendEntries` RPC"; - DCHECK(term_ == request.leader_term) << "Term should be equal to request " - "term when accepting `AppendEntries` " - "RPC"; - - leader_ = request.leader_id; - - /* [Raft thesis, Section 3.4] - * A server remains in follower state as long as it receives valid RPCs from a - * leader or candidate. */ - SetElectionTimer(); - state_changed_.notify_all(); - - /* [Raft thesis, Section 3.5] - * "When sending an AppendEntries RPC, the leader includes the index and term - * of the entry in its log that immediately precedes the new entries. If the - * follower does not find an entry in its log with the same index and term, - * then it refuses the new entries." */ - if (request.prev_log_index > storage_.GetLastLogIndex() || - storage_.GetLogTerm(request.prev_log_index) != request.prev_log_term) { - reply.term = term_; - reply.success = false; - return reply; - } - - /* [Raft thesis, Section 3.5] - * "To bring a follower's log into consistency with its own, the leader must - * find the latest log entry where the two logs agree, delete any entries in - * the follower's log after that point, and send the follower all of the - * leader's entries after that point." */ - - /* Entry at `request.prev_log_index` is the last entry where ours and leader's - * logs agree. 
It's time to replace the tail of the log with new entries from - * the leader. We have to be careful here as duplicated AppendEntries RPCs - * could cause data loss. - * - * There is a possibility that an old AppendEntries RPC is duplicated and - * received after processing newer one. For example, leader appends entry 3 - * and then entry 4, but follower recieves entry 3, then entry 4, and then - * entry 3 again. We have to be careful not to delete entry 4 from log when - * processing the last RPC. */ - LogIndex index = request.prev_log_index; - auto it = request.entries.begin(); - for (; it != request.entries.end(); ++it) { - ++index; - if (index > storage_.GetLastLogIndex()) { - break; - } - if (storage_.GetLogTerm(index) != it->term) { - LogInfo("Truncating log suffix from index {}", index); - DCHECK(commit_index_ < index) - << "Committed entries should never be truncated from the log"; - storage_.TruncateLogSuffix(index); - break; - } - } - - LogInfo("Appending {} out of {} logs from {}.", request.entries.end() - it, - request.entries.size(), request.leader_id); - - for (; it != request.entries.end(); ++it) { - storage_.AppendLogEntry(*it); - } - - commit_index_ = std::max(commit_index_, request.leader_commit); - - /* Let's bump election timer once again, we don't want to take down the leader - * because of our long disk writes. */ - SetElectionTimer(); - state_changed_.notify_all(); - - reply.term = term_; - reply.success = true; - return reply; -} - -template <class State> -ClientResult RaftMemberImpl<State>::AddCommand( - const typename State::Change &command, bool blocking) { - std::unique_lock<std::mutex> lock(mutex_); - if (mode_ != RaftMode::LEADER) { - return ClientResult::NOT_LEADER; - } - - LogEntry<State> entry; - entry.term = term_; - entry.command = command; - storage_.AppendLogEntry(entry); - - // Entry is already replicated if this is a single node cluster. 
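
As an aside, the duplicated-RPC scenario spelled out in the comment above (a stale AppendEntries carrying entry 3 arriving after entry 4 was already appended) can be sketched with a small standalone example of the same merge rule over an in-memory log. The Entry struct, the MergeEntries helper and the 1-based indexing below are assumptions made for the illustration; none of it is code from this patch.

// Illustrative sketch, not part of the patch: the follower-side merge rule
// described above, applied to a plain in-memory log.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Entry {
  uint64_t term;
  int command;  // stand-in for the replicated command payload
};

// Merges `entries`, which start right after `prev_log_index`, into `log`
// (1-based log indices). Existing entries whose term already matches are kept,
// so a duplicated or reordered AppendEntries RPC cannot erase newer entries;
// on the first term conflict the local suffix is truncated and the leader's
// entries are appended from that point on.
void MergeEntries(std::vector<Entry> &log, uint64_t prev_log_index,
                  const std::vector<Entry> &entries) {
  uint64_t index = prev_log_index;
  std::size_t i = 0;
  for (; i < entries.size(); ++i) {
    ++index;
    if (index > log.size()) break;                 // past our last entry: append the rest
    if (log[index - 1].term != entries[i].term) {  // conflict: drop our suffix
      log.resize(index - 1);
      break;
    }
    // Same index and term: by the Log Matching Property the entries are
    // identical, so nothing needs to change.
  }
  for (; i < entries.size(); ++i) log.push_back(entries[i]);
}

int main() {
  std::vector<Entry> log = {{1, 1}, {1, 2}, {1, 3}, {1, 4}};  // entries 1..4
  // A stale AppendEntries carrying only entry 3 arrives after entry 4 was
  // already appended; the merge must not delete entry 4.
  MergeEntries(log, 2, {{1, 3}});
  assert(log.size() == 4);
  return 0;
}
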
- AdvanceCommitIndex(); - - state_changed_.notify_all(); - - if (!blocking) { - return ClientResult::OK; - } - - LogIndex index = storage_.GetLastLogIndex(); - - while (!exiting_ && term_ == entry.term) { - if (commit_index_ >= index) { - return ClientResult::OK; - } - state_changed_.wait(lock); - } - - return ClientResult::NOT_LEADER; -} - -} // namespace impl - -template <class State> -RaftMember<State>::RaftMember(RaftNetworkInterface<State> &network, - RaftStorageInterface<State> &storage, - const MemberId &id, const RaftConfig &config) - : network_(network), impl_(network, storage, id, config) { - timer_thread_ = - std::thread(&impl::RaftMemberImpl<State>::TimerThreadMain, &impl_); - - for (const auto &peer_id : config.members) { - if (peer_id != id) { - peer_threads_.emplace_back(&impl::RaftMemberImpl<State>::PeerThreadMain, - &impl_, peer_id); - } - } - - network_.Start(*this); -} - -template <class State> -RaftMember<State>::~RaftMember() { - impl_.Stop(); - timer_thread_.join(); - - for (auto &peer_thread : peer_threads_) { - peer_thread.join(); - } -} - -template <class State> -ClientResult RaftMember<State>::AddCommand( - const typename State::Change &command, bool blocking) { - return impl_.AddCommand(command, blocking); -} - -template <class State> -RequestVoteReply RaftMember<State>::OnRequestVote( - const RequestVoteRequest &request) { - return impl_.OnRequestVote(request); -} - -template <class State> -AppendEntriesReply RaftMember<State>::OnAppendEntries( - const AppendEntriesRequest<State> &request) { - return impl_.OnAppendEntries(request); -} - -} // namespace communication::raft diff --git a/src/communication/raft/raft.hpp b/src/communication/raft/raft.hpp deleted file mode 100644 index 0c82671f3..000000000 --- a/src/communication/raft/raft.hpp +++ /dev/null @@ -1,277 +0,0 @@ -#pragma once - -#include <chrono> -#include <condition_variable> -#include <experimental/optional> -#include <map> -#include <mutex> -#include <random> -#include <set> -#include <thread> -#include <vector> - -#include "boost/serialization/vector.hpp" -#include "glog/logging.h" - -#include "utils/serialization.hpp" - -namespace communication::raft { - -template <class State> -class RaftMember; - -enum class ClientResult { NOT_LEADER, OK }; - -using Clock = std::chrono::system_clock; -using TimePoint = std::chrono::system_clock::time_point; - -using MemberId = std::string; -using TermId = uint64_t; - -using ClientId = uint64_t; -using CommandId = uint64_t; - -using LogIndex = uint64_t; - -template <class State> -struct LogEntry { - int term; - - std::experimental::optional<typename State::Change> command; - - bool operator==(const LogEntry &rhs) const { - return term == rhs.term && command == rhs.command; - } - bool operator!=(const LogEntry &rhs) const { return !(*this == rhs); } - - template <class TArchive> - void serialize(TArchive &ar, unsigned int) { - ar &term; - ar &command; - } -}; - -/* Raft RPC requests and replies as described in [Raft thesis, Figure 3.1]. 
*/ -struct RequestVoteRequest { - TermId candidate_term; - MemberId candidate_id; - LogIndex last_log_index; - TermId last_log_term; - - template <class TArchive> - void serialize(TArchive &ar, unsigned int) { - ar &candidate_term; - ar &candidate_id; - ar &last_log_index; - ar &last_log_term; - } -}; - -struct RequestVoteReply { - TermId term; - bool vote_granted; - - template <class TArchive> - void serialize(TArchive &ar, unsigned int) { - ar &term; - ar &vote_granted; - } -}; - -template <class State> -struct AppendEntriesRequest { - TermId leader_term; - MemberId leader_id; - LogIndex prev_log_index; - TermId prev_log_term; - std::vector<LogEntry<State>> entries; - LogIndex leader_commit; - - template <class TArchive> - void serialize(TArchive &ar, unsigned int) { - ar &leader_term; - ar &leader_id; - ar &prev_log_index; - ar &prev_log_term; - ar &entries; - ar &leader_commit; - } -}; - -struct AppendEntriesReply { - TermId term; - bool success; - - template <class TArchive> - void serialize(TArchive &ar, unsigned int) { - ar &term; - ar &success; - } -}; - -template <class State> -class RaftNetworkInterface { - public: - virtual ~RaftNetworkInterface() = default; - - /* These function return false if RPC failed for some reason (e.g. cannot - * establish connection or request cancelled). Otherwise - * `reply` contains response from peer. */ - virtual bool SendRequestVote(const MemberId &recipient, - const RequestVoteRequest &request, - RequestVoteReply &reply) = 0; - - virtual bool SendAppendEntries(const MemberId &recipient, - const AppendEntriesRequest<State> &request, - AppendEntriesReply &reply) = 0; - - /* This will be called once the RaftMember is ready to start receiving RPCs. - */ - virtual void Start(RaftMember<State> &member) = 0; -}; - -template <class State> -class RaftStorageInterface { - public: - virtual ~RaftStorageInterface() = default; - - virtual void WriteTermAndVotedFor( - const TermId term, - const std::experimental::optional<std::string> &voted_for) = 0; - virtual std::pair<TermId, std::experimental::optional<MemberId>> - GetTermAndVotedFor() = 0; - virtual void AppendLogEntry(const LogEntry<State> &entry) = 0; - virtual TermId GetLogTerm(const LogIndex index) = 0; - virtual LogEntry<State> GetLogEntry(const LogIndex index) = 0; - virtual std::vector<LogEntry<State>> GetLogSuffix(const LogIndex index) = 0; - virtual LogIndex GetLastLogIndex() = 0; - virtual void TruncateLogSuffix(const LogIndex index) = 0; -}; - -struct RaftConfig { - std::vector<MemberId> members; - std::chrono::milliseconds leader_timeout_min; - std::chrono::milliseconds leader_timeout_max; - std::chrono::milliseconds heartbeat_interval; - std::chrono::milliseconds rpc_backoff; -}; - -namespace impl { - -enum class RaftMode { FOLLOWER, CANDIDATE, LEADER }; - -struct RaftPeerState { - bool request_vote_done; - bool voted_for_me; - LogIndex match_index; - LogIndex next_index; - bool suppress_log_entries; - Clock::time_point next_heartbeat_time; - Clock::time_point backoff_until; -}; - -template <class State> -class RaftMemberImpl { - public: - explicit RaftMemberImpl(RaftNetworkInterface<State> &network, - RaftStorageInterface<State> &storage, - const MemberId &id, const RaftConfig &config); - - ~RaftMemberImpl(); - - void Stop(); - - void TimerThreadMain(); - void PeerThreadMain(std::string peer_id); - - void UpdateTermAndVotedFor( - const TermId new_term, - const std::experimental::optional<MemberId> &new_voted_for); - void CandidateOrLeaderTransitionToFollower(); - void 
CandidateTransitionToLeader(); - bool CandidateOrLeaderNoteTerm(const TermId new_term); - - void StartNewElection(); - void SetElectionTimer(); - bool CountVotes(); - void RequestVote(const MemberId &peer_id, RaftPeerState &peer_state, - std::unique_lock<std::mutex> &lock); - - void AdvanceCommitIndex(); - void AppendEntries(const MemberId &peer_id, RaftPeerState &peer_state, - std::unique_lock<std::mutex> &lock); - - RequestVoteReply OnRequestVote(const RequestVoteRequest &request); - AppendEntriesReply OnAppendEntries( - const AppendEntriesRequest<State> &request); - - ClientResult AddCommand(const typename State::Change &command, bool blocking); - - template <class... Args> - void LogInfo(const std::string &, Args &&...); - - RaftNetworkInterface<State> &network_; - RaftStorageInterface<State> &storage_; - - MemberId id_; - RaftConfig config_; - - TermId term_; - RaftMode mode_ = RaftMode::FOLLOWER; - std::experimental::optional<MemberId> voted_for_ = std::experimental::nullopt; - std::experimental::optional<MemberId> leader_ = std::experimental::nullopt; - - TimePoint next_election_time_; - - LogIndex commit_index_ = 0; - - bool exiting_ = false; - - std::map<std::string, std::unique_ptr<RaftPeerState>> peer_states_; - - /* This mutex protects all of the internal state. */ - std::mutex mutex_; - - /* Used to notify waiting threads that some of the internal state has changed. - * It is notified when following events occurr: - * - mode change - * - election start - * - `next_election_time_` update on RPC from leader or candidate - * - destructor is called - * - `commit_index_` is advanced - */ - std::condition_variable state_changed_; - - std::mt19937_64 rng_ = std::mt19937_64(std::random_device{}()); -}; - -} // namespace impl - -template <class State> -class RaftMember final { - public: - explicit RaftMember(RaftNetworkInterface<State> &network, - RaftStorageInterface<State> &storage, const MemberId &id, - const RaftConfig &config); - ~RaftMember(); - - ClientResult AddCommand(const typename State::Change &command, bool blocking); - - RequestVoteReply OnRequestVote(const RequestVoteRequest &request); - AppendEntriesReply OnAppendEntries( - const AppendEntriesRequest<State> &request); - - private: - RaftNetworkInterface<State> &network_; - impl::RaftMemberImpl<State> impl_; - - /* Timer thread for triggering elections. */ - std::thread timer_thread_; - - /* One thread per peer for outgoing RPCs. */ - std::vector<std::thread> peer_threads_; -}; - -} // namespace communication::raft - -#include "raft-inl.hpp" diff --git a/src/communication/raft/rpc.hpp b/src/communication/raft/rpc.hpp deleted file mode 100644 index 1cd5bb7f3..000000000 --- a/src/communication/raft/rpc.hpp +++ /dev/null @@ -1,120 +0,0 @@ -#pragma once - -#include <unordered_map> - -#include "glog/logging.h" - -#include "communication/raft/network_common.hpp" -#include "communication/raft/raft.hpp" -#include "communication/rpc/client.hpp" -#include "communication/rpc/server.hpp" -#include "io/network/endpoint.hpp" - -/* Implementation of `RaftNetworkInterface` using RPC. Raft RPC requests and - * responses are wrapped in `PeerRpcRequest` and `PeerRpcReply`. */ - -// TODO(mtomic): Unwrap RPCs and use separate request-response protocols instead -// of `PeerProtocol`, or at least use an union to avoid sending unnecessary data -// over the wire. 
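For reference, a minimal sketch of how the `RaftMember` API declared in the removed raft.hpp was typically wired together (the member ids, timeouts and the `IntState` command below are illustrative values only; `InMemoryStorage` and `NoOpNetworkInterface` come from the removed storage/memory.hpp and test_utils.hpp headers):

using namespace communication::raft;
using State = test_utils::IntState;

test_utils::NoOpNetworkInterface<State> network;  // drops every outgoing RPC
InMemoryStorage<State> storage;                   // volatile term/vote/log

RaftConfig config{{"a", "b", "c"},                 // members
                  std::chrono::milliseconds(150),  // leader_timeout_min
                  std::chrono::milliseconds(300),  // leader_timeout_max
                  std::chrono::milliseconds(50),   // heartbeat_interval
                  std::chrono::milliseconds(100)}; // rpc_backoff

RaftMember<State> member(network, storage, "a", config);

// Try to replicate a command without blocking. With a no-op network this
// member can never collect a majority of votes in a three-member cluster,
// so the expected result is ClientResult::NOT_LEADER.
ClientResult result = member.AddCommand({State::Change::SET, 42}, false);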
- -namespace communication::raft { - -template <class State> -using PeerProtocol = rpc::RequestResponse<PeerRpcRequest<State>, PeerRpcReply>; - -template <class State> -class RpcNetwork : public RaftNetworkInterface<State> { - public: - RpcNetwork(rpc::Server &server, - std::unordered_map<std::string, io::network::Endpoint> directory) - : server_(server), directory_(std::move(directory)) {} - - virtual void Start(RaftMember<State> &member) override { - // TODO: Serialize RPC via Cap'n Proto -// server_.Register<PeerProtocol<State>>( -// [&member](const auto &req_reader, auto *res_builder) { -// PeerRpcRequest<State> request; -// request.Load(req_reader); -// PeerRpcReply reply; -// reply.type = request.type; -// switch (request.type) { -// case RpcType::REQUEST_VOTE: -// reply.request_vote = member.OnRequestVote(request.request_vote); -// break; -// case RpcType::APPEND_ENTRIES: -// reply.append_entries = -// member.OnAppendEntries(request.append_entries); -// break; -// default: -// LOG(ERROR) << "Unknown RPC type: " -// << static_cast<int>(request.type); -// } -// reply.Save(res_builder); -// }); - } - - virtual bool SendRequestVote(const MemberId &recipient, - const RequestVoteRequest &request, - RequestVoteReply &reply) override { - PeerRpcRequest<State> req; - PeerRpcReply rep; - - req.type = RpcType::REQUEST_VOTE; - req.request_vote = request; - - if (!SendRpc(recipient, req, rep)) { - return false; - } - - reply = rep.request_vote; - return true; - } - - virtual bool SendAppendEntries(const MemberId &recipient, - const AppendEntriesRequest<State> &request, - AppendEntriesReply &reply) override { - PeerRpcRequest<State> req; - PeerRpcReply rep; - - req.type = RpcType::APPEND_ENTRIES; - req.append_entries = request; - - if (!SendRpc(recipient, req, rep)) { - return false; - } - - reply = rep.append_entries; - return true; - } - - private: - bool SendRpc(const MemberId &recipient, const PeerRpcRequest<State> &request, - PeerRpcReply &reply) { - auto &client = GetClient(recipient); - auto response = client.template Call<PeerProtocol<State>>(request); - - if (!response) { - return false; - } - - reply = *response; - return true; - } - - rpc::Client &GetClient(const MemberId &id) { - auto it = clients_.find(id); - if (it == clients_.end()) { - auto ne = directory_[id]; - it = clients_.try_emplace(id, ne).first; - } - return it->second; - } - - rpc::Server &server_; - // TODO(mtomic): how to update and distribute this? - std::unordered_map<MemberId, io::network::Endpoint> directory_; - - std::unordered_map<MemberId, rpc::Client> clients_; -}; - -} // namespace communication::raft diff --git a/src/communication/raft/storage/file.hpp b/src/communication/raft/storage/file.hpp deleted file mode 100644 index e45b7011a..000000000 --- a/src/communication/raft/storage/file.hpp +++ /dev/null @@ -1,239 +0,0 @@ -/** - * @file - * - * Raft log is stored inside a folder. Each log entry is stored in a file named - * by its index. There is a special file named "metadata" which stores Raft - * metadata and also the last log index, which is used on startup to identify - * which log entry files are valid. 
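 *
 * For illustration, a directory holding a three-entry log would contain
 * (names follow the scheme above; the annotations are paraphrased):
 *
 *   <log-dir>/metadata   -- term, voted_for and last_log_index (= 3)
 *   <log-dir>/1          -- serialized LogEntry #1
 *   <log-dir>/2          -- serialized LogEntry #2
 *   <log-dir>/3          -- serialized LogEntry #3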
- */ -#pragma once - -#include <fcntl.h> - -#include "boost/archive/binary_iarchive.hpp" -#include "boost/archive/binary_oarchive.hpp" -#include "boost/iostreams/device/file_descriptor.hpp" -#include "boost/iostreams/stream.hpp" - -#include "communication/raft/raft.hpp" -#include "communication/raft/storage/memory.hpp" -#include "utils/file.hpp" - -namespace communication::raft { - -struct SimpleFileStorageMetadata { - TermId term; - std::experimental::optional<MemberId> voted_for; - LogIndex last_log_index; - - template <class TArchive> - void serialize(TArchive &ar, unsigned int) { - ar &term &voted_for &last_log_index; - } -}; - -template <class State> -class SimpleFileStorage : public RaftStorageInterface<State> { - public: - explicit SimpleFileStorage(const fs::path &parent_dir) : memory_storage_() { - try { - dir_ = utils::OpenDir(parent_dir); - } catch (std::system_error &e) { - LOG(FATAL) << fmt::format("Error opening log directory: {}", e.what()); - } - - auto md = utils::TryOpenFile(dir_, "metadata", O_RDONLY); - if (!md) { - LOG(WARNING) << fmt::format("No metadata file found in directory '{}'", - parent_dir); - return; - } - - boost::iostreams::file_descriptor_source src( - md->Handle(), - boost::iostreams::file_descriptor_flags::never_close_handle); - boost::iostreams::stream<boost::iostreams::file_descriptor_source> is(src); - boost::archive::binary_iarchive iar(is); - - SimpleFileStorageMetadata metadata; - - try { - iar >> metadata; - } catch (boost::archive::archive_exception &e) { - LOG(FATAL) << "Failed to deserialize Raft metadata: " << e.what(); - } - - LOG(INFO) << fmt::format( - "Read term = {} and voted_for = {} from storage", metadata.term, - metadata.voted_for ? *metadata.voted_for : "(none)"); - - memory_storage_.term_ = metadata.term; - memory_storage_.voted_for_ = metadata.voted_for; - memory_storage_.log_.reserve(metadata.last_log_index); - - for (LogIndex idx = 1; idx <= metadata.last_log_index; ++idx) { - utils::File entry_file; - - try { - entry_file = utils::OpenFile(dir_, fmt::format("{}", idx), O_RDONLY); - } catch (std::system_error &e) { - LOG(FATAL) << fmt::format("Failed to open entry file {}: {}", idx, - e.what()); - } - - boost::iostreams::file_descriptor_source src( - entry_file.Handle(), - boost::iostreams::file_descriptor_flags::never_close_handle); - boost::iostreams::stream<boost::iostreams::file_descriptor_source> is( - src); - boost::archive::binary_iarchive iar(is); - LogEntry<State> entry; - - try { - iar >> entry; - memory_storage_.log_.emplace_back(std::move(entry)); - } catch (boost::archive::archive_exception &e) { - LOG(FATAL) << fmt::format("Failed to deserialize log entry {}: {}", idx, - e.what()); - } - } - - LOG(INFO) << fmt::format("Read {} log entries", metadata.last_log_index); - } - - void WriteTermAndVotedFor( - TermId term, - const std::experimental::optional<MemberId> &voted_for) override { - memory_storage_.WriteTermAndVotedFor(term, voted_for); - WriteMetadata(); - - // Metadata file might be newly created so we have to fsync the directory. 
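// Fsync on the metadata file alone only makes the file's *contents* durable;
// the directory entry that names a newly created file lives in the directory
// itself, so the directory file descriptor must be fsynced as well for the
// file to survive a crash.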
- try { - utils::Fsync(dir_); - } catch (std::system_error &e) { - LOG(FATAL) << fmt::format("Failed to fsync Raft log directory: {}", - e.what()); - } - } - - std::pair<TermId, std::experimental::optional<MemberId>> GetTermAndVotedFor() - override { - return memory_storage_.GetTermAndVotedFor(); - } - - void AppendLogEntry(const LogEntry<State> &entry) override { - memory_storage_.AppendLogEntry(entry); - - utils::File entry_file; - - try { - entry_file = utils::OpenFile( - dir_, fmt::format("{}", memory_storage_.GetLastLogIndex()), - O_WRONLY | O_CREAT | O_TRUNC, 0644); - } catch (std::system_error &e) { - LOG(FATAL) << fmt::format("Failed to open log entry file: {}", e.what()); - } - - boost::iostreams::file_descriptor_sink sink( - entry_file.Handle(), - boost::iostreams::file_descriptor_flags::never_close_handle); - boost::iostreams::stream<boost::iostreams::file_descriptor_sink> os(sink); - boost::archive::binary_oarchive oar(os); - - try { - oar << entry; - os.flush(); - } catch (boost::archive::archive_exception &e) { - LOG(FATAL) << fmt::format("Failed to serialize log entry: {}", e.what()); - } - - try { - utils::Fsync(entry_file); - } catch (std::system_error &e) { - LOG(FATAL) << fmt::format("Failed to write log entry file to disk: {}", - e.what()); - } - - // We update the metadata only after the log entry file is written to - // disk. This ensures that no file in range [1, last_log_index] is - // corrupted. - WriteMetadata(); - - try { - utils::Fsync(dir_); - } catch (std::system_error &e) { - LOG(FATAL) << fmt::format("Failed to fsync Raft log directory: {}", - e.what()); - } - } - - TermId GetLogTerm(const LogIndex index) override { - return memory_storage_.GetLogTerm(index); - } - - LogEntry<State> GetLogEntry(const LogIndex index) override { - return memory_storage_.GetLogEntry(index); - } - - std::vector<LogEntry<State>> GetLogSuffix(const LogIndex index) override { - return memory_storage_.GetLogSuffix(index); - } - - LogIndex GetLastLogIndex() override { - return memory_storage_.GetLastLogIndex(); - } - - void TruncateLogSuffix(const LogIndex index) override { - return memory_storage_.TruncateLogSuffix(index); - } - - private: - InMemoryStorage<State> memory_storage_; - utils::File dir_; - - void WriteMetadata() { - // We first write data to a temporary file, ensure data is safely written - // to disk, and then rename the file. Since rename is an atomic operation, - // "metadata" file won't get corrupted in case of program crash. 
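// The code below follows the usual crash-safe replacement pattern:
//   1. serialize the metadata into "metadata.new" and flush the stream,
//   2. fsync "metadata.new" so its contents reach disk,
//   3. rename "metadata.new" over "metadata" (atomic within the directory);
// the callers then fsync the directory so the rename itself is durable.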
- utils::File md_tmp; - try { - md_tmp = - OpenFile(dir_, "metadata.new", O_WRONLY | O_CREAT | O_TRUNC, 0644); - } catch (std::system_error &e) { - LOG(FATAL) << fmt::format("Failed to open temporary metadata file: {}", - e.what()); - } - - boost::iostreams::file_descriptor_sink sink( - md_tmp.Handle(), - boost::iostreams::file_descriptor_flags::never_close_handle); - boost::iostreams::stream<boost::iostreams::file_descriptor_sink> os(sink); - boost::archive::binary_oarchive oar(os); - - try { - oar << SimpleFileStorageMetadata{ - memory_storage_.GetTermAndVotedFor().first, - memory_storage_.GetTermAndVotedFor().second, - memory_storage_.GetLastLogIndex()}; - } catch (boost::archive::archive_exception &e) { - LOG(FATAL) << "Error serializing Raft metadata"; - } - os.flush(); - - try { - utils::Fsync(md_tmp); - } catch (std::system_error &e) { - LOG(FATAL) << fmt::format( - "Failed to write temporary metadata file to disk: {}", e.what()); - } - - try { - utils::Rename(dir_, "metadata.new", dir_, "metadata"); - } catch (std::system_error &e) { - LOG(FATAL) << fmt::format("Failed to move temporary metadata file: {}", - e.what()); - } - } -}; - -} // namespace communication::raft diff --git a/src/communication/raft/storage/memory.hpp b/src/communication/raft/storage/memory.hpp deleted file mode 100644 index e280a29e9..000000000 --- a/src/communication/raft/storage/memory.hpp +++ /dev/null @@ -1,63 +0,0 @@ -#pragma once - -#include "communication/raft/raft.hpp" - -namespace communication::raft { - -template <class State> -class InMemoryStorage : public RaftStorageInterface<State> { - public: - InMemoryStorage() - : term_(0), voted_for_(std::experimental::nullopt), log_() {} - - InMemoryStorage(const TermId term, - const std::experimental::optional<std::string> &voted_for, - const std::vector<LogEntry<State>> log) - : term_(term), voted_for_(voted_for), log_(log) {} - - void WriteTermAndVotedFor( - const TermId term, - const std::experimental::optional<std::string> &voted_for) { - term_ = term; - voted_for_ = voted_for; - } - - std::pair<TermId, std::experimental::optional<MemberId>> - GetTermAndVotedFor() { - return {term_, voted_for_}; - } - - void AppendLogEntry(const LogEntry<State> &entry) { log_.push_back(entry); } - - TermId GetLogTerm(const LogIndex index) { - CHECK(0 <= index && index <= log_.size()) - << "Trying to read nonexistent log entry"; - return index > 0 ? 
log_[index - 1].term : 0; - } - - LogEntry<State> GetLogEntry(const LogIndex index) { - CHECK(1 <= index && index <= log_.size()) - << "Trying to get nonexistent log entry"; - return log_[index - 1]; - } - - std::vector<LogEntry<State>> GetLogSuffix(const LogIndex index) { - CHECK(1 <= index && index <= log_.size()) - << "Trying to get nonexistent log entries"; - return std::vector<LogEntry<State>>(log_.begin() + index - 1, log_.end()); - } - - LogIndex GetLastLogIndex(void) { return log_.size(); } - - void TruncateLogSuffix(const LogIndex index) { - CHECK(1 <= index <= log_.size()) - << "Trying to remove nonexistent log entries"; - log_.erase(log_.begin() + index - 1, log_.end()); - } - - TermId term_; - std::experimental::optional<MemberId> voted_for_; - std::vector<LogEntry<State>> log_; -}; - -} // namespace communication::raft diff --git a/src/communication/raft/test_utils.hpp b/src/communication/raft/test_utils.hpp deleted file mode 100644 index 97b212030..000000000 --- a/src/communication/raft/test_utils.hpp +++ /dev/null @@ -1,141 +0,0 @@ -#include <functional> - -#include "communication/raft/network_common.hpp" -#include "communication/raft/raft.hpp" - -namespace communication::raft::test_utils { - -struct DummyState { - struct Change { - bool operator==(const Change &) const { return true; } - bool operator!=(const Change &) const { return false; } - - template <class TArchive> - void serialize(TArchive &, unsigned int) {} - }; - - template <class TArchive> - void serialize(TArchive &, unsigned int) {} -}; - -struct IntState { - int x; - - struct Change { - enum Type { ADD, SUB, SET }; - Type t; - int d; - - bool operator==(const Change &rhs) const { - return t == rhs.t && d == rhs.d; - } - bool operator!=(const Change &rhs) const { return !(*this == rhs); }; - - template <class TArchive> - void serialize(TArchive &ar, unsigned int) { - ar &t; - ar &d; - } - }; - - template <class TArchive> - void serialize(TArchive &ar, unsigned int) { - ar &x; - } -}; - -/* Implementations of `RaftNetworkInterface` for simpler unit testing. */ - -/* `NoOpNetworkInterface` doesn't do anything -- it's like a server disconnected - * from the network. */ -template <class State> -class NoOpNetworkInterface : public RaftNetworkInterface<State> { - public: - ~NoOpNetworkInterface() {} - - virtual bool SendRequestVote(const MemberId &, const RequestVoteRequest &, - RequestVoteReply &) override { - return false; - } - - virtual bool SendAppendEntries(const MemberId &, - const AppendEntriesRequest<State> &, - AppendEntriesReply &) override { - return false; - } - - virtual void Start(RaftMember<State> &) override {} -}; - -/* `NextReplyNetworkInterface` has two fields: `on_request_` and `next_reply_` - * which is optional. `on_request_` is a callback that will be called before - * processing requets. If `next_reply_` is not set, `Send*` functions will - * return false, otherwise they return that reply. 
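 *
 * For example (illustrative only, using `DummyState` as the state machine),
 * a test can record outgoing requests and force a granted vote like this:
 *
 *   NextReplyNetworkInterface<DummyState> network;
 *   network.on_request_ = [](const PeerRpcRequest<DummyState> &) {};
 *   PeerRpcReply reply;
 *   reply.type = RpcType::REQUEST_VOTE;
 *   reply.request_vote = RequestVoteReply{1, true};
 *   network.next_reply_ = reply;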
*/ -template <class State> -class NextReplyNetworkInterface : public RaftNetworkInterface<State> { - public: - ~NextReplyNetworkInterface() {} - - virtual bool SendRequestVote(const MemberId &, - const RequestVoteRequest &request, - RequestVoteReply &reply) override { - PeerRpcRequest<State> req; - req.type = RpcType::REQUEST_VOTE; - req.request_vote = request; - on_request_(req); - if (!next_reply_) { - return false; - } - DCHECK(next_reply_->type == RpcType::REQUEST_VOTE) - << "`next_reply_` type doesn't match the request type"; - reply = next_reply_->request_vote; - return true; - } - - virtual bool SendAppendEntries(const MemberId &, - const AppendEntriesRequest<State> &request, - AppendEntriesReply &reply) override { - PeerRpcRequest<State> req; - req.type = RpcType::APPEND_ENTRIES; - req.append_entries = request; - on_request_(req); - if (!next_reply_) { - return false; - } - DCHECK(next_reply_->type == RpcType::APPEND_ENTRIES) - << "`next_reply_` type doesn't match the request type"; - reply = next_reply_->append_entries; - return true; - } - - virtual void Start(RaftMember<State> &) override {} - - std::function<void(const PeerRpcRequest<State> &)> on_request_; - std::experimental::optional<PeerRpcReply> next_reply_; -}; - -template <class State> -class NoOpStorageInterface : public RaftStorageInterface<State> { - public: - NoOpStorageInterface() {} - - void WriteTermAndVotedFor(const TermId, - const std::experimental::optional<std::string> &) {} - - std::pair<TermId, std::experimental::optional<MemberId>> - GetTermAndVotedFor() { - return {0, {}}; - } - void AppendLogEntry(const LogEntry<State> &) {} - TermId GetLogTerm(const LogIndex) { return 0; } - LogEntry<State> GetLogEntry(const LogIndex) { assert(false); } - std::vector<LogEntry<State>> GetLogSuffix(const LogIndex) { return {}; } - LogIndex GetLastLogIndex() { return 0; } - void TruncateLogSuffix(const LogIndex) {} - - TermId term_; - std::experimental::optional<MemberId> voted_for_; - std::vector<LogEntry<State>> log_; -}; - -} // namespace communication::raft::test_utils diff --git a/src/communication/rpc/client.cpp b/src/communication/rpc/client.cpp deleted file mode 100644 index 6a1c9b0fa..000000000 --- a/src/communication/rpc/client.cpp +++ /dev/null @@ -1,100 +0,0 @@ -#include <chrono> -#include <thread> - -#include "gflags/gflags.h" - -#include "communication/rpc/client.hpp" - -DEFINE_HIDDEN_bool(rpc_random_latency, false, - "If a random wait should happen on each RPC call, to " - "simulate network latency."); - -namespace communication::rpc { - -Client::Client(const io::network::Endpoint &endpoint) : endpoint_(endpoint) {} - -std::experimental::optional<::capnp::FlatArrayMessageReader> Client::Send( - ::capnp::MessageBuilder *message) { - std::lock_guard<std::mutex> guard(mutex_); - - if (FLAGS_rpc_random_latency) { - auto microseconds = (int)(1000 * rand_(gen_)); - std::this_thread::sleep_for(std::chrono::microseconds(microseconds)); - } - - // Check if the connection is broken (if we haven't used the client for a - // long time the server could have died). - if (client_ && client_->ErrorStatus()) { - client_ = std::experimental::nullopt; - } - - // Connect to the remote server. - if (!client_) { - client_.emplace(&context_); - if (!client_->Connect(endpoint_)) { - LOG(ERROR) << "Couldn't connect to remote address " << endpoint_; - client_ = std::experimental::nullopt; - return std::experimental::nullopt; - } - } - - // Serialize and send request. 
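// Wire framing used by this RPC layer (mirrored by rpc::Session on the
// server side): a MessageSize (uint32_t) length prefix, followed by that
// many bytes of the non-packed Cap'n Proto flat-array encoding of the
// message; the response comes back with the same framing.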
- auto request_words = ::capnp::messageToFlatArray(*message); - auto request_bytes = request_words.asBytes(); - CHECK(request_bytes.size() <= std::numeric_limits<MessageSize>::max()) - << fmt::format( - "Trying to send message of size {}, max message size is {}", - request_bytes.size(), std::numeric_limits<MessageSize>::max()); - - MessageSize request_data_size = request_bytes.size(); - if (!client_->Write(reinterpret_cast<uint8_t *>(&request_data_size), - sizeof(MessageSize), true)) { - LOG(ERROR) << "Couldn't send request size to " << client_->endpoint(); - client_ = std::experimental::nullopt; - return std::experimental::nullopt; - } - - if (!client_->Write(request_bytes.begin(), request_bytes.size())) { - LOG(ERROR) << "Couldn't send request data to " << client_->endpoint(); - client_ = std::experimental::nullopt; - return std::experimental::nullopt; - } - - // Receive response data size. - if (!client_->Read(sizeof(MessageSize))) { - LOG(ERROR) << "Couldn't get response from " << client_->endpoint(); - client_ = std::experimental::nullopt; - return std::experimental::nullopt; - } - MessageSize response_data_size = - *reinterpret_cast<MessageSize *>(client_->GetData()); - client_->ShiftData(sizeof(MessageSize)); - - // Receive response data. - if (!client_->Read(response_data_size)) { - LOG(ERROR) << "Couldn't get response from " << client_->endpoint(); - client_ = std::experimental::nullopt; - return std::experimental::nullopt; - } - - // Read the response message. - auto data = ::kj::arrayPtr(client_->GetData(), response_data_size); - // Our data is word aligned and padded to 64bit because we use regular - // (non-packed) serialization of Cap'n Proto. So we can use reinterpret_cast. - auto data_words = - ::kj::arrayPtr(reinterpret_cast<::capnp::word *>(data.begin()), - reinterpret_cast<::capnp::word *>(data.end())); - ::capnp::FlatArrayMessageReader response_message(data_words.asConst()); - client_->ShiftData(response_data_size); - return std::experimental::make_optional(std::move(response_message)); -} - -void Client::Abort() { - if (!client_) return; - // We need to call Shutdown on the client to abort any pending read or - // write operations. - client_->Shutdown(); - client_ = std::experimental::nullopt; -} - -} // namespace communication::rpc diff --git a/src/communication/rpc/client.hpp b/src/communication/rpc/client.hpp deleted file mode 100644 index 971fb558f..000000000 --- a/src/communication/rpc/client.hpp +++ /dev/null @@ -1,101 +0,0 @@ -#pragma once - -#include <experimental/optional> -#include <memory> -#include <mutex> -#include <random> - -#include <capnp/message.h> -#include <capnp/serialize.h> -#include <glog/logging.h> - -#include "communication/client.hpp" -#include "communication/rpc/messages.capnp.h" -#include "communication/rpc/messages.hpp" -#include "io/network/endpoint.hpp" -#include "utils/demangle.hpp" - -namespace communication::rpc { - -/// Client is thread safe, but it is recommended to use thread_local clients. -class Client { - public: - explicit Client(const io::network::Endpoint &endpoint); - - /// Call function can initiate only one request at the time. Function blocks - /// until there is a response. If there was an error nullptr is returned. - template <class TRequestResponse, class... Args> - std::experimental::optional<typename TRequestResponse::Response> Call( - Args &&... 
args) { - return CallWithLoad<TRequestResponse>( - [](const auto &reader) { - typename TRequestResponse::Response response; - response.Load(reader); - return response; - }, - std::forward<Args>(args)...); - } - - /// Same as `Call` but the first argument is a response loading function. - template <class TRequestResponse, class... Args> - std::experimental::optional<typename TRequestResponse::Response> CallWithLoad( - std::function<typename TRequestResponse::Response( - const typename TRequestResponse::Response::Capnp::Reader &)> - load, - Args &&... args) { - typename TRequestResponse::Request request(std::forward<Args>(args)...); - auto req_type = TRequestResponse::Request::TypeInfo; - VLOG(12) << "[RpcClient] sent " << req_type.name; - ::capnp::MallocMessageBuilder req_msg; - { - auto builder = req_msg.initRoot<capnp::Message>(); - builder.setTypeId(req_type.id); - auto data_builder = builder.initData(); - auto req_builder = - data_builder - .template initAs<typename TRequestResponse::Request::Capnp>(); - request.Save(&req_builder); - } - auto maybe_response = Send(&req_msg); - if (!maybe_response) { - return std::experimental::nullopt; - } - auto res_msg = maybe_response->getRoot<capnp::Message>(); - auto res_type = TRequestResponse::Response::TypeInfo; - if (res_msg.getTypeId() != res_type.id) { - // Since message_id was checked in private Call function, this means - // something is very wrong (probably on the server side). - LOG(ERROR) << "Message response was of unexpected type"; - client_ = std::experimental::nullopt; - return std::experimental::nullopt; - } - - VLOG(12) << "[RpcClient] received " << res_type.name; - - auto data_reader = - res_msg.getData() - .template getAs<typename TRequestResponse::Response::Capnp>(); - return std::experimental::make_optional(load(data_reader)); - } - - /// Call this function from another thread to abort a pending RPC call. - void Abort(); - - private: - std::experimental::optional<::capnp::FlatArrayMessageReader> Send( - ::capnp::MessageBuilder *message); - - io::network::Endpoint endpoint_; - // TODO (mferencevic): currently the RPC client is hardcoded not to use SSL - communication::ClientContext context_; - std::experimental::optional<communication::Client> client_; - - std::mutex mutex_; - - // Random generator for simulated network latency (enable with a flag). - // Distribution parameters are rule-of-thumb chosen. - std::mt19937 gen_{std::random_device{}()}; - std::lognormal_distribution<> rand_{0.0, 1.11}; -}; - -} // namespace communication::rpc diff --git a/src/communication/rpc/client_pool.hpp b/src/communication/rpc/client_pool.hpp deleted file mode 100644 index bfd609abc..000000000 --- a/src/communication/rpc/client_pool.hpp +++ /dev/null @@ -1,68 +0,0 @@ -#pragma once - -#include <mutex> -#include <stack> - -#include "communication/rpc/client.hpp" - -namespace communication::rpc { - -/** - * A simple client pool that creates new RPC clients on demand. Useful when you - * want to send RPCs to the same server from multiple threads without them - * blocking each other. - */ -class ClientPool { - public: - explicit ClientPool(const io::network::Endpoint &endpoint) - : endpoint_(endpoint) {} - - template <class TRequestResponse, class... Args> - std::experimental::optional<typename TRequestResponse::Response> Call( - Args &&... args) { - return WithUnusedClient([&](const auto &client) { - return client->template Call<TRequestResponse>( - std::forward<Args>(args)...); - }); - }; - - template <class TRequestResponse, class... 
Args> - std::experimental::optional<typename TRequestResponse::Response> CallWithLoad( - std::function<typename TRequestResponse::Response( - const typename TRequestResponse::Response::Capnp::Reader &)> - load, - Args &&... args) { - return WithUnusedClient([&](const auto &client) { - return client->template CallWithLoad<TRequestResponse>( - load, std::forward<Args>(args)...); - }); - }; - - private: - template <class TFun> - auto WithUnusedClient(const TFun &fun) { - std::unique_ptr<Client> client; - - std::unique_lock<std::mutex> lock(mutex_); - if (unused_clients_.empty()) { - client = std::make_unique<Client>(endpoint_); - } else { - client = std::move(unused_clients_.top()); - unused_clients_.pop(); - } - lock.unlock(); - - auto res = fun(client); - - lock.lock(); - unused_clients_.push(std::move(client)); - return res; - } - - io::network::Endpoint endpoint_; - - std::mutex mutex_; - std::stack<std::unique_ptr<Client>> unused_clients_; -}; - -} // namespace communication::rpc diff --git a/src/communication/rpc/messages.capnp b/src/communication/rpc/messages.capnp deleted file mode 100644 index 507d52148..000000000 --- a/src/communication/rpc/messages.capnp +++ /dev/null @@ -1,9 +0,0 @@ -@0xd3832c9a1a3d8ec7; - -using Cxx = import "/capnp/c++.capnp"; -$Cxx.namespace("communication::rpc::capnp"); - -struct Message { - typeId @0 :UInt64; - data @1 :AnyPointer; -} diff --git a/src/communication/rpc/messages.hpp b/src/communication/rpc/messages.hpp deleted file mode 100644 index 982f9896e..000000000 --- a/src/communication/rpc/messages.hpp +++ /dev/null @@ -1,54 +0,0 @@ -#pragma once - -#include <cstdint> -#include <memory> - -namespace communication::rpc { - -using MessageSize = uint32_t; - -/// Type information on a RPC message. -/// Each message should have a static member `TypeInfo` with this information. -struct MessageType { - /// Unique ID for a message. - uint64_t id; - /// Pretty name of the type. - std::string name; -}; - -inline bool operator==(const MessageType &a, const MessageType &b) { - return a.id == b.id; -} -inline bool operator!=(const MessageType &a, const MessageType &b) { - return a.id != b.id; -} -inline bool operator<(const MessageType &a, const MessageType &b) { - return a.id < b.id; -} -inline bool operator<=(const MessageType &a, const MessageType &b) { - return a.id <= b.id; -} -inline bool operator>(const MessageType &a, const MessageType &b) { - return a.id > b.id; -} -inline bool operator>=(const MessageType &a, const MessageType &b) { - return a.id >= b.id; -} - -/// Each RPC is defined via this struct. -/// -/// `TRequest` and `TResponse` are required to be classes which have a static -/// member `TypeInfo` of `MessageType` type. This is used for proper -/// registration and deserialization of RPC types. Additionally, both `TRequest` -/// and `TResponse` are required to define a nested `Capnp` type, which -/// corresponds to the Cap'n Proto schema type, as well as defined the following -/// serialization functions: -/// * void Save(Capnp::Builder *, ...) const -/// * void Load(const Capnp::Reader &, ...) 
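Purely as a sketch of the contract described above (in the real codebase these message types are generated from .lcp definitions; `EchoReq`/`EchoRes` and their Cap'n Proto schema are made up for illustration, and `TypeInfo` would still need an out-of-line definition with a unique id):

struct EchoReq {
  // Hypothetical generated schema type for: struct EchoReq { x @0 :Int64; }
  using Capnp = capnp::EchoReq;
  static const communication::rpc::MessageType TypeInfo;

  EchoReq() {}
  explicit EchoReq(int64_t x) : x(x) {}

  void Save(Capnp::Builder *builder) const { builder->setX(x); }
  void Load(const Capnp::Reader &reader) { x = reader.getX(); }

  int64_t x = 0;
};

struct EchoRes {
  // Hypothetical generated schema type for: struct EchoRes { x @0 :Int64; }
  using Capnp = capnp::EchoRes;
  static const communication::rpc::MessageType TypeInfo;

  EchoRes() {}
  explicit EchoRes(int64_t x) : x(x) {}

  void Save(Capnp::Builder *builder) const { builder->setX(x); }
  void Load(const Capnp::Reader &reader) { x = reader.getX(); }

  int64_t x = 0;
};

using EchoRpc = communication::rpc::RequestResponse<EchoReq, EchoRes>;

A server would then call `server.Register<EchoRpc>(...)` with a callback taking an `EchoReq::Capnp::Reader` and an `EchoRes::Capnp::Builder *`, and a client would call `client.Call<EchoRpc>(42)`, much as the removed CountersGetRpc/CountersSetRpc code further down does.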
-template <typename TRequest, typename TResponse> -struct RequestResponse { - using Request = TRequest; - using Response = TResponse; -}; - -} // namespace communication::rpc diff --git a/src/communication/rpc/protocol.cpp b/src/communication/rpc/protocol.cpp deleted file mode 100644 index f05788d49..000000000 --- a/src/communication/rpc/protocol.cpp +++ /dev/null @@ -1,77 +0,0 @@ -#include <sstream> - -#include "capnp/message.h" -#include "capnp/serialize.h" -#include "fmt/format.h" - -#include "communication/rpc/messages.capnp.h" -#include "communication/rpc/messages.hpp" -#include "communication/rpc/protocol.hpp" -#include "communication/rpc/server.hpp" -#include "utils/demangle.hpp" - -namespace communication::rpc { - -Session::Session(Server &server, communication::InputStream &input_stream, - communication::OutputStream &output_stream) - : server_(server), - input_stream_(input_stream), - output_stream_(output_stream) {} - -void Session::Execute() { - if (input_stream_.size() < sizeof(MessageSize)) return; - MessageSize request_len = - *reinterpret_cast<MessageSize *>(input_stream_.data()); - uint64_t request_size = sizeof(MessageSize) + request_len; - input_stream_.Resize(request_size); - if (input_stream_.size() < request_size) return; - - // Read the request message. - auto data = - ::kj::arrayPtr(input_stream_.data() + sizeof(request_len), request_len); - // Our data is word aligned and padded to 64bit because we use regular - // (non-packed) serialization of Cap'n Proto. So we can use reinterpret_cast. - auto data_words = - ::kj::arrayPtr(reinterpret_cast<::capnp::word *>(data.begin()), - reinterpret_cast<::capnp::word *>(data.end())); - ::capnp::FlatArrayMessageReader request_message(data_words.asConst()); - auto request = request_message.getRoot<capnp::Message>(); - input_stream_.Shift(sizeof(MessageSize) + request_len); - - auto callbacks_accessor = server_.callbacks_.access(); - auto it = callbacks_accessor.find(request.getTypeId()); - if (it == callbacks_accessor.end()) { - // Throw exception to close the socket and cleanup the session. 
- throw SessionException( - "Session trying to execute an unregistered RPC call!"); - } - - VLOG(12) << "[RpcServer] received " << it->second.req_type.name; - - ::capnp::MallocMessageBuilder response_message; - // callback fills the message data - auto response_builder = response_message.initRoot<capnp::Message>(); - it->second.callback(request, &response_builder); - - // Serialize and send response - auto response_words = ::capnp::messageToFlatArray(response_message); - auto response_bytes = response_words.asBytes(); - if (response_bytes.size() > std::numeric_limits<MessageSize>::max()) { - throw SessionException(fmt::format( - "Trying to send response of size {}, max response size is {}", - response_bytes.size(), std::numeric_limits<MessageSize>::max())); - } - - MessageSize input_stream_size = response_bytes.size(); - if (!output_stream_.Write(reinterpret_cast<uint8_t *>(&input_stream_size), - sizeof(MessageSize), true)) { - throw SessionException("Couldn't send response size!"); - } - if (!output_stream_.Write(response_bytes.begin(), response_bytes.size())) { - throw SessionException("Couldn't send response data!"); - } - - VLOG(12) << "[RpcServer] sent " << it->second.res_type.name; -} - -} // namespace communication::rpc diff --git a/src/communication/rpc/protocol.hpp b/src/communication/rpc/protocol.hpp deleted file mode 100644 index cbaacc7e3..000000000 --- a/src/communication/rpc/protocol.hpp +++ /dev/null @@ -1,55 +0,0 @@ -#pragma once - -#include <chrono> -#include <cstdint> -#include <memory> - -#include "communication/rpc/messages.hpp" -#include "communication/session.hpp" - -/** - * @brief Protocol - * - * Has classes and functions that implement the server side of our - * RPC protocol. - * - * Message layout: MessageSize message_size, - * message_size bytes serialized_message - */ -namespace communication::rpc { - -// Forward declaration of class Server -class Server; - -/** - * This class is thrown when the Session wants to indicate that a fatal error - * occured during execution. - */ -class SessionException : public utils::BasicException { - using utils::BasicException::BasicException; -}; - -/** - * Distributed Protocol Session - * - * This class is responsible for handling a single client connection. - */ -class Session { - public: - Session(Server &server, communication::InputStream &input_stream, - communication::OutputStream &output_stream); - - /** - * Executes the protocol after data has been read into the stream. - * Goes through the protocol states in order to execute commands from the - * client. 
- */ - void Execute(); - - private: - Server &server_; - communication::InputStream &input_stream_; - communication::OutputStream &output_stream_; -}; - -} // namespace communication::rpc diff --git a/src/communication/rpc/server.cpp b/src/communication/rpc/server.cpp deleted file mode 100644 index e0c697863..000000000 --- a/src/communication/rpc/server.cpp +++ /dev/null @@ -1,17 +0,0 @@ -#include "communication/rpc/server.hpp" - -namespace communication::rpc { - -Server::Server(const io::network::Endpoint &endpoint, - size_t workers_count) - : server_(endpoint, *this, &context_, -1, "RPC", workers_count) {} - -void Server::StopProcessingCalls() { - server_.Shutdown(); - server_.AwaitShutdown(); -} - -const io::network::Endpoint &Server::endpoint() const { - return server_.endpoint(); -} -} // namespace communication::rpc diff --git a/src/communication/rpc/server.hpp b/src/communication/rpc/server.hpp deleted file mode 100644 index 3a28efb01..000000000 --- a/src/communication/rpc/server.hpp +++ /dev/null @@ -1,86 +0,0 @@ -#pragma once - -#include <unordered_map> -#include <vector> - -#include "capnp/any.h" - -#include "communication/rpc/messages.capnp.h" -#include "communication/rpc/messages.hpp" -#include "communication/rpc/protocol.hpp" -#include "communication/server.hpp" -#include "data_structures/concurrent/concurrent_map.hpp" -#include "data_structures/queue.hpp" -#include "io/network/endpoint.hpp" -#include "utils/demangle.hpp" - -namespace communication::rpc { - -class Server { - public: - Server(const io::network::Endpoint &endpoint, - size_t workers_count = std::thread::hardware_concurrency()); - Server(const Server &) = delete; - Server(Server &&) = delete; - Server &operator=(const Server &) = delete; - Server &operator=(Server &&) = delete; - - void StopProcessingCalls(); - - const io::network::Endpoint &endpoint() const; - - template <class TRequestResponse> - void Register(std::function< - void(const typename TRequestResponse::Request::Capnp::Reader &, - typename TRequestResponse::Response::Capnp::Builder *)> - callback) { - RpcCallback rpc; - rpc.req_type = TRequestResponse::Request::TypeInfo; - rpc.res_type = TRequestResponse::Response::TypeInfo; - rpc.callback = [callback = callback](const auto &reader, auto *builder) { - auto req_data = - reader.getData() - .template getAs<typename TRequestResponse::Request::Capnp>(); - builder->setTypeId(TRequestResponse::Response::TypeInfo.id); - auto data_builder = builder->initData(); - auto res_builder = - data_builder - .template initAs<typename TRequestResponse::Response::Capnp>(); - callback(req_data, &res_builder); - }; - auto callbacks_accessor = callbacks_.access(); - auto got = - callbacks_accessor.insert(TRequestResponse::Request::TypeInfo.id, rpc); - CHECK(got.second) << "Callback for that message type already registered"; - VLOG(12) << "[RpcServer] register " << rpc.req_type.name << " -> " - << rpc.res_type.name; - } - - template <typename TRequestResponse> - void UnRegister() { - const MessageType &type = TRequestResponse::Request::TypeInfo; - auto callbacks_accessor = callbacks_.access(); - auto deleted = callbacks_accessor.remove(type.id); - CHECK(deleted) << "Trying to remove unknown message type callback"; - } - - private: - friend class Session; - - struct RpcCallback { - MessageType req_type; - std::function<void(const capnp::Message::Reader &, - capnp::Message::Builder *)> - callback; - MessageType res_type; - }; - - ConcurrentMap<uint64_t, RpcCallback> callbacks_; - - std::mutex mutex_; - // TODO (mferencevic): 
currently the RPC server is hardcoded not to use SSL - communication::ServerContext context_; - communication::Server<Session, Server> server_; -}; // namespace communication::rpc - -} // namespace communication::rpc diff --git a/src/database/config.cpp b/src/database/config.cpp index 27353e2a6..0285b643b 100644 --- a/src/database/config.cpp +++ b/src/database/config.cpp @@ -32,41 +32,6 @@ DEFINE_string(properties_on_disk, "", "Property names of properties which will be stored on available " "disk. Property names have to be separated with comma (,)."); -#ifndef MG_COMMUNITY -// Distributed master/worker flags. -DEFINE_VALIDATED_HIDDEN_int32(worker_id, 0, - "ID of a worker in a distributed system. Igored " - "in single-node.", - FLAG_IN_RANGE(0, 1 << gid::kWorkerIdSize)); -DEFINE_HIDDEN_string(master_host, "0.0.0.0", - "For master node indicates the host served on. For worker " - "node indicates the master location."); -DEFINE_VALIDATED_HIDDEN_int32( - master_port, 0, - "For master node the port on which to serve. For " - "worker node indicates the master's port.", - FLAG_IN_RANGE(0, std::numeric_limits<uint16_t>::max())); -DEFINE_HIDDEN_string(worker_host, "0.0.0.0", - "For worker node indicates the host served on. For master " - "node this flag is not used."); -DEFINE_VALIDATED_HIDDEN_int32( - worker_port, 0, - "For master node it's unused. For worker node " - "indicates the port on which to serve. If zero (default value), a port is " - "chosen at random. Sent to the master when registring worker node.", - FLAG_IN_RANGE(0, std::numeric_limits<uint16_t>::max())); -DEFINE_VALIDATED_HIDDEN_int32(rpc_num_workers, - std::max(std::thread::hardware_concurrency(), 1U), - "Number of workers (RPC)", - FLAG_IN_RANGE(1, INT32_MAX)); -DEFINE_VALIDATED_int32(recovering_cluster_size, 0, - "Number of workers (including master) in the " - "previously snapshooted/wal cluster.", - FLAG_IN_RANGE(0, INT32_MAX)); -DEFINE_bool(dynamic_graph_partitioner_enabled, false, - "If the dynamic graph partitioner should be enabled."); -#endif - // clang-format off database::Config::Config() // Durability flags. @@ -81,17 +46,5 @@ database::Config::Config() query_execution_time_sec{FLAGS_query_execution_time_sec}, // Data location. properties_on_disk(utils::Split(FLAGS_properties_on_disk, ",")) -#ifndef MG_COMMUNITY - , - // Distributed flags. 
- dynamic_graph_partitioner_enabled{FLAGS_dynamic_graph_partitioner_enabled}, - rpc_num_workers{FLAGS_rpc_num_workers}, - worker_id{FLAGS_worker_id}, - master_endpoint{FLAGS_master_host, - static_cast<uint16_t>(FLAGS_master_port)}, - worker_endpoint{FLAGS_worker_host, - static_cast<uint16_t>(FLAGS_worker_port)}, - recovering_cluster_size{FLAGS_recovering_cluster_size} -#endif {} // clang-format on diff --git a/src/database/counters.cpp b/src/database/counters.cpp index 97814b31d..d392616d7 100644 --- a/src/database/counters.cpp +++ b/src/database/counters.cpp @@ -1,7 +1,5 @@ #include "database/counters.hpp" -#include "database/counters_rpc_messages.hpp" - namespace database { int64_t SingleNodeCounters::Get(const std::string &name) { @@ -16,33 +14,4 @@ void SingleNodeCounters::Set(const std::string &name, int64_t value) { if (!name_counter_pair.second) name_counter_pair.first->second.store(value); } -MasterCounters::MasterCounters(communication::rpc::Server &server) - : rpc_server_(server) { - rpc_server_.Register<CountersGetRpc>( - [this](const auto &req_reader, auto *res_builder) { - CountersGetRes res(Get(req_reader.getName())); - res.Save(res_builder); - }); - rpc_server_.Register<CountersSetRpc>( - [this](const auto &req_reader, auto *res_builder) { - Set(req_reader.getName(), req_reader.getValue()); - return std::make_unique<CountersSetRes>(); - }); -} - -WorkerCounters::WorkerCounters( - communication::rpc::ClientPool &master_client_pool) - : master_client_pool_(master_client_pool) {} - -int64_t WorkerCounters::Get(const std::string &name) { - auto response = master_client_pool_.Call<CountersGetRpc>(name); - CHECK(response) << "CountersGetRpc failed"; - return response->value; -} - -void WorkerCounters::Set(const std::string &name, int64_t value) { - auto response = master_client_pool_.Call<CountersSetRpc>(name, value); - CHECK(response) << "CountersSetRpc failed"; -} - } // namespace database diff --git a/src/database/counters.hpp b/src/database/counters.hpp index 1da76d7bd..e125498f2 100644 --- a/src/database/counters.hpp +++ b/src/database/counters.hpp @@ -4,8 +4,6 @@ #include <cstdint> #include <string> -#include "communication/rpc/client_pool.hpp" -#include "communication/rpc/server.hpp" #include "data_structures/concurrent/concurrent_map.hpp" namespace database { @@ -41,25 +39,4 @@ class SingleNodeCounters : public Counters { ConcurrentMap<std::string, std::atomic<int64_t>> counters_; }; -/** Implementation for distributed master. */ -class MasterCounters : public SingleNodeCounters { - public: - explicit MasterCounters(communication::rpc::Server &server); - - private: - communication::rpc::Server &rpc_server_; -}; - -/** Implementation for distributed worker. 
*/ -class WorkerCounters : public Counters { - public: - explicit WorkerCounters(communication::rpc::ClientPool &master_client_pool); - - int64_t Get(const std::string &name) override; - void Set(const std::string &name, int64_t value) override; - - private: - communication::rpc::ClientPool &master_client_pool_; -}; - } // namespace database diff --git a/src/database/counters_rpc_messages.lcp b/src/database/counters_rpc_messages.lcp deleted file mode 100644 index 9b1834b83..000000000 --- a/src/database/counters_rpc_messages.lcp +++ /dev/null @@ -1,23 +0,0 @@ -#>cpp -#pragma once - -#include <string> - -#include "communication/rpc/messages.hpp" -#include "database/counters_rpc_messages.capnp.h" -cpp<# - -(lcp:namespace database) - -(lcp:capnp-namespace "database") - -(lcp:define-rpc counters-get - (:request ((name "std::string"))) - (:response ((value :int64_t)))) - -(lcp:define-rpc counters-set - (:request ((name "std::string") - (value :int64_t))) - (:response ())) - -(lcp:pop-namespace) ;; database diff --git a/src/database/graph_db.cpp b/src/database/graph_db.cpp index fa833e4c2..36abeb18d 100644 --- a/src/database/graph_db.cpp +++ b/src/database/graph_db.cpp @@ -2,41 +2,14 @@ #include "glog/logging.h" -#include "communication/rpc/server.hpp" #include "database/graph_db.hpp" -#include "database/storage_gc_master.hpp" +#include "database/graph_db_accessor.hpp" #include "database/storage_gc_single_node.hpp" -#include "database/storage_gc_worker.hpp" -#include "distributed/bfs_rpc_clients.hpp" -#include "distributed/bfs_rpc_server.hpp" -#include "distributed/cluster_discovery_master.hpp" -#include "distributed/cluster_discovery_worker.hpp" -#include "distributed/coordination_master.hpp" -#include "distributed/coordination_worker.hpp" -#include "distributed/data_manager.hpp" -#include "distributed/data_rpc_clients.hpp" -#include "distributed/data_rpc_server.hpp" -#include "distributed/durability_rpc_clients.hpp" -#include "distributed/durability_rpc_messages.hpp" -#include "distributed/durability_rpc_server.hpp" -#include "distributed/index_rpc_server.hpp" -#include "distributed/plan_consumer.hpp" -#include "distributed/plan_dispatcher.hpp" -#include "distributed/produce_rpc_server.hpp" -#include "distributed/pull_rpc_clients.hpp" -#include "distributed/token_sharing_rpc_server.hpp" -#include "distributed/transactional_cache_cleaner.hpp" -#include "distributed/updates_rpc_clients.hpp" -#include "distributed/updates_rpc_server.hpp" #include "durability/paths.hpp" #include "durability/recovery.hpp" #include "durability/snapshooter.hpp" -#include "storage/concurrent_id_mapper_master.hpp" #include "storage/concurrent_id_mapper_single_node.hpp" -#include "storage/concurrent_id_mapper_worker.hpp" -#include "transactions/engine_master.hpp" #include "transactions/engine_single_node.hpp" -#include "transactions/engine_worker.hpp" #include "utils/file.hpp" #include "utils/flag_validation.hpp" @@ -44,6 +17,7 @@ using namespace std::literals::chrono_literals; using namespace storage; namespace database { + namespace impl { class PrivateBase : public GraphDb { @@ -76,22 +50,6 @@ class PrivateBase : public GraphDb { std::make_unique<Storage>(WorkerId(), config_.properties_on_disk); } - distributed::PullRpcClients &pull_clients() override { - LOG(FATAL) << "Remote pull clients only available in master."; - } - distributed::ProduceRpcServer &produce_server() override { - LOG(FATAL) << "Remote produce server only available in worker."; - } - distributed::PlanConsumer &plan_consumer() override { - LOG(FATAL) 
<< "Plan consumer only available in distributed worker."; - } - distributed::PlanDispatcher &plan_dispatcher() override { - LOG(FATAL) << "Plan dispatcher only available in distributed master."; - } - distributed::IndexRpcClients &index_rpc_clients() override { - LOG(FATAL) << "Index RPC clients only available in distributed master."; - } - protected: std::unique_ptr<Storage> storage_ = std::make_unique<Storage>(config_.worker_id, config_.properties_on_disk); @@ -128,7 +86,6 @@ struct TypemapPack { class SingleNode : public PrivateBase { public: explicit SingleNode(const Config &config) : PrivateBase(config) {} - GraphDb::Type type() const override { return GraphDb::Type::SINGLE_NODE; } IMPL_GETTERS tx::SingleNodeEngine tx_engine_{&wal_}; @@ -139,33 +96,6 @@ class SingleNode : public PrivateBase { storage_->PropertiesOnDisk()}; database::SingleNodeCounters counters_; std::vector<int> GetWorkerIds() const override { return {0}; } - distributed::BfsRpcServer &bfs_subcursor_server() override { - LOG(FATAL) << "Subcursor server not available in single-node."; - } - distributed::BfsRpcClients &bfs_subcursor_clients() override { - LOG(FATAL) << "Subcursor clients not available in single-node."; - } - distributed::DataRpcServer &data_server() override { - LOG(FATAL) << "Remote data server not available in single-node."; - } - distributed::DataRpcClients &data_clients() override { - LOG(FATAL) << "Remote data clients not available in single-node."; - } - distributed::PlanDispatcher &plan_dispatcher() override { - LOG(FATAL) << "Plan Dispatcher not available in single-node."; - } - distributed::PlanConsumer &plan_consumer() override { - LOG(FATAL) << "Plan Consumer not available in single-node."; - } - distributed::UpdatesRpcServer &updates_server() override { - LOG(FATAL) << "Remote updates server not available in single-node."; - } - distributed::UpdatesRpcClients &updates_clients() override { - LOG(FATAL) << "Remote updates clients not available in single-node."; - } - distributed::DataManager &data_manager() override { - LOG(FATAL) << "Remote data manager not available in single-node."; - } void ReinitializeStorage() override { // Release gc scheduler to stop it from touching storage storage_gc_ = nullptr; @@ -175,170 +105,6 @@ class SingleNode : public PrivateBase { } }; -#define IMPL_DISTRIBUTED_GETTERS \ - std::vector<int> GetWorkerIds() const override { \ - return coordination_.GetWorkerIds(); \ - } \ - distributed::BfsRpcServer &bfs_subcursor_server() override { \ - return bfs_subcursor_server_; \ - } \ - distributed::BfsRpcClients &bfs_subcursor_clients() override { \ - return bfs_subcursor_clients_; \ - } \ - distributed::DataRpcServer &data_server() override { return data_server_; } \ - distributed::DataRpcClients &data_clients() override { \ - return data_clients_; \ - } \ - distributed::UpdatesRpcServer &updates_server() override { \ - return updates_server_; \ - } \ - distributed::UpdatesRpcClients &updates_clients() override { \ - return updates_clients_; \ - } \ - distributed::DataManager &data_manager() override { return data_manager_; } - -class Master : public PrivateBase { - public: - explicit Master(const Config &config) : PrivateBase(config) {} - - GraphDb::Type type() const override { - return GraphDb::Type::DISTRIBUTED_MASTER; - } - - // Makes a local snapshot and forces the workers to do the same. 
Snapshot is - // written here only if workers sucesfully created their own snapshot - bool MakeSnapshot(GraphDbAccessor &accessor) override { - auto workers_snapshot = - durability_rpc_clients_.MakeSnapshot(accessor.transaction_id()); - if (!workers_snapshot.get()) return false; - // This can be further optimized by creating master snapshot at the same - // time as workers snapshots but this forces us to delete the master - // snapshot if we succeed in creating it and workers somehow fail. Because - // we have an assumption that every snapshot that exists on master with some - // tx_id visibility also exists on workers - return PrivateBase::MakeSnapshot(accessor); - } - - IMPL_GETTERS - IMPL_DISTRIBUTED_GETTERS - distributed::PlanDispatcher &plan_dispatcher() override { - return plan_dispatcher_; - } - distributed::PullRpcClients &pull_clients() override { return pull_clients_; } - distributed::IndexRpcClients &index_rpc_clients() override { - return index_rpc_clients_; - } - - void ReinitializeStorage() override { - // Release gc scheduler to stop it from touching storage - storage_gc_ = nullptr; - PrivateBase::ReinitializeStorage(); - storage_gc_ = std::make_unique<StorageGcMaster>( - *storage_, tx_engine_, config_.gc_cycle_sec, server_, coordination_); - } - - communication::rpc::Server server_{ - config_.master_endpoint, static_cast<size_t>(config_.rpc_num_workers)}; - tx::MasterEngine tx_engine_{server_, rpc_worker_clients_, &wal_}; - distributed::MasterCoordination coordination_{server_.endpoint()}; - std::unique_ptr<StorageGcMaster> storage_gc_ = - std::make_unique<StorageGcMaster>( - *storage_, tx_engine_, config_.gc_cycle_sec, server_, coordination_); - distributed::RpcWorkerClients rpc_worker_clients_{coordination_}; - TypemapPack<MasterConcurrentIdMapper> typemap_pack_{server_}; - database::MasterCounters counters_{server_}; - distributed::BfsSubcursorStorage subcursor_storage_{this}; - distributed::BfsRpcServer bfs_subcursor_server_{this, &server_, - &subcursor_storage_}; - distributed::BfsRpcClients bfs_subcursor_clients_{this, &subcursor_storage_, - &rpc_worker_clients_}; - distributed::DurabilityRpcClients durability_rpc_clients_{ - rpc_worker_clients_}; - distributed::DataRpcServer data_server_{*this, server_}; - distributed::DataRpcClients data_clients_{rpc_worker_clients_}; - distributed::PlanDispatcher plan_dispatcher_{rpc_worker_clients_}; - distributed::PullRpcClients pull_clients_{rpc_worker_clients_}; - distributed::IndexRpcClients index_rpc_clients_{rpc_worker_clients_}; - distributed::UpdatesRpcServer updates_server_{*this, server_}; - distributed::UpdatesRpcClients updates_clients_{rpc_worker_clients_}; - distributed::DataManager data_manager_{*this, data_clients_}; - distributed::TransactionalCacheCleaner cache_cleaner_{ - tx_engine_, updates_server_, data_manager_}; - distributed::ClusterDiscoveryMaster cluster_discovery_{server_, coordination_, - rpc_worker_clients_}; - distributed::TokenSharingRpcClients token_sharing_clients_{ - &rpc_worker_clients_}; - distributed::TokenSharingRpcServer token_sharing_server_{ - this, config_.worker_id, &coordination_, &server_, - &token_sharing_clients_}; -}; - -class Worker : public PrivateBase { - public: - explicit Worker(const Config &config) : PrivateBase(config) { - cluster_discovery_.RegisterWorker(config.worker_id); - } - - GraphDb::Type type() const override { - return GraphDb::Type::DISTRIBUTED_WORKER; - } - IMPL_GETTERS - IMPL_DISTRIBUTED_GETTERS - distributed::PlanConsumer &plan_consumer() override { return 
plan_consumer_; } - distributed::ProduceRpcServer &produce_server() override { - return produce_server_; - } - - void ReinitializeStorage() override { - // Release gc scheduler to stop it from touching storage - storage_gc_ = nullptr; - PrivateBase::ReinitializeStorage(); - storage_gc_ = std::make_unique<StorageGcWorker>( - *storage_, tx_engine_, config_.gc_cycle_sec, - rpc_worker_clients_.GetClientPool(0), config_.worker_id); - } - - communication::rpc::Server server_{ - config_.worker_endpoint, static_cast<size_t>(config_.rpc_num_workers)}; - distributed::WorkerCoordination coordination_{server_, - config_.master_endpoint}; - distributed::RpcWorkerClients rpc_worker_clients_{coordination_}; - tx::WorkerEngine tx_engine_{rpc_worker_clients_.GetClientPool(0)}; - std::unique_ptr<StorageGcWorker> storage_gc_ = - std::make_unique<StorageGcWorker>( - *storage_, tx_engine_, config_.gc_cycle_sec, - rpc_worker_clients_.GetClientPool(0), config_.worker_id); - TypemapPack<WorkerConcurrentIdMapper> typemap_pack_{ - rpc_worker_clients_.GetClientPool(0)}; - database::WorkerCounters counters_{rpc_worker_clients_.GetClientPool(0)}; - distributed::BfsSubcursorStorage subcursor_storage_{this}; - distributed::BfsRpcServer bfs_subcursor_server_{this, &server_, - &subcursor_storage_}; - distributed::BfsRpcClients bfs_subcursor_clients_{this, &subcursor_storage_, - &rpc_worker_clients_}; - distributed::DataRpcServer data_server_{*this, server_}; - distributed::DataRpcClients data_clients_{rpc_worker_clients_}; - distributed::PlanConsumer plan_consumer_{server_}; - distributed::ProduceRpcServer produce_server_{*this, tx_engine_, server_, - plan_consumer_}; - distributed::IndexRpcServer index_rpc_server_{*this, server_}; - distributed::UpdatesRpcServer updates_server_{*this, server_}; - distributed::UpdatesRpcClients updates_clients_{rpc_worker_clients_}; - distributed::DataManager data_manager_{*this, data_clients_}; - distributed::WorkerTransactionalCacheCleaner cache_cleaner_{ - tx_engine_, server_, produce_server_, updates_server_, data_manager_}; - distributed::DurabilityRpcServer durability_rpc_server_{*this, server_}; - distributed::ClusterDiscoveryWorker cluster_discovery_{ - server_, coordination_, rpc_worker_clients_.GetClientPool(0)}; - distributed::TokenSharingRpcClients token_sharing_clients_{ - &rpc_worker_clients_}; - distributed::TokenSharingRpcServer token_sharing_server_{ - this, config_.worker_id, &coordination_, &server_, - &token_sharing_clients_}; -}; - -#undef IMPL_GETTERS - PublicBase::PublicBase(std::unique_ptr<PrivateBase> impl) : impl_(std::move(impl)) { if (impl_->config_.durability_enabled) @@ -346,61 +112,18 @@ PublicBase::PublicBase(std::unique_ptr<PrivateBase> impl) // Durability recovery. { - auto db_type = impl_->type(); - // What we should recover. std::experimental::optional<durability::RecoveryInfo> required_recovery_info; - if (db_type == Type::DISTRIBUTED_WORKER) { - required_recovery_info = dynamic_cast<impl::Worker *>(impl_.get()) - ->cluster_discovery_.recovery_info(); - } // What we recover. std::experimental::optional<durability::RecoveryInfo> recovery_info; // Recover only if necessary. - if ((db_type != Type::DISTRIBUTED_WORKER && - impl_->config_.db_recover_on_startup) || - (db_type == Type::DISTRIBUTED_WORKER && required_recovery_info)) { + if (impl_->config_.db_recover_on_startup) { recovery_info = durability::Recover(impl_->config_.durability_directory, *impl_, required_recovery_info); } - - // Post-recovery setup and checking. 
- switch (db_type) { - case Type::DISTRIBUTED_MASTER: - dynamic_cast<impl::Master *>(impl_.get()) - ->coordination_.SetRecoveryInfo(recovery_info); - if (recovery_info) { - CHECK(impl_->config_.recovering_cluster_size > 0) - << "Invalid cluster recovery size flag. Recovered cluster size " - "should be at least 1"; - while (dynamic_cast<impl::Master *>(impl_.get()) - ->coordination_.CountRecoveredWorkers() != - impl_->config_.recovering_cluster_size - 1) { - LOG(INFO) << "Waiting for workers to finish recovering.."; - std::this_thread::sleep_for(2s); - } - } - - // Start the dynamic graph partitioner inside token sharing server - if (impl_->config_.dynamic_graph_partitioner_enabled) { - dynamic_cast<impl::Master *>(impl_.get()) - ->token_sharing_server_.StartTokenSharing(); - } - - break; - case Type::DISTRIBUTED_WORKER: - if (required_recovery_info != recovery_info) - LOG(FATAL) << "Memgraph worker failed to recover the database state " - "recovered on the master"; - dynamic_cast<impl::Worker *>(impl_.get()) - ->cluster_discovery_.NotifyWorkerRecovered(); - break; - case Type::SINGLE_NODE: - break; - } } if (impl_->config_.durability_enabled) { @@ -434,14 +157,12 @@ PublicBase::~PublicBase() { // If we are not a worker we can do a snapshot on exit if it's enabled. Doing // this on the master forces workers to do the same through rpcs - if (impl_->config_.snapshot_on_exit && - impl_->type() != Type::DISTRIBUTED_WORKER) { + if (impl_->config_.snapshot_on_exit) { GraphDbAccessor dba(*this); MakeSnapshot(dba); } } -GraphDb::Type PublicBase::type() const { return impl_->type(); } Storage &PublicBase::storage() { return impl_->storage(); } durability::WriteAheadLog &PublicBase::wal() { return impl_->wal(); } tx::Engine &PublicBase::tx_engine() { return impl_->tx_engine(); } @@ -460,42 +181,6 @@ int PublicBase::WorkerId() const { return impl_->WorkerId(); } std::vector<int> PublicBase::GetWorkerIds() const { return impl_->GetWorkerIds(); } -distributed::BfsRpcServer &PublicBase::bfs_subcursor_server() { - return impl_->bfs_subcursor_server(); -} -distributed::BfsRpcClients &PublicBase::bfs_subcursor_clients() { - return impl_->bfs_subcursor_clients(); -} -distributed::DataRpcServer &PublicBase::data_server() { - return impl_->data_server(); -} -distributed::DataRpcClients &PublicBase::data_clients() { - return impl_->data_clients(); -} -distributed::PlanDispatcher &PublicBase::plan_dispatcher() { - return impl_->plan_dispatcher(); -} -distributed::IndexRpcClients &PublicBase::index_rpc_clients() { - return impl_->index_rpc_clients(); -} -distributed::PlanConsumer &PublicBase::plan_consumer() { - return impl_->plan_consumer(); -} -distributed::PullRpcClients &PublicBase::pull_clients() { - return impl_->pull_clients(); -} -distributed::ProduceRpcServer &PublicBase::produce_server() { - return impl_->produce_server(); -} -distributed::UpdatesRpcServer &PublicBase::updates_server() { - return impl_->updates_server(); -} -distributed::UpdatesRpcClients &PublicBase::updates_clients() { - return impl_->updates_clients(); -} -distributed::DataManager &PublicBase::data_manager() { - return impl_->data_manager(); -} bool PublicBase::MakeSnapshot(GraphDbAccessor &accessor) { return impl_->MakeSnapshot(accessor); @@ -524,31 +209,4 @@ MasterBase::~MasterBase() { snapshot_creator_ = nullptr; } SingleNode::SingleNode(Config config) : MasterBase(std::make_unique<impl::SingleNode>(config)) {} -Master::Master(Config config) - : MasterBase(std::make_unique<impl::Master>(config)) {} - -io::network::Endpoint 
Master::endpoint() const { - return dynamic_cast<impl::Master *>(impl_.get())->server_.endpoint(); -} - -io::network::Endpoint Master::GetEndpoint(int worker_id) { - return dynamic_cast<impl::Master *>(impl_.get()) - ->coordination_.GetEndpoint(worker_id); -} - -Worker::Worker(Config config) - : PublicBase(std::make_unique<impl::Worker>(config)) {} - -io::network::Endpoint Worker::endpoint() const { - return dynamic_cast<impl::Worker *>(impl_.get())->server_.endpoint(); -} - -io::network::Endpoint Worker::GetEndpoint(int worker_id) { - return dynamic_cast<impl::Worker *>(impl_.get()) - ->coordination_.GetEndpoint(worker_id); -} - -void Worker::WaitForShutdown() { - dynamic_cast<impl::Worker *>(impl_.get())->coordination_.WaitForShutdown(); -} } // namespace database diff --git a/src/database/graph_db.hpp b/src/database/graph_db.hpp index a5de5bab0..cb0e167bf 100644 --- a/src/database/graph_db.hpp +++ b/src/database/graph_db.hpp @@ -14,21 +14,6 @@ #include "transactions/engine.hpp" #include "utils/scheduler.hpp" -namespace distributed { -class BfsRpcServer; -class BfsRpcClients; -class DataRpcServer; -class DataRpcClients; -class PlanDispatcher; -class PlanConsumer; -class PullRpcClients; -class ProduceRpcServer; -class UpdatesRpcServer; -class UpdatesRpcClients; -class DataManager; -class IndexRpcClients; -} // namespace distributed - namespace database { /// Database configuration. Initialized from flags, but modifiable. @@ -84,12 +69,9 @@ struct Config { */ class GraphDb { public: - enum class Type { SINGLE_NODE, DISTRIBUTED_MASTER, DISTRIBUTED_WORKER }; - GraphDb() {} virtual ~GraphDb() {} - virtual Type type() const = 0; virtual Storage &storage() = 0; virtual durability::WriteAheadLog &wal() = 0; virtual tx::Engine &tx_engine() = 0; @@ -102,25 +84,6 @@ class GraphDb { virtual int WorkerId() const = 0; virtual std::vector<int> GetWorkerIds() const = 0; - // Supported only in distributed master and worker, not in single-node. - virtual distributed::BfsRpcServer &bfs_subcursor_server() = 0; - virtual distributed::BfsRpcClients &bfs_subcursor_clients() = 0; - virtual distributed::DataRpcServer &data_server() = 0; - virtual distributed::DataRpcClients &data_clients() = 0; - virtual distributed::UpdatesRpcServer &updates_server() = 0; - virtual distributed::UpdatesRpcClients &updates_clients() = 0; - virtual distributed::DataManager &data_manager() = 0; - - // Supported only in distributed master. - virtual distributed::PullRpcClients &pull_clients() = 0; - virtual distributed::PlanDispatcher &plan_dispatcher() = 0; - virtual distributed::IndexRpcClients &index_rpc_clients() = 0; - - // Supported only in distributed worker. - // TODO remove once end2end testing is possible. - virtual distributed::ProduceRpcServer &produce_server() = 0; - virtual distributed::PlanConsumer &plan_consumer() = 0; - // Makes a snapshot from the visibility of the given accessor virtual bool MakeSnapshot(GraphDbAccessor &accessor) = 0; @@ -146,7 +109,6 @@ class PrivateBase; // initialization and cleanup. 
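// A minimal sketch, assuming only the virtuals that remain on GraphDb above:
// with the Type enum and the distributed getters gone, callers no longer have
// to branch on the database kind before taking a snapshot.
//
//   bool SnapshotNow(database::GraphDb &db) {
//     database::GraphDbAccessor dba(db);  // same pattern as in ~PublicBase
//     return db.MakeSnapshot(dba);
//   }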
class PublicBase : public GraphDb { public: - Type type() const override; Storage &storage() override; durability::WriteAheadLog &wal() override; tx::Engine &tx_engine() override; @@ -157,18 +119,6 @@ class PublicBase : public GraphDb { void CollectGarbage() override; int WorkerId() const override; std::vector<int> GetWorkerIds() const override; - distributed::BfsRpcServer &bfs_subcursor_server() override; - distributed::BfsRpcClients &bfs_subcursor_clients() override; - distributed::DataRpcServer &data_server() override; - distributed::DataRpcClients &data_clients() override; - distributed::PlanDispatcher &plan_dispatcher() override; - distributed::IndexRpcClients &index_rpc_clients() override; - distributed::PlanConsumer &plan_consumer() override; - distributed::PullRpcClients &pull_clients() override; - distributed::ProduceRpcServer &produce_server() override; - distributed::UpdatesRpcServer &updates_server() override; - distributed::UpdatesRpcClients &updates_clients() override; - distributed::DataManager &data_manager() override; bool is_accepting_transactions() const { return is_accepting_transactions_; } bool MakeSnapshot(GraphDbAccessor &accessor) override; @@ -201,24 +151,4 @@ class SingleNode : public MasterBase { explicit SingleNode(Config config = Config()); }; -class Master : public MasterBase { - public: - explicit Master(Config config = Config()); - /** Gets this master's endpoint. */ - io::network::Endpoint endpoint() const; - /** Gets the endpoint of the worker with the given id. */ - // TODO make const once Coordination::GetEndpoint is const. - io::network::Endpoint GetEndpoint(int worker_id); -}; - -class Worker : public impl::PublicBase { - public: - explicit Worker(Config config = Config()); - /** Gets this worker's endpoint. */ - io::network::Endpoint endpoint() const; - /** Gets the endpoint of the worker with the given id. */ - // TODO make const once Coordination::GetEndpoint is const. 
- io::network::Endpoint GetEndpoint(int worker_id); - void WaitForShutdown(); -}; } // namespace database diff --git a/src/database/graph_db_accessor.cpp b/src/database/graph_db_accessor.cpp index ffd9d1337..740f25b18 100644 --- a/src/database/graph_db_accessor.cpp +++ b/src/database/graph_db_accessor.cpp @@ -4,9 +4,6 @@ #include "database/graph_db_accessor.hpp" #include "database/state_delta.hpp" -#include "distributed/data_manager.hpp" -#include "distributed/rpc_worker_clients.hpp" -#include "distributed/updates_rpc_clients.hpp" #include "storage/address_types.hpp" #include "storage/edge.hpp" #include "storage/edge_accessor.hpp" @@ -92,26 +89,6 @@ VertexAccessor GraphDbAccessor::InsertVertex( return va; } -VertexAccessor GraphDbAccessor::InsertVertexIntoRemote( - int worker_id, const std::vector<storage::Label> &labels, - const std::unordered_map<storage::Property, query::TypedValue> - &properties) { - CHECK(worker_id != db().WorkerId()) - << "Not allowed to call InsertVertexIntoRemote for local worker"; - - gid::Gid gid = db().updates_clients().CreateVertex( - worker_id, transaction_id(), labels, properties); - - auto vertex = std::make_unique<Vertex>(); - vertex->labels_ = labels; - for (auto &kv : properties) vertex->properties_.set(kv.first, kv.second); - - db().data_manager() - .Elements<Vertex>(transaction_id()) - .emplace(gid, nullptr, std::move(vertex)); - return VertexAccessor({gid, worker_id}, *this); -} - std::experimental::optional<VertexAccessor> GraphDbAccessor::FindVertexOptional( gid::Gid gid, bool current_state) { VertexAccessor record_accessor(db_.storage().LocalAddress<Vertex>(gid), @@ -144,8 +121,6 @@ EdgeAccessor GraphDbAccessor::FindEdge(gid::Gid gid, bool current_state) { void GraphDbAccessor::BuildIndex(storage::Label label, storage::Property property) { DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted"; - DCHECK(db_.type() != GraphDb::Type::DISTRIBUTED_WORKER) - << "BuildIndex invoked on worker"; db_.storage().index_build_tx_in_progress_.access().insert(transaction_.id_); @@ -192,13 +167,6 @@ void GraphDbAccessor::BuildIndex(storage::Label label, std::experimental::optional<std::vector<utils::Future<bool>>> index_rpc_completions; - // Notify all workers to start building an index if we are the master since - // they don't have to wait anymore - if (db_.type() == GraphDb::Type::DISTRIBUTED_MASTER) { - index_rpc_completions.emplace(db_.index_rpc_clients().GetBuildIndexFutures( - label, property, transaction_id(), this->db_.WorkerId())); - } - // Add transaction to the build_tx_in_progress as this transaction doesn't // change data and shouldn't block other parallel index creations auto read_transaction_id = dba.transaction().id_; @@ -352,14 +320,6 @@ bool GraphDbAccessor::RemoveVertex(VertexAccessor &vertex_accessor, bool check_empty) { DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted"; - if (!vertex_accessor.is_local()) { - auto address = vertex_accessor.address(); - db().updates_clients().RemoveVertex(address.worker_id(), transaction_id(), - address.gid(), check_empty); - // We can't know if we are going to be able to remove vertex until deferred - // updates on a remote worker are executed - return true; - } vertex_accessor.SwitchNew(); // it's possible the vertex was removed already in this transaction // due to it getting matched multiple times by some patterns @@ -402,59 +362,33 @@ EdgeAccessor GraphDbAccessor::InsertEdge( storage::EdgeAddress edge_address; Vertex *from_updated; - if (from.is_local()) { - auto edge_accessor = 
- InsertOnlyEdge(from.address(), to.address(), edge_type, requested_gid); - edge_address = edge_accessor.address(), - from.SwitchNew(); - from_updated = &from.update(); + auto edge_accessor = + InsertOnlyEdge(from.address(), to.address(), edge_type, requested_gid); + edge_address = edge_accessor.address(), - // TODO when preparing WAL for distributed, most likely never use - // `CREATE_EDGE`, but always have it split into 3 parts (edge insertion, - // in/out modification). - wal().Emplace(database::StateDelta::CreateEdge( - transaction_.id_, edge_accessor.gid(), from.gid(), to.gid(), edge_type, - EdgeTypeName(edge_type))); + from.SwitchNew(); + from_updated = &from.update(); - } else { - edge_address = db().updates_clients().CreateEdge(transaction_id(), from, to, - edge_type); + // TODO when preparing WAL for distributed, most likely never use + // `CREATE_EDGE`, but always have it split into 3 parts (edge insertion, + // in/out modification). + wal().Emplace(database::StateDelta::CreateEdge( + transaction_.id_, edge_accessor.gid(), from.gid(), to.gid(), edge_type, + EdgeTypeName(edge_type))); - from_updated = db().data_manager() - .Elements<Vertex>(transaction_id()) - .FindNew(from.gid()); - - // Create an Edge and insert it into the Cache so we see it locally. - db().data_manager() - .Elements<Edge>(transaction_id()) - .emplace( - edge_address.gid(), nullptr, - std::make_unique<Edge>(from.address(), to.address(), edge_type)); - } from_updated->out_.emplace( db_.storage().LocalizedAddressIfPossible(to.address()), edge_address, edge_type); Vertex *to_updated; - if (to.is_local()) { - // ensure that the "to" accessor has the latest version (Switch new) - // WARNING: must do that after the above "from.update()" for cases when - // we are creating a cycle and "from" and "to" are the same vlist - to.SwitchNew(); - to_updated = &to.update(); - } else { - // The RPC call for the `to` side is already handled if `from` is not local. 
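// A minimal usage sketch of the local-only path kept above; EdgeType() is
// assumed to mirror the Label() name-interning helper defined further down in
// this file:
//
//   GraphDbAccessor dba(db);
//   auto a = dba.InsertVertex();
//   auto b = dba.InsertVertex();
//   auto edge = dba.InsertEdge(a, b, dba.EdgeType("CONNECTED"));
//   // The StateDelta::CreateEdge emplaced above records the edge in the WAL.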
- if (from.is_local() || - from.address().worker_id() != to.address().worker_id()) { - db().updates_clients().AddInEdge( - transaction_id(), from, - db().storage().GlobalizedAddress(edge_address), to, edge_type); - } - to_updated = db().data_manager() - .Elements<Vertex>(transaction_id()) - .FindNew(to.gid()); - } + + // ensure that the "to" accessor has the latest version (Switch new) + // WARNING: must do that after the above "from.update()" for cases when + // we are creating a cycle and "from" and "to" are the same vlist + to.SwitchNew(); + to_updated = &to.update(); + to_updated->in_.emplace( db_.storage().LocalizedAddressIfPossible(from.address()), edge_address, edge_type); @@ -492,35 +426,17 @@ int64_t GraphDbAccessor::EdgesCount() const { void GraphDbAccessor::RemoveEdge(EdgeAccessor &edge, bool remove_out_edge, bool remove_in_edge) { DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted"; - if (edge.is_local()) { - // it's possible the edge was removed already in this transaction - // due to it getting matched multiple times by some patterns - // we can only delete it once, so check if it's already deleted - edge.SwitchNew(); - if (edge.current().is_expired_by(transaction_)) return; - if (remove_out_edge) edge.from().RemoveOutEdge(edge.address()); - if (remove_in_edge) edge.to().RemoveInEdge(edge.address()); - edge.address().local()->remove(edge.current_, transaction_); - wal().Emplace( - database::StateDelta::RemoveEdge(transaction_.id_, edge.gid())); - } else { - auto edge_addr = edge.GlobalAddress(); - auto from_addr = db().storage().GlobalizedAddress(edge.from_addr()); - CHECK(edge_addr.worker_id() == from_addr.worker_id()) - << "Edge and it's 'from' vertex not on the same worker"; - auto to_addr = db().storage().GlobalizedAddress(edge.to_addr()); - db().updates_clients().RemoveEdge(transaction_id(), edge_addr.worker_id(), - edge_addr.gid(), from_addr.gid(), - to_addr); + // it's possible the edge was removed already in this transaction + // due to it getting matched multiple times by some patterns + // we can only delete it once, so check if it's already deleted + edge.SwitchNew(); + if (edge.current().is_expired_by(transaction_)) return; + if (remove_out_edge) edge.from().RemoveOutEdge(edge.address()); + if (remove_in_edge) edge.to().RemoveInEdge(edge.address()); - // Another RPC is necessary only if the first did not handle vertices on - // both sides. - if (edge_addr.worker_id() != to_addr.worker_id()) { - db().updates_clients().RemoveInEdge(transaction_id(), to_addr.worker_id(), - to_addr.gid(), edge_addr); - } - } + edge.address().local()->remove(edge.current_, transaction_); + wal().Emplace(database::StateDelta::RemoveEdge(transaction_.id_, edge.gid())); } storage::Label GraphDbAccessor::Label(const std::string &label_name) { diff --git a/src/database/graph_db_accessor.hpp b/src/database/graph_db_accessor.hpp index bacb710d0..f2a961ddc 100644 --- a/src/database/graph_db_accessor.hpp +++ b/src/database/graph_db_accessor.hpp @@ -9,7 +9,6 @@ #include "glog/logging.h" #include "database/graph_db.hpp" -#include "distributed/cache.hpp" #include "query/typed_value.hpp" #include "storage/address_types.hpp" #include "storage/edge_accessor.hpp" @@ -78,13 +77,6 @@ class GraphDbAccessor { VertexAccessor InsertVertex(std::experimental::optional<gid::Gid> requested_gid = std::experimental::nullopt); - /** Creates a new Vertex on the given worker. It is NOT allowed to call this - * function with this worker's id. 
*/ - VertexAccessor InsertVertexIntoRemote( - int worker_id, const std::vector<storage::Label> &labels, - const std::unordered_map<storage::Property, query::TypedValue> - &properties); - /** * Removes the vertex of the given accessor. If the vertex has any outgoing or * incoming edges, it is not deleted. See `DetachRemoveVertex` if you want to diff --git a/src/database/state_delta.lcp b/src/database/state_delta.lcp index f395f4c3c..0f9d63601 100644 --- a/src/database/state_delta.lcp +++ b/src/database/state_delta.lcp @@ -3,22 +3,17 @@ #include "communication/bolt/v1/decoder/decoder.hpp" #include "communication/bolt/v1/encoder/primitive_encoder.hpp" -#include "database/state_delta.capnp.h" #include "durability/hashed_file_reader.hpp" #include "durability/hashed_file_writer.hpp" #include "storage/address_types.hpp" #include "storage/gid.hpp" #include "storage/property_value.hpp" -#include "utils/serialization.hpp" cpp<# (lcp:namespace database) (lcp:capnp-namespace "database") -(lcp:capnp-import 'storage "/storage/serialization.capnp") -(lcp:capnp-import 'dis "/distributed/serialization.capnp") - (lcp:capnp-type-conversion "tx::TransactionId" "UInt64") (lcp:capnp-type-conversion "gid::Gid" "UInt64") (lcp:capnp-type-conversion "storage::Label" "Storage.Common") @@ -108,7 +103,7 @@ in StateDeltas.") "Defines StateDelta type. For each type the comment indicates which values need to be stored. All deltas have the transaction_id member, so that's omitted in the comment.") - (:serialize :capnp)) + (:serialize)) #>cpp StateDelta() = default; StateDelta(const enum Type &type, tx::TransactionId tx_id) @@ -174,6 +169,6 @@ omitted in the comment.") /// Applies CRUD delta to database accessor. Fails on other types of deltas void Apply(GraphDbAccessor &dba) const; cpp<#) - (:serialize :capnp)) + (:serialize)) (lcp:pop-namespace) ;; database diff --git a/src/database/storage_gc.hpp b/src/database/storage_gc.hpp index cb8e34f5d..40b94c830 100644 --- a/src/database/storage_gc.hpp +++ b/src/database/storage_gc.hpp @@ -6,7 +6,6 @@ #include "data_structures/concurrent/concurrent_map.hpp" #include "database/storage.hpp" #include "mvcc/version_list.hpp" -#include "stats/metrics.hpp" #include "storage/deferred_deleter.hpp" #include "storage/edge.hpp" #include "storage/garbage_collector.hpp" diff --git a/src/database/storage_gc_master.hpp b/src/database/storage_gc_master.hpp deleted file mode 100644 index 81d5635ac..000000000 --- a/src/database/storage_gc_master.hpp +++ /dev/null @@ -1,68 +0,0 @@ -#pragma once - -#include <mutex> - -#include "database/storage_gc.hpp" -#include "distributed/coordination_master.hpp" -#include "distributed/storage_gc_rpc_messages.hpp" - -namespace database { -class StorageGcMaster : public StorageGc { - public: - using StorageGc::StorageGc; - StorageGcMaster(Storage &storage, tx::Engine &tx_engine, int pause_sec, - communication::rpc::Server &rpc_server, - distributed::MasterCoordination &coordination) - : StorageGc(storage, tx_engine, pause_sec), - rpc_server_(rpc_server), - coordination_(coordination) { - rpc_server_.Register<distributed::RanLocalGcRpc>( - [this](const auto &req_reader, auto *res_builder) { - distributed::RanLocalGcReq req; - req.Load(req_reader); - std::unique_lock<std::mutex> lock(worker_safe_transaction_mutex_); - worker_safe_transaction_[req.worker_id] = req.local_oldest_active; - }); - } - - ~StorageGcMaster() { - // We have to stop scheduler before destroying this class because otherwise - // a task might try to utilize methods in this class which might cause 
pure - // virtual method called since they are not implemented for the base class. - scheduler_.Stop(); - rpc_server_.UnRegister<distributed::RanLocalGcRpc>(); - } - - void CollectCommitLogGarbage(tx::TransactionId oldest_active) final { - // Workers are sending information when it's safe to delete every - // transaction older than oldest_active from their perspective i.e. there - // won't exist another transaction in the future with id larger than or - // equal to oldest_active that might trigger a query into a commit log about - // the state of transactions which we are deleting. - auto safe_transaction = GetClogSafeTransaction(oldest_active); - if (safe_transaction) { - tx::TransactionId min_safe = *safe_transaction; - { - std::unique_lock<std::mutex> lock(worker_safe_transaction_mutex_); - for (auto worker_id : coordination_.GetWorkerIds()) { - // Skip itself - if (worker_id == 0) continue; - min_safe = std::min(min_safe, worker_safe_transaction_[worker_id]); - } - } - // All workers reported back at least once - if (min_safe > 0) { - tx_engine_.GarbageCollectCommitLog(min_safe); - LOG(INFO) << "Clearing master commit log with tx: " << min_safe; - } - } - } - - communication::rpc::Server &rpc_server_; - distributed::MasterCoordination &coordination_; - // Mapping of worker ids and oldest active transaction which is safe for - // deletion from worker perspective - std::unordered_map<int, tx::TransactionId> worker_safe_transaction_; - std::mutex worker_safe_transaction_mutex_; -}; -} // namespace database diff --git a/src/database/storage_gc_worker.hpp b/src/database/storage_gc_worker.hpp deleted file mode 100644 index 4d938dbb9..000000000 --- a/src/database/storage_gc_worker.hpp +++ /dev/null @@ -1,46 +0,0 @@ -#pragma once - -#include "communication/rpc/client_pool.hpp" -#include "database/storage_gc.hpp" -#include "distributed/storage_gc_rpc_messages.hpp" - -#include "transactions/engine_worker.hpp" -#include "transactions/transaction.hpp" - -namespace database { -class StorageGcWorker : public StorageGc { - public: - StorageGcWorker(Storage &storage, tx::Engine &tx_engine, int pause_sec, - communication::rpc::ClientPool &master_client_pool, - int worker_id) - : StorageGc(storage, tx_engine, pause_sec), - master_client_pool_(master_client_pool), - worker_id_(worker_id) {} - - ~StorageGcWorker() { - // We have to stop scheduler before destroying this class because otherwise - // a task might try to utilize methods in this class which might cause pure - // virtual method called since they are not implemented for the base class. - scheduler_.Stop(); - } - - void CollectCommitLogGarbage(tx::TransactionId oldest_active) final { - // We first need to delete transactions that we can delete to be sure that - // the locks are released as well. Otherwise some new transaction might - // try to acquire a lock which hasn't been released (if the transaction - // cache cleaner was not scheduled at this time), and take a look into the - // commit log which no longer contains that transaction id. 
- dynamic_cast<tx::WorkerEngine &>(tx_engine_) - .ClearTransactionalCache(oldest_active); - auto safe_to_delete = GetClogSafeTransaction(oldest_active); - if (safe_to_delete) { - master_client_pool_.Call<distributed::RanLocalGcRpc>(*safe_to_delete, - worker_id_); - tx_engine_.GarbageCollectCommitLog(*safe_to_delete); - } - } - - communication::rpc::ClientPool &master_client_pool_; - int worker_id_; -}; -} // namespace database diff --git a/src/distributed/bfs_rpc_clients.cpp b/src/distributed/bfs_rpc_clients.cpp deleted file mode 100644 index c0a29d9eb..000000000 --- a/src/distributed/bfs_rpc_clients.cpp +++ /dev/null @@ -1,178 +0,0 @@ -#include "distributed/bfs_rpc_messages.hpp" -#include "distributed/data_manager.hpp" - -#include "bfs_rpc_clients.hpp" - -namespace distributed { - -BfsRpcClients::BfsRpcClients( - database::GraphDb *db, distributed::BfsSubcursorStorage *subcursor_storage, - distributed::RpcWorkerClients *clients) - : db_(db), subcursor_storage_(subcursor_storage), clients_(clients) {} - -std::unordered_map<int16_t, int64_t> BfsRpcClients::CreateBfsSubcursors( - tx::TransactionId tx_id, query::EdgeAtom::Direction direction, - const std::vector<storage::EdgeType> &edge_types, - query::GraphView graph_view) { - auto futures = clients_->ExecuteOnWorkers<std::pair<int16_t, int64_t>>( - db_->WorkerId(), - [tx_id, direction, &edge_types, graph_view](int worker_id, auto &client) { - auto res = client.template Call<CreateBfsSubcursorRpc>( - tx_id, direction, edge_types, graph_view); - CHECK(res) << "CreateBfsSubcursor RPC failed!"; - return std::make_pair(worker_id, res->member); - }); - std::unordered_map<int16_t, int64_t> subcursor_ids; - subcursor_ids.emplace( - db_->WorkerId(), - subcursor_storage_->Create(tx_id, direction, edge_types, graph_view)); - for (auto &future : futures) { - auto got = subcursor_ids.emplace(future.get()); - CHECK(got.second) << "CreateBfsSubcursors failed: duplicate worker id"; - } - return subcursor_ids; -} - -void BfsRpcClients::RegisterSubcursors( - const std::unordered_map<int16_t, int64_t> &subcursor_ids) { - auto futures = clients_->ExecuteOnWorkers<void>( - db_->WorkerId(), [&subcursor_ids](int worker_id, auto &client) { - auto res = client.template Call<RegisterSubcursorsRpc>(subcursor_ids); - CHECK(res) << "RegisterSubcursors RPC failed!"; - }); - subcursor_storage_->Get(subcursor_ids.at(db_->WorkerId())) - ->RegisterSubcursors(subcursor_ids); -} - -void BfsRpcClients::RemoveBfsSubcursors( - const std::unordered_map<int16_t, int64_t> &subcursor_ids) { - auto futures = clients_->ExecuteOnWorkers<void>( - db_->WorkerId(), [&subcursor_ids](int worker_id, auto &client) { - auto res = client.template Call<RemoveBfsSubcursorRpc>( - subcursor_ids.at(worker_id)); - CHECK(res) << "RemoveBfsSubcursor RPC failed!"; - }); - subcursor_storage_->Erase(subcursor_ids.at(db_->WorkerId())); -} - -std::experimental::optional<VertexAccessor> BfsRpcClients::Pull( - int16_t worker_id, int64_t subcursor_id, database::GraphDbAccessor *dba) { - if (worker_id == db_->WorkerId()) { - return subcursor_storage_->Get(subcursor_id)->Pull(); - } - - auto res = - clients_->GetClientPool(worker_id).Call<SubcursorPullRpc>(subcursor_id); - CHECK(res) << "SubcursorPull RPC failed!"; - if (!res->vertex) return std::experimental::nullopt; - - db_->data_manager() - .Elements<Vertex>(dba->transaction_id()) - .emplace(res->vertex->global_address.gid(), - std::move(res->vertex->old_element_output), - std::move(res->vertex->new_element_output)); - return 
VertexAccessor(res->vertex->global_address, *dba); -} - -bool BfsRpcClients::ExpandLevel( - const std::unordered_map<int16_t, int64_t> &subcursor_ids) { - auto futures = clients_->ExecuteOnWorkers<bool>( - db_->WorkerId(), [&subcursor_ids](int worker_id, auto &client) { - auto res = - client.template Call<ExpandLevelRpc>(subcursor_ids.at(worker_id)); - CHECK(res) << "ExpandLevel RPC failed!"; - return res->member; - }); - bool expanded = - subcursor_storage_->Get(subcursor_ids.at(db_->WorkerId()))->ExpandLevel(); - for (auto &future : futures) { - expanded |= future.get(); - } - return expanded; -} - -void BfsRpcClients::SetSource( - const std::unordered_map<int16_t, int64_t> &subcursor_ids, - storage::VertexAddress source_address) { - CHECK(source_address.is_remote()) - << "SetSource should be called with global address"; - - int worker_id = source_address.worker_id(); - if (worker_id == db_->WorkerId()) { - subcursor_storage_->Get(subcursor_ids.at(db_->WorkerId())) - ->SetSource(source_address); - } else { - auto res = clients_->GetClientPool(worker_id).Call<SetSourceRpc>( - subcursor_ids.at(worker_id), source_address); - CHECK(res) << "SetSourceRpc failed!"; - } -} - -bool BfsRpcClients::ExpandToRemoteVertex( - const std::unordered_map<int16_t, int64_t> &subcursor_ids, - EdgeAccessor edge, VertexAccessor vertex) { - CHECK(!vertex.is_local()) - << "ExpandToRemoteVertex should not be called with local vertex"; - int worker_id = vertex.address().worker_id(); - auto res = clients_->GetClientPool(worker_id).Call<ExpandToRemoteVertexRpc>( - subcursor_ids.at(worker_id), edge.GlobalAddress(), - vertex.GlobalAddress()); - CHECK(res) << "ExpandToRemoteVertex RPC failed!"; - return res->member; -} - -PathSegment BuildPathSegment(ReconstructPathRes *res, - database::GraphDbAccessor *dba) { - std::vector<EdgeAccessor> edges; - for (auto &edge : res->edges) { - dba->db() - .data_manager() - .Elements<Edge>(dba->transaction_id()) - .emplace(edge.global_address.gid(), std::move(edge.old_element_output), - std::move(edge.new_element_output)); - edges.emplace_back(edge.global_address, *dba); - } - - return PathSegment{edges, res->next_vertex, res->next_edge}; -} - -PathSegment BfsRpcClients::ReconstructPath( - const std::unordered_map<int16_t, int64_t> &subcursor_ids, - storage::VertexAddress vertex, database::GraphDbAccessor *dba) { - int worker_id = vertex.worker_id(); - if (worker_id == db_->WorkerId()) { - return subcursor_storage_->Get(subcursor_ids.at(worker_id)) - ->ReconstructPath(vertex); - } - - auto res = clients_->GetClientPool(worker_id).Call<ReconstructPathRpc>( - subcursor_ids.at(worker_id), vertex); - return BuildPathSegment(&res.value(), dba); -} - -PathSegment BfsRpcClients::ReconstructPath( - const std::unordered_map<int16_t, int64_t> &subcursor_ids, - storage::EdgeAddress edge, database::GraphDbAccessor *dba) { - int worker_id = edge.worker_id(); - if (worker_id == db_->WorkerId()) { - return subcursor_storage_->Get(subcursor_ids.at(worker_id)) - ->ReconstructPath(edge); - } - auto res = clients_->GetClientPool(worker_id).Call<ReconstructPathRpc>( - subcursor_ids.at(worker_id), edge); - return BuildPathSegment(&res.value(), dba); -} - -void BfsRpcClients::PrepareForExpand( - const std::unordered_map<int16_t, int64_t> &subcursor_ids, bool clear) { - auto res = clients_->ExecuteOnWorkers<void>( - db_->WorkerId(), [clear, &subcursor_ids](int worker_id, auto &client) { - auto res = client.template Call<PrepareForExpandRpc>( - subcursor_ids.at(worker_id), clear); - CHECK(res) << 
"PrepareForExpand RPC failed!"; - }); - subcursor_storage_->Get(subcursor_ids.at(db_->WorkerId())) - ->PrepareForExpand(clear); -} - -} // namespace distributed diff --git a/src/distributed/bfs_rpc_clients.hpp b/src/distributed/bfs_rpc_clients.hpp deleted file mode 100644 index a60acdf29..000000000 --- a/src/distributed/bfs_rpc_clients.hpp +++ /dev/null @@ -1,62 +0,0 @@ -/// @file -#pragma once - -#include "distributed/bfs_subcursor.hpp" -#include "distributed/rpc_worker_clients.hpp" -#include "transactions/transaction.hpp" - -namespace distributed { - -/// Along with `BfsRpcServer`, this class is used to expose `BfsSubcursor` -/// interface over the network so that subcursors can communicate during the -/// traversal. It is just a thin wrapper making RPC calls that also takes -/// care for storing remote data into cache upon receival. Special care is taken -/// to avoid sending local RPCs. Instead, subcursor storage is accessed -/// directly. -class BfsRpcClients { - public: - BfsRpcClients(database::GraphDb *db, - distributed::BfsSubcursorStorage *subcursor_storage, - distributed::RpcWorkerClients *clients); - - std::unordered_map<int16_t, int64_t> CreateBfsSubcursors( - tx::TransactionId tx_id, query::EdgeAtom::Direction direction, - const std::vector<storage::EdgeType> &edge_types, - query::GraphView graph_view); - - void RegisterSubcursors( - const std::unordered_map<int16_t, int64_t> &subcursor_ids); - - void RemoveBfsSubcursors( - const std::unordered_map<int16_t, int64_t> &subcursor_ids); - - std::experimental::optional<VertexAccessor> Pull( - int16_t worker_id, int64_t subcursor_id, database::GraphDbAccessor *dba); - - bool ExpandLevel(const std::unordered_map<int16_t, int64_t> &subcursor_ids); - - void SetSource(const std::unordered_map<int16_t, int64_t> &subcursor_ids, - storage::VertexAddress source_address); - - bool ExpandToRemoteVertex( - const std::unordered_map<int16_t, int64_t> &subcursor_ids, - EdgeAccessor edge, VertexAccessor vertex); - - PathSegment ReconstructPath( - const std::unordered_map<int16_t, int64_t> &subcursor_ids, - storage::EdgeAddress edge, database::GraphDbAccessor *dba); - - PathSegment ReconstructPath( - const std::unordered_map<int16_t, int64_t> &subcursor_ids, - storage::VertexAddress vertex, database::GraphDbAccessor *dba); - - void PrepareForExpand( - const std::unordered_map<int16_t, int64_t> &subcursor_ids, bool clear); - - private: - database::GraphDb *db_; - distributed::BfsSubcursorStorage *subcursor_storage_; - distributed::RpcWorkerClients *clients_; -}; - -} // namespace distributed diff --git a/src/distributed/bfs_rpc_messages.lcp b/src/distributed/bfs_rpc_messages.lcp deleted file mode 100644 index 4cb7c42b7..000000000 --- a/src/distributed/bfs_rpc_messages.lcp +++ /dev/null @@ -1,280 +0,0 @@ -#>cpp -#pragma once - -#include <tuple> - -#include "communication/rpc/messages.hpp" -#include "distributed/bfs_rpc_messages.capnp.h" -#include "distributed/bfs_subcursor.hpp" -#include "query/plan/operator.hpp" -#include "transactions/type.hpp" -#include "utils/serialization.hpp" -cpp<# - -(lcp:namespace distributed) - -(lcp:capnp-namespace "distributed") - -(lcp:capnp-import 'ast "/query/frontend/ast/ast.capnp") -(lcp:capnp-import 'dis "/distributed/serialization.capnp") -(lcp:capnp-import 'query "/query/common.capnp") -(lcp:capnp-import 'storage "/storage/serialization.capnp") -(lcp:capnp-import 'utils "/utils/serialization.capnp") - -(lcp:capnp-type-conversion "storage::EdgeAddress" "Storage.Address") -(lcp:capnp-type-conversion 
"storage::VertexAddress" "Storage.Address") - -(defun save-element (builder member) - #>cpp - if (${member}) { - if constexpr (std::is_same<TElement, Vertex>::value) { - auto builder = ${builder}.initVertex(); - SaveVertex(*${member}, &builder, worker_id); - } else { - auto builder = ${builder}.initEdge(); - SaveEdge(*${member}, &builder, worker_id); - } - } else { - ${builder}.setNull(); - } - cpp<#) - -(defun load-element (reader member) - (let ((output-member (cl-ppcre:regex-replace "input$" member "output"))) - #>cpp - if (!${reader}.isNull()) { - if constexpr (std::is_same<TElement, Vertex>::value) { - const auto reader = ${reader}.getVertex(); - ${output-member} = LoadVertex(reader); - } else { - const auto reader = ${reader}.getEdge(); - ${output-member} = LoadEdge(reader); - } - } - cpp<#)) - -(lcp:define-struct (serialized-graph-element t-element) () - ((global-address "storage::Address<mvcc::VersionList<TElement>>" - :capnp-type "Storage.Address") - (old-element-input "TElement *" - :save-fun - "if (old_element_input) { - ar << true; - SaveElement(ar, *old_element_input, worker_id); - } else { - ar << false; - }" - :load-fun "" - :capnp-type '((null "Void") (vertex "Dis.Vertex") (edge "Dis.Edge")) - :capnp-save #'save-element :capnp-load #'load-element) - (old-element-output "std::unique_ptr<TElement>" - :save-fun "" - :load-fun - "bool has_old; - ar >> has_old; - if (has_old) { - if constexpr (std::is_same<TElement, Vertex>::value) { - old_element_output = std::move(LoadVertex(ar)); - } else { - old_element_output = std::move(LoadEdge(ar)); - } - }" - :capnp-save :dont-save) - (new-element-input "TElement *" - :save-fun - "if (new_element_input) { - ar << true; - SaveElement(ar, *new_element_input, worker_id); - } else { - ar << false; - }" - :load-fun "" - :capnp-type '((null "Void") (vertex "Dis.Vertex") (edge "Dis.Edge")) - :capnp-save #'save-element :capnp-load #'load-element) - (new-element-output "std::unique_ptr<TElement>" - :save-fun "" - :load-fun - "bool has_new; - ar >> has_new; - if (has_new) { - if constexpr (std::is_same<TElement, Vertex>::value) { - new_element_output = std::move(LoadVertex(ar)); - } else { - new_element_output = std::move(LoadEdge(ar)); - } - }" - :capnp-save :dont-save) - (worker-id :int16_t :save-fun "" :load-fun "" :capnp-save :dont-save)) - (:public - #>cpp - SerializedGraphElement(storage::Address<mvcc::VersionList<TElement>> global_address, - TElement *old_element_input, TElement *new_element_input, - int16_t worker_id) - : global_address(global_address), - old_element_input(old_element_input), - old_element_output(nullptr), - new_element_input(new_element_input), - new_element_output(nullptr), - worker_id(worker_id) { - CHECK(global_address.is_remote()) - << "Only global addresses should be used with SerializedGraphElement"; - } - - SerializedGraphElement(const RecordAccessor<TElement> &accessor) - : SerializedGraphElement(accessor.GlobalAddress(), accessor.GetOld(), - accessor.GetNew(), - accessor.db_accessor().db().WorkerId()) {} - - SerializedGraphElement() {} - cpp<#) - (:serialize :capnp :type-args '(vertex edge))) - -#>cpp -using SerializedVertex = SerializedGraphElement<Vertex>; -using SerializedEdge = SerializedGraphElement<Edge>; -cpp<# - -(lcp:define-rpc create-bfs-subcursor - (:request - ((tx-id "tx::TransactionId" :capnp-type "UInt64") - (direction "query::EdgeAtom::Direction" - :capnp-type "Ast.EdgeAtom.Direction" :capnp-init nil - :capnp-save (lcp:capnp-save-enum "::query::capnp::EdgeAtom::Direction" - 
"query::EdgeAtom::Direction" - '(in out both)) - :capnp-load (lcp:capnp-load-enum "::query::capnp::EdgeAtom::Direction" - "query::EdgeAtom::Direction" - '(in out both))) - ;; TODO(mtomic): Why isn't edge-types serialized? - (edge-types "std::vector<storage::EdgeType>" - :save-fun "" :load-fun "" :capnp-save :dont-save) - (graph-view "query::GraphView" - :capnp-type "Query.GraphView" :capnp-init nil - :capnp-save (lcp:capnp-save-enum "::query::capnp::GraphView" - "query::GraphView" - '(old new)) - :capnp-load (lcp:capnp-load-enum "::query::capnp::GraphView" - "query::GraphView" - '(old new))))) - (:response ((member :int64_t)))) - -(lcp:define-rpc register-subcursors - (:request ((subcursor-ids "std::unordered_map<int16_t, int64_t>" - :capnp-type "Utils.Map(Utils.BoxInt16, Utils.BoxInt64)" - :capnp-save - (lambda (builder member) - #>cpp - utils::SaveMap<utils::capnp::BoxInt16, utils::capnp::BoxInt64>( - ${member}, &${builder}, - [](auto *builder, const auto &entry) { - auto key_builder = builder->initKey(); - key_builder.setValue(entry.first); - auto value_builder = builder->initValue(); - value_builder.setValue(entry.second); - }); - cpp<#) - :capnp-load - (lambda (reader member) - #>cpp - utils::LoadMap<utils::capnp::BoxInt16, utils::capnp::BoxInt64>( - &${member}, ${reader}, - [](const auto &reader) { - int16_t key = reader.getKey().getValue(); - int64_t value = reader.getValue().getValue(); - return std::make_pair(key, value); - }); - cpp<#)))) - (:response ())) - -(lcp:define-rpc remove-bfs-subcursor - (:request ((member :int64_t))) - (:response ())) - -(lcp:define-rpc expand-level - (:request ((member :int64_t))) - (:response ((member :bool)))) - -(lcp:define-rpc subcursor-pull - (:request ((member :int64_t))) - (:response ((vertex "std::experimental::optional<SerializedVertex>" :initarg :move - :capnp-type "Utils.Optional(SerializedGraphElement)" - :capnp-save (lcp:capnp-save-optional "capnp::SerializedGraphElement" "SerializedVertex") - :capnp-load (lcp:capnp-load-optional "capnp::SerializedGraphElement" "SerializedVertex"))))) -(lcp:define-rpc set-source - (:request - ((subcursor-id :int64_t) - (source "storage::VertexAddress"))) - (:response ())) - -(lcp:define-rpc expand-to-remote-vertex - (:request - ((subcursor-id :int64_t) - (edge "storage::EdgeAddress") - (vertex "storage::VertexAddress"))) - (:response ((member :bool)))) - -(lcp:define-rpc reconstruct-path - (:request - ((subcursor-id :int64_t) - (vertex "std::experimental::optional<storage::VertexAddress>" - :capnp-save (lcp:capnp-save-optional "storage::capnp::Address" "storage::VertexAddress") - :capnp-load (lcp:capnp-load-optional "storage::capnp::Address" "storage::VertexAddress")) - (edge "std::experimental::optional<storage::EdgeAddress>" - :capnp-save (lcp:capnp-save-optional "storage::capnp::Address" "storage::EdgeAddress") - :capnp-load (lcp:capnp-load-optional "storage::capnp::Address" "storage::EdgeAddress"))) - (:public - #>cpp - using Capnp = capnp::ReconstructPathReq; - static const communication::rpc::MessageType TypeInfo; - - ReconstructPathReq() {} - - ReconstructPathReq(int64_t subcursor_id, storage::VertexAddress vertex) - : subcursor_id(subcursor_id), - vertex(vertex), - edge(std::experimental::nullopt) {} - - ReconstructPathReq(int64_t subcursor_id, storage::EdgeAddress edge) - : subcursor_id(subcursor_id), - vertex(std::experimental::nullopt), - edge(edge) {} - cpp<#)) - (:response - ((subcursor-id :int64_t ;; TODO(mtomic): Unused? 
- :save-fun "" :load-fun "" :capnp-save :dont-save) - (edges "std::vector<SerializedEdge>" :capnp-type "List(SerializedGraphElement)" - :capnp-save (lcp:capnp-save-vector "capnp::SerializedGraphElement" "SerializedEdge") - :capnp-load (lcp:capnp-load-vector "capnp::SerializedGraphElement" "SerializedEdge")) - (next-vertex "std::experimental::optional<storage::VertexAddress>" - :capnp-save (lcp:capnp-save-optional "storage::capnp::Address" "storage::VertexAddress") - :capnp-load (lcp:capnp-load-optional "storage::capnp::Address" "storage::VertexAddress")) - (next-edge "std::experimental::optional<storage::EdgeAddress>" - :capnp-save (lcp:capnp-save-optional "storage::capnp::Address" "storage::EdgeAddress") - :capnp-load (lcp:capnp-load-optional "storage::capnp::Address" "storage::EdgeAddress"))) - (:public - #>cpp - using Capnp = capnp::ReconstructPathRes; - static const communication::rpc::MessageType TypeInfo; - - ReconstructPathRes() {} - - ReconstructPathRes( - const std::vector<EdgeAccessor> &edge_accessors, - std::experimental::optional<storage::VertexAddress> next_vertex, - std::experimental::optional<storage::EdgeAddress> next_edge) - : next_vertex(std::move(next_vertex)), next_edge(std::move(next_edge)) { - CHECK(!static_cast<bool>(next_vertex) || !static_cast<bool>(next_edge)) - << "At most one of `next_vertex` and `next_edge` should be set"; - for (const auto &edge : edge_accessors) { - edges.emplace_back(edge); - } - } - cpp<#))) - -(lcp:define-rpc prepare-for-expand - (:request - ((subcursor-id :int64_t) - (clear :bool))) - (:response ())) - -(lcp:pop-namespace) ;; distributed diff --git a/src/distributed/bfs_rpc_server.hpp b/src/distributed/bfs_rpc_server.hpp deleted file mode 100644 index 2c6832030..000000000 --- a/src/distributed/bfs_rpc_server.hpp +++ /dev/null @@ -1,126 +0,0 @@ -/// @file -#pragma once - -#include <map> - -#include "communication/rpc/server.hpp" - -#include "distributed/bfs_rpc_messages.hpp" -#include "distributed/bfs_subcursor.hpp" - -namespace distributed { - -/// Along with `BfsRpcClients`, this class is used to expose `BfsSubcursor` -/// interface over the network so that subcursors can communicate during the -/// traversal. It is just a thin wrapper forwarding RPC calls to subcursors in -/// subcursor storage. 
-class BfsRpcServer { - public: - BfsRpcServer(database::GraphDb *db, communication::rpc::Server *server, - BfsSubcursorStorage *subcursor_storage) - : db_(db), server_(server), subcursor_storage_(subcursor_storage) { - server_->Register<CreateBfsSubcursorRpc>( - [this](const auto &req_reader, auto *res_builder) { - CreateBfsSubcursorReq req; - req.Load(req_reader); - CreateBfsSubcursorRes res(subcursor_storage_->Create( - req.tx_id, req.direction, req.edge_types, req.graph_view)); - res.Save(res_builder); - }); - - server_->Register<RegisterSubcursorsRpc>( - [this](const auto &req_reader, auto *res_builder) { - RegisterSubcursorsReq req; - req.Load(req_reader); - subcursor_storage_->Get(req.subcursor_ids.at(db_->WorkerId())) - ->RegisterSubcursors(req.subcursor_ids); - RegisterSubcursorsRes res; - res.Save(res_builder); - }); - - server_->Register<RemoveBfsSubcursorRpc>( - [this](const auto &req_reader, auto *res_builder) { - RemoveBfsSubcursorReq req; - req.Load(req_reader); - subcursor_storage_->Erase(req.member); - RemoveBfsSubcursorRes res; - res.Save(res_builder); - }); - - server_->Register<SetSourceRpc>( - [this](const auto &req_reader, auto *res_builder) { - SetSourceReq req; - req.Load(req_reader); - subcursor_storage_->Get(req.subcursor_id)->SetSource(req.source); - SetSourceRes res; - res.Save(res_builder); - }); - - server_->Register<ExpandLevelRpc>([this](const auto &req_reader, - auto *res_builder) { - ExpandLevelReq req; - req.Load(req_reader); - ExpandLevelRes res(subcursor_storage_->Get(req.member)->ExpandLevel()); - res.Save(res_builder); - }); - - server_->Register<SubcursorPullRpc>( - [this](const auto &req_reader, auto *res_builder) { - SubcursorPullReq req; - req.Load(req_reader); - auto vertex = subcursor_storage_->Get(req.member)->Pull(); - if (!vertex) { - SubcursorPullRes res; - res.Save(res_builder); - return; - } - SubcursorPullRes res(*vertex); - res.Save(res_builder); - }); - - server_->Register<ExpandToRemoteVertexRpc>( - [this](const auto &req_reader, auto *res_builder) { - ExpandToRemoteVertexReq req; - req.Load(req_reader); - ExpandToRemoteVertexRes res( - subcursor_storage_->Get(req.subcursor_id) - ->ExpandToLocalVertex(req.edge, req.vertex)); - res.Save(res_builder); - }); - - server_->Register<ReconstructPathRpc>([this](const auto &req_reader, - auto *res_builder) { - ReconstructPathReq req; - req.Load(req_reader); - auto subcursor = subcursor_storage_->Get(req.subcursor_id); - PathSegment result; - if (req.vertex) { - result = subcursor->ReconstructPath(*req.vertex); - } else if (req.edge) { - result = subcursor->ReconstructPath(*req.edge); - } else { - LOG(FATAL) << "`edge` or `vertex` should be set in ReconstructPathReq"; - } - ReconstructPathRes res(result.edges, result.next_vertex, - result.next_edge); - res.Save(res_builder); - }); - - server_->Register<PrepareForExpandRpc>([this](const auto &req_reader, - auto *res_builder) { - PrepareForExpandReq req; - req.Load(req_reader); - subcursor_storage_->Get(req.subcursor_id)->PrepareForExpand(req.clear); - PrepareForExpandRes res; - res.Save(res_builder); - }); - } - - private: - database::GraphDb *db_; - - communication::rpc::Server *server_; - BfsSubcursorStorage *subcursor_storage_; -}; - -} // namespace distributed diff --git a/src/distributed/bfs_subcursor.cpp b/src/distributed/bfs_subcursor.cpp deleted file mode 100644 index b1df54323..000000000 --- a/src/distributed/bfs_subcursor.cpp +++ /dev/null @@ -1,196 +0,0 @@ -#include <unordered_map> - -#include "distributed/bfs_rpc_clients.hpp" 
-#include "query/plan/operator.hpp" -#include "storage/address_types.hpp" -#include "storage/vertex_accessor.hpp" - -#include "bfs_subcursor.hpp" - -namespace distributed { - -using query::TypedValue; - -ExpandBfsSubcursor::ExpandBfsSubcursor( - database::GraphDb *db, tx::TransactionId tx_id, - query::EdgeAtom::Direction direction, - std::vector<storage::EdgeType> edge_types, query::GraphView graph_view) - - : dba_(*db, tx_id), - direction_(direction), - edge_types_(std::move(edge_types)), - graph_view_(graph_view) { - Reset(); -} - -void ExpandBfsSubcursor::Reset() { - pull_index_ = 0; - processed_.clear(); - to_visit_current_.clear(); - to_visit_next_.clear(); -} - -void ExpandBfsSubcursor::SetSource(storage::VertexAddress source_address) { - Reset(); - auto source = VertexAccessor(source_address, dba_); - SwitchAccessor(source, graph_view_); - processed_.emplace(source, std::experimental::nullopt); - ExpandFromVertex(source); -} - -void ExpandBfsSubcursor::PrepareForExpand(bool clear) { - if (clear) { - Reset(); - } else { - std::swap(to_visit_current_, to_visit_next_); - to_visit_next_.clear(); - } -} - -bool ExpandBfsSubcursor::ExpandLevel() { - bool expanded = false; - for (const auto &expansion : to_visit_current_) { - expanded |= ExpandFromVertex(expansion.second); - } - pull_index_ = 0; - return expanded; -} - -std::experimental::optional<VertexAccessor> ExpandBfsSubcursor::Pull() { - return pull_index_ < to_visit_next_.size() - ? std::experimental::make_optional( - to_visit_next_[pull_index_++].second) - : std::experimental::nullopt; -} - -bool ExpandBfsSubcursor::ExpandToLocalVertex(storage::EdgeAddress edge, - VertexAccessor vertex) { - CHECK(vertex.address().is_local()) - << "ExpandToLocalVertex called with remote vertex"; - - edge = dba_.db().storage().LocalizedAddressIfPossible(edge); - SwitchAccessor(vertex, graph_view_); - - std::lock_guard<std::mutex> lock(mutex_); - auto got = processed_.emplace(vertex, edge); - if (got.second) { - to_visit_next_.emplace_back(edge, vertex); - } - return got.second; -} - -bool ExpandBfsSubcursor::ExpandToLocalVertex(storage::EdgeAddress edge, - storage::VertexAddress vertex) { - auto vertex_accessor = VertexAccessor(vertex, dba_); - return ExpandToLocalVertex(edge, VertexAccessor(vertex, dba_)); -} - -PathSegment ExpandBfsSubcursor::ReconstructPath( - storage::EdgeAddress edge_address) { - EdgeAccessor edge(edge_address, dba_); - CHECK(edge.address().is_local()) << "ReconstructPath called with remote edge"; - DCHECK(edge.from_addr().is_local()) << "`from` vertex should always be local"; - DCHECK(!edge.to_addr().is_local()) << "`to` vertex should be remote when " - "calling ReconstructPath with edge"; - - PathSegment result; - result.edges.emplace_back(edge); - ReconstructPathHelper(edge.from(), &result); - return result; -} - -PathSegment ExpandBfsSubcursor::ReconstructPath( - storage::VertexAddress vertex_addr) { - VertexAccessor vertex(vertex_addr, dba_); - CHECK(vertex.address().is_local()) - << "ReconstructPath called with remote vertex"; - PathSegment result; - ReconstructPathHelper(vertex, &result); - return result; -} - -void ExpandBfsSubcursor::ReconstructPathHelper(VertexAccessor vertex, - PathSegment *result) { - auto it = processed_.find(vertex); - CHECK(it != processed_.end()) - << "ReconstructPath called with unvisited vertex"; - - auto in_edge_address = it->second; - while (in_edge_address) { - // In-edge is stored on another worker. 
It should be returned to master from - // that worker, and path reconstruction should be continued there. - if (in_edge_address->is_remote()) { - result->next_edge = in_edge_address; - break; - } - - result->edges.emplace_back(*in_edge_address, dba_); - - auto &in_edge = result->edges.back(); - auto next_vertex_address = - in_edge.from_is(vertex) ? in_edge.to_addr() : in_edge.from_addr(); - - // We own the in-edge, but the next vertex on the path is stored on another - // worker. - if (next_vertex_address.is_remote()) { - result->next_vertex = next_vertex_address; - break; - } - - vertex = VertexAccessor(next_vertex_address, dba_); - in_edge_address = processed_[vertex]; - } -} - -bool ExpandBfsSubcursor::ExpandToVertex(EdgeAccessor edge, - VertexAccessor vertex) { - // TODO(mtomic): lambda filtering in distributed - return vertex.is_local() - ? ExpandToLocalVertex(edge.address(), vertex) - : dba_.db().bfs_subcursor_clients().ExpandToRemoteVertex( - subcursor_ids_, edge, vertex); -} - -bool ExpandBfsSubcursor::ExpandFromVertex(VertexAccessor vertex) { - bool expanded = false; - if (direction_ != query::EdgeAtom::Direction::IN) { - for (const EdgeAccessor &edge : vertex.out(&edge_types_)) - expanded |= ExpandToVertex(edge, edge.to()); - } - if (direction_ != query::EdgeAtom::Direction::OUT) { - for (const EdgeAccessor &edge : vertex.in(&edge_types_)) - expanded |= ExpandToVertex(edge, edge.from()); - } - return expanded; -} - -BfsSubcursorStorage::BfsSubcursorStorage(database::GraphDb *db) : db_(db) {} - -int64_t BfsSubcursorStorage::Create(tx::TransactionId tx_id, - query::EdgeAtom::Direction direction, - std::vector<storage::EdgeType> edge_types, - query::GraphView graph_view) { - std::lock_guard<std::mutex> lock(mutex_); - int64_t id = next_subcursor_id_++; - auto got = storage_.emplace( - id, std::make_unique<ExpandBfsSubcursor>( - db_, tx_id, direction, std::move(edge_types), graph_view)); - CHECK(got.second) << "Subcursor with ID " << id << " already exists"; - return id; -} - -void BfsSubcursorStorage::Erase(int64_t subcursor_id) { - std::lock_guard<std::mutex> lock(mutex_); - auto removed = storage_.erase(subcursor_id); - CHECK(removed == 1) << "Subcursor with ID " << subcursor_id << " not found"; -} - -ExpandBfsSubcursor *BfsSubcursorStorage::Get(int64_t subcursor_id) { - std::lock_guard<std::mutex> lock(mutex_); - auto it = storage_.find(subcursor_id); - CHECK(it != storage_.end()) - << "Subcursor with ID " << subcursor_id << " not found"; - return it->second.get(); -} - -} // namespace distributed diff --git a/src/distributed/bfs_subcursor.hpp b/src/distributed/bfs_subcursor.hpp deleted file mode 100644 index 7959e537d..000000000 --- a/src/distributed/bfs_subcursor.hpp +++ /dev/null @@ -1,141 +0,0 @@ -/// @file -#pragma once - -#include <map> -#include <memory> -#include <unordered_map> - -#include "glog/logging.h" - -#include "query/plan/operator.hpp" - -namespace database { -class GraphDb; -} - -namespace distributed { - -/// Path from BFS source to a vertex might span multiple workers. This struct -/// stores information describing segment of a path stored on a worker and -/// information necessary to continue path reconstruction on another worker. -struct PathSegment { - std::vector<EdgeAccessor> edges; - std::experimental::optional<storage::VertexAddress> next_vertex; - std::experimental::optional<storage::EdgeAddress> next_edge; -}; - -/// Class storing the worker-local state of distributed BFS traversal. 
For each -/// traversal (uniquely identified by cursor id), there is one instance of this -/// class per worker, and those instances communicate via RPC calls. -class ExpandBfsSubcursor { - public: - ExpandBfsSubcursor(database::GraphDb *db, tx::TransactionId tx_id, - query::EdgeAtom::Direction direction, - std::vector<storage::EdgeType> edge_types, - query::GraphView graph_view); - - // Stores subcursor ids of other workers. - void RegisterSubcursors(std::unordered_map<int16_t, int64_t> subcursor_ids) { - subcursor_ids_ = std::move(subcursor_ids); - } - - /// Sets the source to be used for new expansion. - void SetSource(storage::VertexAddress source_address); - - /// Notifies the subcursor that a new expansion should take place. - /// `to_visit_next_` must be moved to `to_visit_current_` synchronously for - /// all subcursors participating in expansion to avoid race condition with - /// `ExpandToRemoteVertex` RPC requests. Also used before setting new source - /// with `clear` set to true, to avoid a race condition similar to one - /// described above. - /// - /// @param clear if set to true, `Reset` will be called instead of moving - /// `to_visit_next_` - void PrepareForExpand(bool clear); - - /// Expands the BFS frontier once. Returns true if there was a successful - /// expansion. - bool ExpandLevel(); - - /// Pulls the next vertex in the current BFS frontier, if there is one. - std::experimental::optional<VertexAccessor> Pull(); - - /// Expands to a local vertex, if it wasn't already visited. Returns true if - /// expansion was successful. - bool ExpandToLocalVertex(storage::EdgeAddress edge, VertexAccessor vertex); - bool ExpandToLocalVertex(storage::EdgeAddress edge, - storage::VertexAddress vertex); - - /// Reconstruct the part of path ending with given edge, stored on this - /// worker. - PathSegment ReconstructPath(storage::EdgeAddress edge_address); - - /// Reconstruct the part of path to given vertex stored on this worker. - PathSegment ReconstructPath(storage::VertexAddress vertex_addr); - - private: - /// Used to reset subcursor state before starting expansion from new source. - void Reset(); - - /// Expands to a local or remote vertex, returns true if expansion was - /// successful. - bool ExpandToVertex(EdgeAccessor edge, VertexAccessor vertex); - - /// Tries to expand to all vertices connected to given one and returns true if - /// any of them was successful. - bool ExpandFromVertex(VertexAccessor vertex); - - /// Helper for path reconstruction doing the actual work. - void ReconstructPathHelper(VertexAccessor vertex, PathSegment *result); - - database::GraphDbAccessor dba_; - - /// IDs of subcursors on other workers, used when sending RPCs. - std::unordered_map<int16_t, int64_t> subcursor_ids_; - - query::EdgeAtom::Direction direction_; - std::vector<storage::EdgeType> edge_types_; - query::GraphView graph_view_; - - /// Mutex protecting `to_visit_next_` and `processed_`, because there is a - /// race between expansions done locally using `ExpandToLocalVertex` and - /// incoming `ExpandToRemoteVertex` RPCs. - std::mutex mutex_; - - /// List of visited vertices and their incoming edges. Local address is stored - /// for local edges, global address for remote edges. - std::unordered_map<VertexAccessor, - std::experimental::optional<storage::EdgeAddress>> - processed_; - - /// List of vertices at the current expansion level. 
- std::vector<std::pair<storage::EdgeAddress, VertexAccessor>> - to_visit_current_; - - /// List of unvisited vertices reachable from current expansion level. - std::vector<std::pair<storage::EdgeAddress, VertexAccessor>> to_visit_next_; - - /// Index of the vertex from `to_visit_next_` to return on next pull. - size_t pull_index_; -}; - -/// Thread-safe storage for BFS subcursors. -class BfsSubcursorStorage { - public: - explicit BfsSubcursorStorage(database::GraphDb *db); - - int64_t Create(tx::TransactionId tx_id, query::EdgeAtom::Direction direction, - std::vector<storage::EdgeType> edge_types, - query::GraphView graph_view); - void Erase(int64_t subcursor_id); - ExpandBfsSubcursor *Get(int64_t subcursor_id); - - private: - database::GraphDb *db_; - - std::mutex mutex_; - std::map<int64_t, std::unique_ptr<ExpandBfsSubcursor>> storage_; - int64_t next_subcursor_id_{0}; -}; - -} // namespace distributed diff --git a/src/distributed/cache.cpp b/src/distributed/cache.cpp deleted file mode 100644 index dc3e7721b..000000000 --- a/src/distributed/cache.cpp +++ /dev/null @@ -1,99 +0,0 @@ - -#include "glog/logging.h" - -#include "database/storage.hpp" -#include "distributed/cache.hpp" -#include "storage/edge.hpp" -#include "storage/vertex.hpp" - -namespace distributed { - -template <typename TRecord> -TRecord *Cache<TRecord>::FindNew(gid::Gid gid) { - std::lock_guard<std::mutex> guard{lock_}; - auto found = cache_.find(gid); - DCHECK(found != cache_.end()) - << "FindNew for uninitialized remote Vertex/Edge"; - auto &pair = found->second; - if (!pair.second) { - pair.second = std::unique_ptr<TRecord>(pair.first->CloneData()); - } - return pair.second.get(); -} - -template <typename TRecord> -void Cache<TRecord>::FindSetOldNew(tx::TransactionId tx_id, int worker_id, - gid::Gid gid, TRecord *&old_record, - TRecord *&new_record) { - { - std::lock_guard<std::mutex> guard(lock_); - auto found = cache_.find(gid); - if (found != cache_.end()) { - old_record = found->second.first.get(); - new_record = found->second.second.get(); - return; - } - } - - auto remote = data_clients_.RemoteElement<TRecord>(worker_id, tx_id, gid); - LocalizeAddresses(*remote); - - // This logic is a bit strange because we need to make sure that someone - // else didn't get a response and updated the cache before we did and we - // need a lock for that, but we also need to check if we can now return - // that result - otherwise we could get incosistent results for remote - // FindSetOldNew - std::lock_guard<std::mutex> guard(lock_); - auto it_pair = cache_.emplace( - gid, std::make_pair<rec_uptr, rec_uptr>(std::move(remote), nullptr)); - - old_record = it_pair.first->second.first.get(); - new_record = it_pair.first->second.second.get(); -} - -template <typename TRecord> -void Cache<TRecord>::emplace(gid::Gid gid, rec_uptr old_record, - rec_uptr new_record) { - if (old_record) LocalizeAddresses(*old_record); - if (new_record) LocalizeAddresses(*new_record); - - std::lock_guard<std::mutex> guard{lock_}; - // We can't replace existing data because some accessors might be using - // it. - // TODO - consider if it's necessary and OK to copy just the data content. 
- auto found = cache_.find(gid); - if (found != cache_.end()) - return; - else - cache_[gid] = std::make_pair(std::move(old_record), std::move(new_record)); -} - -template <typename TRecord> -void Cache<TRecord>::ClearCache() { - std::lock_guard<std::mutex> guard{lock_}; - cache_.clear(); -} - -template <> -void Cache<Vertex>::LocalizeAddresses(Vertex &vertex) { - auto localize_edges = [this](auto &edges) { - for (auto &element : edges) { - element.vertex = storage_.LocalizedAddressIfPossible(element.vertex); - element.edge = storage_.LocalizedAddressIfPossible(element.edge); - } - }; - - localize_edges(vertex.in_.storage()); - localize_edges(vertex.out_.storage()); -} - -template <> -void Cache<Edge>::LocalizeAddresses(Edge &edge) { - edge.from_ = storage_.LocalizedAddressIfPossible(edge.from_); - edge.to_ = storage_.LocalizedAddressIfPossible(edge.to_); -} - -template class Cache<Vertex>; -template class Cache<Edge>; - -} // namespace distributed diff --git a/src/distributed/cache.hpp b/src/distributed/cache.hpp deleted file mode 100644 index d41eb1ca2..000000000 --- a/src/distributed/cache.hpp +++ /dev/null @@ -1,62 +0,0 @@ -#pragma once - -#include <mutex> -#include <unordered_map> - -#include "distributed/data_rpc_clients.hpp" -#include "storage/gid.hpp" - -namespace database { -class Storage; -} - -namespace distributed { - -/** - * Used for caching Vertices and Edges that are stored on another worker in a - * distributed system. Maps global IDs to (old, new) Vertex/Edge pointer - * pairs. It is possible that either "old" or "new" are nullptrs, but at - * least one must be not-null. The Cache is the owner of TRecord - * objects it points to. - * - * @tparam TRecord - Edge or Vertex - */ -template <typename TRecord> -class Cache { - using rec_uptr = std::unique_ptr<TRecord>; - - public: - Cache(database::Storage &storage, distributed::DataRpcClients &data_clients) - : storage_(storage), data_clients_(data_clients) {} - - /// Returns the new data for the given ID. Creates it (as copy of old) if - /// necessary. - TRecord *FindNew(gid::Gid gid); - - /// For the Vertex/Edge with the given global ID, looks for the data visible - /// from the given transaction's ID and command ID, and caches it. Sets the - /// given pointers to point to the fetched data. Analogue to - /// mvcc::VersionList::find_set_old_new. - void FindSetOldNew(tx::TransactionId tx_id, int worker_id, gid::Gid gid, - TRecord *&old_record, TRecord *&new_record); - - /// Sets the given records as (new, old) data for the given gid. - void emplace(gid::Gid gid, rec_uptr old_record, rec_uptr new_record); - - /// Removes all the data from the cache. - void ClearCache(); - - private: - database::Storage &storage_; - - std::mutex lock_; - distributed::DataRpcClients &data_clients_; - // TODO it'd be better if we had VertexData and EdgeData in here, as opposed - // to Vertex and Edge. - std::unordered_map<gid::Gid, std::pair<rec_uptr, rec_uptr>> cache_; - - // Localizes all the addresses in the record. 
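A minimal usage sketch of the remote-record cache above, assuming `storage`, `data_clients`, `tx_id`, `worker_id` and `gid` are values already available at the call site:
distributed::Cache<Vertex> cache(storage, data_clients);
Vertex *old_record = nullptr;
Vertex *new_record = nullptr;
// Fetches the remote vertex over RPC on a cache miss and stores the
// (old, new) record pair locally.
cache.FindSetOldNew(tx_id, worker_id, gid, old_record, new_record);
// Lazily creates a writable copy of the old record for local updates.
Vertex *writable = cache.FindNew(gid);
// Drops all cached remote data once the transaction no longer needs it.
cache.ClearCache();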
- void LocalizeAddresses(TRecord &record); -}; - -} // namespace distributed diff --git a/src/distributed/cluster_discovery_master.cpp b/src/distributed/cluster_discovery_master.cpp deleted file mode 100644 index 9c03a1e6f..000000000 --- a/src/distributed/cluster_discovery_master.cpp +++ /dev/null @@ -1,42 +0,0 @@ -#include "communication/rpc/client_pool.hpp" -#include "distributed/cluster_discovery_master.hpp" -#include "distributed/coordination_rpc_messages.hpp" - -namespace distributed { -using Server = communication::rpc::Server; - -ClusterDiscoveryMaster::ClusterDiscoveryMaster( - Server &server, MasterCoordination &coordination, - RpcWorkerClients &rpc_worker_clients) - : server_(server), - coordination_(coordination), - rpc_worker_clients_(rpc_worker_clients) { - server_.Register<RegisterWorkerRpc>([this](const auto &req_reader, - auto *res_builder) { - RegisterWorkerReq req; - req.Load(req_reader); - bool registration_successful = - this->coordination_.RegisterWorker(req.desired_worker_id, req.endpoint); - - if (registration_successful) { - rpc_worker_clients_.ExecuteOnWorkers<void>( - 0, [req](int worker_id, communication::rpc::ClientPool &client_pool) { - auto result = client_pool.Call<ClusterDiscoveryRpc>( - req.desired_worker_id, req.endpoint); - CHECK(result) << "ClusterDiscoveryRpc failed"; - }); - } - - RegisterWorkerRes res(registration_successful, - this->coordination_.RecoveryInfo(), - this->coordination_.GetWorkers()); - res.Save(res_builder); - }); - - server_.Register<NotifyWorkerRecoveredRpc>( - [this](const auto &req_reader, auto *res_builder) { - this->coordination_.WorkerRecovered(req_reader.getMember()); - }); -} - -} // namespace distributed diff --git a/src/distributed/cluster_discovery_master.hpp b/src/distributed/cluster_discovery_master.hpp deleted file mode 100644 index cc402e357..000000000 --- a/src/distributed/cluster_discovery_master.hpp +++ /dev/null @@ -1,27 +0,0 @@ -#pragma once - -#include "communication/rpc/server.hpp" -#include "distributed/coordination_master.hpp" -#include "distributed/rpc_worker_clients.hpp" - -namespace distributed { -using Server = communication::rpc::Server; - -/** Handle cluster discovery on master. - * - * Cluster discovery on master handles worker registration and broadcasts new - * worker information to already registered workers, and already registered - * worker information to the new worker. 
*/ -class ClusterDiscoveryMaster final { - public: - ClusterDiscoveryMaster(Server &server, MasterCoordination &coordination, - RpcWorkerClients &rpc_worker_clients); - - private: - Server &server_; - MasterCoordination &coordination_; - RpcWorkerClients &rpc_worker_clients_; -}; - -} // namespace distributed diff --git a/src/distributed/cluster_discovery_worker.cpp b/src/distributed/cluster_discovery_worker.cpp deleted file mode 100644 index 85746797c..000000000 --- a/src/distributed/cluster_discovery_worker.cpp +++ /dev/null @@ -1,41 +0,0 @@ -#include "distributed/cluster_discovery_worker.hpp" -#include "distributed/coordination_rpc_messages.hpp" - -namespace distributed { -using Server = communication::rpc::Server; - -ClusterDiscoveryWorker::ClusterDiscoveryWorker( - Server &server, WorkerCoordination &coordination, - communication::rpc::ClientPool &client_pool) - : server_(server), coordination_(coordination), client_pool_(client_pool) { - server_.Register<ClusterDiscoveryRpc>( - [this](const auto &req_reader, auto *res_builder) { - ClusterDiscoveryReq req; - req.Load(req_reader); - this->coordination_.RegisterWorker(req.worker_id, req.endpoint); - }); -} - -void ClusterDiscoveryWorker::RegisterWorker(int worker_id) { - auto result = - client_pool_.Call<RegisterWorkerRpc>(worker_id, server_.endpoint()); - CHECK(result) << "RegisterWorkerRpc failed"; - CHECK(result->registration_successful) - << "Unable to assign requested ID (" << worker_id << ") to worker!"; - - worker_id_ = worker_id; - for (auto &kv : result->workers) { - coordination_.RegisterWorker(kv.first, kv.second); - } - recovery_info_ = result->recovery_info; -} - -void ClusterDiscoveryWorker::NotifyWorkerRecovered() { - CHECK(worker_id_ >= 0) - << "Worker ID is not yet assigned; perform registration before " - "notifying that recovery has finished"; - auto result = client_pool_.Call<NotifyWorkerRecoveredRpc>(worker_id_); - CHECK(result) << "NotifyWorkerRecoveredRpc failed"; -} - -} // namespace distributed diff --git a/src/distributed/cluster_discovery_worker.hpp b/src/distributed/cluster_discovery_worker.hpp deleted file mode 100644 index 19fb98be1..000000000 --- a/src/distributed/cluster_discovery_worker.hpp +++ /dev/null @@ -1,50 +0,0 @@ -#pragma once - -#include <experimental/optional> - -#include "communication/rpc/client_pool.hpp" -#include "communication/rpc/server.hpp" -#include "distributed/coordination_worker.hpp" -#include "durability/recovery.hpp" - -namespace distributed { -using Server = communication::rpc::Server; -using ClientPool = communication::rpc::ClientPool; - -/** Handle cluster discovery on worker. - * - * Cluster discovery on a worker handles worker registration by sending an RPC - * request to the master and processing the received RPC response with the - * other workers' information. - */ -class ClusterDiscoveryWorker final { - public: - ClusterDiscoveryWorker(Server &server, WorkerCoordination &coordination, - ClientPool &client_pool); - - /** - * Registers a worker with the master. - * - * @param worker_id - Desired ID. If the master can't assign the desired - * worker ID, the worker will exit. - */ - void RegisterWorker(int worker_id); - - /** - * Notifies the master that the worker finished recovering. Assumes that the - * worker was already registered with the master. - */ - void NotifyWorkerRecovered(); - - /** Returns the recovery info. Valid only after registration.
*/ - auto recovery_info() const { return recovery_info_; } - - private: - int worker_id_{-1}; - Server &server_; - WorkerCoordination &coordination_; - communication::rpc::ClientPool &client_pool_; - std::experimental::optional<durability::RecoveryInfo> recovery_info_; -}; - -} // namespace distributed diff --git a/src/distributed/coordination.cpp b/src/distributed/coordination.cpp deleted file mode 100644 index c112aeecf..000000000 --- a/src/distributed/coordination.cpp +++ /dev/null @@ -1,34 +0,0 @@ -#include "glog/logging.h" - -#include "distributed/coordination.hpp" - -namespace distributed { -using Endpoint = io::network::Endpoint; - -Coordination::Coordination(const Endpoint &master_endpoint) { - // The master is always worker 0. - workers_.emplace(0, master_endpoint); -} - -Endpoint Coordination::GetEndpoint(int worker_id) { - auto found = workers_.find(worker_id); - CHECK(found != workers_.end()) << "No endpoint registered for worker id: " - << worker_id; - return found->second; -} - -std::vector<int> Coordination::GetWorkerIds() const { - std::vector<int> worker_ids; - for (auto worker : workers_) worker_ids.push_back(worker.first); - return worker_ids; -} - -void Coordination::AddWorker(int worker_id, Endpoint endpoint) { - workers_.emplace(worker_id, endpoint); -} - -std::unordered_map<int, Endpoint> Coordination::GetWorkers() { - return workers_; -} - -} // namespace distributed diff --git a/src/distributed/coordination.hpp b/src/distributed/coordination.hpp deleted file mode 100644 index a7018313b..000000000 --- a/src/distributed/coordination.hpp +++ /dev/null @@ -1,36 +0,0 @@ -#pragma once - -#include <unordered_map> -#include <vector> - -#include "io/network/endpoint.hpp" - -namespace distributed { - -/** Coordination base class. This class is not thread safe. */ -class Coordination { - public: - explicit Coordination(const io::network::Endpoint &master_endpoint); - - /** Gets the endpoint for the given worker ID from the master. */ - io::network::Endpoint GetEndpoint(int worker_id); - - /** Returns all workers id, this includes master id(0). */ - std::vector<int> GetWorkerIds() const; - - /** Gets the mapping of worker id to worker endpoint including master (worker - * id = 0). - */ - std::unordered_map<int, io::network::Endpoint> GetWorkers(); - - protected: - ~Coordination() {} - - /** Adds a worker to coordination. */ - void AddWorker(int worker_id, io::network::Endpoint endpoint); - - private: - std::unordered_map<int, io::network::Endpoint> workers_; -}; - -} // namespace distributed diff --git a/src/distributed/coordination_master.cpp b/src/distributed/coordination_master.cpp deleted file mode 100644 index ef90d690b..000000000 --- a/src/distributed/coordination_master.cpp +++ /dev/null @@ -1,92 +0,0 @@ -#include <chrono> -#include <thread> - -#include "glog/logging.h" - -#include "communication/rpc/client.hpp" -#include "distributed/coordination_master.hpp" -#include "distributed/coordination_rpc_messages.hpp" -#include "io/network/utils.hpp" - -namespace distributed { - -MasterCoordination::MasterCoordination(const Endpoint &master_endpoint) - : Coordination(master_endpoint) {} - -bool MasterCoordination::RegisterWorker(int desired_worker_id, - Endpoint endpoint) { - // Worker's can't register before the recovery phase on the master is done to - // ensure the whole cluster is in a consistent state. 
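For reference, the worker-side counterpart of this registration handshake (using the ClusterDiscoveryWorker API above) looks roughly like the sketch below; `server`, `coordination`, `client_pool` and `my_worker_id` are assumed to exist:
distributed::ClusterDiscoveryWorker discovery(server, coordination, client_pool);
// Blocks until the master accepts (or rejects) the requested worker ID;
// the master in turn delays this until its own recovery phase is done.
discovery.RegisterWorker(my_worker_id);
// Run local durability recovery based on discovery.recovery_info(), then
// tell the master that this worker has fully recovered.
discovery.NotifyWorkerRecovered();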
- while (true) { - { - std::lock_guard<std::mutex> guard(lock_); - if (recovery_done_) break; - } - std::this_thread::sleep_for(std::chrono::milliseconds(200)); - } - - std::lock_guard<std::mutex> guard(lock_); - auto workers = GetWorkers(); - // Check if the desired worker id already exists. - if (workers.find(desired_worker_id) != workers.end()) { - LOG(WARNING) << "Unable to assign requested ID (" << desired_worker_id - << ") to worker at: " << endpoint; - // If the desired worker ID is already assigned, return -1 and don't add - // that worker to master coordination. - return false; - } - - AddWorker(desired_worker_id, endpoint); - return true; -} - -void MasterCoordination::WorkerRecovered(int worker_id) { - CHECK(recovered_workers_.insert(worker_id).second) - << "Worker already notified about finishing recovery"; -} - -Endpoint MasterCoordination::GetEndpoint(int worker_id) { - std::lock_guard<std::mutex> guard(lock_); - return Coordination::GetEndpoint(worker_id); -} - -MasterCoordination::~MasterCoordination() { - using namespace std::chrono_literals; - std::lock_guard<std::mutex> guard(lock_); - auto workers = GetWorkers(); - for (const auto &kv : workers) { - // Skip master (self). - if (kv.first == 0) continue; - communication::rpc::Client client(kv.second); - auto result = client.Call<StopWorkerRpc>(); - CHECK(result) << "StopWorkerRpc failed for worker: " << kv.first; - } - - // Make sure all workers have died. - for (const auto &kv : workers) { - // Skip master (self). - if (kv.first == 0) continue; - while (io::network::CanEstablishConnection(kv.second)) - std::this_thread::sleep_for(0.5s); - } -} - -void MasterCoordination::SetRecoveryInfo( - std::experimental::optional<durability::RecoveryInfo> info) { - std::lock_guard<std::mutex> guard(lock_); - recovery_done_ = true; - recovery_info_ = info; -} - -int MasterCoordination::CountRecoveredWorkers() const { - return recovered_workers_.size(); -} - -std::experimental::optional<durability::RecoveryInfo> -MasterCoordination::RecoveryInfo() const { - std::lock_guard<std::mutex> guard(lock_); - CHECK(recovery_done_) << "RecoveryInfo requested before it's available"; - return recovery_info_; -} - -} // namespace distributed diff --git a/src/distributed/coordination_master.hpp b/src/distributed/coordination_master.hpp deleted file mode 100644 index e5c8b5895..000000000 --- a/src/distributed/coordination_master.hpp +++ /dev/null @@ -1,61 +0,0 @@ -#pragma once - -#include <experimental/optional> -#include <mutex> -#include <set> -#include <unordered_map> - -#include "distributed/coordination.hpp" -#include "durability/recovery.hpp" -#include "io/network/endpoint.hpp" - -namespace distributed { -using Endpoint = io::network::Endpoint; - -/** Handles worker registration, getting of other workers' endpoints and - * coordinated shutdown in a distributed memgraph. Master side. */ -class MasterCoordination final : public Coordination { - public: - explicit MasterCoordination(const Endpoint &master_endpoint); - - /** Shuts down all the workers and this master server. */ - ~MasterCoordination(); - - /** Registers a new worker with this master coordination. - * - * @param desired_worker_id - The ID the worker would like to have. - * @return True if the desired ID for the worker is available, or false - * if the desired ID is already taken. - */ - bool RegisterWorker(int desired_worker_id, Endpoint endpoint); - - /* - * Worker `worker_id` finished with recovering, adds it to the set of - * recovered workers. 
- */ - void WorkerRecovered(int worker_id); - - Endpoint GetEndpoint(int worker_id); - - /// Sets the recovery info. nullopt indicates nothing was recovered. - void SetRecoveryInfo( - std::experimental::optional<durability::RecoveryInfo> info); - - std::experimental::optional<durability::RecoveryInfo> RecoveryInfo() const; - - int CountRecoveredWorkers() const; - - private: - // Most master functions aren't thread-safe. - mutable std::mutex lock_; - - /// Durabiliry recovery info. - /// Indicates if the recovery phase is done. - bool recovery_done_{false}; - /// Set of workers that finished sucesfully recovering - std::set<int> recovered_workers_; - /// If nullopt nothing was recovered. - std::experimental::optional<durability::RecoveryInfo> recovery_info_; -}; - -} // namespace distributed diff --git a/src/distributed/coordination_rpc_messages.lcp b/src/distributed/coordination_rpc_messages.lcp deleted file mode 100644 index 8237740cb..000000000 --- a/src/distributed/coordination_rpc_messages.lcp +++ /dev/null @@ -1,72 +0,0 @@ -#>cpp -#pragma once - -#include <experimental/optional> -#include <unordered_map> - -#include "communication/rpc/messages.hpp" -#include "distributed/coordination_rpc_messages.capnp.h" -#include "durability/recovery.hpp" -#include "io/network/endpoint.hpp" -cpp<# - -(lcp:namespace distributed) - -(lcp:capnp-namespace "distributed") - -(lcp:capnp-import 'dur "/durability/recovery.capnp") -(lcp:capnp-import 'io "/io/network/endpoint.capnp") -(lcp:capnp-import 'utils "/utils/serialization.capnp") - -(lcp:define-rpc register-worker - (:request - ((desired-worker-id :int16_t) - (endpoint "io::network::Endpoint" :capnp-type "Io.Endpoint"))) - (:response - ((registration-successful :bool) - (recovery-info "std::experimental::optional<durability::RecoveryInfo>" - :capnp-type "Utils.Optional(Dur.RecoveryInfo)" - :capnp-save (lcp:capnp-save-optional "durability::capnp::RecoveryInfo" - "durability::RecoveryInfo") - :capnp-load (lcp:capnp-load-optional "durability::capnp::RecoveryInfo" - "durability::RecoveryInfo")) - (workers "std::unordered_map<int, io::network::Endpoint>" - :capnp-type "Utils.Map(Utils.BoxInt16, Io.Endpoint)" - :capnp-save - (lambda (builder member) - #>cpp - utils::SaveMap<utils::capnp::BoxInt16, io::network::capnp::Endpoint>(${member}, &${builder}, - [](auto *builder, const auto &entry) { - auto key_builder = builder->initKey(); - key_builder.setValue(entry.first); - auto value_builder = builder->initValue(); - entry.second.Save(&value_builder); - }); - cpp<#) - :capnp-load - (lambda (reader member) - #>cpp - utils::LoadMap<utils::capnp::BoxInt16, io::network::capnp::Endpoint>(&${member}, ${reader}, - [](const auto &reader) { - io::network::Endpoint value; - value.Load(reader.getValue()); - return std::make_pair(reader.getKey().getValue(), value); - }); - cpp<#))))) - -(lcp:define-rpc cluster-discovery - (:request - ((worker-id :int16_t) - (endpoint "io::network::Endpoint" :capnp-type "Io.Endpoint"))) - (:response ())) - -(lcp:define-rpc stop-worker - (:request ()) - (:response ())) - -(lcp:define-rpc notify-worker-recovered - (:request ((member :int64_t))) - (:response ())) - -(lcp:pop-namespace) ;; distributed - diff --git a/src/distributed/coordination_worker.cpp b/src/distributed/coordination_worker.cpp deleted file mode 100644 index a094a20c4..000000000 --- a/src/distributed/coordination_worker.cpp +++ /dev/null @@ -1,46 +0,0 @@ -#include <chrono> -#include <condition_variable> -#include <mutex> -#include <thread> - -#include "glog/logging.h" - 
-#include "distributed/coordination_rpc_messages.hpp" -#include "distributed/coordination_worker.hpp" - -namespace distributed { - -using namespace std::literals::chrono_literals; - -WorkerCoordination::WorkerCoordination(communication::rpc::Server &server, - const Endpoint &master_endpoint) - : Coordination(master_endpoint), server_(server) {} - -void WorkerCoordination::RegisterWorker(int worker_id, Endpoint endpoint) { - std::lock_guard<std::mutex> guard(lock_); - AddWorker(worker_id, endpoint); -} - -void WorkerCoordination::WaitForShutdown() { - using namespace std::chrono_literals; - std::mutex mutex; - std::condition_variable cv; - bool shutdown = false; - - server_.Register<StopWorkerRpc>([&](const auto &req_reader, auto *res_builder) { - std::unique_lock<std::mutex> lk(mutex); - shutdown = true; - lk.unlock(); - cv.notify_one(); - }); - - std::unique_lock<std::mutex> lk(mutex); - cv.wait(lk, [&shutdown] { return shutdown; }); -} - -io::network::Endpoint WorkerCoordination::GetEndpoint(int worker_id) { - std::lock_guard<std::mutex> guard(lock_); - return Coordination::GetEndpoint(worker_id); -} - -} // namespace distributed diff --git a/src/distributed/coordination_worker.hpp b/src/distributed/coordination_worker.hpp deleted file mode 100644 index d18e44e74..000000000 --- a/src/distributed/coordination_worker.hpp +++ /dev/null @@ -1,33 +0,0 @@ -#pragma once - -#include <mutex> -#include <unordered_map> - -#include "communication/rpc/server.hpp" -#include "distributed/coordination.hpp" - -namespace distributed { - -/** Handles worker registration, getting of other workers' endpoints and - * coordinated shutdown in a distributed memgraph. Worker side. */ -class WorkerCoordination final : public Coordination { - using Endpoint = io::network::Endpoint; - - public: - WorkerCoordination(communication::rpc::Server &server, - const Endpoint &master_endpoint); - - /** Registers the worker with the given endpoint. */ - void RegisterWorker(int worker_id, Endpoint endpoint); - - /** Starts listening for a remote shutdown command (issued by the master). - * Blocks the calling thread until that has finished. 
*/ - void WaitForShutdown(); - - Endpoint GetEndpoint(int worker_id); - - private: - communication::rpc::Server &server_; - mutable std::mutex lock_; -}; -} // namespace distributed diff --git a/src/distributed/data_manager.cpp b/src/distributed/data_manager.cpp deleted file mode 100644 index 9a619d692..000000000 --- a/src/distributed/data_manager.cpp +++ /dev/null @@ -1,54 +0,0 @@ -#include "database/storage.hpp" -#include "distributed/data_manager.hpp" - -namespace distributed { - -template <typename TRecord> -Cache<TRecord> &DataManager::GetCache(CacheT<TRecord> &collection, - tx::TransactionId tx_id) { - auto access = collection.access(); - auto found = access.find(tx_id); - if (found != access.end()) return found->second; - - return access - .emplace( - tx_id, std::make_tuple(tx_id), - std::make_tuple(std::ref(db_.storage()), std::ref(data_clients_))) - .first->second; -} - -template <> -Cache<Vertex> &DataManager::Elements<Vertex>(tx::TransactionId tx_id) { - return GetCache(vertices_caches_, tx_id); -} - -template <> -Cache<Edge> &DataManager::Elements<Edge>(tx::TransactionId tx_id) { - return GetCache(edges_caches_, tx_id); -} - -DataManager::DataManager(database::GraphDb &db, - distributed::DataRpcClients &data_clients) - : db_(db), data_clients_(data_clients) {} - -void DataManager::ClearCacheForSingleTransaction(tx::TransactionId tx_id) { - Elements<Vertex>(tx_id).ClearCache(); - Elements<Edge>(tx_id).ClearCache(); -} - -void DataManager::ClearTransactionalCache(tx::TransactionId oldest_active) { - auto vertex_access = vertices_caches_.access(); - for (auto &kv : vertex_access) { - if (kv.first < oldest_active) { - vertex_access.remove(kv.first); - } - } - auto edge_access = edges_caches_.access(); - for (auto &kv : edge_access) { - if (kv.first < oldest_active) { - edge_access.remove(kv.first); - } - } -} - -} // namespace distributed diff --git a/src/distributed/data_manager.hpp b/src/distributed/data_manager.hpp deleted file mode 100644 index 4f2888ac2..000000000 --- a/src/distributed/data_manager.hpp +++ /dev/null @@ -1,45 +0,0 @@ -#pragma once - -#include "data_structures/concurrent/concurrent_map.hpp" -#include "database/graph_db.hpp" -#include "distributed/cache.hpp" -#include "distributed/data_rpc_clients.hpp" -#include "transactions/type.hpp" - -class Vertex; -class Edge; - -namespace distributed { - -/// Handles remote data caches for edges and vertices, per transaction. -class DataManager { - template <typename TRecord> - using CacheT = ConcurrentMap<tx::TransactionId, Cache<TRecord>>; - - // Helper, gets or inserts a data cache for the given transaction. - template <typename TRecord> - Cache<TRecord> &GetCache(CacheT<TRecord> &collection, - tx::TransactionId tx_id); - - public: - DataManager(database::GraphDb &db, distributed::DataRpcClients &data_clients); - - /// Gets or creates the remote vertex/edge cache for the given transaction. - template <typename TRecord> - Cache<TRecord> &Elements(tx::TransactionId tx_id); - - /// Removes all the caches for a single transaction. - void ClearCacheForSingleTransaction(tx::TransactionId tx_id); - - /// Clears the cache of local transactions that have expired. The signature of - /// this method is dictated by `distributed::TransactionalCacheCleaner`. 
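A short sketch of how the data manager above is typically exercised per transaction; `db`, `data_clients`, `tx_id`, `worker_id`, `gid` and `oldest_active` are assumed values:
distributed::DataManager manager(db, data_clients);
// Per-transaction cache of remote vertices, created on first access.
auto &vertex_cache = manager.Elements<Vertex>(tx_id);
Vertex *old_record = nullptr;
Vertex *new_record = nullptr;
vertex_cache.FindSetOldNew(tx_id, worker_id, gid, old_record, new_record);
// Drop the caches for this transaction once it commits or aborts ...
manager.ClearCacheForSingleTransaction(tx_id);
// ... or periodically evict everything older than the oldest active tx.
manager.ClearTransactionalCache(oldest_active);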
- void ClearTransactionalCache(tx::TransactionId oldest_active); - - private: - database::GraphDb &db_; - DataRpcClients &data_clients_; - CacheT<Vertex> vertices_caches_; - CacheT<Edge> edges_caches_; -}; - -} // namespace distributed diff --git a/src/distributed/data_rpc_clients.cpp b/src/distributed/data_rpc_clients.cpp deleted file mode 100644 index ac3ffa4ff..000000000 --- a/src/distributed/data_rpc_clients.cpp +++ /dev/null @@ -1,49 +0,0 @@ -#include <unordered_map> - -#include "distributed/data_rpc_clients.hpp" -#include "distributed/data_rpc_messages.hpp" -#include "storage/edge.hpp" -#include "storage/vertex.hpp" - -namespace distributed { - -template <> -std::unique_ptr<Edge> DataRpcClients::RemoteElement(int worker_id, - tx::TransactionId tx_id, - gid::Gid gid) { - auto response = - clients_.GetClientPool(worker_id).Call<EdgeRpc>(TxGidPair{tx_id, gid}); - CHECK(response) << "EdgeRpc failed"; - return std::move(response->edge_output); -} - -template <> -std::unique_ptr<Vertex> DataRpcClients::RemoteElement(int worker_id, - tx::TransactionId tx_id, - gid::Gid gid) { - auto response = - clients_.GetClientPool(worker_id).Call<VertexRpc>(TxGidPair{tx_id, gid}); - CHECK(response) << "VertexRpc failed"; - return std::move(response->vertex_output); -} - -std::unordered_map<int, int64_t> DataRpcClients::VertexCounts( - tx::TransactionId tx_id) { - auto future_results = clients_.ExecuteOnWorkers<std::pair<int, int64_t>>( - -1, [tx_id](int worker_id, communication::rpc::ClientPool &client_pool) { - auto response = client_pool.Call<VertexCountRpc>(tx_id); - CHECK(response) << "VertexCountRpc failed"; - return std::make_pair(worker_id, response->member); - }); - - std::unordered_map<int, int64_t> results; - for (auto &result : future_results) { - auto result_pair = result.get(); - int worker = result_pair.first; - int vertex_count = result_pair.second; - results[worker] = vertex_count; - } - return results; -} - -} // namespace distributed diff --git a/src/distributed/data_rpc_clients.hpp b/src/distributed/data_rpc_clients.hpp deleted file mode 100644 index 94bbd56a2..000000000 --- a/src/distributed/data_rpc_clients.hpp +++ /dev/null @@ -1,31 +0,0 @@ -#pragma once - -#include <mutex> -#include <utility> - -#include "distributed/rpc_worker_clients.hpp" -#include "storage/gid.hpp" -#include "transactions/type.hpp" - -namespace distributed { - -/// Provides access to other worker's data. -class DataRpcClients { - public: - DataRpcClients(RpcWorkerClients &clients) : clients_(clients) {} - /// Returns a remote worker's record (vertex/edge) data for the given params. - /// That worker must own the vertex/edge for the given id, and that vertex - /// must be visible in given transaction. - template <typename TRecord> - std::unique_ptr<TRecord> RemoteElement(int worker_id, tx::TransactionId tx_id, - gid::Gid gid); - - /// Returns (worker_id, vertex_count) for each worker and the number of - /// vertices on it from the perspective of transaction `tx_id`. 
- std::unordered_map<int, int64_t> VertexCounts(tx::TransactionId tx_id); - - private: - RpcWorkerClients &clients_; -}; - -} // namespace distributed diff --git a/src/distributed/data_rpc_messages.lcp b/src/distributed/data_rpc_messages.lcp deleted file mode 100644 index 5f0f1ca3f..000000000 --- a/src/distributed/data_rpc_messages.lcp +++ /dev/null @@ -1,76 +0,0 @@ -#>cpp -#pragma once - -#include <memory> -#include <string> - -#include "communication/rpc/messages.hpp" -#include "distributed/data_rpc_messages.capnp.h" -#include "distributed/serialization.hpp" -#include "storage/edge.hpp" -#include "storage/gid.hpp" -#include "storage/vertex.hpp" -#include "transactions/type.hpp" -cpp<# - -(lcp:namespace distributed) - -(lcp:capnp-namespace "distributed") - -(lcp:capnp-import 'utils "/utils/serialization.capnp") -(lcp:capnp-import 'dist "/distributed/serialization.capnp") - -(lcp:define-struct tx-gid-pair () - ((tx-id "tx::TransactionId" :capnp-type "UInt64") - (gid "gid::Gid" :capnp-type "UInt64")) - (:serialize :capnp)) - -(lcp:define-rpc vertex - (:request ((member "TxGidPair"))) - (:response - ((vertex-input "const Vertex *" - :save-fun "SaveVertex(ar, *vertex_input, worker_id);" :load-fun "" - :capnp-type "Dist.Vertex" - :capnp-save - (lambda (builder member) - #>cpp - SaveVertex(*${member}, &${builder}, worker_id); - cpp<#) - :capnp-load - (lambda (reader member) - (declare (ignore member)) - #>cpp - vertex_output = LoadVertex<const capnp::Vertex::Reader>(${reader}); - cpp<#)) - (worker-id :int64_t :save-fun "" :load-fun "" :capnp-save :dont-save) - (vertex-output "std::unique_ptr<Vertex>" :initarg nil - :save-fun "" :load-fun "vertex_output = LoadVertex(ar);" - :capnp-save :dont-save)))) - -(lcp:define-rpc edge - (:request ((member "TxGidPair"))) - (:response - ((edge-input "const Edge *" - :save-fun "SaveEdge(ar, *edge_input, worker_id);" :load-fun "" - :capnp-type "Dist.Edge" - :capnp-save - (lambda (builder member) - #>cpp - SaveEdge(*${member}, &${builder}, worker_id); - cpp<#) - :capnp-load - (lambda (reader member) - (declare (ignore member)) - #>cpp - edge_output = LoadEdge<const capnp::Edge::Reader>(${reader}); - cpp<#)) - (worker-id :int64_t :save-fun "" :load-fun "" :capnp-save :dont-save) - (edge-output "std::unique_ptr<Edge>" :initarg nil - :save-fun "" :load-fun "edge_output = LoadEdge(ar);" - :capnp-save :dont-save)))) - -(lcp:define-rpc vertex-count - (:request ((member "tx::TransactionId" :capnp-type "UInt64"))) - (:response ((member :int64_t)))) - -(lcp:pop-namespace) ;; distributed diff --git a/src/distributed/data_rpc_server.cpp b/src/distributed/data_rpc_server.cpp deleted file mode 100644 index 62c09ce4c..000000000 --- a/src/distributed/data_rpc_server.cpp +++ /dev/null @@ -1,43 +0,0 @@ -#include <memory> - -#include "data_rpc_server.hpp" -#include "database/graph_db_accessor.hpp" -#include "distributed/data_rpc_messages.hpp" - -namespace distributed { - -DataRpcServer::DataRpcServer(database::GraphDb &db, - communication::rpc::Server &server) - : db_(db), rpc_server_(server) { - rpc_server_.Register<VertexRpc>( - [this](const auto &req_reader, auto *res_builder) { - database::GraphDbAccessor dba(db_, req_reader.getMember().getTxId()); - auto vertex = dba.FindVertex(req_reader.getMember().getGid(), false); - CHECK(vertex.GetOld()) - << "Old record must exist when sending vertex by RPC"; - VertexRes response(vertex.GetOld(), db_.WorkerId()); - response.Save(res_builder); - }); - - rpc_server_.Register<EdgeRpc>([this](const auto &req_reader, - auto *res_builder) { 
- database::GraphDbAccessor dba(db_, req_reader.getMember().getTxId()); - auto edge = dba.FindEdge(req_reader.getMember().getGid(), false); - CHECK(edge.GetOld()) << "Old record must exist when sending edge by RPC"; - EdgeRes response(edge.GetOld(), db_.WorkerId()); - response.Save(res_builder); - }); - - rpc_server_.Register<VertexCountRpc>( - [this](const auto &req_reader, auto *res_builder) { - VertexCountReq req; - req.Load(req_reader); - database::GraphDbAccessor dba(db_, req.member); - int64_t size = 0; - for (auto vertex : dba.Vertices(false)) ++size; - VertexCountRes res(size); - res.Save(res_builder); - }); -} - -} // namespace distributed diff --git a/src/distributed/data_rpc_server.hpp b/src/distributed/data_rpc_server.hpp deleted file mode 100644 index 91612a5cc..000000000 --- a/src/distributed/data_rpc_server.hpp +++ /dev/null @@ -1,17 +0,0 @@ -#pragma once - -#include "communication/rpc/server.hpp" -#include "database/graph_db.hpp" - -namespace distributed { - -/// Serves this worker's data to others. -class DataRpcServer { - public: - DataRpcServer(database::GraphDb &db, communication::rpc::Server &server); - - private: - database::GraphDb &db_; - communication::rpc::Server &rpc_server_; -}; -} // namespace distributed diff --git a/src/distributed/durability_rpc_clients.cpp b/src/distributed/durability_rpc_clients.cpp deleted file mode 100644 index 660965cba..000000000 --- a/src/distributed/durability_rpc_clients.cpp +++ /dev/null @@ -1,25 +0,0 @@ -#include "distributed/durability_rpc_clients.hpp" - -#include "distributed/durability_rpc_messages.hpp" -#include "transactions/transaction.hpp" -#include "utils/future.hpp" - -namespace distributed { -utils::Future<bool> DurabilityRpcClients::MakeSnapshot(tx::TransactionId tx) { - return utils::make_future(std::async(std::launch::async, [this, tx] { - auto futures = clients_.ExecuteOnWorkers<bool>( - 0, [tx](int worker_id, communication::rpc::ClientPool &client_pool) { - auto res = client_pool.Call<MakeSnapshotRpc>(tx); - if (!res) return false; - return res->member; - }); - - bool created = true; - for (auto &future : futures) { - created &= future.get(); - } - - return created; - })); -} -} // namespace distributed diff --git a/src/distributed/durability_rpc_clients.hpp b/src/distributed/durability_rpc_clients.hpp deleted file mode 100644 index 880bde3d9..000000000 --- a/src/distributed/durability_rpc_clients.hpp +++ /dev/null @@ -1,28 +0,0 @@ -#pragma once - -#include <future> -#include <mutex> -#include <utility> - -#include "distributed/rpc_worker_clients.hpp" -#include "storage/gid.hpp" -#include "transactions/type.hpp" - -namespace distributed { - -/// Provides an ability to trigger snapshooting on other workers. 
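A minimal master-side sketch of how the snapshot client defined below is meant to be used, assuming `clients` is an existing RpcWorkerClients instance, `tx_id` is a transaction id, and `utils::Future` exposes `get()` like `std::future`:
distributed::DurabilityRpcClients durability_clients(clients);
// Asks every worker to snapshot the given transaction; the future becomes
// true only if all workers report success.
utils::Future<bool> created = durability_clients.MakeSnapshot(tx_id);
if (!created.get()) {
  LOG(WARNING) << "Distributed snapshot creation failed on at least one worker";
}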
-class DurabilityRpcClients { - public: - DurabilityRpcClients(RpcWorkerClients &clients) : clients_(clients) {} - - // Sends a snapshot request to workers and returns a future which becomes true - // if all workers sucesfully completed their snapshot creation, false - // otherwise - // @param tx - transaction from which to take db snapshot - utils::Future<bool> MakeSnapshot(tx::TransactionId tx); - - private: - RpcWorkerClients &clients_; -}; - -} // namespace distributed diff --git a/src/distributed/durability_rpc_messages.lcp b/src/distributed/durability_rpc_messages.lcp deleted file mode 100644 index 9027569f1..000000000 --- a/src/distributed/durability_rpc_messages.lcp +++ /dev/null @@ -1,20 +0,0 @@ -#>cpp -#pragma once - -#include "boost/serialization/access.hpp" -#include "boost/serialization/base_object.hpp" - -#include "communication/rpc/messages.hpp" -#include "distributed/durability_rpc_messages.capnp.h" -#include "transactions/transaction.hpp" -cpp<# - -(lcp:namespace distributed) - -(lcp:capnp-namespace "distributed") - -(lcp:define-rpc make-snapshot - (:request ((member "tx::TransactionId" :capnp-type "UInt64"))) - (:response ((member :bool)))) - -(lcp:pop-namespace) ;; distributed diff --git a/src/distributed/durability_rpc_server.cpp b/src/distributed/durability_rpc_server.cpp deleted file mode 100644 index 031dc73dc..000000000 --- a/src/distributed/durability_rpc_server.cpp +++ /dev/null @@ -1,20 +0,0 @@ -#include "distributed/durability_rpc_server.hpp" - -#include "database/graph_db.hpp" -#include "database/graph_db_accessor.hpp" -#include "distributed/durability_rpc_messages.hpp" - -namespace distributed { - -DurabilityRpcServer::DurabilityRpcServer(database::GraphDb &db, - communication::rpc::Server &server) - : db_(db), rpc_server_(server) { - rpc_server_.Register<MakeSnapshotRpc>( - [this](const auto &req_reader, auto *res_builder) { - database::GraphDbAccessor dba(this->db_, req_reader.getMember()); - MakeSnapshotRes res(this->db_.MakeSnapshot(dba)); - res.Save(res_builder); - }); -} - -} // namespace distributed diff --git a/src/distributed/durability_rpc_server.hpp b/src/distributed/durability_rpc_server.hpp deleted file mode 100644 index 1373b6aec..000000000 --- a/src/distributed/durability_rpc_server.hpp +++ /dev/null @@ -1,21 +0,0 @@ -#pragma once - -#include "communication/rpc/server.hpp" - -namespace database { -class GraphDb; -}; - -namespace distributed { - -class DurabilityRpcServer { - public: - DurabilityRpcServer(database::GraphDb &db, - communication::rpc::Server &server); - - private: - database::GraphDb &db_; - communication::rpc::Server &rpc_server_; -}; - -} // namespace distributed diff --git a/src/distributed/index_rpc_messages.lcp b/src/distributed/index_rpc_messages.lcp deleted file mode 100644 index d1573b53a..000000000 --- a/src/distributed/index_rpc_messages.lcp +++ /dev/null @@ -1,25 +0,0 @@ -#>cpp -#pragma once - -#include <memory> -#include <string> - -#include "communication/rpc/messages.hpp" -#include "distributed/serialization.hpp" -#include "distributed/index_rpc_messages.capnp.h" -cpp<# - -(lcp:namespace distributed) - -(lcp:capnp-namespace "distributed") - -(lcp:capnp-import 'storage "/storage/serialization.capnp") - -(lcp:define-rpc build-index - (:request - ((label "storage::Label" :capnp-type "Storage.Common") - (property "storage::Property" :capnp-type "Storage.Common") - (tx-id "tx::TransactionId" :capnp-type "UInt64"))) - (:response ())) - -(lcp:pop-namespace) ;; distributed diff --git a/src/distributed/index_rpc_server.cpp 
b/src/distributed/index_rpc_server.cpp deleted file mode 100644 index a88b0595d..000000000 --- a/src/distributed/index_rpc_server.cpp +++ /dev/null @@ -1,33 +0,0 @@ -#include "database/graph_db.hpp" -#include "database/graph_db_accessor.hpp" -#include "distributed/index_rpc_server.hpp" - -namespace distributed { - -IndexRpcServer::IndexRpcServer(database::GraphDb &db, - communication::rpc::Server &server) - : db_(db), rpc_server_(server) { - rpc_server_.Register<BuildIndexRpc>( - [this](const auto &req_reader, auto *res_builder) { - BuildIndexReq req; - req.Load(req_reader); - database::LabelPropertyIndex::Key key{req.label, req.property}; - database::GraphDbAccessor dba(db_, req.tx_id); - - if (db_.storage().label_property_index_.CreateIndex(key) == false) { - // If we are a distributed worker, we just have to wait until the index - // (which should already be in the process of being created) is built, - // so that returning from this call guarantees that the index exists. - // This assumes that no worker thread creating the index will fail. - while (!dba.LabelPropertyIndexExists(key.label_, key.property_)) { - // TODO: reconsider this constant; currently a rule-of-thumb choice. - std::this_thread::sleep_for(std::chrono::microseconds(100)); - } - } else { - dba.PopulateIndex(key); - dba.EnableIndex(key); - } - }); -} - -} // namespace distributed diff --git a/src/distributed/index_rpc_server.hpp b/src/distributed/index_rpc_server.hpp deleted file mode 100644 index 3aec58b2f..000000000 --- a/src/distributed/index_rpc_server.hpp +++ /dev/null @@ -1,22 +0,0 @@ -#pragma once - -namespace communication::rpc { -class Server; -} - -namespace database { -class GraphDb; -} - -namespace distributed { - -class IndexRpcServer { - public: - IndexRpcServer(database::GraphDb &db, communication::rpc::Server &server); - - private: - database::GraphDb &db_; - communication::rpc::Server &rpc_server_; -}; - -} // namespace distributed diff --git a/src/distributed/plan_consumer.cpp b/src/distributed/plan_consumer.cpp deleted file mode 100644 index fa48f2ce2..000000000 --- a/src/distributed/plan_consumer.cpp +++ /dev/null @@ -1,41 +0,0 @@ -#include "distributed/plan_consumer.hpp" - -namespace distributed { - -PlanConsumer::PlanConsumer(communication::rpc::Server &server) - : server_(server) { - server_.Register<DispatchPlanRpc>( - [this](const auto &req_reader, auto *res_builder) { - DispatchPlanReq req; - req.Load(req_reader); - plan_cache_.access().insert( - req.plan_id, std::make_unique<PlanPack>(req.plan, req.symbol_table, - std::move(req.storage))); - DispatchPlanRes res; - res.Save(res_builder); - }); - - server_.Register<RemovePlanRpc>( - [this](const auto &req_reader, auto *res_builder) { - plan_cache_.access().remove(req_reader.getMember()); - }); -} - -PlanConsumer::PlanPack &PlanConsumer::PlanForId(int64_t plan_id) const { - auto accessor = plan_cache_.access(); - auto found = accessor.find(plan_id); - CHECK(found != accessor.end()) - << "Missing plan and symbol table for plan id: " << plan_id; - return *found->second; -} - -std::vector<int64_t> PlanConsumer::CachedPlanIds() const { - std::vector<int64_t> plan_ids; - auto access = plan_cache_.access(); - plan_ids.reserve(access.size()); - for (auto &kv : access) plan_ids.emplace_back(kv.first); - - return plan_ids; -} - -} // namespace distributed diff --git a/src/distributed/plan_consumer.hpp b/src/distributed/plan_consumer.hpp deleted file mode 100644 index ee3792edb..000000000 --- a/src/distributed/plan_consumer.hpp +++ /dev/null @@ -1,44 +0,0 @@ -#pragma once
- -#include <vector> - -#include "communication/rpc/server.hpp" -#include "data_structures/concurrent/concurrent_map.hpp" -#include "distributed/plan_rpc_messages.hpp" -#include "query/frontend/semantic/symbol_table.hpp" -#include "query/plan/operator.hpp" - -namespace distributed { - -/** Handles plan consumption from master. Creates and holds a local cache of - * plans. Worker side. */ -class PlanConsumer { - public: - struct PlanPack { - PlanPack(std::shared_ptr<query::plan::LogicalOperator> plan, - query::SymbolTable symbol_table, query::AstStorage storage) - : plan(plan), - symbol_table(std::move(symbol_table)), - storage(std::move(storage)) {} - - std::shared_ptr<query::plan::LogicalOperator> plan; - query::SymbolTable symbol_table; - const query::AstStorage storage; - }; - - explicit PlanConsumer(communication::rpc::Server &server); - - /** Return cached plan and symbol table for a given plan id. */ - PlanPack &PlanForId(int64_t plan_id) const; - - /** Return the ids of all the cached plans. For testing. */ - std::vector<int64_t> CachedPlanIds() const; - - private: - communication::rpc::Server &server_; - // TODO remove unique_ptr. This is to get it to work, emplacing into a - // ConcurrentMap is tricky. - mutable ConcurrentMap<int64_t, std::unique_ptr<PlanPack>> plan_cache_; -}; - -} // namespace distributed diff --git a/src/distributed/plan_dispatcher.cpp b/src/distributed/plan_dispatcher.cpp deleted file mode 100644 index bd1b34429..000000000 --- a/src/distributed/plan_dispatcher.cpp +++ /dev/null @@ -1,35 +0,0 @@ -#include <distributed/plan_dispatcher.hpp> - -namespace distributed { - -PlanDispatcher::PlanDispatcher(RpcWorkerClients &clients) : clients_(clients) {} - -void PlanDispatcher::DispatchPlan( - int64_t plan_id, std::shared_ptr<query::plan::LogicalOperator> plan, - const query::SymbolTable &symbol_table) { - auto futures = clients_.ExecuteOnWorkers<void>( - 0, [plan_id, plan, symbol_table]( - int worker_id, communication::rpc::ClientPool &client_pool) { - auto result = - client_pool.Call<DispatchPlanRpc>(plan_id, plan, symbol_table); - CHECK(result) << "DispatchPlanRpc failed"; - }); - - for (auto &future : futures) { - future.wait(); - } -} - -void PlanDispatcher::RemovePlan(int64_t plan_id) { - auto futures = clients_.ExecuteOnWorkers<void>( - 0, [plan_id](int worker_id, communication::rpc::ClientPool &client_pool) { - auto result = client_pool.Call<RemovePlanRpc>(plan_id); - CHECK(result) << "Failed to remove plan from worker"; - }); - - for (auto &future : futures) { - future.wait(); - } -} - -} // namespace distributed diff --git a/src/distributed/plan_dispatcher.hpp b/src/distributed/plan_dispatcher.hpp deleted file mode 100644 index c8763f7e3..000000000 --- a/src/distributed/plan_dispatcher.hpp +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once - -#include "distributed/coordination.hpp" -#include "distributed/plan_rpc_messages.hpp" -#include "distributed/rpc_worker_clients.hpp" -#include "query/frontend/semantic/symbol_table.hpp" -#include "query/plan/operator.hpp" - -namespace distributed { - -/** Handles plan dispatching to all workers. Uses MasterCoordination to - * acomplish that. Master side. - */ -class PlanDispatcher { - public: - explicit PlanDispatcher(RpcWorkerClients &clients); - - /** Dispatch a plan to all workers and wait for their acknowledgement. 
*/ - void DispatchPlan(int64_t plan_id, - std::shared_ptr<query::plan::LogicalOperator> plan, - const query::SymbolTable &symbol_table); - - /** Remove a plan from all workers and wait for their acknowledgement. */ - void RemovePlan(int64_t plan_id); - - private: - RpcWorkerClients &clients_; -}; - -} // namespace distributed diff --git a/src/distributed/plan_rpc_messages.lcp b/src/distributed/plan_rpc_messages.lcp deleted file mode 100644 index bf2f892a5..000000000 --- a/src/distributed/plan_rpc_messages.lcp +++ /dev/null @@ -1,59 +0,0 @@ -#>cpp -#pragma once - -#include "communication/rpc/messages.hpp" -#include "query/frontend/ast/ast.hpp" -#include "query/frontend/semantic/symbol_table.hpp" -#include "query/plan/operator.hpp" - -#include "distributed/plan_rpc_messages.capnp.h" -cpp<# - -(lcp:namespace distributed) - -(lcp:capnp-namespace "distributed") - -(lcp:capnp-import 'utils "/utils/serialization.capnp") -(lcp:capnp-import 'plan "/query/plan/operator.capnp") -(lcp:capnp-import 'sem "/query/frontend/semantic/symbol.capnp") - -(defun load-plan (reader member) - #>cpp - query::plan::LogicalOperator::LoadHelper helper; - ${member} = utils::LoadSharedPtr<query::plan::capnp::LogicalOperator, query::plan::LogicalOperator>( - ${reader}, [&helper](const auto &reader) { - auto op = query::plan::LogicalOperator::Construct(reader); - op->Load(reader, &helper); - return op.release(); - }, &helper.loaded_ops); - storage = std::move(helper.ast_storage); - cpp<#) - -(defun save-plan (builder member) - #>cpp - query::plan::LogicalOperator::SaveHelper helper; - utils::SaveSharedPtr<query::plan::capnp::LogicalOperator, query::plan::LogicalOperator>( - ${member}, &${builder}, - [&helper](auto *builder, const auto &val) { - val.Save(builder, &helper); - }, &helper.saved_ops); - cpp<#) - -(lcp:define-rpc dispatch-plan - (:request - ((plan-id :int64_t) - (plan "std::shared_ptr<query::plan::LogicalOperator>" - :capnp-type "Utils.SharedPtr(Plan.LogicalOperator)" - :capnp-save #'save-plan :capnp-load #'load-plan) - (symbol-table "query::SymbolTable" :capnp-type "Sem.SymbolTable") - (storage "query::AstStorage" :initarg nil - :save-fun "" - :load-fun "storage = std::move(ar.template get_helper<query::AstStorage>(query::AstStorage::kHelperId));" - :capnp-save :dont-save))) - (:response ())) - -(lcp:define-rpc remove-plan - (:request ((member :int64_t))) - (:response ())) - -(lcp:pop-namespace) ;; distributed diff --git a/src/distributed/produce_rpc_server.cpp b/src/distributed/produce_rpc_server.cpp deleted file mode 100644 index 466bbe30e..000000000 --- a/src/distributed/produce_rpc_server.cpp +++ /dev/null @@ -1,176 +0,0 @@ -#include "distributed/produce_rpc_server.hpp" -#include "distributed/data_manager.hpp" -#include "distributed/pull_produce_rpc_messages.hpp" -#include "query/common.hpp" -#include "query/exceptions.hpp" -#include "transactions/engine_worker.hpp" - -namespace distributed { - -ProduceRpcServer::OngoingProduce::OngoingProduce( - database::GraphDb &db, tx::TransactionId tx_id, - std::shared_ptr<query::plan::LogicalOperator> op, - query::SymbolTable symbol_table, Parameters parameters, - std::vector<query::Symbol> pull_symbols) - : dba_{db, tx_id}, - context_(dba_), - pull_symbols_(std::move(pull_symbols)), - frame_(symbol_table.max_position()), - cursor_(op->MakeCursor(dba_)) { - context_.symbol_table_ = std::move(symbol_table); - context_.parameters_ = std::move(parameters); -} - -std::pair<std::vector<query::TypedValue>, PullState> -ProduceRpcServer::OngoingProduce::Pull() { - if 
(!accumulation_.empty()) { - auto results = std::move(accumulation_.back()); - accumulation_.pop_back(); - for (auto &element : results) { - try { - query::ReconstructTypedValue(element); - } catch (query::ReconstructionException &) { - cursor_state_ = PullState::RECONSTRUCTION_ERROR; - return std::make_pair(std::move(results), cursor_state_); - } - } - - return std::make_pair(std::move(results), PullState::CURSOR_IN_PROGRESS); - } - - return PullOneFromCursor(); -} - -PullState ProduceRpcServer::OngoingProduce::Accumulate() { - while (true) { - auto result = PullOneFromCursor(); - if (result.second != PullState::CURSOR_IN_PROGRESS) - return result.second; - else - accumulation_.emplace_back(std::move(result.first)); - } -} - -std::pair<std::vector<query::TypedValue>, PullState> -ProduceRpcServer::OngoingProduce::PullOneFromCursor() { - std::vector<query::TypedValue> results; - - // Check if we already exhausted this cursor (or it entered an error - // state). This happens when we accumulate before normal pull. - if (cursor_state_ != PullState::CURSOR_IN_PROGRESS) { - return std::make_pair(results, cursor_state_); - } - - try { - if (cursor_->Pull(frame_, context_)) { - results.reserve(pull_symbols_.size()); - for (const auto &symbol : pull_symbols_) { - results.emplace_back(std::move(frame_[symbol])); - } - } else { - cursor_state_ = PullState::CURSOR_EXHAUSTED; - } - } catch (const mvcc::SerializationError &) { - cursor_state_ = PullState::SERIALIZATION_ERROR; - } catch (const utils::LockTimeoutException &) { - cursor_state_ = PullState::LOCK_TIMEOUT_ERROR; - } catch (const RecordDeletedError &) { - cursor_state_ = PullState::UPDATE_DELETED_ERROR; - } catch (const query::ReconstructionException &) { - cursor_state_ = PullState::RECONSTRUCTION_ERROR; - } catch (const query::RemoveAttachedVertexException &) { - cursor_state_ = PullState::UNABLE_TO_DELETE_VERTEX_ERROR; - } catch (const query::QueryRuntimeException &) { - cursor_state_ = PullState::QUERY_ERROR; - } catch (const query::HintedAbortError &) { - cursor_state_ = PullState::HINTED_ABORT_ERROR; - } - return std::make_pair(std::move(results), cursor_state_); -} - -ProduceRpcServer::ProduceRpcServer( - database::GraphDb &db, tx::Engine &tx_engine, - communication::rpc::Server &server, - const distributed::PlanConsumer &plan_consumer) - : db_(db), - produce_rpc_server_(server), - plan_consumer_(plan_consumer), - tx_engine_(tx_engine) { - produce_rpc_server_.Register<PullRpc>( - [this](const auto &req_reader, auto *res_builder) { - PullReq req; - req.Load(req_reader); - PullRes res(Pull(req)); - res.Save(res_builder); - }); - - produce_rpc_server_.Register<TransactionCommandAdvancedRpc>( - [this](const auto &req_reader, auto *res_builder) { - TransactionCommandAdvancedReq req; - req.Load(req_reader); - tx_engine_.UpdateCommand(req.member); - db_.data_manager().ClearCacheForSingleTransaction(req.member); - TransactionCommandAdvancedRes res; - res.Save(res_builder); - }); -} - -void ProduceRpcServer::FinishAndClearOngoingProducePlans( - tx::TransactionId tx_id) { - std::lock_guard<std::mutex> guard{ongoing_produces_lock_}; - for (auto it = ongoing_produces_.begin(); it != ongoing_produces_.end();) { - if (std::get<0>(it->first) == tx_id) { - it = ongoing_produces_.erase(it); - } else { - ++it; - } - } -} - -ProduceRpcServer::OngoingProduce &ProduceRpcServer::GetOngoingProduce( - const PullReq &req) { - auto key_tuple = std::make_tuple(req.tx_id, req.command_id, req.plan_id); - std::lock_guard<std::mutex> guard{ongoing_produces_lock_}; - 
auto found = ongoing_produces_.find(key_tuple); - if (found != ongoing_produces_.end()) { - return found->second; - } - if (db_.type() == database::GraphDb::Type::DISTRIBUTED_WORKER) { - // On the worker cache the snapshot to have one RPC less. - dynamic_cast<tx::WorkerEngine &>(tx_engine_) - .RunningTransaction(req.tx_id, req.tx_snapshot); - } - auto &plan_pack = plan_consumer_.PlanForId(req.plan_id); - return ongoing_produces_ - .emplace(std::piecewise_construct, std::forward_as_tuple(key_tuple), - std::forward_as_tuple(db_, req.tx_id, plan_pack.plan, - plan_pack.symbol_table, req.params, - req.symbols)) - .first->second; -} - -PullResData ProduceRpcServer::Pull(const PullReq &req) { - auto &ongoing_produce = GetOngoingProduce(req); - - PullResData result(db_.WorkerId(), req.send_old, req.send_new); - result.pull_state = PullState::CURSOR_IN_PROGRESS; - - if (req.accumulate) { - result.pull_state = ongoing_produce.Accumulate(); - // If an error ocurred, we need to return that error. - if (result.pull_state != PullState::CURSOR_EXHAUSTED) { - return result; - } - } - - for (int i = 0; i < req.batch_size; ++i) { - auto pull_result = ongoing_produce.Pull(); - result.pull_state = pull_result.second; - if (pull_result.second != PullState::CURSOR_IN_PROGRESS) break; - result.frames.emplace_back(std::move(pull_result.first)); - } - - return result; -} - -} // namespace distributed diff --git a/src/distributed/produce_rpc_server.hpp b/src/distributed/produce_rpc_server.hpp deleted file mode 100644 index 922a6c692..000000000 --- a/src/distributed/produce_rpc_server.hpp +++ /dev/null @@ -1,92 +0,0 @@ -#pragma once - -#include <cstdint> -#include <map> -#include <mutex> -#include <utility> -#include <vector> - -#include "communication/rpc/server.hpp" -#include "database/graph_db.hpp" -#include "database/graph_db_accessor.hpp" -#include "distributed/plan_consumer.hpp" -#include "query/context.hpp" -#include "query/frontend/semantic/symbol_table.hpp" -#include "query/interpret/frame.hpp" -#include "query/parameters.hpp" -#include "query/plan/operator.hpp" -#include "query/typed_value.hpp" -#include "transactions/engine.hpp" -#include "transactions/type.hpp" - -namespace distributed { - -/// Handles the execution of a plan on the worker, requested by the remote -/// master. Assumes that (tx_id, plan_id) uniquely identifies an execution, and -/// that there will never be parallel requests for the same execution thus -/// identified. -class ProduceRpcServer { - /// Encapsulates a Cursor execution in progress. Can be used for pulling a - /// single result from the execution, or pulling all and accumulating the - /// results. Accumulations are used for synchronizing updates in distributed - /// MG (see query::plan::Synchronize). - class OngoingProduce { - public: - OngoingProduce(database::GraphDb &db, tx::TransactionId tx_id, - std::shared_ptr<query::plan::LogicalOperator> op, - query::SymbolTable symbol_table, Parameters parameters, - std::vector<query::Symbol> pull_symbols); - - /// Returns a vector of typed values (one for each `pull_symbol`), and an - /// indication of the pull result. The result data is valid only if the - /// returned state is CURSOR_IN_PROGRESS. - std::pair<std::vector<query::TypedValue>, PullState> Pull(); - - /// Accumulates all the frames pulled from the cursor and returns - /// CURSOR_EXHAUSTED. If an error occurs, an appropriate value is returned. 
- PullState Accumulate(); - - private: - database::GraphDbAccessor dba_; - query::Context context_; - std::vector<query::Symbol> pull_symbols_; - query::Frame frame_; - PullState cursor_state_{PullState::CURSOR_IN_PROGRESS}; - std::vector<std::vector<query::TypedValue>> accumulation_; - std::unique_ptr<query::plan::Cursor> cursor_; - - /// Pulls and returns a single result from the cursor. - std::pair<std::vector<query::TypedValue>, PullState> PullOneFromCursor(); - }; - - public: - ProduceRpcServer(database::GraphDb &db, tx::Engine &tx_engine, - communication::rpc::Server &server, - const distributed::PlanConsumer &plan_consumer); - - /// Finish and clear ongoing produces for all plans that are tied to a - /// transaction with tx_id. - void FinishAndClearOngoingProducePlans(tx::TransactionId tx_id); - - private: - std::mutex ongoing_produces_lock_; - /// Mapping of (tx id, command id, plan id) to OngoingProduce. - /// The command_id should be the command_id at the initialization of a cursor - /// that can call ProduceRpcServer. - std::map<std::tuple<tx::TransactionId, tx::CommandId, int64_t>, - OngoingProduce> - ongoing_produces_; - database::GraphDb &db_; - communication::rpc::Server &produce_rpc_server_; - const distributed::PlanConsumer &plan_consumer_; - tx::Engine &tx_engine_; - - /// Gets an ongoing produce for the given pull request. Creates a new one if - /// there is none currently existing. - OngoingProduce &GetOngoingProduce(const PullReq &req); - - /// Performs a single remote pull for the given request. - PullResData Pull(const PullReq &req); -}; - -} // namespace distributed diff --git a/src/distributed/pull_produce_rpc_messages.lcp b/src/distributed/pull_produce_rpc_messages.lcp deleted file mode 100644 index 849121140..000000000 --- a/src/distributed/pull_produce_rpc_messages.lcp +++ /dev/null @@ -1,547 +0,0 @@ -#>cpp -#pragma once - -#include <cstdint> -#include <functional> -#include <string> - -#include "communication/rpc/messages.hpp" -#include "distributed/pull_produce_rpc_messages.capnp.h" -#include "distributed/serialization.hpp" -#include "query/frontend/semantic/symbol.hpp" -#include "query/parameters.hpp" -#include "storage/address_types.hpp" -#include "transactions/type.hpp" -#include "utils/serialization.hpp" -cpp<# - -(lcp:in-impl - #>cpp - #include "database/graph_db_accessor.hpp" - #include "distributed/data_manager.hpp" - cpp<#) - -(lcp:namespace distributed) - -(lcp:capnp-namespace "distributed") - -(lcp:capnp-import 'dis "/distributed/serialization.capnp") -(lcp:capnp-import 'sem "/query/frontend/semantic/symbol.capnp") -(lcp:capnp-import 'tx "/transactions/common.capnp") -(lcp:capnp-import 'utils "/utils/serialization.capnp") - -(lcp:capnp-type-conversion "tx::CommandId" "UInt32") -(lcp:capnp-type-conversion "tx::Snapshot" "Tx.Snapshot") -(lcp:capnp-type-conversion "tx::TransactionId" "UInt64") - -#>cpp -/// The default number of results returned via RPC from remote execution to the -/// master that requested it. -constexpr int kDefaultBatchSize = 20; -cpp<# - -(lcp:define-enum pull-state - (cursor-exhausted - cursor-in-progress - serialization-error - lock-timeout-error - update-deleted-error - reconstruction-error - unable-to-delete-vertex-error - hinted-abort-error - query-error) - (:documentation "Returned along with a batch of results in the remote-pull -RPC. 
Indicates the state of execution on the worker.") - (:serialize)) - -(lcp:define-struct pull-data () - ((pull-state "PullState") - (frames "std::vector<std::vector<query::TypedValue>>")) - (:documentation - "The data returned to the end consumer (the Pull operator). Contains only -the relevant parts of the response, ready for use.")) - -(lcp:define-struct pull-res-data () - ((pull-state "PullState" - :capnp-init nil - :capnp-save (lcp:capnp-save-enum "capnp::PullState" "PullState") - :capnp-load (lcp:capnp-load-enum "capnp::PullState" "PullState")) - (frames "std::vector<std::vector<query::TypedValue>>" - :capnp-type "List(List(Dis.TypedValue))" - :capnp-save - (lambda (builder member) - #>cpp - for (size_t frame_i = 0; frame_i < ${member}.size(); ++frame_i) { - const auto &frame = ${member}[frame_i]; - auto frame_builder = ${builder}.init(frame_i, frame.size()); - for (size_t val_i = 0; val_i < frame.size(); ++val_i) { - const auto &value = frame[val_i]; - auto value_builder = frame_builder[val_i]; - utils::SaveCapnpTypedValue( - value, &value_builder, - [this](const auto &value, auto *builder) { - this->SaveGraphElement(value, builder); - }); - } - } - cpp<#) - :capnp-load - (lambda (reader member) - #>cpp - ${member}.reserve(${reader}.size()); - for (const auto &frame_reader : ${reader}) { - std::vector<query::TypedValue> current_frame; - current_frame.reserve(frame_reader.size()); - for (const auto &value_reader : frame_reader) { - query::TypedValue value; - utils::LoadCapnpTypedValue( - value_reader, &value, - [this, dba](const auto &reader, auto *value) { - this->LoadGraphElement(dba, reader, value); - }); - current_frame.emplace_back(value); - } - ${member}.emplace_back(current_frame); - } - cpp<#)) - (worker-id :int16_t :capnp-save :dont-save - :documentation - "Id of the worker on which the response is created, used for -serializing vertices (converting local to global addresses). Indicates which -of (old, new) records of a graph element should be sent.") - (send-old :bool :capnp-save :dont-save) - (send-new :bool :capnp-save :dont-save) - ;; Temporary caches used between deserialization and post-processing - ;; (transfering the ownership of this data to a Cache). - (vertices "std::vector<GraphElementData<Vertex>>" :capnp-save :dont-save) - (edges "std::vector<GraphElementData<Edge>>" :capnp-save :dont-save) - (paths "std::vector<PathData>" :capnp-save :dont-save)) - (:documentation - "The data of the remote pull response. Post-processing is required after -deserialization to initialize Vertex/Edge typed values in the frames (possibly -encapsulated in lists/maps) to their proper values. This requires a -GraphDbAccessor and therefore can't be done as part of deserialization. - -TODO - make it possible to inject a &GraphDbAcessor from the Pull layer all -the way into RPC data deserialization to remove the requirement for -post-processing. The current approach of holding references to parts of the -frame (potentially embedded in lists/maps) is too error-prone.") - (:public - #>cpp - private: - cpp<# - (lcp:define-struct (graph-element-data t-record) () - ((global-address "storage::Address<mvcc::VersionList<TRecord>>") - (old-record "std::unique_ptr<TRecord>") - (new-record "std::unique_ptr<TRecord>") - (element-in-frame - "query::TypedValue *" - :documentation - "The position in frame is optional. 
This same structure is used for -deserializing path elements, in which case the vertex/edge in question is not -directly part of the frame.")) - (:documentation - "Temp cache for deserialized vertices and edges. These objects are -created during deserialization. They are used immediatelly after during -post-processing. The vertex/edge data ownership gets transfered to the Cache, -and the `element_in_frame` reference is used to set the appropriate accessor -to the appropriate value. Not used on side that generates the response.") - (:public - #>cpp - GraphElementData(storage::Address<mvcc::VersionList<TRecord>> address, - std::unique_ptr<TRecord> old_record, std::unique_ptr<TRecord> new_record, - query::TypedValue *element_in_frame) - : global_address(address), - old_record(std::move(old_record)), - new_record(std::move(new_record)), - element_in_frame(element_in_frame) {} - cpp<#)) - (lcp:define-struct path-data () - ((vertices "std::vector<GraphElementData<Vertex>>") - (edges "std::vector<GraphElementData<Edge>>") - (path-in-frame "query::TypedValue *")) - (:public - #>cpp - PathData(query::TypedValue *path_in_frame) : path_in_frame(path_in_frame) {} - cpp<#) - (:documentation "Same like `GraphElementData`, but for paths.")) - #>cpp - public: - PullResData() {} // Default constructor required for serialization. - PullResData(int worker_id, bool send_old, bool send_new) - : worker_id(worker_id), send_old(send_old), send_new(send_new) {} - - PullResData(const PullResData &) = delete; - PullResData &operator=(const PullResData &) = delete; - PullResData(PullResData &&) = default; - PullResData &operator=(PullResData &&) = default; - - - /// Saves a typed value that is a vertex/edge/path. - template <class TArchive> - void SaveGraphElement(TArchive &ar, const query::TypedValue &value) const { - // Helper template function for storing a vertex or an edge. - auto save_element = [&ar, this](auto element_accessor) { - ar << element_accessor.GlobalAddress().raw(); - - // If both old and new are null, we need to reconstruct. - if (!(element_accessor.GetOld() || element_accessor.GetNew())) { - bool result = element_accessor.Reconstruct(); - CHECK(result) << "Attempting to serialize an element not visible to " - "current transaction."; - } - auto *old_rec = element_accessor.GetOld(); - if (send_old && old_rec) { - ar << true; - distributed::SaveElement(ar, *old_rec, worker_id); - } else { - ar << false; - } - if (send_new) { - // Must call SwitchNew as that will trigger a potentially necesary - // Reconstruct. - element_accessor.SwitchNew(); - auto *new_rec = element_accessor.GetNew(); - if (new_rec) { - ar << true; - distributed::SaveElement(ar, *new_rec, worker_id); - } else { - ar << false; - } - } else { - ar << false; - } - }; - switch (value.type()) { - case query::TypedValue::Type::Vertex: - save_element(value.ValueVertex()); - break; - case query::TypedValue::Type::Edge: - save_element(value.ValueEdge()); - break; - case query::TypedValue::Type::Path: { - auto &path = value.ValuePath(); - ar << path.size(); - save_element(path.vertices()[0]); - for (size_t i = 0; i < path.size(); ++i) { - save_element(path.edges()[i]); - save_element(path.vertices()[i + 1]); - } - break; - } - default: - LOG(FATAL) << "Unsupported graph element type: " << value.type(); - } - } - - /// Loads a typed value that is a vertex/edge/path. Part of the - /// deserialization process, populates the temporary data caches which are - /// processed later. 
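
The two-phase loading described above (record what was read and which frame slot it belongs to, then patch the frame once the necessary context exists) can be illustrated without any of the surrounding machinery. The payload strings, the Value struct and PendingElement below are hypothetical stand-ins for the serialized records, query::TypedValue and GraphElementData:

    #include <string>
    #include <vector>

    struct Value {
      std::string text;  // Stands in for a fully initialized TypedValue.
    };

    // Temporary cache entry: the raw payload plus the frame slot it targets.
    struct PendingElement {
      std::string payload;
      Value *slot_in_frame;  // May be null for elements not in the frame.
    };

    // Phase 1: deserialization only records what was read and where it goes.
    void Load(const std::string &wire, Value *slot,
              std::vector<PendingElement> *pending) {
      pending->push_back({wire, slot});
    }

    // Phase 2: once the required context (a database accessor in the real
    // code) exists, walk the cache and install proper values into the frame.
    void PostProcess(const std::vector<PendingElement> &pending) {
      for (const auto &p : pending) {
        if (p.slot_in_frame) p.slot_in_frame->text = "resolved:" + p.payload;
      }
    }

Keeping raw pointers into the frame between the two phases is exactly what the TODO above calls error-prone; injecting the accessor into deserialization would remove the second phase altogether.
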
- template <class TArchive> - void LoadGraphElement(TArchive &ar, query::TypedValue::Type type, - query::TypedValue &value) { - auto load_edge = [](auto &ar) { - bool exists; - ar >> exists; - return exists ? LoadEdge(ar) : nullptr; - }; - auto load_vertex = [](auto &ar) { - bool exists; - ar >> exists; - return exists ? LoadVertex(ar) : nullptr; - }; - - switch (type) { - case query::TypedValue::Type::Vertex: { - storage::VertexAddress::StorageT address; - ar >> address; - vertices.emplace_back(storage::VertexAddress(address), load_vertex(ar), - load_vertex(ar), &value); - break; - } - case query::TypedValue::Type::Edge: { - storage::VertexAddress::StorageT address; - ar >> address; - edges.emplace_back(storage::EdgeAddress(address), load_edge(ar), - load_edge(ar), &value); - break; - } - case query::TypedValue::Type::Path: { - size_t path_size; - ar >> path_size; - - paths.emplace_back(&value); - auto &path_data = paths.back(); - - storage::VertexAddress::StorageT vertex_address; - storage::EdgeAddress::StorageT edge_address; - ar >> vertex_address; - path_data.vertices.emplace_back(storage::VertexAddress(vertex_address), - load_vertex(ar), load_vertex(ar), - nullptr); - for (size_t i = 0; i < path_size; ++i) { - ar >> edge_address; - path_data.edges.emplace_back(storage::EdgeAddress(edge_address), - load_edge(ar), load_edge(ar), nullptr); - ar >> vertex_address; - path_data.vertices.emplace_back( - storage::VertexAddress(vertex_address), load_vertex(ar), - load_vertex(ar), nullptr); - } - break; - } - default: - LOG(FATAL) << "Unsupported graph element type: " << type; - } - } - cpp<#) - (:private - #>cpp - void SaveGraphElement(const query::TypedValue &, - distributed::capnp::TypedValue::Builder *) const; - void LoadGraphElement(database::GraphDbAccessor *, - const distributed::capnp::TypedValue::Reader &, - query::TypedValue *); - cpp<#) - (:serialize :capnp :load-args '((dba "database::GraphDbAccessor *")))) - -(lcp:in-impl - #>cpp - void PullResData::SaveGraphElement( - const query::TypedValue &value, - distributed::capnp::TypedValue::Builder *builder) const { - auto save_element = [this](auto accessor, auto *builder) { - builder->setAddress(accessor.GlobalAddress().raw()); - // If both old and new are null, we need to reconstruct - if (!(accessor.GetOld() || accessor.GetNew())) { - bool result = accessor.Reconstruct(); - CHECK(result) << "Attempting to serialize an element not visible to " - "current transaction."; - } - auto *old_rec = accessor.GetOld(); - if (send_old && old_rec) { - auto old_builder = builder->initOld(); - distributed::SaveElement(*old_rec, &old_builder, worker_id); - } - if (send_new) { - // Must call SwitchNew as that will trigger a potentially necesary - // Reconstruct. 
- accessor.SwitchNew(); - auto *new_rec = accessor.GetNew(); - if (new_rec) { - auto new_builder = builder->initNew(); - distributed::SaveElement(*new_rec, &new_builder, worker_id); - } - } - }; - switch (value.type()) { - case query::TypedValue::Type::Vertex: { - auto vertex_builder = builder->initVertex(); - save_element(value.ValueVertex(), &vertex_builder); - break; - } - case query::TypedValue::Type::Edge: { - auto edge_builder = builder->initEdge(); - save_element(value.ValueEdge(), &edge_builder); - break; - } - case query::TypedValue::Type::Path: { - const auto &path = value.ValuePath(); - auto path_builder = builder->initPath(); - auto vertices_builder = path_builder.initVertices(path.vertices().size()); - for (size_t i = 0; i < path.vertices().size(); ++i) { - auto vertex_builder = vertices_builder[i]; - save_element(path.vertices()[i], &vertex_builder); - } - auto edges_builder = path_builder.initEdges(path.edges().size()); - for (size_t i = 0; i < path.edges().size(); ++i) { - auto edge_builder = edges_builder[i]; - save_element(path.edges()[i], &edge_builder); - } - break; - } - default: - LOG(FATAL) << "Unsupported graph element type: " << value.type(); - } - } - -void PullResData::LoadGraphElement( - database::GraphDbAccessor *dba, - const distributed::capnp::TypedValue::Reader &reader, - query::TypedValue *value) { - auto load_vertex = [dba](const auto &vertex_reader) { - storage::VertexAddress global_address(vertex_reader.getAddress()); - auto old_record = - vertex_reader.hasOld() - ? distributed::LoadVertex<const distributed::capnp::Vertex::Reader>( - vertex_reader.getOld()) - : nullptr; - auto new_record = - vertex_reader.hasNew() - ? distributed::LoadVertex<const distributed::capnp::Vertex::Reader>( - vertex_reader.getNew()) - : nullptr; - dba->db() - .data_manager() - .Elements<Vertex>(dba->transaction_id()) - .emplace(global_address.gid(), std::move(old_record), - std::move(new_record)); - return VertexAccessor(global_address, *dba); - }; - auto load_edge = [dba](const auto &edge_reader) { - storage::EdgeAddress global_address(edge_reader.getAddress()); - auto old_record = - edge_reader.hasOld() - ? distributed::LoadEdge<const distributed::capnp::Edge::Reader>( - edge_reader.getOld()) - : nullptr; - auto new_record = - edge_reader.hasNew() - ? 
distributed::LoadEdge<const distributed::capnp::Edge::Reader>( - edge_reader.getNew()) - : nullptr; - dba->db() - .data_manager() - .Elements<Edge>(dba->transaction_id()) - .emplace(global_address.gid(), std::move(old_record), - std::move(new_record)); - return EdgeAccessor(global_address, *dba); - }; - switch (reader.which()) { - case distributed::capnp::TypedValue::VERTEX: - *value = load_vertex(reader.getVertex()); - break; - case distributed::capnp::TypedValue::EDGE: - *value = load_edge(reader.getEdge()); - break; - case distributed::capnp::TypedValue::PATH: { - auto vertices_reader = reader.getPath().getVertices(); - auto edges_reader = reader.getPath().getEdges(); - query::Path path(load_vertex(vertices_reader[0])); - for (size_t i = 0; i < edges_reader.size(); ++i) { - path.Expand(load_edge(edges_reader[i])); - path.Expand(load_vertex(vertices_reader[i + 1])); - } - *value = path; - break; - } - default: - LOG(FATAL) << "Unsupported graph element type."; - } -} - - cpp<#) - -(lcp:define-rpc pull - (:request - ((tx-id "tx::TransactionId") - (tx-snapshot "tx::Snapshot") - (plan-id :int64_t) - (command-id "tx::CommandId") - (params "Parameters" - :save-fun - " - ar << params.size(); - for (auto &kv : params) { - ar << kv.first; - // Params never contain a vertex/edge, so save plan TypedValue. - utils::SaveTypedValue(ar, kv.second); - } - " - :load-fun - " - size_t params_size; - ar >> params_size; - for (size_t i = 0; i < params_size; ++i) { - int token_pos; - ar >> token_pos; - query::TypedValue param; - // Params never contain a vertex/edge, so load plan TypedValue. - utils::LoadTypedValue(ar, param); - params.Add(token_pos, param); - } - " - :capnp-type "Utils.Map(Utils.BoxInt64, Dis.TypedValue)" - :capnp-save - (lambda (builder member) - #>cpp - auto entries_builder = ${builder}.initEntries(${member}.size()); - size_t i = 0; - for (auto &entry : params) { - auto builder = entries_builder[i]; - auto key_builder = builder.initKey(); - key_builder.setValue(entry.first); - auto value_builder = builder.initValue(); - utils::SaveCapnpTypedValue(entry.second, &value_builder); - ++i; - } - cpp<#) - :capnp-load - (lambda (reader member) - #>cpp - for (const auto &entry_reader : ${reader}.getEntries()) { - query::TypedValue value; - utils::LoadCapnpTypedValue(entry_reader.getValue(), &value); - ${member}.Add(entry_reader.getKey().getValue(), value); - } - cpp<#)) - (symbols "std::vector<query::Symbol>" - :capnp-type "List(Sem.Symbol)" - :capnp-save (lcp:capnp-save-vector "query::capnp::Symbol" "query::Symbol") - :capnp-load (lcp:capnp-load-vector "query::capnp::Symbol" "query::Symbol")) - (accumulate :bool) - (batch-size :int64_t) - ;; Indicates which of (old, new) records of a graph element should be sent. - (send-old :bool) - (send-new :bool))) - (:response - ((data "PullResData" :initarg :move - :save-fun - " - ar << data.pull_state; - ar << data.frames.size(); - // We need to indicate how many values are in each frame. - // Assume all the frames have an equal number of elements. - ar << (data.frames.size() == 0 ? 
0 : data.frames[0].size()); - for (const auto &frame : data.frames) { - for (const auto &value : frame) { - utils::SaveTypedValue<TArchive>( - ar, value, [this](TArchive &ar, const query::TypedValue &value) { - data.SaveGraphElement(ar, value); - }); - } - } - " - :load-fun - " - ar >> data.pull_state; - size_t frame_count; - ar >> frame_count; - data.frames.reserve(frame_count); - size_t frame_size; - ar >> frame_size; - for (size_t i = 0; i < frame_count; ++i) { - data.frames.emplace_back(); - auto ¤t_frame = data.frames.back(); - current_frame.reserve(frame_size); - for (size_t j = 0; j < frame_size; ++j) { - current_frame.emplace_back(); - utils::LoadTypedValue<TArchive>( - ar, current_frame.back(), - [this](TArchive &ar, query::TypedValue::TypedValue::Type type, - query::TypedValue &value) { - data.LoadGraphElement(ar, type, value); - }); - } - } - ")) - (:serialize :capnp :base t :load-args '((dba "database::GraphDbAccessor *"))))) - -;; TODO make a separate RPC for the continuation of an existing pull, as an -;; optimization not to have to send the full PullReqData pack every time. - -(lcp:define-rpc transaction-command-advanced - (:request ((member "tx::TransactionId"))) - (:response ())) - -(lcp:pop-namespace) ;; distributed diff --git a/src/distributed/pull_rpc_clients.cpp b/src/distributed/pull_rpc_clients.cpp deleted file mode 100644 index 8652a3830..000000000 --- a/src/distributed/pull_rpc_clients.cpp +++ /dev/null @@ -1,41 +0,0 @@ -#include <functional> - -#include "distributed/data_manager.hpp" -#include "distributed/pull_rpc_clients.hpp" -#include "storage/edge.hpp" -#include "storage/vertex.hpp" - -namespace distributed { - -utils::Future<PullData> PullRpcClients::Pull( - database::GraphDbAccessor &dba, int worker_id, int64_t plan_id, - tx::CommandId command_id, const Parameters ¶ms, - const std::vector<query::Symbol> &symbols, bool accumulate, - int batch_size) { - return clients_.ExecuteOnWorker< - PullData>(worker_id, [&dba, plan_id, command_id, params, symbols, - accumulate, batch_size](int worker_id, - ClientPool &client_pool) { - auto load_pull_res = [&dba](const auto &res_reader) { - PullRes res; - res.Load(res_reader, &dba); - return res; - }; - auto result = client_pool.CallWithLoad<PullRpc>( - load_pull_res, dba.transaction_id(), dba.transaction().snapshot(), - plan_id, command_id, params, symbols, accumulate, batch_size, true, - true); - return PullData{result->data.pull_state, std::move(result->data.frames)}; - }); -} - -std::vector<utils::Future<void>> -PullRpcClients::NotifyAllTransactionCommandAdvanced(tx::TransactionId tx_id) { - return clients_.ExecuteOnWorkers<void>( - 0, [tx_id](int worker_id, auto &client) { - auto res = client.template Call<TransactionCommandAdvancedRpc>(tx_id); - CHECK(res) << "TransactionCommandAdvanceRpc failed"; - }); -} - -} // namespace distributed diff --git a/src/distributed/pull_rpc_clients.hpp b/src/distributed/pull_rpc_clients.hpp deleted file mode 100644 index c9cf998cf..000000000 --- a/src/distributed/pull_rpc_clients.hpp +++ /dev/null @@ -1,48 +0,0 @@ -#pragma once - -#include <vector> - -#include "database/graph_db_accessor.hpp" -#include "distributed/pull_produce_rpc_messages.hpp" -#include "distributed/rpc_worker_clients.hpp" -#include "query/frontend/semantic/symbol.hpp" -#include "query/parameters.hpp" -#include "transactions/type.hpp" -#include "utils/future.hpp" - -namespace distributed { - -/// Provides means of calling for the execution of a plan on some remote worker, -/// and getting the results of that 
execution. The results are returned in -/// batches and are therefore accompanied with an enum indicator of the state of -/// remote execution. -class PullRpcClients { - using ClientPool = communication::rpc::ClientPool; - - public: - PullRpcClients(RpcWorkerClients &clients) : clients_(clients) {} - - /// Calls a remote pull asynchroniously. IMPORTANT: take care not to call this - /// function for the same (tx_id, worker_id, plan_id, command_id) before the - /// previous call has ended. - /// - /// @todo: it might be cleaner to split Pull into {InitRemoteCursor, - /// Pull, RemoteAccumulate}, but that's a lot of refactoring and more - /// RPC calls. - utils::Future<PullData> Pull(database::GraphDbAccessor &dba, int worker_id, - int64_t plan_id, tx::CommandId command_id, - const Parameters ¶ms, - const std::vector<query::Symbol> &symbols, - bool accumulate, - int batch_size = kDefaultBatchSize); - - auto GetWorkerIds() { return clients_.GetWorkerIds(); } - - std::vector<utils::Future<void>> NotifyAllTransactionCommandAdvanced( - tx::TransactionId tx_id); - - private: - RpcWorkerClients &clients_; -}; - -} // namespace distributed diff --git a/src/distributed/rpc_worker_clients.hpp b/src/distributed/rpc_worker_clients.hpp deleted file mode 100644 index 9fd4cc55a..000000000 --- a/src/distributed/rpc_worker_clients.hpp +++ /dev/null @@ -1,154 +0,0 @@ -#pragma once - -#include <functional> -#include <type_traits> -#include <unordered_map> - -#include "communication/rpc/client_pool.hpp" -#include "distributed/coordination.hpp" -#include "distributed/index_rpc_messages.hpp" -#include "distributed/token_sharing_rpc_messages.hpp" -#include "distributed/transactional_cache_cleaner_rpc_messages.hpp" -#include "storage/types.hpp" -#include "transactions/transaction.hpp" -#include "utils/future.hpp" -#include "utils/thread.hpp" - -namespace distributed { - -/** A cache of RPC clients (of the given name/kind) per MG distributed worker. - * Thread safe. */ -class RpcWorkerClients { - public: - explicit RpcWorkerClients(Coordination &coordination) - : coordination_(coordination), - thread_pool_(std::thread::hardware_concurrency()) {} - - RpcWorkerClients(const RpcWorkerClients &) = delete; - RpcWorkerClients(RpcWorkerClients &&) = delete; - RpcWorkerClients &operator=(const RpcWorkerClients &) = delete; - RpcWorkerClients &operator=(RpcWorkerClients &&) = delete; - - auto &GetClientPool(int worker_id) { - std::lock_guard<std::mutex> guard{lock_}; - auto found = client_pools_.find(worker_id); - if (found != client_pools_.end()) return found->second; - return client_pools_ - .emplace(std::piecewise_construct, std::forward_as_tuple(worker_id), - std::forward_as_tuple(coordination_.GetEndpoint(worker_id))) - .first->second; - } - - auto GetWorkerIds() { return coordination_.GetWorkerIds(); } - - /** Asynchroniously executes the given function on the rpc client for the - * given worker id. Returns an `utils::Future` of the given `execute` - * function's - * return type. */ - template <typename TResult> - auto ExecuteOnWorker( - int worker_id, - std::function<TResult(int worker_id, communication::rpc::ClientPool &)> - execute) { - auto &client_pool = GetClientPool(worker_id); - return thread_pool_.Run(execute, worker_id, std::ref(client_pool)); - } - - /** Asynchroniously executes the `execute` function on all worker rpc clients - * except the one whose id is `skip_worker_id`. Returns a vectore of futures - * contaning the results of the `execute` function. 
*/ - template <typename TResult> - auto ExecuteOnWorkers( - int skip_worker_id, - std::function<TResult(int worker_id, communication::rpc::ClientPool &)> - execute) { - std::vector<utils::Future<TResult>> futures; - for (auto &worker_id : coordination_.GetWorkerIds()) { - if (worker_id == skip_worker_id) continue; - futures.emplace_back(std::move(ExecuteOnWorker(worker_id, execute))); - } - return futures; - } - - private: - // TODO make Coordination const, it's member GetEndpoint must be const too. - Coordination &coordination_; - std::unordered_map<int, communication::rpc::ClientPool> client_pools_; - std::mutex lock_; - utils::ThreadPool thread_pool_; -}; - -/** Wrapper class around a RPC call to build indices. - */ -class IndexRpcClients { - public: - explicit IndexRpcClients(RpcWorkerClients &clients) : clients_(clients) {} - - auto GetBuildIndexFutures(const storage::Label &label, - const storage::Property &property, - tx::TransactionId transaction_id, int worker_id) { - return clients_.ExecuteOnWorkers<bool>( - worker_id, - [label, property, transaction_id]( - int worker_id, communication::rpc::ClientPool &client_pool) { - return static_cast<bool>( - client_pool.Call<BuildIndexRpc>(label, property, transaction_id)); - }); - } - - private: - RpcWorkerClients &clients_; -}; - -/** Wrapper class around a RPC call to share token between workers. - */ -class TokenSharingRpcClients { - public: - explicit TokenSharingRpcClients(RpcWorkerClients *clients) - : clients_(clients) {} - - auto TransferToken(int worker_id) { - return clients_->ExecuteOnWorker<void>( - worker_id, - [](int worker_id, communication::rpc::ClientPool &client_pool) { - CHECK(client_pool.Call<TokenTransferRpc>()) - << "Unable to transfer token"; - }); - } - - private: - RpcWorkerClients *clients_; -}; - -/** Join ongoing produces on all workers. - * - * Sends a RPC request to all workers when a transaction is ending, notifying - * them to end all ongoing produces tied to that transaction. - */ -class OngoingProduceJoinerRpcClients { - public: - OngoingProduceJoinerRpcClients(RpcWorkerClients &clients) - : clients_(clients) {} - - void JoinOngoingProduces(tx::TransactionId tx_id) { - auto futures = clients_.ExecuteOnWorkers<void>( - 0, [tx_id](int worker_id, communication::rpc::ClientPool &client_pool) { - auto result = - client_pool.Call<distributed::WaitOnTransactionEndRpc>(tx_id); - CHECK(result) - << "[WaitOnTransactionEndRpc] failed to notify that transaction " - << tx_id << " ended"; - }); - - // We need to wait for all workers to destroy pending futures to avoid - // using already destroyed (released) transaction objects. 
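
The fan-out used throughout these client wrappers (ExecuteOnWorkers above) amounts to launching one task per worker, skipping one id, and keeping the futures so the caller can block on completion. A standard-library approximation, with std::async standing in for the thread pool plus ClientPool and the worker ids chosen arbitrarily:

    #include <future>
    #include <vector>

    // Run `work` for every worker id except `skip_id` and collect the
    // futures; std::async replaces utils::ThreadPool in this sketch.
    template <typename TWork>
    std::vector<std::future<void>> ExecuteOnWorkers(
        const std::vector<int> &worker_ids, int skip_id, TWork work) {
      std::vector<std::future<void>> futures;
      for (int id : worker_ids) {
        if (id == skip_id) continue;
        futures.push_back(std::async(std::launch::async, work, id));
      }
      return futures;
    }

    int main() {
      auto futures = ExecuteOnWorkers({1, 2, 3}, 2, [](int id) {
        // Placeholder for an RPC call to worker `id`.
      });
      // Block until every outstanding call has finished before tearing
      // anything down -- the same reason the joiner below waits.
      for (auto &f : futures) f.wait();
    }
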
- for (auto &future : futures) { - future.wait(); - } - } - - private: - RpcWorkerClients &clients_; -}; - -} // namespace distributed diff --git a/src/distributed/serialization.capnp b/src/distributed/serialization.capnp deleted file mode 100644 index 4f51247c1..000000000 --- a/src/distributed/serialization.capnp +++ /dev/null @@ -1,71 +0,0 @@ -@0xccb448f0b998d9c8; - -using Cxx = import "/capnp/c++.capnp"; -$Cxx.namespace("distributed::capnp"); - -struct Address { - gid @0 :UInt64; - workerId @1 :Int16; -} - -struct PropertyValue { - id @0 :UInt16; - value @1 :TypedValue; -} - -struct Edge { - from @0 :Address; - to @1 :Address; - typeId @2 :UInt16; - properties @3 :List(PropertyValue); -} - -struct Vertex { - outEdges @0 :List(EdgeEntry); - inEdges @1 :List(EdgeEntry); - labelIds @2 :List(UInt16); - properties @3 :List(PropertyValue); - - struct EdgeEntry { - vertexAddress @0 :Address; - edgeAddress @1 :Address; - edgeTypeId @2 :UInt16; - } -} - -struct TypedValue { - union { - nullType @0 :Void; - bool @1 :Bool; - integer @2 :Int64; - double @3 :Float64; - string @4 :Text; - list @5 :List(TypedValue); - map @6 :List(Entry); - vertex @7 :VertexAccessor; - edge @8 :EdgeAccessor; - path @9 :Path; - } - - struct Entry { - key @0 :Text; - value @1 :TypedValue; - } - - struct VertexAccessor { - address @0 :UInt64; - old @1 :Vertex; - new @2: Vertex; - } - - struct EdgeAccessor { - address @0 :UInt64; - old @1 :Edge; - new @2: Edge; - } - - struct Path { - vertices @0 :List(VertexAccessor); - edges @1 :List(EdgeAccessor); - } -} diff --git a/src/distributed/serialization.cpp b/src/distributed/serialization.cpp deleted file mode 100644 index e8c74f831..000000000 --- a/src/distributed/serialization.cpp +++ /dev/null @@ -1,120 +0,0 @@ -#include "distributed/serialization.hpp" - -namespace { - -template <class TAddress> -void SaveAddress(TAddress address, - distributed::capnp::Address::Builder *builder, - int16_t worker_id) { - builder->setGid(address.is_local() ? address.local()->gid_ : address.gid()); - builder->setWorkerId(address.is_local() ? 
worker_id : address.worker_id()); -} - -storage::VertexAddress LoadVertexAddress( - const distributed::capnp::Address::Reader &reader) { - return {reader.getGid(), reader.getWorkerId()}; -} - -storage::EdgeAddress LoadEdgeAddress( - const distributed::capnp::Address::Reader &reader) { - return {reader.getGid(), reader.getWorkerId()}; -} - -void SaveProperties( - const PropertyValueStore &props, - ::capnp::List<distributed::capnp::PropertyValue>::Builder *builder) { - int64_t i = 0; - for (const auto &kv : props) { - auto prop_builder = (*builder)[i]; - prop_builder.setId(kv.first.Id()); - auto value_builder = prop_builder.initValue(); - utils::SaveCapnpTypedValue(kv.second, &value_builder); - ++i; - } -} - -PropertyValueStore LoadProperties( - const ::capnp::List<distributed::capnp::PropertyValue>::Reader &reader) { - PropertyValueStore props; - for (const auto &prop_reader : reader) { - query::TypedValue value; - utils::LoadCapnpTypedValue(prop_reader.getValue(), &value); - props.set(storage::Property(prop_reader.getId()), value); - } - return props; -} - -} // namespace - -namespace distributed { - -void SaveVertex(const Vertex &vertex, capnp::Vertex::Builder *builder, - int16_t worker_id) { - auto save_edges = [worker_id](const auto &edges, auto *edges_builder) { - int64_t i = 0; - for (const auto &edge : edges) { - auto edge_builder = (*edges_builder)[i]; - auto vertex_addr_builder = edge_builder.initVertexAddress(); - SaveAddress(edge.vertex, &vertex_addr_builder, worker_id); - auto edge_addr_builder = edge_builder.initEdgeAddress(); - SaveAddress(edge.edge, &edge_addr_builder, worker_id); - edge_builder.setEdgeTypeId(edge.edge_type.Id()); - ++i; - } - }; - auto out_builder = builder->initOutEdges(vertex.out_.size()); - save_edges(vertex.out_, &out_builder); - auto in_builder = builder->initInEdges(vertex.in_.size()); - save_edges(vertex.in_, &in_builder); - auto labels_builder = builder->initLabelIds(vertex.labels_.size()); - for (size_t i = 0; i < vertex.labels_.size(); ++i) { - labels_builder.set(i, vertex.labels_[i].Id()); - } - auto properties_builder = builder->initProperties(vertex.properties_.size()); - SaveProperties(vertex.properties_, &properties_builder); -} - -template <> -std::unique_ptr<Vertex> LoadVertex(const capnp::Vertex::Reader &reader) { - auto vertex = std::make_unique<Vertex>(); - auto load_edges = [](const auto &edges_reader) { - Edges edges; - for (const auto &edge_reader : edges_reader) { - auto vertex_address = LoadVertexAddress(edge_reader.getVertexAddress()); - auto edge_address = LoadEdgeAddress(edge_reader.getEdgeAddress()); - storage::EdgeType edge_type(edge_reader.getEdgeTypeId()); - edges.emplace(vertex_address, edge_address, edge_type); - } - return edges; - }; - vertex->out_ = load_edges(reader.getOutEdges()); - vertex->in_ = load_edges(reader.getInEdges()); - for (const auto &label_id : reader.getLabelIds()) { - vertex->labels_.emplace_back(label_id); - } - vertex->properties_ = LoadProperties(reader.getProperties()); - return vertex; -} - -void SaveEdge(const Edge &edge, capnp::Edge::Builder *builder, - int16_t worker_id) { - auto from_builder = builder->initFrom(); - SaveAddress(edge.from_, &from_builder, worker_id); - auto to_builder = builder->initTo(); - SaveAddress(edge.to_, &to_builder, worker_id); - builder->setTypeId(edge.edge_type_.Id()); - auto properties_builder = builder->initProperties(edge.properties_.size()); - SaveProperties(edge.properties_, &properties_builder); -} - -template <> -std::unique_ptr<Edge> LoadEdge(const 
capnp::Edge::Reader &reader) { - auto from = LoadVertexAddress(reader.getFrom()); - auto to = LoadVertexAddress(reader.getTo()); - auto edge = - std::make_unique<Edge>(from, to, storage::EdgeType{reader.getTypeId()}); - edge->properties_ = LoadProperties(reader.getProperties()); - return edge; -} - -} // namespace distributed diff --git a/src/distributed/serialization.hpp b/src/distributed/serialization.hpp deleted file mode 100644 index 463c3cea5..000000000 --- a/src/distributed/serialization.hpp +++ /dev/null @@ -1,209 +0,0 @@ -#pragma once - -#include <cstdint> -#include <memory> -#include <vector> - -#include "distributed/serialization.capnp.h" -#include "storage/address_types.hpp" -#include "storage/edge.hpp" -#include "storage/types.hpp" -#include "storage/vertex.hpp" -#include "utils/serialization.hpp" - -namespace distributed { - -namespace impl { - -// Saves the given address into the given archive. Converts a local address to a -// global one, using the given worker_id. -template <typename TArchive, typename TAddress> -void SaveAddress(TArchive &ar, TAddress address, int worker_id) { - if (address.is_local()) { - ar << address.local()->gid_; - ar << worker_id; - } else { - ar << address.gid(); - ar << address.worker_id(); - } -}; - -// Saves the given properties into the given archive. -template <typename TArchive> -void SaveProperties(TArchive &ar, const PropertyValueStore &props) { - ar << props.size(); - for (auto &kv : props) { - ar << kv.first.Id(); - utils::SaveTypedValue(ar, kv.second); - } -} -} // namespace impl - -void SaveVertex(const Vertex &vertex, capnp::Vertex::Builder *builder, - int16_t worker_id); - -/** - * Saves the given vertex into the given Boost archive. - * - * @param ar - Archive into which to serialize. - * @param vertex - Getting serialized. - * @param worker_id - ID of the worker this is happening on. Necessary for local - * to global address conversion. - * @tparam TArchive - type of archive. - */ -template <typename TArchive> -void SaveVertex(TArchive &ar, const Vertex &vertex, int worker_id) { - auto save_edges = [&ar, worker_id](auto &edges) { - ar << edges.size(); - for (auto &edge_struct : edges) { - impl::SaveAddress(ar, edge_struct.vertex, worker_id); - impl::SaveAddress(ar, edge_struct.edge, worker_id); - ar << edge_struct.edge_type.Id(); - } - }; - save_edges(vertex.out_); - save_edges(vertex.in_); - - ar << vertex.labels_.size(); - for (auto &label : vertex.labels_) { - ar << label.Id(); - } - - impl::SaveProperties(ar, vertex.properties_); -} - -void SaveEdge(const Edge &edge, capnp::Edge::Builder *builder, - int16_t worker_id); - -/** - * Saves the given edge into the given Boost archive. - * - * @param - Archive into which to serialize. - * @param edge - Getting serialized. - * @param worker_id - ID of the worker this is happening on. Necessary for local - * to global address conversion. - * @tparam TArchive - type of archive. - */ -template <typename TArchive> -void SaveEdge(TArchive &ar, const Edge &edge, int worker_id) { - impl::SaveAddress(ar, edge.from_, worker_id); - impl::SaveAddress(ar, edge.to_, worker_id); - ar << edge.edge_type_.Id(); - impl::SaveProperties(ar, edge.properties_); -} - -/// Alias for `SaveEdge` allowing for param type resolution. -inline void SaveElement(const Edge &record, capnp::Edge::Builder *builder, - int16_t worker_id) { - return SaveEdge(record, builder, worker_id); -} - -/// Alias for `SaveVertex` allowing for param type resolution. 
-inline void SaveElement(const Vertex &record, capnp::Vertex::Builder *builder, - int16_t worker_id) { - return SaveVertex(record, builder, worker_id); -} - -/// Alias for `SaveEdge` allowing for param type resolution. -template <typename TArchive> -void SaveElement(TArchive &ar, const Edge &record, int worker_id) { - return SaveEdge(ar, record, worker_id); -} - -/// Alias for `SaveVertex` allowing for param type resolution. -template <typename TArchive> -void SaveElement(TArchive &ar, const Vertex &record, int worker_id) { - return SaveVertex(ar, record, worker_id); -} - -namespace impl { - -template <typename TArchive> -storage::VertexAddress LoadVertexAddress(TArchive &ar) { - gid::Gid vertex_id; - ar >> vertex_id; - int worker_id; - ar >> worker_id; - return {vertex_id, worker_id}; -} - -template <typename TArchive> -void LoadProperties(TArchive &ar, PropertyValueStore &store) { - size_t count; - ar >> count; - for (size_t i = 0; i < count; ++i) { - storage::Property::IdT prop; - ar >> prop; - query::TypedValue value; - utils::LoadTypedValue(ar, value); - store.set(storage::Property(prop), static_cast<PropertyValue>(value)); - } -} - -} // namespace impl - -/** - * Loads a Vertex from the given archive and returns it. - * - * @param ar - The archive to load from. - * @tparam TArchive - archive type. - */ -template <typename TArchive> -std::unique_ptr<Vertex> LoadVertex(TArchive &ar) { - auto vertex = std::make_unique<Vertex>(); - - auto decode_edges = [&ar](Edges &edges) { - size_t count; - ar >> count; - for (size_t i = 0; i < count; ++i) { - auto vertex_address = impl::LoadVertexAddress(ar); - storage::EdgeType::IdT edge_type; - gid::Gid edge_id; - ar >> edge_id; - int edge_worker_id; - ar >> edge_worker_id; - ar >> edge_type; - edges.emplace(vertex_address, {edge_id, edge_worker_id}, - storage::EdgeType(edge_type)); - } - }; - decode_edges(vertex->out_); - decode_edges(vertex->in_); - - size_t count; - ar >> count; - for (size_t i = 0; i < count; ++i) { - storage::Label::IdT label; - ar >> label; - vertex->labels_.emplace_back(label); - } - impl::LoadProperties(ar, vertex->properties_); - - return vertex; -} - -template <> -std::unique_ptr<Vertex> LoadVertex(const capnp::Vertex::Reader &reader); - -/** - * Loads an Edge from the given archive and returns it. - * - * @param ar - The archive to load from. - * @tparam TArchive - archive type. 
- */ -template <typename TArchive> -std::unique_ptr<Edge> LoadEdge(TArchive &ar) { - auto from = impl::LoadVertexAddress(ar); - auto to = impl::LoadVertexAddress(ar); - storage::EdgeType::IdT edge_type; - ar >> edge_type; - auto edge = std::make_unique<Edge>(from, to, storage::EdgeType{edge_type}); - impl::LoadProperties(ar, edge->properties_); - - return edge; -} - -template <> -std::unique_ptr<Edge> LoadEdge(const capnp::Edge::Reader &reader); - -} // namespace distributed diff --git a/src/distributed/storage_gc_rpc_messages.lcp b/src/distributed/storage_gc_rpc_messages.lcp deleted file mode 100644 index a6bff3311..000000000 --- a/src/distributed/storage_gc_rpc_messages.lcp +++ /dev/null @@ -1,20 +0,0 @@ -#>cpp -#pragma once - -#include "communication/rpc/messages.hpp" -#include "distributed/storage_gc_rpc_messages.capnp.h" -#include "io/network/endpoint.hpp" -#include "transactions/transaction.hpp" -cpp<# - -(lcp:namespace distributed) - -(lcp:capnp-namespace "distributed") - -(lcp:define-rpc ran-local-gc - (:request - ((local-oldest-active "tx::TransactionId" :capnp-type "UInt64") - (worker-id :int16_t))) - (:response ())) - -(lcp:pop-namespace) ;; distributed diff --git a/src/distributed/token_sharing_rpc_messages.lcp b/src/distributed/token_sharing_rpc_messages.lcp deleted file mode 100644 index 6c3450d05..000000000 --- a/src/distributed/token_sharing_rpc_messages.lcp +++ /dev/null @@ -1,20 +0,0 @@ -#>cpp -#pragma once - -#include <memory> -#include <string> - -#include "communication/rpc/messages.hpp" -#include "distributed/serialization.hpp" -#include "distributed/token_sharing_rpc_messages.capnp.h" -cpp<# - -(lcp:namespace distributed) - -(lcp:capnp-namespace "distributed") - -(lcp:define-rpc token-transfer - (:request ()) - (:response ())) - -(lcp:pop-namespace) ;; distributed diff --git a/src/distributed/token_sharing_rpc_server.hpp b/src/distributed/token_sharing_rpc_server.hpp deleted file mode 100644 index a3d8c3fe0..000000000 --- a/src/distributed/token_sharing_rpc_server.hpp +++ /dev/null @@ -1,100 +0,0 @@ -#pragma once - -#include "distributed/rpc_worker_clients.hpp" -#include "storage/dynamic_graph_partitioner/dgp.hpp" - -namespace communication::rpc { -class Server; -} - -namespace database { -class GraphDb; -}; - -namespace distributed { - -/// Shares the token between dynamic graph partitioners instances across workers -/// by passing the token from one worker to another, in a circular fashion. This -/// guarantees that no two workers will execute the dynamic graph partitioner -/// step in the same time. 
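
The ring order used for passing the token can be shown in isolation: sort the known worker ids, hand the token to the first id greater than our own, and wrap around to the smallest id at the end of the ring. A minimal sketch with hypothetical worker ids, standard C++ only:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Returns the id of the worker that should receive the token next.
    int NextTokenHolder(std::vector<int> workers, int my_id) {
      assert(!workers.empty());
      std::sort(workers.begin(), workers.end());
      auto pos = std::upper_bound(workers.begin(), workers.end(), my_id);
      // Wrap around: the largest id hands the token back to the smallest.
      return pos != workers.end() ? *pos : workers.front();
    }

    int main() {
      assert(NextTokenHolder({0, 1, 2, 3}, 1) == 2);
      assert(NextTokenHolder({0, 1, 2, 3}, 3) == 0);  // ring wraps around
    }
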
-class TokenSharingRpcServer { - public: - TokenSharingRpcServer(database::GraphDb *db, int worker_id, - distributed::Coordination *coordination, - communication::rpc::Server *server, - distributed::TokenSharingRpcClients *clients) - : worker_id_(worker_id), - coordination_(coordination), - server_(server), - clients_(clients), - dgp_(db) { - server_->Register<distributed::TokenTransferRpc>( - [this](const auto &req_reader, auto *res_builder) { token_ = true; }); - - runner_ = std::thread([this]() { - while (true) { - // Wait till we get the token - while (!token_) { - if (shutting_down_) break; - std::this_thread::sleep_for(std::chrono::seconds(1)); - } - - if (shutting_down_) break; - - token_ = false; - dgp_.Run(); - - // Transfer token to next - auto workers = coordination_->GetWorkerIds(); - sort(workers.begin(), workers.end()); - - int next_worker = -1; - auto pos = std::upper_bound(workers.begin(), workers.end(), worker_id_); - if (pos != workers.end()) { - next_worker = *pos; - } else { - next_worker = workers[0]; - } - - clients_->TransferToken(next_worker); - } - }); - } - - /// Starts the token sharing server which in turn starts the dynamic graph - /// partitioner. - void StartTokenSharing() { - started_ = true; - token_ = true; - } - - ~TokenSharingRpcServer() { - shutting_down_ = true; - if (runner_.joinable()) runner_.join(); - if (started_ && worker_id_ == 0) { - // Wait till we get the token back otherwise some worker might try to - // migrate to another worker while that worker is shutting down or - // something else bad might happen - // TODO(dgleich): Solve this better in the future since this blocks - // shutting down until spinner steps complete - while (!token_) { - std::this_thread::sleep_for(std::chrono::milliseconds(500)); - } - } - } - - private: - int worker_id_; - distributed::Coordination *coordination_; - communication::rpc::Server *server_; - distributed::TokenSharingRpcClients *clients_; - - std::atomic<bool> started_{false}; - std::atomic<bool> token_{false}; - std::atomic<bool> shutting_down_{false}; - std::thread runner_; - - DynamicGraphPartitioner dgp_; -}; - -} // namespace distributed diff --git a/src/distributed/transactional_cache_cleaner.hpp b/src/distributed/transactional_cache_cleaner.hpp deleted file mode 100644 index 98e6007fd..000000000 --- a/src/distributed/transactional_cache_cleaner.hpp +++ /dev/null @@ -1,86 +0,0 @@ -#pragma once - -#include <functional> -#include <vector> - -#include "communication/rpc/server.hpp" -#include "distributed/produce_rpc_server.hpp" -#include "distributed/transactional_cache_cleaner_rpc_messages.hpp" -#include "transactions/engine.hpp" -#include "transactions/engine_worker.hpp" -#include "utils/scheduler.hpp" - -namespace distributed { - -/// Periodically calls `ClearTransactionalCache(oldest_transaction)` on all -/// registered objects. -class TransactionalCacheCleaner { - /// The wait time between two releases of local transaction objects that have - /// expired on the master. - static constexpr std::chrono::seconds kCacheReleasePeriod{1}; - - public: - template <typename... T> - TransactionalCacheCleaner(tx::Engine &tx_engine, T &... caches) - : tx_engine_(tx_engine) { - Register(caches...); - cache_clearing_scheduler_.Run( - "DistrTxCacheGc", kCacheReleasePeriod, - [this]() { this->Clear(tx_engine_.GlobalGcSnapshot().back()); }); - } - - protected: - /// Registers the given object for transactional cleaning. 
The object will - /// periodically get it's `ClearCache(tx::TransactionId)` method called - /// with the oldest active transaction id. Note that the ONLY guarantee for - /// the call param is that there are no transactions alive that have an id - /// lower than it. - template <typename TCache> - void Register(TCache &cache) { - functions_.emplace_back([&cache](tx::TransactionId oldest_active) { - cache.ClearTransactionalCache(oldest_active); - }); - } - - private: - template <typename TCache, typename... T> - void Register(TCache &cache, T &... caches) { - Register(cache); - Register(caches...); - } - - void Clear(tx::TransactionId oldest_active) { - for (auto &f : functions_) f(oldest_active); - } - - tx::Engine &tx_engine_; - std::vector<std::function<void(tx::TransactionId &oldest_active)>> functions_; - utils::Scheduler cache_clearing_scheduler_; -}; - -/// Registers a RPC server that listens for `WaitOnTransactionEnd` requests -/// that require all ongoing produces to finish. It also periodically calls -/// `ClearTransactionalCache` on all registered objects. -class WorkerTransactionalCacheCleaner : public TransactionalCacheCleaner { - public: - template <class... T> - WorkerTransactionalCacheCleaner(tx::WorkerEngine &tx_engine, - communication::rpc::Server &server, - ProduceRpcServer &produce_server, - T &... caches) - : TransactionalCacheCleaner(tx_engine, caches...), - rpc_server_(server), - produce_server_(produce_server) { - Register(tx_engine); - rpc_server_.Register<WaitOnTransactionEndRpc>([this](const auto &req_reader, - auto *res_builder) { - produce_server_.FinishAndClearOngoingProducePlans(req_reader.getMember()); - }); - } - - private: - communication::rpc::Server &rpc_server_; - ProduceRpcServer &produce_server_; -}; - -} // namespace distributed diff --git a/src/distributed/transactional_cache_cleaner_rpc_messages.lcp b/src/distributed/transactional_cache_cleaner_rpc_messages.lcp deleted file mode 100644 index 7580bd8e3..000000000 --- a/src/distributed/transactional_cache_cleaner_rpc_messages.lcp +++ /dev/null @@ -1,17 +0,0 @@ -#>cpp -#pragma once - -#include "distributed/transactional_cache_cleaner_rpc_messages.capnp.h" -#include "communication/rpc/messages.hpp" -#include "transactions/type.hpp" -cpp<# - -(lcp:namespace distributed) - -(lcp:capnp-namespace "distributed") - -(lcp:define-rpc wait-on-transaction-end - (:request ((member "tx::TransactionId" :capnp-type "UInt64"))) - (:response ())) - -(lcp:pop-namespace) diff --git a/src/distributed/updates_rpc_clients.cpp b/src/distributed/updates_rpc_clients.cpp deleted file mode 100644 index 0f0b61f20..000000000 --- a/src/distributed/updates_rpc_clients.cpp +++ /dev/null @@ -1,116 +0,0 @@ - -#include <unordered_map> -#include <vector> - -#include "distributed/updates_rpc_clients.hpp" -#include "query/exceptions.hpp" -#include "utils/thread/sync.hpp" - -namespace distributed { - -namespace { -void RaiseIfRemoteError(UpdateResult result) { - switch (result) { - case UpdateResult::UNABLE_TO_DELETE_VERTEX_ERROR: - throw query::RemoveAttachedVertexException(); - case UpdateResult::SERIALIZATION_ERROR: - throw mvcc::SerializationError(); - case UpdateResult::LOCK_TIMEOUT_ERROR: - throw utils::LockTimeoutException( - "Remote LockTimeoutError during edge creation"); - case UpdateResult::UPDATE_DELETED_ERROR: - throw RecordDeletedError(); - case UpdateResult::DONE: - break; - } -} -} - -UpdateResult UpdatesRpcClients::Update(int worker_id, - const database::StateDelta &delta) { - auto res = 
worker_clients_.GetClientPool(worker_id).Call<UpdateRpc>(delta); - CHECK(res) << "UpdateRpc failed on worker: " << worker_id; - return res->member; -} - -gid::Gid UpdatesRpcClients::CreateVertex( - int worker_id, tx::TransactionId tx_id, - const std::vector<storage::Label> &labels, - const std::unordered_map<storage::Property, query::TypedValue> - &properties) { - auto res = worker_clients_.GetClientPool(worker_id).Call<CreateVertexRpc>( - CreateVertexReqData{tx_id, labels, properties}); - CHECK(res) << "CreateVertexRpc failed on worker: " << worker_id; - CHECK(res->member.result == UpdateResult::DONE) - << "Remote Vertex creation result not UpdateResult::DONE"; - return res->member.gid; -} - -storage::EdgeAddress UpdatesRpcClients::CreateEdge( - tx::TransactionId tx_id, VertexAccessor &from, VertexAccessor &to, - storage::EdgeType edge_type) { - CHECK(from.address().is_remote()) << "In CreateEdge `from` must be remote"; - int from_worker = from.address().worker_id(); - auto res = worker_clients_.GetClientPool(from_worker) - .Call<CreateEdgeRpc>(CreateEdgeReqData{ - from.gid(), to.GlobalAddress(), edge_type, tx_id}); - CHECK(res) << "CreateEdge RPC failed on worker: " << from_worker; - RaiseIfRemoteError(res->member.result); - return {res->member.gid, from_worker}; -} - -void UpdatesRpcClients::AddInEdge(tx::TransactionId tx_id, - VertexAccessor &from, - storage::EdgeAddress edge_address, - VertexAccessor &to, - storage::EdgeType edge_type) { - CHECK(to.address().is_remote() && edge_address.is_remote() && - (from.GlobalAddress().worker_id() != to.address().worker_id())) - << "AddInEdge should only be called when `to` is remote and " - "`from` is not on the same worker as `to`."; - auto worker_id = to.GlobalAddress().worker_id(); - auto res = worker_clients_.GetClientPool(worker_id).Call<AddInEdgeRpc>( - AddInEdgeReqData{from.GlobalAddress(), edge_address, to.gid(), edge_type, - tx_id}); - CHECK(res) << "AddInEdge RPC failed on worker: " << worker_id; - RaiseIfRemoteError(res->member); -} - -void UpdatesRpcClients::RemoveVertex(int worker_id, tx::TransactionId tx_id, - gid::Gid gid, bool check_empty) { - auto res = worker_clients_.GetClientPool(worker_id).Call<RemoveVertexRpc>( - RemoveVertexReqData{gid, tx_id, check_empty}); - CHECK(res) << "RemoveVertex RPC failed on worker: " << worker_id; - RaiseIfRemoteError(res->member); -} - -void UpdatesRpcClients::RemoveEdge(tx::TransactionId tx_id, int worker_id, - gid::Gid edge_gid, gid::Gid vertex_from_id, - storage::VertexAddress vertex_to_addr) { - auto res = worker_clients_.GetClientPool(worker_id).Call<RemoveEdgeRpc>( - RemoveEdgeData{tx_id, edge_gid, vertex_from_id, vertex_to_addr}); - CHECK(res) << "RemoveEdge RPC failed on worker: " << worker_id; - RaiseIfRemoteError(res->member); -} - -void UpdatesRpcClients::RemoveInEdge(tx::TransactionId tx_id, int worker_id, - gid::Gid vertex_id, - storage::EdgeAddress edge_address) { - CHECK(edge_address.is_remote()) << "RemoveInEdge edge_address is local."; - auto res = worker_clients_.GetClientPool(worker_id).Call<RemoveInEdgeRpc>( - RemoveInEdgeData{tx_id, vertex_id, edge_address}); - CHECK(res) << "RemoveInEdge RPC failed on worker: " << worker_id; - RaiseIfRemoteError(res->member); -} - -std::vector<utils::Future<UpdateResult>> UpdatesRpcClients::UpdateApplyAll( - int skip_worker_id, tx::TransactionId tx_id) { - return worker_clients_.ExecuteOnWorkers<UpdateResult>( - skip_worker_id, [tx_id](int worker_id, auto &client) { - auto res = client.template Call<UpdateApplyRpc>(tx_id); - CHECK(res) << 
"UpdateApplyRpc failed"; - return res->member; - }); -} - -} // namespace distributed diff --git a/src/distributed/updates_rpc_clients.hpp b/src/distributed/updates_rpc_clients.hpp deleted file mode 100644 index a5baf55f7..000000000 --- a/src/distributed/updates_rpc_clients.hpp +++ /dev/null @@ -1,76 +0,0 @@ -#pragma once - -#include <unordered_map> -#include <vector> - -#include "database/state_delta.hpp" -#include "distributed/rpc_worker_clients.hpp" -#include "distributed/updates_rpc_messages.hpp" -#include "query/typed_value.hpp" -#include "storage/address_types.hpp" -#include "storage/gid.hpp" -#include "storage/types.hpp" -#include "transactions/type.hpp" -#include "utils/future.hpp" - -namespace distributed { - -/// Exposes the functionality to send updates to other workers (that own the -/// graph element we are updating). Also enables us to call for a worker to -/// apply the accumulated deferred updates, or discard them. -class UpdatesRpcClients { - public: - explicit UpdatesRpcClients(RpcWorkerClients &clients) - : worker_clients_(clients) {} - - /// Sends an update delta to the given worker. - UpdateResult Update(int worker_id, const database::StateDelta &delta); - - /// Creates a vertex on the given worker and returns it's id. - gid::Gid CreateVertex( - int worker_id, tx::TransactionId tx_id, - const std::vector<storage::Label> &labels, - const std::unordered_map<storage::Property, query::TypedValue> - &properties); - - /// Creates an edge on the given worker and returns it's address. If the `to` - /// vertex is on the same worker as `from`, then all remote CRUD will be - /// handled by a call to this function. Otherwise a separate call to - /// `AddInEdge` might be necessary. Throws all the exceptions that can - /// occur remotely as a result of updating a vertex. - storage::EdgeAddress CreateEdge(tx::TransactionId tx_id, - VertexAccessor &from, VertexAccessor &to, - storage::EdgeType edge_type); - - /// Adds the edge with the given address to the `to` vertex as an incoming - /// edge. Only used when `to` is remote and not on the same worker as `from`. - void AddInEdge(tx::TransactionId tx_id, VertexAccessor &from, - storage::EdgeAddress edge_address, VertexAccessor &to, - storage::EdgeType edge_type); - - /// Removes a vertex from the other worker. - void RemoveVertex(int worker_id, tx::TransactionId tx_id, gid::Gid gid, - bool check_empty); - - /// Removes an edge on another worker. This also handles the `from` vertex - /// outgoing edge, as that vertex is on the same worker as the edge. If the - /// `to` vertex is on the same worker, then that side is handled too by the - /// single RPC call, otherwise a separate call has to be made to - /// RemoveInEdge. - void RemoveEdge(tx::TransactionId tx_id, int worker_id, gid::Gid edge_gid, - gid::Gid vertex_from_id, - storage::VertexAddress vertex_to_addr); - - void RemoveInEdge(tx::TransactionId tx_id, int worker_id, - gid::Gid vertex_id, storage::EdgeAddress edge_address); - - /// Calls for all the workers (except the given one) to apply their updates - /// and returns the future results. 
- std::vector<utils::Future<UpdateResult>> UpdateApplyAll( - int skip_worker_id, tx::TransactionId tx_id); - - private: - RpcWorkerClients &worker_clients_; -}; - -} // namespace distributed diff --git a/src/distributed/updates_rpc_messages.lcp b/src/distributed/updates_rpc_messages.lcp deleted file mode 100644 index e9be24b4f..000000000 --- a/src/distributed/updates_rpc_messages.lcp +++ /dev/null @@ -1,187 +0,0 @@ -#>cpp -#pragma once - -#include <unordered_map> - -#include "communication/rpc/messages.hpp" -#include "database/state_delta.hpp" -#include "distributed/updates_rpc_messages.capnp.h" -#include "storage/address_types.hpp" -#include "storage/gid.hpp" -#include "transactions/type.hpp" -#include "utils/serialization.hpp" -cpp<# - -(lcp:namespace distributed) - -(lcp:capnp-namespace "distributed") - -(lcp:capnp-import 'db "/database/state_delta.capnp") -(lcp:capnp-import 'dis "/distributed/serialization.capnp") -(lcp:capnp-import 'storage "/storage/serialization.capnp") -(lcp:capnp-import 'utils "/utils/serialization.capnp") - -(lcp:capnp-type-conversion "tx::TransactionId" "UInt64") -(lcp:capnp-type-conversion "gid::Gid" "UInt64") -(lcp:capnp-type-conversion "storage::Label" "Storage.Common") -(lcp:capnp-type-conversion "storage::EdgeType" "Storage.Common") -(lcp:capnp-type-conversion "storage::Property" "Storage.Common") -(lcp:capnp-type-conversion "storage::EdgeAddress" "Storage.Address") -(lcp:capnp-type-conversion "storage::VertexAddress" "Storage.Address") - -(lcp:define-enum update-result - (done - serialization-error - lock-timeout-error - update-deleted-error - unable-to-delete-vertex-error) - (:documentation "The result of sending or applying a deferred update to a worker.") - (:serialize)) - -(lcp:define-rpc update - (:request ((member "database::StateDelta" :capnp-type "Db.StateDelta"))) - (:response ((member "UpdateResult" - :capnp-init nil - :capnp-save (lcp:capnp-save-enum "capnp::UpdateResult" "UpdateResult") - :capnp-load (lcp:capnp-load-enum "capnp::UpdateResult" "UpdateResult"))))) - -(lcp:define-rpc update-apply - (:request ((member "tx::TransactionId"))) - (:response ((member "UpdateResult" - :capnp-init nil - :capnp-save (lcp:capnp-save-enum "capnp::UpdateResult" "UpdateResult") - :capnp-load (lcp:capnp-load-enum "capnp::UpdateResult" "UpdateResult"))))) - -(lcp:define-struct create-result () - ((result "UpdateResult" - :capnp-init nil - :capnp-save (lcp:capnp-save-enum "capnp::UpdateResult" "UpdateResult") - :capnp-load (lcp:capnp-load-enum "capnp::UpdateResult" "UpdateResult")) - (gid "gid::Gid" :documentation "Only valid if creation was successful.")) - (:serialize :boost :capnp)) - -(lcp:define-struct create-vertex-req-data () - ((tx-id "tx::TransactionId") - (labels "std::vector<storage::Label>" - :capnp-save (lcp:capnp-save-vector "storage::capnp::Common" "storage::Label") - :capnp-load (lcp:capnp-load-vector "storage::capnp::Common" "storage::Label")) - (properties "std::unordered_map<storage::Property, query::TypedValue>" - :save-fun - #>cpp - ar << properties.size(); - for (auto &kv : properties) { - ar << kv.first; - utils::SaveTypedValue(ar, kv.second); - } - cpp<# - :load-fun - #>cpp - size_t props_size; - ar >> props_size; - for (size_t i = 0; i < props_size; ++i) { - storage::Property p; - ar >> p; - query::TypedValue tv; - utils::LoadTypedValue(ar, tv); - properties.emplace(p, std::move(tv)); - } - cpp<# - :capnp-type "Utils.Map(Storage.Common, Dis.TypedValue)" - :capnp-save - (lambda (builder member) - #>cpp - 
utils::SaveMap<storage::capnp::Common, capnp::TypedValue>( - ${member}, &${builder}, - [](auto *builder, const auto &entry) { - auto key_builder = builder->initKey(); - entry.first.Save(&key_builder); - auto value_builder = builder->initValue(); - utils::SaveCapnpTypedValue(entry.second, &value_builder); - }); - cpp<#) - :capnp-load - (lambda (reader member) - #>cpp - utils::LoadMap<storage::capnp::Common, capnp::TypedValue>( - &${member}, ${reader}, - [](const auto &reader) { - storage::Property prop; - prop.Load(reader.getKey()); - query::TypedValue value; - utils::LoadCapnpTypedValue(reader.getValue(), &value); - return std::make_pair(prop, value); - }); - cpp<#))) - (:serialize :capnp)) - -(lcp:define-rpc create-vertex - (:request ((member "CreateVertexReqData"))) - (:response ((member "CreateResult")))) - -(lcp:define-struct create-edge-req-data () - ((from "gid::Gid") - (to "storage::VertexAddress") - (edge-type "storage::EdgeType") - (tx-id "tx::TransactionId")) - (:serialize :capnp)) - -(lcp:define-rpc create-edge - (:request ((member "CreateEdgeReqData"))) - (:response ((member "CreateResult")))) - -(lcp:define-struct add-in-edge-req-data () - ((from "storage::VertexAddress") - (edge-address "storage::EdgeAddress") - (to "gid::Gid") - (edge-type "storage::EdgeType") - (tx-id "tx::TransactionId")) - (:serialize :capnp)) - -(lcp:define-rpc add-in-edge - (:request ((member "AddInEdgeReqData"))) - (:response ((member "UpdateResult" - :capnp-init nil - :capnp-save (lcp:capnp-save-enum "capnp::UpdateResult" "UpdateResult") - :capnp-load (lcp:capnp-load-enum "capnp::UpdateResult" "UpdateResult"))))) - -(lcp:define-struct remove-vertex-req-data () - ((gid "gid::Gid") - (tx-id "tx::TransactionId") - (check-empty :bool)) - (:serialize :capnp)) - -(lcp:define-rpc remove-vertex - (:request ((member "RemoveVertexReqData"))) - (:response ((member "UpdateResult" - :capnp-init nil - :capnp-save (lcp:capnp-save-enum "capnp::UpdateResult" "UpdateResult") - :capnp-load (lcp:capnp-load-enum "capnp::UpdateResult" "UpdateResult"))))) - -(lcp:define-struct remove-edge-data () - ((tx-id "tx::TransactionId") - (edge-id "gid::Gid") - (vertex-from-id "gid::Gid") - (vertex-to-address "storage::VertexAddress")) - (:serialize :capnp)) - -(lcp:define-rpc remove-edge - (:request ((member "RemoveEdgeData"))) - (:response ((member "UpdateResult" - :capnp-init nil - :capnp-save (lcp:capnp-save-enum "capnp::UpdateResult" "UpdateResult") - :capnp-load (lcp:capnp-load-enum "capnp::UpdateResult" "UpdateResult"))))) - -(lcp:define-struct remove-in-edge-data () - ((tx-id "tx::TransactionId") - (vertex "gid::Gid") - (edge-address "storage::EdgeAddress")) - (:serialize :capnp)) - -(lcp:define-rpc remove-in-edge - (:request ((member "RemoveInEdgeData"))) - (:response ((member "UpdateResult" - :capnp-init nil - :capnp-save (lcp:capnp-save-enum "capnp::UpdateResult" "UpdateResult") - :capnp-load (lcp:capnp-load-enum "capnp::UpdateResult" "UpdateResult"))))) - -(lcp:pop-namespace) ;; distributed diff --git a/src/distributed/updates_rpc_server.cpp b/src/distributed/updates_rpc_server.cpp deleted file mode 100644 index 106d2d8f5..000000000 --- a/src/distributed/updates_rpc_server.cpp +++ /dev/null @@ -1,385 +0,0 @@ -#include <utility> - -#include "glog/logging.h" - -#include "distributed/updates_rpc_server.hpp" -#include "utils/thread/sync.hpp" - -namespace distributed { - -template <typename TRecordAccessor> -UpdateResult UpdatesRpcServer::TransactionUpdates<TRecordAccessor>::Emplace( - const database::StateDelta &delta) { - 
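  // The delta is only buffered here: it is keyed by the gid of the record it
  // touches and queued next to an accessor for that record. Nothing is
  // written to storage at this point; the queued deltas are applied when the
  // transaction owner later calls Apply() (or discarded by
  // ClearTransactionalCache once the transaction is done).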
auto gid = std::is_same<TRecordAccessor, VertexAccessor>::value - ? delta.vertex_id - : delta.edge_id; - std::lock_guard<utils::SpinLock> guard{lock_}; - auto found = deltas_.find(gid); - if (found == deltas_.end()) { - found = - deltas_ - .emplace(gid, std::make_pair(FindAccessor(gid), - std::vector<database::StateDelta>{})) - .first; - } - - found->second.second.emplace_back(delta); - - // TODO call `RecordAccessor::update` to force serialization errors to - // fail-fast (as opposed to when all the deltas get applied). - // - // This is problematic because `VersionList::update` needs to become - // thread-safe within the same transaction. Note that the concurrency is - // possible both between the owner worker interpretation thread and an RPC - // thread (current thread), as well as multiple RPC threads if this - // object's lock is released (perhaps desirable). - // - // A potential solution *might* be that `LockStore::Lock` returns a `bool` - // indicating if the caller was the one obtaining the lock (not the same - // as lock already being held by the same transaction). - // - // Another thing that needs to be done (if we do this) is ensuring that - // `LockStore::Take` is thread-safe when called in parallel in the same - // transaction. Currently it's thread-safe only when called in parallel - // from different transactions (only one manages to take the RecordLock). - // - // Deferring the implementation of this as it's tricky, and essentially an - // optimization. - // - // try { - // found->second.first.update(); - // } catch (const mvcc::SerializationError &) { - // return UpdateResult::SERIALIZATION_ERROR; - // } catch (const RecordDeletedError &) { - // return UpdateResult::UPDATE_DELETED_ERROR; - // } catch (const utils::LockTimeoutException &) { - // return UpdateResult::LOCK_TIMEOUT_ERROR; - // } - return UpdateResult::DONE; -} - -template <typename TRecordAccessor> -gid::Gid UpdatesRpcServer::TransactionUpdates<TRecordAccessor>::CreateVertex( - const std::vector<storage::Label> &labels, - const std::unordered_map<storage::Property, query::TypedValue> - &properties) { - auto result = db_accessor_.InsertVertex(); - for (auto &label : labels) result.add_label(label); - for (auto &kv : properties) result.PropsSet(kv.first, kv.second); - std::lock_guard<utils::SpinLock> guard{lock_}; - deltas_.emplace(result.gid(), - std::make_pair(result, std::vector<database::StateDelta>{})); - return result.gid(); -} - -template <typename TRecordAccessor> -gid::Gid UpdatesRpcServer::TransactionUpdates<TRecordAccessor>::CreateEdge( - gid::Gid from, storage::VertexAddress to, storage::EdgeType edge_type) { - auto &db = db_accessor_.db(); - auto from_addr = db.storage().LocalizedAddressIfPossible( - storage::VertexAddress(from, db.WorkerId())); - auto to_addr = db.storage().LocalizedAddressIfPossible(to); - auto edge = db_accessor_.InsertOnlyEdge(from_addr, to_addr, edge_type); - std::lock_guard<utils::SpinLock> guard{lock_}; - deltas_.emplace(edge.gid(), - std::make_pair(edge, std::vector<database::StateDelta>{})); - return edge.gid(); -} - -template <typename TRecordAccessor> -UpdateResult UpdatesRpcServer::TransactionUpdates<TRecordAccessor>::Apply() { - std::lock_guard<utils::SpinLock> guard{lock_}; - for (auto &kv : deltas_) { - auto &record_accessor = kv.second.first; - // We need to reconstruct the record as in the meantime some local - // update might have updated it. 
- record_accessor.Reconstruct(); - for (database::StateDelta &delta : kv.second.second) { - try { - auto &dba = db_accessor_; - switch (delta.type) { - case database::StateDelta::Type::TRANSACTION_BEGIN: - case database::StateDelta::Type::TRANSACTION_COMMIT: - case database::StateDelta::Type::TRANSACTION_ABORT: - case database::StateDelta::Type::CREATE_VERTEX: - case database::StateDelta::Type::CREATE_EDGE: - case database::StateDelta::Type::BUILD_INDEX: - LOG(FATAL) << "Can only apply record update deltas for remote " - "graph element"; - case database::StateDelta::Type::REMOVE_VERTEX: - if (!db_accessor().RemoveVertex( - reinterpret_cast<VertexAccessor &>(record_accessor), - delta.check_empty)) { - return UpdateResult::UNABLE_TO_DELETE_VERTEX_ERROR; - } - break; - case database::StateDelta::Type::SET_PROPERTY_VERTEX: - case database::StateDelta::Type::SET_PROPERTY_EDGE: - record_accessor.PropsSet(delta.property, delta.value); - break; - case database::StateDelta::Type::ADD_LABEL: - reinterpret_cast<VertexAccessor &>(record_accessor) - .add_label(delta.label); - break; - case database::StateDelta::Type::REMOVE_LABEL: - reinterpret_cast<VertexAccessor &>(record_accessor) - .remove_label(delta.label); - break; - case database::StateDelta::Type::ADD_OUT_EDGE: - reinterpret_cast<Vertex &>(record_accessor.update()) - .out_.emplace(dba.db().storage().LocalizedAddressIfPossible( - delta.vertex_to_address), - dba.db().storage().LocalizedAddressIfPossible( - delta.edge_address), - delta.edge_type); - dba.wal().Emplace(delta); - break; - case database::StateDelta::Type::ADD_IN_EDGE: - reinterpret_cast<Vertex &>(record_accessor.update()) - .in_.emplace(dba.db().storage().LocalizedAddressIfPossible( - delta.vertex_from_address), - dba.db().storage().LocalizedAddressIfPossible( - delta.edge_address), - delta.edge_type); - dba.wal().Emplace(delta); - break; - case database::StateDelta::Type::REMOVE_EDGE: - // We only remove the edge as a result of this StateDelta, - // because the removal of edge from vertex in/out is performed - // in REMOVE_[IN/OUT]_EDGE deltas. 
- db_accessor_.RemoveEdge( - reinterpret_cast<EdgeAccessor &>(record_accessor), false, - false); - break; - case database::StateDelta::Type::REMOVE_OUT_EDGE: - reinterpret_cast<VertexAccessor &>(record_accessor) - .RemoveOutEdge(delta.edge_address); - break; - case database::StateDelta::Type::REMOVE_IN_EDGE: - reinterpret_cast<VertexAccessor &>(record_accessor) - .RemoveInEdge(delta.edge_address); - break; - } - } catch (const mvcc::SerializationError &) { - return UpdateResult::SERIALIZATION_ERROR; - } catch (const RecordDeletedError &) { - return UpdateResult::UPDATE_DELETED_ERROR; - } catch (const utils::LockTimeoutException &) { - return UpdateResult::LOCK_TIMEOUT_ERROR; - } - } - } - return UpdateResult::DONE; -} - -UpdatesRpcServer::UpdatesRpcServer(database::GraphDb &db, - communication::rpc::Server &server) - : db_(db) { - server.Register<UpdateRpc>([this](const auto &req_reader, auto *res_builder) { - UpdateReq req; - req.Load(req_reader); - using DeltaType = database::StateDelta::Type; - auto &delta = req.member; - switch (delta.type) { - case DeltaType::SET_PROPERTY_VERTEX: - case DeltaType::ADD_LABEL: - case DeltaType::REMOVE_LABEL: - case database::StateDelta::Type::REMOVE_OUT_EDGE: - case database::StateDelta::Type::REMOVE_IN_EDGE: { - UpdateRes res( - GetUpdates(vertex_updates_, delta.transaction_id).Emplace(delta)); - res.Save(res_builder); - return; - } - case DeltaType::SET_PROPERTY_EDGE: { - UpdateRes res( - GetUpdates(edge_updates_, delta.transaction_id).Emplace(delta)); - res.Save(res_builder); - return; - } - default: - LOG(FATAL) << "Can't perform a remote update with delta type: " - << static_cast<int>(req.member.type); - } - }); - - server.Register<UpdateApplyRpc>( - [this](const auto &req_reader, auto *res_builder) { - UpdateApplyReq req; - req.Load(req_reader); - UpdateApplyRes res(Apply(req.member)); - res.Save(res_builder); - }); - - server.Register<CreateVertexRpc>([this](const auto &req_reader, - auto *res_builder) { - CreateVertexReq req; - req.Load(req_reader); - gid::Gid gid = GetUpdates(vertex_updates_, req.member.tx_id) - .CreateVertex(req.member.labels, req.member.properties); - CreateVertexRes res(CreateResult{UpdateResult::DONE, gid}); - res.Save(res_builder); - }); - - server.Register<CreateEdgeRpc>( - [this](const auto &req_reader, auto *res_builder) { - CreateEdgeReq req; - req.Load(req_reader); - auto data = req.member; - auto creation_result = CreateEdge(data); - - // If `from` and `to` are both on this worker, we handle it in this - // RPC call. Do it only if CreateEdge succeeded. 
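          // Emplacing the AddInEdge delta straight into this worker's own
          // vertex updates avoids sending a separate AddInEdgeRpc back to
          // ourselves; when `to` lives on another worker, the client still
          // has to issue that RPC itself.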
- if (creation_result.result == UpdateResult::DONE && - data.to.worker_id() == db_.WorkerId()) { - auto to_delta = database::StateDelta::AddInEdge( - data.tx_id, data.to.gid(), {data.from, db_.WorkerId()}, - {creation_result.gid, db_.WorkerId()}, data.edge_type); - creation_result.result = - GetUpdates(vertex_updates_, data.tx_id).Emplace(to_delta); - } - - CreateEdgeRes res(creation_result); - res.Save(res_builder); - }); - - server.Register<AddInEdgeRpc>( - [this](const auto &req_reader, auto *res_builder) { - AddInEdgeReq req; - req.Load(req_reader); - auto to_delta = database::StateDelta::AddInEdge( - req.member.tx_id, req.member.to, req.member.from, - req.member.edge_address, req.member.edge_type); - auto result = - GetUpdates(vertex_updates_, req.member.tx_id).Emplace(to_delta); - AddInEdgeRes res(result); - res.Save(res_builder); - }); - - server.Register<RemoveVertexRpc>( - [this](const auto &req_reader, auto *res_builder) { - RemoveVertexReq req; - req.Load(req_reader); - auto to_delta = database::StateDelta::RemoveVertex( - req.member.tx_id, req.member.gid, req.member.check_empty); - auto result = - GetUpdates(vertex_updates_, req.member.tx_id).Emplace(to_delta); - RemoveVertexRes res(result); - res.Save(res_builder); - }); - - server.Register<RemoveEdgeRpc>( - [this](const auto &req_reader, auto *res_builder) { - RemoveEdgeReq req; - req.Load(req_reader); - RemoveEdgeRes res(RemoveEdge(req.member)); - res.Save(res_builder); - }); - - server.Register<RemoveInEdgeRpc>([this](const auto &req_reader, - auto *res_builder) { - RemoveInEdgeReq req; - req.Load(req_reader); - auto data = req.member; - RemoveInEdgeRes res(GetUpdates(vertex_updates_, data.tx_id) - .Emplace(database::StateDelta::RemoveInEdge( - data.tx_id, data.vertex, data.edge_address))); - res.Save(res_builder); - }); -} - -UpdateResult UpdatesRpcServer::Apply(tx::TransactionId tx_id) { - auto apply = [tx_id](auto &collection) { - auto access = collection.access(); - auto found = access.find(tx_id); - if (found == access.end()) { - return UpdateResult::DONE; - } - auto result = found->second.Apply(); - access.remove(tx_id); - return result; - }; - - auto vertex_result = apply(vertex_updates_); - auto edge_result = apply(edge_updates_); - if (vertex_result != UpdateResult::DONE) return vertex_result; - if (edge_result != UpdateResult::DONE) return edge_result; - return UpdateResult::DONE; -} - -void UpdatesRpcServer::ClearTransactionalCache( - tx::TransactionId oldest_active) { - auto vertex_access = vertex_updates_.access(); - for (auto &kv : vertex_access) { - if (kv.first < oldest_active) { - vertex_access.remove(kv.first); - } - } - auto edge_access = edge_updates_.access(); - for (auto &kv : edge_access) { - if (kv.first < oldest_active) { - edge_access.remove(kv.first); - } - } -} - -// Gets/creates the TransactionUpdates for the given transaction. 
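// The helper below implements a pattern used throughout this protocol:
// updates arriving over RPC are buffered per transaction and only applied
// once the transaction owner sends UpdateApplyRpc, so the first thing every
// handler does is "get or create the buffer for this transaction id".
// A minimal stand-alone sketch of that shape, using only the standard
// library; TxId, UpdateBuffer and PerTransactionBuffers are illustrative
// stand-ins for tx::TransactionId, TransactionUpdates and the ConcurrentMap
// used in the real code.

#include <cstdint>
#include <mutex>
#include <unordered_map>
#include <vector>

using TxId = std::uint64_t;

struct UpdateBuffer {
  // Stand-in for the per-gid vectors of database::StateDelta kept by
  // TransactionUpdates; the real TransactionUpdates additionally guards its
  // own deltas with a utils::SpinLock.
  std::vector<int> deltas;
};

class PerTransactionBuffers {
 public:
  // Returns the buffer for `tx_id`, creating it on first use.
  UpdateBuffer &GetOrCreate(TxId tx_id) {
    std::lock_guard<std::mutex> guard{lock_};
    return buffers_[tx_id];  // operator[] default-constructs missing entries
  }

 private:
  std::mutex lock_;  // stands in for the lock-free ConcurrentMap
  std::unordered_map<TxId, UpdateBuffer> buffers_;
};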
-template <typename TAccessor> -UpdatesRpcServer::TransactionUpdates<TAccessor> &UpdatesRpcServer::GetUpdates( - MapT<TAccessor> &updates, tx::TransactionId tx_id) { - return updates.access() - .emplace(tx_id, std::make_tuple(tx_id), - std::make_tuple(std::ref(db_), tx_id)) - .first->second; -} - -CreateResult UpdatesRpcServer::CreateEdge(const CreateEdgeReqData &req) { - auto gid = GetUpdates(edge_updates_, req.tx_id) - .CreateEdge(req.from, req.to, req.edge_type); - - auto from_delta = database::StateDelta::AddOutEdge( - req.tx_id, req.from, req.to, {gid, db_.WorkerId()}, req.edge_type); - - auto result = GetUpdates(vertex_updates_, req.tx_id).Emplace(from_delta); - return {result, gid}; -} - -UpdateResult UpdatesRpcServer::RemoveEdge(const RemoveEdgeData &data) { - // Edge removal. - auto deletion_delta = - database::StateDelta::RemoveEdge(data.tx_id, data.edge_id); - auto result = GetUpdates(edge_updates_, data.tx_id).Emplace(deletion_delta); - - // Out-edge removal, for sure is local. - if (result == UpdateResult::DONE) { - auto remove_out_delta = database::StateDelta::RemoveOutEdge( - data.tx_id, data.vertex_from_id, {data.edge_id, db_.WorkerId()}); - result = GetUpdates(vertex_updates_, data.tx_id).Emplace(remove_out_delta); - } - - // In-edge removal, might not be local. - if (result == UpdateResult::DONE && - data.vertex_to_address.worker_id() == db_.WorkerId()) { - auto remove_in_delta = database::StateDelta::RemoveInEdge( - data.tx_id, data.vertex_to_address.gid(), - {data.edge_id, db_.WorkerId()}); - result = GetUpdates(vertex_updates_, data.tx_id).Emplace(remove_in_delta); - } - - return result; -} - -template <> -VertexAccessor -UpdatesRpcServer::TransactionUpdates<VertexAccessor>::FindAccessor( - gid::Gid gid) { - return db_accessor_.FindVertex(gid, false); -} - -template <> -EdgeAccessor UpdatesRpcServer::TransactionUpdates<EdgeAccessor>::FindAccessor( - gid::Gid gid) { - return db_accessor_.FindEdge(gid, false); -} - -} // namespace distributed diff --git a/src/distributed/updates_rpc_server.hpp b/src/distributed/updates_rpc_server.hpp deleted file mode 100644 index dc81d9eb6..000000000 --- a/src/distributed/updates_rpc_server.hpp +++ /dev/null @@ -1,104 +0,0 @@ -#pragma once - -#include <unordered_map> -#include <vector> - -#include "glog/logging.h" - -#include "communication/rpc/server.hpp" -#include "data_structures/concurrent/concurrent_map.hpp" -#include "database/graph_db.hpp" -#include "database/graph_db_accessor.hpp" -#include "database/state_delta.hpp" -#include "distributed/updates_rpc_messages.hpp" -#include "query/typed_value.hpp" -#include "storage/edge_accessor.hpp" -#include "storage/gid.hpp" -#include "storage/types.hpp" -#include "storage/vertex_accessor.hpp" -#include "transactions/type.hpp" -#include "utils/thread/sync.hpp" - -namespace distributed { - -/// An RPC server that accepts and holds deferred updates (deltas) until it's -/// told to apply or discard them. The updates are organized and applied per -/// transaction in this single updates server. -/// -/// Attempts to get serialization and update-after-delete errors to happen as -/// soon as possible during query execution (fail fast). -class UpdatesRpcServer { - // Remote updates for one transaction. - template <typename TRecordAccessor> - class TransactionUpdates { - public: - TransactionUpdates(database::GraphDb &db, tx::TransactionId tx_id) - : db_accessor_(db, tx_id) {} - - /// Adds a delta and returns the result. 
Does not modify the state (data) of - /// the graph element the update is for, but calls the `update` method to - /// fail-fast on serialization and update-after-delete errors. - UpdateResult Emplace(const database::StateDelta &delta); - - /// Creates a new vertex and returns it's gid. - gid::Gid CreateVertex( - const std::vector<storage::Label> &labels, - const std::unordered_map<storage::Property, query::TypedValue> - &properties); - - /// Creates a new edge and returns it's gid. Does not update vertices at the - /// end of the edge. - gid::Gid CreateEdge(gid::Gid from, storage::VertexAddress to, - storage::EdgeType edge_type); - - /// Applies all the deltas on the record. - UpdateResult Apply(); - - auto &db_accessor() { return db_accessor_; } - - private: - database::GraphDbAccessor db_accessor_; - std::unordered_map< - gid::Gid, std::pair<TRecordAccessor, std::vector<database::StateDelta>>> - deltas_; - // Multiple workers might be sending remote updates concurrently. - utils::SpinLock lock_; - - // Helper method specialized for [Vertex|Edge]Accessor. - TRecordAccessor FindAccessor(gid::Gid gid); - }; - - public: - UpdatesRpcServer(database::GraphDb &db, communication::rpc::Server &server); - - /// Applies all existsing updates for the given transaction ID. If there are - /// no updates for that transaction, nothing happens. Clears the updates cache - /// after applying them, regardless of the result. - UpdateResult Apply(tx::TransactionId tx_id); - - /// Clears the cache of local transactions that are completed. The signature - /// of this method is dictated by `distributed::TransactionalCacheCleaner`. - void ClearTransactionalCache(tx::TransactionId oldest_active); - - private: - database::GraphDb &db_; - - template <typename TAccessor> - using MapT = - ConcurrentMap<tx::TransactionId, TransactionUpdates<TAccessor>>; - MapT<VertexAccessor> vertex_updates_; - MapT<EdgeAccessor> edge_updates_; - - // Gets/creates the TransactionUpdates for the given transaction. - template <typename TAccessor> - TransactionUpdates<TAccessor> &GetUpdates(MapT<TAccessor> &updates, - tx::TransactionId tx_id); - - // Performs edge creation for the given request. - CreateResult CreateEdge(const CreateEdgeReqData &req); - - // Performs edge removal for the given request. 
- UpdateResult RemoveEdge(const RemoveEdgeData &data); -}; - -} // namespace distributed diff --git a/src/durability/recovery.capnp b/src/durability/recovery.capnp deleted file mode 100644 index 243b295c6..000000000 --- a/src/durability/recovery.capnp +++ /dev/null @@ -1,9 +0,0 @@ -@0xb3d70bc0576218f3; - -using Cxx = import "/capnp/c++.capnp"; -$Cxx.namespace("durability::capnp"); - -struct RecoveryInfo { - snapshotTxId @0 :UInt64; - maxWalTxId @1 :UInt64; -} diff --git a/src/durability/recovery.hpp b/src/durability/recovery.hpp index 87cfe6c11..61db9e747 100644 --- a/src/durability/recovery.hpp +++ b/src/durability/recovery.hpp @@ -5,7 +5,6 @@ #include "database/graph_db.hpp" #include "durability/hashed_file_reader.hpp" -#include "durability/recovery.capnp.h" #include "storage/vertex_accessor.hpp" #include "transactions/type.hpp" @@ -25,25 +24,6 @@ struct RecoveryInfo { max_wal_tx_id == other.max_wal_tx_id; } bool operator!=(const RecoveryInfo &other) const { return !(*this == other); } - - void Save(capnp::RecoveryInfo::Builder *builder) const { - builder->setSnapshotTxId(snapshot_tx_id); - builder->setMaxWalTxId(max_wal_tx_id); - } - - void Load(const capnp::RecoveryInfo::Reader &reader) { - snapshot_tx_id = reader.getSnapshotTxId(); - max_wal_tx_id = reader.getMaxWalTxId(); - } - - private: - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, unsigned int) { - ar &snapshot_tx_id; - ar &max_wal_tx_id; - } }; /** Reads snapshot metadata from the end of the file without messing up the diff --git a/src/io/CMakeLists.txt b/src/io/CMakeLists.txt index 10cb3263a..0aa3c631d 100644 --- a/src/io/CMakeLists.txt +++ b/src/io/CMakeLists.txt @@ -4,29 +4,5 @@ set(io_src_files network/socket.cpp network/utils.cpp) -# Use this function to add each capnp file to generation. This way each file is -# standalone and we avoid recompiling everything. -# NOTE: io_src_files and io_capnp_files are globally updated. -# TODO: This is duplicated from src/CMakeLists.txt, find a good way to -# generalize this on per subdirectory basis. 
-function(add_capnp capnp_src_file) - set(cpp_file ${CMAKE_CURRENT_SOURCE_DIR}/${capnp_src_file}.c++) - set(h_file ${CMAKE_CURRENT_SOURCE_DIR}/${capnp_src_file}.h) - add_custom_command(OUTPUT ${cpp_file} ${h_file} - COMMAND ${CAPNP_EXE} compile -o${CAPNP_CXX_EXE} ${capnp_src_file} -I ${CMAKE_CURRENT_SOURCE_DIR} - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${capnp_src_file} capnproto-proj - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) - # Update *global* io_capnp_files - set(io_capnp_files ${io_capnp_files} ${cpp_file} ${h_file} PARENT_SCOPE) - # Update *global* io_src_files - set(io_src_files ${io_src_files} ${cpp_file} PARENT_SCOPE) -endfunction(add_capnp) - -add_capnp(network/endpoint.capnp) - -add_custom_target(generate_io_capnp DEPENDS ${io_capnp_files}) - add_library(mg-io STATIC ${io_src_files}) target_link_libraries(mg-io stdc++fs Threads::Threads fmt glog mg-utils) -target_link_libraries(mg-io capnp kj) -add_dependencies(mg-io generate_io_capnp) diff --git a/src/io/network/endpoint.capnp b/src/io/network/endpoint.capnp deleted file mode 100644 index bc58b2869..000000000 --- a/src/io/network/endpoint.capnp +++ /dev/null @@ -1,10 +0,0 @@ -@0x93c2449a1e02365a; - -using Cxx = import "/capnp/c++.capnp"; -$Cxx.namespace("io::network::capnp"); - -struct Endpoint { - address @0 :Text; - port @1 :UInt16; - family @2 :UInt8; -} diff --git a/src/io/network/endpoint.cpp b/src/io/network/endpoint.cpp index c1f94bb22..9761d1b60 100644 --- a/src/io/network/endpoint.cpp +++ b/src/io/network/endpoint.cpp @@ -24,18 +24,6 @@ Endpoint::Endpoint(const std::string &address, uint16_t port) CHECK(family_ != 0) << "Not a valid IPv4 or IPv6 address: " << address; } -void Endpoint::Save(capnp::Endpoint::Builder *builder) const { - builder->setAddress(address_); - builder->setPort(port_); - builder->setFamily(family_); -} - -void Endpoint::Load(const capnp::Endpoint::Reader &reader) { - address_ = reader.getAddress(); - port_ = reader.getPort(); - family_ = reader.getFamily(); -} - bool Endpoint::operator==(const Endpoint &other) const { return address_ == other.address_ && port_ == other.port_ && family_ == other.family_; diff --git a/src/io/network/endpoint.hpp b/src/io/network/endpoint.hpp index bc17ccfd3..64c9aaf78 100644 --- a/src/io/network/endpoint.hpp +++ b/src/io/network/endpoint.hpp @@ -5,7 +5,6 @@ #include <iostream> #include <string> -#include "io/network/endpoint.capnp.h" #include "utils/exceptions.hpp" namespace io::network { @@ -27,9 +26,6 @@ class Endpoint { bool operator==(const Endpoint &other) const; friend std::ostream &operator<<(std::ostream &os, const Endpoint &endpoint); - void Save(capnp::Endpoint::Builder *builder) const; - void Load(const capnp::Endpoint::Reader &reader); - private: std::string address_; uint16_t port_{0}; diff --git a/src/lisp/lcp.lisp b/src/lisp/lcp.lisp index 84736c9fe..017bec11e 100644 --- a/src/lisp/lcp.lisp +++ b/src/lisp/lcp.lisp @@ -1514,7 +1514,6 @@ code generation." (when schema (write-line schema out)))))) ;; Now generate the save/load C++ code in the cpp file. 
(write-line "// Autogenerated Cap'n Proto serialization code" cpp-out) - (write-line "#include \"utils/serialization.hpp\"" cpp-out) (let (open-namespaces) (dolist (cpp-class (remove-if (lambda (cpp-type) (not (typep cpp-type 'cpp-class))) cpp-types)) ;; Check if we need to open or close namespaces diff --git a/src/memgraph_bolt.cpp b/src/memgraph_bolt.cpp index 56b231c6a..1c85bd467 100644 --- a/src/memgraph_bolt.cpp +++ b/src/memgraph_bolt.cpp @@ -12,9 +12,9 @@ #include <glog/logging.h> #include "communication/bolt/v1/session.hpp" +#include "communication/server.hpp" #include "config.hpp" #include "database/graph_db.hpp" -#include "stats/stats.hpp" #include "telemetry/telemetry.hpp" #include "utils/flag_validation.hpp" #include "utils/signals.hpp" @@ -103,8 +103,7 @@ void InitSignalHandlers(const std::function<void()> &shutdown_fun) { /// Run the Memgraph server. /// /// Sets up all the required state before running `memgraph_main` and does any -/// required cleanup afterwards. `get_stats_prefix` is used to obtain the -/// prefix when logging Memgraph's statistics. +/// required cleanup afterwards. /// /// Command line arguments and configuration files are read before calling any /// of the supplied functions. Therefore, you should use flags only from those @@ -116,8 +115,7 @@ void InitSignalHandlers(const std::function<void()> &shutdown_fun) { /// /// @code /// int main(int argc, char *argv[]) { -/// auto get_stats_prefix = []() -> std::string { return "memgraph"; }; -/// return WithInit(argc, argv, get_stats_prefix, SingleNodeMain); +/// return WithInit(argc, argv, SingleNodeMain); /// } /// @endcode /// @@ -126,8 +124,8 @@ void InitSignalHandlers(const std::function<void()> &shutdown_fun) { /// `InitSignalHandlers` with appropriate function to shutdown the server you /// started. int WithInit(int argc, char **argv, - const std::function<std::string()> &get_stats_prefix, const std::function<void()> &memgraph_main) { + google::SetUsageMessage("Memgraph database server"); gflags::SetVersionString(version_string); // Load config before parsing arguments, so that flags from the command line @@ -142,9 +140,6 @@ int WithInit(int argc, char **argv, // Unhandled exception handler init. std::set_terminate(&utils::TerminateHandler); - stats::InitStatsLogging(get_stats_prefix()); - utils::OnScopeExit stop_stats([] { stats::StopStatsLogging(); }); - // Initialize the communication library. communication::Init(); @@ -163,7 +158,6 @@ int WithInit(int argc, char **argv, } void SingleNodeMain() { - google::SetUsageMessage("Memgraph single-node database server"); database::SingleNode db; SessionData session_data{db}; @@ -206,79 +200,6 @@ void SingleNodeMain() { // End common stuff for enterprise and community editions -#ifdef MG_COMMUNITY - int main(int argc, char **argv) { - return WithInit(argc, argv, []() { return "memgraph"; }, SingleNodeMain); + return WithInit(argc, argv, SingleNodeMain); } - -#else // enterprise edition - -// Distributed flags. 
-DEFINE_HIDDEN_bool( - master, false, - "If this Memgraph server is the master in a distributed deployment."); -DEFINE_HIDDEN_bool( - worker, false, - "If this Memgraph server is a worker in a distributed deployment."); -DECLARE_int32(worker_id); - -void MasterMain() { - google::SetUsageMessage("Memgraph distributed master"); - - database::Master db; - SessionData session_data{db}; - - ServerContext context; - std::string service_name = "Bolt"; - if (FLAGS_key_file != "" && FLAGS_cert_file != "") { - context = ServerContext(FLAGS_key_file, FLAGS_cert_file); - service_name = "BoltS"; - } - - ServerT server({FLAGS_interface, static_cast<uint16_t>(FLAGS_port)}, - session_data, &context, FLAGS_session_inactivity_timeout, - service_name, FLAGS_num_workers); - - // Handler for regular termination signals - auto shutdown = [&server] { - // Server needs to be shutdown first and then the database. This prevents a - // race condition when a transaction is accepted during server shutdown. - server.Shutdown(); - }; - - InitSignalHandlers(shutdown); - server.AwaitShutdown(); -} - -void WorkerMain() { - google::SetUsageMessage("Memgraph distributed worker"); - database::Worker db; - db.WaitForShutdown(); -} - -int main(int argc, char **argv) { - auto get_stats_prefix = [&]() -> std::string { - if (FLAGS_master) { - return "master"; - } else if (FLAGS_worker) { - return fmt::format("worker-{}", FLAGS_worker_id); - } - return "memgraph"; - }; - - auto memgraph_main = [&]() { - CHECK(!(FLAGS_master && FLAGS_worker)) - << "Can't run Memgraph as worker and master at the same time"; - if (FLAGS_master) - MasterMain(); - else if (FLAGS_worker) - WorkerMain(); - else - SingleNodeMain(); - }; - - return WithInit(argc, argv, get_stats_prefix, memgraph_main); -} - -#endif // enterprise edition diff --git a/src/query/common.capnp b/src/query/common.capnp deleted file mode 100644 index 5dd38e778..000000000 --- a/src/query/common.capnp +++ /dev/null @@ -1,15 +0,0 @@ -@0xcbc2c66202fdf643; - -using Cxx = import "/capnp/c++.capnp"; -$Cxx.namespace("query::capnp"); - -using Ast = import "/query/frontend/ast/ast.capnp"; - -enum GraphView { - old @0; - new @1; -} - -struct TypedValueVectorCompare { - ordering @0 :List(Ast.Ordering); -} diff --git a/src/query/common.cpp b/src/query/common.cpp index 6261d989d..7ffb01de0 100644 --- a/src/query/common.cpp +++ b/src/query/common.cpp @@ -8,7 +8,6 @@ #include "glog/logging.h" #include "query/exceptions.hpp" -#include "utils/serialization.hpp" #include "utils/string.hpp" namespace query { @@ -281,28 +280,6 @@ bool TypedValueVectorCompare::TypedValueCompare(const TypedValue &a, } } -void TypedValueVectorCompare::Save( - capnp::TypedValueVectorCompare::Builder *builder) const { - auto ordering_builder = builder->initOrdering(ordering_.size()); - for (size_t i = 0; i < ordering_.size(); ++i) { - ordering_builder.set(i, ordering_[i] == Ordering::ASC - ? capnp::Ordering::ASC - : capnp::Ordering::DESC); - } -} - -void TypedValueVectorCompare::Load( - const capnp::TypedValueVectorCompare::Reader &reader) { - std::vector<Ordering> ordering; - ordering.reserve(reader.getOrdering().size()); - for (auto ordering_reader : reader.getOrdering()) { - ordering.push_back(ordering_reader == capnp::Ordering::ASC - ? 
Ordering::ASC - : Ordering::DESC); - } - ordering_ = ordering; -} - template <typename TAccessor> void SwitchAccessor(TAccessor &accessor, GraphView graph_view) { switch (graph_view) { diff --git a/src/query/common.hpp b/src/query/common.hpp index 8b4ff26cc..a7e453750 100644 --- a/src/query/common.hpp +++ b/src/query/common.hpp @@ -3,12 +3,9 @@ #include <cstdint> #include <string> -#include "boost/serialization/serialization.hpp" #include "query/frontend/ast/ast.hpp" #include "query/typed_value.hpp" -#include "query/common.capnp.h" - namespace query { // These are the functions for parsing literals and parameter names from @@ -49,18 +46,9 @@ class TypedValueVectorCompare final { const auto &ordering() const { return ordering_; } - void Save(capnp::TypedValueVectorCompare::Builder *builder) const; - void Load(const capnp::TypedValueVectorCompare::Reader &reader); - private: std::vector<Ordering> ordering_; - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar &ordering_; - } // Custom comparison for TypedValue objects. // // Behaves generally like Neo's ORDER BY comparison operator: diff --git a/src/query/exceptions.hpp b/src/query/exceptions.hpp index e9352ee63..2b04e78f5 100644 --- a/src/query/exceptions.hpp +++ b/src/query/exceptions.hpp @@ -115,11 +115,4 @@ class RemoveAttachedVertexException : public QueryRuntimeException { "connections. Consider using DETACH DELETE.") {} }; -class UserModificationInMulticommandTxException : public QueryException { - public: - UserModificationInMulticommandTxException() - : QueryException( - "User modification not allowed in multicommand transactions") {} -}; - } // namespace query diff --git a/src/query/frontend/ast/ast.capnp b/src/query/frontend/ast/ast.capnp deleted file mode 100644 index 17bc814d5..000000000 --- a/src/query/frontend/ast/ast.capnp +++ /dev/null @@ -1,396 +0,0 @@ -@0xb107d3d6b4b1600b; - -using Cxx = import "/capnp/c++.capnp"; -$Cxx.namespace("query::capnp"); - -using Dis = import "/distributed/serialization.capnp"; -using Storage = import "/storage/serialization.capnp"; -using Symbols = import "/query/frontend/semantic/symbol.capnp"; - -struct Tree { - uid @0 :Int64; - - union { - expression @1 :Expression; - where @2 :Where; - namedExpression @3 :NamedExpression; - patternAtom @4 :PatternAtom; - pattern @5 :Pattern; - clause @6 :Clause; - singleQuery @7 :SingleQuery; - cypherUnion @8 :CypherUnion; - query @9 :Query; - } -} - -struct Expression { - union { - binaryOperator @0 :BinaryOperator; - unaryOperator @1 :UnaryOperator; - baseLiteral @2 :BaseLiteral; - listSlicingOperator @3 :ListSlicingOperator; - ifOperator @4 :IfOperator; - identifier @5 :Identifier; - propertyLookup @6 :PropertyLookup; - labelsTest @7 :LabelsTest; - function @8 :Function; - reduce @9 :Reduce; - all @10 :All; - single @11 :Single; - parameterLookup @12 :ParameterLookup; - } -} - -struct Where { - expression @0 :Tree; -} - -struct NamedExpression { - name @0 :Text; - expression @1 :Tree; - tokenPosition @2 :Int32; -} - -struct PatternAtom { - union { - nodeAtom @0 :NodeAtom; - edgeAtom @1 :EdgeAtom; - } - identifier @2 :Tree; -} - -struct Pair(First, Second) { - first @0 :First; - second @1 :Second; -} - -struct NodeAtom { - properties @0 :List(Entry); - struct Entry { - key @0 :Pair(Text, Storage.Common); - value @1 :Tree; - } - - labels @1 :List(Storage.Common); -} - -struct EdgeAtom { - enum Type { - single @0; - depthFirst @1; - breadthFirst @2; - weightedShortestPath @3; - } 
- type @0 :Type; - - enum Direction { - in @0; - out @1; - both @2; - } - direction @1 :Direction; - - properties @2 :List(Entry); - struct Entry { - key @0 :Pair(Text, Storage.Common); - value @1 :Tree; - } - - lowerBound @3 :Tree; - upperBound @4 :Tree; - - filterLambda @5 :Lambda; - weightLambda @6 :Lambda; - struct Lambda { - innerEdge @0 :Tree; - innerNode @1 :Tree; - expression @2 :Tree; - } - - totalWeight @7 :Tree; - edgeTypes @8 :List(Storage.Common); -} - -struct Pattern { - identifier @0 :Tree; - atoms @1 :List(Tree); -} - -struct Clause { - union { - create @0 :Create; - match @1 :Match; - return @2 :Return; - with @3 :With; - delete @4 :Delete; - setProperty @5 :SetProperty; - setProperties @6 :SetProperties; - setLabels @7 :SetLabels; - removeProperty @8 :RemoveProperty; - removeLabels @9 :RemoveLabels; - merge @10 :Merge; - unwind @11 :Unwind; - createIndex @12 :CreateIndex; - modifyUser @13 :ModifyUser; - dropUser @14 :DropUser; - } -} - -struct SingleQuery { - clauses @0 :List(Tree); -} - -struct CypherUnion { - singleQuery @0 :Tree; - distinct @1 :Bool; - unionSymbols @2 :List(Symbols.Symbol); -} - -struct Query { - singleQuery @0 :Tree; - cypherUnions @1 :List(Tree); -} - -struct BinaryOperator { - union { - orOperator @0 :OrOperator; - xorOperator @1 :XorOperator; - andOperator @2 :AndOperator; - additionOperator @3 :AdditionOperator; - subtractionOperator @4 :SubtractionOperator; - multiplicationOperator @5 :MultiplicationOperator; - divisionOperator @6 :DivisionOperator; - modOperator @7 :ModOperator; - notEqualOperator @8 :NotEqualOperator; - equalOperator @9 :EqualOperator; - lessOperator @10 :LessOperator; - greaterOperator @11 :GreaterOperator; - lessEqualOperator @12 :LessEqualOperator; - greaterEqualOperator @13 :GreaterEqualOperator; - inListOperator @14 :InListOperator; - listMapIndexingOperator @15 :ListMapIndexingOperator; - aggregation @16 :Aggregation; - } - expression1 @17 :Tree; - expression2 @18 :Tree; -} - -struct OrOperator {} -struct XorOperator {} -struct AndOperator {} -struct AdditionOperator {} -struct SubtractionOperator {} -struct MultiplicationOperator {} -struct DivisionOperator {} -struct ModOperator {} -struct NotEqualOperator {} -struct EqualOperator {} -struct LessOperator {} -struct GreaterOperator {} -struct LessEqualOperator {} -struct GreaterEqualOperator {} -struct InListOperator {} -struct ListMapIndexingOperator {} -struct Aggregation { - enum Op { - count @0; - min @1; - max @2; - sum @3; - avg @4 ; - collectList @5; - collectMap @6; - } - op @0 :Op; -} - -struct UnaryOperator { - union { - notOperator @0 :NotOperator; - unaryPlusOperator @1 :UnaryPlusOperator; - unaryMinusOperator @2 :UnaryMinusOperator; - isNullOperator @3 :IsNullOperator; - } - expression @4 :Tree; -} - -struct NotOperator {} -struct UnaryPlusOperator {} -struct UnaryMinusOperator {} -struct IsNullOperator {} - -struct BaseLiteral { - union { - primitiveLiteral @0 :PrimitiveLiteral; - listLiteral @1 :ListLiteral; - mapLiteral @2 :MapLiteral; - } -} - -struct PrimitiveLiteral { - tokenPosition @0 :Int32; - value @1 :Dis.TypedValue; -} - -struct ListLiteral { - elements @0 :List(Tree); -} - -struct MapLiteral { - elements @0 :List(Entry); - struct Entry { - key @0 :Pair(Text, Storage.Common); - value @1 :Tree; - } -} - -struct ListSlicingOperator { - list @0 :Tree; - lowerBound @1 :Tree; - upperBound @2 :Tree; -} - -struct IfOperator { - condition @0 :Tree; - thenExpression @1 :Tree; - elseExpression @2 :Tree; -} - -struct Identifier { - name @0 :Text; - 
userDeclared @1 :Bool; -} - -struct PropertyLookup { - expression @0 :Tree; - propertyName @1 :Text; - property @2 :Storage.Common; -} - -struct LabelsTest { - expression @0 :Tree; - labels @1 :List(Storage.Common); -} - -struct Function { - functionName @0 :Text; - arguments @1 :List(Tree); -} - -struct Reduce { - accumulator @0 :Tree; - initializer @1 :Tree; - identifier @2 :Tree; - list @3 :Tree; - expression @4 :Tree; -} - -struct All { - identifier @0 :Tree; - listExpression @1 :Tree; - where @2 :Tree; -} - -struct Single { - identifier @0 :Tree; - listExpression @1 :Tree; - where @2 :Tree; -} - -struct ParameterLookup { - tokenPosition @0 :Int32; -} - -struct Create { - patterns @0 :List(Tree); -} - -struct Match { - patterns @0 :List(Tree); - where @1 :Tree; - optional @2 :Bool; -} - -enum Ordering { - asc @0; - desc @1; -} - -struct ReturnBody { - distinct @0 :Bool; - allIdentifiers @1 :Bool; - namedExpressions @2 :List(Tree); - orderBy @3 :List(Pair); - - struct Pair { - ordering @0 :Ordering; - expression @1 :Tree; - } - - skip @4 :Tree; - limit @5 :Tree; -} - -struct Return { - returnBody @0 :ReturnBody; -} - -struct With { - returnBody @0 :ReturnBody; - where @1 :Tree; -} - -struct Delete { - detach @0 :Bool; - expressions @1 :List(Tree); -} - -struct SetProperty { - propertyLookup @0 :Tree; - expression @1 :Tree; -} - -struct SetProperties { - identifier @0 :Tree; - expression @1 :Tree; - update @2 :Bool; -} - -struct SetLabels { - identifier @0 :Tree; - labels @1 :List(Storage.Common); -} - -struct RemoveProperty { - propertyLookup @0 :Tree; -} - -struct RemoveLabels { - identifier @0 :Tree; - labels @1 :List(Storage.Common); -} - -struct Merge { - pattern @0 :Tree; - onMatch @1 :List(Tree); - onCreate @2 :List(Tree); -} - -struct Unwind { - namedExpression @0 :Tree; -} - -struct CreateIndex { - label @0 :Storage.Common; - property @1 :Storage.Common; -} - -struct ModifyUser { - username @0 :Text; - password @1 :Tree; - isCreate @2 :Bool; -} - -struct DropUser { - usernames @0 :List(Text); -} diff --git a/src/query/frontend/ast/ast.cpp b/src/query/frontend/ast/ast.cpp index 417ac7587..2f62499fc 100644 --- a/src/query/frontend/ast/ast.cpp +++ b/src/query/frontend/ast/ast.cpp @@ -1,12 +1,6 @@ #include "query/frontend/ast/ast.hpp" #include <algorithm> -// Include archives before registering most derived types. -#include "boost/archive/binary_iarchive.hpp" -#include "boost/archive/binary_oarchive.hpp" -#include "boost/serialization/export.hpp" - -#include "utils/serialization.capnp.h" namespace query { @@ -45,2400 +39,11 @@ ReturnBody CloneReturnBody(AstStorage &storage, const ReturnBody &body) { return new_body; } -// Capnproto serialization. - -Tree *AstStorage::Load(const capnp::Tree::Reader &tree, - std::vector<int> *loaded_uids) { - auto uid = tree.getUid(); - - // Check if element already deserialized and if yes, return existing - // element from storage. 
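  // An AST node can be referenced from more than one place in the serialized
  // tree; each node is written out only once, under its uid. The lookup below
  // resolves such repeated references back to the single object that was
  // already materialized, instead of creating a duplicate node in storage.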
- if (utils::Contains(*loaded_uids, uid)) { - auto found = std::find_if(storage_.begin(), storage_.end(), - [&](const auto &n) { return n->uid() == uid; }); - DCHECK(found != storage_.end()); - return found->get(); - } - - Tree *ret = nullptr; - switch (tree.which()) { - case capnp::Tree::EXPRESSION: { - auto expr_reader = tree.getExpression(); - ret = Expression::Construct(expr_reader, this); - ret->Load(tree, this, loaded_uids); - break; - } - case capnp::Tree::WHERE: { - auto where_reader = tree.getWhere(); - ret = Where::Construct(where_reader, this); - ret->Load(tree, this, loaded_uids); - break; - } - case capnp::Tree::CLAUSE: { - auto clause_reader = tree.getClause(); - ret = Clause::Construct(clause_reader, this); - ret->Load(tree, this, loaded_uids); - break; - } - case capnp::Tree::CYPHER_UNION: { - auto cu_reader = tree.getCypherUnion(); - ret = CypherUnion::Construct(cu_reader, this); - ret->Load(tree, this, loaded_uids); - break; - } - case capnp::Tree::NAMED_EXPRESSION: { - auto ne_reader = tree.getNamedExpression(); - ret = NamedExpression::Construct(ne_reader, this); - ret->Load(tree, this, loaded_uids); - break; - } - case capnp::Tree::PATTERN: { - auto pattern_reader = tree.getPattern(); - ret = Pattern::Construct(pattern_reader, this); - ret->Load(tree, this, loaded_uids); - break; - } - case capnp::Tree::PATTERN_ATOM: { - auto pa_reader = tree.getPatternAtom(); - ret = PatternAtom::Construct(pa_reader, this); - ret->Load(tree, this, loaded_uids); - break; - } - case capnp::Tree::QUERY: { - this->query()->Load(tree, this, loaded_uids); - ret = this->query(); - break; - } - case capnp::Tree::SINGLE_QUERY: { - auto single_reader = tree.getSingleQuery(); - ret = SingleQuery::Construct(single_reader, this); - ret->Load(tree, this, loaded_uids); - break; - } - } - DCHECK(ret != nullptr); - loaded_uids->emplace_back(ret->uid_); - auto previous_max = std::max_element( - storage_.begin(), storage_.end(), - [](const std::unique_ptr<Tree> &a, const std::unique_ptr<Tree> &b) { - return a->uid() < b->uid(); - }); - next_uid_ = (*previous_max)->uid() + 1; - return ret; -} - -// Tree. -void Tree::Save(capnp::Tree::Builder *tree_builder, - std::vector<int> *saved_uids) { - tree_builder->setUid(uid_); -} - bool Tree::IsSaved(const std::vector<int> &saved_uids) { return utils::Contains(saved_uids, uid_); } -void Tree::Load(const capnp::Tree::Reader &reader, AstStorage *storage, - std::vector<int> *loaded_uids) { - uid_ = reader.getUid(); -} - void Tree::AddToSaved(std::vector<int> *saved_uids) { saved_uids->emplace_back(uid_); } - -// Expression. 
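// Each node family below follows the same three-step round trip: Save()
// writes the node into its branch of a Cap'n Proto union, Construct()
// switches on the union discriminant (`which()`) to allocate the matching
// concrete type inside AstStorage, and Load() then fills in the members.
// A minimal stand-alone sketch of that dispatch shape, with a plain enum in
// place of the generated `which()`; NodeKind, FakeReader, SketchNode and
// ConstructNode are invented names used purely for illustration.

#include <memory>
#include <stdexcept>

enum class NodeKind { kIdentifier, kPropertyLookup };

// Stand-in for a generated Cap'n Proto reader over a union.
struct FakeReader {
  NodeKind which;
};

struct SketchNode {
  virtual ~SketchNode() = default;
};
struct SketchIdentifier : SketchNode {};
struct SketchPropertyLookup : SketchNode {};

// Allocates the concrete node that matches the serialized branch; filling in
// the members would follow as a separate Load step, as in the code below.
std::unique_ptr<SketchNode> ConstructNode(const FakeReader &reader) {
  switch (reader.which) {
    case NodeKind::kIdentifier:
      return std::make_unique<SketchIdentifier>();
    case NodeKind::kPropertyLookup:
      return std::make_unique<SketchPropertyLookup>();
  }
  throw std::logic_error("unhandled node kind");
}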
-void Expression::Save(capnp::Tree::Builder *tree_builder, - std::vector<int> *saved_uids) { - Tree::Save(tree_builder, saved_uids); - if (IsSaved(*saved_uids)) { - return; - } - auto expr_builder = tree_builder->initExpression(); - Save(&expr_builder, saved_uids); - AddToSaved(saved_uids); } - -Expression *Expression::Construct(const capnp::Expression::Reader &reader, - AstStorage *storage) { - switch (reader.which()) { - case capnp::Expression::BINARY_OPERATOR: { - auto bop_reader = reader.getBinaryOperator(); - return BinaryOperator::Construct(bop_reader, storage); - } - case capnp::Expression::UNARY_OPERATOR: { - auto uop_reader = reader.getUnaryOperator(); - return UnaryOperator::Construct(uop_reader, storage); - } - case capnp::Expression::BASE_LITERAL: { - auto bl_reader = reader.getBaseLiteral(); - return BaseLiteral::Construct(bl_reader, storage); - } - case capnp::Expression::LIST_SLICING_OPERATOR: { - auto lso_reader = reader.getListSlicingOperator(); - return ListSlicingOperator::Construct(lso_reader, storage); - } - case capnp::Expression::IF_OPERATOR: { - auto if_reader = reader.getIfOperator(); - return IfOperator::Construct(if_reader, storage); - } - case capnp::Expression::ALL: { - auto all_reader = reader.getAll(); - return All::Construct(all_reader, storage); - } - case capnp::Expression::FUNCTION: { - auto func_reader = reader.getFunction(); - return Function::Construct(func_reader, storage); - } - case capnp::Expression::IDENTIFIER: { - auto id_reader = reader.getIdentifier(); - return Identifier::Construct(id_reader, storage); - } - case capnp::Expression::LABELS_TEST: { - auto labels_reader = reader.getLabelsTest(); - return LabelsTest::Construct(labels_reader, storage); - } - case capnp::Expression::PARAMETER_LOOKUP: { - auto pl_reader = reader.getParameterLookup(); - return ParameterLookup::Construct(pl_reader, storage); - } - case capnp::Expression::PROPERTY_LOOKUP: { - auto pl_reader = reader.getPropertyLookup(); - return PropertyLookup::Construct(pl_reader, storage); - } - case capnp::Expression::REDUCE: { - auto reduce_reader = reader.getReduce(); - return Reduce::Construct(reduce_reader, storage); - } - case capnp::Expression::SINGLE: { - auto single_reader = reader.getSingle(); - return Single::Construct(single_reader, storage); - } - } -} - -// Base Literal. -void BaseLiteral::Save(capnp::Expression::Builder *expr_builder, - std::vector<int> *saved_uids) { - Expression::Save(expr_builder, saved_uids); - auto base_literal_builder = expr_builder->initBaseLiteral(); - Save(&base_literal_builder, saved_uids); -} - -BaseLiteral *BaseLiteral::Construct(const capnp::BaseLiteral::Reader &reader, - AstStorage *storage) { - switch (reader.which()) { - case capnp::BaseLiteral::PRIMITIVE_LITERAL: { - auto literal = reader.getPrimitiveLiteral(); - return PrimitiveLiteral::Construct(literal, storage); - } - case capnp::BaseLiteral::LIST_LITERAL: { - auto literal = reader.getListLiteral(); - return ListLiteral::Construct(literal, storage); - } - case capnp::BaseLiteral::MAP_LITERAL: { - auto literal = reader.getMapLiteral(); - return MapLiteral::Construct(literal, storage); - } - } -} - -// Primitive Literal. 
-void PrimitiveLiteral::Save(capnp::BaseLiteral::Builder *base_literal_builder, - std::vector<int> *saved_uids) { - BaseLiteral::Save(base_literal_builder, saved_uids); - auto primitive_literal_builder = base_literal_builder->initPrimitiveLiteral(); - primitive_literal_builder.setTokenPosition(token_position_); - auto typed_value_builder = primitive_literal_builder.getValue(); - utils::SaveCapnpTypedValue(value_, &typed_value_builder); -} - -void PrimitiveLiteral::Load(const capnp::Tree::Reader &reader, - AstStorage *storage, - std::vector<int> *loaded_uids) { - BaseLiteral::Load(reader, storage, loaded_uids); - auto pl_reader = - reader.getExpression().getBaseLiteral().getPrimitiveLiteral(); - auto typed_value_reader = pl_reader.getValue(); - utils::LoadCapnpTypedValue(typed_value_reader, &value_); - token_position_ = pl_reader.getTokenPosition(); -} - -PrimitiveLiteral *PrimitiveLiteral::Construct( - const capnp::PrimitiveLiteral::Reader &reader, AstStorage *storage) { - return storage->Create<PrimitiveLiteral>(); -} - -// List Literal. -void ListLiteral::Save(capnp::BaseLiteral::Builder *base_literal_builder, - std::vector<int> *saved_uids) { - BaseLiteral::Save(base_literal_builder, saved_uids); - auto list_literal_builder = base_literal_builder->initListLiteral(); - ::capnp::List<capnp::Tree>::Builder tree_builders = - list_literal_builder.initElements(elements_.size()); - for (size_t i = 0; i < elements_.size(); ++i) { - auto tree_builder = tree_builders[i]; - elements_[i]->Save(&tree_builder, saved_uids); - } -} - -void ListLiteral::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - BaseLiteral::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getExpression().getBaseLiteral().getListLiteral(); - for (const auto tree_reader : reader.getElements()) { - auto tree = storage->Load(tree_reader, loaded_uids); - elements_.push_back(dynamic_cast<Expression *>(tree)); - } -} - -ListLiteral *ListLiteral::Construct(const capnp::ListLiteral::Reader &reader, - AstStorage *storage) { - return storage->Create<ListLiteral>(); -} - -// Map Literal. 
-void MapLiteral::Save(capnp::BaseLiteral::Builder *base_literal_builder, - std::vector<int> *saved_uids) { - BaseLiteral::Save(base_literal_builder, saved_uids); - auto map_literal_builder = base_literal_builder->initMapLiteral(); - ::capnp::List<capnp::MapLiteral::Entry>::Builder map_builder = - map_literal_builder.initElements(elements_.size()); - size_t i = 0; - for (auto &entry : elements_) { - auto entry_builder = map_builder[i]; - auto key_builder = entry_builder.getKey(); - key_builder.setFirst(entry.first.first); - auto storage_property_builder = key_builder.getSecond(); - entry.first.second.Save(&storage_property_builder); - auto value_builder = entry_builder.getValue(); - if (entry.second) entry.second->Save(&value_builder, saved_uids); - ++i; - } -} - -void MapLiteral::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - BaseLiteral::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getExpression().getBaseLiteral().getMapLiteral(); - for (auto entry_reader : reader.getElements()) { - auto key_pair_reader = entry_reader.getKey(); - auto key_first = key_pair_reader.getFirst().cStr(); - auto storage_property_reader = key_pair_reader.getSecond(); - storage::Property key_second; - key_second.Load(storage_property_reader); - const auto value_reader = entry_reader.getValue(); - auto value = storage->Load(value_reader, loaded_uids); - auto key = std::make_pair(key_first, key_second); - // TODO Maybe check for nullptr expression? - elements_.emplace(key, dynamic_cast<Expression *>(value)); - } -} - -MapLiteral *MapLiteral::Construct(const capnp::MapLiteral::Reader &reader, - AstStorage *storage) { - return storage->Create<MapLiteral>(); -} - -// Binary Operator. -void BinaryOperator::Save(capnp::Expression::Builder *expr_builder, - std::vector<int> *saved_uids) { - Expression::Save(expr_builder, saved_uids); - auto builder = expr_builder->initBinaryOperator(); - Save(&builder, saved_uids); -} - -void BinaryOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - if (expression1_) { - auto expr1_builder = builder->getExpression1(); - expression1_->Save(&expr1_builder, saved_uids); - } - if (expression2_) { - auto expr2_builder = builder->getExpression2(); - expression2_->Save(&expr2_builder, saved_uids); - } -} - -void BinaryOperator::Load(const capnp::Tree::Reader &reader, - AstStorage *storage, - std::vector<int> *loaded_uids) { - Expression::Load(reader, storage, loaded_uids); - auto bop_reader = reader.getExpression().getBinaryOperator(); - if (bop_reader.hasExpression1()) { - const auto expr1_reader = bop_reader.getExpression1(); - expression1_ = - dynamic_cast<Expression *>(storage->Load(expr1_reader, loaded_uids)); - } - if (bop_reader.hasExpression2()) { - const auto expr2_reader = bop_reader.getExpression2(); - expression2_ = - dynamic_cast<Expression *>(storage->Load(expr2_reader, loaded_uids)); - } -} - -BinaryOperator *BinaryOperator::Construct( - const capnp::BinaryOperator::Reader &reader, AstStorage *storage) { - switch (reader.which()) { - case capnp::BinaryOperator::ADDITION_OPERATOR: { - auto literal = reader.getAdditionOperator(); - return AdditionOperator::Construct(literal, storage); - } - case capnp::BinaryOperator::AGGREGATION: { - auto literal = reader.getAggregation(); - return Aggregation::Construct(literal, storage); - } - case capnp::BinaryOperator::AND_OPERATOR: { - auto literal = reader.getAndOperator(); - return AndOperator::Construct(literal, storage); - 
} - case capnp::BinaryOperator::DIVISION_OPERATOR: { - auto literal = reader.getDivisionOperator(); - return DivisionOperator::Construct(literal, storage); - } - case capnp::BinaryOperator::EQUAL_OPERATOR: { - auto literal = reader.getEqualOperator(); - return EqualOperator::Construct(literal, storage); - } - case capnp::BinaryOperator::GREATER_EQUAL_OPERATOR: { - auto literal = reader.getGreaterEqualOperator(); - return GreaterEqualOperator::Construct(literal, storage); - } - case capnp::BinaryOperator::GREATER_OPERATOR: { - auto literal = reader.getGreaterOperator(); - return GreaterOperator::Construct(literal, storage); - } - case capnp::BinaryOperator::IN_LIST_OPERATOR: { - auto literal = reader.getInListOperator(); - return InListOperator::Construct(literal, storage); - } - case capnp::BinaryOperator::LESS_EQUAL_OPERATOR: { - auto literal = reader.getLessEqualOperator(); - return LessEqualOperator::Construct(literal, storage); - } - case capnp::BinaryOperator::LESS_OPERATOR: { - auto literal = reader.getLessOperator(); - return LessOperator::Construct(literal, storage); - } - case capnp::BinaryOperator::LIST_MAP_INDEXING_OPERATOR: { - auto literal = reader.getListMapIndexingOperator(); - return ListMapIndexingOperator::Construct(literal, storage); - } - case capnp::BinaryOperator::MOD_OPERATOR: { - auto literal = reader.getModOperator(); - return ModOperator::Construct(literal, storage); - } - case capnp::BinaryOperator::MULTIPLICATION_OPERATOR: { - auto literal = reader.getMultiplicationOperator(); - return MultiplicationOperator::Construct(literal, storage); - } - case capnp::BinaryOperator::NOT_EQUAL_OPERATOR: { - auto literal = reader.getNotEqualOperator(); - return NotEqualOperator::Construct(literal, storage); - } - case capnp::BinaryOperator::OR_OPERATOR: { - auto literal = reader.getOrOperator(); - return OrOperator::Construct(literal, storage); - } - case capnp::BinaryOperator::SUBTRACTION_OPERATOR: { - auto literal = reader.getSubtractionOperator(); - return SubtractionOperator::Construct(literal, storage); - } - case capnp::BinaryOperator::XOR_OPERATOR: { - auto literal = reader.getXorOperator(); - return XorOperator::Construct(literal, storage); - } - } -} - -void OrOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initOrOperator(); -} - -OrOperator *OrOperator::Construct(const capnp::OrOperator::Reader &, - AstStorage *storage) { - return storage->Create<OrOperator>(); -} - -void XorOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initXorOperator(); -} - -XorOperator *XorOperator::Construct(const capnp::XorOperator::Reader &, - AstStorage *storage) { - return storage->Create<XorOperator>(); -} - -void AndOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initAndOperator(); -} - -AndOperator *AndOperator::Construct(const capnp::AndOperator::Reader &, - AstStorage *storage) { - return storage->Create<AndOperator>(); -} - -void AdditionOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initAdditionOperator(); -} - -AdditionOperator *AdditionOperator::Construct( - const capnp::AdditionOperator::Reader &, AstStorage *storage) { - return storage->Create<AdditionOperator>(); -} - -void 
SubtractionOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initSubtractionOperator(); -} - -SubtractionOperator *SubtractionOperator::Construct( - capnp::SubtractionOperator::Reader &, AstStorage *storage) { - return storage->Create<SubtractionOperator>(); -} - -void MultiplicationOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initMultiplicationOperator(); -} - -MultiplicationOperator *MultiplicationOperator::Construct( - capnp::MultiplicationOperator::Reader &, AstStorage *storage) { - return storage->Create<MultiplicationOperator>(); -} - -void DivisionOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initDivisionOperator(); -} - -DivisionOperator *DivisionOperator::Construct( - const capnp::DivisionOperator::Reader &, AstStorage *storage) { - return storage->Create<DivisionOperator>(); -} - -void ModOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initModOperator(); -} - -ModOperator *ModOperator::Construct(const capnp::ModOperator::Reader &, - AstStorage *storage) { - return storage->Create<ModOperator>(); -} - -void NotEqualOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initNotEqualOperator(); -} - -NotEqualOperator *NotEqualOperator::Construct( - const capnp::NotEqualOperator::Reader &, AstStorage *storage) { - return storage->Create<NotEqualOperator>(); -} - -void EqualOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initEqualOperator(); -} - -EqualOperator *EqualOperator::Construct(const capnp::EqualOperator::Reader &, - AstStorage *storage) { - return storage->Create<EqualOperator>(); -} - -void LessOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initLessOperator(); -} - -LessOperator *LessOperator::Construct(const capnp::LessOperator::Reader &, - AstStorage *storage) { - return storage->Create<LessOperator>(); -} - -void GreaterOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initGreaterOperator(); -} - -GreaterOperator *GreaterOperator::Construct( - const capnp::GreaterOperator::Reader &, AstStorage *storage) { - return storage->Create<GreaterOperator>(); -} - -void LessEqualOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initLessEqualOperator(); -} - -LessEqualOperator *LessEqualOperator::Construct( - const capnp::LessEqualOperator::Reader &, AstStorage *storage) { - return storage->Create<LessEqualOperator>(); -} - -void GreaterEqualOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initGreaterEqualOperator(); -} - -GreaterEqualOperator *GreaterEqualOperator::Construct( - const capnp::GreaterEqualOperator::Reader &, AstStorage *storage) { - return storage->Create<GreaterEqualOperator>(); -} - -void 
InListOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initInListOperator(); -} - -InListOperator *InListOperator::Construct(const capnp::InListOperator::Reader &, - AstStorage *storage) { - return storage->Create<InListOperator>(); -} - -void ListMapIndexingOperator::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - builder->initListMapIndexingOperator(); -} - -ListMapIndexingOperator *ListMapIndexingOperator::Construct( - capnp::ListMapIndexingOperator::Reader &, AstStorage *storage) { - return storage->Create<ListMapIndexingOperator>(); -} - -void Aggregation::Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - BinaryOperator::Save(builder, saved_uids); - auto ag_builder = builder->initAggregation(); - switch (op_) { - case Op::AVG: - ag_builder.setOp(capnp::Aggregation::Op::AVG); - break; - case Op::COLLECT_LIST: - ag_builder.setOp(capnp::Aggregation::Op::COLLECT_LIST); - break; - case Op::COLLECT_MAP: - ag_builder.setOp(capnp::Aggregation::Op::COLLECT_MAP); - break; - case Op::COUNT: - ag_builder.setOp(capnp::Aggregation::Op::COUNT); - break; - case Op::MAX: - ag_builder.setOp(capnp::Aggregation::Op::MAX); - break; - case Op::MIN: - ag_builder.setOp(capnp::Aggregation::Op::MIN); - break; - case Op::SUM: - ag_builder.setOp(capnp::Aggregation::Op::SUM); - break; - } -} - -Aggregation *Aggregation::Construct(const capnp::Aggregation::Reader &reader, - AstStorage *storage) { - Op op; - switch (reader.getOp()) { - case capnp::Aggregation::Op::AVG: - op = Op::AVG; - break; - case capnp::Aggregation::Op::COLLECT_LIST: - op = Op::COLLECT_LIST; - break; - case capnp::Aggregation::Op::COLLECT_MAP: - op = Op::COLLECT_MAP; - break; - case capnp::Aggregation::Op::COUNT: - op = Op::COUNT; - break; - case capnp::Aggregation::Op::MAX: - op = Op::MAX; - break; - case capnp::Aggregation::Op::MIN: - op = Op::MIN; - break; - case capnp::Aggregation::Op::SUM: - op = Op::SUM; - break; - } - return storage->Create<Aggregation>(op); -} - -// Unary Operator. 
-void UnaryOperator::Save(capnp::Expression::Builder *expr_builder, - std::vector<int> *saved_uids) { - Expression::Save(expr_builder, saved_uids); - auto builder = expr_builder->initUnaryOperator(); - Save(&builder, saved_uids); -} - -void UnaryOperator::Save(capnp::UnaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - if (expression_) { - auto expr_builder = builder->getExpression(); - expression_->Save(&expr_builder, saved_uids); - } -} - -void UnaryOperator::Load(const capnp::Tree::Reader &reader, - AstStorage *storage, - std::vector<int> *loaded_uids) { - Expression::Load(reader, storage, loaded_uids); - if (reader.hasExpression()) { - const auto expr_reader = - reader.getExpression().getUnaryOperator().getExpression(); - expression_ = - dynamic_cast<Expression *>(storage->Load(expr_reader, loaded_uids)); - } -} - -UnaryOperator *UnaryOperator::Construct( - const capnp::UnaryOperator::Reader &reader, AstStorage *storage) { - switch (reader.which()) { - case capnp::UnaryOperator::IS_NULL_OPERATOR: { - auto op = reader.getIsNullOperator(); - return IsNullOperator::Construct(op, storage); - } - case capnp::UnaryOperator::NOT_OPERATOR: { - auto op = reader.getNotOperator(); - return NotOperator::Construct(op, storage); - } - case capnp::UnaryOperator::UNARY_MINUS_OPERATOR: { - auto op = reader.getUnaryMinusOperator(); - return UnaryMinusOperator::Construct(op, storage); - } - case capnp::UnaryOperator::UNARY_PLUS_OPERATOR: { - auto op = reader.getUnaryPlusOperator(); - return UnaryPlusOperator::Construct(op, storage); - } - } -} - -// IsNull Operator. -void IsNullOperator::Save(capnp::UnaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - UnaryOperator::Save(builder, saved_uids); - builder->initIsNullOperator(); -} - -IsNullOperator *IsNullOperator::Construct(const capnp::IsNullOperator::Reader &, - AstStorage *storage) { - return storage->Create<IsNullOperator>(); -} - -// Not Operator. -void NotOperator::Save(capnp::UnaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - UnaryOperator::Save(builder, saved_uids); - builder->initNotOperator(); -} - -NotOperator *NotOperator::Construct(const capnp::NotOperator::Reader &, - AstStorage *storage) { - return storage->Create<NotOperator>(); -} - -// UnaryPlus Operator. -void UnaryPlusOperator::Save(capnp::UnaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - UnaryOperator::Save(builder, saved_uids); - builder->initUnaryPlusOperator(); -} - -UnaryPlusOperator *UnaryPlusOperator::Construct( - const capnp::UnaryPlusOperator::Reader &, AstStorage *storage) { - return storage->Create<UnaryPlusOperator>(); -} - -// UnaryMinus Operator. -void UnaryMinusOperator::Save(capnp::UnaryOperator::Builder *builder, - std::vector<int> *saved_uids) { - UnaryOperator::Save(builder, saved_uids); - builder->initUnaryMinusOperator(); -} - -UnaryMinusOperator *UnaryMinusOperator::Construct( - capnp::UnaryMinusOperator::Reader &, AstStorage *storage) { - return storage->Create<UnaryMinusOperator>(); -} - -// ListSlicing Operator. 
-void ListSlicingOperator::Save(capnp::Expression::Builder *expr_builder, - std::vector<int> *saved_uids) { - Expression::Save(expr_builder, saved_uids); - auto builder = expr_builder->initListSlicingOperator(); - Save(&builder, saved_uids); -} - -void ListSlicingOperator::Save(capnp::ListSlicingOperator::Builder *builder, - std::vector<int> *saved_uids) { - if (list_) { - auto list_builder = builder->getList(); - list_->Save(&list_builder, saved_uids); - } - if (lower_bound_) { - auto lb_builder = builder->getLowerBound(); - lower_bound_->Save(&lb_builder, saved_uids); - } - if (upper_bound_) { - auto up_builder = builder->getUpperBound(); - upper_bound_->Save(&up_builder, saved_uids); - } -} - -void ListSlicingOperator::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, - std::vector<int> *loaded_uids) { - Expression::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getExpression().getListSlicingOperator(); - if (reader.hasList()) { - const auto list_reader = reader.getList(); - list_ = dynamic_cast<Expression *>(storage->Load(list_reader, loaded_uids)); - } - if (reader.hasUpperBound()) { - const auto ub_reader = reader.getUpperBound(); - upper_bound_ = - dynamic_cast<Expression *>(storage->Load(ub_reader, loaded_uids)); - } - if (reader.hasLowerBound()) { - const auto lb_reader = reader.getLowerBound(); - lower_bound_ = - dynamic_cast<Expression *>(storage->Load(lb_reader, loaded_uids)); - } -} - -ListSlicingOperator *ListSlicingOperator::Construct( - const capnp::ListSlicingOperator::Reader &reader, AstStorage *storage) { - return storage->Create<ListSlicingOperator>(nullptr, nullptr, nullptr); -} - -// If Operator. -void IfOperator::Save(capnp::Expression::Builder *expr_builder, - std::vector<int> *saved_uids) { - Expression::Save(expr_builder, saved_uids); - auto builder = expr_builder->initIfOperator(); - Save(&builder, saved_uids); -} - -void IfOperator::Save(capnp::IfOperator::Builder *builder, - std::vector<int> *saved_uids) { - auto condition_builder = builder->getCondition(); - condition_->Save(&condition_builder, saved_uids); - auto then_builder = builder->getThenExpression(); - then_expression_->Save(&then_builder, saved_uids); - auto else_builder = builder->getElseExpression(); - else_expression_->Save(&else_builder, saved_uids); -} - -void IfOperator::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Expression::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getExpression().getIfOperator(); - const auto condition_reader = reader.getCondition(); - condition_ = - dynamic_cast<Expression *>(storage->Load(condition_reader, loaded_uids)); - const auto then_reader = reader.getThenExpression(); - then_expression_ = - dynamic_cast<Expression *>(storage->Load(then_reader, loaded_uids)); - const auto else_reader = reader.getElseExpression(); - else_expression_ = - dynamic_cast<Expression *>(storage->Load(else_reader, loaded_uids)); -} - -IfOperator *IfOperator::Construct(const capnp::IfOperator::Reader &reader, - AstStorage *storage) { - return storage->Create<IfOperator>(nullptr, nullptr, nullptr); -} - -// All -void All::Save(capnp::Expression::Builder *expr_builder, - std::vector<int> *saved_uids) { - Expression::Save(expr_builder, saved_uids); - auto builder = expr_builder->initAll(); - Save(&builder, saved_uids); -} - -void All::Save(capnp::All::Builder *builder, std::vector<int> *saved_uids) { - auto identifier_builder = builder->getIdentifier(); - 
identifier_->Save(&identifier_builder, saved_uids); - auto expr_builder = builder->getListExpression(); - list_expression_->Save(&expr_builder, saved_uids); - auto where_builder = builder->getWhere(); - where_->Save(&where_builder, saved_uids); -} - -void All::Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) { - Expression::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getExpression().getAll(); - const auto id_reader = reader.getIdentifier(); - identifier_ = - dynamic_cast<Identifier *>(storage->Load(id_reader, loaded_uids)); - const auto expr_reader = reader.getListExpression(); - list_expression_ = - dynamic_cast<Expression *>(storage->Load(expr_reader, loaded_uids)); - const auto where_reader = reader.getWhere(); - where_ = dynamic_cast<Where *>(storage->Load(where_reader, loaded_uids)); -} - -All *All::Construct(const capnp::All::Reader &reader, AstStorage *storage) { - return storage->Create<All>(nullptr, nullptr, nullptr); -} - -// Function -void Function::Save(capnp::Expression::Builder *expr_builder, - std::vector<int> *saved_uids) { - Expression::Save(expr_builder, saved_uids); - auto builder = expr_builder->initFunction(); - Save(&builder, saved_uids); -} - -void Function::Save(capnp::Function::Builder *builder, - std::vector<int> *saved_uids) { - builder->setFunctionName(function_name_); - ::capnp::List<capnp::Tree>::Builder tree_builders = - builder->initArguments(arguments_.size()); - for (size_t i = 0; i < arguments_.size(); ++i) { - auto tree_builder = tree_builders[i]; - arguments_[i]->Save(&tree_builder, saved_uids); - } -} - -void Function::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Expression::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getExpression().getFunction(); - function_name_ = reader.getFunctionName().cStr(); - for (const auto tree_reader : reader.getArguments()) { - auto tree = storage->Load(tree_reader, loaded_uids); - arguments_.push_back(dynamic_cast<Expression *>(tree)); - } - function_ = NameToFunction(function_name_); -} - -Function *Function::Construct(const capnp::Function::Reader &reader, - AstStorage *storage) { - return storage->Create<Function>(); -} - -// Identifier -void Identifier::Save(capnp::Expression::Builder *expr_builder, - std::vector<int> *saved_uids) { - Expression::Save(expr_builder, saved_uids); - auto builder = expr_builder->initIdentifier(); - Save(&builder, saved_uids); -} - -void Identifier::Save(capnp::Identifier::Builder *builder, - std::vector<int> *saved_uids) { - builder->setName(name_); - builder->setUserDeclared(user_declared_); -} - -Identifier *Identifier::Construct(const capnp::Identifier::Reader &reader, - AstStorage *storage) { - auto name = reader.getName().cStr(); - auto user_declared = reader.getUserDeclared(); - return storage->Create<Identifier>(name, user_declared); -} - -// LabelsTest -void LabelsTest::Save(capnp::Expression::Builder *expr_builder, - std::vector<int> *saved_uids) { - Expression::Save(expr_builder, saved_uids); - auto builder = expr_builder->initLabelsTest(); - Save(&builder, saved_uids); -} - -void LabelsTest::Save(capnp::LabelsTest::Builder *builder, - std::vector<int> *saved_uids) { - if (expression_) { - auto expr_builder = builder->initExpression(); - expression_->Save(&expr_builder, saved_uids); - } - auto common_builders = builder->initLabels(labels_.size()); - for (size_t i = 0; i < labels_.size(); ++i) { - auto common_builder = 
common_builders[i]; - labels_[i].Save(&common_builder); - } -} - -void LabelsTest::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Expression::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getExpression().getLabelsTest(); - if (reader.hasExpression()) { - const auto expr_reader = reader.getExpression(); - expression_ = - dynamic_cast<Expression *>(storage->Load(expr_reader, loaded_uids)); - } - for (auto label_reader : reader.getLabels()) { - storage::Label label; - label.Load(label_reader); - labels_.push_back(label); - } -} - -LabelsTest *LabelsTest::Construct(const capnp::LabelsTest::Reader &reader, - AstStorage *storage) { - return storage->Create<LabelsTest>(nullptr, std::vector<storage::Label>()); -} - -// ParameterLookup -void ParameterLookup::Save(capnp::Expression::Builder *expr_builder, - std::vector<int> *saved_uids) { - Expression::Save(expr_builder, saved_uids); - auto builder = expr_builder->initParameterLookup(); - Save(&builder, saved_uids); -} - -void ParameterLookup::Save(capnp::ParameterLookup::Builder *builder, - std::vector<int> *saved_uids) { - builder->setTokenPosition(token_position_); -} - -ParameterLookup *ParameterLookup::Construct( - const capnp::ParameterLookup::Reader &reader, AstStorage *storage) { - auto token_position = reader.getTokenPosition(); - return storage->Create<ParameterLookup>(token_position); -} - -// PropertyLookup -void PropertyLookup::Save(capnp::Expression::Builder *expr_builder, - std::vector<int> *saved_uids) { - Expression::Save(expr_builder, saved_uids); - auto builder = expr_builder->initPropertyLookup(); - Save(&builder, saved_uids); -} - -void PropertyLookup::Save(capnp::PropertyLookup::Builder *builder, - std::vector<int> *saved_uids) { - if (expression_) { - auto expr_builder = builder->initExpression(); - expression_->Save(&expr_builder, saved_uids); - } - builder->setPropertyName(property_name_); - auto storage_property_builder = builder->initProperty(); - property_.Save(&storage_property_builder); -} - -void PropertyLookup::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, - std::vector<int> *loaded_uids) { - Expression::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getExpression().getPropertyLookup(); - if (reader.hasExpression()) { - const auto expr_reader = reader.getExpression(); - expression_ = - dynamic_cast<Expression *>(storage->Load(expr_reader, loaded_uids)); - } - property_name_ = reader.getPropertyName().cStr(); - auto storage_property_reader = reader.getProperty(); - property_.Load(storage_property_reader); -} - -PropertyLookup *PropertyLookup::Construct( - const capnp::PropertyLookup::Reader &reader, AstStorage *storage) { - return storage->Create<PropertyLookup>(nullptr, "", storage::Property()); -} - -// Reduce -void Reduce::Save(capnp::Expression::Builder *expr_builder, - std::vector<int> *saved_uids) { - Expression::Save(expr_builder, saved_uids); - auto builder = expr_builder->initReduce(); - Save(&builder, saved_uids); -} - -void Reduce::Save(capnp::Reduce::Builder *builder, - std::vector<int> *saved_uids) { - auto acc_builder = builder->initAccumulator(); - accumulator_->Save(&acc_builder, saved_uids); - auto init_builder = builder->initInitializer(); - initializer_->Save(&init_builder, saved_uids); - auto id_builder = builder->initIdentifier(); - identifier_->Save(&id_builder, saved_uids); - auto list_builder = builder->initList(); - list_->Save(&list_builder, saved_uids); - auto 
expr_builder = builder->initExpression(); - expression_->Save(&expr_builder, saved_uids); -} - -void Reduce::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Expression::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getExpression().getReduce(); - const auto acc_reader = reader.getAccumulator(); - accumulator_ = - dynamic_cast<Identifier *>(storage->Load(acc_reader, loaded_uids)); - const auto init_reader = reader.getInitializer(); - initializer_ = - dynamic_cast<Expression *>(storage->Load(init_reader, loaded_uids)); - const auto id_reader = reader.getIdentifier(); - identifier_ = - dynamic_cast<Identifier *>(storage->Load(id_reader, loaded_uids)); - const auto list_reader = reader.getList(); - list_ = dynamic_cast<Expression *>(storage->Load(list_reader, loaded_uids)); - const auto expr_reader = reader.getExpression(); - expression_ = - dynamic_cast<Expression *>(storage->Load(expr_reader, loaded_uids)); -} - -Reduce *Reduce::Construct(const capnp::Reduce::Reader &reader, - AstStorage *storage) { - return storage->Create<Reduce>(nullptr, nullptr, nullptr, nullptr, nullptr); -} - -// Single -void Single::Save(capnp::Expression::Builder *expr_builder, - std::vector<int> *saved_uids) { - Expression::Save(expr_builder, saved_uids); - auto builder = expr_builder->initSingle(); - Save(&builder, saved_uids); -} - -void Single::Save(capnp::Single::Builder *builder, - std::vector<int> *saved_uids) { - auto where_builder = builder->initWhere(); - where_->Save(&where_builder, saved_uids); - auto id_builder = builder->initIdentifier(); - identifier_->Save(&id_builder, saved_uids); - auto expr_builder = builder->initListExpression(); - list_expression_->Save(&expr_builder, saved_uids); -} - -void Single::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Expression::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getExpression().getSingle(); - const auto id_reader = reader.getIdentifier(); - identifier_ = - dynamic_cast<Identifier *>(storage->Load(id_reader, loaded_uids)); - const auto list_reader = reader.getListExpression(); - list_expression_ = - dynamic_cast<Expression *>(storage->Load(list_reader, loaded_uids)); - const auto where_reader = reader.getWhere(); - where_ = dynamic_cast<Where *>(storage->Load(where_reader, loaded_uids)); -} - -Single *Single::Construct(const capnp::Single::Reader &reader, - AstStorage *storage) { - return storage->Create<Single>(nullptr, nullptr, nullptr); -} - -// Where -void Where::Save(capnp::Tree::Builder *tree_builder, - std::vector<int> *saved_uids) { - Tree::Save(tree_builder, saved_uids); - if (IsSaved(*saved_uids)) { - return; - } - auto builder = tree_builder->initWhere(); - Save(&builder, saved_uids); - AddToSaved(saved_uids); -} - -void Where::Save(capnp::Where::Builder *builder, std::vector<int> *saved_uids) { - if (expression_) { - auto expr_builder = builder->initExpression(); - expression_->Save(&expr_builder, saved_uids); - } -} - -void Where::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Tree::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getWhere(); - if (reader.hasExpression()) { - const auto expr_reader = reader.getExpression(); - expression_ = - dynamic_cast<Expression *>(storage->Load(expr_reader, loaded_uids)); - } -} - -Where *Where::Construct(const capnp::Where::Reader &reader, - AstStorage *storage) { - return 
storage->Create<Where>(); -} - -// Clause. -void Clause::Save(capnp::Tree::Builder *tree_builder, - std::vector<int> *saved_uids) { - Tree::Save(tree_builder, saved_uids); - if (IsSaved(*saved_uids)) { - return; - } - auto clause_builder = tree_builder->initClause(); - Save(&clause_builder, saved_uids); - AddToSaved(saved_uids); -} - -Clause *Clause::Construct(const capnp::Clause::Reader &reader, - AstStorage *storage) { - switch (reader.which()) { - case capnp::Clause::CREATE: { - auto create_reader = reader.getCreate(); - return Create::Construct(create_reader, storage); - } - case capnp::Clause::CREATE_INDEX: { - auto ci_reader = reader.getCreateIndex(); - return CreateIndex::Construct(ci_reader, storage); - } - case capnp::Clause::DELETE: { - auto del_reader = reader.getDelete(); - return Delete::Construct(del_reader, storage); - } - case capnp::Clause::MATCH: { - auto match_reader = reader.getMatch(); - return Match::Construct(match_reader, storage); - } - case capnp::Clause::MERGE: { - auto merge_reader = reader.getMerge(); - return Merge::Construct(merge_reader, storage); - } - case capnp::Clause::REMOVE_LABELS: { - auto rl_reader = reader.getRemoveLabels(); - return RemoveLabels::Construct(rl_reader, storage); - } - case capnp::Clause::REMOVE_PROPERTY: { - auto rp_reader = reader.getRemoveProperty(); - return RemoveProperty::Construct(rp_reader, storage); - } - case capnp::Clause::RETURN: { - auto ret_reader = reader.getReturn(); - return Return::Construct(ret_reader, storage); - } - case capnp::Clause::SET_LABELS: { - auto sl_reader = reader.getSetLabels(); - return SetLabels::Construct(sl_reader, storage); - break; - } - case capnp::Clause::SET_PROPERTY: { - auto sp_reader = reader.getSetProperty(); - return SetProperty::Construct(sp_reader, storage); - } - case capnp::Clause::SET_PROPERTIES: { - auto sp_reader = reader.getSetProperties(); - return SetProperties::Construct(sp_reader, storage); - } - case capnp::Clause::UNWIND: { - auto unwind_reader = reader.getUnwind(); - return Unwind::Construct(unwind_reader, storage); - } - case capnp::Clause::WITH: { - auto with_reader = reader.getWith(); - return With::Construct(with_reader, storage); - } - case capnp::Clause::MODIFY_USER: { - auto mu_reader = reader.getModifyUser(); - return ModifyUser::Construct(mu_reader, storage); - } - case capnp::Clause::DROP_USER: { - auto du_reader = reader.getDropUser(); - return DropUser::Construct(du_reader, storage); - } - } -} - -// Create. 
-void Create::Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) { - Clause::Save(builder, saved_uids); - auto create_builder = builder->initCreate(); - Create::Save(&create_builder, saved_uids); -} - -void Create::Save(capnp::Create::Builder *builder, - std::vector<int> *saved_uids) { - ::capnp::List<capnp::Tree>::Builder tree_builders = - builder->initPatterns(patterns_.size()); - for (size_t i = 0; i < patterns_.size(); ++i) { - auto tree_builder = tree_builders[i]; - patterns_[i]->Save(&tree_builder, saved_uids); - } -} - -void Create::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Clause::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getClause().getCreate(); - for (const auto pattern_reader : reader.getPatterns()) { - auto tree = storage->Load(pattern_reader, loaded_uids); - patterns_.push_back(dynamic_cast<Pattern *>(tree)); - } -} - -Create *Create::Construct(const capnp::Create::Reader &reader, - AstStorage *storage) { - return storage->Create<Create>(); -} - -// CreateIndex. -void CreateIndex::Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) { - Clause::Save(builder, saved_uids); - auto create_builder = builder->initCreateIndex(); - CreateIndex::Save(&create_builder, saved_uids); -} - -void CreateIndex::Save(capnp::CreateIndex::Builder *builder, - std::vector<int> *saved_uids) { - auto label_builder = builder->getLabel(); - label_.Save(&label_builder); - auto property_builder = builder->getProperty(); - property_.Save(&property_builder); -} - -CreateIndex *CreateIndex::Construct(const capnp::CreateIndex::Reader &reader, - AstStorage *storage) { - auto label_reader = reader.getLabel(); - storage::Label label; - label.Load(label_reader); - auto property_reader = reader.getProperty(); - storage::Property property; - property.Load(property_reader); - return storage->Create<CreateIndex>(label, property); -} - -// Delete. -void Delete::Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) { - Clause::Save(builder, saved_uids); - auto del_builder = builder->initDelete(); - Delete::Save(&del_builder, saved_uids); -} - -void Delete::Save(capnp::Delete::Builder *builder, - std::vector<int> *saved_uids) { - ::capnp::List<capnp::Tree>::Builder tree_builders = - builder->initExpressions(expressions_.size()); - for (size_t i = 0; i < expressions_.size(); ++i) { - auto tree_builder = tree_builders[i]; - expressions_[i]->Save(&tree_builder, saved_uids); - } - builder->setDetach(detach_); -} - -void Delete::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Clause::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getClause().getDelete(); - for (const auto tree_reader : reader.getExpressions()) { - auto tree = storage->Load(tree_reader, loaded_uids); - expressions_.push_back(dynamic_cast<Expression *>(tree)); - } - detach_ = reader.getDetach(); -} - -Delete *Delete::Construct(const capnp::Delete::Reader &reader, - AstStorage *storage) { - return storage->Create<Delete>(); -} - -// Match. 
-void Match::Save(capnp::Clause::Builder *clause_builder, - std::vector<int> *saved_uids) { - Clause::Save(clause_builder, saved_uids); - auto builder = clause_builder->initMatch(); - Match::Save(&builder, saved_uids); -} - -void Match::Save(capnp::Match::Builder *builder, std::vector<int> *saved_uids) { - ::capnp::List<capnp::Tree>::Builder tree_builders = - builder->initPatterns(patterns_.size()); - for (size_t i = 0; i < patterns_.size(); ++i) { - auto tree_builder = tree_builders[i]; - patterns_[i]->Save(&tree_builder, saved_uids); - } - - if (where_) { - auto where_builder = builder->initWhere(); - where_->Save(&where_builder, saved_uids); - } - builder->setOptional(optional_); -} - -void Match::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Clause::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getClause().getMatch(); - for (const auto tree_reader : reader.getPatterns()) { - auto tree = storage->Load(tree_reader, loaded_uids); - patterns_.push_back(dynamic_cast<Pattern *>(tree)); - } - if (reader.hasWhere()) { - const auto where_reader = reader.getWhere(); - where_ = dynamic_cast<Where *>(storage->Load(where_reader, loaded_uids)); - } - optional_ = reader.getOptional(); -} - -Match *Match::Construct(const capnp::Match::Reader &reader, - AstStorage *storage) { - return storage->Create<Match>(); -} - -// Merge. -void Merge::Save(capnp::Clause::Builder *clause_builder, - std::vector<int> *saved_uids) { - Clause::Save(clause_builder, saved_uids); - auto builder = clause_builder->initMerge(); - Merge::Save(&builder, saved_uids); -} - -void Merge::Save(capnp::Merge::Builder *builder, std::vector<int> *saved_uids) { - ::capnp::List<capnp::Tree>::Builder match_builder = - builder->initOnMatch(on_match_.size()); - for (size_t i = 0; i < on_match_.size(); ++i) { - auto tree_builder = match_builder[i]; - on_match_[i]->Save(&tree_builder, saved_uids); - } - - ::capnp::List<capnp::Tree>::Builder create_builder = - builder->initOnCreate(on_create_.size()); - for (size_t i = 0; i < on_create_.size(); ++i) { - auto tree_builder = create_builder[i]; - on_create_[i]->Save(&tree_builder, saved_uids); - } - - if (pattern_) { - auto pattern_builder = builder->getPattern(); - pattern_->Save(&pattern_builder, saved_uids); - } -} - -void Merge::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Clause::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getClause().getMerge(); - for (const auto tree_reader : reader.getOnMatch()) { - auto tree = storage->Load(tree_reader, loaded_uids); - on_match_.push_back(dynamic_cast<Clause *>(tree)); - } - - for (const auto tree_reader : reader.getOnCreate()) { - auto tree = storage->Load(tree_reader, loaded_uids); - on_create_.push_back(dynamic_cast<Clause *>(tree)); - } - if (reader.hasPattern()) { - const auto pattern_reader = reader.getPattern(); - pattern_ = - dynamic_cast<Pattern *>(storage->Load(pattern_reader, loaded_uids)); - } -} -Merge *Merge::Construct(const capnp::Merge::Reader &reader, - AstStorage *storage) { - return storage->Create<Merge>(); -} - -// RemoveLabels. 
-void RemoveLabels::Save(capnp::Clause::Builder *clause_builder, - std::vector<int> *saved_uids) { - Clause::Save(clause_builder, saved_uids); - auto builder = clause_builder->initRemoveLabels(); - RemoveLabels::Save(&builder, saved_uids); -} - -void RemoveLabels::Save(capnp::RemoveLabels::Builder *builder, - std::vector<int> *saved_uids) { - if (identifier_) { - auto id_builder = builder->getIdentifier(); - identifier_->Save(&id_builder, saved_uids); - } - auto common_builders = builder->initLabels(labels_.size()); - for (size_t i = 0; i < labels_.size(); ++i) { - auto common_builder = common_builders[i]; - labels_[i].Save(&common_builder); - } -} - -void RemoveLabels::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, - std::vector<int> *loaded_uids) { - Clause::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getClause().getRemoveLabels(); - if (reader.hasIdentifier()) { - const auto id_reader = reader.getIdentifier(); - identifier_ = - dynamic_cast<Identifier *>(storage->Load(id_reader, loaded_uids)); - } - for (auto label_reader : reader.getLabels()) { - storage::Label label; - label.Load(label_reader); - labels_.push_back(label); - } -} - -RemoveLabels *RemoveLabels::Construct(const capnp::RemoveLabels::Reader &reader, - AstStorage *storage) { - return storage->Create<RemoveLabels>(); -} - -// RemoveProperty. -void RemoveProperty::Save(capnp::Clause::Builder *clause_builder, - std::vector<int> *saved_uids) { - Clause::Save(clause_builder, saved_uids); - auto builder = clause_builder->initRemoveProperty(); - RemoveProperty::Save(&builder, saved_uids); -} - -void RemoveProperty::Save(capnp::RemoveProperty::Builder *builder, - std::vector<int> *saved_uids) { - if (property_lookup_) { - auto pl_builder = builder->getPropertyLookup(); - property_lookup_->Save(&pl_builder, saved_uids); - } -} - -void RemoveProperty::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, - std::vector<int> *loaded_uids) { - Clause::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getClause().getRemoveProperty(); - if (reader.hasPropertyLookup()) { - const auto pl_reader = reader.getPropertyLookup(); - property_lookup_ = - dynamic_cast<PropertyLookup *>(storage->Load(pl_reader, loaded_uids)); - } -} - -RemoveProperty *RemoveProperty::Construct( - const capnp::RemoveProperty::Reader &reader, AstStorage *storage) { - return storage->Create<RemoveProperty>(); -} - -// Return. -void Return::Save(capnp::Clause::Builder *clause_builder, - std::vector<int> *saved_uids) { - Clause::Save(clause_builder, saved_uids); - auto builder = clause_builder->initReturn(); - Return::Save(&builder, saved_uids); -} - -void SaveReturnBody(capnp::ReturnBody::Builder *rb_builder, ReturnBody &body, - std::vector<int> *saved_uids) { - rb_builder->setDistinct(body.distinct); - rb_builder->setAllIdentifiers(body.all_identifiers); - - ::capnp::List<capnp::Tree>::Builder named_expressions = - rb_builder->initNamedExpressions(body.named_expressions.size()); - for (size_t i = 0; i < body.named_expressions.size(); ++i) { - auto tree_builder = named_expressions[i]; - body.named_expressions[i]->Save(&tree_builder, saved_uids); - } - - ::capnp::List<capnp::ReturnBody::Pair>::Builder order_by = - rb_builder->initOrderBy(body.order_by.size()); - for (size_t i = 0; i < body.order_by.size(); ++i) { - auto pair_builder = order_by[i]; - auto ordering = body.order_by[i].first == Ordering::ASC - ? 
capnp::Ordering::ASC - : capnp::Ordering::DESC; - pair_builder.setOrdering(ordering); - auto tree_builder = pair_builder.getExpression(); - body.order_by[i].second->Save(&tree_builder, saved_uids); - } - - if (body.skip) { - auto skip_builder = rb_builder->getSkip(); - body.skip->Save(&skip_builder, saved_uids); - } - if (body.limit) { - auto limit_builder = rb_builder->getLimit(); - body.limit->Save(&limit_builder, saved_uids); - } -} - -void Return::Save(capnp::Return::Builder *builder, - std::vector<int> *saved_uids) { - auto rb_builder = builder->initReturnBody(); - SaveReturnBody(&rb_builder, body_, saved_uids); -} - -void LoadReturnBody(capnp::ReturnBody::Reader &rb_reader, ReturnBody &body, - AstStorage *storage, std::vector<int> *loaded_uids) { - body.distinct = rb_reader.getDistinct(); - body.all_identifiers = rb_reader.getAllIdentifiers(); - - for (const auto tree_reader : rb_reader.getNamedExpressions()) { - auto tree = storage->Load(tree_reader, loaded_uids); - body.named_expressions.push_back(dynamic_cast<NamedExpression *>(tree)); - } - - for (auto pair_reader : rb_reader.getOrderBy()) { - auto ordering = pair_reader.getOrdering() == capnp::Ordering::ASC - ? Ordering::ASC - : Ordering::DESC; - const auto tree_reader = pair_reader.getExpression(); - // TODO Check if expression is null? - auto tree = - dynamic_cast<Expression *>(storage->Load(tree_reader, loaded_uids)); - body.order_by.push_back(std::make_pair(ordering, tree)); - } - - if (rb_reader.hasSkip()) { - const auto skip_reader = rb_reader.getSkip(); - body.skip = - dynamic_cast<Expression *>(storage->Load(skip_reader, loaded_uids)); - } - if (rb_reader.hasLimit()) { - const auto limit_reader = rb_reader.getLimit(); - body.limit = - dynamic_cast<Expression *>(storage->Load(limit_reader, loaded_uids)); - } -} - -void Return::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Clause::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getClause().getReturn(); - auto rb_reader = reader.getReturnBody(); - LoadReturnBody(rb_reader, body_, storage, loaded_uids); -} - -Return *Return::Construct(const capnp::Return::Reader &reader, - AstStorage *storage) { - return storage->Create<Return>(); -} - -// SetLabels. 
-void SetLabels::Save(capnp::Clause::Builder *clause_builder, - std::vector<int> *saved_uids) { - Clause::Save(clause_builder, saved_uids); - auto builder = clause_builder->initSetLabels(); - SetLabels::Save(&builder, saved_uids); -} - -void SetLabels::Save(capnp::SetLabels::Builder *builder, - std::vector<int> *saved_uids) { - if (identifier_) { - auto id_builder = builder->getIdentifier(); - identifier_->Save(&id_builder, saved_uids); - } - auto common_builders = builder->initLabels(labels_.size()); - for (size_t i = 0; i < labels_.size(); ++i) { - auto common_builder = common_builders[i]; - labels_[i].Save(&common_builder); - } -} - -void SetLabels::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Clause::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getClause().getSetLabels(); - if (reader.hasIdentifier()) { - const auto id_reader = reader.getIdentifier(); - identifier_ = - dynamic_cast<Identifier *>(storage->Load(id_reader, loaded_uids)); - } - for (auto label_reader : reader.getLabels()) { - storage::Label label; - label.Load(label_reader); - labels_.push_back(label); - } -} - -SetLabels *SetLabels::Construct(const capnp::SetLabels::Reader &reader, - AstStorage *storage) { - return storage->Create<SetLabels>(); -} - -// SetProperty. -void SetProperty::Save(capnp::Clause::Builder *clause_builder, - std::vector<int> *saved_uids) { - Clause::Save(clause_builder, saved_uids); - auto builder = clause_builder->initSetProperty(); - SetProperty::Save(&builder, saved_uids); -} - -void SetProperty::Save(capnp::SetProperty::Builder *builder, - std::vector<int> *saved_uids) { - if (property_lookup_) { - auto pl_builder = builder->getPropertyLookup(); - property_lookup_->Save(&pl_builder, saved_uids); - } - if (expression_) { - auto expr_builder = builder->getExpression(); - expression_->Save(&expr_builder, saved_uids); - } -} - -void SetProperty::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Clause::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getClause().getSetProperty(); - if (reader.hasPropertyLookup()) { - const auto pl_reader = reader.getPropertyLookup(); - property_lookup_ = - dynamic_cast<PropertyLookup *>(storage->Load(pl_reader, loaded_uids)); - } - if (reader.hasExpression()) { - const auto expr_reader = reader.getExpression(); - expression_ = - dynamic_cast<Expression *>(storage->Load(expr_reader, loaded_uids)); - } -} - -SetProperty *SetProperty::Construct(const capnp::SetProperty::Reader &reader, - AstStorage *storage) { - return storage->Create<SetProperty>(); -} - -// SetProperties. 
-void SetProperties::Save(capnp::Clause::Builder *clause_builder, - std::vector<int> *saved_uids) { - Clause::Save(clause_builder, saved_uids); - auto builder = clause_builder->initSetProperties(); - SetProperties::Save(&builder, saved_uids); -} - -void SetProperties::Save(capnp::SetProperties::Builder *builder, - std::vector<int> *saved_uids) { - if (identifier_) { - auto id_builder = builder->getIdentifier(); - identifier_->Save(&id_builder, saved_uids); - } - if (expression_) { - auto expr_builder = builder->getExpression(); - expression_->Save(&expr_builder, saved_uids); - } - builder->setUpdate(update_); -} - -void SetProperties::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, - std::vector<int> *loaded_uids) { - Clause::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getClause().getSetProperties(); - if (reader.hasIdentifier()) { - const auto id_reader = reader.getIdentifier(); - identifier_ = - dynamic_cast<Identifier *>(storage->Load(id_reader, loaded_uids)); - } - if (reader.hasExpression()) { - const auto expr_reader = reader.getExpression(); - expression_ = - dynamic_cast<Expression *>(storage->Load(expr_reader, loaded_uids)); - } - update_ = reader.getUpdate(); -} - -SetProperties *SetProperties::Construct( - const capnp::SetProperties::Reader &reader, AstStorage *storage) { - return storage->Create<SetProperties>(); -} - -// Unwind. -void Unwind::Save(capnp::Clause::Builder *clause_builder, - std::vector<int> *saved_uids) { - Clause::Save(clause_builder, saved_uids); - auto builder = clause_builder->initUnwind(); - Unwind::Save(&builder, saved_uids); -} - -void Unwind::Save(capnp::Unwind::Builder *builder, - std::vector<int> *saved_uids) { - if (named_expression_) { - auto expr_builder = builder->getNamedExpression(); - named_expression_->Save(&expr_builder, saved_uids); - } -} - -void Unwind::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Clause::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getClause().getUnwind(); - if (reader.hasNamedExpression()) { - const auto expr_reader = reader.getNamedExpression(); - named_expression_ = dynamic_cast<NamedExpression *>( - storage->Load(expr_reader, loaded_uids)); - } -} - -Unwind *Unwind::Construct(const capnp::Unwind::Reader &reader, - AstStorage *storage) { - return storage->Create<Unwind>(); -} - -// With. -void With::Save(capnp::Clause::Builder *clause_builder, - std::vector<int> *saved_uids) { - Clause::Save(clause_builder, saved_uids); - auto builder = clause_builder->initWith(); - With::Save(&builder, saved_uids); -} - -void With::Save(capnp::With::Builder *builder, std::vector<int> *saved_uids) { - if (where_) { - auto where_builder = builder->getWhere(); - where_->Save(&where_builder, saved_uids); - } - auto rb_builder = builder->initReturnBody(); - SaveReturnBody(&rb_builder, body_, saved_uids); -} - -void With::Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) { - Clause::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getClause().getWith(); - if (reader.hasWhere()) { - const auto where_reader = reader.getWhere(); - where_ = dynamic_cast<Where *>(storage->Load(where_reader, loaded_uids)); - } - auto rb_reader = reader.getReturnBody(); - LoadReturnBody(rb_reader, body_, storage, loaded_uids); -} - -With *With::Construct(const capnp::With::Reader &reader, - AstStorage *storage) { - return storage->Create<With>(); -} - -// ModifyUser. 
-void ModifyUser::Save(capnp::Clause::Builder *clause_builder, - std::vector<int> *saved_uids) { - Clause::Save(clause_builder, saved_uids); - auto builder = clause_builder->initModifyUser(); - ModifyUser::Save(&builder, saved_uids); -} - -void ModifyUser::Save(capnp::ModifyUser::Builder *builder, - std::vector<int> *saved_uids) { - builder->setUsername(username_); - if (password_) { - auto password_builder = builder->getPassword(); - password_->Save(&password_builder, saved_uids); - } - builder->setIsCreate(is_create_); -} - -void ModifyUser::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Clause::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getClause().getModifyUser(); - username_ = reader.getUsername(); - if (reader.hasPassword()) { - const auto password_reader = reader.getPassword(); - password_ = - dynamic_cast<Expression *>(storage->Load(password_reader, loaded_uids)); - } else { - password_ = nullptr; - } - is_create_ = reader.getIsCreate(); -} - -ModifyUser *ModifyUser::Construct(const capnp::ModifyUser::Reader &reader, - AstStorage *storage) { - return storage->Create<ModifyUser>(); -} - -// DropUser. -void DropUser::Save(capnp::Clause::Builder *clause_builder, - std::vector<int> *saved_uids) { - Clause::Save(clause_builder, saved_uids); - auto builder = clause_builder->initDropUser(); - DropUser::Save(&builder, saved_uids); -} - -void DropUser::Save(capnp::DropUser::Builder *builder, - std::vector<int> *saved_uids) { - auto usernames_builder = builder->initUsernames(usernames_.size()); - utils::SaveVector(usernames_, &usernames_builder); -} - -void DropUser::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Clause::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getClause().getDropUser(); - usernames_.clear(); - utils::LoadVector(&usernames_, reader.getUsernames()); -} - -DropUser *DropUser::Construct(const capnp::DropUser::Reader &reader, - AstStorage *storage) { - return storage->Create<DropUser>(); -} - -// CypherUnion -void CypherUnion::Save(capnp::Tree::Builder *tree_builder, - std::vector<int> *saved_uids) { - Tree::Save(tree_builder, saved_uids); - if (IsSaved(*saved_uids)) { - return; - } - auto builder = tree_builder->initCypherUnion(); - Save(&builder, saved_uids); - AddToSaved(saved_uids); -} - -void CypherUnion::Save(capnp::CypherUnion::Builder *builder, - std::vector<int> *saved_uids) { - if (single_query_) { - auto sq_builder = builder->initSingleQuery(); - single_query_->Save(&sq_builder, saved_uids); - } - builder->setDistinct(distinct_); - auto symbol_builders = builder->initUnionSymbols(union_symbols_.size()); - for (size_t i = 0; i < union_symbols_.size(); ++i) { - auto symbol_builder = symbol_builders[i]; - union_symbols_[i].Save(&symbol_builder); - } -} - -void CypherUnion::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Tree::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getCypherUnion(); - if (reader.hasSingleQuery()) { - const auto sq_reader = reader.getSingleQuery(); - single_query_ = - dynamic_cast<SingleQuery *>(storage->Load(sq_reader, loaded_uids)); - } - distinct_ = reader.getDistinct(); - for (auto symbol_reader : reader.getUnionSymbols()) { - Symbol symbol; - symbol.Load(symbol_reader); - union_symbols_.push_back(symbol); - } -} - -CypherUnion *CypherUnion::Construct(const capnp::CypherUnion::Reader &reader, - AstStorage 
*storage) { - return storage->Create<CypherUnion>(); -} - -// NamedExpression -void NamedExpression::Save(capnp::Tree::Builder *tree_builder, - std::vector<int> *saved_uids) { - Tree::Save(tree_builder, saved_uids); - if (IsSaved(*saved_uids)) { - return; - } - auto builder = tree_builder->initNamedExpression(); - Save(&builder, saved_uids); - AddToSaved(saved_uids); -} - -void NamedExpression::Save(capnp::NamedExpression::Builder *builder, - std::vector<int> *saved_uids) { - builder->setName(name_); - builder->setTokenPosition(token_position_); - if (expression_) { - auto expr_builder = builder->getExpression(); - expression_->Save(&expr_builder, saved_uids); - } -} - -void NamedExpression::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, - std::vector<int> *loaded_uids) { - Tree::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getNamedExpression(); - name_ = reader.getName().cStr(); - token_position_ = reader.getTokenPosition(); - if (reader.hasExpression()) { - const auto expr_reader = reader.getExpression(); - expression_ = - dynamic_cast<Expression *>(storage->Load(expr_reader, loaded_uids)); - } -} - -NamedExpression *NamedExpression::Construct( - const capnp::NamedExpression::Reader &reader, AstStorage *storage) { - return storage->Create<NamedExpression>(); -} - -// Pattern -void Pattern::Save(capnp::Tree::Builder *tree_builder, - std::vector<int> *saved_uids) { - Tree::Save(tree_builder, saved_uids); - if (IsSaved(*saved_uids)) { - return; - } - auto builder = tree_builder->initPattern(); - Save(&builder, saved_uids); - AddToSaved(saved_uids); -} - -void Pattern::Save(capnp::Pattern::Builder *builder, - std::vector<int> *saved_uids) { - if (identifier_) { - auto id_builder = builder->getIdentifier(); - identifier_->Save(&id_builder, saved_uids); - } - ::capnp::List<capnp::Tree>::Builder tree_builders = - builder->initAtoms(atoms_.size()); - for (size_t i = 0; i < atoms_.size(); ++i) { - auto tree_builder = tree_builders[i]; - atoms_[i]->Save(&tree_builder, saved_uids); - } -} - -void Pattern::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Tree::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getPattern(); - if (reader.hasIdentifier()) { - const auto id_reader = reader.getIdentifier(); - identifier_ = - dynamic_cast<Identifier *>(storage->Load(id_reader, loaded_uids)); - } - for (const auto tree_reader : reader.getAtoms()) { - auto tree = storage->Load(tree_reader, loaded_uids); - atoms_.push_back(dynamic_cast<PatternAtom *>(tree)); - } -} - -Pattern *Pattern::Construct(const capnp::Pattern::Reader &reader, - AstStorage *storage) { - return storage->Create<Pattern>(); -} - -// PatternAtom. 
-void PatternAtom::Save(capnp::Tree::Builder *tree_builder, - std::vector<int> *saved_uids) { - Tree::Save(tree_builder, saved_uids); - if (IsSaved(*saved_uids)) { - return; - } - auto pattern_builder = tree_builder->initPatternAtom(); - Save(&pattern_builder, saved_uids); - AddToSaved(saved_uids); -} - -void PatternAtom::Save(capnp::PatternAtom::Builder *builder, - std::vector<int> *saved_uids) { - if (identifier_) { - auto id_builder = builder->getIdentifier(); - identifier_->Save(&id_builder, saved_uids); - } -} - -PatternAtom *PatternAtom::Construct(const capnp::PatternAtom::Reader &reader, - AstStorage *storage) { - switch (reader.which()) { - case capnp::PatternAtom::EDGE_ATOM: { - auto edge_reader = reader.getEdgeAtom(); - return EdgeAtom::Construct(edge_reader, storage); - } - case capnp::PatternAtom::NODE_ATOM: { - auto node_reader = reader.getNodeAtom(); - return NodeAtom::Construct(node_reader, storage); - } - } -} - -void PatternAtom::Load(const capnp::Tree::Reader &reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Tree::Load(reader, storage, loaded_uids); - auto pa_reader = reader.getPatternAtom(); - if (pa_reader.hasIdentifier()) { - const auto id_reader = pa_reader.getIdentifier(); - identifier_ = - dynamic_cast<Identifier *>(storage->Load(id_reader, loaded_uids)); - } -} - -// NodeAtom -void NodeAtom::Save(capnp::PatternAtom::Builder *pattern_builder, - std::vector<int> *saved_uids) { - PatternAtom::Save(pattern_builder, saved_uids); - auto builder = pattern_builder->initNodeAtom(); - Save(&builder, saved_uids); -} - -void NodeAtom::Save(capnp::NodeAtom::Builder *builder, - std::vector<int> *saved_uids) { - ::capnp::List<capnp::NodeAtom::Entry>::Builder map_builder = - builder->initProperties(properties_.size()); - size_t i = 0; - for (auto &entry : properties_) { - auto entry_builder = map_builder[i]; - auto key_builder = entry_builder.getKey(); - key_builder.setFirst(entry.first.first); - auto storage_property_builder = key_builder.getSecond(); - entry.first.second.Save(&storage_property_builder); - auto value_builder = entry_builder.getValue(); - if (entry.second) entry.second->Save(&value_builder, saved_uids); - ++i; - } - auto common_builders = builder->initLabels(labels_.size()); - for (size_t i = 0; i < labels_.size(); ++i) { - auto common_builder = common_builders[i]; - labels_[i].Save(&common_builder); - } -} - -void NodeAtom::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - PatternAtom::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getPatternAtom().getNodeAtom(); - for (auto entry_reader : reader.getProperties()) { - auto key_pair_reader = entry_reader.getKey(); - auto key_first = key_pair_reader.getFirst().cStr(); - auto storage_property_reader = key_pair_reader.getSecond(); - storage::Property property; - property.Load(storage_property_reader); - const auto value_reader = entry_reader.getValue(); - auto value = storage->Load(value_reader, loaded_uids); - auto key = std::make_pair(key_first, property); - // TODO Maybe check if expression is nullptr? 
- properties_.emplace(key, dynamic_cast<Expression *>(value)); - } - for (auto label_reader : reader.getLabels()) { - storage::Label label; - label.Load(label_reader); - labels_.push_back(label); - } -} - -NodeAtom *NodeAtom::Construct(const capnp::NodeAtom::Reader &reader, - AstStorage *storage) { - return storage->Create<NodeAtom>(); -} - -// EdgeAtom -void EdgeAtom::Save(capnp::PatternAtom::Builder *pattern_builder, - std::vector<int> *saved_uids) { - PatternAtom::Save(pattern_builder, saved_uids); - auto builder = pattern_builder->initEdgeAtom(); - Save(&builder, saved_uids); -} - -void SaveLambda(query::EdgeAtom::Lambda &lambda, - capnp::EdgeAtom::Lambda::Builder *builder, - std::vector<int> *saved_uids) { - if (lambda.inner_edge) { - auto ie_builder = builder->getInnerEdge(); - lambda.inner_edge->Save(&ie_builder, saved_uids); - } - if (lambda.inner_node) { - auto in_builder = builder->getInnerNode(); - lambda.inner_node->Save(&in_builder, saved_uids); - } - if (lambda.expression) { - auto expr_builder = builder->getExpression(); - lambda.expression->Save(&expr_builder, saved_uids); - } -} - -void EdgeAtom::Save(capnp::EdgeAtom::Builder *builder, - std::vector<int> *saved_uids) { - switch (type_) { - case Type::BREADTH_FIRST: - builder->setType(capnp::EdgeAtom::Type::BREADTH_FIRST); - break; - case Type::DEPTH_FIRST: - builder->setType(capnp::EdgeAtom::Type::DEPTH_FIRST); - break; - case Type::SINGLE: - builder->setType(capnp::EdgeAtom::Type::SINGLE); - break; - case Type::WEIGHTED_SHORTEST_PATH: - builder->setType(capnp::EdgeAtom::Type::WEIGHTED_SHORTEST_PATH); - break; - } - - switch (direction_) { - case Direction::BOTH: - builder->setDirection(capnp::EdgeAtom::Direction::BOTH); - break; - case Direction::IN: - builder->setDirection(capnp::EdgeAtom::Direction::IN); - break; - case Direction::OUT: - builder->setDirection(capnp::EdgeAtom::Direction::OUT); - break; - } - - auto common_builders = builder->initEdgeTypes(edge_types_.size()); - for (size_t i = 0; i < edge_types_.size(); ++i) { - auto common_builder = common_builders[i]; - edge_types_[i].Save(&common_builder); - } - - ::capnp::List<capnp::EdgeAtom::Entry>::Builder map_builder = - builder->initProperties(properties_.size()); - size_t i = 0; - for (auto &entry : properties_) { - auto entry_builder = map_builder[i]; - auto key_builder = entry_builder.getKey(); - key_builder.setFirst(entry.first.first); - auto storage_property_builder = key_builder.getSecond(); - entry.first.second.Save(&storage_property_builder); - auto value_builder = entry_builder.getValue(); - if (entry.second) entry.second->Save(&value_builder, saved_uids); - ++i; - } - - if (lower_bound_) { - auto lb_builder = builder->getLowerBound(); - lower_bound_->Save(&lb_builder, saved_uids); - } - if (upper_bound_) { - auto ub_builder = builder->getUpperBound(); - upper_bound_->Save(&ub_builder, saved_uids); - } - - auto filter_builder = builder->initFilterLambda(); - SaveLambda(filter_lambda_, &filter_builder, saved_uids); - auto weight_builder = builder->initWeightLambda(); - SaveLambda(weight_lambda_, &weight_builder, saved_uids); - - if (total_weight_) { - auto total_weight_builder = builder->getTotalWeight(); - total_weight_->Save(&total_weight_builder, saved_uids); - } -} - -void LoadLambda(capnp::EdgeAtom::Lambda::Reader &reader, - query::EdgeAtom::Lambda &lambda, AstStorage *storage, - std::vector<int> *loaded_uids) { - if (reader.hasInnerEdge()) { - const auto ie_reader = reader.getInnerEdge(); - lambda.inner_edge = - dynamic_cast<Identifier 
*>(storage->Load(ie_reader, loaded_uids)); - } - if (reader.hasInnerNode()) { - const auto in_reader = reader.getInnerNode(); - lambda.inner_node = - dynamic_cast<Identifier *>(storage->Load(in_reader, loaded_uids)); - } - if (reader.hasExpression()) { - const auto expr_reader = reader.getExpression(); - lambda.expression = - dynamic_cast<Expression *>(storage->Load(expr_reader, loaded_uids)); - } -} - -void EdgeAtom::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - PatternAtom::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getPatternAtom().getEdgeAtom(); - switch (reader.getType()) { - case capnp::EdgeAtom::Type::BREADTH_FIRST: - type_ = Type::BREADTH_FIRST; - break; - case capnp::EdgeAtom::Type::DEPTH_FIRST: - type_ = Type::DEPTH_FIRST; - break; - case capnp::EdgeAtom::Type::SINGLE: - type_ = Type::SINGLE; - break; - case capnp::EdgeAtom::Type::WEIGHTED_SHORTEST_PATH: - type_ = Type::WEIGHTED_SHORTEST_PATH; - break; - } - - switch (reader.getDirection()) { - case capnp::EdgeAtom::Direction::BOTH: - direction_ = Direction::BOTH; - break; - case capnp::EdgeAtom::Direction::IN: - direction_ = Direction::IN; - break; - case capnp::EdgeAtom::Direction::OUT: - direction_ = Direction::OUT; - break; - } - - if (reader.hasTotalWeight()) { - const auto id_reader = reader.getTotalWeight(); - total_weight_ = - dynamic_cast<Identifier *>(storage->Load(id_reader, loaded_uids)); - } - if (reader.hasLowerBound()) { - const auto lb_reader = reader.getLowerBound(); - lower_bound_ = - dynamic_cast<Expression *>(storage->Load(lb_reader, loaded_uids)); - } - if (reader.hasUpperBound()) { - const auto ub_reader = reader.getUpperBound(); - upper_bound_ = - dynamic_cast<Expression *>(storage->Load(ub_reader, loaded_uids)); - } - auto filter_reader = reader.getFilterLambda(); - LoadLambda(filter_reader, filter_lambda_, storage, loaded_uids); - auto weight_reader = reader.getWeightLambda(); - LoadLambda(weight_reader, weight_lambda_, storage, loaded_uids); - - for (auto entry_reader : reader.getProperties()) { - auto key_pair_reader = entry_reader.getKey(); - auto key_first = key_pair_reader.getFirst().cStr(); - auto storage_property_reader = key_pair_reader.getSecond(); - storage::Property property; - property.Load(storage_property_reader); - const auto value_reader = entry_reader.getValue(); - auto value = storage->Load(value_reader, loaded_uids); - auto key = std::make_pair(key_first, property); - // TODO Check if expression is null? 
- properties_.emplace(key, dynamic_cast<Expression *>(value)); - } - - for (auto edge_type_reader : reader.getEdgeTypes()) { - storage::EdgeType edge_type; - edge_type.Load(edge_type_reader); - edge_types_.push_back(edge_type); - } -} - -EdgeAtom *EdgeAtom::Construct(const capnp::EdgeAtom::Reader &reader, - AstStorage *storage) { - return storage->Create<EdgeAtom>(); -} - -// Query -void Query::Save(capnp::Tree::Builder *tree_builder, - std::vector<int> *saved_uids) { - Tree::Save(tree_builder, saved_uids); - if (IsSaved(*saved_uids)) { - return; - } - auto builder = tree_builder->initQuery(); - Save(&builder, saved_uids); - AddToSaved(saved_uids); -} - -void Query::Save(capnp::Query::Builder *builder, std::vector<int> *saved_uids) { - if (single_query_) { - auto sq_builder = builder->initSingleQuery(); - single_query_->Save(&sq_builder, saved_uids); - } - ::capnp::List<capnp::Tree>::Builder tree_builders = - builder->initCypherUnions(cypher_unions_.size()); - for (size_t i = 0; i < cypher_unions_.size(); ++i) { - auto tree_builder = tree_builders[i]; - cypher_unions_[i]->Save(&tree_builder, saved_uids); - } -} - -void Query::Load(const capnp::Tree::Reader &reader, AstStorage *storage, - std::vector<int> *loaded_uids) { - Tree::Load(reader, storage, loaded_uids); - auto query_reader = reader.getQuery(); - if (query_reader.hasSingleQuery()) { - const auto sq_reader = query_reader.getSingleQuery(); - single_query_ = - dynamic_cast<SingleQuery *>(storage->Load(sq_reader, loaded_uids)); - } - for (const auto tree_reader : query_reader.getCypherUnions()) { - auto tree = storage->Load(tree_reader, loaded_uids); - cypher_unions_.push_back(dynamic_cast<CypherUnion *>(tree)); - } -} - -// SingleQuery -void SingleQuery::Save(capnp::Tree::Builder *tree_builder, - std::vector<int> *saved_uids) { - Tree::Save(tree_builder, saved_uids); - if (IsSaved(*saved_uids)) { - return; - } - auto builder = tree_builder->initSingleQuery(); - Save(&builder, saved_uids); - AddToSaved(saved_uids); -} - -void SingleQuery::Save(capnp::SingleQuery::Builder *builder, - std::vector<int> *saved_uids) { - ::capnp::List<capnp::Tree>::Builder tree_builders = - builder->initClauses(clauses_.size()); - for (size_t i = 0; i < clauses_.size(); ++i) { - auto tree_builder = tree_builders[i]; - clauses_[i]->Save(&tree_builder, saved_uids); - } -} - -void SingleQuery::Load(const capnp::Tree::Reader &base_reader, - AstStorage *storage, std::vector<int> *loaded_uids) { - Tree::Load(base_reader, storage, loaded_uids); - auto reader = base_reader.getSingleQuery(); - for (const auto tree_reader : reader.getClauses()) { - auto tree = storage->Load(tree_reader, loaded_uids); - clauses_.push_back(dynamic_cast<Clause *>(tree)); - } -} - -SingleQuery *SingleQuery::Construct(const capnp::SingleQuery::Reader &reader, - AstStorage *storage) { - return storage->Create<SingleQuery>(); -} -} // namespace query - -BOOST_CLASS_EXPORT_IMPLEMENT(query::Query); -BOOST_CLASS_EXPORT_IMPLEMENT(query::SingleQuery); -BOOST_CLASS_EXPORT_IMPLEMENT(query::CypherUnion); -BOOST_CLASS_EXPORT_IMPLEMENT(query::NamedExpression); -BOOST_CLASS_EXPORT_IMPLEMENT(query::OrOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::XorOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::AndOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::NotOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::AdditionOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::SubtractionOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::MultiplicationOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::DivisionOperator); 
-BOOST_CLASS_EXPORT_IMPLEMENT(query::ModOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::NotEqualOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::EqualOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::LessOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::GreaterOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::LessEqualOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::GreaterEqualOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::InListOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::ListMapIndexingOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::ListSlicingOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::IfOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::UnaryPlusOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::UnaryMinusOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::IsNullOperator); -BOOST_CLASS_EXPORT_IMPLEMENT(query::ListLiteral); -BOOST_CLASS_EXPORT_IMPLEMENT(query::MapLiteral); -BOOST_CLASS_EXPORT_IMPLEMENT(query::PropertyLookup); -BOOST_CLASS_EXPORT_IMPLEMENT(query::LabelsTest); -BOOST_CLASS_EXPORT_IMPLEMENT(query::Aggregation); -BOOST_CLASS_EXPORT_IMPLEMENT(query::Function); -BOOST_CLASS_EXPORT_IMPLEMENT(query::Reduce); -BOOST_CLASS_EXPORT_IMPLEMENT(query::All); -BOOST_CLASS_EXPORT_IMPLEMENT(query::Single); -BOOST_CLASS_EXPORT_IMPLEMENT(query::ParameterLookup); -BOOST_CLASS_EXPORT_IMPLEMENT(query::Create); -BOOST_CLASS_EXPORT_IMPLEMENT(query::Match); -BOOST_CLASS_EXPORT_IMPLEMENT(query::Return); -BOOST_CLASS_EXPORT_IMPLEMENT(query::With); -BOOST_CLASS_EXPORT_IMPLEMENT(query::Pattern); -BOOST_CLASS_EXPORT_IMPLEMENT(query::NodeAtom); -BOOST_CLASS_EXPORT_IMPLEMENT(query::EdgeAtom); -BOOST_CLASS_EXPORT_IMPLEMENT(query::Delete); -BOOST_CLASS_EXPORT_IMPLEMENT(query::Where); -BOOST_CLASS_EXPORT_IMPLEMENT(query::SetProperty); -BOOST_CLASS_EXPORT_IMPLEMENT(query::SetProperties); -BOOST_CLASS_EXPORT_IMPLEMENT(query::SetLabels); -BOOST_CLASS_EXPORT_IMPLEMENT(query::RemoveProperty); -BOOST_CLASS_EXPORT_IMPLEMENT(query::RemoveLabels); -BOOST_CLASS_EXPORT_IMPLEMENT(query::Merge); -BOOST_CLASS_EXPORT_IMPLEMENT(query::Unwind); -BOOST_CLASS_EXPORT_IMPLEMENT(query::Identifier); -BOOST_CLASS_EXPORT_IMPLEMENT(query::PrimitiveLiteral); -BOOST_CLASS_EXPORT_IMPLEMENT(query::CreateIndex); -BOOST_CLASS_EXPORT_IMPLEMENT(query::ModifyUser); -BOOST_CLASS_EXPORT_IMPLEMENT(query::DropUser); diff --git a/src/query/frontend/ast/ast.hpp b/src/query/frontend/ast/ast.hpp index 989681d5e..62510d68b 100644 --- a/src/query/frontend/ast/ast.hpp +++ b/src/query/frontend/ast/ast.hpp @@ -4,20 +4,11 @@ #include <unordered_map> #include <vector> -#include "boost/serialization/base_object.hpp" -#include "boost/serialization/export.hpp" -#include "boost/serialization/split_member.hpp" -#include "boost/serialization/string.hpp" -#include "boost/serialization/vector.hpp" - #include "query/frontend/ast/ast_visitor.hpp" #include "query/frontend/semantic/symbol.hpp" #include "query/interpret/awesome_memgraph_functions.hpp" #include "query/typed_value.hpp" #include "storage/types.hpp" -#include "utils/serialization.hpp" - -#include "ast.capnp.h" // Hash function for the key in pattern atom property maps. 
namespace std { @@ -55,12 +46,6 @@ namespace query { expression_->Clone(storage)); \ } -#define SERIALIZE_USING_BASE(BaseClass) \ - template <class TArchive> \ - void serialize(TArchive &ar, const unsigned int) { \ - ar &boost::serialization::base_object<BaseClass>(*this); \ - } - class Tree; // It would be better to call this AstTree, but we already have a class Tree, @@ -88,76 +73,11 @@ class AstStorage { /// Id for using get_helper<AstStorage> in boost archives. static void *const kHelperId; - /// Load an Ast Node into this storage. - template <class TArchive, class TNode> - void Load(TArchive &ar, TNode &node) { - auto &tmp_ast = ar.template get_helper<AstStorage>(kHelperId); - std::swap(*this, tmp_ast); - ar >> node; - std::swap(*this, tmp_ast); - } - - /// Load a Query into this storage. - template <class TArchive> - void Load(TArchive &ar) { - Load(ar, *query()); - } - - Tree *Load(const capnp::Tree::Reader &tree, std::vector<int> *loaded_uids); - private: int next_uid_ = 0; std::vector<std::unique_ptr<Tree>> storage_; - - template <class TArchive, class TNode> - friend void LoadPointer(TArchive &ar, TNode *&node); }; -template <class TArchive, class TNode> -void SavePointer(TArchive &ar, TNode *node) { - ar << node; -} - -template <class TArchive, class TNode> -void LoadPointer(TArchive &ar, TNode *&node) { - ar >> node; - if (node) { - auto &ast_storage = - ar.template get_helper<AstStorage>(AstStorage::kHelperId); - auto found = - std::find_if(ast_storage.storage_.begin(), ast_storage.storage_.end(), - [&](const auto &n) { return n->uid() == node->uid(); }); - // Boost makes sure pointers to same address are deserialized only once, so - // we only need to add nodes to the storage only on the first load. - DCHECK(ast_storage.storage_.end() == found || - dynamic_cast<TNode *>(found->get()) == node); - if (ast_storage.storage_.end() == found) { - ast_storage.storage_.emplace_back(node); - ast_storage.next_uid_ = std::max(ast_storage.next_uid_, node->uid() + 1); - } - } -} - -template <class TArchive, class TNode> -void SavePointers(TArchive &ar, const std::vector<TNode *> &nodes) { - ar << nodes.size(); - for (auto *node : nodes) { - SavePointer(ar, node); - } -} - -template <class TArchive, class TNode> -void LoadPointers(TArchive &ar, std::vector<TNode *> &nodes) { - size_t size = 0; - ar >> size; - for (size_t i = 0; i < size; ++i) { - TNode *node = nullptr; - LoadPointer(ar, node); - DCHECK(node) << "Unexpected nullptr serialized"; - nodes.emplace_back(node); - } -} - class Tree : public ::utils::Visitable<HierarchicalTreeVisitor>, ::utils::Visitable<TreeVisitor<TypedValue>> { friend class AstStorage; @@ -169,26 +89,15 @@ class Tree : public ::utils::Visitable<HierarchicalTreeVisitor>, int uid() const { return uid_; } virtual Tree *Clone(AstStorage &storage) const = 0; - virtual void Save(capnp::Tree::Builder *builder, - std::vector<int> *saved_uids); protected: explicit Tree(int uid) : uid_(uid) {} - virtual void Load(const capnp::Tree::Reader &reader, AstStorage *storage, - std::vector<int> *loaded_uids); bool IsSaved(const std::vector<int> &saved_uids); void AddToSaved(std::vector<int> *saved_uids); private: int uid_; - - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar &uid_; - } }; // Expressions @@ -198,20 +107,9 @@ class Expression : public Tree { public: Expression *Clone(AstStorage &storage) const override = 0; - static Expression *Construct(const capnp::Expression::Reader &reader, - 
AstStorage *storage); - - void Save(capnp::Tree::Builder *builder, - std::vector<int> *saved_uids) override; protected: explicit Expression(int uid) : Tree(uid) {} - - virtual void Save(capnp::Expression::Builder *, std::vector<int> *) {} - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(Tree); }; class Where : public Tree { @@ -230,42 +128,11 @@ class Where : public Tree { return storage.Create<Where>(expression_->Clone(storage)); } - static Where *Construct(const capnp::Where::Reader &reader, - AstStorage *storage); - - void Save(capnp::Tree::Builder *builder, - std::vector<int> *saved_uids) override; - Expression *expression_ = nullptr; protected: explicit Where(int uid) : Tree(uid) {} Where(int uid, Expression *expression) : Tree(uid), expression_(expression) {} - - virtual void Save(capnp::Where::Builder *, std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &tree_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Tree>(*this); - SavePointer(ar, expression_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Tree>(*this); - LoadPointer(ar, expression_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, Where *, - const unsigned int); }; class BinaryOperator : public Expression { @@ -276,40 +143,11 @@ class BinaryOperator : public Expression { Expression *expression2_ = nullptr; BinaryOperator *Clone(AstStorage &storage) const override = 0; - static BinaryOperator *Construct(const capnp::BinaryOperator::Reader &reader, - AstStorage *storage); protected: explicit BinaryOperator(int uid) : Expression(uid) {} BinaryOperator(int uid, Expression *expression1, Expression *expression2) : Expression(uid), expression1_(expression1), expression2_(expression2) {} - - void Save(capnp::Expression::Builder *builder, - std::vector<int> *saved_uids) override; - using Expression::Save; - virtual void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Expression>(*this); - SavePointer(ar, expression1_); - SavePointer(ar, expression2_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Expression>(*this); - LoadPointer(ar, expression1_); - LoadPointer(ar, expression2_); - } }; class UnaryOperator : public Expression { @@ -319,38 +157,11 @@ class UnaryOperator : public Expression { Expression *expression_ = nullptr; UnaryOperator *Clone(AstStorage &storage) const override = 0; - static UnaryOperator *Construct(const capnp::UnaryOperator::Reader &reader, - AstStorage *storage); protected: explicit UnaryOperator(int uid) : Expression(uid) {} UnaryOperator(int uid, Expression *expression) : Expression(uid), expression_(expression) {} - - void Save(capnp::Expression::Builder *, - std::vector<int> *saved_uids) override; - using Expression::Save; - virtual void 
Save(capnp::UnaryOperator::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Expression>(*this); - SavePointer(ar, expression_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Expression>(*this); - LoadPointer(ar, expression_); - } }; class OrOperator : public BinaryOperator { @@ -366,21 +177,8 @@ class OrOperator : public BinaryOperator { } CLONE_BINARY_EXPRESSION; - static OrOperator *Construct(const capnp::OrOperator::Reader &reader, - AstStorage *storage); - protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - OrOperator *, - const unsigned int); }; class XorOperator : public BinaryOperator { @@ -395,21 +193,9 @@ class XorOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static XorOperator *Construct(const capnp::XorOperator::Reader &reader, - AstStorage *storage); protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - XorOperator *, - const unsigned int); }; class AndOperator : public BinaryOperator { @@ -424,21 +210,9 @@ class AndOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static AndOperator *Construct(const capnp::AndOperator::Reader &reader, - AstStorage *storage); protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - AndOperator *, - const unsigned int); }; class AdditionOperator : public BinaryOperator { @@ -453,21 +227,9 @@ class AdditionOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static AdditionOperator *Construct( - const capnp::AdditionOperator::Reader &reader, AstStorage *storage); protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - AdditionOperator *, - const unsigned int); }; class SubtractionOperator : public BinaryOperator { @@ -482,21 +244,9 @@ class SubtractionOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static SubtractionOperator *Construct( - capnp::SubtractionOperator::Reader &reader, AstStorage *storage); protected: using 
BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - SubtractionOperator *, - const unsigned int); }; class MultiplicationOperator : public BinaryOperator { @@ -511,20 +261,9 @@ class MultiplicationOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static MultiplicationOperator *Construct( - capnp::MultiplicationOperator::Reader &reader, AstStorage *storage); protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data( - TArchive &, MultiplicationOperator *, const unsigned int); }; class DivisionOperator : public BinaryOperator { @@ -539,21 +278,9 @@ class DivisionOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static DivisionOperator *Construct( - const capnp::DivisionOperator::Reader &reader, AstStorage *storage); protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - DivisionOperator *, - const unsigned int); }; class ModOperator : public BinaryOperator { @@ -568,21 +295,9 @@ class ModOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static ModOperator *Construct(const capnp::ModOperator::Reader &reader, - AstStorage *storage); protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - ModOperator *, - const unsigned int); }; class NotEqualOperator : public BinaryOperator { @@ -597,21 +312,9 @@ class NotEqualOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static NotEqualOperator *Construct( - const capnp::NotEqualOperator::Reader &reader, AstStorage *storage); protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - NotEqualOperator *, - const unsigned int); }; class EqualOperator : public BinaryOperator { @@ -626,21 +329,9 @@ class EqualOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static EqualOperator *Construct(const capnp::EqualOperator::Reader &reader, - AstStorage *storage); protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - 
SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - EqualOperator *, - const unsigned int); }; class LessOperator : public BinaryOperator { @@ -655,21 +346,9 @@ class LessOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static LessOperator *Construct(const capnp::LessOperator::Reader &reader, - AstStorage *storage); protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - LessOperator *, - const unsigned int); }; class GreaterOperator : public BinaryOperator { @@ -684,21 +363,9 @@ class GreaterOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static GreaterOperator *Construct( - const capnp::GreaterOperator::Reader &reader, AstStorage *storage); protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - GreaterOperator *, - const unsigned int); }; class LessEqualOperator : public BinaryOperator { @@ -713,21 +380,9 @@ class LessEqualOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static LessEqualOperator *Construct( - const capnp::LessEqualOperator::Reader &reader, AstStorage *storage); protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - LessEqualOperator *, - const unsigned int); }; class GreaterEqualOperator : public BinaryOperator { @@ -742,22 +397,9 @@ class GreaterEqualOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static GreaterEqualOperator *Construct( - const capnp::GreaterEqualOperator::Reader &reader, - AstStorage *storage); protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - GreaterEqualOperator *, - const unsigned int); }; class InListOperator : public BinaryOperator { @@ -772,21 +414,9 @@ class InListOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static InListOperator *Construct(const capnp::InListOperator::Reader &reader, - AstStorage *storage); protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - InListOperator *, - const unsigned int); }; class 
ListMapIndexingOperator : public BinaryOperator { @@ -801,20 +431,9 @@ class ListMapIndexingOperator : public BinaryOperator { return visitor.PostVisit(*this); } CLONE_BINARY_EXPRESSION; - static ListMapIndexingOperator *Construct( - capnp::ListMapIndexingOperator::Reader &reader, AstStorage *storage); protected: using BinaryOperator::BinaryOperator; - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(BinaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data( - TArchive &, ListMapIndexingOperator *, const unsigned int); }; class ListSlicingOperator : public Expression { @@ -842,10 +461,6 @@ class ListSlicingOperator : public Expression { upper_bound_ ? upper_bound_->Clone(storage) : nullptr); } - static ListSlicingOperator *Construct( - const capnp::ListSlicingOperator::Reader &reader, - AstStorage *storage); - Expression *list_ = nullptr; Expression *lower_bound_ = nullptr; Expression *upper_bound_ = nullptr; @@ -857,40 +472,6 @@ class ListSlicingOperator : public Expression { list_(list), lower_bound_(lower_bound), upper_bound_(upper_bound) {} - - void Save(capnp::Expression::Builder *builder, - std::vector<int> *saved_uids) override; - using Expression::Save; - virtual void Save(capnp::ListSlicingOperator::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Expression>(*this); - SavePointer(ar, list_); - SavePointer(ar, lower_bound_); - SavePointer(ar, upper_bound_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Expression>(*this); - LoadPointer(ar, list_); - LoadPointer(ar, lower_bound_); - LoadPointer(ar, upper_bound_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - ListSlicingOperator *, - const unsigned int); }; class IfOperator : public Expression { @@ -912,8 +493,6 @@ class IfOperator : public Expression { else_expression_->Clone(storage)); } - static IfOperator *Construct(const capnp::IfOperator::Reader &reader, - AstStorage *storage); // None of the expressions should be nullptrs. If there is no else_expression // you probably want to make it NULL PrimitiveLiteral. 
Expression *condition_; @@ -927,40 +506,6 @@ class IfOperator : public Expression { condition_(condition), then_expression_(then_expression), else_expression_(else_expression) {} - - void Save(capnp::Expression::Builder *builder, - std::vector<int> *saved_uids) override; - using Expression::Save; - virtual void Save(capnp::IfOperator::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Expression>(*this); - SavePointer(ar, condition_); - SavePointer(ar, then_expression_); - SavePointer(ar, else_expression_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Expression>(*this); - LoadPointer(ar, condition_); - LoadPointer(ar, then_expression_); - LoadPointer(ar, else_expression_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - IfOperator *, - const unsigned int); }; class NotOperator : public UnaryOperator { @@ -975,21 +520,9 @@ class NotOperator : public UnaryOperator { return visitor.PostVisit(*this); } CLONE_UNARY_EXPRESSION; - static NotOperator *Construct(const capnp::NotOperator::Reader &reader, - AstStorage *storage); protected: using UnaryOperator::UnaryOperator; - void Save(capnp::UnaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(UnaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - NotOperator *, - const unsigned int); }; class UnaryPlusOperator : public UnaryOperator { @@ -1004,21 +537,9 @@ class UnaryPlusOperator : public UnaryOperator { return visitor.PostVisit(*this); } CLONE_UNARY_EXPRESSION; - static UnaryPlusOperator *Construct( - const capnp::UnaryPlusOperator::Reader &reader, AstStorage *storage); protected: using UnaryOperator::UnaryOperator; - void Save(capnp::UnaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(UnaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - UnaryPlusOperator *, - const unsigned int); }; class UnaryMinusOperator : public UnaryOperator { @@ -1033,21 +554,9 @@ class UnaryMinusOperator : public UnaryOperator { return visitor.PostVisit(*this); } CLONE_UNARY_EXPRESSION; - static UnaryMinusOperator *Construct( - capnp::UnaryMinusOperator::Reader &reader, AstStorage *storage); protected: using UnaryOperator::UnaryOperator; - void Save(capnp::UnaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(UnaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - UnaryMinusOperator *, - const unsigned int); }; class IsNullOperator : public UnaryOperator { @@ -1062,21 +571,9 @@ class IsNullOperator : public UnaryOperator { return visitor.PostVisit(*this); } CLONE_UNARY_EXPRESSION; - static IsNullOperator *Construct(const capnp::IsNullOperator::Reader &reader, - AstStorage *storage); protected: using UnaryOperator::UnaryOperator; - 
void Save(capnp::UnaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(UnaryOperator); - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - IsNullOperator *, - const unsigned int); }; class BaseLiteral : public Expression { @@ -1084,21 +581,9 @@ class BaseLiteral : public Expression { public: BaseLiteral *Clone(AstStorage &storage) const override = 0; - static BaseLiteral *Construct(const capnp::BaseLiteral::Reader &reader, - AstStorage *storage); protected: explicit BaseLiteral(int uid) : Expression(uid) {} - - void Save(capnp::Expression::Builder *builder, - std::vector<int> *saved_uids) override; - using Expression::Save; - virtual void Save(capnp::BaseLiteral::Builder *, - std::vector<int> *saved_uids) {} - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(Expression); }; class PrimitiveLiteral : public BaseLiteral { @@ -1112,9 +597,6 @@ class PrimitiveLiteral : public BaseLiteral { return storage.Create<PrimitiveLiteral>(value_, token_position_); } - static PrimitiveLiteral *Construct( - const capnp::PrimitiveLiteral::Reader &reader, AstStorage *storage); - TypedValue value_; // This field contains token position of literal used to create // PrimitiveLiteral object. If PrimitiveLiteral object is not created from @@ -1128,35 +610,6 @@ class PrimitiveLiteral : public BaseLiteral { template <typename T> PrimitiveLiteral(int uid, T value, int token_position) : BaseLiteral(uid), value_(value), token_position_(token_position) {} - - void Save(capnp::BaseLiteral::Builder *builder, - std::vector<int> *saved_uids) override; - void Load(const capnp::Tree::Reader &reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<BaseLiteral>(*this); - ar << token_position_; - utils::SaveTypedValue(ar, value_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<BaseLiteral>(*this); - ar >> token_position_; - utils::LoadTypedValue(ar, value_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - PrimitiveLiteral *, - const unsigned int); }; class ListLiteral : public BaseLiteral { @@ -1180,42 +633,12 @@ class ListLiteral : public BaseLiteral { return list; } - static ListLiteral *Construct(const capnp::ListLiteral::Reader &reader, - AstStorage *storage); - std::vector<Expression *> elements_; protected: explicit ListLiteral(int uid) : BaseLiteral(uid) {} ListLiteral(int uid, const std::vector<Expression *> &elements) : BaseLiteral(uid), elements_(elements) {} - - void Save(capnp::BaseLiteral::Builder *builder, - std::vector<int> *saved_uids) override; - void Load(const capnp::Tree::Reader &reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<BaseLiteral>(*this); - SavePointers(ar, elements_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<BaseLiteral>(*this); - 
LoadPointers(ar, elements_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - ListLiteral *, - const unsigned int); }; class MapLiteral : public BaseLiteral { @@ -1238,9 +661,6 @@ class MapLiteral : public BaseLiteral { return map; } - static MapLiteral *Construct(const capnp::MapLiteral::Reader &reader, - AstStorage *storage); - // maps (property_name, property) to expressions std::unordered_map<std::pair<std::string, storage::Property>, Expression *> elements_; @@ -1251,49 +671,6 @@ class MapLiteral : public BaseLiteral { const std::unordered_map<std::pair<std::string, storage::Property>, Expression *> &elements) : BaseLiteral(uid), elements_(elements) {} - - void Save(capnp::BaseLiteral::Builder *builder, - std::vector<int> *saved_uids) override; - void Load(const capnp::Tree::Reader &reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<BaseLiteral>(*this); - ar << elements_.size(); - for (const auto &element : elements_) { - const auto &property = element.first; - ar << property.first; - ar << property.second; - SavePointer(ar, element.second); - } - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<BaseLiteral>(*this); - size_t size = 0; - ar >> size; - for (size_t i = 0; i < size; ++i) { - std::pair<std::string, storage::Property> property; - ar >> property.first; - ar >> property.second; - Expression *expression = nullptr; - LoadPointer(ar, expression); - DCHECK(expression) << "Unexpected nullptr expression serialized"; - elements_.emplace(property, expression); - } - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - MapLiteral *, - const unsigned int); }; class Identifier : public Expression { @@ -1307,10 +684,6 @@ class Identifier : public Expression { return storage.Create<Identifier>(name_, user_declared_); } - static Identifier *Construct(const capnp::Identifier::Reader &reader, - AstStorage *storage); - using Expression::Save; - std::string name_; bool user_declared_ = true; @@ -1318,26 +691,6 @@ class Identifier : public Expression { Identifier(int uid, const std::string &name) : Expression(uid), name_(name) {} Identifier(int uid, const std::string &name, bool user_declared) : Expression(uid), name_(name), user_declared_(user_declared) {} - - void Save(capnp::Expression::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::Identifier::Builder *builder, - std::vector<int> *saved_uids); - - private: - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar &boost::serialization::base_object<Expression>(*this); - ar &name_; - ar &user_declared_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - Identifier *, - const unsigned int); }; class PropertyLookup : public Expression { @@ -1357,10 +710,6 @@ class PropertyLookup : public Expression { property_name_, property_); } - static PropertyLookup *Construct(const capnp::PropertyLookup::Reader &reader, - AstStorage *storage); - using Expression::Save; - Expression *expression_ = nullptr; std::string property_name_; storage::Property property_; @@ -1378,39 
+727,6 @@ class PropertyLookup : public Expression { expression_(expression), property_name_(property.first), property_(property.second) {} - - void Save(capnp::Expression::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::PropertyLookup::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Expression>(*this); - SavePointer(ar, expression_); - ar << property_name_; - ar << property_; - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Expression>(*this); - LoadPointer(ar, expression_); - ar >> property_name_; - ar >> property_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - PropertyLookup *, - const unsigned int); }; class LabelsTest : public Expression { @@ -1429,10 +745,6 @@ class LabelsTest : public Expression { return storage.Create<LabelsTest>(expression_->Clone(storage), labels_); } - static LabelsTest *Construct(const capnp::LabelsTest::Reader &reader, - AstStorage *storage); - using Expression::Save; - Expression *expression_ = nullptr; std::vector<storage::Label> labels_; @@ -1440,37 +752,6 @@ class LabelsTest : public Expression { LabelsTest(int uid, Expression *expression, const std::vector<storage::Label> &labels) : Expression(uid), expression_(expression), labels_(labels) {} - - void Save(capnp::Expression::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::LabelsTest::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &tree_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Expression>(*this); - SavePointer(ar, expression_); - ar << labels_; - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Expression>(*this); - LoadPointer(ar, expression_); - ar >> labels_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - LabelsTest *, - const unsigned int); }; class Function : public Expression { @@ -1495,9 +776,6 @@ class Function : public Expression { return storage.Create<Function>(function_name_, arguments); } - static Function *Construct(const capnp::Function::Reader &reader, - AstStorage *storage); - const auto &function() const { return function_; } const auto &function_name() const { return function_name_; } std::vector<Expression *> arguments_; @@ -1513,43 +791,12 @@ class Function : public Expression { function_(NameToFunction(function_name_)) { DCHECK(function_) << "Unexpected missing function: " << function_name_; } - void Save(capnp::Expression::Builder *builder, - std::vector<int> *saved_uids) override; - using Expression::Save; - virtual void Save(capnp::Function::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &tree_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; private: 
std::string function_name_; std::function<TypedValue(const std::vector<TypedValue> &, database::GraphDbAccessor &)> function_; - - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Expression>(*this); - ar << function_name_; - SavePointers(ar, arguments_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Expression>(*this); - ar >> function_name_; - function_ = NameToFunction(function_name_); - DCHECK(function_) << "Unexpected missing function: " << function_name_; - LoadPointers(ar, arguments_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, Function *, - const unsigned int); }; class Aggregation : public BinaryOperator { @@ -1586,8 +833,6 @@ class Aggregation : public BinaryOperator { } Op op_; - static Aggregation *Construct(const capnp::Aggregation::Reader &, - AstStorage *storage); protected: // Use only for serialization. @@ -1604,23 +849,6 @@ class Aggregation : public BinaryOperator { << "The second expression is obligatory in COLLECT_MAP and " "invalid otherwise"; } - - void Save(capnp::BinaryOperator::Builder *builder, - std::vector<int> *saved_uids) override; - - private: - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar &boost::serialization::base_object<BinaryOperator>(*this); - ar &op_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - Aggregation *, - const unsigned int); }; class Reduce : public Expression { @@ -1644,10 +872,6 @@ class Reduce : public Expression { expression_->Clone(storage)); } - static Reduce *Construct(const capnp::Reduce::Reader &reader, - AstStorage *storage); - using Expression::Save; - // None of these should be nullptr after construction. /// Identifier for the accumulating variable @@ -1671,42 +895,6 @@ class Reduce : public Expression { identifier_(identifier), list_(list), expression_(expression) {} - - void Save(capnp::Expression::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::Reduce::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &tree_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Expression>(*this); - SavePointer(ar, accumulator_); - SavePointer(ar, initializer_); - SavePointer(ar, identifier_); - SavePointer(ar, list_); - SavePointer(ar, expression_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Expression>(*this); - LoadPointer(ar, accumulator_); - LoadPointer(ar, initializer_); - LoadPointer(ar, identifier_); - LoadPointer(ar, list_); - LoadPointer(ar, expression_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, Reduce *, - const unsigned int); }; // TODO: Think about representing All and Any as Reduce. 
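[Editor's note, not part of the patch] The Boost helpers being deleted in ast.hpp (SavePointer/LoadPointer and the load() bodies above) rely on uid-based de-duplication: a deserialized node is added to AstStorage only the first time its uid is seen, so shared subtrees come back as a single owned object and next_uid_ stays ahead of every loaded uid. The following standalone C++ sketch illustrates only that mechanism; Node and Storage are hypothetical stand-ins, not Memgraph's AstStorage API.

    // Standalone sketch of the uid-based dedup behind the removed LoadPointer helper.
    #include <algorithm>
    #include <cassert>
    #include <memory>
    #include <vector>

    struct Node {
      int uid;
      explicit Node(int uid) : uid(uid) {}
    };

    struct Storage {
      std::vector<std::unique_ptr<Node>> nodes;
      int next_uid = 0;

      // Take ownership of `node` unless a node with the same uid is already stored;
      // in that case return the existing object (mirrors LoadPointer's find_if check).
      Node *Register(Node *node) {
        auto found = std::find_if(
            nodes.begin(), nodes.end(),
            [&](const auto &n) { return n->uid == node->uid; });
        if (found != nodes.end()) {
          delete node;  // duplicate load of an already owned node
          return found->get();
        }
        nodes.emplace_back(node);
        next_uid = std::max(next_uid, node->uid + 1);
        return node;
      }
    };

    int main() {
      Storage storage;
      Node *a = storage.Register(new Node(3));
      Node *b = storage.Register(new Node(3));  // same uid -> same object comes back
      assert(a == b);
      assert(storage.next_uid == 4);
      (void)a;
      (void)b;
    }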
@@ -1729,9 +917,6 @@ class All : public Expression { where_->Clone(storage)); } - static All *Construct(const capnp::All::Reader &reader, - AstStorage *storage); - // None of these should be nullptr after construction. Identifier *identifier_ = nullptr; Expression *list_expression_ = nullptr; @@ -1744,38 +929,6 @@ class All : public Expression { identifier_(identifier), list_expression_(list_expression), where_(where) {} - - void Save(capnp::Expression::Builder *builder, - std::vector<int> *saved_uids) override; - using Expression::Save; - virtual void Save(capnp::All::Builder *builder, std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &tree_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Expression>(*this); - SavePointer(ar, identifier_); - SavePointer(ar, list_expression_); - SavePointer(ar, where_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Expression>(*this); - LoadPointer(ar, identifier_); - LoadPointer(ar, list_expression_); - LoadPointer(ar, where_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, All *, - const unsigned int); }; // TODO: This is pretty much copy pasted from All. Consider merging Reduce, @@ -1800,10 +953,6 @@ class Single : public Expression { where_->Clone(storage)); } - static Single *Construct(const capnp::Single::Reader &reader, - AstStorage *storage); - using Expression::Save; - // None of these should be nullptr after construction. Identifier *identifier_ = nullptr; Expression *list_expression_ = nullptr; @@ -1816,38 +965,6 @@ class Single : public Expression { identifier_(identifier), list_expression_(list_expression), where_(where) {} - - void Save(capnp::Expression::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::Single::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &tree_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Expression>(*this); - SavePointer(ar, identifier_); - SavePointer(ar, list_expression_); - SavePointer(ar, where_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Expression>(*this); - LoadPointer(ar, identifier_); - LoadPointer(ar, list_expression_); - LoadPointer(ar, where_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, Single *, - const unsigned int); }; class ParameterLookup : public Expression { @@ -1861,10 +978,6 @@ class ParameterLookup : public Expression { return storage.Create<ParameterLookup>(token_position_); } - static ParameterLookup *Construct( - const capnp::ParameterLookup::Reader &reader, AstStorage *storage); - using Expression::Save; - // This field contains token position of *literal* used to create // ParameterLookup object. If ParameterLookup object is not created from // a literal leave this value at -1. 
@@ -1874,24 +987,6 @@ class ParameterLookup : public Expression { explicit ParameterLookup(int uid) : Expression(uid) {} ParameterLookup(int uid, int token_position) : Expression(uid), token_position_(token_position) {} - - void Save(capnp::Expression::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::ParameterLookup::Builder *builder, - std::vector<int> *saved_uids); - - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar &boost::serialization::base_object<Expression>(*this); - ar &token_position_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - ParameterLookup *, - const unsigned int); }; class NamedExpression : public Tree { @@ -1911,11 +1006,6 @@ class NamedExpression : public Tree { token_position_); } - static NamedExpression *Construct( - const capnp::NamedExpression::Reader &reader, AstStorage *storage); - void Save(capnp::Tree::Builder *builder, - std::vector<int> *saved_uids) override; - std::string name_; Expression *expression_ = nullptr; // This field contains token position of first token in named expression @@ -1934,37 +1024,6 @@ class NamedExpression : public Tree { name_(name), expression_(expression), token_position_(token_position) {} - - virtual void Save(capnp::NamedExpression::Builder *, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Tree>(*this); - ar << name_; - SavePointer(ar, expression_); - ar << token_position_; - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Tree>(*this); - ar >> name_; - LoadPointer(ar, expression_); - ar >> token_position_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - NamedExpression *, - const unsigned int); }; // Pattern atoms @@ -1977,37 +1036,10 @@ class PatternAtom : public Tree { PatternAtom *Clone(AstStorage &storage) const override = 0; - static PatternAtom *Construct(const capnp::PatternAtom::Reader &reader, - AstStorage *storage); - void Save(capnp::Tree::Builder *builder, - std::vector<int> *saved_uids) override; - protected: explicit PatternAtom(int uid) : Tree(uid) {} PatternAtom(int uid, Identifier *identifier) : Tree(uid), identifier_(identifier) {} - - virtual void Save(capnp::PatternAtom::Builder *, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Tree>(*this); - SavePointer(ar, identifier_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Tree>(*this); - LoadPointer(ar, identifier_); - } }; class NodeAtom : public PatternAtom { @@ -2036,10 +1068,6 @@ class NodeAtom : public PatternAtom { return node_atom; } - static NodeAtom *Construct(const capnp::NodeAtom::Reader &reader, - AstStorage *storage); - using 
PatternAtom::Save; - std::vector<storage::Label> labels_; // maps (property_name, property) to an expression std::unordered_map<std::pair<std::string, storage::Property>, Expression *> @@ -2047,52 +1075,6 @@ class NodeAtom : public PatternAtom { protected: using PatternAtom::PatternAtom; - - void Save(capnp::PatternAtom::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::NodeAtom::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<PatternAtom>(*this); - ar << labels_; - ar << properties_.size(); - for (const auto &property : properties_) { - const auto &key = property.first; - ar << key.first; - ar << key.second; - SavePointer(ar, property.second); - } - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<PatternAtom>(*this); - ar >> labels_; - size_t size = 0; - ar >> size; - for (size_t i = 0; i < size; ++i) { - std::pair<std::string, storage::Property> property; - ar >> property.first; - ar >> property.second; - Expression *expression = nullptr; - LoadPointer(ar, expression); - DCHECK(expression) << "Unexpected nullptr expression serialized"; - properties_.emplace(property, expression); - } - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, NodeAtom *, - const unsigned int); }; class EdgeAtom : public PatternAtom { @@ -2177,10 +1159,6 @@ class EdgeAtom : public PatternAtom { } } - static EdgeAtom *Construct(const capnp::EdgeAtom::Reader &reader, - AstStorage *storage); - using PatternAtom::Save; - Type type_ = Type::SINGLE; Direction direction_ = Direction::BOTH; std::vector<storage::EdgeType> edge_types_; @@ -2217,76 +1195,6 @@ class EdgeAtom : public PatternAtom { type_(type), direction_(direction), edge_types_(edge_types) {} - - void Save(capnp::PatternAtom::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::EdgeAtom::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<PatternAtom>(*this); - ar << type_; - ar << direction_; - ar << edge_types_; - ar << properties_.size(); - for (const auto &property : properties_) { - const auto &key = property.first; - ar << key.first; - ar << key.second; - SavePointer(ar, property.second); - } - SavePointer(ar, lower_bound_); - SavePointer(ar, upper_bound_); - auto save_lambda = [&ar](const auto &lambda) { - SavePointer(ar, lambda.inner_edge); - SavePointer(ar, lambda.inner_node); - SavePointer(ar, lambda.expression); - }; - save_lambda(filter_lambda_); - save_lambda(weight_lambda_); - SavePointer(ar, total_weight_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<PatternAtom>(*this); - ar >> type_; - ar >> direction_; - ar >> edge_types_; - size_t size = 0; - ar >> size; - for (size_t i = 0; i < size; ++i) { - 
std::pair<std::string, storage::Property> property; - ar >> property.first; - ar >> property.second; - Expression *expression = nullptr; - LoadPointer(ar, expression); - DCHECK(expression) << "Unexpected nullptr expression serialized"; - properties_.emplace(property, expression); - } - LoadPointer(ar, lower_bound_); - LoadPointer(ar, upper_bound_); - auto load_lambda = [&ar](auto &lambda) { - LoadPointer(ar, lambda.inner_edge); - LoadPointer(ar, lambda.inner_node); - LoadPointer(ar, lambda.expression); - }; - load_lambda(filter_lambda_); - load_lambda(weight_lambda_); - LoadPointer(ar, total_weight_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, EdgeAtom *, - const unsigned int); }; class Pattern : public Tree { @@ -2315,43 +1223,11 @@ class Pattern : public Tree { return pattern; } - static Pattern *Construct(const capnp::Pattern::Reader &reader, - AstStorage *storage); - void Save(capnp::Tree::Builder *builder, - std::vector<int> *saved_uids) override; - Identifier *identifier_ = nullptr; std::vector<PatternAtom *> atoms_; protected: explicit Pattern(int uid) : Tree(uid) {} - - virtual void Save(capnp::Pattern::Builder *, std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Tree>(*this); - SavePointer(ar, identifier_); - SavePointers(ar, atoms_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Tree>(*this); - LoadPointer(ar, identifier_); - LoadPointers(ar, atoms_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, Pattern *, - const unsigned int); }; // Clause @@ -2363,19 +1239,6 @@ class Clause : public Tree { explicit Clause(int uid) : Tree(uid) {} Clause *Clone(AstStorage &storage) const override = 0; - - static Clause *Construct(const capnp::Clause::Reader &reader, - AstStorage *storage); - - void Save(capnp::Tree::Builder *builder, - std::vector<int> *saved_uids) override; - - protected: - virtual void Save(capnp::Clause::Builder *, std::vector<int> *saved_uids) {} - - private: - friend class boost::serialization::access; - SERIALIZE_USING_BASE(Tree); }; // SingleQuery @@ -2402,43 +1265,10 @@ class SingleQuery : public Tree { return single_query; } - static SingleQuery *Construct(const capnp::SingleQuery::Reader &reader, - AstStorage *storage); - - void Save(capnp::Tree::Builder *builder, - std::vector<int> *saved_uids) override; - std::vector<Clause *> clauses_; protected: explicit SingleQuery(int uid) : Tree(uid) {} - - virtual void Save(capnp::SingleQuery::Builder *, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Tree>(*this); - SavePointers(ar, clauses_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Tree>(*this); - LoadPointers(ar, clauses_); - } - - template <class TArchive> - friend 
void boost::serialization::load_construct_data(TArchive &, - SingleQuery *, - const unsigned int); }; // CypherUnion @@ -2462,11 +1292,6 @@ class CypherUnion : public Tree { return cypher_union; } - static CypherUnion *Construct(const capnp::CypherUnion::Reader &reader, - AstStorage *storage); - void Save(capnp::Tree::Builder *builder, - std::vector<int> *saved_uids) override; - SingleQuery *single_query_ = nullptr; bool distinct_ = false; /// Holds symbols that are created during symbol generation phase. @@ -2483,37 +1308,6 @@ class CypherUnion : public Tree { single_query_(single_query), distinct_(distinct), union_symbols_(union_symbols) {} - - virtual void Save(capnp::CypherUnion::Builder *, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Tree>(*this); - SavePointer(ar, single_query_); - ar << distinct_; - ar << union_symbols_; - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Tree>(*this); - LoadPointer(ar, single_query_); - ar >> distinct_; - ar >> union_symbols_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - CypherUnion *, - const unsigned int); }; // Queries @@ -2545,41 +1339,11 @@ class Query : public Tree { return query; } - void Load(const capnp::Tree::Reader &reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - void Save(capnp::Tree::Builder *builder, - std::vector<int> *saved_uids) override; - SingleQuery *single_query_ = nullptr; std::vector<CypherUnion *> cypher_unions_; protected: explicit Query(int uid) : Tree(uid) {} - - virtual void Save(capnp::Query::Builder *, std::vector<int> *saved_uids); - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Tree>(*this); - SavePointer(ar, single_query_); - SavePointers(ar, cypher_unions_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Tree>(*this); - LoadPointer(ar, single_query_); - LoadPointers(ar, cypher_unions_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, Query *, - const unsigned int); }; // Clauses @@ -2606,44 +1370,12 @@ class Create : public Clause { return create; } - static Create *Construct(const capnp::Create::Reader &reader, - AstStorage *storage); - using Clause::Save; - std::vector<Pattern *> patterns_; protected: explicit Create(int uid) : Clause(uid) {} Create(int uid, std::vector<Pattern *> patterns) : Clause(uid), patterns_(patterns) {} - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::Create::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &tree_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << 
boost::serialization::base_object<Clause>(*this); - SavePointers(ar, patterns_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Clause>(*this); - LoadPointers(ar, patterns_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, Create *, - const unsigned int); }; class Match : public Clause { @@ -2676,10 +1408,6 @@ class Match : public Clause { return match; } - using Clause::Save; - static Match *Construct(const capnp::Match::Reader &reader, - AstStorage *storage); - std::vector<Pattern *> patterns_; Where *where_ = nullptr; bool optional_ = false; @@ -2689,38 +1417,6 @@ class Match : public Clause { Match(int uid, bool optional) : Clause(uid), optional_(optional) {} Match(int uid, bool optional, Where *where, std::vector<Pattern *> patterns) : Clause(uid), patterns_(patterns), where_(where), optional_(optional) {} - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::Match::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Clause>(*this); - SavePointers(ar, patterns_); - SavePointer(ar, where_); - ar << optional_; - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Clause>(*this); - LoadPointers(ar, patterns_); - LoadPointer(ar, where_); - ar >> optional_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, Match *, - const unsigned int); }; /// Defines the order for sorting values (ascending or descending). @@ -2747,44 +1443,6 @@ struct ReturnBody { // function class member. 
ReturnBody CloneReturnBody(AstStorage &storage, const ReturnBody &body); -template <class TArchive> -void serialize(TArchive &ar, ReturnBody &body, - const unsigned int file_version) { - boost::serialization::split_free(ar, body, file_version); -} - -template <class TArchive> -void save(TArchive &ar, const ReturnBody &body, const unsigned int) { - ar << body.distinct; - ar << body.all_identifiers; - SavePointers(ar, body.named_expressions); - ar << body.order_by.size(); - for (const auto &order_by : body.order_by) { - ar << order_by.first; - SavePointer(ar, order_by.second); - } - SavePointer(ar, body.skip); - SavePointer(ar, body.limit); -} - -template <class TArchive> -void load(TArchive &ar, ReturnBody &body, const unsigned int) { - ar >> body.distinct; - ar >> body.all_identifiers; - LoadPointers(ar, body.named_expressions); - size_t size = 0; - ar >> size; - for (size_t i = 0; i < size; ++i) { - std::pair<Ordering, Expression *> order_by; - ar >> order_by.first; - LoadPointer(ar, order_by.second); - DCHECK(order_by.second) << "Unexpected nullptr serialized"; - body.order_by.emplace_back(order_by); - } - LoadPointer(ar, body.skip); - LoadPointer(ar, body.limit); -} - class Return : public Clause { friend class AstStorage; @@ -2819,35 +1477,11 @@ class Return : public Clause { return ret; } - using Clause::Save; - static Return *Construct(const capnp::Return::Reader &reader, - AstStorage *storage); - ReturnBody body_; protected: explicit Return(int uid) : Clause(uid) {} Return(int uid, ReturnBody &body) : Clause(uid), body_(body) {} - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::Return::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar &boost::serialization::base_object<Clause>(*this); - ar &body_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, Return *, - const unsigned int); }; class With : public Clause { @@ -2886,10 +1520,6 @@ class With : public Clause { return with; } - using Clause::Save; - static With *Construct(const capnp::With::Reader &reader, - AstStorage *storage); - ReturnBody body_; Where *where_ = nullptr; @@ -2897,36 +1527,6 @@ class With : public Clause { explicit With(int uid) : Clause(uid) {} With(int uid, ReturnBody &body, Where *where) : Clause(uid), body_(body), where_(where) {} - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::With::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Clause>(*this); - ar << body_; - SavePointer(ar, where_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Clause>(*this); - ar >> body_; - LoadPointer(ar, where_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, With *, - const unsigned int); }; class Delete : public Clause { @@ 
-2952,10 +1552,6 @@ class Delete : public Clause { return del; } - using Clause::Save; - static Delete *Construct(const capnp::Delete::Reader &reader, - AstStorage *storage); - std::vector<Expression *> expressions_; bool detach_ = false; @@ -2964,36 +1560,6 @@ class Delete : public Clause { explicit Delete(int uid) : Clause(uid) {} Delete(int uid, bool detach, std::vector<Expression *> expressions) : Clause(uid), expressions_(expressions), detach_(detach) {} - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::Delete::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Clause>(*this); - SavePointers(ar, expressions_); - ar << detach_; - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Clause>(*this); - LoadPointers(ar, expressions_); - ar >> detach_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, Delete *, - const unsigned int); }; class SetProperty : public Clause { @@ -3013,10 +1579,6 @@ class SetProperty : public Clause { expression_->Clone(storage)); } - using Clause::Save; - static SetProperty *Construct(const capnp::SetProperty::Reader &reader, - AstStorage *storage); - PropertyLookup *property_lookup_ = nullptr; Expression *expression_ = nullptr; @@ -3026,37 +1588,6 @@ class SetProperty : public Clause { : Clause(uid), property_lookup_(property_lookup), expression_(expression) {} - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::SetProperty::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Clause>(*this); - SavePointer(ar, property_lookup_); - SavePointer(ar, expression_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Clause>(*this); - LoadPointer(ar, property_lookup_); - LoadPointer(ar, expression_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - SetProperty *, - const unsigned int); }; class SetProperties : public Clause { @@ -3076,10 +1607,6 @@ class SetProperties : public Clause { expression_->Clone(storage), update_); } - using Clause::Save; - static SetProperties *Construct(const capnp::SetProperties::Reader &reader, - AstStorage *storage); - Identifier *identifier_ = nullptr; Expression *expression_ = nullptr; bool update_ = false; @@ -3092,39 +1619,6 @@ class SetProperties : public Clause { identifier_(identifier), expression_(expression), update_(update) {} - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::SetProperties::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - 
std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Clause>(*this); - SavePointer(ar, identifier_); - SavePointer(ar, expression_); - ar << update_; - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Clause>(*this); - LoadPointer(ar, identifier_); - LoadPointer(ar, expression_); - ar >> update_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - SetProperties *, - const unsigned int); }; class SetLabels : public Clause { @@ -3143,10 +1637,6 @@ class SetLabels : public Clause { return storage.Create<SetLabels>(identifier_->Clone(storage), labels_); } - using Clause::Save; - static SetLabels *Construct(const capnp::SetLabels::Reader &reader, - AstStorage *storage); - Identifier *identifier_ = nullptr; std::vector<storage::Label> labels_; @@ -3155,36 +1645,6 @@ class SetLabels : public Clause { SetLabels(int uid, Identifier *identifier, const std::vector<storage::Label> &labels) : Clause(uid), identifier_(identifier), labels_(labels) {} - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::SetLabels::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Clause>(*this); - SavePointer(ar, identifier_); - ar << labels_; - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Clause>(*this); - LoadPointer(ar, identifier_); - ar >> labels_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, SetLabels *, - const unsigned int); }; class RemoveProperty : public Clause { @@ -3203,45 +1663,12 @@ class RemoveProperty : public Clause { return storage.Create<RemoveProperty>(property_lookup_->Clone(storage)); } - using Clause::Save; - static RemoveProperty *Construct(const capnp::RemoveProperty::Reader &reader, - AstStorage *storage); - PropertyLookup *property_lookup_ = nullptr; protected: explicit RemoveProperty(int uid) : Clause(uid) {} RemoveProperty(int uid, PropertyLookup *property_lookup) : Clause(uid), property_lookup_(property_lookup) {} - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::RemoveProperty::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Clause>(*this); - SavePointer(ar, property_lookup_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Clause>(*this); - LoadPointer(ar, property_lookup_); - } - - template <class TArchive> - friend void 
boost::serialization::load_construct_data(TArchive &, - RemoveProperty *, - const unsigned int); }; class RemoveLabels : public Clause { @@ -3260,10 +1687,6 @@ class RemoveLabels : public Clause { return storage.Create<RemoveLabels>(identifier_->Clone(storage), labels_); } - using Clause::Save; - static RemoveLabels *Construct(const capnp::RemoveLabels::Reader &reader, - AstStorage *storage); - Identifier *identifier_ = nullptr; std::vector<storage::Label> labels_; @@ -3272,37 +1695,6 @@ class RemoveLabels : public Clause { RemoveLabels(int uid, Identifier *identifier, const std::vector<storage::Label> &labels) : Clause(uid), identifier_(identifier), labels_(labels) {} - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::RemoveLabels::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Clause>(*this); - SavePointer(ar, identifier_); - ar << labels_; - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Clause>(*this); - LoadPointer(ar, identifier_); - ar >> labels_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - RemoveLabels *, - const unsigned int); }; class Merge : public Clause { @@ -3345,10 +1737,6 @@ class Merge : public Clause { return merge; } - using Clause::Save; - static Merge *Construct(const capnp::Merge::Reader &reader, - AstStorage *storage); - Pattern *pattern_ = nullptr; std::vector<Clause *> on_match_; std::vector<Clause *> on_create_; @@ -3361,38 +1749,6 @@ class Merge : public Clause { pattern_(pattern), on_match_(on_match), on_create_(on_create) {} - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::Merge::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Clause>(*this); - SavePointer(ar, pattern_); - SavePointers(ar, on_match_); - SavePointers(ar, on_create_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Clause>(*this); - LoadPointer(ar, pattern_); - LoadPointers(ar, on_match_); - LoadPointers(ar, on_create_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, Merge *, - const unsigned int); }; class Unwind : public Clause { @@ -3411,10 +1767,6 @@ class Unwind : public Clause { return storage.Create<Unwind>(named_expression_->Clone(storage)); } - using Clause::Save; - static Unwind *Construct(const capnp::Unwind::Reader &reader, - AstStorage *storage); - NamedExpression *named_expression_ = nullptr; protected: @@ -3425,34 +1777,6 @@ class Unwind : public Clause { DCHECK(named_expression) << "Unwind cannot take nullptr for named_expression"; } - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) 
override; - virtual void Save(capnp::Unwind::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - BOOST_SERIALIZATION_SPLIT_MEMBER(); - - template <class TArchive> - void save(TArchive &ar, const unsigned int) const { - ar << boost::serialization::base_object<Clause>(*this); - SavePointer(ar, named_expression_); - } - - template <class TArchive> - void load(TArchive &ar, const unsigned int) { - ar >> boost::serialization::base_object<Clause>(*this); - LoadPointer(ar, named_expression_); - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, Unwind *, - const unsigned int); }; class CreateIndex : public Clause { @@ -3466,10 +1790,6 @@ class CreateIndex : public Clause { return storage.Create<CreateIndex>(label_, property_); } - static CreateIndex *Construct(const capnp::CreateIndex::Reader &reader, - AstStorage *storage); - using Clause::Save; - storage::Label label_; storage::Property property_; @@ -3477,120 +1797,6 @@ class CreateIndex : public Clause { explicit CreateIndex(int uid) : Clause(uid) {} CreateIndex(int uid, storage::Label label, storage::Property property) : Clause(uid), label_(label), property_(property) {} - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::CreateIndex::Builder *builder, - std::vector<int> *saved_uids); - - private: - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar &boost::serialization::base_object<Clause>(*this); - ar &label_; - ar &property_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - CreateIndex *, - const unsigned int); -}; - -class ModifyUser : public Clause { - friend class AstStorage; - - public: - DEFVISITABLE(TreeVisitor<TypedValue>); - DEFVISITABLE(HierarchicalTreeVisitor); - - ModifyUser *Clone(AstStorage &storage) const override { - return storage.Create<ModifyUser>( - username_, password_ ? 
password_->Clone(storage) : nullptr, is_create_); - } - - static ModifyUser *Construct(const capnp::ModifyUser::Reader &reader, - AstStorage *storage); - using Clause::Save; - - std::string username_; - Expression *password_; - bool is_create_; - - protected: - explicit ModifyUser(int uid) : Clause(uid) {} - ModifyUser(int uid, std::string username, Expression *password, - bool is_create) - : Clause(uid), - username_(std::move(username)), - password_(password), - is_create_(is_create) {} - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::ModifyUser::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar &boost::serialization::base_object<Clause>(*this); - ar &username_ &password_ &is_create_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, - ModifyUser *, - const unsigned int); -}; - -class DropUser : public Clause { - friend class AstStorage; - - public: - DEFVISITABLE(TreeVisitor<TypedValue>); - DEFVISITABLE(HierarchicalTreeVisitor); - - DropUser *Clone(AstStorage &storage) const override { - return storage.Create<DropUser>(usernames_); - } - - static DropUser *Construct(const capnp::DropUser::Reader &reader, - AstStorage *storage); - using Clause::Save; - - std::vector<std::string> usernames_; - - protected: - explicit DropUser(int uid) : Clause(uid) {} - DropUser(int uid, std::vector<std::string> usernames) - : Clause(uid), usernames_(usernames) {} - - void Save(capnp::Clause::Builder *builder, - std::vector<int> *saved_uids) override; - virtual void Save(capnp::DropUser::Builder *builder, - std::vector<int> *saved_uids); - void Load(const capnp::Tree::Reader &base_reader, AstStorage *storage, - std::vector<int> *loaded_uids) override; - - private: - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar &boost::serialization::base_object<Clause>(*this); - ar &usernames_; - } - - template <class TArchive> - friend void boost::serialization::load_construct_data(TArchive &, DropUser *, - const unsigned int); }; #undef CLONE_BINARY_EXPRESSION @@ -3598,136 +1804,3 @@ class DropUser : public Clause { #undef SERIALIZE_USING_BASE } // namespace query -// All of the serialization cruft follows - -#define LOAD_AND_CONSTRUCT(DerivedClass, ...) 
\ - template <class TArchive> \ - void load_construct_data(TArchive &, DerivedClass *cls, \ - const unsigned int) { \ - ::new (cls) DerivedClass(__VA_ARGS__); \ - } - -namespace boost::serialization { - -LOAD_AND_CONSTRUCT(query::Where, 0); -LOAD_AND_CONSTRUCT(query::OrOperator, 0); -LOAD_AND_CONSTRUCT(query::XorOperator, 0); -LOAD_AND_CONSTRUCT(query::AndOperator, 0); -LOAD_AND_CONSTRUCT(query::AdditionOperator, 0); -LOAD_AND_CONSTRUCT(query::SubtractionOperator, 0); -LOAD_AND_CONSTRUCT(query::MultiplicationOperator, 0); -LOAD_AND_CONSTRUCT(query::DivisionOperator, 0); -LOAD_AND_CONSTRUCT(query::ModOperator, 0); -LOAD_AND_CONSTRUCT(query::NotEqualOperator, 0); -LOAD_AND_CONSTRUCT(query::EqualOperator, 0); -LOAD_AND_CONSTRUCT(query::LessOperator, 0); -LOAD_AND_CONSTRUCT(query::GreaterOperator, 0); -LOAD_AND_CONSTRUCT(query::LessEqualOperator, 0); -LOAD_AND_CONSTRUCT(query::GreaterEqualOperator, 0); -LOAD_AND_CONSTRUCT(query::InListOperator, 0); -LOAD_AND_CONSTRUCT(query::ListMapIndexingOperator, 0); -LOAD_AND_CONSTRUCT(query::ListSlicingOperator, 0, nullptr, nullptr, nullptr); -LOAD_AND_CONSTRUCT(query::IfOperator, 0, nullptr, nullptr, nullptr); -LOAD_AND_CONSTRUCT(query::NotOperator, 0); -LOAD_AND_CONSTRUCT(query::UnaryPlusOperator, 0); -LOAD_AND_CONSTRUCT(query::UnaryMinusOperator, 0); -LOAD_AND_CONSTRUCT(query::IsNullOperator, 0); -LOAD_AND_CONSTRUCT(query::PrimitiveLiteral, 0); -LOAD_AND_CONSTRUCT(query::ListLiteral, 0); -LOAD_AND_CONSTRUCT(query::MapLiteral, 0); -LOAD_AND_CONSTRUCT(query::Identifier, 0, ""); -LOAD_AND_CONSTRUCT(query::PropertyLookup, 0, nullptr, "", storage::Property()); -LOAD_AND_CONSTRUCT(query::LabelsTest, 0, nullptr, - std::vector<storage::Label>()); -LOAD_AND_CONSTRUCT(query::Function, 0); -LOAD_AND_CONSTRUCT(query::Aggregation, 0, nullptr, nullptr, - query::Aggregation::Op::COUNT); -LOAD_AND_CONSTRUCT(query::Reduce, 0, nullptr, nullptr, nullptr, nullptr, - nullptr); -LOAD_AND_CONSTRUCT(query::All, 0, nullptr, nullptr, nullptr); -LOAD_AND_CONSTRUCT(query::Single, 0, nullptr, nullptr, nullptr); -LOAD_AND_CONSTRUCT(query::ParameterLookup, 0); -LOAD_AND_CONSTRUCT(query::NamedExpression, 0); -LOAD_AND_CONSTRUCT(query::NodeAtom, 0); -LOAD_AND_CONSTRUCT(query::EdgeAtom, 0); -LOAD_AND_CONSTRUCT(query::Pattern, 0); -LOAD_AND_CONSTRUCT(query::SingleQuery, 0); -LOAD_AND_CONSTRUCT(query::CypherUnion, 0); -LOAD_AND_CONSTRUCT(query::Query, 0); -LOAD_AND_CONSTRUCT(query::Create, 0); -LOAD_AND_CONSTRUCT(query::Match, 0); -LOAD_AND_CONSTRUCT(query::Return, 0); -LOAD_AND_CONSTRUCT(query::With, 0); -LOAD_AND_CONSTRUCT(query::Delete, 0); -LOAD_AND_CONSTRUCT(query::SetProperty, 0); -LOAD_AND_CONSTRUCT(query::SetProperties, 0); -LOAD_AND_CONSTRUCT(query::SetLabels, 0); -LOAD_AND_CONSTRUCT(query::RemoveProperty, 0); -LOAD_AND_CONSTRUCT(query::RemoveLabels, 0); -LOAD_AND_CONSTRUCT(query::Merge, 0); -LOAD_AND_CONSTRUCT(query::Unwind, 0); -LOAD_AND_CONSTRUCT(query::CreateIndex, 0); -LOAD_AND_CONSTRUCT(query::ModifyUser, 0); -LOAD_AND_CONSTRUCT(query::DropUser, 0); - -} // namespace boost::serialization - -#undef LOAD_AND_CONSTRUCT - -BOOST_CLASS_EXPORT_KEY(query::Query); -BOOST_CLASS_EXPORT_KEY(query::SingleQuery); -BOOST_CLASS_EXPORT_KEY(query::CypherUnion); -BOOST_CLASS_EXPORT_KEY(query::NamedExpression); -BOOST_CLASS_EXPORT_KEY(query::OrOperator); -BOOST_CLASS_EXPORT_KEY(query::XorOperator); -BOOST_CLASS_EXPORT_KEY(query::AndOperator); -BOOST_CLASS_EXPORT_KEY(query::NotOperator); -BOOST_CLASS_EXPORT_KEY(query::AdditionOperator); -BOOST_CLASS_EXPORT_KEY(query::SubtractionOperator); 
-BOOST_CLASS_EXPORT_KEY(query::MultiplicationOperator); -BOOST_CLASS_EXPORT_KEY(query::DivisionOperator); -BOOST_CLASS_EXPORT_KEY(query::ModOperator); -BOOST_CLASS_EXPORT_KEY(query::NotEqualOperator); -BOOST_CLASS_EXPORT_KEY(query::EqualOperator); -BOOST_CLASS_EXPORT_KEY(query::LessOperator); -BOOST_CLASS_EXPORT_KEY(query::GreaterOperator); -BOOST_CLASS_EXPORT_KEY(query::LessEqualOperator); -BOOST_CLASS_EXPORT_KEY(query::GreaterEqualOperator); -BOOST_CLASS_EXPORT_KEY(query::InListOperator); -BOOST_CLASS_EXPORT_KEY(query::ListMapIndexingOperator); -BOOST_CLASS_EXPORT_KEY(query::ListSlicingOperator); -BOOST_CLASS_EXPORT_KEY(query::IfOperator); -BOOST_CLASS_EXPORT_KEY(query::UnaryPlusOperator); -BOOST_CLASS_EXPORT_KEY(query::UnaryMinusOperator); -BOOST_CLASS_EXPORT_KEY(query::IsNullOperator); -BOOST_CLASS_EXPORT_KEY(query::ListLiteral); -BOOST_CLASS_EXPORT_KEY(query::MapLiteral); -BOOST_CLASS_EXPORT_KEY(query::PropertyLookup); -BOOST_CLASS_EXPORT_KEY(query::LabelsTest); -BOOST_CLASS_EXPORT_KEY(query::Aggregation); -BOOST_CLASS_EXPORT_KEY(query::Function); -BOOST_CLASS_EXPORT_KEY(query::Reduce); -BOOST_CLASS_EXPORT_KEY(query::All); -BOOST_CLASS_EXPORT_KEY(query::Single); -BOOST_CLASS_EXPORT_KEY(query::ParameterLookup); -BOOST_CLASS_EXPORT_KEY(query::Create); -BOOST_CLASS_EXPORT_KEY(query::Match); -BOOST_CLASS_EXPORT_KEY(query::Return); -BOOST_CLASS_EXPORT_KEY(query::With); -BOOST_CLASS_EXPORT_KEY(query::Pattern); -BOOST_CLASS_EXPORT_KEY(query::NodeAtom); -BOOST_CLASS_EXPORT_KEY(query::EdgeAtom); -BOOST_CLASS_EXPORT_KEY(query::Delete); -BOOST_CLASS_EXPORT_KEY(query::Where); -BOOST_CLASS_EXPORT_KEY(query::SetProperty); -BOOST_CLASS_EXPORT_KEY(query::SetProperties); -BOOST_CLASS_EXPORT_KEY(query::SetLabels); -BOOST_CLASS_EXPORT_KEY(query::RemoveProperty); -BOOST_CLASS_EXPORT_KEY(query::RemoveLabels); -BOOST_CLASS_EXPORT_KEY(query::Merge); -BOOST_CLASS_EXPORT_KEY(query::Unwind); -BOOST_CLASS_EXPORT_KEY(query::Identifier); -BOOST_CLASS_EXPORT_KEY(query::PrimitiveLiteral); -BOOST_CLASS_EXPORT_KEY(query::CreateIndex); -BOOST_CLASS_EXPORT_KEY(query::ModifyUser); -BOOST_CLASS_EXPORT_KEY(query::DropUser); diff --git a/src/query/frontend/ast/ast_visitor.hpp b/src/query/frontend/ast/ast_visitor.hpp index ff3290e0f..f4273abb5 100644 --- a/src/query/frontend/ast/ast_visitor.hpp +++ b/src/query/frontend/ast/ast_visitor.hpp @@ -60,8 +60,6 @@ class RemoveLabels; class Merge; class Unwind; class CreateIndex; -class ModifyUser; -class DropUser; using TreeCompositeVisitor = ::utils::CompositeVisitor< Query, SingleQuery, CypherUnion, NamedExpression, OrOperator, XorOperator, @@ -77,7 +75,7 @@ using TreeCompositeVisitor = ::utils::CompositeVisitor< using TreeLeafVisitor = ::utils::LeafVisitor<Identifier, PrimitiveLiteral, ParameterLookup, - CreateIndex, ModifyUser, DropUser>; + CreateIndex>; class HierarchicalTreeVisitor : public TreeCompositeVisitor, public TreeLeafVisitor { @@ -100,6 +98,6 @@ using TreeVisitor = ::utils::Visitor< LabelsTest, Aggregation, Function, Reduce, All, Single, ParameterLookup, Create, Match, Return, With, Pattern, NodeAtom, EdgeAtom, Delete, Where, SetProperty, SetProperties, SetLabels, RemoveProperty, RemoveLabels, Merge, - Unwind, Identifier, PrimitiveLiteral, CreateIndex, ModifyUser, DropUser>; + Unwind, Identifier, PrimitiveLiteral, CreateIndex>; } // namespace query diff --git a/src/query/frontend/ast/cypher_main_visitor.cpp b/src/query/frontend/ast/cypher_main_visitor.cpp index 85647a65c..fa2697dca 100644 --- a/src/query/frontend/ast/cypher_main_visitor.cpp +++ 
b/src/query/frontend/ast/cypher_main_visitor.cpp @@ -81,7 +81,6 @@ antlrcpp::Any CypherMainVisitor::visitSingleQuery( bool has_return = false; bool has_optional_match = false; bool has_create_index = false; - bool has_modify_user = false; for (Clause *clause : single_query->clauses_) { if (dynamic_cast<Unwind *>(clause)) { if (has_update || has_return) { @@ -126,21 +125,11 @@ antlrcpp::Any CypherMainVisitor::visitSingleQuery( "CreateIndex must be only clause in the query."); } has_create_index = true; - } else if (dynamic_cast<ModifyUser *>(clause)) { - has_modify_user = true; - if (single_query->clauses_.size() != 1U) { - throw SemanticException("ModifyUser must be only clause in the query."); - } - } else if (dynamic_cast<DropUser *>(clause)) { - has_modify_user = true; - if (single_query->clauses_.size() != 1U) { - throw SemanticException("DropUser must be only clause in the query."); - } } else { DLOG(FATAL) << "Can't happen"; } } - if (!has_update && !has_return && !has_create_index && !has_modify_user) { + if (!has_update && !has_return && !has_create_index) { throw SemanticException( "Query should either update something, return results or create an " "index"); @@ -197,14 +186,6 @@ antlrcpp::Any CypherMainVisitor::visitClause(CypherParser::ClauseContext *ctx) { return static_cast<Clause *>( ctx->createIndex()->accept(this).as<CreateIndex *>()); } - if (ctx->modifyUser()) { - return static_cast<Clause *>( - ctx->modifyUser()->accept(this).as<ModifyUser *>()); - } - if (ctx->dropUser()) { - return static_cast<Clause *>( - ctx->dropUser()->accept(this).as<DropUser *>()); - } // TODO: implement other clauses. throw utils::NotYetImplemented("clause '{}'", ctx->getText()); return 0; @@ -238,49 +219,6 @@ antlrcpp::Any CypherMainVisitor::visitCreateIndex( ctx_.db_accessor_.Label(ctx->labelName()->accept(this)), key.second); } -/** - * @return ModifyUser* - */ -antlrcpp::Any CypherMainVisitor::visitModifyUser( - CypherParser::ModifyUserContext *ctx) { - std::string username(ctx->userName()->getText()); - Expression *password = nullptr; - bool is_create = static_cast<bool>(ctx->createUser()); - for (auto option : ctx->modifyUserOption()) { - if (option->passwordOption()) { - if (password) { - throw QueryException("password should be set at most once"); - } - password = option->passwordOption()->accept(this); - continue; - } - LOG(FATAL) << "Expected to handle all cases above."; - } - return storage_.Create<ModifyUser>(username, password, is_create); -} - -/** - * @return Expression* - */ -antlrcpp::Any CypherMainVisitor::visitPasswordOption( - CypherParser::PasswordOptionContext *ctx) { - if (!ctx->literal()->StringLiteral() && !ctx->literal()->CYPHERNULL()) { - throw SyntaxException("password should be a string literal or NULL"); - } - return ctx->literal()->accept(this); -} - -/** - * @return DropUser* - */ -antlrcpp::Any CypherMainVisitor::visitDropUser( - CypherParser::DropUserContext *ctx) { - std::vector<std::string> usernames; - for (auto username_ptr : ctx->userName()) - usernames.emplace_back(username_ptr->getText()); - return storage_.Create<DropUser>(usernames); -} - antlrcpp::Any CypherMainVisitor::visitCypherReturn( CypherParser::CypherReturnContext *ctx) { auto *return_clause = storage_.Create<Return>(); diff --git a/src/query/frontend/ast/cypher_main_visitor.hpp b/src/query/frontend/ast/cypher_main_visitor.hpp index 49e0bb262..01f1b3bcd 100644 --- a/src/query/frontend/ast/cypher_main_visitor.hpp +++ b/src/query/frontend/ast/cypher_main_visitor.hpp @@ -173,19 +173,6 @@ class 
CypherMainVisitor : public antlropencypher::CypherBaseVisitor { antlrcpp::Any visitCreateIndex( CypherParser::CreateIndexContext *ctx) override; - /** - * @return ModifyUser* - */ - antlrcpp::Any visitModifyUser(CypherParser::ModifyUserContext *ctx) override; - - antlrcpp::Any visitPasswordOption( - CypherParser::PasswordOptionContext *ctx) override; - - /** - * @return DropUser* - */ - antlrcpp::Any visitDropUser(CypherParser::DropUserContext *ctx) override; - /** * @return Return* */ diff --git a/src/query/frontend/opencypher/grammar/Cypher.g4 b/src/query/frontend/opencypher/grammar/Cypher.g4 index 25ad537a2..0029b5db5 100644 --- a/src/query/frontend/opencypher/grammar/Cypher.g4 +++ b/src/query/frontend/opencypher/grammar/Cypher.g4 @@ -47,8 +47,6 @@ clause : cypherMatch | with | cypherReturn | createIndex - | modifyUser - | dropUser ; cypherMatch : ( OPTIONAL SP )? MATCH SP? pattern ( SP? where )? ; @@ -276,20 +274,6 @@ integerLiteral : HexInteger createIndex : CREATE SP INDEX SP ON SP? ':' SP? labelName SP? '(' SP? propertyKeyName SP? ')' ; -userName : UnescapedSymbolicName ; - -createUser : CREATE SP USER ; - -alterUser : ALTER SP USER ; - -modifyUser : ( createUser | alterUser ) SP userName ( SP WITH ( SP modifyUserOption )+ )? ; - -modifyUserOption : passwordOption ; - -passwordOption : PASSWORD SP literal; - -dropUser : DROP SP USER SP userName ( SP? ',' SP? userName )* ; - HexInteger : '0x' ( HexDigit )+ ; DecimalInteger : ZeroDigit @@ -500,14 +484,6 @@ BFS : ( 'B' | 'b' ) ( 'F' | 'f' ) ( 'S' | 's' ) ; WSHORTEST : ( 'W' | 'w' ) ( 'S' | 's' ) ( 'H' | 'h' ) ( 'O' | 'o' ) ( 'R' | 'r' ) ( 'T' | 't' ) ( 'E' | 'e' ) ( 'S' | 's' ) ( 'T' | 't' ) ; -USER : ( 'U' | 'u' ) ( 'S' | 's' ) ( 'E' | 'e' ) ( 'R' | 'r' ) ; - -PASSWORD : ( 'P' | 'p' ) ( 'A' | 'a' ) ( 'S' | 's' ) ( 'S' | 's' ) ( 'W' | 'w' ) ( 'O' | 'o' ) ( 'R' | 'r' ) ( 'D' | 'd' ) ; - -ALTER : ( 'A' | 'a' ) ( 'L' | 'l' ) ( 'T' | 't' ) ( 'E' | 'e' ) ( 'R' | 'r' ) ; - -DROP : ( 'D' | 'd' ) ( 'R' | 'r' ) ( 'O' | 'o' ) ( 'P' | 'p' ) ; - UnescapedSymbolicName : IdentifierStart ( IdentifierPart )* ; /** diff --git a/src/query/frontend/semantic/symbol.capnp b/src/query/frontend/semantic/symbol.capnp deleted file mode 100644 index 076ea08cb..000000000 --- a/src/query/frontend/semantic/symbol.capnp +++ /dev/null @@ -1,31 +0,0 @@ -@0x93c1dcee84e93b76; - -using Cxx = import "/capnp/c++.capnp"; -$Cxx.namespace("query::capnp"); - -struct Symbol { - enum Type { - any @0; - vertex @1; - edge @2; - path @3; - number @4; - edgeList @5; - } - - name @0 :Text; - position @1 :Int32; - type @2 :Type; - userDeclared @3 :Bool; - tokenPosition @4 :Int32; -} - -struct SymbolTable { - position @0 :Int32; - table @1 :List(Entry); - - struct Entry { - key @0 :Int32; - val @1 :Symbol; - } -} diff --git a/src/query/frontend/semantic/symbol.hpp b/src/query/frontend/semantic/symbol.hpp index f9e892fb4..478614628 100644 --- a/src/query/frontend/semantic/symbol.hpp +++ b/src/query/frontend/semantic/symbol.hpp @@ -2,11 +2,6 @@ #include <string> -#include "boost/serialization/serialization.hpp" -#include "boost/serialization/string.hpp" - -#include "symbol.capnp.h" - namespace query { class Symbol { @@ -42,77 +37,12 @@ class Symbol { bool user_declared() const { return user_declared_; } int token_position() const { return token_position_; } - void Save(capnp::Symbol::Builder *builder) const { - builder->setName(name_); - builder->setPosition(position_); - builder->setUserDeclared(user_declared_); - builder->setTokenPosition(token_position_); - switch (type_) { - case 
Type::Any: - builder->setType(capnp::Symbol::Type::ANY); - break; - case Type::Edge: - builder->setType(capnp::Symbol::Type::EDGE); - break; - case Type::EdgeList: - builder->setType(capnp::Symbol::Type::EDGE_LIST); - break; - case Type::Number: - builder->setType(capnp::Symbol::Type::NUMBER); - break; - case Type::Path: - builder->setType(capnp::Symbol::Type::PATH); - break; - case Type::Vertex: - builder->setType(capnp::Symbol::Type::VERTEX); - break; - } - } - - void Load(const capnp::Symbol::Reader &reader) { - name_ = reader.getName(); - position_ = reader.getPosition(); - user_declared_ = reader.getUserDeclared(); - token_position_ = reader.getTokenPosition(); - switch (reader.getType()) { - case capnp::Symbol::Type::ANY: - type_ = Type::Any; - break; - case capnp::Symbol::Type::EDGE: - type_ = Type::Edge; - break; - case capnp::Symbol::Type::EDGE_LIST: - type_ = Type::EdgeList; - break; - case capnp::Symbol::Type::NUMBER: - type_ = Type::Number; - break; - case capnp::Symbol::Type::PATH: - type_ = Type::Path; - break; - case capnp::Symbol::Type::VERTEX: - type_ = Type::Vertex; - break; - } - } - private: std::string name_; int position_; bool user_declared_ = true; Type type_ = Type::Any; int token_position_ = -1; - - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar & name_; - ar & position_; - ar & user_declared_; - ar & type_; - ar & token_position_; - } }; } // namespace query diff --git a/src/query/frontend/semantic/symbol_generator.cpp b/src/query/frontend/semantic/symbol_generator.cpp index 27d8bb24c..e1d5392df 100644 --- a/src/query/frontend/semantic/symbol_generator.cpp +++ b/src/query/frontend/semantic/symbol_generator.cpp @@ -220,10 +220,6 @@ bool SymbolGenerator::PostVisit(Match &) { bool SymbolGenerator::Visit(CreateIndex &) { return true; } -bool SymbolGenerator::Visit(ModifyUser &) { return true; } - -bool SymbolGenerator::Visit(DropUser &) { return true; } - // Expressions SymbolGenerator::ReturnType SymbolGenerator::Visit(Identifier &ident) { diff --git a/src/query/frontend/semantic/symbol_generator.hpp b/src/query/frontend/semantic/symbol_generator.hpp index 1d63ae22c..178fbafb0 100644 --- a/src/query/frontend/semantic/symbol_generator.hpp +++ b/src/query/frontend/semantic/symbol_generator.hpp @@ -47,8 +47,6 @@ class SymbolGenerator : public HierarchicalTreeVisitor { bool PreVisit(Match &) override; bool PostVisit(Match &) override; bool Visit(CreateIndex &) override; - bool Visit(ModifyUser &) override; - bool Visit(DropUser &) override; // Expressions ReturnType Visit(Identifier &) override; diff --git a/src/query/frontend/semantic/symbol_table.hpp b/src/query/frontend/semantic/symbol_table.hpp index 852572ff6..a2297dd9c 100644 --- a/src/query/frontend/semantic/symbol_table.hpp +++ b/src/query/frontend/semantic/symbol_table.hpp @@ -3,11 +3,7 @@ #include <map> #include <string> -#include "boost/serialization/map.hpp" -#include "boost/serialization/serialization.hpp" - #include "query/frontend/ast/ast.hpp" -#include "query/frontend/semantic/symbol.capnp.h" #include "query/frontend/semantic/symbol.hpp" namespace query { @@ -31,40 +27,9 @@ class SymbolTable final { const auto &table() const { return table_; } - void Save(capnp::SymbolTable::Builder *builder) const { - builder->setPosition(position_); - auto list_builder = builder->initTable(table_.size()); - size_t i = 0; - for (const auto &entry : table_) { - auto entry_builder = list_builder[i++]; - entry_builder.setKey(entry.first); - 
auto sym_builder = entry_builder.initVal(); - entry.second.Save(&sym_builder); - } - } - - void Load(const capnp::SymbolTable::Reader &reader) { - position_ = reader.getPosition(); - table_.clear(); - for (const auto &entry_reader : reader.getTable()) { - int key = entry_reader.getKey(); - Symbol val; - val.Load(entry_reader.getVal()); - table_[key] = val; - } - } - private: int position_{0}; std::map<int, Symbol> table_; - - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar &position_; - ar &table_; - } }; } // namespace query diff --git a/src/query/interpret/awesome_memgraph_functions.cpp b/src/query/interpret/awesome_memgraph_functions.cpp index a9fb16341..856f85e07 100644 --- a/src/query/interpret/awesome_memgraph_functions.cpp +++ b/src/query/interpret/awesome_memgraph_functions.cpp @@ -607,22 +607,6 @@ TypedValue IndexInfo(const std::vector<TypedValue> &args, return std::vector<TypedValue>(info.begin(), info.end()); } -TypedValue WorkerId(const std::vector<TypedValue> &args, - database::GraphDbAccessor &) { - if (args.size() != 1U) { - throw QueryRuntimeException("workerId takes one argument"); - } - auto &arg = args[0]; - switch (arg.type()) { - case TypedValue::Type::Vertex: - return arg.ValueVertex().GlobalAddress().worker_id(); - case TypedValue::Type::Edge: - return arg.ValueEdge().GlobalAddress().worker_id(); - default: - throw QueryRuntimeException("workerId argument must be a vertex or edge"); - } -} - TypedValue Id(const std::vector<TypedValue> &args, database::GraphDbAccessor &dba) { if (args.size() != 1U) { @@ -727,7 +711,6 @@ NameToFunction(const std::string &function_name) { if (function_name == "COUNTER") return Counter; if (function_name == "COUNTERSET") return CounterSet; if (function_name == "INDEXINFO") return IndexInfo; - if (function_name == "WORKERID") return WorkerId; if (function_name == "ID") return Id; if (function_name == "TOSTRING") return ToString; return nullptr; diff --git a/src/query/interpret/eval.hpp b/src/query/interpret/eval.hpp index ab3b930d4..153d47b13 100644 --- a/src/query/interpret/eval.hpp +++ b/src/query/interpret/eval.hpp @@ -56,8 +56,6 @@ class ExpressionEvaluator : public TreeVisitor<TypedValue> { BLOCK_VISIT(Merge); BLOCK_VISIT(Unwind); BLOCK_VISIT(CreateIndex); - BLOCK_VISIT(ModifyUser); - BLOCK_VISIT(DropUser); #undef BLOCK_VISIT diff --git a/src/query/interpreter.cpp b/src/query/interpreter.cpp index feafb65d7..f17e83d8e 100644 --- a/src/query/interpreter.cpp +++ b/src/query/interpreter.cpp @@ -3,7 +3,6 @@ #include <glog/logging.h> #include <limits> -#include "distributed/plan_dispatcher.hpp" #include "query/exceptions.hpp" #include "query/frontend/ast/cypher_main_visitor.hpp" #include "query/frontend/opencypher/parser.hpp" @@ -20,35 +19,13 @@ DEFINE_VALIDATED_int32(query_plan_cache_ttl, 60, namespace query { -Interpreter::CachedPlan::CachedPlan( - plan::DistributedPlan distributed_plan, double cost, - distributed::PlanDispatcher *plan_dispatcher) - : distributed_plan_(std::move(distributed_plan)), - cost_(cost), - plan_dispatcher_(plan_dispatcher) { - if (plan_dispatcher_) { - for (const auto &plan_pair : distributed_plan_.worker_plans) { - const auto &plan_id = plan_pair.first; - const auto &worker_plan = plan_pair.second; - plan_dispatcher_->DispatchPlan(plan_id, worker_plan, - distributed_plan_.symbol_table); - } - } -} +Interpreter::CachedPlan::CachedPlan(plan::DistributedPlan distributed_plan, + double cost) + : 
distributed_plan_(std::move(distributed_plan)), cost_(cost) {} -Interpreter::CachedPlan::~CachedPlan() { - if (plan_dispatcher_) { - for (const auto &plan_pair : distributed_plan_.worker_plans) { - const auto &plan_id = plan_pair.first; - plan_dispatcher_->RemovePlan(plan_id); - } - } -} +Interpreter::CachedPlan::~CachedPlan() {} -Interpreter::Interpreter(database::GraphDb &db) - : plan_dispatcher_(db.type() == database::GraphDb::Type::DISTRIBUTED_MASTER - ? &db.plan_dispatcher() - : nullptr) {} +Interpreter::Interpreter(database::GraphDb &db) {} Interpreter::Results Interpreter::operator()( const std::string &query, database::GraphDbAccessor &db_accessor, @@ -134,30 +111,17 @@ std::shared_ptr<Interpreter::CachedPlan> Interpreter::QueryToPlan( std::tie(tmp_logical_plan, query_plan_cost_estimation) = MakeLogicalPlan(ast_storage, ctx); - DCHECK(ctx.db_accessor_.db().type() != - database::GraphDb::Type::DISTRIBUTED_WORKER); - if (ctx.db_accessor_.db().type() == - database::GraphDb::Type::DISTRIBUTED_MASTER) { - auto distributed_plan = MakeDistributedPlan( - *tmp_logical_plan, ctx.symbol_table_, next_plan_id_); - VLOG(10) << "[Interpreter] Created plan for distributed execution " - << next_plan_id_ - 1; - return std::make_shared<CachedPlan>(std::move(distributed_plan), - query_plan_cost_estimation, - plan_dispatcher_); - } else { - return std::make_shared<CachedPlan>( - plan::DistributedPlan{0, - std::move(tmp_logical_plan), - {}, - std::move(ast_storage), - ctx.symbol_table_}, - query_plan_cost_estimation, plan_dispatcher_); - } + return std::make_shared<CachedPlan>( + plan::DistributedPlan{0, + std::move(tmp_logical_plan), + {}, + std::move(ast_storage), + ctx.symbol_table_}, + query_plan_cost_estimation); } AstStorage Interpreter::QueryToAst(const StrippedQuery &stripped, - Context &ctx) { + Context &ctx) { if (!ctx.is_query_cached_) { // stripped query -> AST auto parser = [&] { diff --git a/src/query/interpreter.hpp b/src/query/interpreter.hpp index fae3b95bb..99c2cdd69 100644 --- a/src/query/interpreter.hpp +++ b/src/query/interpreter.hpp @@ -29,8 +29,7 @@ class Interpreter { class CachedPlan { public: /// Creates a cached plan and sends it to all the workers. - CachedPlan(plan::DistributedPlan distributed_plan, double cost, - distributed::PlanDispatcher *plan_dispatcher); + CachedPlan(plan::DistributedPlan distributed_plan, double cost); /// Removes the cached plan from all the workers. ~CachedPlan(); @@ -49,9 +48,6 @@ class Interpreter { plan::DistributedPlan distributed_plan_; double cost_; utils::Timer cache_timer_; - - // Optional, only available in a distributed master. - distributed::PlanDispatcher *plan_dispatcher_{nullptr}; }; using PlanCacheT = ConcurrentMap<HashType, std::shared_ptr<CachedPlan>>; @@ -175,9 +171,6 @@ class Interpreter { // so this lock probably won't impact performance much... utils::SpinLock antlr_lock_; - // Optional, not null only in a distributed master. 
- distributed::PlanDispatcher *plan_dispatcher_{nullptr}; - // stripped query -> CachedPlan std::shared_ptr<CachedPlan> QueryToPlan(const StrippedQuery &stripped, Context &ctx); diff --git a/src/query/plan/cost_estimator.hpp b/src/query/plan/cost_estimator.hpp index a8f9c0792..c8f94f407 100644 --- a/src/query/plan/cost_estimator.hpp +++ b/src/query/plan/cost_estimator.hpp @@ -185,8 +185,6 @@ class CostEstimator : public HierarchicalLogicalOperatorVisitor { bool Visit(Once &) override { return true; } bool Visit(CreateIndex &) override { return true; } - bool Visit(ModifyUser &) override { return true; } - bool Visit(DropUser &) override { return true; } // TODO: Cost estimate PullRemote and ProduceRemote? diff --git a/src/query/plan/distributed.cpp b/src/query/plan/distributed.cpp index d3d6a19c8..81fb61d01 100644 --- a/src/query/plan/distributed.cpp +++ b/src/query/plan/distributed.cpp @@ -2,12 +2,6 @@ #include <memory> -// TODO: Remove these includes for hacked cloning of logical operators via boost -// serialization when proper cloning is added. -#include <sstream> -#include "boost/archive/binary_iarchive.hpp" -#include "boost/archive/binary_oarchive.hpp" - #include "query/plan/operator.hpp" #include "query/plan/preprocess.hpp" #include "utils/exceptions.hpp" @@ -16,22 +10,6 @@ namespace query::plan { namespace { -std::pair<std::unique_ptr<LogicalOperator>, AstStorage> Clone( - const LogicalOperator &original_plan) { - // TODO: Add a proper Clone method to LogicalOperator - std::stringstream stream; - { - boost::archive::binary_oarchive out_archive(stream); - out_archive << &original_plan; - } - boost::archive::binary_iarchive in_archive(stream); - LogicalOperator *plan_copy = nullptr; - in_archive >> plan_copy; - return {std::unique_ptr<LogicalOperator>(plan_copy), - std::move(in_archive.template get_helper<AstStorage>( - AstStorage::kHelperId))}; -} - int64_t AddWorkerPlan(DistributedPlan &distributed_plan, std::atomic<int64_t> &next_plan_id, const std::shared_ptr<LogicalOperator> &worker_plan) { @@ -56,8 +34,6 @@ class IndependentSubtreeFinder : public HierarchicalLogicalOperatorVisitor { // These don't use any symbols bool Visit(Once &) override { return true; } bool Visit(CreateIndex &) override { return true; } - bool Visit(ModifyUser &) override { return true; } - bool Visit(DropUser &) override { return true; } bool PostVisit(ScanAll &scan) override { return true; } bool PostVisit(ScanAllByLabel &scan) override { return true; } @@ -359,23 +335,6 @@ class IndependentSubtreeFinder : public HierarchicalLogicalOperatorVisitor { return true; } - bool PostVisit(Synchronize &op) override { return true; } - - bool PostVisit(PullRemote &pull) override { - CHECK(!ContainsForbidden(pull.symbols())); - return true; - } - - bool PostVisit(PullRemoteOrderBy &pull) override { - CHECK(!ContainsForbidden(pull.symbols())); - for (auto *expr : pull.order_by()) { - UsedSymbolsCollector collector(*symbol_table_); - expr->Accept(collector); - CHECK(!ContainsForbidden(collector.symbols_)); - } - return true; - } - // Independent subtree std::shared_ptr<LogicalOperator> subtree_; // Immediate parent of `subtree_`. @@ -443,12 +402,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { using HierarchicalLogicalOperatorVisitor::PostVisit; using HierarchicalLogicalOperatorVisitor::PreVisit; - // Returns true if the plan should be run on master and workers. Note, that - // false is returned if the plan is already split. 
- bool ShouldSplit() const { return should_split_; } - - bool NeedsSynchronize() const { return needs_synchronize_; } - // ScanAll are all done on each machine locally. // We need special care when multiple ScanAll operators appear, this means we // need a Cartesian product. Both the left and the right side of Cartesian @@ -573,7 +526,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { } bool PostVisit(ScanAll &scan) override { prev_ops_.pop_back(); - should_split_ = true; if (has_scan_all_) { AddForCartesian(&scan); } @@ -587,7 +539,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { } bool PostVisit(ScanAllByLabel &scan) override { prev_ops_.pop_back(); - should_split_ = true; if (has_scan_all_) { AddForCartesian(&scan); } @@ -600,7 +551,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { } bool PostVisit(ScanAllByLabelPropertyRange &scan) override { prev_ops_.pop_back(); - should_split_ = true; if (has_scan_all_) { AddForCartesian(&scan); } @@ -613,7 +563,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { } bool PostVisit(ScanAllByLabelPropertyValue &scan) override { prev_ops_.pop_back(); - should_split_ = true; if (has_scan_all_) { AddForCartesian(&scan); } @@ -667,13 +616,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { } bool PostVisit(Skip &skip) override { prev_ops_.pop_back(); - if (ShouldSplit()) { - auto input = skip.input(); - auto pull_id = AddWorkerPlan(input); - Split(skip, std::make_shared<PullRemote>( - input, pull_id, - input->OutputSymbols(distributed_plan_.symbol_table))); - } return true; } @@ -688,14 +630,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { } bool PostVisit(Limit &limit) override { prev_ops_.pop_back(); - if (ShouldSplit()) { - // Shallow copy Limit - auto pull_id = AddWorkerPlan(std::make_shared<Limit>(limit)); - auto input = limit.input(); - Split(limit, std::make_shared<PullRemote>( - input, pull_id, - input->OutputSymbols(distributed_plan_.symbol_table))); - } return true; } @@ -707,40 +641,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { } bool PostVisit(OrderBy &order_by) override { prev_ops_.pop_back(); - // TODO: Associative combination of OrderBy - if (ShouldSplit()) { - std::unordered_set<Symbol> pull_symbols(order_by.output_symbols().begin(), - order_by.output_symbols().end()); - // Pull symbols need to also include those used in order by expressions. - // For example, `RETURN n AS m ORDER BY n.prop`, output symbols will - // contain `m`, while we also need to pull `n`. - // TODO: Consider creating a virtual symbol for expressions like `n.prop` - // and sending them instead. It's possible that the evaluated expression - // requires less network traffic than sending the value of the used symbol - // `n` itself. - for (const auto &expr : order_by.order_by()) { - UsedSymbolsCollector collector(distributed_plan_.symbol_table); - expr->Accept(collector); - pull_symbols.insert(collector.symbols_.begin(), - collector.symbols_.end()); - } - // Create a copy of OrderBy but with added symbols used in expressions, so - // that they can be pulled. 
- std::vector<std::pair<Ordering, Expression *>> ordering; - ordering.reserve(order_by.order_by().size()); - for (int i = 0; i < order_by.order_by().size(); ++i) { - ordering.emplace_back(order_by.compare().ordering()[i], - order_by.order_by()[i]); - } - auto worker_plan = std::make_shared<OrderBy>( - order_by.input(), ordering, - std::vector<Symbol>(pull_symbols.begin(), pull_symbols.end())); - auto pull_id = AddWorkerPlan(worker_plan); - auto merge_op = std::make_unique<PullRemoteOrderBy>( - worker_plan, pull_id, ordering, - std::vector<Symbol>(pull_symbols.begin(), pull_symbols.end())); - SplitOnPrevious(std::move(merge_op)); - } return true; } @@ -751,15 +651,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { } bool PostVisit(Distinct &distinct) override { prev_ops_.pop_back(); - if (ShouldSplit()) { - // Shallow copy Distinct - auto pull_id = AddWorkerPlan(std::make_shared<Distinct>(distinct)); - auto input = distinct.input(); - Split(distinct, - std::make_shared<PullRemote>( - input, pull_id, - input->OutputSymbols(distributed_plan_.symbol_table))); - } return true; } @@ -787,151 +678,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { Split(aggr_op, PlanCartesian(aggr_op.input())); return true; } - if (!ShouldSplit()) { - // We have already split the plan, so the aggregation we are visiting is - // on master. - return true; - } - auto is_associative = [&aggr_op]() { - for (const auto &aggr : aggr_op.aggregations()) { - switch (aggr.op) { - case Aggregation::Op::COUNT: - case Aggregation::Op::MIN: - case Aggregation::Op::MAX: - case Aggregation::Op::SUM: - case Aggregation::Op::AVG: - break; - default: - return false; - } - } - return true; - }; - if (!is_associative()) { - auto input = aggr_op.input(); - auto pull_id = AddWorkerPlan(input); - std::unordered_set<Symbol> pull_symbols(aggr_op.remember().begin(), - aggr_op.remember().end()); - for (const auto &elem : aggr_op.aggregations()) { - UsedSymbolsCollector collector(distributed_plan_.symbol_table); - elem.value->Accept(collector); - if (elem.key) elem.key->Accept(collector); - pull_symbols.insert(collector.symbols_.begin(), - collector.symbols_.end()); - } - Split(aggr_op, - std::make_shared<PullRemote>( - input, pull_id, - std::vector<Symbol>(pull_symbols.begin(), pull_symbols.end()))); - return true; - } - auto make_ident = [this](const auto &symbol) { - auto *ident = - distributed_plan_.ast_storage.Create<Identifier>(symbol.name()); - distributed_plan_.symbol_table[*ident] = symbol; - return ident; - }; - auto make_named_expr = [&](const auto &in_sym, const auto &out_sym) { - auto *nexpr = distributed_plan_.ast_storage.Create<NamedExpression>( - out_sym.name(), make_ident(in_sym)); - distributed_plan_.symbol_table[*nexpr] = out_sym; - return nexpr; - }; - auto make_merge_aggregation = [&](auto op, const auto &worker_sym) { - auto *worker_ident = make_ident(worker_sym); - auto merge_name = Aggregation::OpToString(op) + - std::to_string(worker_ident->uid()) + "<-" + - worker_sym.name(); - auto merge_sym = distributed_plan_.symbol_table.CreateSymbol( - merge_name, false, Symbol::Type::Number); - return Aggregate::Element{worker_ident, nullptr, op, merge_sym}; - }; - // Aggregate uses associative operation(s), so split the work across master - // and workers. 
- std::vector<Aggregate::Element> master_aggrs; - master_aggrs.reserve(aggr_op.aggregations().size()); - std::vector<Aggregate::Element> worker_aggrs; - worker_aggrs.reserve(aggr_op.aggregations().size()); - // We will need to create a Produce operator which moves the final results - // from new (merge) symbols into old aggregation symbols, because - // expressions following the aggregation expect the result in old symbols. - std::vector<NamedExpression *> produce_exprs; - produce_exprs.reserve(aggr_op.aggregations().size()); - for (const auto &aggr : aggr_op.aggregations()) { - switch (aggr.op) { - // Count, like sum, only needs to sum all of the results on master. - case Aggregation::Op::COUNT: - case Aggregation::Op::SUM: { - worker_aggrs.emplace_back(aggr); - auto merge_aggr = - make_merge_aggregation(Aggregation::Op::SUM, aggr.output_sym); - master_aggrs.emplace_back(merge_aggr); - produce_exprs.emplace_back( - make_named_expr(merge_aggr.output_sym, aggr.output_sym)); - break; - } - case Aggregation::Op::MIN: - case Aggregation::Op::MAX: { - worker_aggrs.emplace_back(aggr); - auto merge_aggr = make_merge_aggregation(aggr.op, aggr.output_sym); - master_aggrs.emplace_back(merge_aggr); - produce_exprs.emplace_back( - make_named_expr(merge_aggr.output_sym, aggr.output_sym)); - break; - } - // AVG is split into: - // * workers: SUM(xpr), COUNT(expr) - // * master: SUM(worker_sum) / toFloat(SUM(worker_count)) AS avg - case Aggregation::Op::AVG: { - auto worker_sum_sym = distributed_plan_.symbol_table.CreateSymbol( - aggr.output_sym.name() + "_SUM", false, Symbol::Type::Number); - Aggregate::Element worker_sum{aggr.value, aggr.key, - Aggregation::Op::SUM, worker_sum_sym}; - worker_aggrs.emplace_back(worker_sum); - auto worker_count_sym = distributed_plan_.symbol_table.CreateSymbol( - aggr.output_sym.name() + "_COUNT", false, Symbol::Type::Number); - Aggregate::Element worker_count{ - aggr.value, aggr.key, Aggregation::Op::COUNT, worker_count_sym}; - worker_aggrs.emplace_back(worker_count); - auto master_sum = - make_merge_aggregation(Aggregation::Op::SUM, worker_sum_sym); - master_aggrs.emplace_back(master_sum); - auto master_count = - make_merge_aggregation(Aggregation::Op::SUM, worker_count_sym); - master_aggrs.emplace_back(master_count); - auto *master_sum_ident = make_ident(master_sum.output_sym); - auto *master_count_ident = make_ident(master_count.output_sym); - auto *to_float = distributed_plan_.ast_storage.Create<Function>( - "TOFLOAT", std::vector<Expression *>{master_count_ident}); - auto *div_expr = - distributed_plan_.ast_storage.Create<DivisionOperator>( - master_sum_ident, to_float); - auto *as_avg = distributed_plan_.ast_storage.Create<NamedExpression>( - aggr.output_sym.name(), div_expr); - distributed_plan_.symbol_table[*as_avg] = aggr.output_sym; - produce_exprs.emplace_back(as_avg); - break; - } - default: - throw utils::NotYetImplemented("distributed planning"); - } - } - // Rewire master/worker aggregation. 
- auto worker_plan = std::make_shared<Aggregate>( - aggr_op.input(), worker_aggrs, aggr_op.group_by(), aggr_op.remember()); - auto pull_id = AddWorkerPlan(worker_plan); - std::vector<Symbol> pull_symbols; - pull_symbols.reserve(worker_aggrs.size() + aggr_op.remember().size()); - for (const auto &aggr : worker_aggrs) - pull_symbols.push_back(aggr.output_sym); - for (const auto &sym : aggr_op.remember()) pull_symbols.push_back(sym); - auto pull_op = - std::make_shared<PullRemote>(worker_plan, pull_id, pull_symbols); - auto master_aggr_op = std::make_shared<Aggregate>( - pull_op, master_aggrs, aggr_op.group_by(), aggr_op.remember()); - // Make our master Aggregate into Produce + Aggregate - auto master_plan = std::make_unique<Produce>(master_aggr_op, produce_exprs); - SplitOnPrevious(std::move(master_plan)); return true; } @@ -959,10 +705,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { bool Visit(CreateIndex &) override { return true; } - bool Visit(ModifyUser &) override { return true; } - - bool Visit(DropUser &) override { return true; } - // Accumulate is used only if the query performs any writes. In such a case, // we need to synchronize the work done on master and all workers. // Synchronization will force applying changes to distributed storage, and @@ -976,23 +718,7 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { } bool PostVisit(Accumulate &acc) override { prev_ops_.pop_back(); - DCHECK(needs_synchronize_) - << "Expected Accumulate to follow a write operator"; - // Create a synchronization point. Use pull remote to fetch accumulated - // symbols from workers. Accumulation is done through Synchronize, so we - // don't need the Accumulate operator itself. Local input operations are the - // same as on workers. - std::shared_ptr<PullRemote> pull_remote; - if (ShouldSplit()) { - auto pull_id = AddWorkerPlan(acc.input()); - pull_remote = - std::make_shared<PullRemote>(nullptr, pull_id, acc.symbols()); - } - auto sync = std::make_unique<Synchronize>(acc.input(), pull_remote, - acc.advance_command()); - SetOnPrevious(std::move(sync)); on_master_ = true; - needs_synchronize_ = false; return true; } @@ -1007,12 +733,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { if (!cartesian_branches_.empty()) { Split(op, PlanCartesian(op.input())); } - // Creation needs to be modified if running on master, so as to distribute - // node creation to workers. 
- if (!ShouldSplit()) { - op.set_on_random_worker(true); - } - needs_synchronize_ = true; return true; } @@ -1025,7 +745,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { if (!cartesian_branches_.empty()) { Split(op, PlanCartesian(op.input())); } - needs_synchronize_ = true; return true; } @@ -1038,7 +757,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { if (!cartesian_branches_.empty()) { Split(op, PlanCartesian(op.input())); } - needs_synchronize_ = true; return true; } @@ -1051,7 +769,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { if (!cartesian_branches_.empty()) { Split(op, PlanCartesian(op.input())); } - needs_synchronize_ = true; return true; } @@ -1064,7 +781,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { if (!cartesian_branches_.empty()) { Split(op, PlanCartesian(op.input())); } - needs_synchronize_ = true; return true; } @@ -1077,7 +793,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { if (!cartesian_branches_.empty()) { Split(op, PlanCartesian(op.input())); } - needs_synchronize_ = true; return true; } @@ -1090,7 +805,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { if (!cartesian_branches_.empty()) { Split(op, PlanCartesian(op.input())); } - needs_synchronize_ = true; return true; } @@ -1103,7 +817,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { if (!cartesian_branches_.empty()) { Split(op, PlanCartesian(op.input())); } - needs_synchronize_ = true; return true; } @@ -1148,11 +861,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { // not be sent to workers if we are executing on master. return branch; } - // Send the independent subtree to workers and wire it in PullRemote - auto id = AddWorkerPlan(branch.subtree); - branch.subtree = std::make_shared<PullRemote>( - branch.subtree, id, - branch.subtree->ModifiedSymbols(distributed_plan_.symbol_table)); return branch; } @@ -1165,8 +873,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { // corresponds to the above CartesianBranch. std::vector<std::vector<Symbol>> cartesian_symbols_; bool has_scan_all_ = false; - bool needs_synchronize_ = false; - bool should_split_ = false; // True if we have added a worker merge point on master, i.e. the rest of the // plan is executing on master. bool on_master_ = false; @@ -1199,7 +905,6 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { } int64_t AddWorkerPlan(const std::shared_ptr<LogicalOperator> &worker_plan) { - should_split_ = false; return ::query::plan::AddWorkerPlan(distributed_plan_, next_plan_id_, worker_plan); } @@ -1207,43 +912,4 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor { } // namespace -DistributedPlan MakeDistributedPlan(const LogicalOperator &original_plan, - const SymbolTable &symbol_table, - std::atomic<int64_t> &next_plan_id) { - DistributedPlan distributed_plan; - // If we will generate multiple worker plans, we will need to increment the - // next_plan_id for each one. 
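// Illustrative sketch (standalone, not Memgraph code): a reading of the
// finalization logic of the removed MakeDistributedPlan whose body continues
// below, reduced to its decision table. If the traversal never split the plan,
// the whole plan runs on the workers and the master only pulls (plus a
// Synchronize if the query writes); if the plan was split but still writes on
// master, a Synchronize is appended without a remote pull.
#include <iostream>
#include <string>

std::string FinalizeMasterPlan(bool still_unsplit, bool needs_synchronize) {
  if (still_unsplit) {
    return needs_synchronize ? "Synchronize(worker_plan, PullRemote)"
                             : "PullRemote(worker_plan)";
  }
  if (needs_synchronize) return "Synchronize(master_plan, nullptr)";
  return "master_plan";
}

int main() {
  std::cout << FinalizeMasterPlan(true, false) << '\n';  // read-only, unsplit
  std::cout << FinalizeMasterPlan(true, true) << '\n';   // writes, unsplit
  std::cout << FinalizeMasterPlan(false, true) << '\n';  // writes, already split
}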
- distributed_plan.master_plan_id = next_plan_id++; - distributed_plan.symbol_table = symbol_table; - std::tie(distributed_plan.master_plan, distributed_plan.ast_storage) = - Clone(original_plan); - DistributedPlanner planner(distributed_plan, next_plan_id); - distributed_plan.master_plan->Accept(planner); - if (planner.ShouldSplit()) { - // We haven't split the plan, this means that it should be the same on - // master and worker. We only need to prepend PullRemote to master plan. - std::shared_ptr<LogicalOperator> worker_plan( - std::move(distributed_plan.master_plan)); - auto pull_id = AddWorkerPlan(distributed_plan, next_plan_id, worker_plan); - // If the plan performs writes, we need to finish with Synchronize. - if (planner.NeedsSynchronize()) { - auto pull_remote = std::make_shared<PullRemote>( - nullptr, pull_id, - worker_plan->OutputSymbols(distributed_plan.symbol_table)); - distributed_plan.master_plan = - std::make_unique<Synchronize>(worker_plan, pull_remote, false); - } else { - distributed_plan.master_plan = std::make_unique<PullRemote>( - worker_plan, pull_id, - worker_plan->OutputSymbols(distributed_plan.symbol_table)); - } - } else if (planner.NeedsSynchronize()) { - // If the plan performs writes on master, we still need to Synchronize, even - // though we don't split the plan. - distributed_plan.master_plan = std::make_unique<Synchronize>( - std::move(distributed_plan.master_plan), nullptr, false); - } - return distributed_plan; -} - } // namespace query::plan diff --git a/src/query/plan/operator.cpp b/src/query/plan/operator.cpp index fd9bbb7c7..ef8c5564b 100644 --- a/src/query/plan/operator.cpp +++ b/src/query/plan/operator.cpp @@ -11,16 +11,9 @@ #include <unordered_set> #include <utility> -#include "boost/archive/binary_iarchive.hpp" -#include "boost/archive/binary_oarchive.hpp" -#include "boost/serialization/export.hpp" #include "glog/logging.h" #include "database/graph_db_accessor.hpp" -#include "distributed/bfs_rpc_clients.hpp" -#include "distributed/pull_rpc_clients.hpp" -#include "distributed/updates_rpc_clients.hpp" -#include "distributed/updates_rpc_server.hpp" #include "query/context.hpp" #include "query/exceptions.hpp" #include "query/frontend/ast/ast.hpp" @@ -121,15 +114,6 @@ CreateNode::CreateNode(const std::shared_ptr<LogicalOperator> &input, namespace { -// Returns a random worker id. Worker ID is obtained from the Db. -int RandomWorkerId(database::GraphDb &db) { - thread_local std::mt19937 gen_{std::random_device{}()}; - thread_local std::uniform_int_distribution<int> rand_; - - auto worker_ids = db.GetWorkerIds(); - return worker_ids[rand_(gen_) % worker_ids.size()]; -} - // Creates a vertex on this GraphDb. Returns a reference to vertex placed on the // frame. VertexAccessor &CreateLocalVertex(NodeAtom *node_atom, Frame &frame, @@ -148,34 +132,6 @@ VertexAccessor &CreateLocalVertex(NodeAtom *node_atom, Frame &frame, return frame[context.symbol_table_.at(*node_atom->identifier_)].ValueVertex(); } -// Creates a vertex on the GraphDb with the given worker_id. Can be this worker. -VertexAccessor &CreateVertexOnWorker(int worker_id, NodeAtom *node_atom, - Frame &frame, Context &context) { - auto &dba = context.db_accessor_; - - if (worker_id == dba.db().WorkerId()) - return CreateLocalVertex(node_atom, frame, context); - - std::unordered_map<storage::Property, query::TypedValue> properties; - - // Evaluator should use the latest accessors, as modified in this query, when - // setting properties on new nodes. 
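// Illustrative sketch (standalone, not Memgraph code): picking a worker for
// remote vertex creation, in the spirit of the removed RandomWorkerId above.
// A thread_local engine avoids locking when many query threads create nodes;
// the worker ids used here are hypothetical.
#include <iostream>
#include <random>
#include <vector>

int PickRandomWorker(const std::vector<int> &worker_ids) {
  thread_local std::mt19937 gen{std::random_device{}()};
  std::uniform_int_distribution<std::size_t> dist(0, worker_ids.size() - 1);
  return worker_ids[dist(gen)];
}

int main() {
  std::vector<int> workers = {0, 1, 2};  // hypothetical worker ids
  std::cout << "create vertex on worker " << PickRandomWorker(workers) << '\n';
}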
- ExpressionEvaluator evaluator(frame, context.parameters_, - context.symbol_table_, dba, GraphView::NEW); - for (auto &kv : node_atom->properties_) { - auto value = kv.second->Accept(evaluator); - if (!value.IsPropertyValue()) { - throw QueryRuntimeException("'{}' cannot be used as a property value.", - value.type()); - } - properties.emplace(kv.first.second, std::move(value)); - } - - auto new_node = - dba.InsertVertexIntoRemote(worker_id, node_atom->labels_, properties); - frame[context.symbol_table_.at(*node_atom->identifier_)] = new_node; - return frame[context.symbol_table_.at(*node_atom->identifier_)].ValueVertex(); -} } // namespace ACCEPT_WITH_INPUT(CreateNode) @@ -194,16 +150,11 @@ std::vector<Symbol> CreateNode::ModifiedSymbols( CreateNode::CreateNodeCursor::CreateNodeCursor(const CreateNode &self, database::GraphDbAccessor &db) - : self_(self), db_(db), input_cursor_(self.input_->MakeCursor(db)) {} + : self_(self), input_cursor_(self.input_->MakeCursor(db)) {} bool CreateNode::CreateNodeCursor::Pull(Frame &frame, Context &context) { if (input_cursor_->Pull(frame, context)) { - if (self_.on_random_worker_) { - CreateVertexOnWorker(RandomWorkerId(db_.db()), self_.node_atom_, frame, - context); - } else { - CreateLocalVertex(self_.node_atom_, frame, context); - } + CreateLocalVertex(self_.node_atom_, frame, context); return true; } return false; @@ -288,7 +239,7 @@ VertexAccessor &CreateExpand::CreateExpandCursor::OtherVertex( ExpectType(dest_node_symbol, dest_node_value, TypedValue::Type::Vertex); return dest_node_value.Value<VertexAccessor>(); } else { - return CreateVertexOnWorker(worker_id, self_.node_atom_, frame, context); + return CreateLocalVertex(self_.node_atom_, frame, context); } } @@ -1188,169 +1139,6 @@ class ExpandBfsCursor : public query::plan::Cursor { std::deque<std::pair<EdgeAccessor, VertexAccessor>> to_visit_next_; }; -class DistributedExpandBfsCursor : public query::plan::Cursor { - public: - DistributedExpandBfsCursor(const ExpandVariable &self, - database::GraphDbAccessor &db) - : self_(self), db_(db), input_cursor_(self_.input_->MakeCursor(db)) { - subcursor_ids_ = db_.db().bfs_subcursor_clients().CreateBfsSubcursors( - db_.transaction_id(), self_.direction(), self_.edge_types(), - self_.graph_view()); - db_.db().bfs_subcursor_clients().RegisterSubcursors(subcursor_ids_); - VLOG(10) << "BFS subcursors initialized"; - pull_pos_ = subcursor_ids_.end(); - } - - ~DistributedExpandBfsCursor() { - VLOG(10) << "Removing BFS subcursors"; - db_.db().bfs_subcursor_clients().RemoveBfsSubcursors(subcursor_ids_); - } - - bool Pull(Frame &frame, Context &context) override { - // TODO(mtomic): lambda filtering in distributed - if (self_.filter_lambda_.expression) { - throw utils::NotYetImplemented("lambda filtering in distributed BFS"); - } - - // Evaluator for the filtering condition and expansion depth. 
- ExpressionEvaluator evaluator(frame, context.parameters_, - context.symbol_table_, db_, - self_.graph_view_); - - while (true) { - TypedValue last_vertex; - - if (current_depth_ >= lower_bound_) { - for (; pull_pos_ != subcursor_ids_.end(); ++pull_pos_) { - auto vertex = db_.db().bfs_subcursor_clients().Pull( - pull_pos_->first, pull_pos_->second, &db_); - if (vertex) { - last_vertex = *vertex; - SwitchAccessor(last_vertex.ValueVertex(), self_.graph_view_); - break; - } - VLOG(10) << "Nothing to pull from " << pull_pos_->first; - } - } - - if (last_vertex.IsVertex()) { - // Handle existence flag - if (self_.existing_node_) { - TypedValue &node = frame[self_.node_symbol_]; - // Due to optional matching the existing node could be null - if (node.IsNull() || (node != last_vertex).ValueBool()) continue; - } else { - frame[self_.node_symbol_] = last_vertex; - } - - VLOG(10) << "Expanded to vertex: " << last_vertex; - - // Reconstruct path - std::vector<TypedValue> edges; - - // During path reconstruction, edges crossing worker boundary are - // obtained from edge owner to reduce network traffic. If the last - // worker queried for its path segment owned the crossing edge, - // `current_vertex_addr` will be set. Otherwise, `current_edge_addr` - // will be set. - std::experimental::optional<storage::VertexAddress> - current_vertex_addr = last_vertex.ValueVertex().GlobalAddress(); - std::experimental::optional<storage::EdgeAddress> current_edge_addr; - - while (true) { - DCHECK(static_cast<bool>(current_edge_addr) ^ - static_cast<bool>(current_vertex_addr)) - << "Exactly one of `current_edge_addr` or `current_vertex_addr` " - "should be set during path reconstruction"; - auto ret = current_edge_addr - ? db_.db().bfs_subcursor_clients().ReconstructPath( - subcursor_ids_, *current_edge_addr, &db_) - : db_.db().bfs_subcursor_clients().ReconstructPath( - subcursor_ids_, *current_vertex_addr, &db_); - edges.insert(edges.end(), ret.edges.begin(), ret.edges.end()); - current_vertex_addr = ret.next_vertex; - current_edge_addr = ret.next_edge; - if (!current_vertex_addr && !current_edge_addr) break; - } - std::reverse(edges.begin(), edges.end()); - for (auto &edge : edges) - SwitchAccessor(edge.ValueEdge(), self_.graph_view_); - frame[self_.edge_symbol_] = std::move(edges); - return true; - } - - // We're done pulling for this level - pull_pos_ = subcursor_ids_.begin(); - - // Try to expand again - if (current_depth_ < upper_bound_) { - VLOG(10) << "Trying to expand again..."; - current_depth_++; - db_.db().bfs_subcursor_clients().PrepareForExpand(subcursor_ids_, - false); - if (db_.db().bfs_subcursor_clients().ExpandLevel(subcursor_ids_)) { - continue; - } - } - - VLOG(10) << "Trying to get a new source..."; - // We're done with this source, try getting a new one - if (!input_cursor_->Pull(frame, context)) return false; - - auto vertex_value = frame[self_.input_symbol_]; - - // It is possible that the vertex is Null due to optional matching. - if (vertex_value.IsNull()) continue; - - auto vertex = vertex_value.ValueVertex(); - lower_bound_ = self_.lower_bound_ - ? EvaluateInt(evaluator, self_.lower_bound_, - "Min depth in breadth-first expansion") - : 1; - upper_bound_ = self_.upper_bound_ - ? 
EvaluateInt(evaluator, self_.upper_bound_, - "Max depth in breadth-first expansion") - : std::numeric_limits<int>::max(); - - if (upper_bound_ < 1) { - throw QueryRuntimeException( - "Max depth in breadth-first expansion must be at least 1"); - } - - VLOG(10) << "Starting BFS from " << vertex << " with limits " - << lower_bound_ << ".." << upper_bound_; - db_.db().bfs_subcursor_clients().PrepareForExpand(subcursor_ids_, true); - db_.db().bfs_subcursor_clients().SetSource(subcursor_ids_, - vertex.GlobalAddress()); - current_depth_ = 1; - } - } - - void Reset() override { - LOG(FATAL) << "`Reset` not supported in distributed"; - } - - private: - const ExpandVariable &self_; - database::GraphDbAccessor &db_; - const std::unique_ptr<query::plan::Cursor> input_cursor_; - - // Depth bounds. Calculated on each pull from the input, the initial value - // is irrelevant. - int lower_bound_{-1}; - int upper_bound_{-1}; - - // Current depth. Reset for each new expansion, the initial value is - // irrelevant. - int current_depth_{-1}; - - // Map from worker IDs to their corresponding subcursors. - std::unordered_map<int16_t, int64_t> subcursor_ids_; - - // Next worker master should try pulling from. - std::unordered_map<int16_t, int64_t>::iterator pull_pos_; -}; - class ExpandWeightedShortestPathCursor : public query::plan::Cursor { public: ExpandWeightedShortestPathCursor(const ExpandVariable &self, @@ -1591,11 +1379,7 @@ class ExpandWeightedShortestPathCursor : public query::plan::Cursor { std::unique_ptr<Cursor> ExpandVariable::MakeCursor( database::GraphDbAccessor &db) const { if (type_ == EdgeAtom::Type::BREADTH_FIRST) { - if (db.db().type() == database::GraphDb::Type::SINGLE_NODE) { - return std::make_unique<ExpandBfsCursor>(*this, db); - } else { - return std::make_unique<DistributedExpandBfsCursor>(*this, db); - } + return std::make_unique<ExpandBfsCursor>(*this, db); } else if (type_ == EdgeAtom::Type::WEIGHTED_SHORTEST_PATH) { return std::make_unique<ExpandWeightedShortestPathCursor>(*this, db); } else { @@ -3109,46 +2893,6 @@ void Union::UnionCursor::Reset() { right_cursor_->Reset(); } -bool PullRemote::Accept(HierarchicalLogicalOperatorVisitor &visitor) { - if (visitor.PreVisit(*this)) { - if (input_) input_->Accept(visitor); - } - return visitor.PostVisit(*this); -} - -std::vector<Symbol> PullRemote::OutputSymbols(const SymbolTable &table) const { - return input_ ? input_->OutputSymbols(table) : std::vector<Symbol>{}; -} - -std::vector<Symbol> PullRemote::ModifiedSymbols( - const SymbolTable &table) const { - auto symbols = symbols_; - if (input_) { - auto input_symbols = input_->ModifiedSymbols(table); - symbols.insert(symbols.end(), input_symbols.begin(), input_symbols.end()); - } - return symbols; -} - -std::vector<Symbol> Synchronize::ModifiedSymbols( - const SymbolTable &table) const { - auto symbols = input_->ModifiedSymbols(table); - if (pull_remote_) { - auto pull_symbols = pull_remote_->ModifiedSymbols(table); - symbols.insert(symbols.end(), pull_symbols.begin(), pull_symbols.end()); - } - return symbols; -} - -bool Synchronize::Accept(HierarchicalLogicalOperatorVisitor &visitor) { - if (visitor.PreVisit(*this)) { - // pull_remote_ is optional here, so visit it only if we continue visiting - // and pull_remote_ does exist. 
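// Illustrative sketch (standalone, not Memgraph code): the level-synchronized
// expansion used by the removed DistributedExpandBfsCursor above. The graph is
// partitioned across "workers"; on each level every partition expands its local
// frontier and reports whether it discovered anything, and the coordinator
// keeps expanding until no partition makes progress.
#include <iostream>
#include <unordered_map>
#include <unordered_set>
#include <vector>

using Graph = std::unordered_map<int, std::vector<int>>;

struct Partition {
  Graph edges;                       // edges owned by this partition
  std::unordered_set<int> frontier;  // vertices to expand at this level
};

int main() {
  // A small path graph 0-1-2-3-4 split over two hypothetical partitions.
  std::vector<Partition> parts(2);
  parts[0].edges = {{0, {1}}, {2, {3}}};
  parts[1].edges = {{1, {2}}, {3, {4}}};
  std::unordered_set<int> visited = {0};
  parts[0].frontier = {0};  // SetSource on the partition owning the source

  int depth = 0;
  bool expanded = true;
  while (expanded) {  // one ExpandLevel round trip per iteration
    expanded = false;
    std::unordered_set<int> discovered;
    for (auto &p : parts) {
      for (int v : p.frontier)
        for (int nbr : p.edges[v])
          if (visited.insert(nbr).second) discovered.insert(nbr);
      p.frontier.clear();
    }
    if (!discovered.empty()) {
      ++depth;
      expanded = true;
      // Route each discovered vertex to the partition that owns its edges.
      for (auto &p : parts)
        for (int v : discovered)
          if (p.edges.count(v)) p.frontier.insert(v);
    }
  }
  std::cout << "reached " << visited.size() << " vertices in " << depth
            << " levels\n";  // reached 5 vertices in 4 levels
}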
- input_->Accept(visitor) && pull_remote_ && pull_remote_->Accept(visitor); - } - return visitor.PostVisit(*this); -} - std::vector<Symbol> Cartesian::ModifiedSymbols(const SymbolTable &table) const { auto symbols = left_op_->ModifiedSymbols(table); auto right = right_op_->ModifiedSymbols(table); @@ -3165,427 +2909,8 @@ bool Cartesian::Accept(HierarchicalLogicalOperatorVisitor &visitor) { WITHOUT_SINGLE_INPUT(Cartesian); -PullRemoteOrderBy::PullRemoteOrderBy( - const std::shared_ptr<LogicalOperator> &input, int64_t plan_id, - const std::vector<std::pair<Ordering, Expression *>> &order_by, - const std::vector<Symbol> &symbols) - : input_(input), plan_id_(plan_id), symbols_(symbols) { - CHECK(input_ != nullptr) - << "PullRemoteOrderBy should always be constructed with input!"; - std::vector<Ordering> ordering; - ordering.reserve(order_by.size()); - order_by_.reserve(order_by.size()); - for (const auto &ordering_expression_pair : order_by) { - ordering.emplace_back(ordering_expression_pair.first); - order_by_.emplace_back(ordering_expression_pair.second); - } - compare_ = TypedValueVectorCompare(ordering); -} - -ACCEPT_WITH_INPUT(PullRemoteOrderBy); - -std::vector<Symbol> PullRemoteOrderBy::OutputSymbols( - const SymbolTable &table) const { - return input_->OutputSymbols(table); -} - -std::vector<Symbol> PullRemoteOrderBy::ModifiedSymbols( - const SymbolTable &table) const { - return input_->ModifiedSymbols(table); -} - namespace { -/** Helper class that wraps remote pulling for cursors that handle results from - * distributed workers. - * - * The command_id should be the command_id at the initialization of a cursor. - */ -class RemotePuller { - public: - RemotePuller(database::GraphDbAccessor &db, - const std::vector<Symbol> &symbols, int64_t plan_id, - tx::CommandId command_id) - : db_(db), symbols_(symbols), plan_id_(plan_id), command_id_(command_id) { - worker_ids_ = db_.db().pull_clients().GetWorkerIds(); - // Remove master from the worker ids list. - worker_ids_.erase(std::find(worker_ids_.begin(), worker_ids_.end(), 0)); - } - - void Initialize(Context &context) { - if (!remote_pulls_initialized_) { - VLOG(10) << "[RemotePuller] [" << context.db_accessor_.transaction_id() - << "] [" << plan_id_ << "] [" << command_id_ << "] initialized"; - for (auto &worker_id : worker_ids_) { - UpdatePullForWorker(worker_id, context); - } - remote_pulls_initialized_ = true; - } - } - - void Update(Context &context) { - // If we don't have results for a worker, check if his remote pull - // finished and save results locally. - - auto move_frames = [this, &context](int worker_id, auto remote_results) { - VLOG(10) << "[RemotePuller] [" << context.db_accessor_.transaction_id() - << "] [" << plan_id_ << "] [" << command_id_ - << "] received results from " << worker_id; - remote_results_[worker_id] = std::move(remote_results.frames); - // Since we return and remove results from the back of the vector, - // reverse the results so the first to return is on the end of the - // vector. 
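// Illustrative sketch (standalone, not Memgraph code): the buffering trick in
// the comment above. A received batch is reversed once so that popping from the
// back of the vector (an O(1) operation) still hands out the frames in the
// order the worker produced them.
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> batch = {"frame0", "frame1", "frame2"};
  std::reverse(batch.begin(), batch.end());  // first frame to return is at the back
  while (!batch.empty()) {
    std::cout << batch.back() << '\n';  // frame0, frame1, frame2
    batch.pop_back();
  }
}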
- std::reverse(remote_results_[worker_id].begin(), - remote_results_[worker_id].end()); - }; - - for (auto &worker_id : worker_ids_) { - if (!remote_results_[worker_id].empty()) continue; - - auto found_it = remote_pulls_.find(worker_id); - if (found_it == remote_pulls_.end()) continue; - - auto &remote_pull = found_it->second; - if (!remote_pull.IsReady()) continue; - - auto remote_results = remote_pull.get(); - switch (remote_results.pull_state) { - case distributed::PullState::CURSOR_EXHAUSTED: - VLOG(10) << "[RemotePuller] [" - << context.db_accessor_.transaction_id() << "] [" << plan_id_ - << "] [" << command_id_ << "] cursor exhausted from " - << worker_id; - move_frames(worker_id, remote_results); - remote_pulls_.erase(found_it); - break; - case distributed::PullState::CURSOR_IN_PROGRESS: - VLOG(10) << "[RemotePuller] [" - << context.db_accessor_.transaction_id() << "] [" << plan_id_ - << "] [" << command_id_ << "] cursor in progress from " - << worker_id; - move_frames(worker_id, remote_results); - UpdatePullForWorker(worker_id, context); - break; - case distributed::PullState::SERIALIZATION_ERROR: - throw mvcc::SerializationError( - "Serialization error occured during PullRemote !"); - case distributed::PullState::LOCK_TIMEOUT_ERROR: - throw utils::LockTimeoutException( - "LockTimeout error occured during PullRemote !"); - case distributed::PullState::UPDATE_DELETED_ERROR: - throw QueryRuntimeException( - "RecordDeleted error ocured during PullRemote !"); - case distributed::PullState::RECONSTRUCTION_ERROR: - throw query::ReconstructionException(); - case distributed::PullState::UNABLE_TO_DELETE_VERTEX_ERROR: - throw RemoveAttachedVertexException(); - case distributed::PullState::HINTED_ABORT_ERROR: - throw HintedAbortError(); - case distributed::PullState::QUERY_ERROR: - throw QueryRuntimeException( - "Query runtime error occurred duing PullRemote !"); - } - } - } - - auto Workers() { return worker_ids_; } - - int GetWorkerId(int worker_id_index) { return worker_ids_[worker_id_index]; } - - size_t WorkerCount() { return worker_ids_.size(); } - - void ClearWorkers() { worker_ids_.clear(); } - - bool HasPendingPulls() { return !remote_pulls_.empty(); } - - bool HasPendingPullFromWorker(int worker_id) { - return remote_pulls_.find(worker_id) != remote_pulls_.end(); - } - - bool HasResultsFromWorker(int worker_id) { - return !remote_results_[worker_id].empty(); - } - - std::vector<query::TypedValue> PopResultFromWorker(int worker_id) { - auto result = remote_results_[worker_id].back(); - remote_results_[worker_id].pop_back(); - - // Remove the worker if we exhausted all locally stored results and there - // are no more pending remote pulls for that worker. 
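// Illustrative sketch (standalone, not Memgraph code): the bookkeeping hinted
// at in the comment above. A worker stays in the pull rotation only while it
// still has buffered results or an outstanding asynchronous pull; otherwise it
// is dropped so later round-robin passes skip it.
#include <algorithm>
#include <iostream>
#include <unordered_map>
#include <unordered_set>
#include <vector>

void MaybeDropWorker(int worker_id, std::vector<int> &worker_ids,
                     const std::unordered_map<int, std::vector<int>> &buffered,
                     const std::unordered_set<int> &pending_pulls) {
  auto it = buffered.find(worker_id);
  bool has_results = it != buffered.end() && !it->second.empty();
  bool has_pending = pending_pulls.count(worker_id) > 0;
  if (!has_results && !has_pending)
    worker_ids.erase(
        std::find(worker_ids.begin(), worker_ids.end(), worker_id));
}

int main() {
  std::vector<int> workers = {1, 2, 3};
  std::unordered_map<int, std::vector<int>> buffered = {
      {1, {}}, {2, {42}}, {3, {}}};
  std::unordered_set<int> pending = {3};  // worker 3 still has a pull in flight
  for (int w : std::vector<int>{1, 2, 3})
    MaybeDropWorker(w, workers, buffered, pending);
  for (int w : workers) std::cout << w << ' ';  // 2 3
  std::cout << '\n';
}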
- if (remote_results_[worker_id].empty() && - remote_pulls_.find(worker_id) == remote_pulls_.end()) { - worker_ids_.erase( - std::find(worker_ids_.begin(), worker_ids_.end(), worker_id)); - } - - return result; - } - - private: - database::GraphDbAccessor &db_; - std::vector<Symbol> symbols_; - int64_t plan_id_; - tx::CommandId command_id_; - std::unordered_map<int, utils::Future<distributed::PullData>> remote_pulls_; - std::unordered_map<int, std::vector<std::vector<query::TypedValue>>> - remote_results_; - std::vector<int> worker_ids_; - bool remote_pulls_initialized_ = false; - - void UpdatePullForWorker(int worker_id, Context &context) { - remote_pulls_[worker_id] = - db_.db().pull_clients().Pull(db_, worker_id, plan_id_, command_id_, - context.parameters_, symbols_, false); - } -}; - -class PullRemoteCursor : public Cursor { - public: - PullRemoteCursor(const PullRemote &self, database::GraphDbAccessor &db) - : self_(self), - input_cursor_(self.input() ? self.input()->MakeCursor(db) : nullptr), - command_id_(db.transaction().cid()), - remote_puller_( - RemotePuller(db, self.symbols(), self.plan_id(), command_id_)) {} - - bool Pull(Frame &frame, Context &context) override { - if (context.db_accessor_.should_abort()) throw HintedAbortError(); - remote_puller_.Initialize(context); - - bool have_remote_results = false; - while (!have_remote_results && remote_puller_.WorkerCount() > 0) { - if (context.db_accessor_.should_abort()) throw HintedAbortError(); - remote_puller_.Update(context); - - // Get locally stored results from workers in a round-robin fasion. - int num_workers = remote_puller_.WorkerCount(); - for (int i = 0; i < num_workers; ++i) { - int worker_id_index = - (last_pulled_worker_id_index_ + i + 1) % num_workers; - int worker_id = remote_puller_.GetWorkerId(worker_id_index); - - if (remote_puller_.HasResultsFromWorker(worker_id)) { - last_pulled_worker_id_index_ = worker_id_index; - have_remote_results = true; - break; - } - } - - if (!have_remote_results) { - if (!remote_puller_.HasPendingPulls()) { - remote_puller_.ClearWorkers(); - break; - } - - // If there are no remote results available, try to pull and return - // local results. - if (input_cursor_ && input_cursor_->Pull(frame, context)) { - VLOG(10) << "[PullRemoteCursor] [" - << context.db_accessor_.transaction_id() << "] [" - << self_.plan_id() << "] [" << command_id_ - << "] producing local results "; - return true; - } - - VLOG(10) << "[PullRemoteCursor] [" - << context.db_accessor_.transaction_id() << "] [" - << self_.plan_id() << "] [" << command_id_ - << "] no results available, sleeping "; - // If there aren't any local/remote results available, sleep. - std::this_thread::sleep_for( - std::chrono::microseconds(FLAGS_remote_pull_sleep_micros)); - } - } - - // No more remote results, make sure local results get exhausted. 
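// Illustrative sketch (standalone, not Memgraph code): the round-robin scan
// used by the removed PullRemoteCursor above. The search for the next worker
// with buffered results starts just after the worker that produced the
// previous frame, so no single worker can starve the others.
#include <iostream>
#include <unordered_map>
#include <vector>

int NextWorkerWithResults(const std::vector<int> &worker_ids,
                          const std::unordered_map<int, int> &buffered_counts,
                          int &last_index) {
  int n = static_cast<int>(worker_ids.size());
  for (int i = 0; i < n; ++i) {
    int index = (last_index + i + 1) % n;
    if (buffered_counts.at(worker_ids[index]) > 0) {
      last_index = index;
      return worker_ids[index];
    }
  }
  return -1;  // nothing buffered right now
}

int main() {
  std::vector<int> workers = {1, 2, 3};
  std::unordered_map<int, int> buffered = {{1, 2}, {2, 0}, {3, 1}};
  int last = 0;  // the previous frame came from workers[0]
  std::cout << NextWorkerWithResults(workers, buffered, last) << '\n';  // 3
  std::cout << NextWorkerWithResults(workers, buffered, last) << '\n';  // 1
}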
- if (!have_remote_results) { - if (input_cursor_ && input_cursor_->Pull(frame, context)) { - VLOG(10) << "[PullRemoteCursor] [" - << context.db_accessor_.transaction_id() << "] [" - << self_.plan_id() << "] [" << command_id_ - << "] producing local results "; - return true; - } - return false; - } - - { - int worker_id = remote_puller_.GetWorkerId(last_pulled_worker_id_index_); - VLOG(10) << "[PullRemoteCursor] [" - << context.db_accessor_.transaction_id() << "] [" - << self_.plan_id() << "] [" << command_id_ - << "] producing results from worker " << worker_id; - auto result = remote_puller_.PopResultFromWorker(worker_id); - for (size_t i = 0; i < self_.symbols().size(); ++i) { - frame[self_.symbols()[i]] = std::move(result[i]); - } - } - return true; - } - - void Reset() override { - throw QueryRuntimeException("Unsupported: Reset during PullRemote!"); - } - - private: - const PullRemote &self_; - const std::unique_ptr<Cursor> input_cursor_; - tx::CommandId command_id_; - RemotePuller remote_puller_; - int last_pulled_worker_id_index_ = 0; -}; - -class SynchronizeCursor : public Cursor { - public: - SynchronizeCursor(const Synchronize &self, database::GraphDbAccessor &db) - : self_(self), - input_cursor_(self.input()->MakeCursor(db)), - pull_remote_cursor_( - self.pull_remote() ? self.pull_remote()->MakeCursor(db) : nullptr), - command_id_(db.transaction().cid()) {} - - bool Pull(Frame &frame, Context &context) override { - if (!initial_pull_done_) { - InitialPull(frame, context); - initial_pull_done_ = true; - } - // Yield local stuff while available. - if (!local_frames_.empty()) { - VLOG(10) << "[SynchronizeCursor] [" - << context.db_accessor_.transaction_id() - << "] producing local results"; - auto &result = local_frames_.back(); - for (size_t i = 0; i < frame.elems().size(); ++i) { - if (self_.advance_command()) { - query::ReconstructTypedValue(result[i]); - } - frame.elems()[i] = std::move(result[i]); - } - local_frames_.resize(local_frames_.size() - 1); - return true; - } - - // We're out of local stuff, yield from pull_remote if available. - if (pull_remote_cursor_ && pull_remote_cursor_->Pull(frame, context)) { - VLOG(10) << "[SynchronizeCursor] [" - << context.db_accessor_.transaction_id() - << "] producing remote results"; - return true; - } - - return false; - } - - void Reset() override { - throw QueryRuntimeException("Unsupported: Reset during Synchronize!"); - } - - private: - const Synchronize &self_; - const std::unique_ptr<Cursor> input_cursor_; - const std::unique_ptr<Cursor> pull_remote_cursor_; - bool initial_pull_done_{false}; - std::vector<std::vector<TypedValue>> local_frames_; - tx::CommandId command_id_; - - void InitialPull(Frame &frame, Context &context) { - VLOG(10) << "[SynchronizeCursor] [" << context.db_accessor_.transaction_id() - << "] initial pull"; - auto &db = context.db_accessor_.db(); - - // Tell all workers to accumulate, only if there is a remote pull. 
- std::vector<utils::Future<distributed::PullData>> worker_accumulations; - if (pull_remote_cursor_) { - for (auto worker_id : db.pull_clients().GetWorkerIds()) { - if (worker_id == db.WorkerId()) continue; - worker_accumulations.emplace_back(db.pull_clients().Pull( - context.db_accessor_, worker_id, self_.pull_remote()->plan_id(), - command_id_, context.parameters_, self_.pull_remote()->symbols(), - true, 0)); - } - } - - // Accumulate local results - while (input_cursor_->Pull(frame, context)) { - local_frames_.emplace_back(); - auto &local_frame = local_frames_.back(); - local_frame.reserve(frame.elems().size()); - for (auto &elem : frame.elems()) { - local_frame.emplace_back(std::move(elem)); - } - } - - // Wait for all workers to finish accumulation (first sync point). - for (auto &accu : worker_accumulations) { - switch (accu.get().pull_state) { - case distributed::PullState::CURSOR_EXHAUSTED: - continue; - case distributed::PullState::CURSOR_IN_PROGRESS: - throw QueryRuntimeException( - "Expected exhausted cursor after remote pull accumulate"); - case distributed::PullState::SERIALIZATION_ERROR: - throw mvcc::SerializationError( - "Failed to perform remote accumulate due to " - "SerializationError"); - case distributed::PullState::UPDATE_DELETED_ERROR: - throw QueryRuntimeException( - "Failed to perform remote accumulate due to " - "RecordDeletedError"); - case distributed::PullState::LOCK_TIMEOUT_ERROR: - throw utils::LockTimeoutException( - "Failed to perform remote accumulate due to " - "LockTimeoutException"); - case distributed::PullState::RECONSTRUCTION_ERROR: - throw QueryRuntimeException( - "Failed to perform remote accumulate due to " - "ReconstructionError"); - case distributed::PullState::UNABLE_TO_DELETE_VERTEX_ERROR: - throw RemoveAttachedVertexException(); - case distributed::PullState::HINTED_ABORT_ERROR: - throw HintedAbortError(); - case distributed::PullState::QUERY_ERROR: - throw QueryRuntimeException( - "Failed to perform remote accumulate due to Query runtime " - "error"); - } - } - - if (self_.advance_command()) { - context.db_accessor_.AdvanceCommand(); - } - - // Make all the workers apply their deltas. - auto tx_id = context.db_accessor_.transaction_id(); - auto apply_futures = - db.updates_clients().UpdateApplyAll(db.WorkerId(), tx_id); - db.updates_server().Apply(tx_id); - for (auto &future : apply_futures) { - switch (future.get()) { - case distributed::UpdateResult::SERIALIZATION_ERROR: - throw mvcc::SerializationError( - "Failed to apply deferred updates due to SerializationError"); - case distributed::UpdateResult::UNABLE_TO_DELETE_VERTEX_ERROR: - throw RemoveAttachedVertexException(); - case distributed::UpdateResult::UPDATE_DELETED_ERROR: - throw QueryRuntimeException( - "Failed to apply deferred updates due to RecordDeletedError"); - case distributed::UpdateResult::LOCK_TIMEOUT_ERROR: - throw utils::LockTimeoutException( - "Failed to apply deferred update due to LockTimeoutException"); - case distributed::UpdateResult::DONE: - break; - } - } - - // If the command advanced, let the workers know. 
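// Illustrative sketch (standalone, not Memgraph code): the shape of the
// synchronization performed by the removed SynchronizeCursor above -- start
// asynchronous accumulation on every worker, accumulate locally in parallel,
// wait on all futures, and only then apply deferred updates and yield results.
#include <future>
#include <iostream>
#include <numeric>
#include <vector>

int AccumulateOnWorker(int worker_id) {
  // Stand-in for a remote pull that accumulates without returning frames.
  return worker_id * 10;
}

int main() {
  std::vector<int> worker_ids = {1, 2, 3};
  std::vector<std::future<int>> accumulations;
  for (int w : worker_ids)  // step 1: kick off accumulation on the workers
    accumulations.push_back(
        std::async(std::launch::async, AccumulateOnWorker, w));

  std::vector<int> local = {1, 2, 3};  // step 2: accumulate local results
  int local_sum = std::accumulate(local.begin(), local.end(), 0);

  int remote_sum = 0;  // step 3: first synchronization point
  for (auto &f : accumulations) remote_sum += f.get();

  // Steps 4-7 (advance the command, apply deferred updates, notify workers)
  // would happen here, before any result is yielded.
  std::cout << "local " << local_sum << ", remote " << remote_sum << '\n';
}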
- if (self_.advance_command()) { - auto futures = - db.pull_clients().NotifyAllTransactionCommandAdvanced(tx_id); - for (auto &future : futures) future.wait(); - } - } -}; - class CartesianCursor : public Cursor { public: CartesianCursor(const Cartesian &self, database::GraphDbAccessor &db) @@ -3666,268 +2991,11 @@ class CartesianCursor : public Cursor { bool cartesian_pull_initialized_{false}; }; -class PullRemoteOrderByCursor : public Cursor { - public: - PullRemoteOrderByCursor(const PullRemoteOrderBy &self, - database::GraphDbAccessor &db) - : self_(self), - input_(self.input()->MakeCursor(db)), - command_id_(db.transaction().cid()), - remote_puller_( - RemotePuller(db, self.symbols(), self.plan_id(), command_id_)) {} - - bool Pull(Frame &frame, Context &context) { - if (context.db_accessor_.should_abort()) throw HintedAbortError(); - ExpressionEvaluator evaluator(frame, context.parameters_, - context.symbol_table_, context.db_accessor_, - GraphView::OLD); - - auto evaluate_result = [this, &evaluator]() { - std::vector<TypedValue> order_by; - order_by.reserve(self_.order_by().size()); - for (auto expression_ptr : self_.order_by()) { - order_by.emplace_back(expression_ptr->Accept(evaluator)); - } - return order_by; - }; - - auto restore_frame = [&frame, - this](const std::vector<TypedValue> &restore_from) { - for (size_t i = 0; i < restore_from.size(); ++i) { - frame[self_.symbols()[i]] = restore_from[i]; - } - }; - - if (!merge_initialized_) { - VLOG(10) << "[PullRemoteOrderBy] [" - << context.db_accessor_.transaction_id() << "] [" - << self_.plan_id() << "] [" << command_id_ << "] initialize"; - remote_puller_.Initialize(context); - missing_results_from_ = remote_puller_.Workers(); - missing_master_result_ = true; - merge_initialized_ = true; - } - - if (missing_master_result_) { - if (input_->Pull(frame, context)) { - std::vector<TypedValue> output; - output.reserve(self_.symbols().size()); - for (const Symbol &symbol : self_.symbols()) { - output.emplace_back(frame[symbol]); - } - - merge_.push_back(MergeResultItem{std::experimental::nullopt, output, - evaluate_result()}); - } - missing_master_result_ = false; - } - - while (!missing_results_from_.empty()) { - if (context.db_accessor_.should_abort()) throw HintedAbortError(); - remote_puller_.Update(context); - - bool has_all_result = true; - for (auto &worker_id : missing_results_from_) { - if (!remote_puller_.HasResultsFromWorker(worker_id) && - remote_puller_.HasPendingPullFromWorker(worker_id)) { - has_all_result = false; - break; - } - } - - if (!has_all_result) { - VLOG(10) << "[PullRemoteOrderByCursor] [" - << context.db_accessor_.transaction_id() << "] [" - << self_.plan_id() << "] [" << command_id_ - << "] missing results, sleep"; - // If we don't have results from all workers, sleep before continuing. - std::this_thread::sleep_for( - std::chrono::microseconds(FLAGS_remote_pull_sleep_micros)); - continue; - } - - for (auto &worker_id : missing_results_from_) { - // It is possible that the workers remote pull finished but it didn't - // return any results. In that case, just skip it. 
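// Illustrative sketch (standalone, not Memgraph code): the merge strategy of
// the removed PullRemoteOrderByCursor above. Every source (master or worker)
// delivers its rows already ordered, so the cursor keeps one candidate per
// source, emits the smallest, and refills only the slot it just consumed.
#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  // Hypothetical already-sorted result streams, one per source.
  std::vector<std::vector<int>> sources = {{1, 4, 9}, {2, 3}, {5}};
  std::vector<std::size_t> pos(sources.size(), 0);

  struct Candidate { int value; std::size_t source; };
  std::vector<Candidate> merge;
  for (std::size_t s = 0; s < sources.size(); ++s)
    if (!sources[s].empty()) merge.push_back({sources[s][0], s});

  while (!merge.empty()) {
    auto it = std::min_element(
        merge.begin(), merge.end(),
        [](const Candidate &a, const Candidate &b) { return a.value < b.value; });
    std::cout << it->value << ' ';  // 1 2 3 4 5 9
    std::size_t s = it->source;
    merge.erase(it);
    if (++pos[s] < sources[s].size()) merge.push_back({sources[s][pos[s]], s});
  }
  std::cout << '\n';
}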
- if (!remote_puller_.HasResultsFromWorker(worker_id)) continue; - auto remote_result = remote_puller_.PopResultFromWorker(worker_id); - restore_frame(remote_result); - merge_.push_back( - MergeResultItem{worker_id, remote_result, evaluate_result()}); - } - - missing_results_from_.clear(); - } - - if (merge_.empty()) return false; - - auto result_it = std::min_element( - merge_.begin(), merge_.end(), [this](const auto &lhs, const auto &rhs) { - return self_.compare()(lhs.order_by, rhs.order_by); - }); - - restore_frame(result_it->remote_result); - - if (result_it->worker_id) { - VLOG(10) << "[PullRemoteOrderByCursor] [" - << context.db_accessor_.transaction_id() << "] [" - << self_.plan_id() << "] [" << command_id_ - << "] producing results from worker " - << result_it->worker_id.value(); - missing_results_from_.push_back(result_it->worker_id.value()); - } else { - VLOG(10) << "[PullRemoteOrderByCursor] [" - << context.db_accessor_.transaction_id() << "] [" - << self_.plan_id() << "] [" << command_id_ - << "] producing local results"; - missing_master_result_ = true; - } - - merge_.erase(result_it); - return true; - } - - void Reset() { - throw QueryRuntimeException("Unsupported: Reset during PullRemoteOrderBy!"); - } - - private: - struct MergeResultItem { - std::experimental::optional<int> worker_id; - std::vector<TypedValue> remote_result; - std::vector<TypedValue> order_by; - }; - - const PullRemoteOrderBy &self_; - std::unique_ptr<Cursor> input_; - tx::CommandId command_id_; - RemotePuller remote_puller_; - std::vector<MergeResultItem> merge_; - std::vector<int> missing_results_from_; - bool missing_master_result_ = false; - bool merge_initialized_ = false; -}; - } // namespace -std::unique_ptr<Cursor> PullRemote::MakeCursor( - database::GraphDbAccessor &db) const { - return std::make_unique<PullRemoteCursor>(*this, db); -} - -std::unique_ptr<Cursor> Synchronize::MakeCursor( - database::GraphDbAccessor &db) const { - return std::make_unique<SynchronizeCursor>(*this, db); -} - std::unique_ptr<Cursor> Cartesian::MakeCursor( database::GraphDbAccessor &db) const { return std::make_unique<CartesianCursor>(*this, db); } -std::unique_ptr<Cursor> PullRemoteOrderBy::MakeCursor( - database::GraphDbAccessor &db) const { - return std::make_unique<PullRemoteOrderByCursor>(*this, db); -} - -ModifyUser::ModifyUser(std::string username, Expression *password, - bool is_create) - : username_(std::move(username)), - password_(password), - is_create_(is_create) {} - -bool ModifyUser::Accept(HierarchicalLogicalOperatorVisitor &visitor) { - return visitor.Visit(*this); -} - -WITHOUT_SINGLE_INPUT(ModifyUser) - -class ModifyUserCursor : public Cursor { - public: - ModifyUserCursor(database::GraphDbAccessor &db) : db_(db) {} - - bool Pull(Frame &frame, Context &ctx) override { - if (ctx.in_explicit_transaction_) { - throw UserModificationInMulticommandTxException(); - } - ExpressionEvaluator evaluator(frame, ctx.parameters_, ctx.symbol_table_, - db_, GraphView::OLD); - throw utils::NotYetImplemented("user auth"); - } - - void Reset() override { throw utils::NotYetImplemented("user auth"); } - - private: - database::GraphDbAccessor &db_; -}; - -std::unique_ptr<Cursor> ModifyUser::MakeCursor( - database::GraphDbAccessor &db) const { - return std::make_unique<ModifyUserCursor>(db); -} - -bool DropUser::Accept(HierarchicalLogicalOperatorVisitor &visitor) { - return visitor.Visit(*this); -} - -WITHOUT_SINGLE_INPUT(DropUser) - -class DropUserCursor : public Cursor { - public: - DropUserCursor() {} - - bool 
Pull(Frame &, Context &ctx) override { - if (ctx.in_explicit_transaction_) { - throw UserModificationInMulticommandTxException(); - } - throw utils::NotYetImplemented("user auth"); - } - - void Reset() override { throw utils::NotYetImplemented("user auth"); } -}; - -std::unique_ptr<Cursor> DropUser::MakeCursor( - database::GraphDbAccessor &db) const { - return std::make_unique<DropUserCursor>(); -} - } // namespace query::plan - -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Once); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::CreateNode); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::CreateExpand); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ScanAll); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ScanAllByLabel); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ScanAllByLabelPropertyRange); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ScanAllByLabelPropertyValue); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Expand); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ExpandVariable); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Filter); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Produce); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ConstructNamedPath); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Delete); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::SetProperty); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::SetProperties); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::SetLabels); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::RemoveProperty); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::RemoveLabels); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ExpandUniquenessFilter<EdgeAccessor>); -BOOST_CLASS_EXPORT_IMPLEMENT( - query::plan::ExpandUniquenessFilter<VertexAccessor>); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Accumulate); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Aggregate); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Skip); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Limit); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::OrderBy); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Merge); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Optional); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Unwind); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Distinct); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::CreateIndex); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Union); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::PullRemote); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Synchronize); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Cartesian); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::PullRemoteOrderBy); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ModifyUser); -BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::DropUser); diff --git a/src/query/plan/operator.lcp b/src/query/plan/operator.lcp index dcf6f6b85..816e52dd1 100644 --- a/src/query/plan/operator.lcp +++ b/src/query/plan/operator.lcp @@ -10,14 +10,6 @@ #include <utility> #include <vector> -#include <boost/serialization/shared_ptr_helper.hpp> -#include "boost/serialization/base_object.hpp" -#include "boost/serialization/export.hpp" -#include "boost/serialization/serialization.hpp" -#include "boost/serialization/shared_ptr.hpp" -#include "boost/serialization/unique_ptr.hpp" - -#include "distributed/pull_produce_rpc_messages.hpp" #include "query/common.hpp" #include "query/frontend/ast/ast.hpp" #include "query/frontend/semantic/symbol.hpp" @@ -28,8 +20,6 @@ #include "utils/hashing/fnv.hpp" #include "utils/visitor.hpp" -#include "query/plan/operator.capnp.h" - namespace database { class GraphDbAccessor; } @@ -105,12 +95,7 @@ class Unwind; class Distinct; class CreateIndex; class Union; -class PullRemote; -class Synchronize; class 
Cartesian; -class PullRemoteOrderBy; -class ModifyUser; -class DropUser; using LogicalOperatorCompositeVisitor = ::utils::CompositeVisitor< Once, CreateNode, CreateExpand, ScanAll, ScanAllByLabel, @@ -119,11 +104,11 @@ using LogicalOperatorCompositeVisitor = ::utils::CompositeVisitor< SetProperties, SetLabels, RemoveProperty, RemoveLabels, ExpandUniquenessFilter<VertexAccessor>, ExpandUniquenessFilter<EdgeAccessor>, Accumulate, Aggregate, Skip, Limit, - OrderBy, Merge, Optional, Unwind, Distinct, Union, PullRemote, Synchronize, - Cartesian, PullRemoteOrderBy>; + OrderBy, Merge, Optional, Unwind, Distinct, Union, + Cartesian>; using LogicalOperatorLeafVisitor = - ::utils::LeafVisitor<Once, CreateIndex, ModifyUser, DropUser>; + ::utils::LeafVisitor<Once, CreateIndex>; /** * @brief Base class for hierarhical visitors of @c LogicalOperator class @@ -142,12 +127,6 @@ cpp<# (lcp:capnp-namespace "query::plan") -(lcp:capnp-import 'utils "/utils/serialization.capnp") -(lcp:capnp-import 'storage "/storage/serialization.capnp") -(lcp:capnp-import 'ast "/query/frontend/ast/ast.capnp") -(lcp:capnp-import 'semantic "/query/frontend/semantic/symbol.capnp") -(lcp:capnp-import 'common "/query/common.capnp") - (lcp:capnp-type-conversion "Symbol" "Semantic.Symbol") (lcp:capnp-type-conversion "storage::Label" "Storage.Common") (lcp:capnp-type-conversion "storage::Property" "Storage.Common") @@ -235,27 +214,7 @@ can serve as inputs to others and thus a sequence of operations is formed.") loaded_ops; }; cpp<#) - (:private - #>cpp - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &, const unsigned int) {} - cpp<#) - (:serialize :capnp :base t - :save-args '((helper "SaveHelper *")) - :load-args '((helper "LoadHelper *")))) - -#>cpp -template <class TArchive> -std::pair<std::unique_ptr<LogicalOperator>, AstStorage> LoadPlan( - TArchive &ar) { - std::unique_ptr<LogicalOperator> root; - ar >> root; - return {std::move(root), std::move(ar.template get_helper<AstStorage>( - AstStorage::kHelperId))}; -} -cpp<# + (:serialize)) (defun save-pointer (archive member-name) #>cpp @@ -359,7 +318,7 @@ and false on every following Pull.") bool did_pull_{false}; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class create-node (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -417,11 +376,10 @@ a preceeding `MATCH`), or multiple nodes (`MATCH ... CREATE` or private: const CreateNode &self_; - database::GraphDbAccessor &db_; const std::unique_ptr<Cursor> input_cursor_; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class create-expand (logical-operator) ( @@ -515,7 +473,7 @@ chained in cases when longer paths need creating. ExpressionEvaluator &evaluator); }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class scan-all (logical-operator) ((input "std::shared_ptr<LogicalOperator>" :scope :protected @@ -567,7 +525,7 @@ with a constructor argument. } cpp<#) (:protected #>cpp ScanAll() {} cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class scan-all-by-label (scan-all) ((label "storage::Label" :reader t)) @@ -588,7 +546,7 @@ given label. database::GraphDbAccessor &db) const override; cpp<#) (:private #>cpp ScanAllByLabel() {} cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (defun save-optional-bound (builder member) (let ((save-bound @@ -695,7 +653,7 @@ property value which is inside a range (inclusive or exlusive). 
database::GraphDbAccessor &db) const override; cpp<#) (:private #>cpp ScanAllByLabelPropertyRange() {} cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class scan-all-by-label-property-value (scan-all) ((label "storage::Label" :reader t) @@ -734,7 +692,7 @@ property value. database::GraphDbAccessor &db) const override; cpp<#) (:private #>cpp ScanAllByLabelPropertyValue() {} cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class expand-common () ( @@ -830,9 +788,7 @@ expansion") ExpandCommon() {} cpp<#) - (:serialize :boost :capnp - :save-args '((helper "LogicalOperator::SaveHelper *")) - :load-args '((helper "LogicalOperator::LoadHelper *")))) + (:serialize)) (lcp:define-class expand (logical-operator expand-common) () @@ -902,7 +858,7 @@ pulled.") bool InitEdges(Frame &, Context &); }; cpp<#) - (:serialize :boost :capnp :inherit-compose '(expand-common))) + (:serialize :inherit-compose '(expand-common))) (lcp:define-class expand-variable (logical-operator expand-common) ((type "EdgeAtom::Type" :reader t :capnp-type "Ast.EdgeAtom.Type" @@ -959,9 +915,7 @@ pulled.") :capnp-type "Ast.Tree" :capnp-init nil :capnp-save #'save-ast-pointer :capnp-load (load-ast-pointer "Expression *") :save-fun #'save-pointer :load-fun #'load-pointer)) - (:serialize :boost :capnp - :save-args '((helper "LogicalOperator::SaveHelper *")) - :load-args '((helper "LogicalOperator::LoadHelper *")))) + (:serialize)) #>cpp /** * Creates a variable-length expansion. Most params are forwarded @@ -1020,7 +974,7 @@ pulled.") ExpandVariable() {} cpp<#) - (:serialize :boost :capnp :inherit-compose '(expand-common))) + (:serialize :inherit-compose '(expand-common))) (lcp:define-class construct-named-path (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -1053,7 +1007,7 @@ pulled.") cpp<#) (:private #>cpp ConstructNamedPath() {} cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class filter (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -1100,7 +1054,7 @@ a boolean value.") const std::unique_ptr<Cursor> input_cursor_; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class produce (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -1153,7 +1107,7 @@ RETURN clause) the Produce's pull succeeds exactly once.") Produce() {} cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class delete (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -1202,7 +1156,7 @@ Has a flag for using DETACH DELETE when deleting vertices.") const std::unique_ptr<Cursor> input_cursor_; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class set-property (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -1252,7 +1206,7 @@ can be stored (a TypedValue that can be converted to PropertyValue).") const std::unique_ptr<Cursor> input_cursor_; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class set-properties (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -1282,7 +1236,7 @@ updating.") @c UPDATE means that the current property set is augmented with additional ones (existing props of the same name are replaced), while @c REPLACE means that the old props are discarded and replaced with new ones.") - (:serialize :capnp)) + (:serialize)) #>cpp SetProperties(const std::shared_ptr<LogicalOperator> &input, @@ -1323,7 +1277,7 @@ that the old props are discarded and replaced with new ones.") void Set(TRecordAccessor &record, const TypedValue &rhs) const; }; cpp<#) - 
(:serialize :boost :capnp)) + (:serialize)) (lcp:define-class set-labels (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -1367,7 +1321,7 @@ It does NOT remove labels that are already set on that Vertex.") const std::unique_ptr<Cursor> input_cursor_; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class remove-property (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -1411,7 +1365,7 @@ It does NOT remove labels that are already set on that Vertex.") const std::unique_ptr<Cursor> input_cursor_; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class remove-labels (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -1455,7 +1409,7 @@ If a label does not exist on a Vertex, nothing happens.") const std::unique_ptr<Cursor> input_cursor_; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class (expand-uniqueness-filter t-accessor) (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -1519,7 +1473,7 @@ between edges and an edge lists).") const std::unique_ptr<Cursor> input_cursor_; }; cpp<#) - (:serialize :boost :capnp :type-args '(vertex-accessor edge-accessor))) + (:serialize)) (lcp:define-class accumulate (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -1590,7 +1544,7 @@ has been cached will be reconstructed before Pull returns. bool pulled_all_input_{false}; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) #>cpp /** @@ -1656,9 +1610,7 @@ elements are in an undefined state after aggregation.") "An aggregation element, contains: (input data expression, key expression - only used in COLLECT_MAP, type of aggregation, output symbol).") - (:serialize :boost :capnp - :save-args '((helper "LogicalOperator::SaveHelper *")) - :load-args '((helper "LogicalOperator::LoadHelper *")))) + (:serialize)) #>cpp Aggregate(const std::shared_ptr<LogicalOperator> &input, const std::vector<Element> &aggregations, @@ -1759,7 +1711,7 @@ elements are in an undefined state after aggregation.") void EnsureOkForAvgSum(const TypedValue &value) const; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class skip (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -1816,7 +1768,7 @@ operator's implementation does not expect this.") int skipped_{0}; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class limit (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -1876,7 +1828,7 @@ input should be performed).") int pulled_{0}; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class order-by (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -1945,7 +1897,7 @@ are valid for usage after the OrderBy operator.") decltype(cache_.begin()) cache_it_ = cache_.begin(); }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class merge (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -2011,7 +1963,7 @@ documentation.") bool pull_input_{true}; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class optional (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -2069,7 +2021,7 @@ and returns true, once.") bool pull_input_{true}; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class unwind (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -2120,7 +2072,7 @@ Input is optional (unwind can be the first clause in a query).") std::vector<TypedValue>::iterator input_value_it_ = input_value_.end(); }; cpp<#) - (:serialize :boost 
:capnp)) + (:serialize)) (lcp:define-class distinct (logical-operator) ((input "std::shared_ptr<LogicalOperator>" @@ -2176,7 +2128,7 @@ This implementation maintains input ordering.") seen_rows_; }; cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class create-index (logical-operator) ((label "storage::Label" :reader t) @@ -2203,7 +2155,7 @@ case the index already exists, nothing happens.") void set_input(std::shared_ptr<LogicalOperator>) override; cpp<#) (:private #>cpp CreateIndex() {} cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class union (logical-operator) ((left-op "std::shared_ptr<LogicalOperator>" :reader t @@ -2259,106 +2211,7 @@ vectors of symbols used by each of the inputs.") const std::unique_ptr<Cursor> left_cursor_, right_cursor_; }; cpp<#) - (:serialize :boost :capnp)) - -(lcp:define-class pull-remote (logical-operator) - ((input "std::shared_ptr<LogicalOperator>" - :capnp-save #'save-operator-pointer - :capnp-load #'load-operator-pointer) - (plan-id :int64_t :initval 0 :reader t) - (symbols "std::vector<Symbol>" :reader t - :capnp-save (lcp:capnp-save-vector "::query::capnp::Symbol" "Symbol") - :capnp-load (lcp:capnp-load-vector "::query::capnp::Symbol" "Symbol"))) - (:documentation - "An operator in distributed Memgraph that yields both local and remote (from -other workers) frames. Obtaining remote frames is done through RPC calls to -`distributed::ProduceRpcServer`s running on all the workers. - -This operator aims to yield results as fast as possible and lose minimal -time on data transfer. It gives no guarantees on result order.") - (:public - #>cpp - PullRemote(const std::shared_ptr<LogicalOperator> &input, int64_t plan_id, - const std::vector<Symbol> &symbols) - : input_(input), plan_id_(plan_id), symbols_(symbols) {} - bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override; - std::unique_ptr<Cursor> MakeCursor( - database::GraphDbAccessor &db) const override; - std::vector<Symbol> OutputSymbols(const SymbolTable &) const override; - std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override; - - bool HasSingleInput() const override { return true; } - std::shared_ptr<LogicalOperator> input() const override { return input_; } - void set_input(std::shared_ptr<LogicalOperator> input) override { - input_ = input; - } - cpp<#) - (:private #>cpp PullRemote() {} cpp<#) - (:serialize :boost :capnp)) - -(defun load-pull-remote (reader member-name) - #>cpp - ${member-name} = std::static_pointer_cast<PullRemote>( - utils::LoadSharedPtr<capnp::LogicalOperator, LogicalOperator>(${reader}, - [helper](const auto &reader) { - auto op = LogicalOperator::Construct(reader); - op->Load(reader, helper); - return op.release(); - }, &helper->loaded_ops)); - cpp<#) - -(lcp:define-class synchronize (logical-operator) - ((input "std::shared_ptr<LogicalOperator>" - :capnp-save #'save-operator-pointer - :capnp-load #'load-operator-pointer) - (pull-remote "std::shared_ptr<PullRemote>" :reader t - :capnp-save #'save-operator-pointer - :capnp-load #'load-pull-remote) - (advance-command :bool :initval "false" :reader t)) - (:documentation - "Operator used to synchronize stages of plan execution between the master and -all the workers. Synchronization is necessary in queries that update that -graph state because updates (as well as creations and deletions) are deferred -to avoid multithreaded modification of graph element data (as it's not -thread-safe). - -Logic of the synchronize operator is: - -1. 
If there is a Pull, tell all the workers to pull on that plan and - accumulate results without sending them to the master. This is async. -2. Accumulate local results, in parallel with 1. getting executed on workers. -3. Wait till the master and all the workers are done accumulating. -4. Advance the command, if necessary. -5. Tell all the workers to apply their updates. This is async. -6. Apply local updates, in parallel with 5. on the workers. -7. Notify workers that the command has advanced, if necessary. -8. Yield all the results, first local, then from Pull if available.") - (:public - #>cpp - Synchronize(const std::shared_ptr<LogicalOperator> &input, - const std::shared_ptr<PullRemote> &pull_remote, - bool advance_command) - : input_(input), - pull_remote_(pull_remote), - advance_command_(advance_command) {} - bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override; - std::unique_ptr<Cursor> MakeCursor( - database::GraphDbAccessor &db) const override; - std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override; - - std::vector<Symbol> OutputSymbols( - const SymbolTable &symbol_table) const override { - return input_->OutputSymbols(symbol_table); - } - - bool HasSingleInput() const override { return true; } - std::shared_ptr<LogicalOperator> input() const override { return input_; } - void set_input(std::shared_ptr<LogicalOperator> input) override { - input_ = input; - } - cpp<#) - (:private #>cpp Synchronize() {} cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:define-class cartesian (logical-operator) ((left-op "std::shared_ptr<LogicalOperator>" :reader t @@ -2397,168 +2250,7 @@ Logic of the synchronize operator is: void set_input(std::shared_ptr<LogicalOperator>) override; cpp<#) (:private #>cpp Cartesian() {} cpp<#) - (:serialize :boost :capnp)) - -(lcp:define-class pull-remote-order-by (logical-operator) - ((input "std::shared_ptr<LogicalOperator>" - :capnp-save #'save-operator-pointer - :capnp-load #'load-operator-pointer) - (plan-id :int64_t :initval 0 :reader t) - (symbols "std::vector<Symbol>" :reader t - :capnp-save (lcp:capnp-save-vector "::query::capnp::Symbol" "Symbol") - :capnp-load (lcp:capnp-load-vector "::query::capnp::Symbol" "Symbol")) - (order-by "std::vector<Expression *>" :reader t - :capnp-type "List(Ast.Tree)" - :capnp-save (save-ast-vector "Expression *") - :capnp-load (load-ast-vector "Expression *") - :save-fun #'save-pointers :load-fun #'load-pointers) - (compare "TypedValueVectorCompare" :reader t - :capnp-type "Common.TypedValueVectorCompare")) - (:documentation - "Operator that merges distributed OrderBy operators. 
-Instead of using a regular OrderBy on master (which would collect all remote -results and order them), we can have each worker do an OrderBy locally and -have the master rely on the fact that the results are ordered and merge them -by having only one result from each worker.") - (:public - #>cpp - PullRemoteOrderBy( - const std::shared_ptr<LogicalOperator> &input, int64_t plan_id, - const std::vector<std::pair<Ordering, Expression *>> &order_by, - const std::vector<Symbol> &symbols); - bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override; - std::unique_ptr<Cursor> MakeCursor( - database::GraphDbAccessor &db) const override; - - std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override; - std::vector<Symbol> OutputSymbols(const SymbolTable &) const override; - - bool HasSingleInput() const override { return true; } - std::shared_ptr<LogicalOperator> input() const override { return input_; } - void set_input(std::shared_ptr<LogicalOperator> input) override { - input_ = input; - } - cpp<#) - (:private #>cpp PullRemoteOrderBy() {} cpp<#) - (:serialize :boost :capnp)) - -(lcp:define-class modify-user (logical-operator) - ((input "std::shared_ptr<LogicalOperator>" - :capnp-save #'save-operator-pointer - :capnp-load #'load-operator-pointer) - (username "std::string" :reader t) - (password "Expression *" - :reader t - :save-fun #'save-pointer - :load-fun #'load-pointer - :capnp-type "Ast.Tree" - :capnp-init nil - :capnp-save #'save-ast-pointer - :capnp-load (load-ast-pointer "Expression *")) - (is-create :bool :reader t)) - (:documentation - "Operator that creates a new database user or modifies an existing one.") - (:public - #>cpp - ModifyUser(std::string username, Expression *password, bool is_create); - - bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override; - std::unique_ptr<Cursor> MakeCursor( - database::GraphDbAccessor &db) const override; - - std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override { - return std::vector<Symbol>(); - } - - bool HasSingleInput() const override; - std::shared_ptr<LogicalOperator> input() const override; - void set_input(std::shared_ptr<LogicalOperator> input) override; - cpp<#) - (:private - #>cpp - friend class boost::serialization::access; - - ModifyUser() {} - cpp<#) - (:serialize :boost :capnp)) - -(lcp:define-class drop-user (logical-operator) - ((input "std::shared_ptr<LogicalOperator>" - :capnp-save #'save-operator-pointer - :capnp-load #'load-operator-pointer) - (usernames "std::vector<std::string>" :reader t - :capnp-save (lambda (builder member-name) - #>cpp - utils::SaveVector(${member-name}, &${builder}); - cpp<#) - :capnp-load (lambda (reader member-name) - #>cpp - utils::LoadVector(&${member-name}, ${reader}); - cpp<#))) - (:documentation - "Operator that deletes one or more existing database users.") - (:public - #>cpp - DropUser(std::vector<std::string> usernames): usernames_(usernames) {} - - bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override; - std::unique_ptr<Cursor> MakeCursor( - database::GraphDbAccessor &db) const override; - - std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override { - return std::vector<Symbol>(); - } - - bool HasSingleInput() const override; - std::shared_ptr<LogicalOperator> input() const override; - void set_input(std::shared_ptr<LogicalOperator> input) override; - cpp<#) - (:private - #>cpp - friend class boost::serialization::access; - DropUser() {} - cpp<#) - (:serialize :boost :capnp)) + (:serialize)) (lcp:pop-namespace) ;; 
plan (lcp:pop-namespace) ;; query - -#>cpp -BOOST_CLASS_EXPORT_KEY(query::plan::Once); -BOOST_CLASS_EXPORT_KEY(query::plan::CreateNode); -BOOST_CLASS_EXPORT_KEY(query::plan::CreateExpand); -BOOST_CLASS_EXPORT_KEY(query::plan::ScanAll); -BOOST_CLASS_EXPORT_KEY(query::plan::ScanAllByLabel); -BOOST_CLASS_EXPORT_KEY(query::plan::ScanAllByLabelPropertyRange); -BOOST_CLASS_EXPORT_KEY(query::plan::ScanAllByLabelPropertyValue); -BOOST_CLASS_EXPORT_KEY(query::plan::Expand); -BOOST_CLASS_EXPORT_KEY(query::plan::ExpandVariable); -BOOST_CLASS_EXPORT_KEY(query::plan::Filter); -BOOST_CLASS_EXPORT_KEY(query::plan::Produce); -BOOST_CLASS_EXPORT_KEY(query::plan::ConstructNamedPath); -BOOST_CLASS_EXPORT_KEY(query::plan::Delete); -BOOST_CLASS_EXPORT_KEY(query::plan::SetProperty); -BOOST_CLASS_EXPORT_KEY(query::plan::SetProperties); -BOOST_CLASS_EXPORT_KEY(query::plan::SetLabels); -BOOST_CLASS_EXPORT_KEY(query::plan::RemoveProperty); -BOOST_CLASS_EXPORT_KEY(query::plan::RemoveLabels); -BOOST_CLASS_EXPORT_KEY(query::plan::ExpandUniquenessFilter<EdgeAccessor>); -BOOST_CLASS_EXPORT_KEY(query::plan::ExpandUniquenessFilter<VertexAccessor>); -BOOST_CLASS_EXPORT_KEY(query::plan::Accumulate); -BOOST_CLASS_EXPORT_KEY(query::plan::Aggregate); -BOOST_CLASS_EXPORT_KEY(query::plan::Skip); -BOOST_CLASS_EXPORT_KEY(query::plan::Limit); -BOOST_CLASS_EXPORT_KEY(query::plan::OrderBy); -BOOST_CLASS_EXPORT_KEY(query::plan::Merge); -BOOST_CLASS_EXPORT_KEY(query::plan::Optional); -BOOST_CLASS_EXPORT_KEY(query::plan::Unwind); -BOOST_CLASS_EXPORT_KEY(query::plan::Distinct); -BOOST_CLASS_EXPORT_KEY(query::plan::CreateIndex); -BOOST_CLASS_EXPORT_KEY(query::plan::Union); -BOOST_CLASS_EXPORT_KEY(query::plan::PullRemote); -BOOST_CLASS_EXPORT_KEY(query::plan::Synchronize); -BOOST_CLASS_EXPORT_KEY(query::plan::Cartesian); -BOOST_CLASS_EXPORT_KEY(query::plan::PullRemoteOrderBy); -BOOST_CLASS_EXPORT_KEY(query::plan::ModifyUser); -BOOST_CLASS_EXPORT_KEY(query::plan::DropUser); -cpp<# diff --git a/src/query/plan/preprocess.hpp b/src/query/plan/preprocess.hpp index c1930be77..dbbb94c6b 100644 --- a/src/query/plan/preprocess.hpp +++ b/src/query/plan/preprocess.hpp @@ -53,8 +53,6 @@ class UsedSymbolsCollector : public HierarchicalTreeVisitor { bool Visit(PrimitiveLiteral &) override { return true; } bool Visit(ParameterLookup &) override { return true; } bool Visit(query::CreateIndex &) override { return true; } - bool Visit(query::ModifyUser &) override { return true; } - bool Visit(query::DropUser &) override { return true; } std::unordered_set<Symbol> symbols_; const SymbolTable &symbol_table_; diff --git a/src/query/plan/rule_based_planner.cpp b/src/query/plan/rule_based_planner.cpp index a4393b13e..929daac48 100644 --- a/src/query/plan/rule_based_planner.cpp +++ b/src/query/plan/rule_based_planner.cpp @@ -387,16 +387,6 @@ class ReturnBodyContext : public HierarchicalTreeVisitor { return true; } - bool Visit(query::ModifyUser &) override { - has_aggregation_.emplace_back(false); - return true; - } - - bool Visit(query::DropUser &) override { - has_aggregation_.emplace_back(false); - return true; - } - // Creates NamedExpression with an Identifier for each user declared symbol. // This should be used when body.all_identifiers is true, to generate // expressions for Produce operator. 
diff --git a/src/query/plan/rule_based_planner.hpp b/src/query/plan/rule_based_planner.hpp index 0acf815cf..f6054dcdf 100644 --- a/src/query/plan/rule_based_planner.hpp +++ b/src/query/plan/rule_based_planner.hpp @@ -182,15 +182,6 @@ class RuleBasedPlanner { DCHECK(!input_op) << "Unexpected operator before CreateIndex"; input_op = std::make_unique<plan::CreateIndex>( create_index->label_, create_index->property_); - } else if (auto *modify_user = - dynamic_cast<query::ModifyUser *>(clause)) { - DCHECK(!input_op) << "Unexpected operator before ModifyUser"; - input_op = std::make_unique<plan::ModifyUser>( - modify_user->username_, modify_user->password_, - modify_user->is_create_); - } else if (auto *drop_user = dynamic_cast<query::DropUser *>(clause)) { - DCHECK(!input_op) << "Unexpected operator before DropUser"; - input_op = std::make_unique<plan::DropUser>(drop_user->usernames_); } else { throw utils::NotYetImplemented("clause conversion to operator(s)"); } diff --git a/src/stats/metrics.cpp b/src/stats/metrics.cpp deleted file mode 100644 index 69552f45b..000000000 --- a/src/stats/metrics.cpp +++ /dev/null @@ -1,105 +0,0 @@ -#include "stats/metrics.hpp" - -#include <tuple> - -#include "fmt/format.h" -#include "glog/logging.h" - -namespace stats { - -std::mutex &MetricsMutex() { - static std::mutex mutex; - return mutex; -} - -std::map<std::string, std::unique_ptr<Metric>> &AccessMetrics() { - static std::map<std::string, std::unique_ptr<Metric>> metrics; - MetricsMutex().lock(); - return metrics; -} - -void ReleaseMetrics() { MetricsMutex().unlock(); } - -Metric::Metric(int64_t start_value) : value_(start_value) {} - -Counter::Counter(int64_t start_value) : Metric(start_value) {} - -void Counter::Bump(int64_t delta) { value_ += delta; } - -std::experimental::optional<int64_t> Counter::Flush() { return value_; } - -int64_t Counter::Value() { return value_; } - -Gauge::Gauge(int64_t start_value) : Metric(start_value) {} - -void Gauge::Set(int64_t value) { value_ = value; } - -std::experimental::optional<int64_t> Gauge::Flush() { return value_; } - -IntervalMin::IntervalMin(int64_t start_value) : Metric(start_value) {} - -void IntervalMin::Add(int64_t value) { - int64_t curr = value_; - while (curr > value && !value_.compare_exchange_weak(curr, value)) - ; -} - -std::experimental::optional<int64_t> IntervalMin::Flush() { - int64_t curr = value_; - value_.compare_exchange_weak(curr, std::numeric_limits<int64_t>::max()); - return curr == std::numeric_limits<int64_t>::max() - ? std::experimental::nullopt - : std::experimental::make_optional(curr); -} - -IntervalMax::IntervalMax(int64_t start_value) : Metric(start_value) {} - -void IntervalMax::Add(int64_t value) { - int64_t curr = value_; - while (curr < value && !value_.compare_exchange_weak(curr, value)) - ; -} - -std::experimental::optional<int64_t> IntervalMax::Flush() { - int64_t curr = value_; - value_.compare_exchange_weak(curr, std::numeric_limits<int64_t>::min()); - return curr == std::numeric_limits<int64_t>::min() - ? 
std::experimental::nullopt - : std::experimental::make_optional(curr); -} - -template <class T> -T &GetMetric(const std::string &name, int64_t start_value) { - auto &metrics = AccessMetrics(); - auto it = metrics.find(name); - if (it == metrics.end()) { - auto got = metrics.emplace(name, std::make_unique<T>(start_value)); - CHECK(got.second) << "Failed to create counter " << name; - it = got.first; - } - ReleaseMetrics(); - auto *ptr = dynamic_cast<T *>(it->second.get()); - if (!ptr) { - LOG(FATAL) << fmt::format("GetMetric({}) called with invalid metric type", - name); - } - return *ptr; -} - -Counter &GetCounter(const std::string &name, int64_t start_value) { - return GetMetric<Counter>(name, start_value); -} - -Gauge &GetGauge(const std::string &name, int64_t start_value) { - return GetMetric<Gauge>(name, start_value); -} - -IntervalMin &GetIntervalMin(const std::string &name) { - return GetMetric<IntervalMin>(name, std::numeric_limits<int64_t>::max()); -} - -IntervalMax &GetIntervalMax(const std::string &name) { - return GetMetric<IntervalMax>(name, std::numeric_limits<int64_t>::min()); -} - -} // namespace stats diff --git a/src/stats/metrics.hpp b/src/stats/metrics.hpp deleted file mode 100644 index c13bcff18..000000000 --- a/src/stats/metrics.hpp +++ /dev/null @@ -1,202 +0,0 @@ -/** - * @file - * - * This file contains some metrics types that can be aggregated on client side - * and periodically flushed to StatsD. - */ -#pragma once - -#include <atomic> -#include <experimental/optional> -#include <map> -#include <memory> -#include <mutex> -#include <string> - -#include "fmt/format.h" - -namespace stats { - -// TODO(mtomic): it would probably be nice to have Value method for every metric -// type, however, there is no use case for this yet - -/** - * Abstract base class for all metrics. - */ -class Metric { - public: - /** - * Constructs a metric to be exported to StatsD. - * - * @param name metric will be exported to StatsD with this path - * @param value initial value - */ - virtual ~Metric() {} - - /** - * Metric refresh thread will periodically call this function. It should - * return the metric value aggregated since the last flush call or nullopt - * if there were no updates. - */ - virtual std::experimental::optional<int64_t> Flush() = 0; - - explicit Metric(int64_t start_value = 0); - - protected: - std::atomic<int64_t> value_; -}; - -/** - * A simple counter. - */ -class Counter : public Metric { - public: - explicit Counter(int64_t start_value = 0); - - /** - * Change counter value by delta. - * - * @param delta value change - */ - void Bump(int64_t delta = 1); - - /** Returns the current value of the counter. **/ - std::experimental::optional<int64_t> Flush() override; - - /** Returns the current value of the counter. **/ - int64_t Value(); - - friend Counter &GetCounter(const std::string &name); -}; - -/** - * To be used instead of Counter constructor. If counter with this name doesn't - * exist, it will be initialized with start_value. - * - * @param name counter name - * @param start_value start value - */ -Counter &GetCounter(const std::string &name, int64_t start_value = 0); - -/** - * A simple gauge. Gauge value is explicitly set, instead of being added to or - * subtracted from. - */ -class Gauge : public Metric { - public: - explicit Gauge(int64_t start_value = 0); - - /** - * Set gauge value. - * - * @param value value to be set - */ - void Set(int64_t value); - - /** Returns the current gauge value. 
**/ - std::experimental::optional<int64_t> Flush() override; -}; - -/** - * To be used instead of Gauge constructor. If gauge with this name doesn't - * exist, it will be initialized with start_value. - * - * @param name gauge name - * @param start_value start value - */ -Gauge &GetGauge(const std::string &name, int64_t start_value = 0); - -/** - * Aggregates minimum between two flush periods. - */ -class IntervalMin : public Metric { - public: - explicit IntervalMin(int64_t start_value); - - /** - * Add another value into the minimum computation. - * - * @param value value to be added - */ - void Add(int64_t value); - - /** - * Returns the minimum value encountered since the last flush period, - * or nullopt if no values were added. - */ - std::experimental::optional<int64_t> Flush() override; -}; - -/** - * To be used instead of IntervalMin constructor. - * - * @param name interval min name - */ -IntervalMin &GetIntervalMin(const std::string &name); - -/** - * Aggregates maximum between two flush periods. - */ -class IntervalMax : public Metric { - public: - explicit IntervalMax(int64_t start_value); - - /** - * Add another value into the maximum computation. - */ - void Add(int64_t value); - - /** - * Returns the maximum value encountered since the last flush period, - * or nullopt if no values were added. - */ - std::experimental::optional<int64_t> Flush() override; -}; - -/** - * To be used instead of IntervalMax constructor. - * - * @param name interval max name - */ -IntervalMax &GetIntervalMax(const std::string &name); - -/** - * A stopwatch utility. It exports 4 metrics: total time measured since the - * beginning of the program, total number of time intervals measured, - * minimum and maximum time interval measured since the last metric flush. - * Metrics exported by the stopwatch will be named - * [name].{total_time|count|min|max}. - * - * @param name timed event name - * @param f Callable, an action to be performed. - */ -template <class Function> -int64_t Stopwatch(const std::string &name, Function f) { - auto &total_time = GetCounter(fmt::format("{}.total_time", name)); - auto &count = GetCounter(fmt::format("{}.count", name)); - auto &min = GetIntervalMin(fmt::format("{}.min", name)); - auto &max = GetIntervalMax(fmt::format("{}.max", name)); - auto start = std::chrono::system_clock::now(); - f(); - auto duration = std::chrono::duration_cast<std::chrono::milliseconds>( - std::chrono::system_clock::now() - start) - .count(); - total_time.Bump(duration); - count.Bump(); - min.Add(duration); - max.Add(duration); - return duration; -} - -/** - * Access internal metric list. You probably don't want to use this, - * but if you do, make sure to call ReleaseMetrics when you're done. - */ -std::map<std::string, std::unique_ptr<Metric>> &AccessMetrics(); - -/** - * Releases internal lock on metric list. 
- */ -void ReleaseMetrics(); - -} // namespace stats diff --git a/src/stats/stats.cpp b/src/stats/stats.cpp deleted file mode 100644 index 2abc28c63..000000000 --- a/src/stats/stats.cpp +++ /dev/null @@ -1,113 +0,0 @@ -#include "stats/stats.hpp" - -#include "glog/logging.h" - -#include "communication/rpc/client.hpp" -#include "data_structures/concurrent/push_queue.hpp" -#include "utils/thread.hpp" - -#include "stats/stats_rpc_messages.hpp" - -DEFINE_HIDDEN_string(statsd_address, "", "Stats server IP address"); -DEFINE_HIDDEN_int32(statsd_port, 2500, "Stats server port"); -DEFINE_HIDDEN_int32(statsd_flush_interval, 500, - "Stats flush interval (in milliseconds)"); - -namespace stats { - -std::string statsd_prefix = ""; -std::thread stats_dispatch_thread; -std::thread counter_refresh_thread; -std::atomic<bool> stats_running{false}; -ConcurrentPushQueue<StatsReq> stats_queue; - -void RefreshMetrics() { - LOG(INFO) << "Metrics flush thread started"; - utils::ThreadSetName("Stats refresh"); - while (stats_running) { - auto &metrics = AccessMetrics(); - for (auto &kv : metrics) { - auto value = kv.second->Flush(); - if (value) { - LogStat(kv.first, *value); - } - } - ReleaseMetrics(); - // TODO(mtomic): hardcoded sleep time - std::this_thread::sleep_for(std::chrono::seconds(1)); - } - LOG(INFO) << "Metrics flush thread stopped"; -} - -void StatsDispatchMain(const io::network::Endpoint &endpoint) { - // TODO(mtomic): we probably want to batch based on request size and MTU - const int MAX_BATCH_SIZE = 100; - - LOG(INFO) << "Stats dispatcher thread started"; - utils::ThreadSetName("Stats dispatcher"); - - communication::rpc::Client client(endpoint); - - BatchStatsReq batch_request; - batch_request.requests.reserve(MAX_BATCH_SIZE); - - while (stats_running) { - auto last = stats_queue.begin(); - size_t sent = 0, total = 0; - - auto flush_batch = [&] { - if (client.Call<BatchStatsRpc>(batch_request)) { - sent += batch_request.requests.size(); - } - total += batch_request.requests.size(); - batch_request.requests.clear(); - }; - - for (auto it = last; it != stats_queue.end(); it++) { - batch_request.requests.emplace_back(std::move(*it)); - if (batch_request.requests.size() == MAX_BATCH_SIZE) { - flush_batch(); - } - } - - if (!batch_request.requests.empty()) { - flush_batch(); - } - - VLOG(30) << fmt::format("Sent {} out of {} events from queue.", sent, - total); - last.delete_tail(); - std::this_thread::sleep_for( - std::chrono::milliseconds(FLAGS_statsd_flush_interval)); - } -} - -void LogStat(const std::string &metric_path, double value, - const std::vector<std::pair<std::string, std::string>> &tags) { - if (stats_running) { - stats_queue.push(statsd_prefix + metric_path, tags, value); - } -} - -void InitStatsLogging(std::string prefix) { - if (!prefix.empty()) { - statsd_prefix = prefix + "."; - } - if (FLAGS_statsd_address != "") { - stats_running = true; - stats_dispatch_thread = std::thread( - StatsDispatchMain, io::network::Endpoint{FLAGS_statsd_address, - (uint16_t)FLAGS_statsd_port}); - counter_refresh_thread = std::thread(RefreshMetrics); - } -} - -void StopStatsLogging() { - if (stats_running) { - stats_running = false; - stats_dispatch_thread.join(); - counter_refresh_thread.join(); - } -} - -} // namespace stats diff --git a/src/stats/stats.hpp b/src/stats/stats.hpp deleted file mode 100644 index b3dd2f703..000000000 --- a/src/stats/stats.hpp +++ /dev/null @@ -1,33 +0,0 @@ -/// @file - -#pragma once - -#include <thread> -#include <vector> - -#include "gflags/gflags.h" - -#include 
"stats/metrics.hpp" - -namespace stats { - -/** - * Start sending metrics to StatsD server. - * - * @param prefix prefix to prepend to exported keys - */ -void InitStatsLogging(std::string prefix = ""); - -/** - * Stop sending metrics to StatsD server. This should be called before exiting - * program. - */ -void StopStatsLogging(); - -/** - * Send a value to StatsD with current timestamp. - */ -void LogStat(const std::string &metric_path, double value, - const std::vector<std::pair<std::string, std::string>> &tags = {}); - -} // namespace stats diff --git a/src/stats/stats_rpc_messages.lcp b/src/stats/stats_rpc_messages.lcp deleted file mode 100644 index 286e19fef..000000000 --- a/src/stats/stats_rpc_messages.lcp +++ /dev/null @@ -1,51 +0,0 @@ -#>cpp -#pragma once - -#include "communication/rpc/messages.hpp" -#include "stats/stats_rpc_messages.capnp.h" -#include "utils/serialization.hpp" -#include "utils/timestamp.hpp" -cpp<# - -(lcp:namespace stats) - -(lcp:capnp-namespace "stats") - -(lcp:capnp-import 'utils "/utils/serialization.capnp") - -(lcp:define-rpc stats - (:request - ((metric-path "std::string") - (tags "std::vector<std::pair<std::string, std::string>>" - :capnp-type "List(Utils.Pair(Text, Text))" - :capnp-save - (lcp:capnp-save-vector - "utils::capnp::Pair<::capnp::Text, ::capnp::Text>" - "std::pair<std::string, std::string>" - "[](auto *builder, const auto &pair) { - builder->setFirst(pair.first); - builder->setSecond(pair.second); - }") - :capnp-load - (lcp:capnp-load-vector - "utils::capnp::Pair<::capnp::Text, ::capnp::Text>" - "std::pair<std::string, std::string>" - "[](const auto &reader) { - std::string first = reader.getFirst(); - std::string second = reader.getSecond(); - return std::make_pair(first, second); - }")) - (value :double) - (timestamp :uint64_t :initarg nil - :initval "static_cast<uint64_t>(utils::Timestamp::Now().SecSinceTheEpoch())"))) - (:response ())) - -(lcp:define-rpc batch-stats - (:request - ((requests "std::vector<StatsReq>" - :capnp-type "List(StatsReq)" - :capnp-save (lcp:capnp-save-vector "capnp::StatsReq" "StatsReq") - :capnp-load (lcp:capnp-load-vector "capnp::StatsReq" "StatsReq")))) - (:response ())) - -(lcp:pop-namespace) ;; stats diff --git a/src/storage/address.hpp b/src/storage/address.hpp index ff6d7bd8a..990d38441 100644 --- a/src/storage/address.hpp +++ b/src/storage/address.hpp @@ -4,7 +4,6 @@ #include "glog/logging.h" -#include "storage/serialization.capnp.h" #include "storage/gid.hpp" namespace storage { @@ -89,14 +88,6 @@ class Address { return storage_ == other.storage_; } - void Save(capnp::Address::Builder *builder) const { - builder->setStorage(storage_); - } - - void Load(const capnp::Address::Reader &reader) { - storage_ = reader.getStorage(); - } - private: StorageT storage_{0}; }; diff --git a/src/storage/concurrent_id_mapper_master.cpp b/src/storage/concurrent_id_mapper_master.cpp deleted file mode 100644 index aa4799581..000000000 --- a/src/storage/concurrent_id_mapper_master.cpp +++ /dev/null @@ -1,54 +0,0 @@ -#include "glog/logging.h" - -#include "storage/concurrent_id_mapper_master.hpp" -#include "storage/concurrent_id_mapper_rpc_messages.hpp" -#include "storage/types.hpp" - -namespace storage { - -namespace { -template <typename TId> -void RegisterRpc(MasterConcurrentIdMapper<TId> &mapper, - communication::rpc::Server &rpc_server); -#define ID_VALUE_RPC_CALLS(type) \ - template <> \ - void RegisterRpc<type>(MasterConcurrentIdMapper<type> & mapper, \ - communication::rpc::Server & rpc_server) { \ - 
rpc_server.Register<type##IdRpc>( \ - [&mapper](const auto &req_reader, auto *res_builder) { \ - type##IdReq req; \ - req.Load(req_reader); \ - type##IdRes res(mapper.value_to_id(req.member)); \ - res.Save(res_builder); \ - }); \ - rpc_server.Register<Id##type##Rpc>( \ - [&mapper](const auto &req_reader, auto *res_builder) { \ - Id##type##Req req; \ - req.Load(req_reader); \ - Id##type##Res res(mapper.id_to_value(req.member)); \ - res.Save(res_builder); \ - }); \ - } - -using namespace storage; -ID_VALUE_RPC_CALLS(Label) -ID_VALUE_RPC_CALLS(EdgeType) -ID_VALUE_RPC_CALLS(Property) -#undef ID_VALUE_RPC -} // namespace - -template <typename TId> -MasterConcurrentIdMapper<TId>::MasterConcurrentIdMapper( - communication::rpc::Server &server) - // We have to make sure our rpc server name is unique with regards to type. - // Otherwise we will try to reuse the same rpc server name for different - // types (Label/EdgeType/Property) - : rpc_server_(server) { - RegisterRpc(*this, rpc_server_); -} - -template class MasterConcurrentIdMapper<Label>; -template class MasterConcurrentIdMapper<EdgeType>; -template class MasterConcurrentIdMapper<Property>; - -} // namespace storage diff --git a/src/storage/concurrent_id_mapper_master.hpp b/src/storage/concurrent_id_mapper_master.hpp deleted file mode 100644 index 563b634fa..000000000 --- a/src/storage/concurrent_id_mapper_master.hpp +++ /dev/null @@ -1,20 +0,0 @@ -#pragma once - -#include <experimental/optional> - -#include "communication/rpc/server.hpp" -#include "data_structures/concurrent/concurrent_map.hpp" -#include "storage/concurrent_id_mapper_single_node.hpp" - -namespace storage { - -/** Master implementation of ConcurrentIdMapper. */ -template <typename TId> -class MasterConcurrentIdMapper : public SingleNodeConcurrentIdMapper<TId> { - public: - explicit MasterConcurrentIdMapper(communication::rpc::Server &server); - - private: - communication::rpc::Server &rpc_server_; -}; -} // namespace storage diff --git a/src/storage/concurrent_id_mapper_rpc_messages.lcp b/src/storage/concurrent_id_mapper_rpc_messages.lcp deleted file mode 100644 index 439cf3d8d..000000000 --- a/src/storage/concurrent_id_mapper_rpc_messages.lcp +++ /dev/null @@ -1,44 +0,0 @@ -#>cpp -#pragma once - -#include <chrono> - -#include "communication/rpc/messages.hpp" -#include "storage/concurrent_id_mapper_rpc_messages.capnp.h" -#include "storage/types.hpp" -#include "transactions/commit_log.hpp" -#include "transactions/snapshot.hpp" -#include "transactions/type.hpp" -cpp<# - -(lcp:namespace storage) - -(lcp:capnp-namespace "storage") - -(lcp:capnp-import 's "/storage/serialization.capnp") - -(lcp:define-rpc label-id - (:request ((member "std::string"))) - (:response ((member "Label" :capnp-type "S.Common")))) - -(lcp:define-rpc id-label - (:request ((member "Label" :capnp-type "S.Common"))) - (:response ((member "std::string")))) - -(lcp:define-rpc edge-type-id - (:request ((member "std::string"))) - (:response ((member "EdgeType" :capnp-type "S.Common")))) - -(lcp:define-rpc id-edge-type - (:request ((member "EdgeType" :capnp-type "S.Common"))) - (:response ((member "std::string")))) - -(lcp:define-rpc property-id - (:request ((member "std::string"))) - (:response ((member "Property" :capnp-type "S.Common")))) - -(lcp:define-rpc id-property - (:request ((member "Property" :capnp-type "S.Common"))) - (:response ((member "std::string")))) - -(lcp:pop-namespace) diff --git a/src/storage/concurrent_id_mapper_worker.cpp b/src/storage/concurrent_id_mapper_worker.cpp deleted file mode 
100644 index 85902702c..000000000 --- a/src/storage/concurrent_id_mapper_worker.cpp +++ /dev/null @@ -1,60 +0,0 @@ -#include "glog/logging.h" - -#include "concurrent_id_mapper_worker.hpp" -#include "storage/concurrent_id_mapper_rpc_messages.hpp" -#include "storage/types.hpp" - -namespace storage { - -#define ID_VALUE_RPC_CALLS(type) \ - template <> \ - type WorkerConcurrentIdMapper<type>::RpcValueToId( \ - const std::string &value) { \ - auto response = master_client_pool_.Call<type##IdRpc>(value); \ - CHECK(response) << (#type "IdRpc failed"); \ - return response->member; \ - } \ - \ - template <> \ - std::string WorkerConcurrentIdMapper<type>::RpcIdToValue(type id) { \ - auto response = master_client_pool_.Call<Id##type##Rpc>(id); \ - CHECK(response) << ("Id" #type "Rpc failed"); \ - return response->member; \ - } - -using namespace storage; -ID_VALUE_RPC_CALLS(Label) -ID_VALUE_RPC_CALLS(EdgeType) -ID_VALUE_RPC_CALLS(Property) - -#undef ID_VALUE_RPC_CALLS - -template <typename TId> -WorkerConcurrentIdMapper<TId>::WorkerConcurrentIdMapper( - communication::rpc::ClientPool &master_client_pool) - : master_client_pool_(master_client_pool) {} - -template <typename TId> -TId WorkerConcurrentIdMapper<TId>::value_to_id(const std::string &value) { - auto accessor = value_to_id_cache_.access(); - auto found = accessor.find(value); - if (found != accessor.end()) return found->second; - - TId id = RpcValueToId(value); - accessor.insert(value, id); - return id; -} - -template <typename TId> -const std::string &WorkerConcurrentIdMapper<TId>::id_to_value(const TId &id) { - auto accessor = id_to_value_cache_.access(); - auto found = accessor.find(id); - if (found != accessor.end()) return found->second; - std::string value = RpcIdToValue(id); - return accessor.insert(id, value).first->second; -} - -template class WorkerConcurrentIdMapper<Label>; -template class WorkerConcurrentIdMapper<EdgeType>; -template class WorkerConcurrentIdMapper<Property>; -} // namespace storage diff --git a/src/storage/concurrent_id_mapper_worker.hpp b/src/storage/concurrent_id_mapper_worker.hpp deleted file mode 100644 index 5a45299a8..000000000 --- a/src/storage/concurrent_id_mapper_worker.hpp +++ /dev/null @@ -1,34 +0,0 @@ -#pragma once - -#include "communication/rpc/client_pool.hpp" -#include "data_structures/concurrent/concurrent_map.hpp" -#include "io/network/endpoint.hpp" -#include "storage/concurrent_id_mapper.hpp" - -namespace storage { - -/** Worker implementation of ConcurrentIdMapper. */ -template <typename TId> -class WorkerConcurrentIdMapper : public ConcurrentIdMapper<TId> { - // Makes an appropriate RPC call for the current TId type and the given value. - TId RpcValueToId(const std::string &value); - - // Makes an appropriate RPC call for the current TId type and the given value. - std::string RpcIdToValue(TId id); - - public: - WorkerConcurrentIdMapper(communication::rpc::ClientPool &master_client_pool); - - TId value_to_id(const std::string &value) override; - const std::string &id_to_value(const TId &id) override; - - private: - // Sources of truth for the mappings are on the master, not on this worker. We - // keep the caches. - ConcurrentMap<std::string, TId> value_to_id_cache_; - ConcurrentMap<TId, std::string> id_to_value_cache_; - - // Communication to the concurrent ID master. 
- communication::rpc::ClientPool &master_client_pool_; -}; -} // namespace storage diff --git a/src/storage/record_accessor.cpp b/src/storage/record_accessor.cpp index 675edd303..86358e85b 100644 --- a/src/storage/record_accessor.cpp +++ b/src/storage/record_accessor.cpp @@ -2,8 +2,6 @@ #include "database/graph_db_accessor.hpp" #include "database/state_delta.hpp" -#include "distributed/data_manager.hpp" -#include "distributed/updates_rpc_clients.hpp" #include "query/exceptions.hpp" #include "storage/edge.hpp" #include "storage/record_accessor.hpp" @@ -142,19 +140,7 @@ RecordAccessor<TRecord> &RecordAccessor<TRecord>::SwitchOld() { template <typename TRecord> bool RecordAccessor<TRecord>::Reconstruct() const { auto &dba = db_accessor(); - if (is_local()) { - address_.local()->find_set_old_new(dba.transaction(), old_, new_); - } else { - // It's not possible that we have a global address for a graph element - // that's local, because that is resolved in the constructor. - // TODO in write queries it's possible the command has been advanced and - // we need to invalidate the Cache and really get the latest stuff. - // But only do that after the command has been advanced. - auto &cache = dba.db().data_manager().template Elements<TRecord>( - dba.transaction_id()); - cache.FindSetOldNew(dba.transaction().id_, address_.worker_id(), - address_.gid(), old_, new_); - } + address_.local()->find_set_old_new(dba.transaction(), old_, new_); current_ = old_ ? old_ : new_; return old_ != nullptr || new_ != nullptr; } @@ -176,13 +162,8 @@ TRecord &RecordAccessor<TRecord>::update() const { if (new_) return *new_; - if (is_local()) { - new_ = address_.local()->update(t); - } else { - auto &cache = dba.db().data_manager().template Elements<TRecord>( - dba.transaction_id()); - new_ = cache.FindNew(address_.gid()); - } + new_ = address_.local()->update(t); + DCHECK(new_ != nullptr) << "RecordAccessor.new_ is null after update"; return *new_; } @@ -196,36 +177,10 @@ const TRecord &RecordAccessor<TRecord>::current() const { return *current_; } -template <typename TRecord> -void RecordAccessor<TRecord>::SendDelta( - const database::StateDelta &delta) const { - DCHECK(!is_local()) - << "Only a delta created on a remote accessor should be sent"; - - auto result = - db_accessor().db().updates_clients().Update(address().worker_id(), delta); - switch (result) { - case distributed::UpdateResult::DONE: - break; - case distributed::UpdateResult::UNABLE_TO_DELETE_VERTEX_ERROR: - throw query::RemoveAttachedVertexException(); - case distributed::UpdateResult::SERIALIZATION_ERROR: - throw mvcc::SerializationError(); - case distributed::UpdateResult::UPDATE_DELETED_ERROR: - throw RecordDeletedError(); - case distributed::UpdateResult::LOCK_TIMEOUT_ERROR: - throw utils::LockTimeoutException("Lock timeout on remote worker"); - } -} - template <typename TRecord> void RecordAccessor<TRecord>::ProcessDelta( const database::StateDelta &delta) const { - if (is_local()) { - db_accessor().wal().Emplace(delta); - } else { - SendDelta(delta); - } + db_accessor().wal().Emplace(delta); } template class RecordAccessor<Vertex>; diff --git a/src/storage/serialization.capnp b/src/storage/serialization.capnp deleted file mode 100644 index 8140fe015..000000000 --- a/src/storage/serialization.capnp +++ /dev/null @@ -1,21 +0,0 @@ -@0x8678c6a8817808d9; - -using Cxx = import "/capnp/c++.capnp"; -$Cxx.namespace("storage::capnp"); - -struct Common { - storage @0 :UInt16; - union { - label @1 :Label; - edgeType @2 :EdgeType; - property @3 :Property; - 
} -} - -struct Label {} -struct EdgeType {} -struct Property {} - -struct Address { - storage @0 :UInt64; -} diff --git a/src/storage/types.hpp b/src/storage/types.hpp index b7801fc00..555c79a27 100644 --- a/src/storage/types.hpp +++ b/src/storage/types.hpp @@ -4,10 +4,8 @@ #include <functional> #include <limits> -#include "boost/serialization/base_object.hpp" #include "glog/logging.h" -#include "storage/serialization.capnp.h" #include "utils/total_ordering.hpp" namespace storage { @@ -56,61 +54,23 @@ class Common : public utils::TotalOrdering<TSpecificType> { size_t operator()(const TSpecificType &t) const { return hash(t.id_); } }; - virtual void Save(capnp::Common::Builder *builder) const { - builder->setStorage(id_); - } - - virtual void Load(const capnp::Common::Reader &reader) { - id_ = reader.getStorage(); - } - private: static constexpr IdT Mask = std::numeric_limits<IdT>::max() >> 1; static constexpr IdT NotMask = ~Mask; - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar &id_; - } - IdT id_{0}; }; class Label : public Common<Label> { using Common::Common; - - private: - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar &boost::serialization::base_object<Common<Label>>(*this); - } }; class EdgeType : public Common<EdgeType> { using Common::Common; - - private: - friend class boost::serialization::access; - - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar &boost::serialization::base_object<Common<EdgeType>>(*this); - } }; class Property : public Common<Property> { using Common::Common; - - private: - friend class boost::serialization::access; - template <class TArchive> - void serialize(TArchive &ar, const unsigned int) { - ar &boost::serialization::base_object<Common<Property>>(*this); - } }; }; // namespace storage diff --git a/src/storage/vertex_accessor.cpp b/src/storage/vertex_accessor.cpp index 4793ee655..7cc8e7c29 100644 --- a/src/storage/vertex_accessor.cpp +++ b/src/storage/vertex_accessor.cpp @@ -23,8 +23,6 @@ void VertexAccessor::add_label(storage::Label label) { dba.UpdateLabelIndices(label, *this, &vertex); } } - - if (!is_local()) SendDelta(delta); } void VertexAccessor::remove_label(storage::Label label) { @@ -41,8 +39,6 @@ void VertexAccessor::remove_label(storage::Label label) { dba.wal().Emplace(delta); } } - - if (!is_local()) SendDelta(delta); } bool VertexAccessor::has_label(storage::Label label) const { diff --git a/src/transactions/commit_log.hpp b/src/transactions/commit_log.hpp index 6081cc2f7..0599343b9 100644 --- a/src/transactions/commit_log.hpp +++ b/src/transactions/commit_log.hpp @@ -1,7 +1,6 @@ #pragma once #include "data_structures/bitset/dynamic_bitset.hpp" -#include "transactions/common.capnp.h" #include "transactions/type.hpp" namespace tx { @@ -56,14 +55,6 @@ class CommitLog { operator uint8_t() const { return flags_; } - void Save(capnp::CommitLogInfo::Builder *builder) const { - builder->setFlags(flags_); - } - - void Load(const capnp::CommitLogInfo::Reader &reader) { - flags_ = reader.getFlags(); - } - private: uint8_t flags_{0}; }; diff --git a/src/transactions/common.capnp b/src/transactions/common.capnp deleted file mode 100644 index f9999efe4..000000000 --- a/src/transactions/common.capnp +++ /dev/null @@ -1,12 +0,0 @@ -@0xcdbe169866471033; - -using Cxx = import "/capnp/c++.capnp"; -$Cxx.namespace("tx::capnp"); - -struct Snapshot { - 
transactionIds @0 :List(UInt64); -} - -struct CommitLogInfo { - flags @0 :UInt8; -} diff --git a/src/transactions/engine_master.cpp b/src/transactions/engine_master.cpp deleted file mode 100644 index 6a081b893..000000000 --- a/src/transactions/engine_master.cpp +++ /dev/null @@ -1,98 +0,0 @@ -#include <limits> -#include <mutex> - -#include "glog/logging.h" - -#include "database/state_delta.hpp" -#include "transactions/engine_master.hpp" -#include "transactions/engine_rpc_messages.hpp" - -namespace tx { - -MasterEngine::MasterEngine(communication::rpc::Server &server, - distributed::RpcWorkerClients &rpc_worker_clients, - durability::WriteAheadLog *wal) - : SingleNodeEngine(wal), - rpc_server_(server), - ongoing_produce_joiner_(rpc_worker_clients) { - rpc_server_.Register<BeginRpc>( - [this](const auto &req_reader, auto *res_builder) { - auto tx = this->Begin(); - BeginRes res(TxAndSnapshot{tx->id_, tx->snapshot()}); - res.Save(res_builder); - }); - - rpc_server_.Register<AdvanceRpc>( - [this](const auto &req_reader, auto *res_builder) { - AdvanceRes res(this->Advance(req_reader.getMember())); - res.Save(res_builder); - }); - - rpc_server_.Register<CommitRpc>( - [this](const auto &req_reader, auto *res_builder) { - this->Commit(*this->RunningTransaction(req_reader.getMember())); - }); - - rpc_server_.Register<AbortRpc>( - [this](const auto &req_reader, auto *res_builder) { - this->Abort(*this->RunningTransaction(req_reader.getMember())); - }); - - rpc_server_.Register<SnapshotRpc>( - [this](const auto &req_reader, auto *res_builder) { - // It is guaranteed that the Worker will not be requesting this for a - // transaction that's done, and that there are no race conditions here. - SnapshotRes res( - this->RunningTransaction(req_reader.getMember())->snapshot()); - res.Save(res_builder); - }); - - rpc_server_.Register<CommandRpc>( - [this](const auto &req_reader, auto *res_builder) { - // It is guaranteed that the Worker will not be requesting this for a - // transaction that's done, and that there are no race conditions here. 
- CommandRes res(this->RunningTransaction(req_reader.getMember())->cid()); - res.Save(res_builder); - }); - - rpc_server_.Register<GcSnapshotRpc>( - [this](const auto &req_reader, auto *res_builder) { - GcSnapshotRes res(this->GlobalGcSnapshot()); - res.Save(res_builder); - }); - - rpc_server_.Register<ClogInfoRpc>( - [this](const auto &req_reader, auto *res_builder) { - ClogInfoRes res(this->Info(req_reader.getMember())); - res.Save(res_builder); - }); - - rpc_server_.Register<ActiveTransactionsRpc>( - [this](const auto &req_reader, auto *res_builder) { - ActiveTransactionsRes res(this->GlobalActiveTransactions()); - res.Save(res_builder); - }); - - rpc_server_.Register<EnsureNextIdGreaterRpc>( - [this](const auto &req_reader, auto *res_builder) { - this->EnsureNextIdGreater(req_reader.getMember()); - }); - - rpc_server_.Register<GlobalLastRpc>( - [this](const auto &req_reader, auto *res_builder) { - GlobalLastRes res(this->GlobalLast()); - res.Save(res_builder); - }); -} - -void MasterEngine::Commit(const Transaction &t) { - ongoing_produce_joiner_.JoinOngoingProduces(t.id_); - SingleNodeEngine::Commit(t); -} - -void MasterEngine::Abort(const Transaction &t) { - ongoing_produce_joiner_.JoinOngoingProduces(t.id_); - SingleNodeEngine::Abort(t); -} - -} // namespace tx diff --git a/src/transactions/engine_master.hpp b/src/transactions/engine_master.hpp deleted file mode 100644 index 5dc9b9e95..000000000 --- a/src/transactions/engine_master.hpp +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once - -#include "communication/rpc/server.hpp" -#include "distributed/rpc_worker_clients.hpp" -#include "transactions/engine_single_node.hpp" - -namespace tx { - -/** Distributed master transaction engine. Has complete engine functionality and - * exposes an RPC server to be used by distributed Workers. */ -class MasterEngine : public SingleNodeEngine { - public: - /** - * @param server - Required. Used for rpc::Server construction. - * @param rpc_worker_clients - Required. Used for - * OngoingProduceJoinerRpcClients construction. - * @param wal - Optional. If present, the Engine will write tx - * Begin/Commit/Abort atomically (while under lock). 
- */ - MasterEngine(communication::rpc::Server &server, - distributed::RpcWorkerClients &rpc_worker_clients, - durability::WriteAheadLog *wal = nullptr); - void Commit(const Transaction &t) override; - void Abort(const Transaction &t) override; - - private: - communication::rpc::Server &rpc_server_; - distributed::OngoingProduceJoinerRpcClients ongoing_produce_joiner_; -}; -} // namespace tx diff --git a/src/transactions/engine_rpc_messages.lcp b/src/transactions/engine_rpc_messages.lcp deleted file mode 100644 index 839ad6c57..000000000 --- a/src/transactions/engine_rpc_messages.lcp +++ /dev/null @@ -1,69 +0,0 @@ -#>cpp -#pragma once - -#include "communication/rpc/messages.hpp" -#include "transactions/commit_log.hpp" -#include "transactions/engine_rpc_messages.capnp.h" -#include "transactions/snapshot.hpp" -#include "transactions/type.hpp" -cpp<# - -(lcp:namespace tx) - -(lcp:capnp-namespace "tx") - -(lcp:capnp-import 'tx "/transactions/common.capnp") -(lcp:capnp-type-conversion "TransactionId" "UInt64") -(lcp:capnp-type-conversion "CommandId" "UInt32") -(lcp:capnp-type-conversion "Snapshot" "Tx.Snapshot") - -(lcp:define-struct tx-and-snapshot () - ((tx-id "TransactionId") - (snapshot "Snapshot")) - (:serialize :capnp)) - -(lcp:define-rpc begin - (:request ()) - (:response ((member "TxAndSnapshot")))) - -(lcp:define-rpc advance - (:request ((member "TransactionId"))) - (:response ((member "CommandId")))) - -(lcp:define-rpc commit - (:request ((member "TransactionId"))) - (:response ())) - -(lcp:define-rpc abort - (:request ((member "TransactionId"))) - (:response ())) - -(lcp:define-rpc snapshot - (:request ((member "TransactionId"))) - (:response ((member "Snapshot")))) - -(lcp:define-rpc command - (:request ((member "TransactionId"))) - (:response ((member "CommandId")))) - -(lcp:define-rpc gc-snapshot - (:request ()) - (:response ((member "Snapshot")))) - -(lcp:define-rpc clog-info - (:request ((member "TransactionId"))) - (:response ((member "CommitLog::Info" :capnp-type "Tx.CommitLogInfo")))) - -(lcp:define-rpc active-transactions - (:request ()) - (:response ((member "Snapshot")))) - -(lcp:define-rpc ensure-next-id-greater - (:request ((member "TransactionId"))) - (:response ())) - -(lcp:define-rpc global-last - (:request ()) - (:response ((member "TransactionId")))) - -(lcp:pop-namespace) ;; tx diff --git a/src/transactions/engine_single_node.cpp b/src/transactions/engine_single_node.cpp index 71dedfffa..eb92f07de 100644 --- a/src/transactions/engine_single_node.cpp +++ b/src/transactions/engine_single_node.cpp @@ -4,7 +4,6 @@ #include "glog/logging.h" #include "database/state_delta.hpp" -#include "transactions/engine_rpc_messages.hpp" #include "transactions/engine_single_node.hpp" namespace tx { diff --git a/src/transactions/engine_worker.cpp b/src/transactions/engine_worker.cpp deleted file mode 100644 index d47419123..000000000 --- a/src/transactions/engine_worker.cpp +++ /dev/null @@ -1,191 +0,0 @@ -#include <chrono> - -#include "glog/logging.h" - -#include "transactions/engine_rpc_messages.hpp" -#include "transactions/engine_worker.hpp" -#include "utils/atomic.hpp" - -namespace tx { - -WorkerEngine::WorkerEngine(communication::rpc::ClientPool &master_client_pool) - : master_client_pool_(master_client_pool) {} - -WorkerEngine::~WorkerEngine() { - for (auto &kv : active_.access()) { - delete kv.second; - } -} - -Transaction *WorkerEngine::Begin() { - auto res = master_client_pool_.Call<BeginRpc>(); - CHECK(res) << "BeginRpc failed"; - auto &data = res->member; - 
UpdateOldestActive(data.snapshot, data.tx_id); - Transaction *tx = new Transaction(data.tx_id, data.snapshot, *this); - auto insertion = active_.access().insert(data.tx_id, tx); - CHECK(insertion.second) << "Failed to start creation from worker"; - VLOG(11) << "[Tx] Starting worker transaction " << data.tx_id; - return tx; -} - -CommandId WorkerEngine::Advance(TransactionId tx_id) { - auto res = master_client_pool_.Call<AdvanceRpc>(tx_id); - CHECK(res) << "AdvanceRpc failed"; - auto access = active_.access(); - auto found = access.find(tx_id); - CHECK(found != access.end()) - << "Can't advance a transaction not in local cache"; - found->second->cid_ = res->member; - return res->member; -} - -CommandId WorkerEngine::UpdateCommand(TransactionId tx_id) { - auto res = master_client_pool_.Call<CommandRpc>(tx_id); - CHECK(res) << "CommandRpc failed"; - auto cmd_id = res->member; - - // Assume there is no concurrent work being done on this worker in the given - // transaction. This assumption is sound because command advancing needs to be - // done in a synchronized fashion, while no workers are executing in that - // transaction. That assumption lets us freely modify the command id in the - // cached transaction object, and ensures there are no race conditions on - // caching a transaction object if it wasn't cached already. - - auto access = active_.access(); - auto found = access.find(tx_id); - if (found != access.end()) { - found->second->cid_ = cmd_id; - } - return cmd_id; -} - -void WorkerEngine::Commit(const Transaction &t) { - auto res = master_client_pool_.Call<CommitRpc>(t.id_); - CHECK(res) << "CommitRpc failed"; - VLOG(11) << "[Tx] Commiting worker transaction " << t.id_; - ClearSingleTransaction(t.id_); -} - -void WorkerEngine::Abort(const Transaction &t) { - auto res = master_client_pool_.Call<AbortRpc>(t.id_); - CHECK(res) << "AbortRpc failed"; - VLOG(11) << "[Tx] Aborting worker transaction " << t.id_; - ClearSingleTransaction(t.id_); -} - -CommitLog::Info WorkerEngine::Info(TransactionId tid) const { - auto info = clog_.fetch_info(tid); - // If we don't know the transaction to be commited nor aborted, ask the - // master about it and update the local commit log. - if (!(info.is_aborted() || info.is_committed())) { - // @review: this version of Call is just used because Info has no - // default constructor. 
- auto res = master_client_pool_.Call<ClogInfoRpc>(tid); - CHECK(res) << "ClogInfoRpc failed"; - info = res->member; - if (!info.is_active()) { - if (info.is_committed()) clog_.set_committed(tid); - if (info.is_aborted()) clog_.set_aborted(tid); - ClearSingleTransaction(tid); - } - } - - return info; -} - -Snapshot WorkerEngine::GlobalGcSnapshot() { - auto res = master_client_pool_.Call<GcSnapshotRpc>(); - CHECK(res) << "GcSnapshotRpc failed"; - auto snapshot = std::move(res->member); - UpdateOldestActive(snapshot, local_last_.load()); - return snapshot; -} - -Snapshot WorkerEngine::GlobalActiveTransactions() { - auto res = master_client_pool_.Call<ActiveTransactionsRpc>(); - CHECK(res) << "ActiveTransactionsRpc failed"; - auto snapshot = std::move(res->member); - UpdateOldestActive(snapshot, local_last_.load()); - return snapshot; -} - -TransactionId WorkerEngine::LocalLast() const { return local_last_; } -TransactionId WorkerEngine::GlobalLast() const { - auto res = master_client_pool_.Call<GlobalLastRpc>(); - CHECK(res) << "GlobalLastRpc failed"; - return res->member; -} - -void WorkerEngine::LocalForEachActiveTransaction( - std::function<void(Transaction &)> f) { - for (auto pair : active_.access()) f(*pair.second); -} - -TransactionId WorkerEngine::LocalOldestActive() const { return oldest_active_; } - -Transaction *WorkerEngine::RunningTransaction(TransactionId tx_id) { - auto accessor = active_.access(); - auto found = accessor.find(tx_id); - if (found != accessor.end()) return found->second; - - auto res = master_client_pool_.Call<SnapshotRpc>(tx_id); - CHECK(res) << "SnapshotRpc failed"; - auto snapshot = std::move(res->member); - UpdateOldestActive(snapshot, local_last_.load()); - return RunningTransaction(tx_id, snapshot); -} - -Transaction *WorkerEngine::RunningTransaction(TransactionId tx_id, - const Snapshot &snapshot) { - auto accessor = active_.access(); - auto found = accessor.find(tx_id); - if (found != accessor.end()) return found->second; - - auto new_tx = new Transaction(tx_id, snapshot, *this); - auto insertion = accessor.insert(tx_id, new_tx); - if (!insertion.second) delete new_tx; - utils::EnsureAtomicGe(local_last_, tx_id); - return insertion.first->second; -} - -void WorkerEngine::ClearTransactionalCache(TransactionId oldest_active) const { - auto access = active_.access(); - for (auto kv : access) { - if (kv.first < oldest_active) { - auto transaction_ptr = kv.second; - if (access.remove(kv.first)) { - delete transaction_ptr; - } - } - } -} - -void WorkerEngine::ClearSingleTransaction(TransactionId tx_id) const { - auto access = active_.access(); - auto found = access.find(tx_id); - if (found != access.end()) { - auto transaction_ptr = found->second; - if (access.remove(found->first)) { - delete transaction_ptr; - } - } -} - -void WorkerEngine::UpdateOldestActive(const Snapshot &snapshot, - TransactionId alternative) { - if (snapshot.empty()) { - oldest_active_.store(std::max(alternative, oldest_active_.load())); - } else { - oldest_active_.store(snapshot.front()); - } -} - -void WorkerEngine::EnsureNextIdGreater(TransactionId tx_id) { - master_client_pool_.Call<EnsureNextIdGreaterRpc>(tx_id); -} - -void WorkerEngine::GarbageCollectCommitLog(TransactionId tx_id) { - clog_.garbage_collect_older(tx_id); -} -} // namespace tx diff --git a/src/transactions/engine_worker.hpp b/src/transactions/engine_worker.hpp deleted file mode 100644 index 65023ee30..000000000 --- a/src/transactions/engine_worker.hpp +++ /dev/null @@ -1,73 +0,0 @@ -#pragma once - -#include <atomic> 
- -#include "communication/rpc/client_pool.hpp" -#include "data_structures/concurrent/concurrent_map.hpp" -#include "io/network/endpoint.hpp" -#include "transactions/commit_log.hpp" -#include "transactions/engine.hpp" -#include "transactions/transaction.hpp" - -namespace tx { - -/** Distributed worker transaction engine. Connects to a MasterEngine (single - * source of truth) to obtain transactional info. Caches most info locally. Can - * begin/advance/end transactions on the master. */ -class WorkerEngine : public Engine { - public: - /// The wait time between two releases of local transaction objects that have - /// expired on the master. - static constexpr std::chrono::seconds kCacheReleasePeriod{1}; - - explicit WorkerEngine(communication::rpc::ClientPool &master_client_pool); - ~WorkerEngine(); - - Transaction *Begin() override; - CommandId Advance(TransactionId id) override; - CommandId UpdateCommand(TransactionId id) override; - void Commit(const Transaction &t) override; - void Abort(const Transaction &t) override; - CommitLog::Info Info(TransactionId tid) const override; - Snapshot GlobalGcSnapshot() override; - Snapshot GlobalActiveTransactions() override; - TransactionId GlobalLast() const override; - TransactionId LocalLast() const override; - void LocalForEachActiveTransaction( - std::function<void(Transaction &)> f) override; - TransactionId LocalOldestActive() const override; - Transaction *RunningTransaction(TransactionId tx_id) override; - - // Caches the transaction for the given info an returs a ptr to it. - Transaction *RunningTransaction(TransactionId tx_id, - const Snapshot &snapshot); - - void EnsureNextIdGreater(TransactionId tx_id) override; - void GarbageCollectCommitLog(tx::TransactionId tx_id) override; - - /// Clears the cache of local transactions that have expired. The signature of - /// this method is dictated by `distributed::TransactionalCacheCleaner`. - void ClearTransactionalCache(TransactionId oldest_active) const; - - private: - // Local caches. - mutable ConcurrentMap<TransactionId, Transaction *> active_; - std::atomic<TransactionId> local_last_{0}; - // Mutable because just getting info can cause a cache fill. - mutable CommitLog clog_; - - // Communication to the transactional master. - communication::rpc::ClientPool &master_client_pool_; - - // Used for clearing of caches of transactions that have expired. - // Initialize the oldest_active_ with 1 because there's never a tx with id=0 - std::atomic<TransactionId> oldest_active_{1}; - - // Removes a single transaction from the cache, if present. - void ClearSingleTransaction(TransactionId tx_Id) const; - - // Updates the oldest active transaction to the one from the snapshot. If the - // snapshot is empty, it's set to the given alternative. 
- void UpdateOldestActive(const Snapshot &snapshot, TransactionId alternative); -}; -} // namespace tx diff --git a/src/transactions/snapshot.cpp b/src/transactions/snapshot.cpp deleted file mode 100644 index 134259566..000000000 --- a/src/transactions/snapshot.cpp +++ /dev/null @@ -1,16 +0,0 @@ -#include "transactions/snapshot.hpp" - -#include "utils/serialization.hpp" - -namespace tx { - -void Snapshot::Save(capnp::Snapshot::Builder *builder) const { - auto list_builder = builder->initTransactionIds(transaction_ids_.size()); - utils::SaveVector(transaction_ids_, &list_builder); -} - -void Snapshot::Load(const capnp::Snapshot::Reader &reader) { - utils::LoadVector(&transaction_ids_, reader.getTransactionIds()); -} - -} // namespace tx diff --git a/src/transactions/snapshot.hpp b/src/transactions/snapshot.hpp index bb2549282..e346d89b5 100644 --- a/src/transactions/snapshot.hpp +++ b/src/transactions/snapshot.hpp @@ -5,7 +5,6 @@ #include <vector> #include "glog/logging.h" -#include "transactions/common.capnp.h" #include "transactions/type.hpp" #include "utils/algorithm.hpp" @@ -84,9 +83,6 @@ class Snapshot { return stream; } - void Save(capnp::Snapshot::Builder *builder) const; - void Load(const capnp::Snapshot::Reader &reader); - private: std::vector<TransactionId> transaction_ids_; }; diff --git a/src/utils/serialization.capnp b/src/utils/serialization.capnp deleted file mode 100644 index a25111477..000000000 --- a/src/utils/serialization.capnp +++ /dev/null @@ -1,97 +0,0 @@ -@0xe7647d63b36c2c65; - -using Cxx = import "/capnp/c++.capnp"; - -$Cxx.namespace("utils::capnp"); - -# Primitive type wrappers - -struct BoxInt16 { - value @0 :Int16; -} - -struct BoxInt32 { - value @0 :Int32; -} - -struct BoxInt64 { - value @0 :Int64; -} - -struct BoxUInt16 { - value @0 :UInt16; -} - -struct BoxUInt32 { - value @0 :UInt32; -} - -struct BoxUInt64 { - value @0 :UInt64; -} - -struct BoxFloat32 { - value @0 :Float32; -} - -struct BoxFloat64 { - value @0 :Float64; -} - -struct BoxBool { - value @0 :Bool; -} - -# C++ STL types - -struct Optional(T) { - union { - nullopt @0 :Void; - value @1 :T; - } -} - -struct UniquePtr(T) { - union { - nullptr @0 :Void; - value @1 :T; - } -} - -struct SharedPtr(T) { - union { - nullptr @0 :Void; - entry @1 :Entry; - } - - struct Entry { - id @0 :UInt64; - value @1 :T; - } -} - -struct Map(K, V) { - entries @0 :List(Entry); - - struct Entry { - key @0 :K; - value @1 :V; - } -} - -struct Pair(First, Second) { - first @0 :First; - second @1 :Second; -} - -# Our types - -struct Bound(T) { - type @0 :Type; - value @1 :T; - - enum Type { - inclusive @0; - exclusive @1; - } -} diff --git a/src/utils/serialization.hpp b/src/utils/serialization.hpp deleted file mode 100644 index 48a9cc8dd..000000000 --- a/src/utils/serialization.hpp +++ /dev/null @@ -1,489 +0,0 @@ -#pragma once - -#include <experimental/optional> - -#include "boost/serialization/optional.hpp" -#include "boost/serialization/serialization.hpp" -#include "boost/serialization/split_free.hpp" - -#include "distributed/serialization.capnp.h" -#include "query/typed_value.hpp" -#include "storage/edge.hpp" -#include "storage/vertex.hpp" -#include "utils/exceptions.hpp" -#include "utils/serialization.capnp.h" - -namespace boost::serialization { - -namespace { - -template <size_t idx, class TArchive, class... 
Elements> -void tuple_serialization_helper(TArchive &ar, std::tuple<Elements...> &tup) { - if constexpr (idx < sizeof...(Elements)) { - ar &std::get<idx>(tup); - tuple_serialization_helper<idx + 1, TArchive, Elements...>(ar, tup); - } -} - -} // namespace - -template <class TArchive, class... Elements> -inline void serialize(TArchive &ar, std::tuple<Elements...> &tup, - unsigned int) { - tuple_serialization_helper<0, TArchive, Elements...>(ar, tup); -} - -template <class TArchive, class T> -inline void serialize(TArchive &ar, std::experimental::optional<T> &opt, - unsigned int version) { - split_free(ar, opt, version); -} - -template <class TArchive, class T> -void save(TArchive &ar, const std::experimental::optional<T> &opt, - unsigned int) { - ar << static_cast<bool>(opt); - if (opt) { - ar << *opt; - } -} - -template <class TArchive, class T> -void load(TArchive &ar, std::experimental::optional<T> &opt, - unsigned int version) { - bool has_value; - ar >> has_value; - if (has_value) { - detail::stack_construct<TArchive, T> tmp(ar, version); - ar >> tmp.reference(); - opt = std::move(tmp.reference()); - } else { - opt = std::experimental::nullopt; - } -} - -} // namespace boost::serialization - -namespace utils { - -inline void SaveCapnpTypedValue( - const query::TypedValue &value, - distributed::capnp::TypedValue::Builder *builder, - std::function<void(const query::TypedValue &, - distributed::capnp::TypedValue::Builder *)> - save_graph_element = nullptr) { - switch (value.type()) { - case query::TypedValue::Type::Null: - builder->setNullType(); - return; - case query::TypedValue::Type::Bool: - builder->setBool(value.Value<bool>()); - return; - case query::TypedValue::Type::Int: - builder->setInteger(value.Value<int64_t>()); - return; - case query::TypedValue::Type::Double: - builder->setDouble(value.Value<double>()); - return; - case query::TypedValue::Type::String: - builder->setString(value.Value<std::string>()); - return; - case query::TypedValue::Type::List: { - const auto &values = value.Value<std::vector<query::TypedValue>>(); - auto list_builder = builder->initList(values.size()); - for (size_t i = 0; i < values.size(); ++i) { - auto value_builder = list_builder[i]; - SaveCapnpTypedValue(values[i], &value_builder, save_graph_element); - } - return; - } - case query::TypedValue::Type::Map: { - const auto &map = value.Value<std::map<std::string, query::TypedValue>>(); - auto map_builder = builder->initMap(map.size()); - size_t i = 0; - for (const auto &kv : map) { - auto kv_builder = map_builder[i]; - kv_builder.setKey(kv.first); - auto value_builder = kv_builder.initValue(); - SaveCapnpTypedValue(kv.second, &value_builder, save_graph_element); - ++i; - } - return; - } - case query::TypedValue::Type::Vertex: - case query::TypedValue::Type::Edge: - case query::TypedValue::Type::Path: - if (save_graph_element) { - save_graph_element(value, builder); - } else { - throw utils::BasicException( - "Unable to serialize TypedValue of type: {}", value.type()); - } - } -} - -inline void LoadCapnpTypedValue( - const distributed::capnp::TypedValue::Reader &reader, - query::TypedValue *value, - std::function<void(const distributed::capnp::TypedValue::Reader &, - query::TypedValue *)> - load_graph_element = nullptr) { - switch (reader.which()) { - case distributed::capnp::TypedValue::NULL_TYPE: - *value = query::TypedValue::Null; - return; - case distributed::capnp::TypedValue::BOOL: - *value = reader.getBool(); - return; - case distributed::capnp::TypedValue::INTEGER: - *value = 
reader.getInteger(); - return; - case distributed::capnp::TypedValue::DOUBLE: - *value = reader.getDouble(); - return; - case distributed::capnp::TypedValue::STRING: - *value = reader.getString().cStr(); - return; - case distributed::capnp::TypedValue::LIST: { - std::vector<query::TypedValue> list; - list.reserve(reader.getList().size()); - for (const auto &value_reader : reader.getList()) { - list.emplace_back(); - LoadCapnpTypedValue(value_reader, &list.back(), load_graph_element); - } - *value = list; - return; - } - case distributed::capnp::TypedValue::MAP: { - std::map<std::string, query::TypedValue> map; - for (const auto &kv_reader : reader.getMap()) { - auto key = kv_reader.getKey().cStr(); - LoadCapnpTypedValue(kv_reader.getValue(), &map[key], - load_graph_element); - } - *value = map; - return; - } - case distributed::capnp::TypedValue::VERTEX: - case distributed::capnp::TypedValue::EDGE: - case distributed::capnp::TypedValue::PATH: - if (load_graph_element) { - load_graph_element(reader, value); - } else { - throw utils::BasicException( - "Unexpected TypedValue type '{}' when loading from archive", - reader.which()); - } - } -} - -template <typename T> -inline void SaveVector(const std::vector<T> &data, - typename ::capnp::List<T>::Builder *list_builder) { - for (size_t i = 0; i < data.size(); ++i) { - list_builder->set(i, data[i]); - } -} - -inline void SaveVector(const std::vector<std::string> &data, - ::capnp::List<::capnp::Text>::Builder *list_builder) { - for (size_t i = 0; i < data.size(); ++i) { - list_builder->set(i, data[i]); - } -} - -template <typename T> -inline void LoadVector(std::vector<T> *data, - const typename ::capnp::List<T>::Reader &list_reader) { - for (const auto e : list_reader) { - data->emplace_back(e); - } -} - -inline void LoadVector( - std::vector<std::string> *data, - const typename ::capnp::List<::capnp::Text>::Reader &list_reader) { - for (const auto e : list_reader) { - data->emplace_back(e); } -} - -template <typename TCapnp, typename T> -inline void SaveVector( - const std::vector<T> &data, - typename ::capnp::List<TCapnp>::Builder *list_builder, - const std::function<void(typename TCapnp::Builder *, const T &)> &save) { - for (size_t i = 0; i < data.size(); ++i) { - auto elem_builder = (*list_builder)[i]; - save(&elem_builder, data[i]); - } -} - -template <typename TCapnp, typename T> -inline void LoadVector( - std::vector<T> *data, - const typename ::capnp::List<TCapnp>::Reader &list_reader, - const std::function<T(const typename TCapnp::Reader &reader)> &load) { - for (const auto reader : list_reader) { - data->emplace_back(load(reader)); - } -} - -template <class TCapnpKey, class TCapnpValue, class TMap> -void SaveMap(const TMap &map, - typename capnp::Map<TCapnpKey, TCapnpValue>::Builder *map_builder, - std::function<void( - typename capnp::Map<TCapnpKey, TCapnpValue>::Entry::Builder *, - const typename TMap::value_type &)> - save) { - auto entries_builder = map_builder->initEntries(map.size()); - size_t i = 0; - for (const auto &entry : map) { - auto entry_builder = entries_builder[i]; - save(&entry_builder, entry); - ++i; - } -} - -template <class TCapnpKey, class TCapnpValue, class TMap> -void LoadMap( - TMap *map, - const typename capnp::Map<TCapnpKey, TCapnpValue>::Reader &map_reader, - std::function<typename TMap::value_type( - const typename capnp::Map<TCapnpKey, TCapnpValue>::Entry::Reader &)> - load) { - for (const auto &entry_reader : map_reader.getEntries()) { - map->insert(load(entry_reader)); - } -} - -template <typename 
TCapnp, typename T> -inline void SaveOptional( - const std::experimental::optional<T> &data, - typename capnp::Optional<TCapnp>::Builder *builder, - const std::function<void(typename TCapnp::Builder *, const T &)> &save) { - if (data) { - auto value_builder = builder->initValue(); - save(&value_builder, data.value()); - } else { - builder->setNullopt(); - } -} - -template <typename TCapnp, typename T> -inline std::experimental::optional<T> LoadOptional( - const typename capnp::Optional<TCapnp>::Reader &reader, - const std::function<T(const typename TCapnp::Reader &reader)> &load) { - switch (reader.which()) { - case capnp::Optional<TCapnp>::NULLOPT: - return std::experimental::nullopt; - case capnp::Optional<TCapnp>::VALUE: - auto value_reader = reader.getValue(); - return std::experimental::optional<T>{load(value_reader)}; - } -} - -template <typename TCapnp, typename T> -inline void SaveUniquePtr( - const std::unique_ptr<T> &data, - typename capnp::UniquePtr<TCapnp>::Builder *builder, - const std::function<void(typename TCapnp::Builder *, const T &)> &save) { - if (data) { - auto value_builder = builder->initValue(); - save(&value_builder, *data); - } else { - builder->setNullptr(); - } -} - -template <typename TCapnp, typename T> -inline std::unique_ptr<T> LoadUniquePtr( - const typename capnp::UniquePtr<TCapnp>::Reader &reader, - const std::function<T*(const typename TCapnp::Reader &reader)> &load) { - switch (reader.which()) { - case capnp::UniquePtr<TCapnp>::NULLPTR: - return nullptr; - case capnp::UniquePtr<TCapnp>::VALUE: - auto value_reader = reader.getValue(); - return std::unique_ptr<T>(load(value_reader)); - } -} - -template <typename TCapnp, typename T> -inline void SaveSharedPtr( - const std::shared_ptr<T> &data, - typename capnp::SharedPtr<TCapnp>::Builder *builder, - const std::function<void(typename TCapnp::Builder *, const T &)> &save, - std::vector<T *> *saved_pointers) { - if (!data) { - builder->setNullptr(); - return; - } - auto entry_builder = builder->initEntry(); - auto pointer_id = reinterpret_cast<uintptr_t>(data.get()); - CHECK(pointer_id <= std::numeric_limits<uint64_t>::max()); - entry_builder.setId(pointer_id); - if (utils::Contains(*saved_pointers, data.get())) { - return; - } - auto value_builder = entry_builder.initValue(); - save(&value_builder, *data); - saved_pointers->emplace_back(data.get()); -} - -template <typename TCapnp, typename T> -std::shared_ptr<T> LoadSharedPtr( - const typename capnp::SharedPtr<TCapnp>::Reader &reader, - const std::function<T *(const typename TCapnp::Reader &reader)> &load, - std::vector<std::pair<uint64_t, std::shared_ptr<T>>> *loaded_pointers) { - std::shared_ptr<T> ret; - switch (reader.which()) { - case capnp::SharedPtr<TCapnp>::NULLPTR: - ret = nullptr; - break; - case capnp::SharedPtr<TCapnp>::ENTRY: - auto entry_reader = reader.getEntry(); - uint64_t pointer_id = entry_reader.getId(); - auto found = - std::find_if(loaded_pointers->begin(), loaded_pointers->end(), - [pointer_id](const auto &e) -> bool { - return e.first == pointer_id; - }); - if (found != loaded_pointers->end()) return found->second; - auto value_reader = entry_reader.getValue(); - ret = std::shared_ptr<T>(load(value_reader)); - loaded_pointers->emplace_back(std::make_pair(pointer_id, ret)); - } - return ret; -} - -/** - * Saves the given value into the given Boost archive. The optional - * `save_graph_element` function is called if the given `value` is a - * [Vertex|Edge|Path]. 
If that function is not provided, and `value` is one of - * those, an exception is thrown. - */ -template <class TArchive> -void SaveTypedValue( - TArchive &ar, const query::TypedValue &value, - std::function<void(TArchive &ar, const query::TypedValue &value)> - save_graph_element = nullptr) { - ar << value.type(); - switch (value.type()) { - case query::TypedValue::Type::Null: - return; - case query::TypedValue::Type::Bool: - ar << value.Value<bool>(); - return; - case query::TypedValue::Type::Int: - ar << value.Value<int64_t>(); - return; - case query::TypedValue::Type::Double: - ar << value.Value<double>(); - return; - case query::TypedValue::Type::String: - ar << value.Value<std::string>(); - return; - case query::TypedValue::Type::List: { - const auto &values = value.Value<std::vector<query::TypedValue>>(); - ar << values.size(); - for (const auto &v : values) { - SaveTypedValue(ar, v, save_graph_element); - } - return; - } - case query::TypedValue::Type::Map: { - const auto &map = value.Value<std::map<std::string, query::TypedValue>>(); - ar << map.size(); - for (const auto &key_value : map) { - ar << key_value.first; - SaveTypedValue(ar, key_value.second, save_graph_element); - } - return; - } - case query::TypedValue::Type::Vertex: - case query::TypedValue::Type::Edge: - case query::TypedValue::Type::Path: - if (save_graph_element) { - save_graph_element(ar, value); - } else { - throw utils::BasicException("Unable to archive TypedValue of type: {}", - value.type()); - } - } -} - -/** Loads a typed value into the given reference from the given archive. The - * optional `load_graph_element` function is called if a [Vertex|Edge|Path] - * TypedValue should be unarchived. If that function is not provided, and - * `value` is one of those, an exception is thrown. 
- */ -template <class TArchive> -void LoadTypedValue(TArchive &ar, query::TypedValue &value, - std::function<void(TArchive &ar, query::TypedValue::Type, - query::TypedValue &)> - load_graph_element = nullptr) { - query::TypedValue::Type type = query::TypedValue::Type::Null; - ar >> type; - switch (type) { - case query::TypedValue::Type::Null: - return; - case query::TypedValue::Type::Bool: { - bool v; - ar >> v; - value = v; - return; - } - case query::TypedValue::Type::Int: { - int64_t v; - ar >> v; - value = v; - return; - } - case query::TypedValue::Type::Double: { - double v; - ar >> v; - value = v; - return; - } - case query::TypedValue::Type::String: { - std::string v; - ar >> v; - value = v; - return; - } - case query::TypedValue::Type::List: { - value = std::vector<query::TypedValue>{}; - auto &list = value.ValueList(); - size_t size; - ar >> size; - list.reserve(size); - for (size_t i = 0; i < size; ++i) { - list.emplace_back(); - LoadTypedValue(ar, list.back(), load_graph_element); - } - return; - } - case query::TypedValue::Type::Map: { - value = std::map<std::string, query::TypedValue>{}; - auto &map = value.ValueMap(); - size_t size; - ar >> size; - for (size_t i = 0; i < size; ++i) { - std::string key; - ar >> key; - LoadTypedValue(ar, map[key], load_graph_element); - } - return; - } - case query::TypedValue::Type::Vertex: - case query::TypedValue::Type::Edge: - case query::TypedValue::Type::Path: - if (load_graph_element) { - load_graph_element(ar, type, value); - } else { - throw utils::BasicException( - "Unexpected TypedValue type '{}' when loading from archive", type); - } - } -} -} // namespace utils diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 7e1468218..aa8fe5214 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -25,8 +25,5 @@ add_subdirectory(unit) # property based test binaries add_subdirectory(property_based) -# raft binaries -add_subdirectory(distributed/raft) - # integration test binaries add_subdirectory(integration) diff --git a/tests/benchmark/serialization.cpp b/tests/benchmark/serialization.cpp deleted file mode 100644 index ba417217b..000000000 --- a/tests/benchmark/serialization.cpp +++ /dev/null @@ -1,143 +0,0 @@ -#include <sstream> -#include <string> - -#include <benchmark/benchmark.h> - -#include "boost/archive/binary_iarchive.hpp" -#include "boost/archive/binary_oarchive.hpp" -#include "boost/serialization/vector.hpp" - -#include <capnp/serialize.h> -#include <kj/std/iostream.h> - -#include "query/frontend/semantic/symbol.capnp.h" -#include "query/frontend/semantic/symbol.hpp" - -class SymbolVectorFixture : public benchmark::Fixture { - protected: - std::vector<query::Symbol> symbols_; - - void SetUp(const benchmark::State &state) override { - using Type = ::query::Symbol::Type; - std::vector<Type> types{Type::Any, Type::Vertex, Type::Edge, - Type::Path, Type::Number, Type::EdgeList}; - symbols_.reserve(state.range(0)); - for (int i = 0; i < state.range(0); ++i) { - std::string name = "Symbol " + std::to_string(i); - bool user_declared = i % 2; - auto type = types[i % types.size()]; - symbols_.emplace_back(name, i, user_declared, type, i); - } - } - - void TearDown(const benchmark::State &) override { symbols_.clear(); } -}; - -BENCHMARK_DEFINE_F(SymbolVectorFixture, BoostSerial)(benchmark::State &state) { - while (state.KeepRunning()) { - std::stringstream stream(std::ios_base::out | std::ios_base::binary); - { - boost::archive::binary_oarchive archive(stream); - archive << symbols_; - } - } -} - 
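The SaveTypedValue/LoadTypedValue helpers removed above follow a simple contract: write a type tag first, then the payload, and hand Vertex/Edge/Path values to an optional callback, throwing if no callback is supplied. Below is a minimal, self-contained sketch of that pattern; it is not taken from the patch, and every name in it (MiniValue, Vertexish, SaveMini, LoadMini) is made up for illustration rather than a Memgraph API.

```cpp
// Illustrative sketch only -- not part of this patch or the Memgraph codebase.
// Assumed/made-up names: MiniValue, Vertexish, SaveMini, LoadMini.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <istream>
#include <ostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <variant>

struct Vertexish {};  // stand-in for a graph element (Vertex/Edge/Path)
using MiniValue = std::variant<std::monostate, bool, int64_t, std::string, Vertexish>;

// Writes a type tag first, then the payload; graph elements are delegated to an
// optional callback, and the call fails loudly if no callback was supplied.
void SaveMini(std::ostream &os, const MiniValue &v,
              const std::function<void(std::ostream &, const Vertexish &)>
                  &save_graph_element = nullptr) {
  os << v.index() << '\n';
  switch (v.index()) {
    case 0: return;  // null: the tag alone is enough
    case 1: os << std::get<bool>(v) << '\n'; return;
    case 2: os << std::get<int64_t>(v) << '\n'; return;
    case 3: os << std::get<std::string>(v) << '\n'; return;
    case 4:
      if (save_graph_element) { save_graph_element(os, std::get<Vertexish>(v)); return; }
      throw std::runtime_error("Unable to serialize graph element: no callback given");
  }
}

// Reads the tag, then dispatches on it; mirrors the save side, including the
// optional callback for graph elements.
MiniValue LoadMini(std::istream &is,
                   const std::function<Vertexish(std::istream &)>
                       &load_graph_element = nullptr) {
  std::size_t tag = 0;
  is >> tag;
  switch (tag) {
    case 0: return std::monostate{};
    case 1: { bool b; is >> b; return b; }
    case 2: { int64_t i; is >> i; return i; }
    case 3: { std::string s; is >> s; return s; }  // single-token strings only in this sketch
    case 4:
      if (load_graph_element) return load_graph_element(is);
      throw std::runtime_error("Unable to load graph element: no callback given");
  }
  throw std::runtime_error("Unknown type tag");
}

int main() {
  std::stringstream buffer;
  SaveMini(buffer, MiniValue{int64_t{42}});
  MiniValue round_tripped = LoadMini(buffer);  // holds int64_t{42} again
  return std::get<int64_t>(round_tripped) == 42 ? 0 : 1;
}
```

Writing the tag before the payload keeps the stream self-describing, which is what lets the loader dispatch in a single switch and reject unknown or unsupported types early.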
-BENCHMARK_DEFINE_F(SymbolVectorFixture, BoostDeserial) -(benchmark::State &state) { - auto serialize = [this]() { - std::stringstream stream(std::ios_base::in | std::ios_base::out | - std::ios_base::binary); - { - boost::archive::binary_oarchive archive(stream); - archive << symbols_; - } - return stream; - }; - while (state.KeepRunning()) { - state.PauseTiming(); - auto stream = serialize(); - state.ResumeTiming(); - std::vector<query::Symbol> symbols; - { - boost::archive::binary_iarchive archive(stream); - archive >> symbols; - } - } -} - -void SymbolVectorToCapnpMessage(const std::vector<query::Symbol> &symbols, - capnp::MessageBuilder &message) { - auto symbols_builder = - message.initRoot<capnp::List<query::capnp::Symbol>>(symbols.size()); - for (int i = 0; i < symbols.size(); ++i) { - const auto &sym = symbols[i]; - query::capnp::Symbol::Builder sym_builder = symbols_builder[i]; - sym_builder.setName(sym.name()); - sym_builder.setPosition(sym.position()); - sym_builder.setType(query::capnp::Symbol::Type::ANY); - sym_builder.setUserDeclared(sym.user_declared()); - sym_builder.setTokenPosition(sym.token_position()); - } -} - -std::stringstream SerializeCapnpSymbolVector( - const std::vector<query::Symbol> &symbols) { - std::stringstream stream(std::ios_base::in | std::ios_base::out | - std::ios_base::binary); - { - capnp::MallocMessageBuilder message; - SymbolVectorToCapnpMessage(symbols, message); - kj::std::StdOutputStream std_stream(stream); - kj::BufferedOutputStreamWrapper buffered_stream(std_stream); - writeMessage(buffered_stream, message); - } - return stream; -} - -BENCHMARK_DEFINE_F(SymbolVectorFixture, CapnpSerial)(benchmark::State &state) { - while (state.KeepRunning()) { - SerializeCapnpSymbolVector(symbols_); - } -} - -BENCHMARK_DEFINE_F(SymbolVectorFixture, CapnpDeserial) -(benchmark::State &state) { - while (state.KeepRunning()) { - state.PauseTiming(); - auto stream = SerializeCapnpSymbolVector(symbols_); - state.ResumeTiming(); - kj::std::StdInputStream std_stream(stream); - capnp::InputStreamMessageReader message(std_stream); - auto symbols_reader = message.getRoot<capnp::List<query::capnp::Symbol>>(); - std::vector<query::Symbol> symbols; - symbols.reserve(symbols_reader.size()); - for (const auto &sym : symbols_reader) { - symbols.emplace_back(sym.getName().cStr(), sym.getPosition(), - sym.getUserDeclared(), query::Symbol::Type::Any, - sym.getTokenPosition()); - } - } -} - -BENCHMARK_REGISTER_F(SymbolVectorFixture, BoostSerial) - ->RangeMultiplier(4) - ->Range(4, 1 << 12) - ->Unit(benchmark::kNanosecond); - -BENCHMARK_REGISTER_F(SymbolVectorFixture, CapnpSerial) - ->RangeMultiplier(4) - ->Range(4, 1 << 12) - ->Unit(benchmark::kNanosecond); - -BENCHMARK_REGISTER_F(SymbolVectorFixture, BoostDeserial) - ->RangeMultiplier(4) - ->Range(4, 1 << 12) - ->Unit(benchmark::kNanosecond); - -BENCHMARK_REGISTER_F(SymbolVectorFixture, CapnpDeserial) - ->RangeMultiplier(4) - ->Range(4, 1 << 12) - ->Unit(benchmark::kNanosecond); - -BENCHMARK_MAIN(); diff --git a/tests/distributed/card_fraud/.gitignore b/tests/distributed/card_fraud/.gitignore deleted file mode 100644 index 6a72bbd8f..000000000 --- a/tests/distributed/card_fraud/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -output -snapshots diff --git a/tests/distributed/card_fraud/apollo_runs.py b/tests/distributed/card_fraud/apollo_runs.py deleted file mode 100755 index 0301f4572..000000000 --- a/tests/distributed/card_fraud/apollo_runs.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python3 -import json -import os -import re 
-import subprocess - -from card_fraud import NUM_MACHINES, BINARIES - -# paths -SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -WORKSPACE_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", "..", "..", "..")) -OUTPUT_DIR_REL = os.path.join(os.path.relpath(SCRIPT_DIR, WORKSPACE_DIR), "output") - -# generate runs -runs = [] - -binaries = list(map(lambda x: os.path.join("..", "..", "build_release", x), BINARIES)) - -for i in range(NUM_MACHINES): - name = "master" if i == 0 else "worker" + str(i) - additional = ["master.py"] if i == 0 else [] - outfile_paths = ["\\./" + OUTPUT_DIR_REL + "/.+"] if i == 0 else [] - if i == 0: - cmd = "master.py" - args = "--machines-num {0} --test-suite card_fraud " \ - "--test card_fraud".format(NUM_MACHINES) - else: - cmd = "jail_service.py" - args = "" - runs.append({ - "name": "distributed__card_fraud__" + name, - "cd": "..", - "supervisor": cmd, - "arguments": args, - "infiles": binaries + [ - "common.py", - "jail_service.py", - "card_fraud/card_fraud.py", - "card_fraud/snapshots/worker_" + str(i), - ] + additional, - "outfile_paths": outfile_paths, - "parallel_run": "distributed__card_fraud", - "slave_group": "remote_4c32g", - "enable_network": True, - }) - -print(json.dumps(runs, indent=4, sort_keys=True)) diff --git a/tests/distributed/card_fraud/card_fraud.py b/tests/distributed/card_fraud/card_fraud.py deleted file mode 100644 index ba657bd17..000000000 --- a/tests/distributed/card_fraud/card_fraud.py +++ /dev/null @@ -1,223 +0,0 @@ -import json -import os -import time - -# to change the size of the cluster, just change this parameter -NUM_MACHINES = 3 - -# test setup -SCENARIOS = ["point_lookup", "create_tx"] -DURATION = 300 -WORKERS = 6 - -# constants -SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -MEMGRAPH_BINARY = "memgraph" -CLIENT_BINARY = "tests/macro_benchmark/card_fraud_client" -BINARIES = [MEMGRAPH_BINARY, CLIENT_BINARY] - -# wrappers -class WorkerWrapper: - def __init__(self, address, worker): - self._address = address - self._worker = worker - self._tid = worker.get_jail() - - def get_address(self): - return self._address - - def __getattr__(self, name): - if name in ["allocate_file", "read_file", "store_label"]: - return getattr(self._worker, name) - def func(*args, **kwargs): - args = [self._tid] + list(args) - return getattr(self._worker, name)(*args, **kwargs) - return func - -class MgCluster: - def __init__(self, machine_ids, workers): - # create wrappers - self._master = WorkerWrapper(os.environ[machine_ids[0]], - workers[machine_ids[0]]) - self._workers = [] - for machine_id in machine_ids[1:]: - self._workers.append(WorkerWrapper(os.environ[machine_id], - workers[machine_id])) - - def start(self): - # start memgraph master - self._master.start(MEMGRAPH_BINARY, [ - "--master", - "--master-host", self._master.get_address(), - "--master-port", "10000", - "--durability-directory", os.path.join(SCRIPT_DIR, "snapshots", - "worker_0"), - "--db-recover-on-startup", - "--query-vertex-count-to-expand-existing", "-1", - "--num-workers", str(WORKERS), - "--rpc-num-workers", str(WORKERS), - "--recovering-cluster-size", str(len(self._workers) + 1) - ]) - - # sleep to allow the master to startup - time.sleep(5) - - # start memgraph workers - for i, worker in enumerate(self._workers, start=1): - worker.start(MEMGRAPH_BINARY, [ - "--worker", "--worker-id", str(i), - "--worker-host", worker.get_address(), - "--worker-port", str(10000 + i), - "--master-host", self._master.get_address(), - "--master-port", "10000", - 
"--durability-directory", os.path.join(SCRIPT_DIR, "snapshots", - "worker_" + str(i)), - "--db-recover-on-startup", - "--num-workers", str(WORKERS), - "--rpc-num-workers", str(WORKERS), - ]) - - # sleep to allow the workers to startup - time.sleep(5) - - # store initial usage - self._usage_start = [self._master.get_usage()] - for worker in self._workers: - self._usage_start.append(worker.get_usage()) - self._usage_start_time = time.time() - - def get_master_address(self): - return self._master.get_address() - - def check_status(self): - if not self._master.check_status(): - return False - for worker in self._workers: - if not worker.check_status(): - return False - return True - - def stop(self): - # store final usage - self._usage_stop = [self._master.get_usage()] - for worker in self._workers: - self._usage_stop.append(worker.get_usage()) - self._usage_stop_time = time.time() - - # stop the master - self._master.stop() - - # wait to allow the master and workers to die - time.sleep(5) - - # stop the workers - for worker in self._workers: - worker.stop() - - # wait to allow the workers to die - time.sleep(5) - - def get_usage(self): - ret = [] - tdelta = self._usage_stop_time - self._usage_start_time - for val_start, val_stop in zip(self._usage_start, self._usage_stop): - data = { - "cpu": (val_stop["cpu"] - val_start["cpu"]) / tdelta, - "memory": val_stop["max_memory"] / 1024, - "threads": val_stop["max_threads"], - "network": {} - } - net_start = val_start["network"]["eth0"] - net_stop = val_stop["network"]["eth0"] - for i in ["bytes", "packets"]: - data["network"][i] = {} - for j in ["rx", "tx"]: - data["network"][i][j] = (net_stop[i][j] - - net_start[i][j]) / tdelta - ret.append(data) - return ret - - def store_label(self, label): - self._master.store_label(label) - for worker in self._workers: - worker.store_label(label) - -def write_scenario_summary(scenario, throughput, usage, output): - output.write("Scenario **{}** throughput !!{:.2f}!! 
queries/s.\n\n".format( - scenario, throughput)) - headers = ["Memgraph", "CPU", "Max memory", "Max threads", - "Network RX", "Network TX"] - output.write("<table>\n<tr>") - for header in headers: - output.write("<th>{}</th>".format(header)) - output.write("</tr>\n") - for i, current in enumerate(usage): - name = "master" if i == 0 else "worker" + str(i) - output.write("<tr><td>{}</td>".format(name)) - for key, unit in [("cpu", "s/s"), ("memory", "MiB"), ("threads", "")]: - fmt = ".2f" if key != "threads" else "" - output.write(("<td>{:" + fmt + "} {}</td>").format( - current[key], unit).strip()) - for key in ["rx", "tx"]: - output.write("<td>{:.2f} packets/s</td>".format( - current["network"]["packets"][key])) - output.write("</tr>\n") - output.write("</table>\n\n") - -# main test function -def run(machine_ids, workers): - # create output directory - output_dir = os.path.join(SCRIPT_DIR, "output") - if not os.path.exists(output_dir): - os.mkdir(output_dir) - - # create memgraph cluster and client - mg_cluster = MgCluster(machine_ids, workers) - mg_client = WorkerWrapper(os.environ[machine_ids[0]], - workers[machine_ids[0]]) - - # execute the tests - stats = {} - for scenario in SCENARIOS: - output_file = os.path.join(output_dir, scenario + ".json") - - print("Starting memgraph cluster") - mg_cluster.store_label("Start: cluster") - mg_cluster.start() - - print("Starting client scenario:", scenario) - mg_cluster.store_label("Start: " + scenario) - mg_client.start(CLIENT_BINARY, [ - "--address", mg_cluster.get_master_address(), - "--group", "card_fraud", - "--scenario", scenario, - "--duration", str(DURATION), - "--num-workers", str(WORKERS), - "--output", output_file, - ]) - - # wait for the client to terminate and check the cluster status - while mg_client.check_status(): - assert mg_cluster.check_status(), "The memgraph cluster has died!" 
- time.sleep(2) - - # stop everything - mg_client.wait() - mg_cluster.store_label("Stop: " + scenario) - mg_cluster.stop() - mg_cluster.store_label("Stop: cluster") - - # process the stats - data = json.loads(list(filter(lambda x: x.strip(), - open(output_file).read().split("\n")))[-1]) - throughput = data["num_executed_queries"] / data["elapsed_time"] - usage = mg_cluster.get_usage() - stats[scenario] = (throughput, usage) - - # dump the stats - stats_file = open(os.path.join(output_dir, ".card_fraud_summary"), "w") - stats_file.write("==== Distributed card fraud summary: ====\n\n") - for scenario in SCENARIOS: - throughput, usage = stats[scenario] - write_scenario_summary(scenario, throughput, usage, stats_file) - stats_file.close() diff --git a/tests/distributed/card_fraud/config.json b/tests/distributed/card_fraud/config.json deleted file mode 100644 index c3047ec41..000000000 --- a/tests/distributed/card_fraud/config.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "cards_per_worker" : 10000, - "pos_per_worker" : 1000, - "transactions_per_worker" : 50000, - "compromised_pos_probability" : 0.2, - "fraud_reported_probability" : 0.1, - "hop_probability" : 0.1 -} diff --git a/tests/distributed/card_fraud/generate_dataset.sh b/tests/distributed/card_fraud/generate_dataset.sh deleted file mode 100755 index b79ee3023..000000000 --- a/tests/distributed/card_fraud/generate_dataset.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -cd $script_dir - -output_dir=snapshots - -if [ -d $output_dir ]; then - rm -rf $output_dir -fi - -NUM_MACHINES="$( cat card_fraud.py | grep -m1 "NUM_MACHINES" | tail -c 2 )" - -build_dir=../../../build_release -if [ ! -d $build_dir ]; then - build_dir=../../../build -fi -$build_dir/tests/manual/card_fraud_generate_snapshot --config config.json --num-workers $NUM_MACHINES --dir $output_dir diff --git a/tests/distributed/common.py b/tests/distributed/common.py deleted file mode 120000 index 0968c4fd8..000000000 --- a/tests/distributed/common.py +++ /dev/null @@ -1 +0,0 @@ -../macro_benchmark/common.py \ No newline at end of file diff --git a/tests/distributed/jail_faker.py b/tests/distributed/jail_faker.py deleted file mode 120000 index ee550ff0c..000000000 --- a/tests/distributed/jail_faker.py +++ /dev/null @@ -1 +0,0 @@ -../macro_benchmark/jail_faker.py \ No newline at end of file diff --git a/tests/distributed/jail_service.py b/tests/distributed/jail_service.py deleted file mode 100755 index 357022501..000000000 --- a/tests/distributed/jail_service.py +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/env python3 -import logging -import os -import signal -import subprocess -import sys -import tempfile -import traceback -import uuid -import xmlrpc.client - -# workaround for xmlrpc max/min integer size -xmlrpc.client.MAXINT = 2**100 -xmlrpc.client.MININT = -2**100 - -from common import get_absolute_path -from xmlrpc.server import SimpleXMLRPCServer - -try: - import jail -except: - import jail_faker as jail - - -class XMLRPCServer(SimpleXMLRPCServer): - def _dispatch(self, method, params): - try: - return super()._dispatch(method, params) - except: - traceback.print_exc() - raise - - -class JailService: - """ - Knows how to start and stop binaries - """ - def __init__(self): - logging.basicConfig(level=logging.INFO) - self.log = logging.getLogger("JailService") - self.log.info("Initializing Jail Service") - self.processes = {} - self._generated_filenames = [] - self.tempdir = tempfile.TemporaryDirectory() - - def 
_get_proc(self, tid): - if tid not in self.processes: - raise Exception( - "Binary with tid {tid} does not exist".format(tid=tid)) - return self.processes[tid] - - def start(self, tid, binary_name, binary_args=None): - self.log.info("Starting Binary: {binary}".format(binary=binary_name)) - self.log.info("With args: {args}".format(args=binary_args)) - # find executable path - binary = get_absolute_path(binary_name, "build") - if not os.path.exists(binary): - # Apollo builds both debug and release binaries on diff - # so we need to use the release binary if the debug one - # doesn't exist - binary = get_absolute_path(binary_name, "build_release") - - # fetch process - proc = self._get_proc(tid) - - # start binary - proc.run(binary, args=binary_args, timeout=600) - - msg = "Binary {binary} successfully started with tid {tid}".format( - binary=binary_name, tid=proc._tid) - self.log.info(msg) - - def check_status(self, tid): - proc = self._get_proc(tid) - status = proc.get_status() - if status is None: return True - assert status == 0, "The binary exited with a non-zero status!" - return False - - def get_usage(self, tid): - usage = self._get_proc(tid).get_usage() - usage.update({"network": jail.get_network_usage()}) - return usage - - def wait(self, tid): - proc = self._get_proc(tid) - proc.wait() - - def stop(self, tid): - self.log.info("Stopping binary with tid {tid}".format(tid=tid)) - proc = self._get_proc(tid) - try: - proc.send_signal(jail.SIGTERM) - except Exception: - pass - proc.wait() - self.log.info("Binary with tid {tid} stopped".format(tid=tid)) - - def allocate_file(self, extension=""): - if extension != "" and not extension.startswith("."): - extension = "." + extension - tmp_name = str(uuid.uuid4()) - while tmp_name in self._generated_filenames: - tmp_name = str(uuid.uuid4()) - self._generated_filenames.append(tmp_name) - absolute_path = os.path.join(self.tempdir.name, tmp_name + extension) - return absolute_path - - def read_file(self, absolute_path): - with open(absolute_path, "rb") as handle: - return xmlrpc.client.Binary(handle.read()) - - def get_jail(self): - proc = jail.get_process() - self.processes[proc._tid] = proc - return proc._tid - - def store_label(self, label): - jail.store_label(label) - - def shutdown(self): - self.log.info("Stopping Jail Service") - os._exit(0) - - -def main(): - # set port dynamically - port = os.environ["CURRENT_MACHINE"][len("MACHINE"):] - port = 8000 + (int(port) * 100) - interface = os.environ[os.environ["CURRENT_MACHINE"]] - - # init server - server = XMLRPCServer((interface, port), allow_none=True, logRequests=False) - server.register_introspection_functions() - server.register_instance(JailService()) - - # signal handler - def signal_sigterm(signum, frame): - server.server_close() - sys.exit() - - try: - signal.signal(signal.SIGTERM, signal_sigterm) - server.serve_forever() - except KeyboardInterrupt: - server.server_close() - - -if __name__ == "__main__": - main() diff --git a/tests/distributed/local_runner b/tests/distributed/local_runner deleted file mode 100755 index 922a73a05..000000000 --- a/tests/distributed/local_runner +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash -e - -script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -cd $script_dir - -if [[ $# -ne 2 ]]; then - echo "Invalid number of arguments" - echo "Usage: ./local_runner {test_suite} {test_name}" - exit 1 -fi - -test_suite=$1 -test_name=$2 -test_path="${test_suite}/${test_name}.py" - -if [ ! 
-f ${test_path} ]; then - echo "Test ${test_name}.py does not exist" - echo "Usage: ./local_runner {test_suite} {test_name}" - exit 1 -fi - -NUM_MACHINES="$(cat $test_path | grep -m1 "NUM_MACHINES" | tail -c 2)" - -# Define machine ips -for i in `seq 1 $NUM_MACHINES`; -do - export "MACHINE${i}"="127.0.0.1" -done - -# Run workers -for i in `seq 2 $NUM_MACHINES`; -do - CURRENT_MACHINE="MACHINE$i" ./jail_service.py & - pids[$i]=$! -done - -quit() -{ - # Stop workers - sleep 1 - for i in `seq 2 $NUM_MACHINES`; - do - kill ${pids[$i]} - done -} - -trap 'quit' INT - -# Run master with test -args="--machines-num $NUM_MACHINES --test-suite $test_suite --test $test_name" -CURRENT_MACHINE="MACHINE1" ./master.py $args || quit - -quit diff --git a/tests/distributed/master.py b/tests/distributed/master.py deleted file mode 100755 index 41909b610..000000000 --- a/tests/distributed/master.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python3 -import atexit -import importlib -import logging -import os -import signal -import subprocess -import time -import xmlrpc.client - -# workaround for xmlrpc max/min integer size -xmlrpc.client.MAXINT = 2**100 -xmlrpc.client.MININT = -2**100 - -from argparse import ArgumentParser -from jail_service import JailService - - -def parse_args(): - """ - Parse command line arguments - """ - argp = ArgumentParser(description=__doc__) - argp.add_argument("--test-suite", default="raft", - help="Tests suite") - argp.add_argument("--test", default="example_test", - help="Test specification in python module") - argp.add_argument("--machines-num", default="4", - help="Number of machines in cluster") - return argp.parse_args() - - -def wait_for_server(interface, port, delay=0.1): - cmd = ["nc", "-z", "-w", "1", interface, port] - while subprocess.call(cmd) != 0: - time.sleep(0.01) - time.sleep(delay) - - -def main(args): - workers = {} - machine_ids = [] - machines_num = int(args.machines_num) - - # initialize workers - for i in range(machines_num): - id = i + 1 - machine_id = "MACHINE{id}".format(id=id) - machine_ids.append(machine_id) - machine_interface = os.environ[machine_id] - machine_port = 8000 + id * 100 - - if (id == 1): - worker = JailService() - else: - host = "http://{interface}:{port}".format( - interface=machine_interface, - port=str(machine_port)) - worker = xmlrpc.client.ServerProxy(host) - wait_for_server(machine_interface, str(machine_port)) - - workers[machine_id] = worker - - # cleanup at exit - @atexit.register - def cleanup(): - for machine_id in machine_ids[1:]: - try: - workers[machine_id].shutdown() - except ConnectionRefusedError: - pass - - # run test - test = importlib.import_module( - "{suite}.{test}".format(suite=args.test_suite, test=args.test)) - test.run(machine_ids, workers) - - -if __name__ == "__main__": - args = parse_args() - main(args) diff --git a/tests/distributed/raft/CMakeLists.txt b/tests/distributed/raft/CMakeLists.txt deleted file mode 100644 index f09b290c7..000000000 --- a/tests/distributed/raft/CMakeLists.txt +++ /dev/null @@ -1,29 +0,0 @@ -# set current directory name as a test type -get_filename_component(test_type ${CMAKE_CURRENT_SOURCE_DIR} NAME) - -# get all cpp abs file names recursively starting from current directory -file(GLOB_RECURSE test_type_cpps *.cpp) -message(STATUS "Available ${test_type} cpp files are: ${test_type_cpps}") - -# for each cpp file build binary and register test -foreach(test_cpp ${test_type_cpps}) - - # get exec name (remove extension from the abs path) - get_filename_component(exec_name ${test_cpp} 
NAME_WE) - - set(target_name memgraph__${test_type}__${exec_name}) - - # build exec file - add_executable(${target_name} ${test_cpp}) - - # OUTPUT_NAME sets the real name of a target when it is built and can be - # used to help create two targets of the same name even though CMake - # requires unique logical target names - set_target_properties(${target_name} PROPERTIES OUTPUT_NAME ${exec_name}) - - # link libraries - target_link_libraries(${target_name} memgraph_lib kvstore_dummy_lib) - - set(output_path ${CMAKE_BINARY_DIR}/test_results/unit/${target_name}.xml) - -endforeach() diff --git a/tests/distributed/raft/README.md b/tests/distributed/raft/README.md deleted file mode 100644 index b7dbe11f2..000000000 --- a/tests/distributed/raft/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Raft Tests - -To run test locally execute following command: - -``` -./local_runner {test_suite} {test_name} -``` - -Every test has to be defined as python module -with exposed ```run(machine_ids, workers)``` -method. In each test there has to be constant -```NUM_MACHINES``` which specifies how many workers -to run in cluster. diff --git a/tests/distributed/raft/example_client.cpp b/tests/distributed/raft/example_client.cpp deleted file mode 100644 index a6f547918..000000000 --- a/tests/distributed/raft/example_client.cpp +++ /dev/null @@ -1,49 +0,0 @@ -#include <ctime> -#include <random> -#include <thread> - -#include <fmt/format.h> -#include <gflags/gflags.h> -#include <glog/logging.h> - -#include "communication/rpc/client.hpp" -#include "io/network/endpoint.hpp" -#include "io/network/utils.hpp" -#include "messages.hpp" - -using namespace communication::rpc; -using namespace std::literals::chrono_literals; - -DEFINE_string(server_interface, "127.0.0.1", - "Server interface on which to communicate."); -DEFINE_int32(server_port, 8010, "Server port on which to communicate."); - -int main(int argc, char **argv) { - google::SetUsageMessage("Raft RPC Client"); - - gflags::ParseCommandLineFlags(&argc, &argv, true); - google::InitGoogleLogging(argv[0]); - - // Initialize client. - Client client(io::network::Endpoint( - io::network::ResolveHostname(FLAGS_server_interface), FLAGS_server_port)); - - // Try to send 100 values to server. - // If requests timeout, try to resend it. - // Log output on server should contain all values once - // in correct order. 
- for (int i = 1; i <= 100; ++i) { - LOG(INFO) << fmt::format("Appending value: {}", i); - // TODO: Serialize RPC via Cap'n Proto - // auto result_tuple = client.Call<AppendEntry>(i); - // if (!result_tuple) { - // LOG(INFO) << "Request unsuccessful"; - // // Try to resend value - // --i; - // } else { - // LOG(INFO) << fmt::format("Appended value: {}", i); - // } - } - - return 0; -} diff --git a/tests/distributed/raft/example_server.cpp b/tests/distributed/raft/example_server.cpp deleted file mode 100644 index 1ec3cd00f..000000000 --- a/tests/distributed/raft/example_server.cpp +++ /dev/null @@ -1,77 +0,0 @@ -#include <fstream> -#include <thread> - -#include <fmt/format.h> -#include <gflags/gflags.h> -#include <glog/logging.h> - -#include "communication/rpc/server.hpp" -#include "messages.hpp" -#include "utils/signals.hpp" -#include "utils/terminate_handler.hpp" - -using namespace communication::rpc; -using namespace std::literals::chrono_literals; - -DEFINE_string(interface, "127.0.0.1", - "Communication interface on which to listen."); -DEFINE_string(port, "10000", "Communication port on which to listen."); -DEFINE_string(log, "log.txt", "Entries log file"); - -volatile sig_atomic_t is_shutting_down = 0; - -int main(int argc, char **argv) { - google::SetUsageMessage("Raft RPC Server"); - - gflags::ParseCommandLineFlags(&argc, &argv, true); - google::InitGoogleLogging(argv[0]); - - // Unhandled exception handler init. - std::set_terminate(&utils::TerminateHandler); - - Server server(io::network::Endpoint(FLAGS_interface, stoul(FLAGS_port))); - std::ofstream log(FLAGS_log, std::ios_base::app); - - // Handler for regular termination signals. - auto shutdown = [&log]() { - if (is_shutting_down) return; - is_shutting_down = 1; - log.close(); - exit(0); - }; - - // Prevent handling shutdown inside a shutdown. For example, SIGINT handler - // being interrupted by SIGTERM before is_shutting_down is set, thus causing - // double shutdown. - sigset_t block_shutdown_signals; - sigemptyset(&block_shutdown_signals); - sigaddset(&block_shutdown_signals, SIGTERM); - sigaddset(&block_shutdown_signals, SIGINT); - - CHECK(utils::SignalHandler::RegisterHandler(utils::Signal::Terminate, - shutdown, block_shutdown_signals)) - << "Unable to register SIGTERM handler!"; - CHECK(utils::SignalHandler::RegisterHandler(utils::Signal::Interupt, shutdown, - block_shutdown_signals)) - << "Unable to register SIGINT handler!"; - - // Example callback. - // TODO: Serialize RPC via Cap'n Proto - // server.Register<AppendEntry>( - // [&log](const auto &req_reader, auto *res_builder) { - // AppendEntryReq request; - // request.Load(req_reader); - // log << request.val << std::endl; - // log.flush(); - // LOG(INFO) << fmt::format("AppendEntry: {}", request.val); - // AppendEntryRes res(200, FLAGS_interface, stol(FLAGS_port)); - // res.Save(res_builder); - // }); - - LOG(INFO) << "Raft RPC server started"; - // Sleep until shutdown detected. 
- std::this_thread::sleep_until( - std::chrono::time_point<std::chrono::system_clock>::max()); - - return 0; -} diff --git a/tests/distributed/raft/example_test.py b/tests/distributed/raft/example_test.py deleted file mode 100644 index 06525ce0c..000000000 --- a/tests/distributed/raft/example_test.py +++ /dev/null @@ -1,61 +0,0 @@ -import logging -import os -import time -import xmlrpc.client - -NUM_MACHINES = 2 - -# binaries to run -CLIENT_BINARY = "tests/distributed/raft/example_client" -SERVER_BINARY = "tests/distributed/raft/example_server" - - -def run(machine_ids, workers): - logging.basicConfig(level=logging.INFO) - log = logging.getLogger("example_test") - log.info("Start") - - # define interfaces and ports for binaries - server_interface = os.environ[machine_ids[1]] - server_port = str(10000) - client_interface = os.environ[machine_ids[0]] - client_port = str(10010) - - # start binaries - log_abs_path = workers[machine_ids[1]].allocate_file() - server_tid = workers[machine_ids[1]].get_jail() - server_args = ["--interface", server_interface] - server_args += ["--port", server_port] - server_args += ["--log", log_abs_path] - workers[machine_ids[1]].start(server_tid, SERVER_BINARY, server_args) - - client_tid = workers[machine_ids[0]].get_jail() - client_args = ["--interface", client_interface] - client_args += ["--port", client_port] - client_args += ["--server-interface", server_interface] - client_args += ["--server-port", server_port] - workers[machine_ids[0]].start(client_tid, CLIENT_BINARY, client_args) - - # crash server - workers[machine_ids[1]].stop(server_tid) - time.sleep(5) - workers[machine_ids[1]].start(server_tid, SERVER_BINARY, server_args) - - # wait for test to finish - time.sleep(5) - - # stop binaries - workers[machine_ids[0]].stop(client_tid) - workers[machine_ids[1]].stop(server_tid) - - # fetch log - result = workers[machine_ids[1]].read_file(log_abs_path) - if result is not None: - local_log = "local_log.txt" - result = result.data.decode('ascii') - if result.splitlines() == ["{}".format(x) for x in range(1, 101)]: - log.warn("Test successful") - else: - raise Exception("Test failed") - - log.info("End") diff --git a/tests/distributed/raft/messages.hpp b/tests/distributed/raft/messages.hpp deleted file mode 100644 index e79f5e68a..000000000 --- a/tests/distributed/raft/messages.hpp +++ /dev/null @@ -1,21 +0,0 @@ -#include "communication/rpc/messages.hpp" - -using namespace communication::rpc; - -struct AppendEntryReq { - AppendEntryReq() {} - explicit AppendEntryReq(int val) : val(val) {} - int val; -}; - -struct AppendEntryRes { - AppendEntryRes() {} - AppendEntryRes(int status, std::string interface, uint16_t port) - : status(status), interface(interface), port(port) {} - int status; - std::string interface; - uint16_t port; - -}; - -using AppendEntry = RequestResponse<AppendEntryReq, AppendEntryRes>; diff --git a/tests/macro_benchmark/clients/card_fraud_client.cpp b/tests/macro_benchmark/clients/card_fraud_client.cpp index fdba02efb..502e03480 100644 --- a/tests/macro_benchmark/clients/card_fraud_client.cpp +++ b/tests/macro_benchmark/clients/card_fraud_client.cpp @@ -5,8 +5,6 @@ #include "gflags/gflags.h" -#include "stats/stats.hpp" -#include "stats/stats_rpc_messages.hpp" #include "utils/thread/sync.hpp" #include "long_running_common.hpp" @@ -22,12 +20,12 @@ DEFINE_string(config, "", "test config"); enum class Role { WORKER, ANALYTIC, CLEANUP }; -stats::Gauge &num_vertices = stats::GetGauge("vertices"); -stats::Gauge &num_edges = stats::GetGauge("edges"); 
+std::atomic<int64_t> num_vertices{0}; +std::atomic<int64_t> num_edges{0}; void UpdateStats() { - num_vertices.Set(num_pos + num_cards + num_transactions); - num_edges.Set(2 * num_transactions); + num_vertices = num_pos + num_cards + num_transactions; + num_edges = 2 * num_transactions; } int64_t NumNodesWithLabel(Client &client, std::string label) { @@ -333,9 +331,6 @@ int main(int argc, char **argv) { communication::Init(); - stats::InitStatsLogging( - fmt::format("client.long_running.{}.{}", FLAGS_group, FLAGS_scenario)); - Endpoint endpoint(FLAGS_address, FLAGS_port); ClientContext context(FLAGS_use_ssl); Client client(&context); @@ -383,7 +378,5 @@ int main(int argc, char **argv) { RunMultithreadedTest(clients); - stats::StopStatsLogging(); - return 0; } diff --git a/tests/macro_benchmark/clients/graph_500_bfs.cpp b/tests/macro_benchmark/clients/graph_500_bfs.cpp index 0e6d80c34..35ddb60aa 100644 --- a/tests/macro_benchmark/clients/graph_500_bfs.cpp +++ b/tests/macro_benchmark/clients/graph_500_bfs.cpp @@ -6,8 +6,6 @@ #include "gflags/gflags.h" #include "long_running_common.hpp" -#include "stats/stats.hpp" -#include "stats/stats_rpc_messages.hpp" class Graph500BfsClient : public TestClient { public: @@ -55,7 +53,5 @@ int main(int argc, char **argv) { RunMultithreadedTest(clients); - stats::StopStatsLogging(); - return 0; } diff --git a/tests/macro_benchmark/clients/long_running_common.hpp b/tests/macro_benchmark/clients/long_running_common.hpp index 8ab7311ee..7f54c94d6 100644 --- a/tests/macro_benchmark/clients/long_running_common.hpp +++ b/tests/macro_benchmark/clients/long_running_common.hpp @@ -1,9 +1,9 @@ #pragma once +#include <memory> + #include "json/json.hpp" -#include "stats/metrics.hpp" -#include "stats/stats.hpp" #include "utils/timer.hpp" #include "common.hpp" @@ -22,8 +22,8 @@ DEFINE_int32(duration, 30, "Number of seconds to execute benchmark"); DEFINE_string(group, "unknown", "Test group name"); DEFINE_string(scenario, "unknown", "Test scenario name"); -auto &executed_queries = stats::GetCounter("executed_queries"); -auto &serialization_errors = stats::GetCounter("serialization_errors"); +std::atomic<uint64_t> executed_queries{0}; +std::atomic<uint64_t> serialization_errors{0}; class TestClient { public: @@ -70,7 +70,7 @@ class TestClient { std::tie(result, retries) = ExecuteNTimesTillSuccess(client_, query, params, MAX_RETRIES); } catch (const utils::BasicException &e) { - serialization_errors.Bump(MAX_RETRIES); + serialization_errors += MAX_RETRIES; return std::experimental::nullopt; } auto wall_time = timer.Elapsed(); @@ -84,8 +84,8 @@ class TestClient { stats_[query].push_back(std::move(metadata)); } } - executed_queries.Bump(); - serialization_errors.Bump(retries); + ++executed_queries; + serialization_errors += retries; return result; } @@ -168,15 +168,10 @@ void RunMultithreadedTest(std::vector<std::unique_ptr<TestClient>> &clients) { .first; it->second = (it->second.ValueDouble() * old_count + stat.second) / (old_count + new_count); - stats::LogStat( - fmt::format("queries.{}.{}", query_stats.first, stat.first), - (stat.second / new_count)); } - stats::LogStat(fmt::format("queries.{}.count", query_stats.first), - new_count); } - out << "{\"num_executed_queries\": " << executed_queries.Value() << ", " + out << "{\"num_executed_queries\": " << executed_queries << ", " << "\"elapsed_time\": " << timer.Elapsed().count() << ", \"queries\": ["; utils::PrintIterable( diff --git a/tests/manual/CMakeLists.txt b/tests/manual/CMakeLists.txt index aab46eb77..7f01fc779 
100644 --- a/tests/manual/CMakeLists.txt +++ b/tests/manual/CMakeLists.txt @@ -33,12 +33,6 @@ target_link_libraries(${test_prefix}bolt_client memgraph_lib kvstore_dummy_lib) add_manual_test(card_fraud_generate_snapshot.cpp) target_link_libraries(${test_prefix}card_fraud_generate_snapshot memgraph_lib kvstore_dummy_lib) -add_manual_test(card_fraud_local.cpp) -target_link_libraries(${test_prefix}card_fraud_local memgraph_lib kvstore_dummy_lib) - -add_manual_test(distributed_repl.cpp) -target_link_libraries(${test_prefix}distributed_repl memgraph_lib kvstore_dummy_lib) - add_manual_test(endinan.cpp) add_manual_test(generate_snapshot.cpp) @@ -56,9 +50,6 @@ target_link_libraries(${test_prefix}query_hash memgraph_lib kvstore_dummy_lib) add_manual_test(query_planner.cpp) target_link_libraries(${test_prefix}query_planner memgraph_lib kvstore_dummy_lib) -add_manual_test(raft_rpc.cpp) -target_link_libraries(${test_prefix}raft_rpc memgraph_lib kvstore_dummy_lib) - add_manual_test(repl.cpp) target_link_libraries(${test_prefix}repl memgraph_lib kvstore_dummy_lib) diff --git a/tests/manual/card_fraud_local.cpp b/tests/manual/card_fraud_local.cpp deleted file mode 100644 index 571704474..000000000 --- a/tests/manual/card_fraud_local.cpp +++ /dev/null @@ -1,77 +0,0 @@ -#include <atomic> -#include <random> -#include <thread> -#include <vector> - -#include "gflags/gflags.h" - -#include "distributed_common.hpp" - -DEFINE_int32(num_tx_creators, 3, "Number of threads creating transactions"); -DEFINE_int32(tx_per_thread, 1000, "Number of transactions each thread creates"); - -int main(int argc, char *argv[]) { - gflags::ParseCommandLineFlags(&argc, &argv, true); - - Cluster cluster(5); - - cluster.Execute("CREATE INDEX ON :Card(id)"); - cluster.Execute("CREATE INDEX ON :Transaction(id)"); - cluster.Execute("CREATE INDEX ON :Pos(id)"); - - int kCardCount = 20000; - int kPosCount = 20000; - - cluster.Execute("UNWIND range(0, $card_count) AS id CREATE (:Card {id:id})", - {{"card_count", kCardCount - 1}}); - cluster.Execute("UNWIND range(0, $pos_count) AS id CREATE (:Pos {id:id})", - {{"pos_count", kPosCount - 1}}); - - CheckResults(cluster.Execute("MATCH (:Pos) RETURN count(1)"), {{kPosCount}}, - "Failed to create POS"); - CheckResults(cluster.Execute("MATCH (:Card) RETURN count(1)"), {{kCardCount}}, - "Failed to create Cards"); - - std::atomic<int> tx_counter{0}; - auto create_tx = [&cluster, kCardCount, kPosCount, &tx_counter](int count) { - std::mt19937 rand_dev{std::random_device{}()}; - std::uniform_int_distribution<> int_dist; - - auto rint = [&rand_dev, &int_dist](int upper) { - return int_dist(rand_dev) % upper; - }; - - for (int i = 0; i < count; ++i) { - try { - auto res = cluster.Execute( - "MATCH (p:Pos {id: $pos}), (c:Card {id: $card}) " - "CREATE (p)<-[:At]-(:Transaction {id : $tx})-[:Using]->(c) " - "RETURN count(1)", - {{"pos", rint(kPosCount)}, - {"card", rint(kCardCount)}, - {"tx", tx_counter++}}); - CheckResults(res, {{1}}, "Transaction creation"); - } catch (utils::LockTimeoutException &) { - --i; - } catch (mvcc::SerializationError &) { - --i; - } - if (i > 0 && i % 200 == 0) - LOG(INFO) << "Created " << i << " transactions"; - } - }; - - LOG(INFO) << "Creating " << FLAGS_num_tx_creators * FLAGS_tx_per_thread - << " transactions in " << FLAGS_num_tx_creators << " threads"; - std::vector<std::thread> tx_creators; - for (int i = 0; i < FLAGS_num_tx_creators; ++i) - tx_creators.emplace_back(create_tx, FLAGS_tx_per_thread); - for (auto &t : tx_creators) t.join(); - - 
CheckResults(cluster.Execute("MATCH (:Transaction) RETURN count(1)"), - {{FLAGS_num_tx_creators * FLAGS_tx_per_thread}}, - "Failed to create Transactions"); - - LOG(INFO) << "Test terminated successfully"; - return 0; -} diff --git a/tests/manual/distributed_common.hpp b/tests/manual/distributed_common.hpp deleted file mode 100644 index a33d5acd5..000000000 --- a/tests/manual/distributed_common.hpp +++ /dev/null @@ -1,98 +0,0 @@ -#pragma once - -#include <chrono> -#include <vector> - -#include "communication/result_stream_faker.hpp" -#include "database/graph_db_accessor.hpp" -#include "query/interpreter.hpp" -#include "query/typed_value.hpp" - -class WorkerInThread { - public: - explicit WorkerInThread(database::Config config) : worker_(config) { - thread_ = std::thread([this, config] { worker_.WaitForShutdown(); }); - } - - ~WorkerInThread() { - if (thread_.joinable()) thread_.join(); - } - - database::Worker worker_; - std::thread thread_; -}; - -class Cluster { - const std::chrono::microseconds kInitTime{200}; - const std::string kLocal = "127.0.0.1"; - - public: - Cluster(int worker_count) { - database::Config masterconfig; - masterconfig.master_endpoint = {kLocal, 0}; - master_ = std::make_unique<database::Master>(masterconfig); - interpreter_ = std::make_unique<query::Interpreter>(*master_); - std::this_thread::sleep_for(kInitTime); - - auto worker_config = [this](int worker_id) { - database::Config config; - config.worker_id = worker_id; - config.master_endpoint = master_->endpoint(); - config.worker_endpoint = {kLocal, 0}; - return config; - }; - - for (int i = 0; i < worker_count; ++i) { - workers_.emplace_back( - std::make_unique<WorkerInThread>(worker_config(i + 1))); - std::this_thread::sleep_for(kInitTime); - } - } - - void Stop() { - interpreter_ = nullptr; - master_ = nullptr; - workers_.clear(); - } - - ~Cluster() { - if (master_) Stop(); - } - - auto Execute(const std::string &query, - std::map<std::string, query::TypedValue> params = {}) { - database::GraphDbAccessor dba(*master_); - ResultStreamFaker result; - interpreter_->operator()(query, dba, params, false).PullAll(result); - dba.Commit(); - return result.GetResults(); - }; - - private: - std::unique_ptr<database::Master> master_; - std::vector<std::unique_ptr<WorkerInThread>> workers_; - std::unique_ptr<query::Interpreter> interpreter_; -}; - -void CheckResults( - const std::vector<std::vector<query::TypedValue>> &results, - const std::vector<std::vector<query::TypedValue>> &expected_rows, - const std::string &msg) { - query::TypedValue::BoolEqual equality; - CHECK(results.size() == expected_rows.size()) - << msg << " (expected " << expected_rows.size() << " rows " - << ", got " << results.size() << ")"; - for (size_t row_id = 0; row_id < results.size(); ++row_id) { - auto &result = results[row_id]; - auto &expected = expected_rows[row_id]; - CHECK(result.size() == expected.size()) - << msg << " (expected " << expected.size() << " elements in row " - << row_id << ", got " << result.size() << ")"; - for (size_t col_id = 0; col_id < result.size(); ++col_id) { - CHECK(equality(result[col_id], expected[col_id])) - << msg << " (expected value '" << expected[col_id] << "' got '" - << result[col_id] << "' in row " << row_id << " col " << col_id - << ")"; - } - } -} diff --git a/tests/manual/distributed_repl.cpp b/tests/manual/distributed_repl.cpp deleted file mode 100644 index 17853e43b..000000000 --- a/tests/manual/distributed_repl.cpp +++ /dev/null @@ -1,61 +0,0 @@ -#include <chrono> -#include <iostream> -#include 
<memory> -#include <thread> - -#include <gflags/gflags.h> -#include <glog/logging.h> - -#include "database/graph_db.hpp" -#include "query/interpreter.hpp" -#include "query/repl.hpp" -#include "utils/flag_validation.hpp" - -DEFINE_VALIDATED_int32(worker_count, 1, - "The number of worker nodes in cluster.", - FLAG_IN_RANGE(1, 1000)); -DECLARE_int32(min_log_level); - -const std::string kLocal = "127.0.0.1"; - -class WorkerInThread { - public: - explicit WorkerInThread(database::Config config) : worker_(config) { - thread_ = std::thread([this, config] { worker_.WaitForShutdown(); }); - } - - ~WorkerInThread() { - if (thread_.joinable()) thread_.join(); - } - - database::Worker worker_; - std::thread thread_; -}; - -int main(int argc, char *argv[]) { - gflags::ParseCommandLineFlags(&argc, &argv, true); - FLAGS_min_log_level = google::ERROR; - google::InitGoogleLogging(argv[0]); - - // Start the master - database::Config master_config; - master_config.master_endpoint = {kLocal, 0}; - auto master = std::make_unique<database::Master>(master_config); - // Allow the master to get initialized before making workers. - std::this_thread::sleep_for(std::chrono::milliseconds(250)); - - std::vector<std::unique_ptr<WorkerInThread>> workers; - for (int i = 0; i < FLAGS_worker_count; ++i) { - database::Config config; - config.worker_id = i + 1; - config.master_endpoint = master->endpoint(); - config.worker_endpoint = {kLocal, 0}; - workers.emplace_back(std::make_unique<WorkerInThread>(config)); - } - - // Start the REPL - query::Repl(*master); - - master = nullptr; - return 0; -} diff --git a/tests/manual/query_planner.cpp b/tests/manual/query_planner.cpp index 55416238c..00f2d70c9 100644 --- a/tests/manual/query_planner.cpp +++ b/tests/manual/query_planner.cpp @@ -514,41 +514,6 @@ class PlanPrinter : public query::plan::HierarchicalLogicalOperatorVisitor { return true; } - bool Visit(query::plan::ModifyUser &op) override { - WithPrintLn([](auto &out) { out << "* ModifyUser "; }); - return true; - } - - bool Visit(query::plan::DropUser &op) override { - WithPrintLn([](auto &out) { out << "* DropUser"; }); - return true; - } - - bool PreVisit(query::plan::PullRemote &op) override { - WithPrintLn([&op](auto &out) { - out << "* PullRemote [" << op.plan_id() << "] {"; - utils::PrintIterable( - out, op.symbols(), ", ", - [](auto &out, const auto &sym) { out << sym.name(); }); - out << "}"; - }); - WithPrintLn([](auto &out) { out << "|\\"; }); - ++depth_; - WithPrintLn([](auto &out) { out << "* workers"; }); - --depth_; - return true; - } - - bool PreVisit(query::plan::Synchronize &op) override { - WithPrintLn([&op](auto &out) { - out << "* Synchronize"; - if (op.advance_command()) out << " (ADV CMD)"; - }); - if (op.pull_remote()) Branch(*op.pull_remote()); - op.input()->Accept(*this); - return false; - } - bool PreVisit(query::plan::Cartesian &op) override { WithPrintLn([&op](auto &out) { out << "* Cartesian {"; @@ -565,22 +530,6 @@ class PlanPrinter : public query::plan::HierarchicalLogicalOperatorVisitor { op.left_op()->Accept(*this); return false; } - - bool PreVisit(query::plan::PullRemoteOrderBy &op) override { - WithPrintLn([&op](auto &out) { - out << "* PullRemoteOrderBy {"; - utils::PrintIterable( - out, op.symbols(), ", ", - [](auto &out, const auto &sym) { out << sym.name(); }); - out << "}"; - }); - - WithPrintLn([](auto &out) { out << "|\\"; }); - ++depth_; - WithPrintLn([](auto &out) { out << "* workers"; }); - --depth_; - return true; - } #undef PRE_VISIT private: @@ -669,39 +618,11 @@ 
DEFCOMMAND(Show) { plan->Accept(printer); } -DEFCOMMAND(ShowDistributed) { - int64_t plan_ix = 0; - std::stringstream ss(args[0]); - ss >> plan_ix; - if (ss.fail() || !ss.eof() || plan_ix >= plans.size()) return; - const auto &plan = plans[plan_ix].first; - std::atomic<int64_t> plan_id{0}; - auto distributed_plan = MakeDistributedPlan(*plan, symbol_table, plan_id); - { - std::cout << "---- Master Plan ---- " << std::endl; - PlanPrinter printer(dba); - distributed_plan.master_plan->Accept(printer); - std::cout << std::endl; - } - for (size_t i = 0; i < distributed_plan.worker_plans.size(); ++i) { - int64_t id; - std::shared_ptr<query::plan::LogicalOperator> worker_plan; - std::tie(id, worker_plan) = distributed_plan.worker_plans[i]; - std::cout << "---- Worker Plan #" << id << " ---- " << std::endl; - PlanPrinter printer(dba); - worker_plan->Accept(printer); - std::cout << std::endl; - } -} - DEFCOMMAND(Help); std::map<std::string, Command> commands = { {"top", {TopCommand, 1, "Show top N plans"}}, {"show", {ShowCommand, 1, "Show the Nth plan"}}, - {"show-distributed", - {ShowDistributedCommand, 1, - "Show the Nth plan as for distributed execution"}}, {"help", {HelpCommand, 0, "Show available commands"}}, }; diff --git a/tests/manual/raft_rpc.cpp b/tests/manual/raft_rpc.cpp deleted file mode 100644 index 428b3bceb..000000000 --- a/tests/manual/raft_rpc.cpp +++ /dev/null @@ -1,50 +0,0 @@ -#include "communication/raft/rpc.hpp" -#include "communication/raft/storage/file.hpp" -#include "communication/raft/test_utils.hpp" - -using namespace std::literals::chrono_literals; - -namespace raft = communication::raft; - -using io::network::Endpoint; -using raft::RaftConfig; -using raft::RpcNetwork; -using raft::test_utils::DummyState; - -DEFINE_string(member_id, "", "id of Raft member"); -DEFINE_string(log_dir, "", "Raft log directory"); - -/* Start cluster members with: - * ./raft_rpc --member-id a --log-dir a_log - * ./raft_rpc --member-id b --log-dir b_log - * ./raft_rpc --member-id c --log-dir c_log - * - * Enjoy democracy! 
- */ - -int main(int argc, char *argv[]) { - google::InitGoogleLogging(argv[0]); - gflags::ParseCommandLineFlags(&argc, &argv, true); - - std::unordered_map<std::string, Endpoint> directory = { - {"a", Endpoint("127.0.0.1", 12345)}, - {"b", Endpoint("127.0.0.1", 12346)}, - {"c", Endpoint("127.0.0.1", 12347)}}; - - communication::rpc::Server server(directory[FLAGS_member_id]); - // TODO: Serialize RPC via Cap'n Proto - // RpcNetwork<DummyState> network(server, directory); - // raft::SimpleFileStorage<DummyState> storage(FLAGS_log_dir); - - // raft::RaftConfig config{{"a", "b", "c"}, 150ms, 300ms, 70ms, 30ms}; - - // { - // raft::RaftMember<DummyState> raft_member(network, storage, FLAGS_member_id, - // config); - // while (true) { - // continue; - // } - // } - - return 0; -} diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index 0a2b0f73d..ca55c1ee4 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -40,9 +40,6 @@ target_link_libraries(${test_prefix}bolt_session memgraph_lib kvstore_dummy_lib) add_unit_test(communication_buffer.cpp) target_link_libraries(${test_prefix}communication_buffer memgraph_lib kvstore_dummy_lib) -add_unit_test(concurrent_id_mapper_distributed.cpp) -target_link_libraries(${test_prefix}concurrent_id_mapper_distributed memgraph_lib kvstore_dummy_lib) - add_unit_test(concurrent_id_mapper_single_node.cpp) target_link_libraries(${test_prefix}concurrent_id_mapper_single_node memgraph_lib kvstore_dummy_lib) @@ -52,9 +49,6 @@ target_link_libraries(${test_prefix}concurrent_map_access memgraph_lib kvstore_d add_unit_test(concurrent_map.cpp) target_link_libraries(${test_prefix}concurrent_map memgraph_lib kvstore_dummy_lib) -add_unit_test(counters.cpp) -target_link_libraries(${test_prefix}counters memgraph_lib kvstore_dummy_lib) - add_unit_test(cypher_main_visitor.cpp) target_link_libraries(${test_prefix}cypher_main_visitor memgraph_lib kvstore_dummy_lib) @@ -64,9 +58,6 @@ target_link_libraries(${test_prefix}database_key_index memgraph_lib kvstore_dumm add_unit_test(database_label_property_index.cpp) target_link_libraries(${test_prefix}database_label_property_index memgraph_lib kvstore_dummy_lib) -add_unit_test(database_master.cpp) -target_link_libraries(${test_prefix}database_master memgraph_lib kvstore_dummy_lib) - add_unit_test(database_transaction_timeout.cpp) target_link_libraries(${test_prefix}database_transaction_timeout memgraph_lib kvstore_dummy_lib) @@ -76,36 +67,6 @@ target_link_libraries(${test_prefix}datastructure_union_find memgraph_lib kvstor add_unit_test(deferred_deleter.cpp) target_link_libraries(${test_prefix}deferred_deleter memgraph_lib kvstore_dummy_lib) -add_unit_test(distributed_bfs.cpp) -target_link_libraries(${test_prefix}distributed_bfs memgraph_lib kvstore_dummy_lib) - -add_unit_test(distributed_coordination.cpp) -target_link_libraries(${test_prefix}distributed_coordination memgraph_lib kvstore_dummy_lib) - -add_unit_test(distributed_data_exchange.cpp) -target_link_libraries(${test_prefix}distributed_data_exchange memgraph_lib kvstore_dummy_lib) - -add_unit_test(distributed_durability.cpp) -target_link_libraries(${test_prefix}distributed_durability memgraph_lib kvstore_dummy_lib) - -add_unit_test(distributed_gc.cpp) -target_link_libraries(${test_prefix}distributed_gc memgraph_lib kvstore_dummy_lib) - -add_unit_test(distributed_graph_db.cpp) -target_link_libraries(${test_prefix}distributed_graph_db memgraph_lib kvstore_dummy_lib) - -add_unit_test(distributed_interpretation.cpp) 
-target_link_libraries(${test_prefix}distributed_interpretation memgraph_lib kvstore_dummy_lib) - -add_unit_test(distributed_query_plan.cpp) -target_link_libraries(${test_prefix}distributed_query_plan memgraph_lib kvstore_dummy_lib) - -add_unit_test(distributed_serialization.cpp) -target_link_libraries(${test_prefix}distributed_serialization memgraph_lib kvstore_dummy_lib) - -add_unit_test(distributed_updates.cpp) -target_link_libraries(${test_prefix}distributed_updates memgraph_lib kvstore_dummy_lib) - add_unit_test(durability.cpp) target_link_libraries(${test_prefix}durability memgraph_lib kvstore_dummy_lib) @@ -130,9 +91,6 @@ target_link_libraries(${test_prefix}interpreter memgraph_lib kvstore_dummy_lib) add_unit_test(kvstore.cpp) target_link_libraries(${test_prefix}kvstore gtest gtest_main memgraph_lib kvstore_lib) -add_unit_test(metrics.cpp) -target_link_libraries(${test_prefix}metrics memgraph_lib kvstore_dummy_lib) - add_unit_test(mvcc.cpp) target_link_libraries(${test_prefix}mvcc memgraph_lib kvstore_dummy_lib) @@ -190,24 +148,9 @@ target_link_libraries(${test_prefix}query_variable_start_planner memgraph_lib kv add_unit_test(queue.cpp) target_link_libraries(${test_prefix}queue memgraph_lib kvstore_dummy_lib) -add_unit_test(raft.cpp) -target_link_libraries(${test_prefix}raft memgraph_lib kvstore_dummy_lib) - -add_unit_test(raft_storage.cpp) -target_link_libraries(${test_prefix}raft_storage memgraph_lib kvstore_dummy_lib) - add_unit_test(record_edge_vertex_accessor.cpp) target_link_libraries(${test_prefix}record_edge_vertex_accessor memgraph_lib kvstore_dummy_lib) -add_unit_test(rpc.cpp) -target_link_libraries(${test_prefix}rpc memgraph_lib kvstore_dummy_lib) - -add_unit_test(rpc_worker_clients.cpp) -target_link_libraries(${test_prefix}rpc_worker_clients memgraph_lib kvstore_dummy_lib) - -add_unit_test(serialization.cpp) -target_link_libraries(${test_prefix}serialization memgraph_lib kvstore_dummy_lib) - add_unit_test(skiplist_access.cpp) target_link_libraries(${test_prefix}skiplist_access memgraph_lib kvstore_dummy_lib) @@ -235,9 +178,6 @@ target_link_libraries(${test_prefix}storage_address memgraph_lib kvstore_dummy_l add_unit_test(stripped.cpp) target_link_libraries(${test_prefix}stripped memgraph_lib kvstore_dummy_lib) -add_unit_test(transaction_engine_distributed.cpp) -target_link_libraries(${test_prefix}transaction_engine_distributed memgraph_lib kvstore_dummy_lib) - add_unit_test(transaction_engine_single_node.cpp) target_link_libraries(${test_prefix}transaction_engine_single_node memgraph_lib kvstore_dummy_lib) diff --git a/tests/unit/concurrent_id_mapper_distributed.cpp b/tests/unit/concurrent_id_mapper_distributed.cpp deleted file mode 100644 index 9f0dc8629..000000000 --- a/tests/unit/concurrent_id_mapper_distributed.cpp +++ /dev/null @@ -1,52 +0,0 @@ -#include <experimental/optional> - -#include "gtest/gtest.h" - -#include "communication/rpc/server.hpp" -#include "storage/concurrent_id_mapper_master.hpp" -#include "storage/concurrent_id_mapper_worker.hpp" -#include "storage/types.hpp" - -template <typename TId> -class DistributedConcurrentIdMapperTest : public ::testing::Test { - const std::string kLocal{"127.0.0.1"}; - - protected: - communication::rpc::Server master_server_{{kLocal, 0}}; - std::experimental::optional<communication::rpc::ClientPool> - master_client_pool_; - std::experimental::optional<storage::MasterConcurrentIdMapper<TId>> - master_mapper_; - std::experimental::optional<storage::WorkerConcurrentIdMapper<TId>> - worker_mapper_; - - void SetUp() override 
{ - master_client_pool_.emplace(master_server_.endpoint()); - master_mapper_.emplace(master_server_); - worker_mapper_.emplace(master_client_pool_.value()); - } - void TearDown() override { - worker_mapper_ = std::experimental::nullopt; - master_mapper_ = std::experimental::nullopt; - master_client_pool_ = std::experimental::nullopt; - } -}; - -typedef ::testing::Types<storage::Label, storage::EdgeType, storage::Property> - GraphDbTestTypes; -TYPED_TEST_CASE(DistributedConcurrentIdMapperTest, GraphDbTestTypes); - -TYPED_TEST(DistributedConcurrentIdMapperTest, Basic) { - auto &master = this->master_mapper_.value(); - auto &worker = this->worker_mapper_.value(); - - auto id1 = master.value_to_id("v1"); - EXPECT_EQ(worker.id_to_value(id1), "v1"); - EXPECT_EQ(worker.value_to_id("v1"), id1); - - auto id2 = worker.value_to_id("v2"); - EXPECT_EQ(master.id_to_value(id2), "v2"); - EXPECT_EQ(master.value_to_id("v2"), id2); - - EXPECT_NE(id1, id2); -} diff --git a/tests/unit/counters.cpp b/tests/unit/counters.cpp deleted file mode 100644 index fad665443..000000000 --- a/tests/unit/counters.cpp +++ /dev/null @@ -1,26 +0,0 @@ -#include "gtest/gtest.h" - -#include "communication/rpc/server.hpp" -#include "database/counters.hpp" - -const std::string kLocal = "127.0.0.1"; - -TEST(CountersDistributed, All) { - communication::rpc::Server master_server({kLocal, 0}); - database::MasterCounters master(master_server); - communication::rpc::ClientPool master_client_pool(master_server.endpoint()); - - database::WorkerCounters w1(master_client_pool); - database::WorkerCounters w2(master_client_pool); - - EXPECT_EQ(w1.Get("a"), 0); - EXPECT_EQ(w1.Get("a"), 1); - EXPECT_EQ(w2.Get("a"), 2); - EXPECT_EQ(w1.Get("a"), 3); - EXPECT_EQ(master.Get("a"), 4); - - EXPECT_EQ(master.Get("b"), 0); - EXPECT_EQ(w2.Get("b"), 1); - w1.Set("b", 42); - EXPECT_EQ(w2.Get("b"), 42); -} diff --git a/tests/unit/cypher_main_visitor.cpp b/tests/unit/cypher_main_visitor.cpp index 86c699654..9af064cdc 100644 --- a/tests/unit/cypher_main_visitor.cpp +++ b/tests/unit/cypher_main_visitor.cpp @@ -6,8 +6,6 @@ #include <vector> #include "antlr4-runtime.h" -#include "boost/archive/binary_iarchive.hpp" -#include "boost/archive/binary_oarchive.hpp" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -18,9 +16,6 @@ #include "query/frontend/stripped.hpp" #include "query/typed_value.hpp" -#include "capnp/message.h" -#include "query/frontend/ast/ast.capnp.h" - namespace { using namespace query; @@ -113,71 +108,11 @@ class CachedAstGenerator : public Base { Query *query_; }; -// This generator serializes the parsed ast and uses the deserialized one. 
-class SerializedAstGenerator : public Base { - public: - SerializedAstGenerator(const std::string &query) - : Base(query), - storage_([&]() { - ::frontend::opencypher::Parser parser(query); - CypherMainVisitor visitor(context_); - visitor.visit(parser.tree()); - std::stringstream stream; - { - boost::archive::binary_oarchive out_archive(stream); - out_archive << *visitor.query(); - } - AstStorage new_ast; - { - boost::archive::binary_iarchive in_archive(stream); - new_ast.Load(in_archive); - } - return new_ast; - }()), - query_(storage_.query()) {} - - AstStorage storage_; - Query *query_; -}; - -class CapnpAstGenerator : public Base { - public: - CapnpAstGenerator(const std::string &query) - : Base(query), - storage_([&]() { - ::frontend::opencypher::Parser parser(query); - CypherMainVisitor visitor(context_); - visitor.visit(parser.tree()); - - ::capnp::MallocMessageBuilder message; - { - query::capnp::Tree::Builder builder = - message.initRoot<query::capnp::Tree>(); - std::vector<int> saved_uids; - visitor.query()->Save(&builder, &saved_uids); - } - - AstStorage new_ast; - { - const query::capnp::Tree::Reader reader = - message.getRoot<query::capnp::Tree>(); - std::vector<int> loaded_uids; - new_ast.Load(reader, &loaded_uids); - } - return new_ast; - }()), - query_(storage_.query()) {} - - AstStorage storage_; - Query *query_; -}; - template <typename T> class CypherMainVisitorTest : public ::testing::Test {}; typedef ::testing::Types<AstGenerator, OriginalAfterCloningAstGenerator, - ClonedAstGenerator, CachedAstGenerator, - SerializedAstGenerator, CapnpAstGenerator> + ClonedAstGenerator, CachedAstGenerator> AstGeneratorTypes; TYPED_TEST_CASE(CypherMainVisitorTest, AstGeneratorTypes); @@ -1858,72 +1793,4 @@ TYPED_TEST(CypherMainVisitorTest, UnionAll) { ASSERT_FALSE(return_clause->body_.distinct); } -TYPED_TEST(CypherMainVisitorTest, ModifyUser) { - auto check_modify_user = [](std::string input, std::string username, - std::experimental::optional<TypedValue> password, - bool is_create) { - TypeParam ast_generator(input); - auto *query = ast_generator.query_; - ASSERT_TRUE(query->single_query_); - auto *single_query = query->single_query_; - ASSERT_EQ(single_query->clauses_.size(), 1U); - auto *create_user = dynamic_cast<ModifyUser *>(single_query->clauses_[0]); - ASSERT_TRUE(create_user); - EXPECT_EQ(create_user->username_, username); - if (password) { - ASSERT_NE(create_user->password_, nullptr); - CheckLiteral(ast_generator.context_, create_user->password_, *password); - } else { - EXPECT_EQ(create_user->password_, nullptr); - } - EXPECT_EQ(create_user->is_create_, is_create); - }; - - check_modify_user("CreaTE UsEr dominik", "dominik", - std::experimental::nullopt, true); - check_modify_user("CreaTE UsEr dominik WIth PaSSWORD 'spomenik'", "dominik", - "spomenik", true); - check_modify_user("CreaTE UsEr dominik WIth PaSSWORD NULL", "dominik", - TypedValue::Null, true); - check_modify_user("AlTeR UsEr dominik", "dominik", std::experimental::nullopt, - false); - check_modify_user("ALtEr UsEr dominik", "dominik", std::experimental::nullopt, - false); - check_modify_user("ALtEr UsEr dominik WIth PaSSWORD 'spomenik'", "dominik", - "spomenik", false); - check_modify_user("ALtEr UsEr dominik WIth PaSSWORD NULL", "dominik", - TypedValue::Null, false); - EXPECT_THROW( - check_modify_user( - "CreaTE UsEr dominik WIth PaSSWORD 'spomenik' PaSSwoRD 'u muzeju'", - "dominik", "spomenik", true), - QueryException); - EXPECT_THROW(check_modify_user("CreaTE UsEr dominik WIth PaSSWORD 12345", - 
"dominik", "spomenik", true), - SyntaxException); -} - -TYPED_TEST(CypherMainVisitorTest, DropUser) { - auto check_drop_user = [](std::string input, - const std::vector<std::string> &usernames) { - TypeParam ast_generator(input); - auto *query = ast_generator.query_; - ASSERT_TRUE(query->single_query_); - auto *single_query = query->single_query_; - ASSERT_EQ(single_query->clauses_.size(), 1U); - auto *drop_user = dynamic_cast<DropUser *>(single_query->clauses_[0]); - ASSERT_TRUE(drop_user); - EXPECT_EQ(drop_user->usernames_, usernames); - }; - - EXPECT_THROW(check_drop_user("DrOp USER", {}), SyntaxException); - check_drop_user("DrOP UsEr dominik", {"dominik"}); - check_drop_user("DrOP USER dominik , spomenik", {"dominik", "spomenik"}); - EXPECT_THROW( - check_drop_user("DrOP USER dominik, , spomenik", {"dominik", "spomenik"}), - SyntaxException); - check_drop_user("DrOP USER dominik , spomenik , jackie, jackie , johnny", - {"dominik", "spomenik", "jackie", "jackie", "johnny"}); -} - } // namespace diff --git a/tests/unit/database_master.cpp b/tests/unit/database_master.cpp deleted file mode 100644 index 24cb62664..000000000 --- a/tests/unit/database_master.cpp +++ /dev/null @@ -1,11 +0,0 @@ -#include "gtest/gtest.h" - -#include "config.hpp" -#include "database/graph_db.hpp" - -TEST(DatabaseMaster, Instantiate) { - database::Config config; - config.master_endpoint = io::network::Endpoint("127.0.0.1", 0); - config.worker_id = 0; - database::Master master(config); -} diff --git a/tests/unit/distributed_bfs.cpp b/tests/unit/distributed_bfs.cpp deleted file mode 100644 index 173b1ff22..000000000 --- a/tests/unit/distributed_bfs.cpp +++ /dev/null @@ -1,113 +0,0 @@ -#include "gtest/gtest.h" - -#include "database/graph_db_accessor.hpp" -#include "distributed/bfs_rpc_clients.hpp" - -#include "distributed_common.hpp" - -using namespace database; - -std::vector<int> V = {0, 1, 1, 0, 1, 2}; -std::vector<std::pair<int, int>> E = {{0, 1}, {1, 2}, {1, 5}, - {2, 4}, {2, 5}, {3, 4}}; - -class BfsTest : public DistributedGraphDbTest { - protected: - void SetUp() override { - DistributedGraphDbTest::SetUp(); - - for (int v : V) { - auto vertex = v == 0 ? 
InsertVertex(master()) : InsertVertex(worker(v)); - vertices.emplace_back(vertex); - } - - for (auto e : E) { - edges[e] = InsertEdge(vertices[e.first], vertices[e.second], "Edge"); - } - } - - public: - std::vector<storage::VertexAddress> vertices; - std::map<std::pair<int, int>, storage::EdgeAddress> edges; -}; - -TEST_F(BfsTest, Expansion) { - GraphDbAccessor dba{master()}; - - auto &clients = master().bfs_subcursor_clients(); - auto subcursor_ids = clients.CreateBfsSubcursors( - dba.transaction_id(), query::EdgeAtom::Direction::BOTH, - {dba.EdgeType("Edge")}, query::GraphView::OLD); - clients.RegisterSubcursors(subcursor_ids); - - clients.SetSource(subcursor_ids, vertices[0]); - - auto pull = [&clients, &subcursor_ids, &dba](int worker_id) { - return clients.Pull(worker_id, subcursor_ids[worker_id], &dba); - }; - - EXPECT_EQ(pull(0), std::experimental::nullopt); - EXPECT_EQ(pull(1)->GlobalAddress(), vertices[1]); - EXPECT_EQ(pull(2), std::experimental::nullopt); - - clients.PrepareForExpand(subcursor_ids, false); - clients.ExpandLevel(subcursor_ids); - - EXPECT_EQ(pull(0), std::experimental::nullopt); - EXPECT_EQ(pull(1)->GlobalAddress(), vertices[2]); - EXPECT_EQ(pull(1), std::experimental::nullopt); - EXPECT_EQ(pull(2)->GlobalAddress(), vertices[5]); - EXPECT_EQ(pull(2), std::experimental::nullopt); - - clients.PrepareForExpand(subcursor_ids, false); - clients.ExpandLevel(subcursor_ids); - - EXPECT_EQ(pull(0), std::experimental::nullopt); - EXPECT_EQ(pull(1)->GlobalAddress(), vertices[4]); - EXPECT_EQ(pull(1), std::experimental::nullopt); - EXPECT_EQ(pull(2), std::experimental::nullopt); - - clients.PrepareForExpand(subcursor_ids, false); - clients.ExpandLevel(subcursor_ids); - - EXPECT_EQ(pull(0)->GlobalAddress(), vertices[3]); - EXPECT_EQ(pull(0), std::experimental::nullopt); - EXPECT_EQ(pull(1), std::experimental::nullopt); - EXPECT_EQ(pull(2), std::experimental::nullopt); - - auto compare = [this](const std::vector<EdgeAccessor> &lhs, - const std::vector<std::pair<int, int>> &rhs) { - EXPECT_EQ(lhs.size(), rhs.size()); - if (lhs.size() != rhs.size()) return; - for (auto idx = 0u; idx < lhs.size(); ++idx) { - EXPECT_EQ(lhs[idx].GlobalAddress(), edges[rhs[idx]]); - } - }; - - distributed::PathSegment ps; - - ps = clients.ReconstructPath(subcursor_ids, vertices[3], &dba); - ASSERT_EQ(ps.next_vertex, vertices[4]); - ASSERT_EQ(ps.next_edge, std::experimental::nullopt); - compare(ps.edges, {{3, 4}}); - - ps = clients.ReconstructPath(subcursor_ids, vertices[4], &dba); - EXPECT_EQ(ps.next_vertex, std::experimental::nullopt); - EXPECT_EQ(ps.next_edge, (edges[{0, 1}])); - compare(ps.edges, {{2, 4}, {1, 2}}); - - ps = clients.ReconstructPath(subcursor_ids, edges[{0, 1}], &dba); - EXPECT_EQ(ps.next_vertex, std::experimental::nullopt); - EXPECT_EQ(ps.next_edge, std::experimental::nullopt); - compare(ps.edges, {{0, 1}}); - - clients.PrepareForExpand(subcursor_ids, true); - clients.SetSource(subcursor_ids, vertices[3]); - - EXPECT_EQ(pull(0), std::experimental::nullopt); - EXPECT_EQ(pull(1)->GlobalAddress(), vertices[4]); - EXPECT_EQ(pull(1), std::experimental::nullopt); - EXPECT_EQ(pull(2), std::experimental::nullopt); - - clients.RemoveBfsSubcursors(subcursor_ids); -} diff --git a/tests/unit/distributed_common.hpp b/tests/unit/distributed_common.hpp deleted file mode 100644 index 387ab3d2d..000000000 --- a/tests/unit/distributed_common.hpp +++ /dev/null @@ -1,248 +0,0 @@ -#include <experimental/filesystem> -#include <memory> -#include <thread> - -#include <gflags/gflags.h> -#include 
<gtest/gtest.h> - -#include "database/graph_db.hpp" -#include "database/graph_db_accessor.hpp" -#include "distributed/data_manager.hpp" -#include "distributed/updates_rpc_server.hpp" -#include "storage/address_types.hpp" -#include "transactions/engine_master.hpp" - -DECLARE_string(durability_directory); - -namespace fs = std::experimental::filesystem; - -class WorkerInThread { - public: - explicit WorkerInThread(database::Config config) : worker_(config) { - thread_ = std::thread([this, config] { worker_.WaitForShutdown(); }); - } - - ~WorkerInThread() { - if (thread_.joinable()) thread_.join(); - } - - database::Worker *db() { return &worker_; } - - database::Worker worker_; - std::thread thread_; -}; - -class DistributedGraphDbTest : public ::testing::Test { - const std::string kLocal = "127.0.0.1"; - const int kWorkerCount = 2; - - protected: - virtual int QueryExecutionTimeSec(int) { return 180; } - - void Initialize( - std::function<database::Config(database::Config config)> modify_config) { - using namespace std::literals::chrono_literals; - const auto kInitTime = 200ms; - - database::Config master_config; - master_config.master_endpoint = {kLocal, 0}; - master_config.query_execution_time_sec = QueryExecutionTimeSec(0); - master_config.durability_directory = tmp_dir_; - // This is semantically wrong since this is not a cluster of size 1 but of - // size kWorkerCount+1, but it's hard to wait here for workers to recover - // and simultaneously assign the port to which the workers must connect - // TODO(dgleich): Fix sometime in the future - not mission critical - master_config.recovering_cluster_size = 1; - master_ = std::make_unique<database::Master>(modify_config(master_config)); - - std::this_thread::sleep_for(kInitTime); - auto worker_config = [this](int worker_id) { - database::Config config; - config.worker_id = worker_id; - config.master_endpoint = master_->endpoint(); - config.durability_directory = tmp_dir_; - config.worker_endpoint = {kLocal, 0}; - config.query_execution_time_sec = QueryExecutionTimeSec(worker_id); - return config; - }; - - // Flag needs to be updated due to props on disk storage. - FLAGS_durability_directory = tmp_dir_; - - for (int i = 0; i < kWorkerCount; ++i) { - workers_.emplace_back(std::make_unique<WorkerInThread>( - modify_config(worker_config(i + 1)))); - std::this_thread::sleep_for(kInitTime); - } - } - - void SetUp() override { - Initialize([](database::Config config) { return config; }); - } - - void ShutDown() { - // Kill master first because it will expect a shutdown response from the - // workers. - auto t = std::thread([this]() { master_ = nullptr; }); - workers_.clear(); - if (t.joinable()) t.join(); - } - - void CleanDurability() { - if (fs::exists(tmp_dir_)) fs::remove_all(tmp_dir_); - } - - void TearDown() override { - ShutDown(); - CleanDurability(); - } - - database::Master &master() { return *master_; } - auto &master_tx_engine() { - return dynamic_cast<tx::MasterEngine &>(master_->tx_engine()); - } - - database::Worker &worker(int worker_id) { - return workers_[worker_id - 1]->worker_; - } - - /// Inserts a vertex and returns it's global address. Does it in a new - /// transaction. - storage::VertexAddress InsertVertex(database::GraphDb &db) { - database::GraphDbAccessor dba{db}; - auto r_val = dba.InsertVertex().GlobalAddress(); - dba.Commit(); - return r_val; - } - - /// Inserts an edge (on the 'from' side) and returns it's global address. 
- auto InsertEdge(storage::VertexAddress from_addr, - storage::VertexAddress to_addr, - const std::string &edge_type_name) { - CHECK(from_addr.is_remote() && to_addr.is_remote()) - << "Distributed test InsertEdge only takes global addresses"; - database::GraphDbAccessor dba{master()}; - VertexAccessor from{from_addr, dba}; - VertexAccessor to{to_addr, dba}; - auto r_val = - dba.InsertEdge(from, to, dba.EdgeType(edge_type_name)).GlobalAddress(); - master().updates_server().Apply(dba.transaction_id()); - worker(1).updates_server().Apply(dba.transaction_id()); - worker(2).updates_server().Apply(dba.transaction_id()); - dba.Commit(); - return r_val; - } - - auto VertexCount(database::GraphDb &db) { - database::GraphDbAccessor dba{db}; - auto vertices = dba.Vertices(false); - return std::distance(vertices.begin(), vertices.end()); - }; - - auto EdgeCount(database::GraphDb &db) { - database::GraphDbAccessor dba(db); - auto edges = dba.Edges(false); - return std::distance(edges.begin(), edges.end()); - }; - - fs::path tmp_dir_ = fs::temp_directory_path() / - ("MG_test_unit_durability" + std::to_string(getpid())); - - private: - std::unique_ptr<database::Master> master_; - std::vector<std::unique_ptr<WorkerInThread>> workers_; -}; - -enum class TestType { SINGLE_NODE, DISTRIBUTED }; - -// Class that can be used both in distributed and single node tests. -class Cluster { - public: - Cluster(TestType test_type, int num_workers = 0) : test_type_(test_type) { - using namespace std::literals::chrono_literals; - switch (test_type) { - case TestType::SINGLE_NODE: - master_ = std::make_unique<database::SingleNode>(database::Config{}); - break; - case TestType::DISTRIBUTED: - database::Config master_config; - master_config.master_endpoint = {kLocal, 0}; - - auto master_tmp = std::make_unique<database::Master>(master_config); - auto master_endpoint = master_tmp->endpoint(); - master_ = std::move(master_tmp); - - const auto kInitTime = 200ms; - std::this_thread::sleep_for(kInitTime); - - auto worker_config = [this, master_endpoint](int worker_id) { - database::Config config; - config.worker_id = worker_id; - config.master_endpoint = master_endpoint; - config.worker_endpoint = {kLocal, 0}; - return config; - }; - - for (int i = 0; i < num_workers; ++i) { - workers_.emplace_back( - std::make_unique<WorkerInThread>(worker_config(i + 1))); - } - std::this_thread::sleep_for(kInitTime); - break; - } - } - - ~Cluster() { - auto t = std::thread([this] { master_ = nullptr; }); - workers_.clear(); - if (t.joinable()) t.join(); - } - - database::GraphDb *master() { return master_.get(); } - auto workers() { - return iter::imap([](auto &worker) { return worker->db(); }, workers_); - } - - void ClearCache(tx::TransactionId tx_id) { - master()->data_manager().ClearCacheForSingleTransaction(tx_id); - for (auto member : workers()) { - member->data_manager().ClearCacheForSingleTransaction(tx_id); - } - } - - void ApplyUpdates(tx::TransactionId tx_id) { - switch (test_type_) { - case TestType::SINGLE_NODE: - break; - case TestType::DISTRIBUTED: - master()->updates_server().Apply(tx_id); - for (auto member : workers()) { - member->updates_server().Apply(tx_id); - } - ClearCache(tx_id); - } - } - - void AdvanceCommand(tx::TransactionId tx_id) { - switch (test_type_) { - case TestType::SINGLE_NODE: { - database::GraphDbAccessor dba{*master(), tx_id}; - dba.AdvanceCommand(); - break; - } - case TestType::DISTRIBUTED: - ApplyUpdates(tx_id); - master()->tx_engine().Advance(tx_id); - for (auto worker : workers()) 
worker->tx_engine().UpdateCommand(tx_id); - ClearCache(tx_id); - break; - } - } - - private: - const std::string kLocal = "127.0.0.1"; - - TestType test_type_; - std::unique_ptr<database::GraphDb> master_; - std::vector<std::unique_ptr<WorkerInThread>> workers_; -}; diff --git a/tests/unit/distributed_coordination.cpp b/tests/unit/distributed_coordination.cpp deleted file mode 100644 index ff37cccad..000000000 --- a/tests/unit/distributed_coordination.cpp +++ /dev/null @@ -1,205 +0,0 @@ -#include <atomic> -#include <experimental/optional> -#include <memory> -#include <thread> -#include <unordered_set> -#include <vector> - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -#include "communication/rpc/client_pool.hpp" -#include "communication/rpc/server.hpp" -#include "distributed/cluster_discovery_master.hpp" -#include "distributed/cluster_discovery_worker.hpp" -#include "distributed/coordination_master.hpp" -#include "distributed/coordination_worker.hpp" -#include "distributed/rpc_worker_clients.hpp" -#include "io/network/endpoint.hpp" - -using communication::rpc::ClientPool; -using communication::rpc::Server; -using namespace distributed; -using namespace std::literals::chrono_literals; - -const int kWorkerCount = 5; -const std::string kLocal = "127.0.0.1"; - -class WorkerCoordinationInThread { - struct Worker { - Worker(Endpoint master_endpoint) : master_endpoint(master_endpoint) {} - Endpoint master_endpoint; - Server server{{kLocal, 0}}; - WorkerCoordination coord{server, master_endpoint}; - ClientPool client_pool{master_endpoint}; - ClusterDiscoveryWorker discovery{server, coord, client_pool}; - std::atomic<int> worker_id_{0}; - }; - - public: - WorkerCoordinationInThread(io::network::Endpoint master_endpoint, - int desired_id = -1) { - std::atomic<bool> init_done{false}; - worker_thread_ = - std::thread([this, master_endpoint, desired_id, &init_done] { - worker.emplace(master_endpoint); - worker->discovery.RegisterWorker(desired_id); - worker->worker_id_ = desired_id; - init_done = true; - worker->coord.WaitForShutdown(); - worker = std::experimental::nullopt; - }); - - while (!init_done) std::this_thread::sleep_for(10ms); - } - - int worker_id() const { return worker->worker_id_; } - auto endpoint() const { return worker->server.endpoint(); } - auto worker_endpoint(int worker_id) { - return worker->coord.GetEndpoint(worker_id); - } - auto worker_ids() { return worker->coord.GetWorkerIds(); } - void join() { worker_thread_.join(); } - void NotifyWorkerRecovered() { worker->discovery.NotifyWorkerRecovered(); } - - private: - std::thread worker_thread_; - std::experimental::optional<Worker> worker; -}; - -TEST(Distributed, Coordination) { - Server master_server({kLocal, 0}); - std::vector<std::unique_ptr<WorkerCoordinationInThread>> workers; - { - MasterCoordination master_coord(master_server.endpoint()); - master_coord.SetRecoveryInfo(std::experimental::nullopt); - RpcWorkerClients rpc_worker_clients(master_coord); - ClusterDiscoveryMaster master_discovery_(master_server, master_coord, - rpc_worker_clients); - - for (int i = 1; i <= kWorkerCount; ++i) - workers.emplace_back(std::make_unique<WorkerCoordinationInThread>( - master_server.endpoint(), i)); - - // Expect that all workers have a different ID. - std::unordered_set<int> worker_ids; - for (const auto &w : workers) worker_ids.insert(w->worker_id()); - ASSERT_EQ(worker_ids.size(), kWorkerCount); - - // Check endpoints. 
- for (auto &w1 : workers) { - for (auto &w2 : workers) { - EXPECT_EQ(w1->worker_endpoint(w2->worker_id()), w2->endpoint()); - } - } - } // Coordinated shutdown. - - for (auto &worker : workers) worker->join(); -} - -TEST(Distributed, DesiredAndUniqueId) { - Server master_server({kLocal, 0}); - std::vector<std::unique_ptr<WorkerCoordinationInThread>> workers; - { - MasterCoordination master_coord(master_server.endpoint()); - master_coord.SetRecoveryInfo(std::experimental::nullopt); - RpcWorkerClients rpc_worker_clients(master_coord); - ClusterDiscoveryMaster master_discovery_(master_server, master_coord, - rpc_worker_clients); - - workers.emplace_back(std::make_unique<WorkerCoordinationInThread>( - master_server.endpoint(), 42)); - EXPECT_EQ(workers[0]->worker_id(), 42); - - EXPECT_DEATH( - workers.emplace_back(std::make_unique<WorkerCoordinationInThread>( - master_server.endpoint(), 42)), - ""); - } - - for (auto &worker : workers) worker->join(); -} - -TEST(Distributed, CoordinationWorkersId) { - Server master_server({kLocal, 0}); - std::vector<std::unique_ptr<WorkerCoordinationInThread>> workers; - { - MasterCoordination master_coord(master_server.endpoint()); - master_coord.SetRecoveryInfo(std::experimental::nullopt); - RpcWorkerClients rpc_worker_clients(master_coord); - ClusterDiscoveryMaster master_discovery_(master_server, master_coord, - rpc_worker_clients); - - workers.emplace_back(std::make_unique<WorkerCoordinationInThread>( - master_server.endpoint(), 42)); - workers.emplace_back(std::make_unique<WorkerCoordinationInThread>( - master_server.endpoint(), 43)); - - std::vector<int> ids; - ids.push_back(0); - - for (auto &worker : workers) ids.push_back(worker->worker_id()); - EXPECT_THAT(master_coord.GetWorkerIds(), - testing::UnorderedElementsAreArray(ids)); - } - - for (auto &worker : workers) worker->join(); -} - -TEST(Distributed, ClusterDiscovery) { - Server master_server({kLocal, 0}); - std::vector<std::unique_ptr<WorkerCoordinationInThread>> workers; - { - MasterCoordination master_coord(master_server.endpoint()); - master_coord.SetRecoveryInfo(std::experimental::nullopt); - RpcWorkerClients rpc_worker_clients(master_coord); - ClusterDiscoveryMaster master_discovery_(master_server, master_coord, - rpc_worker_clients); - std::vector<int> ids; - int worker_count = 10; - - ids.push_back(0); - for (int i = 1; i <= worker_count; ++i) { - workers.emplace_back(std::make_unique<WorkerCoordinationInThread>( - master_server.endpoint(), i)); - - ids.push_back(i); - } - - EXPECT_THAT(master_coord.GetWorkerIds(), - testing::UnorderedElementsAreArray(ids)); - for (auto &worker : workers) { - EXPECT_THAT(worker->worker_ids(), - testing::UnorderedElementsAreArray(ids)); - } - } - - for (auto &worker : workers) worker->join(); -} - -TEST(Distributed, KeepsTrackOfRecovered) { - Server master_server({kLocal, 0}); - std::vector<std::unique_ptr<WorkerCoordinationInThread>> workers; - { - MasterCoordination master_coord(master_server.endpoint()); - master_coord.SetRecoveryInfo(std::experimental::nullopt); - RpcWorkerClients rpc_worker_clients(master_coord); - ClusterDiscoveryMaster master_discovery_(master_server, master_coord, - rpc_worker_clients); - int worker_count = 10; - for (int i = 1; i <= worker_count; ++i) { - workers.emplace_back(std::make_unique<WorkerCoordinationInThread>( - master_server.endpoint(), i)); - workers.back()->NotifyWorkerRecovered(); - EXPECT_THAT(master_coord.CountRecoveredWorkers(), i); - } - } - - for (auto &worker : workers) worker->join(); -} - -int main(int 
argc, char **argv) { - ::testing::InitGoogleTest(&argc, argv); - ::testing::FLAGS_gtest_death_test_style = "threadsafe"; - return RUN_ALL_TESTS(); -} diff --git a/tests/unit/distributed_data_exchange.cpp b/tests/unit/distributed_data_exchange.cpp deleted file mode 100644 index c475fd5ed..000000000 --- a/tests/unit/distributed_data_exchange.cpp +++ /dev/null @@ -1,133 +0,0 @@ -#include <unordered_map> - -#include "gtest/gtest.h" - -#include "database/graph_db_accessor.hpp" -#include "storage/edge_accessor.hpp" -#include "storage/vertex_accessor.hpp" - -#include "distributed_common.hpp" - -using namespace database; -using namespace std::literals::chrono_literals; - -TEST_F(DistributedGraphDbTest, RemoteDataGetting) { - // Only old data is visible remotely, so create and commit some data. - gid::Gid v1_id, v2_id, e1_id; - - { - GraphDbAccessor dba{master()}; - auto v1 = dba.InsertVertex(); - auto v2 = dba.InsertVertex(); - auto e1 = dba.InsertEdge(v1, v2, dba.EdgeType("et")); - - // Set some data so we see we're getting the right stuff. - v1.PropsSet(dba.Property("p1"), 42); - v1.add_label(dba.Label("label")); - v2.PropsSet(dba.Property("p2"), "value"); - e1.PropsSet(dba.Property("p3"), true); - - v1_id = v1.gid(); - v2_id = v2.gid(); - e1_id = e1.gid(); - - dba.Commit(); - } - - // The master must start a transaction before workers can work in it. - GraphDbAccessor master_dba{master()}; - - { - GraphDbAccessor w1_dba{worker(1), master_dba.transaction_id()}; - VertexAccessor v1_in_w1{{v1_id, 0}, w1_dba}; - EXPECT_NE(v1_in_w1.GetOld(), nullptr); - EXPECT_EQ(v1_in_w1.GetNew(), nullptr); - EXPECT_EQ(v1_in_w1.PropsAt(w1_dba.Property("p1")).Value<int64_t>(), 42); - EXPECT_TRUE(v1_in_w1.has_label(w1_dba.Label("label"))); - } - - { - GraphDbAccessor w2_dba{worker(2), master_dba.transaction_id()}; - VertexAccessor v2_in_w2{{v2_id, 0}, w2_dba}; - EXPECT_NE(v2_in_w2.GetOld(), nullptr); - EXPECT_EQ(v2_in_w2.GetNew(), nullptr); - EXPECT_EQ(v2_in_w2.PropsAt(w2_dba.Property("p2")).Value<std::string>(), - "value"); - EXPECT_FALSE(v2_in_w2.has_label(w2_dba.Label("label"))); - - VertexAccessor v1_in_w2{{v1_id, 0}, w2_dba}; - EdgeAccessor e1_in_w2{{e1_id, 0}, w2_dba}; - EXPECT_EQ(e1_in_w2.from(), v1_in_w2); - EXPECT_EQ(e1_in_w2.to(), v2_in_w2); - EXPECT_EQ(e1_in_w2.EdgeType(), w2_dba.EdgeType("et")); - EXPECT_EQ(e1_in_w2.PropsAt(w2_dba.Property("p3")).Value<bool>(), true); - } -} - -TEST_F(DistributedGraphDbTest, RemoteExpansion) { - // Model (v1)-->(v2), where each vertex is on one worker. - auto from = InsertVertex(worker(1)); - auto to = InsertVertex(worker(2)); - InsertEdge(from, to, "et"); - { - // Expand on the master for three hops. Collect vertex gids. - GraphDbAccessor dba{master()}; - std::vector<VertexAccessor> visited; - - auto expand = [](auto &v) { - for (auto e : v.out()) return e.to(); - for (auto e : v.in()) return e.from(); - CHECK(false) << "No edge in vertex"; - }; - - // Do a few hops back and forth, all on the master. - VertexAccessor v{from, dba}; - for (int i = 0; i < 5; ++i) { - v = expand(v); - EXPECT_FALSE(v.address().is_local()); - EXPECT_EQ(v.address(), i % 2 ? 
from : to); - } - } -} - -TEST_F(DistributedGraphDbTest, VertexCountsEqual) { - for (int i = 0; i < 5; ++i) InsertVertex(master()); - for (int i = 0; i < 7; ++i) InsertVertex(worker(1)); - for (int i = 0; i < 9; ++i) InsertVertex(worker(2)); - - { - GraphDbAccessor accessor(master()); - auto m_cnt = - master().data_clients().VertexCounts(accessor.transaction().id_); - auto w1_cnt = - worker(1).data_clients().VertexCounts(accessor.transaction().id_); - auto w2_cnt = - worker(2).data_clients().VertexCounts(accessor.transaction().id_); - - auto check = [&m_cnt, &w1_cnt, &w2_cnt](int key, int value) { - return m_cnt[key] == w1_cnt[key] && w1_cnt[key] == w2_cnt[key] && - m_cnt[key] == value; - }; - - EXPECT_TRUE(check(master().WorkerId(), 5)); - EXPECT_TRUE(check(worker(1).WorkerId(), 7)); - EXPECT_TRUE(check(worker(2).WorkerId(), 9)); - } -} - -TEST_F(DistributedGraphDbTest, VertexCountsTransactional) { - { - GraphDbAccessor accessor(master()); - InsertVertex(master()); - EXPECT_EQ(master().data_clients().VertexCounts( - accessor.transaction().id_)[master().WorkerId()], - 0); - } - // Transaction after insert which should now see the insertion - { - GraphDbAccessor accessor(master()); - EXPECT_EQ(master().data_clients().VertexCounts( - accessor.transaction().id_)[master().WorkerId()], - 1); - } -} diff --git a/tests/unit/distributed_durability.cpp b/tests/unit/distributed_durability.cpp deleted file mode 100644 index b422c5f18..000000000 --- a/tests/unit/distributed_durability.cpp +++ /dev/null @@ -1,117 +0,0 @@ -#include "distributed_common.hpp" - -#include "database/graph_db_accessor.hpp" -#include "durability/snapshooter.hpp" - -class DistributedDurability : public DistributedGraphDbTest { - public: - void AddVertices() { - AddVertex(master(), "master"); - AddVertex(worker(1), "worker1"); - AddVertex(worker(2), "worker2"); - } - void CheckVertices(int expected_count) { - CheckVertex(master(), expected_count, "master"); - CheckVertex(worker(1), expected_count, "worker1"); - CheckVertex(worker(2), expected_count, "worker2"); - } - void RestartWithRecovery() { - ShutDown(); - Initialize([](database::Config config) { - config.db_recover_on_startup = true; - return config; - }); - } - - private: - void AddVertex(database::GraphDb &db, const std::string &label) { - database::GraphDbAccessor dba(db); - auto vertex = dba.InsertVertex(); - vertex.add_label(dba.Label(label)); - dba.Commit(); - } - - void CheckVertex(database::GraphDb &db, int expected_count, - const std::string &label) { - database::GraphDbAccessor dba(db); - auto it = dba.Vertices(false); - std::vector<VertexAccessor> vertices{it.begin(), it.end()}; - EXPECT_EQ(vertices.size(), expected_count); - for (auto &vertex : vertices) { - ASSERT_EQ(vertex.labels().size(), 1); - EXPECT_EQ(vertex.labels()[0], dba.Label(label)); - } - } -}; - -TEST_F(DistributedDurability, MakeSnapshot) { - // Create a graph with 3 nodes with 3 labels, one on each and make a snapshot - // of it - { - AddVertices(); - database::GraphDbAccessor dba(master()); - master().MakeSnapshot(dba); - } - // Recover the graph and check if it's the same as before - { - RestartWithRecovery(); - CheckVertices(1); - } -} - -TEST_F(DistributedDurability, SnapshotOnExit) { - { - TearDown(); - Initialize([](database::Config config) { - config.snapshot_on_exit = true; - return config; - }); - AddVertices(); - } - // Recover the graph and check if it's the same as before - { - RestartWithRecovery(); - CheckVertices(1); - } -} - -TEST_F(DistributedDurability, 
RecoveryFromSameSnapshot) { - { - AddVertices(); - // Make snapshot on one worker, expect it won't recover from that. - database::GraphDbAccessor dba(worker(1)); - worker(1).MakeSnapshot(dba); - } - { - RestartWithRecovery(); - CheckVertices(0); - AddVertices(); - database::GraphDbAccessor dba(master()); - master().MakeSnapshot(dba); - } - { - RestartWithRecovery(); - CheckVertices(1); - AddVertices(); - CheckVertices(2); - // Make snapshot on one worker, expect it won't recover from that. - database::GraphDbAccessor dba(worker(1)); - worker(1).MakeSnapshot(dba); - } - { - RestartWithRecovery(); - CheckVertices(1); - } -} - -TEST_F(DistributedDurability, RecoveryFailure) { - { - AddVertices(); - // Make a snapshot on the master without the right snapshots on workers. - database::GraphDbAccessor dba(master()); - bool status = durability::MakeSnapshot(master(), dba, tmp_dir_, 100); - ASSERT_TRUE(status); - } - ::testing::FLAGS_gtest_death_test_style = "threadsafe"; - EXPECT_DEATH(RestartWithRecovery(), "worker failed to recover"); -} diff --git a/tests/unit/distributed_dynamic_graph_partitioner.cpp b/tests/unit/distributed_dynamic_graph_partitioner.cpp deleted file mode 100644 index 8ccad6220..000000000 --- a/tests/unit/distributed_dynamic_graph_partitioner.cpp +++ /dev/null @@ -1,152 +0,0 @@ -#include "distributed_common.hpp" - -#include <memory> -#include <thread> -#include <unordered_set> -#include <vector> - -#include "gtest/gtest.h" - -#include "distributed/updates_rpc_clients.hpp" -#include "storage/dynamic_graph_partitioner/dgp.hpp" - -using namespace distributed; -using namespace database; - -DECLARE_int32(dgp_max_batch_size); - -TEST_F(DistributedGraphDbTest, CountLabels) { - auto va = InsertVertex(master()); - auto vb = InsertVertex(worker(1)); - auto vc = InsertVertex(worker(2)); - for (int i = 0; i < 2; ++i) InsertEdge(va, va, "edge"); - for (int i = 0; i < 3; ++i) InsertEdge(va, vb, "edge"); - for (int i = 0; i < 4; ++i) InsertEdge(va, vc, "edge"); - for (int i = 0; i < 5; ++i) InsertEdge(vb, va, "edge"); - for (int i = 0; i < 6; ++i) InsertEdge(vc, va, "edge"); - - DynamicGraphPartitioner dgp(&master()); - GraphDbAccessor dba(master()); - VertexAccessor v(va, dba); - auto count_labels = dgp.CountLabels(v); - - // Self loops counted twice - EXPECT_EQ(count_labels[master().WorkerId()], 2 * 2); - - EXPECT_EQ(count_labels[worker(1).WorkerId()], 3 + 5); - EXPECT_EQ(count_labels[worker(2).WorkerId()], 4 + 6); -} - -TEST_F(DistributedGraphDbTest, FindMigrationsMoveVertex) { - auto va = InsertVertex(master()); - auto vb = InsertVertex(worker(1)); - - // Balance the number of nodes on workers a bit - InsertVertex(worker(2)); - InsertVertex(worker(2)); - - for (int i = 0; i < 100; ++i) InsertEdge(va, vb, "edge"); - DynamicGraphPartitioner dgp(&master()); - GraphDbAccessor dba(master()); - auto migrations = dgp.FindMigrations(dba); - // Expect `va` to try to move to another worker, the one connected to it - ASSERT_EQ(migrations.size(), 1); - EXPECT_EQ(migrations[0].second, worker(1).WorkerId()); -} - -TEST_F(DistributedGraphDbTest, FindMigrationsNoChange) { - InsertVertex(master()); - InsertVertex(worker(1)); - InsertVertex(worker(2)); - - // Everything is balanced, there should be no movement - - DynamicGraphPartitioner dgp(&master()); - GraphDbAccessor dba(master()); - auto migrations = dgp.FindMigrations(dba); - EXPECT_EQ(migrations.size(), 0); -} - -TEST_F(DistributedGraphDbTest, FindMigrationsMultipleAndLimit) { - auto va = InsertVertex(master()); - auto vb = InsertVertex(master()); 
- auto vc = InsertVertex(worker(1)); - - // Balance the number of nodes on workers a bit - InsertVertex(worker(1)); - InsertVertex(worker(2)); - InsertVertex(worker(2)); - - for (int i = 0; i < 100; ++i) InsertEdge(va, vc, "edge"); - for (int i = 0; i < 100; ++i) InsertEdge(vb, vc, "edge"); - DynamicGraphPartitioner dgp(&master()); - GraphDbAccessor dba(master()); - { - auto migrations = dgp.FindMigrations(dba); - // Expect vertices to try to move to another worker - ASSERT_EQ(migrations.size(), 2); - } - - // See if flag affects number of returned results - { - FLAGS_dgp_max_batch_size = 1; - auto migrations = dgp.FindMigrations(dba); - // Expect vertices to try to move to another worker - ASSERT_EQ(migrations.size(), 1); - } -} - -TEST_F(DistributedGraphDbTest, Run) { - // Emulate a bipartite graph with lots of connections on the left, and right - // side, and some connections between the halfs - std::vector<storage::VertexAddress> left; - for (int i = 0; i < 10; ++i) { - left.push_back(InsertVertex(master())); - } - std::vector<storage::VertexAddress> right; - for (int i = 0; i < 10; ++i) { - right.push_back(InsertVertex(master())); - } - - // Force the nodes of both sides to stay on one worker by inserting a lot of - // edges in between them - for (int i = 0; i < 1000; ++i) { - InsertEdge(left[rand() % 10], left[rand() % 10], "edge"); - InsertEdge(right[rand() % 10], right[rand() % 10], "edge"); - } - - // Insert edges between left and right side - for (int i = 0; i < 50; ++i) - InsertEdge(left[rand() % 10], right[rand() % 10], "edge"); - - // Balance it out so that the vertices count on workers don't influence the - // partitioning too much - for (int i = 0; i < 10; ++i) InsertVertex(worker(2)); - - DynamicGraphPartitioner dgp(&master()); - // Transfer one by one to actually converge - FLAGS_dgp_max_batch_size = 1; - // Try a bit more transfers to see if we reached a steady state - for (int i = 0; i < 15; ++i) { - dgp.Run(); - } - - EXPECT_EQ(VertexCount(master()), 10); - EXPECT_EQ(VertexCount(worker(1)), 10); - - auto CountRemotes = [](GraphDbAccessor &dba) { - int64_t cnt = 0; - for (auto vertex : dba.Vertices(false)) { - for (auto edge : vertex.in()) - if (edge.from_addr().is_remote()) ++cnt; - for (auto edge : vertex.out()) - if (edge.to_addr().is_remote()) ++cnt; - } - return cnt; - }; - - GraphDbAccessor dba_m(master()); - GraphDbAccessor dba_w1(worker(1)); - EXPECT_EQ(CountRemotes(dba_m), 50); - EXPECT_EQ(CountRemotes(dba_w1), 50); -} diff --git a/tests/unit/distributed_gc.cpp b/tests/unit/distributed_gc.cpp deleted file mode 100644 index 53fd5b5c0..000000000 --- a/tests/unit/distributed_gc.cpp +++ /dev/null @@ -1,78 +0,0 @@ -#include <gtest/gtest.h> - -#include "distributed_common.hpp" - -TEST_F(DistributedGraphDbTest, GarbageCollect) { - database::GraphDbAccessor dba{master()}; - auto tx = dba.transaction_id(); - dba.Commit(); - - // Create multiple transactions so that the commit log can be cleared - for (int i = 0; i < tx::CommitLog::kBitsetBlockSize; ++i) { - database::GraphDbAccessor dba{master()}; - } - - master().CollectGarbage(); - worker(1).CollectGarbage(); - worker(2).CollectGarbage(); - EXPECT_EQ(master().tx_engine().Info(tx).is_committed(), true); - - database::GraphDbAccessor dba2{master()}; - auto tx_last = dba2.transaction_id(); - dba2.Commit(); - - worker(1).CollectGarbage(); - worker(2).CollectGarbage(); - master().CollectGarbage(); - - EXPECT_DEATH(master().tx_engine().Info(tx), "chunk is nullptr"); - EXPECT_DEATH(worker(1).tx_engine().Info(tx), "chunk is 
nullptr"); - EXPECT_DEATH(worker(2).tx_engine().Info(tx), "chunk is nullptr"); - EXPECT_EQ(master().tx_engine().Info(tx_last).is_committed(), true); - EXPECT_EQ(worker(1).tx_engine().Info(tx_last).is_committed(), true); - EXPECT_EQ(worker(2).tx_engine().Info(tx_last).is_committed(), true); -} - -TEST_F(DistributedGraphDbTest, GarbageCollectBlocked) { - database::GraphDbAccessor dba{master()}; - auto tx = dba.transaction_id(); - dba.Commit(); - - // Block garbage collection because this is a still alive transaction on the - // worker - database::GraphDbAccessor dba3{worker(1)}; - - // Create multiple transactions so that the commit log can be cleared - for (int i = 0; i < tx::CommitLog::kBitsetBlockSize; ++i) { - database::GraphDbAccessor dba{master()}; - } - - // Query for a large id so that the commit log new block is created - master().tx_engine().Info(tx::CommitLog::kBitsetBlockSize); - - master().CollectGarbage(); - worker(1).CollectGarbage(); - worker(2).CollectGarbage(); - EXPECT_EQ(master().tx_engine().Info(tx).is_committed(), true); - - database::GraphDbAccessor dba2{master()}; - auto tx_last = dba2.transaction_id(); - dba2.Commit(); - - worker(1).CollectGarbage(); - worker(2).CollectGarbage(); - master().CollectGarbage(); - - EXPECT_EQ(master().tx_engine().Info(tx).is_committed(), true); - EXPECT_EQ(worker(1).tx_engine().Info(tx).is_committed(), true); - EXPECT_EQ(worker(2).tx_engine().Info(tx).is_committed(), true); - EXPECT_EQ(master().tx_engine().Info(tx_last).is_committed(), true); - EXPECT_EQ(worker(1).tx_engine().Info(tx_last).is_committed(), true); - EXPECT_EQ(worker(2).tx_engine().Info(tx_last).is_committed(), true); -} - -int main(int argc, char **argv) { - ::testing::FLAGS_gtest_death_test_style = "threadsafe"; - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/tests/unit/distributed_graph_db.cpp b/tests/unit/distributed_graph_db.cpp deleted file mode 100644 index b93f5bab2..000000000 --- a/tests/unit/distributed_graph_db.cpp +++ /dev/null @@ -1,183 +0,0 @@ -#include <memory> -#include <thread> -#include <unordered_set> - -#include "gtest/gtest.h" - -#include "database/graph_db.hpp" -#include "distributed/coordination.hpp" -#include "distributed/coordination_master.hpp" -#include "distributed/coordination_worker.hpp" -#include "distributed/data_rpc_clients.hpp" -#include "distributed/data_rpc_server.hpp" -#include "distributed/plan_consumer.hpp" -#include "distributed/plan_dispatcher.hpp" -#include "distributed/pull_rpc_clients.hpp" -#include "distributed_common.hpp" -#include "io/network/endpoint.hpp" -#include "query/frontend/ast/ast.hpp" -#include "query/frontend/ast/cypher_main_visitor.hpp" -#include "query/frontend/semantic/symbol_generator.hpp" -#include "query/frontend/semantic/symbol_table.hpp" -#include "query/interpreter.hpp" -#include "query/plan/planner.hpp" -#include "query/typed_value.hpp" -#include "query_common.hpp" -#include "query_plan_common.hpp" -#include "transactions/engine_master.hpp" - -using namespace distributed; -using namespace database; -using namespace std::literals::chrono_literals; - -TEST_F(DistributedGraphDbTest, Coordination) { - EXPECT_NE(master().endpoint().port(), 0); - EXPECT_NE(worker(1).endpoint().port(), 0); - EXPECT_NE(worker(2).endpoint().port(), 0); - - EXPECT_EQ(master().GetEndpoint(1), worker(1).endpoint()); - EXPECT_EQ(master().GetEndpoint(2), worker(2).endpoint()); - EXPECT_EQ(worker(1).GetEndpoint(0), master().endpoint()); - EXPECT_EQ(worker(1).GetEndpoint(2), worker(2).endpoint()); - 
EXPECT_EQ(worker(2).GetEndpoint(0), master().endpoint()); - EXPECT_EQ(worker(2).GetEndpoint(1), worker(1).endpoint()); -} - -TEST_F(DistributedGraphDbTest, TxEngine) { - auto *tx1 = master_tx_engine().Begin(); - auto *tx2 = master_tx_engine().Begin(); - EXPECT_EQ(tx2->snapshot().size(), 1); - EXPECT_EQ( - worker(1).tx_engine().RunningTransaction(tx1->id_)->snapshot().size(), 0); - EXPECT_EQ(worker(2).tx_engine().RunningTransaction(tx2->id_)->snapshot(), - tx2->snapshot()); - - ::testing::FLAGS_gtest_death_test_style = "threadsafe"; - EXPECT_DEATH(worker(2).tx_engine().RunningTransaction(123), ""); -} - -template <typename TType> -using mapper_vec = - std::vector<std::reference_wrapper<storage::ConcurrentIdMapper<TType>>>; - -TEST_F(DistributedGraphDbTest, StorageTypes) { - auto test_mappers = [](auto mappers, auto ids) { - for (size_t i = 0; i < mappers.size(); ++i) { - ids.emplace_back( - mappers[i].get().value_to_id("value" + std::to_string(i))); - } - EXPECT_GT(ids.size(), 0); - for (size_t i = 0; i < mappers.size(); ++i) { - for (size_t j = 0; j < ids.size(); ++j) { - EXPECT_EQ(mappers[i].get().id_to_value(ids[j]), - "value" + std::to_string(j)); - } - } - }; - - test_mappers(mapper_vec<storage::Label>{master().label_mapper(), - worker(1).label_mapper(), - worker(2).label_mapper()}, - std::vector<storage::Label>{}); - test_mappers(mapper_vec<storage::EdgeType>{master().edge_type_mapper(), - worker(1).edge_type_mapper(), - worker(2).edge_type_mapper()}, - std::vector<storage::EdgeType>{}); - test_mappers(mapper_vec<storage::Property>{master().property_mapper(), - worker(1).property_mapper(), - worker(2).property_mapper()}, - std::vector<storage::Property>{}); -} - -TEST_F(DistributedGraphDbTest, Counters) { - EXPECT_EQ(master().counters().Get("a"), 0); - EXPECT_EQ(worker(1).counters().Get("a"), 1); - EXPECT_EQ(worker(2).counters().Get("a"), 2); - - EXPECT_EQ(worker(1).counters().Get("b"), 0); - EXPECT_EQ(worker(2).counters().Get("b"), 1); - EXPECT_EQ(master().counters().Get("b"), 2); -} - -TEST_F(DistributedGraphDbTest, DispatchPlan) { - auto kRPCWaitTime = 600ms; - int64_t plan_id = 5; - SymbolTable symbol_table; - AstStorage storage; - - auto scan_all = MakeScanAll(storage, symbol_table, "n"); - - master().plan_dispatcher().DispatchPlan(plan_id, scan_all.op_, symbol_table); - std::this_thread::sleep_for(kRPCWaitTime); - - auto check_for_worker = [plan_id, &symbol_table](auto &worker) { - auto &cached = worker.plan_consumer().PlanForId(plan_id); - EXPECT_NE(dynamic_cast<query::plan::ScanAll *>(cached.plan.get()), nullptr); - EXPECT_EQ(cached.symbol_table.max_position(), symbol_table.max_position()); - EXPECT_EQ(cached.symbol_table.table(), symbol_table.table()); - }; - check_for_worker(worker(1)); - check_for_worker(worker(2)); - - master().plan_dispatcher().RemovePlan(plan_id); - ::testing::FLAGS_gtest_death_test_style = "threadsafe"; - EXPECT_DEATH(check_for_worker(worker(1)), "Missing plan*"); -} - -TEST_F(DistributedGraphDbTest, BuildIndexDistributed) { - storage::Label label; - storage::Property property; - - { - GraphDbAccessor dba0{master()}; - label = dba0.Label("label"); - property = dba0.Property("property"); - auto tx_id = dba0.transaction_id(); - - GraphDbAccessor dba1{worker(1), tx_id}; - GraphDbAccessor dba2{worker(2), tx_id}; - auto add_vertex = [label, property](GraphDbAccessor &dba) { - auto vertex = dba.InsertVertex(); - vertex.add_label(label); - vertex.PropsSet(property, 1); - }; - for (int i = 0; i < 100; ++i) add_vertex(dba0); - for (int i = 0; i < 50; ++i) 
add_vertex(dba1); - for (int i = 0; i < 300; ++i) add_vertex(dba2); - dba0.Commit(); - } - - { - GraphDbAccessor dba{master()}; - dba.BuildIndex(label, property); - EXPECT_TRUE(dba.LabelPropertyIndexExists(label, property)); - EXPECT_EQ(CountIterable(dba.Vertices(label, property, false)), 100); - } - - GraphDbAccessor dba_master{master()}; - - { - GraphDbAccessor dba{worker(1), dba_master.transaction_id()}; - EXPECT_TRUE(dba.LabelPropertyIndexExists(label, property)); - EXPECT_EQ(CountIterable(dba.Vertices(label, property, false)), 50); - } - - { - GraphDbAccessor dba{worker(2), dba_master.transaction_id()}; - EXPECT_TRUE(dba.LabelPropertyIndexExists(label, property)); - EXPECT_EQ(CountIterable(dba.Vertices(label, property, false)), 300); - } -} - -TEST_F(DistributedGraphDbTest, WorkerOwnedDbAccessors) { - GraphDbAccessor dba_w1(worker(1)); - auto v = dba_w1.InsertVertex(); - auto prop = dba_w1.Property("p"); - v.PropsSet(prop, 42); - auto v_ga = v.GlobalAddress(); - dba_w1.Commit(); - - GraphDbAccessor dba_w2(worker(2)); - VertexAccessor v_in_w2{v_ga, dba_w2}; - EXPECT_EQ(v_in_w2.PropsAt(prop).Value<int64_t>(), 42); -} diff --git a/tests/unit/distributed_interpretation.cpp b/tests/unit/distributed_interpretation.cpp deleted file mode 100644 index d879ca72b..000000000 --- a/tests/unit/distributed_interpretation.cpp +++ /dev/null @@ -1,316 +0,0 @@ -#include <chrono> -#include <experimental/optional> - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -#include "database/graph_db.hpp" -#include "distributed/plan_consumer.hpp" -#include "distributed/pull_rpc_clients.hpp" -#include "distributed_common.hpp" -#include "query/interpreter.hpp" -#include "query_common.hpp" -#include "query_plan_common.hpp" -#include "utils/timer.hpp" - -// We use this to ensure a cached plan is removed from the concurrent map and -// properly destructed. 
-DECLARE_int32(skiplist_gc_interval); - -using namespace distributed; -using namespace database; -using namespace std::literals::chrono_literals; - -class DistributedInterpretationTest : public DistributedGraphDbTest { - protected: - void SetUp() override { - DistributedGraphDbTest::SetUp(); - interpreter_.emplace(master()); - } - - void TearDown() override { - interpreter_ = std::experimental::nullopt; - DistributedGraphDbTest::TearDown(); - } - - auto RunWithDba(const std::string &query, GraphDbAccessor &dba) { - std::map<std::string, query::TypedValue> params = {}; - ResultStreamFaker result; - interpreter_.value()(query, dba, params, false).PullAll(result); - return result.GetResults(); - } - - auto Run(const std::string &query) { - GraphDbAccessor dba(master()); - auto results = RunWithDba(query, dba); - dba.Commit(); - return results; - } - - private: - std::experimental::optional<query::Interpreter> interpreter_; -}; - -TEST_F(DistributedInterpretationTest, PullTest) { - auto results = Run("OPTIONAL MATCH(n) UNWIND(RANGE(0, 20)) AS X RETURN 1"); - ASSERT_EQ(results.size(), 3 * 21); - - for (auto result : results) { - ASSERT_EQ(result.size(), 1U); - ASSERT_EQ(result[0].ValueInt(), 1); - } -} - -TEST_F(DistributedInterpretationTest, PullNoResultsTest) { - auto results = Run("MATCH (n) RETURN n"); - ASSERT_EQ(results.size(), 0U); -} - -TEST_F(DistributedInterpretationTest, CreateExpand) { - InsertVertex(master()); - InsertVertex(worker(1)); - InsertVertex(worker(1)); - InsertVertex(worker(2)); - InsertVertex(worker(2)); - InsertVertex(worker(2)); - - Run("MATCH (n) CREATE (n)-[:T]->(m) RETURN n"); - - EXPECT_EQ(VertexCount(master()), 2); - EXPECT_EQ(VertexCount(worker(1)), 4); - EXPECT_EQ(VertexCount(worker(2)), 6); -} - -TEST_F(DistributedInterpretationTest, RemoteExpandTest2) { - // Make a fully connected graph with vertices scattered across master and - // worker storage. - // Vertex count is low, because test gets exponentially slower. The expected - // result size is ~ vertices^3, and then that is compared at the end in no - // particular order which causes O(result_size^2) comparisons. 
- int verts_per_storage = 3; - std::vector<storage::VertexAddress> vertices; - vertices.reserve(verts_per_storage * 3); - auto add_vertices = [this, &vertices, &verts_per_storage](auto &db) { - for (int i = 0; i < verts_per_storage; ++i) - vertices.push_back(InsertVertex(db)); - }; - add_vertices(master()); - add_vertices(worker(1)); - add_vertices(worker(2)); - auto get_edge_type = [](int v1, int v2) { - return std::to_string(v1) + "-" + std::to_string(v2); - }; - std::vector<std::string> edge_types; - edge_types.reserve(vertices.size() * vertices.size()); - for (size_t i = 0; i < vertices.size(); ++i) { - for (size_t j = 0; j < vertices.size(); ++j) { - auto edge_type = get_edge_type(i, j); - edge_types.push_back(edge_type); - InsertEdge(vertices[i], vertices[j], edge_type); - } - } - - auto results = Run("MATCH (n)-[r1]-(m)-[r2]-(l) RETURN type(r1), type(r2)"); - // We expect the number of results to be: - size_t expected_result_size = - // pick (n) - vertices.size() * - // pick both directed edges to other (m) and a - // single edge to (m) which equals (n), hence -1 - (2 * vertices.size() - 1) * - // Pick as before, but exclude the previously taken edge, hence another -1 - (2 * vertices.size() - 1 - 1); - std::vector<std::vector<std::string>> expected; - expected.reserve(expected_result_size); - for (size_t n = 0; n < vertices.size(); ++n) { - for (size_t m = 0; m < vertices.size(); ++m) { - std::vector<std::string> r1s{get_edge_type(n, m)}; - if (n != m) r1s.push_back(get_edge_type(m, n)); - for (size_t l = 0; l < vertices.size(); ++l) { - std::vector<std::string> r2s{get_edge_type(m, l)}; - if (m != l) r2s.push_back(get_edge_type(l, m)); - for (const auto &r1 : r1s) { - for (const auto &r2 : r2s) { - if (r1 == r2) continue; - expected.push_back({r1, r2}); - } - } - } - } - } - ASSERT_EQ(expected.size(), expected_result_size); - ASSERT_EQ(results.size(), expected_result_size); - std::vector<std::vector<std::string>> got; - got.reserve(results.size()); - for (const auto &res : results) { - std::vector<std::string> row; - row.reserve(res.size()); - for (const auto &col : res) { - row.push_back(col.Value<std::string>()); - } - got.push_back(row); - } - ASSERT_THAT(got, testing::UnorderedElementsAreArray(expected)); -} - -TEST_F(DistributedInterpretationTest, Cartesian) { - // Create some data on the master and both workers. 
- storage::Property prop; - { - GraphDbAccessor dba{master()}; - auto tx_id = dba.transaction_id(); - GraphDbAccessor dba1{worker(1), tx_id}; - GraphDbAccessor dba2{worker(2), tx_id}; - prop = dba.Property("prop"); - auto add_data = [prop](GraphDbAccessor &dba, int value) { - dba.InsertVertex().PropsSet(prop, value); - }; - - for (int i = 0; i < 10; ++i) add_data(dba, i); - for (int i = 10; i < 20; ++i) add_data(dba1, i); - for (int i = 20; i < 30; ++i) add_data(dba2, i); - - dba.Commit(); - } - - std::vector<std::vector<int64_t>> expected; - for (int64_t i = 0; i < 30; ++i) - for (int64_t j = 0; j < 30; ++j) expected.push_back({i, j}); - - auto results = Run("MATCH (n), (m) RETURN n.prop, m.prop;"); - - size_t expected_result_size = 30 * 30; - ASSERT_EQ(expected.size(), expected_result_size); - ASSERT_EQ(results.size(), expected_result_size); - - std::vector<std::vector<int64_t>> got; - got.reserve(results.size()); - for (const auto &res : results) { - std::vector<int64_t> row; - row.reserve(res.size()); - for (const auto &col : res) { - row.push_back(col.Value<int64_t>()); - } - got.push_back(row); - } - - ASSERT_THAT(got, testing::UnorderedElementsAreArray(expected)); -} - -class TestQueryWaitsOnFutures : public DistributedInterpretationTest { - protected: - int QueryExecutionTimeSec(int worker_id) override { - return worker_id == 2 ? 3 : 1; - } -}; - -TEST_F(TestQueryWaitsOnFutures, Test) { - const int kVertexCount = 10; - auto make_fully_connected = [](database::GraphDb &db) { - database::GraphDbAccessor dba(db); - std::vector<VertexAccessor> vertices; - for (int i = 0; i < kVertexCount; ++i) - vertices.emplace_back(dba.InsertVertex()); - auto et = dba.EdgeType("et"); - for (auto &from : vertices) - for (auto &to : vertices) dba.InsertEdge(from, to, et); - dba.Commit(); - }; - - make_fully_connected(worker(1)); - ASSERT_EQ(VertexCount(worker(1)), kVertexCount); - ASSERT_EQ(EdgeCount(worker(1)), kVertexCount * kVertexCount); - - { - utils::Timer timer; - try { - Run("MATCH ()--()--()--()--()--()--() RETURN count(1)"); - } catch (...) { - } - double seconds = timer.Elapsed().count(); - EXPECT_GT(seconds, 1); - EXPECT_LT(seconds, 2); - } - - make_fully_connected(worker(2)); - ASSERT_EQ(VertexCount(worker(2)), kVertexCount); - ASSERT_EQ(EdgeCount(worker(2)), kVertexCount * kVertexCount); - - { - utils::Timer timer; - try { - Run("MATCH ()--()--()--()--()--()--() RETURN count(1)"); - } catch (...) { - } - double seconds = timer.Elapsed().count(); - EXPECT_GT(seconds, 3); - } -} - -TEST_F(DistributedInterpretationTest, PlanExpiration) { - FLAGS_query_plan_cache_ttl = 1; - Run("MATCH (n) RETURN n"); - auto ids1 = worker(1).plan_consumer().CachedPlanIds(); - ASSERT_EQ(ids1.size(), 1); - // Sleep so the cached plan becomes invalid. - std::this_thread::sleep_for(std::chrono::milliseconds(1100)); - Run("MATCH (n) RETURN n"); - // Sleep so the invalidated plan (removed from cache which is a concurrent - // map) gets destructed and thus remote caches cleared. 
- std::this_thread::sleep_for(std::chrono::milliseconds(1500)); - auto ids2 = worker(1).plan_consumer().CachedPlanIds(); - ASSERT_EQ(ids2.size(), 1); - EXPECT_NE(ids1, ids2); -} - -TEST_F(DistributedInterpretationTest, ConcurrentPlanExpiration) { - FLAGS_query_plan_cache_ttl = 1; - auto count_vertices = [this]() { - utils::Timer timer; - while (timer.Elapsed() < 3s) { - Run("MATCH () RETURN count(1)"); - } - }; - std::vector<std::thread> counters; - for (size_t i = 0; i < std::thread::hardware_concurrency(); ++i) - counters.emplace_back(count_vertices); - for (auto &t : counters) t.join(); -} - -TEST_F(DistributedInterpretationTest, OngoingProduceKeyTest) { - int worker_count = 10; - for (int i = 0; i < worker_count; ++i) { - InsertVertex(master()); - InsertVertex(worker(1)); - InsertVertex(worker(2)); - } - - GraphDbAccessor dba(master()); - auto count1 = RunWithDba("MATCH (n) RETURN count(n)", dba); - dba.AdvanceCommand(); - auto count2 = RunWithDba("MATCH (n) RETURN count(n)", dba); - - ASSERT_EQ(count1[0][0].ValueInt(), 3 * worker_count); - ASSERT_EQ(count2[0][0].ValueInt(), 3 * worker_count); -} - -TEST_F(DistributedInterpretationTest, AdvanceCommandOnWorkers) { - GraphDbAccessor dba(master()); - RunWithDba("UNWIND RANGE(1, 10) as x CREATE (:A {id: x})", dba); - dba.AdvanceCommand(); - // Advance commands on workers also. - auto futures = dba.db().pull_clients().NotifyAllTransactionCommandAdvanced( - dba.transaction_id()); - for (auto &future : futures) future.wait(); - - auto count = RunWithDba("MATCH (n) RETURN count(n)", dba); - ASSERT_EQ(count[0][0].ValueInt(), 10); -} - -int main(int argc, char **argv) { - google::InitGoogleLogging(argv[0]); - ::testing::InitGoogleTest(&argc, argv); - gflags::ParseCommandLineFlags(&argc, &argv, true); - FLAGS_skiplist_gc_interval = 1; - return RUN_ALL_TESTS(); -} diff --git a/tests/unit/distributed_query_plan.cpp b/tests/unit/distributed_query_plan.cpp deleted file mode 100644 index 7e7869adf..000000000 --- a/tests/unit/distributed_query_plan.cpp +++ /dev/null @@ -1,367 +0,0 @@ -#include <memory> -#include <thread> -#include <unordered_set> - -#include "gtest/gtest.h" - -#include "database/graph_db.hpp" -#include "distributed/coordination.hpp" -#include "distributed/coordination_master.hpp" -#include "distributed/coordination_worker.hpp" -#include "distributed/data_rpc_clients.hpp" -#include "distributed/data_rpc_server.hpp" -#include "distributed/plan_consumer.hpp" -#include "distributed/plan_dispatcher.hpp" -#include "distributed/pull_rpc_clients.hpp" -#include "distributed_common.hpp" -#include "io/network/endpoint.hpp" -#include "query/frontend/ast/ast.hpp" -#include "query/frontend/ast/cypher_main_visitor.hpp" -#include "query/frontend/semantic/symbol_generator.hpp" -#include "query/frontend/semantic/symbol_table.hpp" -#include "query/interpreter.hpp" -#include "query/plan/planner.hpp" -#include "query/typed_value.hpp" -#include "query_common.hpp" -#include "query_plan_common.hpp" -#include "transactions/engine_master.hpp" - -DECLARE_int32(query_execution_time_sec); - -using namespace distributed; -using namespace database; -using namespace std::literals::chrono_literals; - -TEST_F(DistributedGraphDbTest, PullProduceRpc) { - GraphDbAccessor dba{master()}; - Context ctx{dba}; - SymbolGenerator symbol_generator{ctx.symbol_table_}; - AstStorage storage; - - // Query plan for: UNWIND [42, true, "bla", 1, 2] as x RETURN x - using namespace query; - auto list = - LIST(LITERAL(42), LITERAL(true), LITERAL("bla"), LITERAL(1), LITERAL(2)); - auto x = 
ctx.symbol_table_.CreateSymbol("x", true); - auto unwind = std::make_shared<plan::Unwind>(nullptr, list, x); - auto x_expr = IDENT("x"); - ctx.symbol_table_[*x_expr] = x; - auto x_ne = NEXPR("x", x_expr); - ctx.symbol_table_[*x_ne] = ctx.symbol_table_.CreateSymbol("x_ne", true); - auto produce = MakeProduce(unwind, x_ne); - - // Test that the plan works locally. - auto results = CollectProduce(produce.get(), ctx.symbol_table_, dba); - ASSERT_EQ(results.size(), 5); - - const int plan_id = 42; - master().plan_dispatcher().DispatchPlan(plan_id, produce, ctx.symbol_table_); - - tx::CommandId command_id = dba.transaction().cid(); - Parameters params; - std::vector<query::Symbol> symbols{ctx.symbol_table_[*x_ne]}; - auto remote_pull = [this, &command_id, &params, &symbols]( - GraphDbAccessor &dba, int worker_id) { - return master().pull_clients().Pull(dba, worker_id, plan_id, command_id, - params, symbols, false, 3); - }; - auto expect_first_batch = [](auto &batch) { - EXPECT_EQ(batch.pull_state, distributed::PullState::CURSOR_IN_PROGRESS); - ASSERT_EQ(batch.frames.size(), 3); - ASSERT_EQ(batch.frames[0].size(), 1); - EXPECT_EQ(batch.frames[0][0].ValueInt(), 42); - EXPECT_EQ(batch.frames[1][0].ValueBool(), true); - EXPECT_EQ(batch.frames[2][0].ValueString(), "bla"); - }; - auto expect_second_batch = [](auto &batch) { - EXPECT_EQ(batch.pull_state, distributed::PullState::CURSOR_EXHAUSTED); - ASSERT_EQ(batch.frames.size(), 2); - ASSERT_EQ(batch.frames[0].size(), 1); - EXPECT_EQ(batch.frames[0][0].ValueInt(), 1); - EXPECT_EQ(batch.frames[1][0].ValueInt(), 2); - }; - - GraphDbAccessor dba_1{master()}; - GraphDbAccessor dba_2{master()}; - for (int worker_id : {1, 2}) { - // TODO flor, proper test async here. - auto tx1_batch1 = remote_pull(dba_1, worker_id).get(); - expect_first_batch(tx1_batch1); - auto tx2_batch1 = remote_pull(dba_2, worker_id).get(); - expect_first_batch(tx2_batch1); - auto tx2_batch2 = remote_pull(dba_2, worker_id).get(); - expect_second_batch(tx2_batch2); - auto tx1_batch2 = remote_pull(dba_1, worker_id).get(); - expect_second_batch(tx1_batch2); - } -} - -TEST_F(DistributedGraphDbTest, PullProduceRpcWithGraphElements) { - // Create some data on the master and both workers. Each edge (3 of them) and - // vertex (6 of them) will be uniquely identified with their worker id and - // sequence ID, so we can check that we retrieved them all. - storage::Property prop; - { - GraphDbAccessor dba{master()}; - prop = dba.Property("prop"); - auto create_data = [prop](GraphDbAccessor &dba, int worker_id) { - auto v1 = dba.InsertVertex(); - v1.PropsSet(prop, worker_id * 10); - auto v2 = dba.InsertVertex(); - v2.PropsSet(prop, worker_id * 10 + 1); - auto e12 = dba.InsertEdge(v1, v2, dba.EdgeType("et")); - e12.PropsSet(prop, worker_id * 10 + 2); - }; - create_data(dba, 0); - GraphDbAccessor dba_w1{worker(1), dba.transaction_id()}; - create_data(dba_w1, 1); - GraphDbAccessor dba_w2{worker(2), dba.transaction_id()}; - create_data(dba_w2, 2); - dba.Commit(); - } - - GraphDbAccessor dba{master()}; - Context ctx{dba}; - SymbolGenerator symbol_generator{ctx.symbol_table_}; - AstStorage storage; - - // Query plan for: MATCH p = (n)-[r]->(m) return [n, r], m, p - // Use this query to test graph elements are transferred correctly in - // collections too.
- auto n = MakeScanAll(storage, ctx.symbol_table_, "n"); - auto r_m = - MakeExpand(storage, ctx.symbol_table_, n.op_, n.sym_, "r", - EdgeAtom::Direction::OUT, {}, "m", false, GraphView::OLD); - auto p_sym = ctx.symbol_table_.CreateSymbol("p", true); - auto p = std::make_shared<query::plan::ConstructNamedPath>( - r_m.op_, p_sym, - std::vector<Symbol>{n.sym_, r_m.edge_sym_, r_m.node_sym_}); - auto return_n = IDENT("n"); - ctx.symbol_table_[*return_n] = n.sym_; - auto return_r = IDENT("r"); - ctx.symbol_table_[*return_r] = r_m.edge_sym_; - auto return_n_r = NEXPR("[n, r]", LIST(return_n, return_r)); - ctx.symbol_table_[*return_n_r] = ctx.symbol_table_.CreateSymbol("", true); - auto return_m = NEXPR("m", IDENT("m")); - ctx.symbol_table_[*return_m->expression_] = r_m.node_sym_; - ctx.symbol_table_[*return_m] = ctx.symbol_table_.CreateSymbol("", true); - auto return_p = NEXPR("p", IDENT("p")); - ctx.symbol_table_[*return_p->expression_] = p_sym; - ctx.symbol_table_[*return_p] = ctx.symbol_table_.CreateSymbol("", true); - auto produce = MakeProduce(p, return_n_r, return_m, return_p); - - auto check_result = [prop]( - int worker_id, - const std::vector<std::vector<query::TypedValue>> &frames) { - int offset = worker_id * 10; - ASSERT_EQ(frames.size(), 1); - auto &row = frames[0]; - ASSERT_EQ(row.size(), 3); - auto &list = row[0].ValueList(); - ASSERT_EQ(list.size(), 2); - ASSERT_EQ(list[0].ValueVertex().PropsAt(prop).Value<int64_t>(), offset); - ASSERT_EQ(list[1].ValueEdge().PropsAt(prop).Value<int64_t>(), offset + 2); - ASSERT_EQ(row[1].ValueVertex().PropsAt(prop).Value<int64_t>(), offset + 1); - auto &path = row[2].ValuePath(); - ASSERT_EQ(path.size(), 1); - ASSERT_EQ(path.vertices()[0].PropsAt(prop).Value<int64_t>(), offset); - ASSERT_EQ(path.edges()[0].PropsAt(prop).Value<int64_t>(), offset + 2); - ASSERT_EQ(path.vertices()[1].PropsAt(prop).Value<int64_t>(), offset + 1); - }; - - // Test that the plan works locally. - auto results = CollectProduce(produce.get(), ctx.symbol_table_, dba); - check_result(0, results); - - const int plan_id = 42; - master().plan_dispatcher().DispatchPlan(plan_id, produce, ctx.symbol_table_); - - tx::CommandId command_id = dba.transaction().cid(); - Parameters params; - std::vector<query::Symbol> symbols{ctx.symbol_table_[*return_n_r], - ctx.symbol_table_[*return_m], p_sym}; - auto remote_pull = [this, &command_id, &params, &symbols]( - GraphDbAccessor &dba, int worker_id) { - return master().pull_clients().Pull(dba, worker_id, plan_id, command_id, - params, symbols, false, 3); - }; - auto future_w1_results = remote_pull(dba, 1); - auto future_w2_results = remote_pull(dba, 2); - check_result(1, future_w1_results.get().frames); - check_result(2, future_w2_results.get().frames); -} - -TEST_F(DistributedGraphDbTest, Synchronize) { - auto from = InsertVertex(worker(1)); - auto to = InsertVertex(worker(2)); - InsertEdge(from, to, "et"); - - // Query: MATCH (n)--(m) SET m.prop = 2 RETURN n.prop - // This query ensures that a remote update gets applied and the local stuff - // gets reconstructed.
- auto &db = master(); - GraphDbAccessor dba{db}; - Context ctx{dba}; - SymbolGenerator symbol_generator{ctx.symbol_table_}; - AstStorage storage; - // MATCH - auto n = MakeScanAll(storage, ctx.symbol_table_, "n"); - auto r_m = - MakeExpand(storage, ctx.symbol_table_, n.op_, n.sym_, "r", - EdgeAtom::Direction::BOTH, {}, "m", false, GraphView::OLD); - - // SET - auto literal = LITERAL(42); - auto prop = PROPERTY_PAIR("prop"); - auto m_p = PROPERTY_LOOKUP("m", prop); - ctx.symbol_table_[*m_p->expression_] = r_m.node_sym_; - auto set_m_p = std::make_shared<plan::SetProperty>(r_m.op_, m_p, literal); - - const int plan_id = 42; - master().plan_dispatcher().DispatchPlan(plan_id, set_m_p, ctx.symbol_table_); - - // Master-side PullRemote, Synchronize - auto pull_remote = std::make_shared<query::plan::PullRemote>( - nullptr, plan_id, std::vector<Symbol>{n.sym_}); - auto synchronize = - std::make_shared<query::plan::Synchronize>(set_m_p, pull_remote, true); - - // RETURN - auto n_p = - storage.Create<PropertyLookup>(storage.Create<Identifier>("n"), prop); - ctx.symbol_table_[*n_p->expression_] = n.sym_; - auto return_n_p = NEXPR("n.prop", n_p); - auto return_n_p_sym = ctx.symbol_table_.CreateSymbol("n.p", true); - ctx.symbol_table_[*return_n_p] = return_n_p_sym; - auto produce = MakeProduce(synchronize, return_n_p); - - auto results = CollectProduce(produce.get(), ctx.symbol_table_, dba); - ASSERT_EQ(results.size(), 2); - ASSERT_EQ(results[0].size(), 1); - EXPECT_EQ(results[0][0].ValueInt(), 42); - ASSERT_EQ(results[1].size(), 1); - EXPECT_EQ(results[1][0].ValueInt(), 42); - - // TODO test without advance command? -} - -TEST_F(DistributedGraphDbTest, Create) { - // Query: UNWIND range(0, 1000) as x CREATE () - auto &db = master(); - GraphDbAccessor dba{db}; - Context ctx{dba}; - SymbolGenerator symbol_generator{ctx.symbol_table_}; - AstStorage storage; - auto range = FN("range", LITERAL(0), LITERAL(1000)); - auto x = ctx.symbol_table_.CreateSymbol("x", true); - auto unwind = std::make_shared<plan::Unwind>(nullptr, range, x); - auto node = NODE("n"); - ctx.symbol_table_[*node->identifier_] = - ctx.symbol_table_.CreateSymbol("n", true); - auto create = std::make_shared<query::plan::CreateNode>(unwind, node, true); - PullAll(create, dba, ctx.symbol_table_); - dba.Commit(); - - EXPECT_GT(VertexCount(master()), 200); - EXPECT_GT(VertexCount(worker(1)), 200); - EXPECT_GT(VertexCount(worker(2)), 200); -} - -TEST_F(DistributedGraphDbTest, PullRemoteOrderBy) { - // Create some data on the master and both workers. 
- storage::Property prop; - { - GraphDbAccessor dba{master()}; - auto tx_id = dba.transaction_id(); - GraphDbAccessor dba1{worker(1), tx_id}; - GraphDbAccessor dba2{worker(2), tx_id}; - prop = dba.Property("prop"); - auto add_data = [prop](GraphDbAccessor &dba, int value) { - dba.InsertVertex().PropsSet(prop, value); - }; - - std::vector<int> data; - for (int i = 0; i < 300; ++i) data.push_back(i); - std::random_shuffle(data.begin(), data.end()); - - for (int i = 0; i < 100; ++i) add_data(dba, data[i]); - for (int i = 100; i < 200; ++i) add_data(dba1, data[i]); - for (int i = 200; i < 300; ++i) add_data(dba2, data[i]); - - dba.Commit(); - } - - auto &db = master(); - GraphDbAccessor dba{db}; - Context ctx{dba}; - SymbolGenerator symbol_generator{ctx.symbol_table_}; - AstStorage storage; - - // Query plan for: MATCH (n) RETURN n.prop ORDER BY n.prop; - auto n = MakeScanAll(storage, ctx.symbol_table_, "n"); - auto n_p = PROPERTY_LOOKUP("n", prop); - ctx.symbol_table_[*n_p->expression_] = n.sym_; - auto order_by = std::make_shared<plan::OrderBy>( - n.op_, - std::vector<std::pair<Ordering, Expression *>>{{Ordering::ASC, n_p}}, - std::vector<Symbol>{n.sym_}); - - const int plan_id = 42; - master().plan_dispatcher().DispatchPlan(plan_id, order_by, ctx.symbol_table_); - - auto pull_remote_order_by = std::make_shared<plan::PullRemoteOrderBy>( - order_by, plan_id, - std::vector<std::pair<Ordering, Expression *>>{{Ordering::ASC, n_p}}, - std::vector<Symbol>{n.sym_}); - - auto n_p_ne = NEXPR("n.prop", n_p); - ctx.symbol_table_[*n_p_ne] = ctx.symbol_table_.CreateSymbol("n.prop", true); - auto produce = MakeProduce(pull_remote_order_by, n_p_ne); - auto results = CollectProduce(produce.get(), ctx.symbol_table_, dba); - - ASSERT_EQ(results.size(), 300); - for (int j = 0; j < 300; ++j) { - EXPECT_TRUE(TypedValue::BoolEqual{}(results[j][0], j)); - } -} - -class DistributedTransactionTimeout : public DistributedGraphDbTest { - protected: - int QueryExecutionTimeSec(int) override { return 1; } -}; - -TEST_F(DistributedTransactionTimeout, Timeout) { - InsertVertex(worker(1)); - InsertVertex(worker(1)); - - GraphDbAccessor dba{master()}; - Context ctx{dba}; - SymbolGenerator symbol_generator{ctx.symbol_table_}; - AstStorage storage; - - // Make distributed plan for MATCH (n) RETURN n - auto scan_all = MakeScanAll(storage, ctx.symbol_table_, "n"); - auto output = NEXPR("n", IDENT("n")); - auto produce = MakeProduce(scan_all.op_, output); - ctx.symbol_table_[*output->expression_] = scan_all.sym_; - ctx.symbol_table_[*output] = - ctx.symbol_table_.CreateSymbol("named_expression_1", true); - - const int plan_id = 42; - master().plan_dispatcher().DispatchPlan(plan_id, produce, ctx.symbol_table_); - tx::CommandId command_id = dba.transaction().cid(); - - Parameters params; - std::vector<query::Symbol> symbols{ctx.symbol_table_[*output]}; - auto remote_pull = [this, &command_id, &params, &symbols, &dba]() { - return master() - .pull_clients() - .Pull(dba, 1, plan_id, command_id, params, symbols, false, 1) - .get() - .pull_state; - }; - ASSERT_EQ(remote_pull(), distributed::PullState::CURSOR_IN_PROGRESS); - // Sleep here so the remote gets a hinted error.
- std::this_thread::sleep_for(2s); - EXPECT_EQ(remote_pull(), distributed::PullState::HINTED_ABORT_ERROR); -} diff --git a/tests/unit/distributed_serialization.cpp b/tests/unit/distributed_serialization.cpp deleted file mode 100644 index 4f9cb83ef..000000000 --- a/tests/unit/distributed_serialization.cpp +++ /dev/null @@ -1,162 +0,0 @@ -#include <gtest/gtest.h> -#include <sstream> - -#include "boost/archive/binary_iarchive.hpp" -#include "boost/archive/binary_oarchive.hpp" - -#include "distributed/serialization.hpp" -#include "mvcc/version_list.hpp" -#include "query/typed_value.hpp" -#include "storage/edge.hpp" -#include "storage/property_value_store.hpp" -#include "storage/types.hpp" -#include "storage/vertex.hpp" -#include "transactions/engine_single_node.hpp" - -using namespace storage; - -template <typename TAddress> -TAddress ToGlobal(const TAddress &address, int worker_id) { - if (address.is_remote()) return address; - return TAddress{address.local()->gid_, worker_id}; -} - -#define CHECK_RETURN(condition) \ - { \ - if (!(condition)) return false; \ - } - -bool CheckEdges(const Edges &e1, int w1, const Edges &e2, int w2) { - CHECK_RETURN(e1.size() == e2.size()); - auto e1_it = e1.begin(); - for (auto e2_it = e2.begin(); e2_it != e2.end(); ++e1_it, ++e2_it) { - CHECK_RETURN(ToGlobal(e1_it->vertex, w1) == ToGlobal(e2_it->vertex, w2)); - CHECK_RETURN(ToGlobal(e1_it->edge, w1) == ToGlobal(e2_it->edge, w2)); - CHECK_RETURN(e1_it->edge_type == e2_it->edge_type); - } - return true; -} - -bool CheckProperties(const PropertyValueStore &p1, - const PropertyValueStore &p2) { - CHECK_RETURN(p1.size() == p2.size()); - auto p1_it = p1.begin(); - for (auto p2_it = p2.begin(); p2_it != p2.end(); ++p1_it, ++p2_it) { - CHECK_RETURN(p1_it->first == p2_it->first); - auto tv = - query::TypedValue(p1_it->second) == query::TypedValue(p2_it->second); - CHECK_RETURN(tv.IsBool()); - CHECK_RETURN(tv.ValueBool()); - } - return true; -} - -bool CheckVertex(const Vertex &v1, int w1, const Vertex &v2, int w2) { - CHECK_RETURN(CheckEdges(v1.in_, w1, v2.in_, w2)); - CHECK_RETURN(CheckEdges(v1.out_, w1, v2.out_, w2)); - CHECK_RETURN(v1.labels_ == v2.labels_); - CHECK_RETURN(CheckProperties(v1.properties_, v2.properties_)); - return true; -} - -bool CheckEdge(const Edge &e1, int w1, const Edge &e2, int w2) { - CHECK_RETURN(ToGlobal(e1.from_, w1) == ToGlobal(e2.from_, w2)); - CHECK_RETURN(ToGlobal(e1.to_, w1) == ToGlobal(e2.to_, w2)); - CHECK_RETURN(e1.edge_type_ == e2.edge_type_); - CHECK_RETURN(CheckProperties(e1.properties_, e2.properties_)); - return true; -} - -#undef CHECK_RETURN - -#define SAVE_AND_LOAD(type, name, element) \ - std::unique_ptr<type> name; \ - { \ - std::ostringstream ostream; \ - boost::archive::binary_oarchive oar{ostream}; \ - distributed::Save##type(oar, element, 0); \ - std::istringstream istream{ostream.str()}; \ - boost::archive::binary_iarchive iar{istream}; \ - name = distributed::Load##type(iar); \ - } - -TEST(DistributedSerialization, Empty) { - Vertex v; - int w_id{0}; - SAVE_AND_LOAD(Vertex, v_recovered, v) - EXPECT_TRUE(CheckVertex(v, w_id, *v_recovered, w_id)); -} - -#define UPDATE_AND_CHECK(type, x, action) \ - { \ - SAVE_AND_LOAD(type, before, x) \ - EXPECT_TRUE(Check##type(x, 0, *before, 0)); \ - action; \ - EXPECT_FALSE(Check##type(x, 0, *before, 0)); \ - SAVE_AND_LOAD(type, after, x) \ - EXPECT_TRUE(Check##type(x, 0, *after, 0)); \ - } - -#define UPDATE_AND_CHECK_V(v, action) UPDATE_AND_CHECK(Vertex, v, action) -#define UPDATE_AND_CHECK_E(e, action) UPDATE_AND_CHECK(Edge, e, 
action) - -TEST(DistributedSerialization, VertexLabels) { - Vertex v; - UPDATE_AND_CHECK_V(v, v.labels_.emplace_back(Label(1))); - UPDATE_AND_CHECK_V(v, v.labels_.emplace_back(Label(2))); - UPDATE_AND_CHECK_V(v, v.labels_.resize(1)); - UPDATE_AND_CHECK_V(v, v.labels_.clear()); -} - -TEST(DistributedSerialization, VertexProperties) { - Vertex v; - UPDATE_AND_CHECK_V(v, v.properties_.set(Property(1), true)); - UPDATE_AND_CHECK_V(v, v.properties_.set(Property(1), "string")); - UPDATE_AND_CHECK_V(v, v.properties_.set(Property(2), 42)); - UPDATE_AND_CHECK_V(v, v.properties_.erase(Property(1))); - UPDATE_AND_CHECK_V(v, v.properties_.clear()); -} - -class DistributedSerializationMvcc : public ::testing::Test { - protected: - tx::SingleNodeEngine engine; - tx::Transaction *tx = engine.Begin(); - mvcc::VersionList<Vertex> v1_vlist{*tx, 0}; - Vertex &v1 = *v1_vlist.Oldest(); - mvcc::VersionList<Vertex> v2_vlist{*tx, 1}; - Vertex &v2 = *v2_vlist.Oldest(); - mvcc::VersionList<Edge> e1_vlist{*tx, 0, &v1_vlist, &v2_vlist, EdgeType(0)}; - Edge &e1 = *e1_vlist.Oldest(); - mvcc::VersionList<Edge> e2_vlist{*tx, 1, &v2_vlist, &v1_vlist, EdgeType(2)}; - Edge &e2 = *e2_vlist.Oldest(); -}; - -TEST_F(DistributedSerializationMvcc, VertexEdges) { - UPDATE_AND_CHECK_V(v1, v1.out_.emplace(&v2_vlist, &e1_vlist, EdgeType(0))); - UPDATE_AND_CHECK_V(v2, v2.in_.emplace(&v1_vlist, &e1_vlist, EdgeType(0))); - UPDATE_AND_CHECK_V(v1, v1.in_.emplace(&v2_vlist, &e2_vlist, EdgeType(2))); - UPDATE_AND_CHECK_V(v2, v2.out_.emplace(&v1_vlist, &e2_vlist, EdgeType(2))); -} - -TEST_F(DistributedSerializationMvcc, EdgeFromAndTo) { - UPDATE_AND_CHECK_E(e1, e1.from_ = &v2_vlist); - UPDATE_AND_CHECK_E(e1, e1.to_ = &v1_vlist); -} - -TEST_F(DistributedSerializationMvcc, EdgeType) { - UPDATE_AND_CHECK_E(e1, e1.edge_type_ = EdgeType(123)); - UPDATE_AND_CHECK_E(e1, e1.edge_type_ = EdgeType(55)); -} - -TEST_F(DistributedSerializationMvcc, EdgeProperties) { - UPDATE_AND_CHECK_E(e1, e1.properties_.set(Property(1), true)); - UPDATE_AND_CHECK_E(e1, e1.properties_.set(Property(1), "string")); - UPDATE_AND_CHECK_E(e1, e1.properties_.set(Property(2), 42)); - UPDATE_AND_CHECK_E(e1, e1.properties_.erase(Property(1))); - UPDATE_AND_CHECK_E(e1, e1.properties_.clear()); -} - -#undef UPDATE_AND_CHECK_E -#undef UPDATE_AND_CHECK_V -#undef UPDATE_AND_CHECK -#undef SAVE_AND_LOAD diff --git a/tests/unit/distributed_token_sharing.cpp b/tests/unit/distributed_token_sharing.cpp deleted file mode 100644 index f2cff3a51..000000000 --- a/tests/unit/distributed_token_sharing.cpp +++ /dev/null @@ -1,33 +0,0 @@ -#include "distributed_common.hpp" - -#include <memory> -#include <thread> -#include <unordered_set> -#include <vector> - -#include "gtest/gtest.h" - -DECLARE_bool(dynamic_graph_partitioner_enabled); -DECLARE_int32(dgp_max_batch_size); - -using namespace distributed; -using namespace database; - -class TokenSharingTest : public DistributedGraphDbTest { - void SetUp() override { - FLAGS_dynamic_graph_partitioner_enabled = true; - FLAGS_dgp_max_batch_size = 1; - DistributedGraphDbTest::SetUp(); - } -}; - -TEST_F(TokenSharingTest, Integration) { - auto vb = InsertVertex(worker(1)); - for (int i = 0; i < 100; ++i) { - auto v = InsertVertex(master()); - InsertEdge(vb, v, "edge"); - } - std::this_thread::sleep_for(std::chrono::seconds(3)); - // Migrate at least something from or to here - EXPECT_NE(VertexCount(master()), 100); -} diff --git a/tests/unit/distributed_updates.cpp b/tests/unit/distributed_updates.cpp deleted file mode 100644 index 24ec0f9aa..000000000 --- 
a/tests/unit/distributed_updates.cpp +++ /dev/null @@ -1,562 +0,0 @@ -#include <functional> -#include <unordered_map> - -#include <gtest/gtest.h> - -#include "database/graph_db_accessor.hpp" -#include "distributed/updates_rpc_clients.hpp" -#include "distributed/updates_rpc_server.hpp" -#include "query/typed_value.hpp" -#include "storage/property_value.hpp" - -#include "distributed_common.hpp" - -class DistributedUpdateTest : public DistributedGraphDbTest { - protected: - std::unique_ptr<database::GraphDbAccessor> dba1; - std::unique_ptr<database::GraphDbAccessor> dba2; - storage::Label label; - std::unique_ptr<VertexAccessor> v1_dba1; - std::unique_ptr<VertexAccessor> v1_dba2; - - void SetUp() override { - DistributedGraphDbTest::SetUp(); - - database::GraphDbAccessor dba_tx1{worker(1)}; - auto v = dba_tx1.InsertVertex(); - auto v_ga = v.GlobalAddress(); - dba_tx1.Commit(); - - dba1 = std::make_unique<database::GraphDbAccessor>(worker(1)); - dba2 = std::make_unique<database::GraphDbAccessor>(worker(2), - dba1->transaction_id()); - - v1_dba1 = std::make_unique<VertexAccessor>(v_ga, *dba1); - v1_dba2 = std::make_unique<VertexAccessor>(v_ga, *dba2); - ASSERT_FALSE(v1_dba2->address().is_local()); - label = dba1->Label("l"); - v1_dba2->add_label(label); - } - - void TearDown() override { - dba2 = nullptr; - dba1 = nullptr; - DistributedGraphDbTest::TearDown(); - } -}; - -#define EXPECT_LABEL(var, old_result, new_result) \ - { \ - var->SwitchOld(); \ - EXPECT_EQ(var->has_label(label), old_result); \ - var->SwitchNew(); \ - EXPECT_EQ(var->has_label(label), new_result); \ - } - -TEST_F(DistributedUpdateTest, UpdateLocalOnly) { - EXPECT_LABEL(v1_dba2, false, true); - EXPECT_LABEL(v1_dba1, false, false); -} - -TEST_F(DistributedUpdateTest, UpdateApply) { - EXPECT_LABEL(v1_dba1, false, false); - worker(1).updates_server().Apply(dba1->transaction_id()); - EXPECT_LABEL(v1_dba1, false, true); -} - -#undef EXPECT_LABEL - -TEST_F(DistributedGraphDbTest, CreateVertex) { - gid::Gid gid; - { - database::GraphDbAccessor dba{worker(1)}; - auto v = dba.InsertVertexIntoRemote(2, {}, {}); - gid = v.gid(); - dba.Commit(); - } - { - database::GraphDbAccessor dba{worker(2)}; - auto v = dba.FindVertexOptional(gid, false); - ASSERT_TRUE(v); - } -} - -TEST_F(DistributedGraphDbTest, CreateVertexWithUpdate) { - gid::Gid gid; - storage::Property prop; - { - database::GraphDbAccessor dba{worker(1)}; - auto v = dba.InsertVertexIntoRemote(2, {}, {}); - gid = v.gid(); - prop = dba.Property("prop"); - v.PropsSet(prop, 42); - worker(2).updates_server().Apply(dba.transaction_id()); - dba.Commit(); - } - { - database::GraphDbAccessor dba{worker(2)}; - auto v = dba.FindVertexOptional(gid, false); - ASSERT_TRUE(v); - EXPECT_EQ(v->PropsAt(prop).Value<int64_t>(), 42); - } -} - -TEST_F(DistributedGraphDbTest, CreateVertexWithData) { - gid::Gid gid; - storage::Label l1; - storage::Label l2; - storage::Property prop; - { - database::GraphDbAccessor dba{worker(1)}; - l1 = dba.Label("l1"); - l2 = dba.Label("l2"); - prop = dba.Property("prop"); - auto v = dba.InsertVertexIntoRemote(2, {l1, l2}, {{prop, 42}}); - gid = v.gid(); - - // Check local visibility before commit. - EXPECT_TRUE(v.has_label(l1)); - EXPECT_TRUE(v.has_label(l2)); - EXPECT_EQ(v.PropsAt(prop).Value<int64_t>(), 42); - - worker(2).updates_server().Apply(dba.transaction_id()); - dba.Commit(); - } - { - database::GraphDbAccessor dba{worker(2)}; - auto v = dba.FindVertexOptional(gid, false); - ASSERT_TRUE(v); - // Check remote data after commit. 
- EXPECT_TRUE(v->has_label(l1)); - EXPECT_TRUE(v->has_label(l2)); - EXPECT_EQ(v->PropsAt(prop).Value<int64_t>(), 42); - } -} - -// Checks if expiring a local record for a local update before applying a remote -// update delta causes a problem -TEST_F(DistributedGraphDbTest, UpdateVertexRemoteAndLocal) { - gid::Gid gid; - storage::Label l1; - storage::Label l2; - { - database::GraphDbAccessor dba{worker(1)}; - auto v = dba.InsertVertex(); - gid = v.gid(); - l1 = dba.Label("label1"); - l2 = dba.Label("label2"); - dba.Commit(); - } - { - database::GraphDbAccessor dba0{master()}; - database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()}; - auto v_local = dba1.FindVertex(gid, false); - auto v_remote = VertexAccessor(storage::VertexAddress(gid, 1), dba0); - - v_remote.add_label(l2); - v_local.add_label(l1); - - auto result = worker(1).updates_server().Apply(dba0.transaction_id()); - EXPECT_EQ(result, distributed::UpdateResult::DONE); - } -} - -TEST_F(DistributedGraphDbTest, AddSameLabelRemoteAndLocal) { - auto v_address = InsertVertex(worker(1)); - { - database::GraphDbAccessor dba0{master()}; - database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()}; - auto v_local = dba1.FindVertex(v_address.gid(), false); - auto v_remote = VertexAccessor(v_address, dba0); - auto l1 = dba1.Label("label"); - v_remote.add_label(l1); - v_local.add_label(l1); - worker(1).updates_server().Apply(dba0.transaction_id()); - dba0.Commit(); - } - { - database::GraphDbAccessor dba0{master()}; - database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()}; - auto v = dba1.FindVertex(v_address.gid(), false); - EXPECT_EQ(v.labels().size(), 1); - } -} - -TEST_F(DistributedGraphDbTest, IndexGetsUpdatedRemotely) { - storage::VertexAddress v_remote = InsertVertex(worker(1)); - storage::Label label; - { - database::GraphDbAccessor dba0{master()}; - label = dba0.Label("label"); - VertexAccessor va(v_remote, dba0); - va.add_label(label); - worker(1).updates_server().Apply(dba0.transaction_id()); - dba0.Commit(); - } - { - database::GraphDbAccessor dba1{worker(1)}; - auto vertices = dba1.Vertices(label, false); - EXPECT_EQ(std::distance(vertices.begin(), vertices.end()), 1); - } -} - -TEST_F(DistributedGraphDbTest, DeleteVertexRemoteCommit) { - auto v_address = InsertVertex(worker(1)); - database::GraphDbAccessor dba0{master()}; - database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()}; - auto v_remote = VertexAccessor(v_address, dba0); - dba0.RemoveVertex(v_remote); - EXPECT_TRUE(dba1.FindVertexOptional(v_address.gid(), true)); - EXPECT_EQ(worker(1).updates_server().Apply(dba0.transaction_id()), - distributed::UpdateResult::DONE); - EXPECT_FALSE(dba1.FindVertexOptional(v_address.gid(), true)); -} - -TEST_F(DistributedGraphDbTest, DeleteVertexRemoteBothDelete) { - auto v_address = InsertVertex(worker(1)); - { - database::GraphDbAccessor dba0{master()}; - database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()}; - auto v_local = dba1.FindVertex(v_address.gid(), false); - auto v_remote = VertexAccessor(v_address, dba0); - EXPECT_TRUE(dba1.RemoveVertex(v_local)); - EXPECT_TRUE(dba0.RemoveVertex(v_remote)); - EXPECT_EQ(worker(1).updates_server().Apply(dba0.transaction_id()), - distributed::UpdateResult::DONE); - EXPECT_FALSE(dba1.FindVertexOptional(v_address.gid(), true)); - } -} - -TEST_F(DistributedGraphDbTest, DeleteVertexRemoteStillConnected) { - auto v_address = InsertVertex(worker(1)); - auto e_address = InsertEdge(v_address, v_address, "edge"); - - { - database::GraphDbAccessor 
dba0{master()}; - database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()}; - auto v_remote = VertexAccessor(v_address, dba0); - dba0.RemoveVertex(v_remote); - EXPECT_EQ(worker(1).updates_server().Apply(dba0.transaction_id()), - distributed::UpdateResult::UNABLE_TO_DELETE_VERTEX_ERROR); - EXPECT_TRUE(dba1.FindVertexOptional(v_address.gid(), true)); - } - { - database::GraphDbAccessor dba0{master()}; - database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()}; - auto e_local = dba1.FindEdge(e_address.gid(), false); - auto v_local = dba1.FindVertex(v_address.gid(), false); - auto v_remote = VertexAccessor(v_address, dba0); - - dba1.RemoveEdge(e_local); - dba0.RemoveVertex(v_remote); - - EXPECT_EQ(worker(1).updates_server().Apply(dba0.transaction_id()), - distributed::UpdateResult::DONE); - EXPECT_FALSE(dba1.FindVertexOptional(v_address.gid(), true)); - } -} - -class DistributedDetachDeleteTest : public DistributedGraphDbTest { - protected: - storage::VertexAddress w1_a; - storage::VertexAddress w1_b; - storage::VertexAddress w2_a; - - void SetUp() override { - DistributedGraphDbTest::SetUp(); - w1_a = InsertVertex(worker(1)); - w1_b = InsertVertex(worker(1)); - w2_a = InsertVertex(worker(2)); - } - - template <typename TF> - void Run(storage::VertexAddress v_address, TF check_func) { - for (int i : {0, 1, 2}) { - database::GraphDbAccessor dba0{master()}; - database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()}; - database::GraphDbAccessor dba2{worker(2), dba0.transaction_id()}; - - std::vector<std::reference_wrapper<database::GraphDbAccessor>> dba; - dba.emplace_back(dba0); - dba.emplace_back(dba1); - dba.emplace_back(dba2); - - auto &accessor = dba[i].get(); - auto v_accessor = VertexAccessor(v_address, accessor); - accessor.DetachRemoveVertex(v_accessor); - - for (auto db_accessor : dba) { - ASSERT_EQ(db_accessor.get().db().updates_server().Apply( - dba[0].get().transaction_id()), - distributed::UpdateResult::DONE); - } - - check_func(dba); - } - } -}; - -TEST_F(DistributedDetachDeleteTest, VertexCycle) { - auto e_address = InsertEdge(w1_a, w1_a, "edge"); - Run(w1_a, - [this, e_address]( - std::vector<std::reference_wrapper<database::GraphDbAccessor>> &dba) { - EXPECT_FALSE(dba[1].get().FindVertexOptional(w1_a.gid(), true)); - EXPECT_FALSE(dba[1].get().FindEdgeOptional(e_address.gid(), true)); - }); -} - -TEST_F(DistributedDetachDeleteTest, TwoVerticesDifferentWorkers) { - auto e_address = InsertEdge(w1_a, w2_a, "edge"); - - // Delete from - Run(w1_a, - [this, e_address]( - std::vector<std::reference_wrapper<database::GraphDbAccessor>> &dba) { - EXPECT_FALSE(dba[1].get().FindVertexOptional(w1_a.gid(), true)); - EXPECT_TRUE(dba[2].get().FindVertexOptional(w2_a.gid(), true)); - EXPECT_FALSE(dba[1].get().FindEdgeOptional(e_address.gid(), true)); - }); - - // Delete to - Run(w2_a, - [this, e_address]( - std::vector<std::reference_wrapper<database::GraphDbAccessor>> &dba) { - EXPECT_TRUE(dba[1].get().FindVertexOptional(w1_a.gid(), true)); - EXPECT_FALSE(dba[2].get().FindVertexOptional(w2_a.gid(), true)); - EXPECT_FALSE(dba[1].get().FindEdgeOptional(e_address.gid(), true)); - }); -} - -TEST_F(DistributedDetachDeleteTest, TwoVerticesSameWorkers) { - auto e_address = InsertEdge(w1_a, w1_b, "edge"); - - // Delete from - Run(w1_a, - [this, e_address]( - std::vector<std::reference_wrapper<database::GraphDbAccessor>> &dba) { - EXPECT_FALSE(dba[1].get().FindVertexOptional(w1_a.gid(), true)); - EXPECT_TRUE(dba[1].get().FindVertexOptional(w1_b.gid(), true)); - 
EXPECT_FALSE(dba[1].get().FindEdgeOptional(e_address.gid(), true)); - }); - - // Delete to - Run(w1_b, - [this, e_address]( - std::vector<std::reference_wrapper<database::GraphDbAccessor>> &dba) { - EXPECT_TRUE(dba[1].get().FindVertexOptional(w1_a.gid(), true)); - EXPECT_FALSE(dba[1].get().FindVertexOptional(w1_b.gid(), true)); - EXPECT_FALSE(dba[1].get().FindEdgeOptional(e_address.gid(), true)); - }); -} - -class DistributedEdgeCreateTest : public DistributedGraphDbTest { - protected: - storage::VertexAddress w1_a; - storage::VertexAddress w1_b; - storage::VertexAddress w2_a; - std::unordered_map<std::string, PropertyValue> props{{"p1", 42}, - {"p2", true}}; - storage::EdgeAddress e_ga; - - void SetUp() override { - DistributedGraphDbTest::SetUp(); - w1_a = InsertVertex(worker(1)); - w1_b = InsertVertex(worker(1)); - w2_a = InsertVertex(worker(2)); - } - - void CreateEdge(database::GraphDb &creator, storage::VertexAddress from_addr, - storage::VertexAddress to_addr) { - CHECK(from_addr.is_remote() && to_addr.is_remote()) - << "Local address given to CreateEdge"; - database::GraphDbAccessor dba{creator}; - auto edge_type = dba.EdgeType("et"); - VertexAccessor v1{from_addr, dba}; - VertexAccessor v2{to_addr, dba}; - auto edge = dba.InsertEdge(v1, v2, edge_type); - e_ga = edge.GlobalAddress(); - - for (auto &kv : props) edge.PropsSet(dba.Property(kv.first), kv.second); - - master().updates_server().Apply(dba.transaction_id()); - worker(1).updates_server().Apply(dba.transaction_id()); - worker(2).updates_server().Apply(dba.transaction_id()); - dba.Commit(); - } - - void CheckState(database::GraphDb &db, bool edge_is_local, - storage::VertexAddress from_addr, - storage::VertexAddress to_addr) { - database::GraphDbAccessor dba{db}; - - // Check edge data. - { - EdgeAccessor edge{e_ga, dba}; - EXPECT_EQ(edge.address().is_local(), edge_is_local); - EXPECT_EQ(edge.GlobalAddress(), e_ga); - auto from = edge.from(); - EXPECT_EQ(from.GlobalAddress(), from_addr); - EXPECT_EQ(edge.from_addr().is_local(), from.is_local()); - auto to = edge.to(); - EXPECT_EQ(to.GlobalAddress(), to_addr); - EXPECT_EQ(edge.to_addr().is_local(), to.is_local()); - - EXPECT_EQ(edge.Properties().size(), props.size()); - for (auto &kv : props) { - auto equality = edge.PropsAt(dba.Property(kv.first)) == - query::TypedValue(kv.second); - EXPECT_TRUE(equality.IsBool() && equality.ValueBool()); - } - } - - auto edges = [](auto iterable) { - std::vector<EdgeAccessor> res; - for (auto edge : iterable) res.emplace_back(edge); - return res; - }; - - // Check `from` data. - { - VertexAccessor from{from_addr, dba}; - ASSERT_EQ(edges(from.out()).size(), 1); - EXPECT_EQ(edges(from.out())[0].GlobalAddress(), e_ga); - // In case of cycles we have 1 in the `in` edges. - EXPECT_EQ(edges(from.in()).size(), from_addr == to_addr); - } - - // Check `to` data. - { - VertexAccessor to{to_addr, dba}; - // In case of cycles we have 1 in the `out` edges. 
- EXPECT_EQ(edges(to.out()).size(), from_addr == to_addr); - ASSERT_EQ(edges(to.in()).size(), 1); - EXPECT_EQ(edges(to.in())[0].GlobalAddress(), e_ga); - } - } - - void CheckAll(storage::VertexAddress from_addr, - storage::VertexAddress to_addr) { - int edge_worker = from_addr.worker_id(); - EXPECT_EQ(EdgeCount(master()), edge_worker == 0); - EXPECT_EQ(EdgeCount(worker(1)), edge_worker == 1); - EXPECT_EQ(EdgeCount(worker(2)), edge_worker == 2); - CheckState(master(), edge_worker == 0, from_addr, to_addr); - CheckState(worker(1), edge_worker == 1, from_addr, to_addr); - CheckState(worker(2), edge_worker == 2, from_addr, to_addr); - } -}; - -TEST_F(DistributedEdgeCreateTest, LocalRemote) { - CreateEdge(worker(1), w1_a, w2_a); - CheckAll(w1_a, w2_a); -} - -TEST_F(DistributedEdgeCreateTest, RemoteLocal) { - CreateEdge(worker(2), w1_a, w2_a); - CheckAll(w1_a, w2_a); -} - -TEST_F(DistributedEdgeCreateTest, RemoteRemoteDifferentWorkers) { - CreateEdge(master(), w1_a, w2_a); - CheckAll(w1_a, w2_a); -} - -TEST_F(DistributedEdgeCreateTest, RemoteRemoteSameWorkers) { - CreateEdge(master(), w1_a, w1_b); - CheckAll(w1_a, w1_b); -} - -TEST_F(DistributedEdgeCreateTest, RemoteRemoteCycle) { - CreateEdge(master(), w1_a, w1_a); - CheckAll(w1_a, w1_a); -} - -class DistributedEdgeRemoveTest : public DistributedGraphDbTest { - protected: - storage::VertexAddress from_addr; - storage::VertexAddress to_addr; - storage::EdgeAddress edge_addr; - - void Create(database::GraphDb &from_db, database::GraphDb &to_db) { - from_addr = InsertVertex(from_db); - to_addr = InsertVertex(to_db); - edge_addr = InsertEdge(from_addr, to_addr, "edge_type"); - } - - void Delete(database::GraphDb &db) { - database::GraphDbAccessor dba{db}; - EdgeAccessor edge{edge_addr, dba}; - dba.RemoveEdge(edge); - master().updates_server().Apply(dba.transaction_id()); - worker(1).updates_server().Apply(dba.transaction_id()); - worker(2).updates_server().Apply(dba.transaction_id()); - dba.Commit(); - } - - template <typename TIterable> - auto Size(TIterable iterable) { - return std::distance(iterable.begin(), iterable.end()); - }; - - void CheckCreation() { - auto wid = from_addr.worker_id(); - ASSERT_TRUE(wid >= 0 && wid < 3); - ASSERT_EQ(EdgeCount(master()), wid == 0); - ASSERT_EQ(EdgeCount(worker(1)), wid == 1); - ASSERT_EQ(EdgeCount(worker(2)), wid == 2); - - database::GraphDbAccessor dba{master()}; - VertexAccessor from{from_addr, dba}; - EXPECT_EQ(Size(from.out()), 1); - EXPECT_EQ(Size(from.in()), 0); - - VertexAccessor to{to_addr, dba}; - EXPECT_EQ(Size(to.out()), 0); - EXPECT_EQ(Size(to.in()), 1); - } - - void CheckDeletion() { - EXPECT_EQ(EdgeCount(master()), 0); - EXPECT_EQ(EdgeCount(worker(1)), 0); - EXPECT_EQ(EdgeCount(worker(2)), 0); - - database::GraphDbAccessor dba{master()}; - - VertexAccessor from{from_addr, dba}; - EXPECT_EQ(Size(from.out()), 0); - EXPECT_EQ(Size(from.in()), 0); - - VertexAccessor to{to_addr, dba}; - EXPECT_EQ(Size(to.out()), 0); - EXPECT_EQ(Size(to.in()), 0); - } -}; - -TEST_F(DistributedEdgeRemoveTest, DifferentVertexOwnersRemoteDelete) { - Create(worker(1), worker(2)); - CheckCreation(); - Delete(master()); - CheckDeletion(); -} - -TEST_F(DistributedEdgeRemoveTest, DifferentVertexOwnersFromDelete) { - Create(worker(1), worker(2)); - CheckCreation(); - Delete(worker(1)); - CheckDeletion(); -} - -TEST_F(DistributedEdgeRemoveTest, DifferentVertexOwnersToDelete) { - Create(worker(1), worker(2)); - CheckCreation(); - Delete(worker(2)); - CheckDeletion(); -} - -TEST_F(DistributedEdgeRemoveTest, 
SameVertexOwnersRemoteDelete) { - Create(worker(1), worker(1)); - CheckCreation(); - Delete(worker(2)); - CheckDeletion(); -} diff --git a/tests/unit/distributed_vertex_migrator.cpp b/tests/unit/distributed_vertex_migrator.cpp deleted file mode 100644 index 5796d0124..000000000 --- a/tests/unit/distributed_vertex_migrator.cpp +++ /dev/null @@ -1,181 +0,0 @@ -#include "distributed_common.hpp" - -#include <memory> -#include <thread> -#include <unordered_set> - -#include "gtest/gtest.h" - -#include "distributed/updates_rpc_clients.hpp" -#include "storage/dynamic_graph_partitioner/vertex_migrator.hpp" - -using namespace distributed; -using namespace database; - -DECLARE_bool(generate_vertex_ids); -DECLARE_bool(generate_edge_ids); - -// Check if the auto-generated gid property is unchanged after migration -TEST_F(DistributedGraphDbTest, VertexEdgeGidSaved) { - FLAGS_generate_vertex_ids = true; - FLAGS_generate_edge_ids = true; - // Fill master so that the ids are not the same on master and worker 1 - for (int i = 0; i < 10; ++i) { - auto va = InsertVertex(master()); - auto ea = InsertEdge(va, va, "edge"); - } - - auto va = InsertVertex(master()); - auto ea = InsertEdge(va, va, "edge"); - PropertyValue old_vgid_property(42); - PropertyValue old_egid_property(42); - { - database::GraphDbAccessor dba(master()); - VertexAccessor vaccessor(va, dba); - old_vgid_property = - vaccessor.PropsAt(dba.Property(PropertyValueStore::IdPropertyName)); - EXPECT_FALSE(old_vgid_property.IsNull()); - - EdgeAccessor eaccessor(ea, dba); - old_egid_property = - eaccessor.PropsAt(dba.Property(PropertyValueStore::IdPropertyName)); - EXPECT_FALSE(old_egid_property.IsNull()); - } - { - database::GraphDbAccessor dba(master()); - VertexAccessor accessor(va, dba); - VertexMigrator migrator(&dba); - migrator.MigrateVertex(accessor, worker(1).WorkerId()); - { - auto apply_futures = master().updates_clients().UpdateApplyAll( - master().WorkerId(), dba.transaction().id_); - // Destructor waits on application - } - dba.Commit(); - } - ASSERT_EQ(VertexCount(worker(1)), 1); - { - database::GraphDbAccessor dba(worker(1)); - auto vaccessor = *dba.Vertices(false).begin(); - auto eaccessor = *dba.Edges(false).begin(); - auto new_vgid_property = - vaccessor.PropsAt(dba.Property(PropertyValueStore::IdPropertyName)); - auto new_egid_property = - eaccessor.PropsAt(dba.Property(PropertyValueStore::IdPropertyName)); - EXPECT_EQ(old_vgid_property.Value<int64_t>(), - new_vgid_property.Value<int64_t>()); - EXPECT_EQ(old_egid_property.Value<int64_t>(), - new_egid_property.Value<int64_t>()); - } -} - -// Checks if two connected nodes from master will be transferred to worker 1 and -// if the edge from the vertex on worker 2 will now point to worker 1 after transfer -TEST_F(DistributedGraphDbTest, SomeTransfer) { - auto va = InsertVertex(master()); - auto vb = InsertVertex(master()); - auto vc = InsertVertex(worker(2)); - InsertEdge(va, vb, "edge"); - InsertEdge(vc, va, "edge"); - { - database::GraphDbAccessor dba(master()); - VertexMigrator migrator(&dba); - for (auto &vertex : dba.Vertices(false)) { - migrator.MigrateVertex(vertex, worker(1).WorkerId()); - } - { - auto apply_futures = master().updates_clients().UpdateApplyAll( - master().WorkerId(), dba.transaction().id_); - // Destructor waits on application - } - dba.Commit(); - } - - EXPECT_EQ(VertexCount(master()), 0); - EXPECT_EQ(EdgeCount(master()), 0); - EXPECT_EQ(VertexCount(worker(1)), 2); - EXPECT_EQ(EdgeCount(worker(1)), 1); - - EXPECT_EQ(VertexCount(worker(2)), 1); - 
ASSERT_EQ(EdgeCount(worker(2)), 1); - { - database::GraphDbAccessor dba(worker(2)); - auto edge = *dba.Edges(false).begin(); - - // Updated remote edge on another worker - EXPECT_EQ(edge.to_addr().worker_id(), worker(1).WorkerId()); - } -} - -// Check if a cycle edge is transferred only once since it's contained in both in -// and out edges of a vertex and if not handled correctly could cause problems -TEST_F(DistributedGraphDbTest, EdgeCycle) { - auto va = InsertVertex(master()); - InsertEdge(va, va, "edge"); - { - database::GraphDbAccessor dba(master()); - VertexMigrator migrator(&dba); - for (auto &vertex : dba.Vertices(false)) { - migrator.MigrateVertex(vertex, worker(1).WorkerId()); - } - { - auto apply_futures = master().updates_clients().UpdateApplyAll( - master().WorkerId(), dba.transaction().id_); - // Destructor waits on application - } - dba.Commit(); - } - - EXPECT_EQ(VertexCount(master()), 0); - EXPECT_EQ(EdgeCount(master()), 0); - EXPECT_EQ(VertexCount(worker(1)), 1); - EXPECT_EQ(EdgeCount(worker(1)), 1); -} - -TEST_F(DistributedGraphDbTest, TransferLabelsAndProperties) { - { - database::GraphDbAccessor dba(master()); - auto va = dba.InsertVertex(); - auto vb = dba.InsertVertex(); - va.add_label(dba.Label("l")); - vb.add_label(dba.Label("l")); - va.PropsSet(dba.Property("p"), 42); - vb.PropsSet(dba.Property("p"), 42); - - auto ea = dba.InsertEdge(va, vb, dba.EdgeType("edge")); - ea.PropsSet(dba.Property("pe"), 43); - auto eb = dba.InsertEdge(vb, va, dba.EdgeType("edge")); - eb.PropsSet(dba.Property("pe"), 43); - dba.Commit(); - } - - { - database::GraphDbAccessor dba(master()); - VertexMigrator migrator(&dba); - for (auto &vertex : dba.Vertices(false)) { - migrator.MigrateVertex(vertex, worker(1).WorkerId()); - } - { - auto apply_futures = master().updates_clients().UpdateApplyAll( - master().WorkerId(), dba.transaction().id_); - // Destructor waits on application - } - dba.Commit(); - } - - { - database::GraphDbAccessor dba(worker(1)); - EXPECT_EQ(VertexCount(master()), 0); - ASSERT_EQ(VertexCount(worker(1)), 2); - for (auto vertex : dba.Vertices(false)) { - ASSERT_EQ(vertex.labels().size(), 1); - EXPECT_EQ(vertex.labels()[0], dba.Label("l")); - EXPECT_EQ(vertex.PropsAt(dba.Property("p")).Value<int64_t>(), 42); - } - - ASSERT_EQ(EdgeCount(worker(1)), 2); - auto edge = *dba.Edges(false).begin(); - EXPECT_EQ(edge.PropsAt(dba.Property("pe")).Value<int64_t>(), 43); - EXPECT_EQ(edge.EdgeType(), dba.EdgeType("edge")); - } -} diff --git a/tests/unit/metrics.cpp b/tests/unit/metrics.cpp deleted file mode 100644 index 25fd15a7f..000000000 --- a/tests/unit/metrics.cpp +++ /dev/null @@ -1,90 +0,0 @@ -#include "stats/metrics.hpp" - -#include <thread> - -#include "gtest/gtest.h" - -using namespace std::chrono_literals; - -using namespace stats; - -TEST(Metrics, Counter) { - Counter &x = GetCounter("counter"); - EXPECT_EQ(*x.Flush(), 0); - EXPECT_EQ(x.Value(), 0); - x.Bump(); - EXPECT_EQ(*x.Flush(), 1); - EXPECT_EQ(x.Value(), 1); - - Counter &y = GetCounter("counter"); - EXPECT_EQ(*y.Flush(), 1); - EXPECT_EQ(y.Value(), 1); - - y.Bump(5); - EXPECT_EQ(*x.Flush(), 6); - EXPECT_EQ(x.Value(), 6); - EXPECT_EQ(*y.Flush(), 6); - EXPECT_EQ(y.Value(), 6); -} - -TEST(Metrics, Gauge) { - Gauge &x = GetGauge("gauge"); - EXPECT_EQ(*x.Flush(), 0); - x.Set(1); - EXPECT_EQ(*x.Flush(), 1); - - Gauge &y = GetGauge("gauge"); - EXPECT_EQ(*y.Flush(), 1); - - x.Set(2); - EXPECT_EQ(*x.Flush(), 2); - EXPECT_EQ(*y.Flush(), 2); -} - -TEST(Metrics, IntervalMin) { - IntervalMin &x = GetIntervalMin("min"); - 
EXPECT_EQ(x.Flush(), std::experimental::nullopt); - x.Add(5); - x.Add(3); - EXPECT_EQ(*x.Flush(), 3); - EXPECT_EQ(x.Flush(), std::experimental::nullopt); - x.Add(3); - x.Add(5); - EXPECT_EQ(*x.Flush(), 3); - EXPECT_EQ(x.Flush(), std::experimental::nullopt); -} - -TEST(Metrics, IntervalMax) { - IntervalMax &x = GetIntervalMax("max"); - EXPECT_EQ(x.Flush(), std::experimental::nullopt); - x.Add(5); - x.Add(3); - EXPECT_EQ(*x.Flush(), 5); - EXPECT_EQ(x.Flush(), std::experimental::nullopt); - x.Add(3); - x.Add(5); - EXPECT_EQ(*x.Flush(), 5); - EXPECT_EQ(x.Flush(), std::experimental::nullopt); -} - -TEST(Metrics, Stopwatch) { - auto d1 = Stopwatch("stopwatch", [] { std::this_thread::sleep_for(150ms); }); - EXPECT_TRUE(140 <= d1 && d1 <= 160); - - auto d2 = Stopwatch("stopwatch", [] { std::this_thread::sleep_for(300ms); }); - EXPECT_TRUE(290 <= d2 && d2 <= 310); - - Counter &total_time = GetCounter("stopwatch.total_time"); - Counter &count = GetCounter("stopwatch.count"); - IntervalMin &min = GetIntervalMin("stopwatch.min"); - IntervalMax &max = GetIntervalMax("stopwatch.max"); - - EXPECT_TRUE(430 <= total_time.Value() && total_time.Value() <= 470); - EXPECT_EQ(count.Value(), 2); - - auto m = *min.Flush(); - EXPECT_TRUE(140 <= m && m <= 160); - - auto M = *max.Flush(); - EXPECT_TRUE(290 <= M && M <= 310); -} diff --git a/tests/unit/query_common.hpp b/tests/unit/query_common.hpp index 96dde4ecd..f3e210b41 100644 --- a/tests/unit/query_common.hpp +++ b/tests/unit/query_common.hpp @@ -570,8 +570,3 @@ auto GetMerge(AstStorage &storage, Pattern *pattern, OnMatch on_match, storage.Create<query::Reduce>( \ storage.Create<query::Identifier>(accumulator), initializer, \ storage.Create<query::Identifier>(variable), list, expr) -#define CREATE_USER(username, password) \ - storage.Create<query::ModifyUser>((username), LITERAL(password), true) -#define ALTER_USER(username, password) \ - storage.Create<query::ModifyUser>((username), LITERAL(password), false) -#define DROP_USER(usernames) storage.Create<query::DropUser>((usernames)) diff --git a/tests/unit/query_plan_match_filter_return.cpp b/tests/unit/query_plan_match_filter_return.cpp index 3eef118b4..57cdcc2c8 100644 --- a/tests/unit/query_plan_match_filter_return.cpp +++ b/tests/unit/query_plan_match_filter_return.cpp @@ -13,13 +13,10 @@ #include "communication/result_stream_faker.hpp" #include "database/graph_db.hpp" -#include "distributed/data_manager.hpp" -#include "distributed/updates_rpc_server.hpp" #include "query/context.hpp" #include "query/exceptions.hpp" #include "query/plan/operator.hpp" -#include "distributed_common.hpp" #include "query_plan_common.hpp" using namespace query; @@ -829,10 +826,9 @@ struct hash<std::pair<int, int>> { } // namespace std /** A test fixture for breadth first expansion */ -class QueryPlanExpandBfs - : public testing::TestWithParam<std::pair<TestType, int>> { +class QueryPlanExpandBfs : public testing::Test { protected: - QueryPlanExpandBfs() : cluster(GetParam().first, GetParam().second) {} + QueryPlanExpandBfs() {} // Worker IDs where vertices are located. const std::vector<int> vertices = {0, 1, 1, 0, 1, 2}; @@ -842,8 +838,7 @@ class QueryPlanExpandBfs // Style-guide non-conformant name due to PROPERTY_PAIR and PROPERTY_LOOKUP // macro requirements. 
- Cluster cluster; - database::GraphDb &db{*cluster.master()}; + database::SingleNode db; database::GraphDbAccessor dba{db}; std::pair<std::string, storage::Property> prop = PROPERTY_PAIR("property"); storage::EdgeType edge_type = dba.EdgeType("edge_type"); @@ -859,19 +854,18 @@ class QueryPlanExpandBfs Symbol inner_edge = symbol_table.CreateSymbol("inner_edge", true); Symbol inner_node = symbol_table.CreateSymbol("inner_node", true); + void AdvanceCommand(tx::TransactionId tx_id) { + database::GraphDbAccessor dba{db, tx_id}; + dba.AdvanceCommand(); + } + void SetUp() { for (auto p : iter::enumerate(vertices)) { int id, worker; std::tie(id, worker) = p; - if (GetParam().first == TestType::SINGLE_NODE || worker == 0) { - auto vertex = dba.InsertVertex(); - vertex.PropsSet(prop.second, id); - v.push_back(vertex.GlobalAddress()); - } else { - auto vertex = - dba.InsertVertexIntoRemote(worker, {}, {{prop.second, id}}); - v.push_back(vertex.GlobalAddress()); - } + auto vertex = dba.InsertVertex(); + vertex.PropsSet(prop.second, id); + v.push_back(vertex.GlobalAddress()); } for (auto p : edges) { @@ -882,7 +876,7 @@ class QueryPlanExpandBfs e.emplace(p, edge.GlobalAddress()); } - cluster.AdvanceCommand(dba.transaction_id()); + AdvanceCommand(dba.transaction_id()); } // Defines and performs a breadth-first expansion with the given parameters. @@ -950,7 +944,7 @@ class QueryPlanExpandBfs } }; -TEST_P(QueryPlanExpandBfs, Basic) { +TEST_F(QueryPlanExpandBfs, Basic) { auto results = ExpandBF(EdgeAtom::Direction::BOTH, 1, 1000, nullptr, GraphView::OLD, {VertexAccessor(v[0], dba)}); @@ -980,7 +974,7 @@ TEST_P(QueryPlanExpandBfs, Basic) { EdgesEqual(results[4].first, {1, 12, 25, 53})); } -TEST_P(QueryPlanExpandBfs, EdgeDirection) { +TEST_F(QueryPlanExpandBfs, EdgeDirection) { { auto results = ExpandBF(EdgeAtom::Direction::OUT, 1, 1000, nullptr, GraphView::OLD, {VertexAccessor(v[4], dba)}); @@ -1030,12 +1024,7 @@ TEST_P(QueryPlanExpandBfs, EdgeDirection) { } } -TEST_P(QueryPlanExpandBfs, Where) { - // TODO(mtomic): lambda filtering in distributed - if (GetParam().first == TestType::DISTRIBUTED) { - return; - } - +TEST_F(QueryPlanExpandBfs, Where) { auto ident = IDENT("inner_element"); { symbol_table[*ident] = inner_node; @@ -1065,7 +1054,7 @@ TEST_P(QueryPlanExpandBfs, Where) { } } -TEST_P(QueryPlanExpandBfs, GraphState) { +TEST_F(QueryPlanExpandBfs, GraphState) { auto ExpandSize = [this](GraphView graph_view) { return ExpandBF(EdgeAtom::Direction::BOTH, 1, 1000, nullptr, graph_view, {VertexAccessor(v[0], dba)}) @@ -1081,38 +1070,36 @@ TEST_P(QueryPlanExpandBfs, GraphState) { v.push_back(to.GlobalAddress()); dba.InsertEdge(from, to, edge_type); - cluster.ApplyUpdates(dba.transaction_id()); } EXPECT_EQ(ExpandSize(GraphView::OLD), 5); EXPECT_EQ(ExpandSize(GraphView::NEW), 6); - cluster.AdvanceCommand(dba.transaction_id()); + AdvanceCommand(dba.transaction_id()); EXPECT_EQ(ExpandSize(GraphView::OLD), 6); EXPECT_EQ(ExpandSize(GraphView::NEW), 6); { v.push_back(dba.InsertVertex().GlobalAddress()); - cluster.AdvanceCommand(dba.transaction_id()); + AdvanceCommand(dba.transaction_id()); auto from = VertexAccessor(v[4], dba); auto to = VertexAccessor(v[7], dba); dba.InsertEdge(from, to, edge_type); - cluster.ApplyUpdates(dba.transaction_id()); } EXPECT_EQ(ExpandSize(GraphView::OLD), 6); EXPECT_EQ(ExpandSize(GraphView::NEW), 7); - cluster.AdvanceCommand(dba.transaction_id()); + AdvanceCommand(dba.transaction_id()); EXPECT_EQ(ExpandSize(GraphView::OLD), 7); EXPECT_EQ(ExpandSize(GraphView::NEW), 7); } 
-TEST_P(QueryPlanExpandBfs, MultipleInputs) { +TEST_F(QueryPlanExpandBfs, MultipleInputs) { auto results = ExpandBF(EdgeAtom::Direction::BOTH, 1, 1000, nullptr, GraphView::OLD, {VertexAccessor(v[0], dba), VertexAccessor(v[3], dba)}); @@ -1123,7 +1110,7 @@ TEST_P(QueryPlanExpandBfs, MultipleInputs) { EXPECT_EQ(found, (std::vector<int>{1, 2, 2, 1, 2})); } -TEST_P(QueryPlanExpandBfs, ExistingNode) { +TEST_F(QueryPlanExpandBfs, ExistingNode) { using testing::ElementsAre; using testing::WhenSorted; @@ -1163,7 +1150,7 @@ TEST_P(QueryPlanExpandBfs, ExistingNode) { } } -TEST_P(QueryPlanExpandBfs, OptionalMatch) { +TEST_F(QueryPlanExpandBfs, OptionalMatch) { { auto results = ExpandBF(EdgeAtom::Direction::BOTH, 1, 1000, nullptr, GraphView::OLD, {TypedValue::Null}); @@ -1177,7 +1164,7 @@ TEST_P(QueryPlanExpandBfs, OptionalMatch) { } } -TEST_P(QueryPlanExpandBfs, ExpansionDepth) { +TEST_F(QueryPlanExpandBfs, ExpansionDepth) { { auto results = ExpandBF(EdgeAtom::Direction::BOTH, 2, 3, nullptr, GraphView::OLD, {VertexAccessor(v[0], dba)}); @@ -1192,14 +1179,6 @@ TEST_P(QueryPlanExpandBfs, ExpansionDepth) { } } -INSTANTIATE_TEST_CASE_P(SingleNode, QueryPlanExpandBfs, - ::testing::Values(std::make_pair(TestType::SINGLE_NODE, - 0))); - -INSTANTIATE_TEST_CASE_P(Distributed, QueryPlanExpandBfs, - ::testing::Values(std::make_pair(TestType::DISTRIBUTED, - 2))); - /** A test fixture for weighted shortest path expansion */ class QueryPlanExpandWeightedShortestPath : public testing::Test { public: diff --git a/tests/unit/query_planner.cpp b/tests/unit/query_planner.cpp index a2518726a..b6e1da9d8 100644 --- a/tests/unit/query_planner.cpp +++ b/tests/unit/query_planner.cpp @@ -5,8 +5,6 @@ #include <typeinfo> #include <unordered_set> -#include "boost/archive/binary_iarchive.hpp" -#include "boost/archive/binary_oarchive.hpp" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -17,9 +15,6 @@ #include "query/plan/operator.hpp" #include "query/plan/planner.hpp" -#include "capnp/message.h" -#include "query/plan/operator.capnp.h" - #include "query_common.hpp" namespace query { @@ -121,23 +116,10 @@ class PlanChecker : public HierarchicalLogicalOperatorVisitor { VISIT(CreateIndex); - PRE_VISIT(PullRemote); - - bool PreVisit(Synchronize &op) override { - CheckOp(op); - op.input()->Accept(*this); - return false; - } - bool PreVisit(Cartesian &op) override { CheckOp(op); return false; } - - PRE_VISIT(PullRemoteOrderBy); - - VISIT(ModifyUser); - VISIT(DropUser); #undef PRE_VISIT #undef VISIT @@ -262,12 +244,6 @@ class ExpectAggregate : public OpChecker<Aggregate> { std::unordered_set<query::Expression *> group_by_; }; -auto ExpectMasterAggregate( - const std::vector<query::Aggregation *> &aggregations, - const std::unordered_set<query::Expression *> &group_by) { - return ExpectAggregate(true, aggregations, group_by); -} - class ExpectMerge : public OpChecker<Merge> { public: ExpectMerge(const std::list<BaseOpChecker *> &on_match, @@ -387,45 +363,6 @@ class ExpectCreateIndex : public OpChecker<CreateIndex> { storage::Property property_; }; -class ExpectPullRemote : public OpChecker<PullRemote> { - public: - ExpectPullRemote() {} - ExpectPullRemote(const std::vector<Symbol> &symbols) : symbols_(symbols) {} - - void ExpectOp(PullRemote &op, const SymbolTable &) override { - EXPECT_THAT(op.symbols(), testing::UnorderedElementsAreArray(symbols_)); - } - - private: - std::vector<Symbol> symbols_; -}; - -class ExpectSynchronize : public OpChecker<Synchronize> { - public: - explicit ExpectSynchronize(bool advance_command) - : 
has_pull_(false), advance_command_(advance_command) {} - ExpectSynchronize(const std::vector<Symbol> &symbols = {}, - bool advance_command = false) - : expect_pull_(symbols), - has_pull_(true), - advance_command_(advance_command) {} - - void ExpectOp(Synchronize &op, const SymbolTable &symbol_table) override { - if (has_pull_) { - ASSERT_TRUE(op.pull_remote()); - expect_pull_.ExpectOp(*op.pull_remote(), symbol_table); - } else { - EXPECT_FALSE(op.pull_remote()); - } - EXPECT_EQ(op.advance_command(), advance_command_); - } - - private: - ExpectPullRemote expect_pull_; - bool has_pull_ = true; - bool advance_command_ = false; -}; - class ExpectCartesian : public OpChecker<Cartesian> { public: ExpectCartesian(const std::list<std::unique_ptr<BaseOpChecker>> &left, @@ -459,19 +396,6 @@ class ExpectCreateNode : public OpChecker<CreateNode> { bool on_random_worker_ = false; }; -class ExpectPullRemoteOrderBy : public OpChecker<PullRemoteOrderBy> { - public: - ExpectPullRemoteOrderBy(const std::vector<Symbol> symbols) - : symbols_(symbols) {} - - void ExpectOp(PullRemoteOrderBy &op, const SymbolTable &) override { - EXPECT_THAT(op.symbols(), testing::UnorderedElementsAreArray(symbols_)); - } - - private: - std::vector<Symbol> symbols_; -}; - auto MakeSymbolTable(query::Query &query) { SymbolTable symbol_table; SymbolGenerator symbol_generator(symbol_table); @@ -494,98 +418,6 @@ class Planner { std::unique_ptr<LogicalOperator> plan_; }; -class ExpectModifyUser : public OpChecker<ModifyUser> { - public: - ExpectModifyUser(std::string username, bool is_create) - : username_(username), is_create_(is_create) {} - - void ExpectOp(ModifyUser &modify_user, const SymbolTable &) override { - EXPECT_EQ(username_, modify_user.username()); - // TODO(mtomic): proper password verification - EXPECT_NE(dynamic_cast<query::Expression *>(modify_user.password()), - nullptr); - EXPECT_EQ(is_create_, modify_user.is_create()); - } - - private: - std::string username_; - bool is_create_; -}; - -class ExpectDropUser : public OpChecker<DropUser> { - public: - ExpectDropUser(std::vector<std::string> usernames) : usernames_(usernames) {} - - void ExpectOp(DropUser &drop_user, const SymbolTable &) override { - EXPECT_EQ(usernames_, drop_user.usernames()); - } - - private: - std::vector<std::string> usernames_; -}; - -class SerializedPlanner { - public: - template <class TDbAccessor> - SerializedPlanner(std::vector<SingleQueryPart> single_query_parts, - PlanningContext<TDbAccessor> &context) { - std::stringstream stream; - { - auto original_plan = MakeLogicalPlanForSingleQuery<RuleBasedPlanner>( - single_query_parts, context); - boost::archive::binary_oarchive out_archive(stream); - out_archive << original_plan; - } - { - boost::archive::binary_iarchive in_archive(stream); - std::tie(plan_, ast_storage_) = LoadPlan(in_archive); - } - } - - auto &plan() { return *plan_; } - - private: - AstStorage ast_storage_; - std::unique_ptr<LogicalOperator> plan_; -}; - -void SavePlan(const LogicalOperator &plan, ::capnp::MessageBuilder *message) { - auto builder = message->initRoot<query::plan::capnp::LogicalOperator>(); - LogicalOperator::SaveHelper helper; - plan.Save(&builder, &helper); -} - -auto LoadPlan(const ::query::plan::capnp::LogicalOperator::Reader &reader) { - auto plan = LogicalOperator::Construct(reader); - LogicalOperator::LoadHelper helper; - plan->Load(reader, &helper); - return std::make_pair(std::move(plan), std::move(helper.ast_storage)); -} - -class CapnpPlanner { - public: - template <class TDbAccessor> - 
CapnpPlanner(std::vector<SingleQueryPart> single_query_parts, - PlanningContext<TDbAccessor> &context) { - ::capnp::MallocMessageBuilder message; - { - auto original_plan = MakeLogicalPlanForSingleQuery<RuleBasedPlanner>( - single_query_parts, context); - SavePlan(*original_plan, &message); - } - { - auto reader = message.getRoot<query::plan::capnp::LogicalOperator>(); - std::tie(plan_, ast_storage_) = LoadPlan(reader); - } - } - - auto &plan() { return *plan_; } - - private: - AstStorage ast_storage_; - std::unique_ptr<LogicalOperator> plan_; -}; - class FakeDbAccessor { public: int64_t VerticesCount(storage::Label label) const { @@ -692,57 +524,6 @@ auto CheckPlan(AstStorage &storage, TChecker... checker) { CheckPlan(planner.plan(), symbol_table, checker...); } -struct ExpectedDistributedPlan { - std::list<std::unique_ptr<BaseOpChecker>> master_checkers; - std::vector<std::list<std::unique_ptr<BaseOpChecker>>> worker_checkers; -}; - -template <class TPlanner> -DistributedPlan MakeDistributedPlan(query::AstStorage &storage) { - auto symbol_table = MakeSymbolTable(*storage.query()); - FakeDbAccessor dba; - auto planner = MakePlanner<TPlanner>(dba, storage, symbol_table); - std::atomic<int64_t> next_plan_id{0}; - return MakeDistributedPlan(planner.plan(), symbol_table, next_plan_id); -} - -void CheckDistributedPlan(DistributedPlan &distributed_plan, - ExpectedDistributedPlan &expected) { - PlanChecker plan_checker(expected.master_checkers, - distributed_plan.symbol_table); - distributed_plan.master_plan->Accept(plan_checker); - EXPECT_TRUE(plan_checker.checkers_.empty()); - if (expected.worker_checkers.empty()) { - EXPECT_TRUE(distributed_plan.worker_plans.empty()); - } else { - ASSERT_EQ(distributed_plan.worker_plans.size(), - expected.worker_checkers.size()); - for (size_t i = 0; i < expected.worker_checkers.size(); ++i) { - PlanChecker plan_checker(expected.worker_checkers[i], - distributed_plan.symbol_table); - auto worker_plan = distributed_plan.worker_plans[i].second; - worker_plan->Accept(plan_checker); - EXPECT_TRUE(plan_checker.checkers_.empty()); - } - } -} - -void CheckDistributedPlan(const LogicalOperator &plan, - const SymbolTable &symbol_table, - ExpectedDistributedPlan &expected_distributed_plan) { - std::atomic<int64_t> next_plan_id{0}; - auto distributed_plan = MakeDistributedPlan(plan, symbol_table, next_plan_id); - EXPECT_EQ(next_plan_id - 1, distributed_plan.worker_plans.size()); - CheckDistributedPlan(distributed_plan, expected_distributed_plan); -} - -template <class TPlanner> -void CheckDistributedPlan(AstStorage &storage, - ExpectedDistributedPlan &expected_distributed_plan) { - auto distributed_plan = MakeDistributedPlan<TPlanner>(storage); - CheckDistributedPlan(distributed_plan, expected_distributed_plan); -} - template <class T> std::list<std::unique_ptr<BaseOpChecker>> MakeCheckers(T arg) { std::list<std::unique_ptr<BaseOpChecker>> l; @@ -757,47 +538,10 @@ std::list<std::unique_ptr<BaseOpChecker>> MakeCheckers(T arg, Rest &&... 
rest) { return std::move(l); } -ExpectedDistributedPlan ExpectDistributed( - std::list<std::unique_ptr<BaseOpChecker>> master_checker) { - return ExpectedDistributedPlan{std::move(master_checker)}; -} - -ExpectedDistributedPlan ExpectDistributed( - std::list<std::unique_ptr<BaseOpChecker>> master_checker, - std::list<std::unique_ptr<BaseOpChecker>> worker_checker) { - ExpectedDistributedPlan expected{std::move(master_checker)}; - expected.worker_checkers.emplace_back(std::move(worker_checker)); - return expected; -} - -void AddWorkerCheckers( - ExpectedDistributedPlan &expected, - std::list<std::unique_ptr<BaseOpChecker>> worker_checker) { - expected.worker_checkers.emplace_back(std::move(worker_checker)); -} - -template <class... Rest> -void AddWorkerCheckers(ExpectedDistributedPlan &expected, - std::list<std::unique_ptr<BaseOpChecker>> worker_checker, - Rest &&... rest) { - expected.worker_checkers.emplace_back(std::move(worker_checker)); - AddWorkerCheckers(expected, std::forward<Rest>(rest)...); -} - -template <class... Rest> -ExpectedDistributedPlan ExpectDistributed( - std::list<std::unique_ptr<BaseOpChecker>> master_checker, - std::list<std::unique_ptr<BaseOpChecker>> worker_checker, Rest &&... rest) { - ExpectedDistributedPlan expected{std::move(master_checker)}; - expected.worker_checkers.emplace_back(std::move(worker_checker)); - AddWorkerCheckers(expected, std::forward<Rest>(rest)...); - return expected; -} - template <class T> class TestPlanner : public ::testing::Test {}; -using PlannerTypes = ::testing::Types<Planner, SerializedPlanner, CapnpPlanner>; +using PlannerTypes = ::testing::Types<Planner>; TYPED_TEST_CASE(TestPlanner, PlannerTypes); @@ -810,11 +554,6 @@ TYPED_TEST(TestPlanner, MatchNodeReturn) { FakeDbAccessor dba; auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectProduce()); - ExpectPullRemote pull({symbol_table.at(*as_n)}); - auto expected = - ExpectDistributed(MakeCheckers(ExpectScanAll(), ExpectProduce(), pull), - MakeCheckers(ExpectScanAll(), ExpectProduce())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, CreateNodeReturn) { @@ -829,14 +568,6 @@ TYPED_TEST(TestPlanner, CreateNodeReturn) { auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectCreateNode(), acc, ExpectProduce()); - { - auto expected = ExpectDistributed(MakeCheckers( - ExpectCreateNode(true), ExpectSynchronize(false), ExpectProduce())); - std::atomic<int64_t> next_plan_id{0}; - auto distributed_plan = - MakeDistributedPlan(planner.plan(), symbol_table, next_plan_id); - CheckDistributedPlan(distributed_plan, expected); - } } TYPED_TEST(TestPlanner, CreateExpand) { @@ -847,11 +578,6 @@ TYPED_TEST(TestPlanner, CreateExpand) { QUERY(SINGLE_QUERY(CREATE(PATTERN( NODE("n"), EDGE("r", Direction::OUT, {relationship}), NODE("m"))))); CheckPlan<TypeParam>(storage, ExpectCreateNode(), ExpectCreateExpand()); - ExpectedDistributedPlan expected{ - MakeCheckers(ExpectCreateNode(true), ExpectCreateExpand(), - ExpectSynchronize(false)), - {}}; - CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, CreateMultipleNode) { @@ -859,11 +585,6 @@ TYPED_TEST(TestPlanner, CreateMultipleNode) { AstStorage storage; QUERY(SINGLE_QUERY(CREATE(PATTERN(NODE("n")), PATTERN(NODE("m"))))); CheckPlan<TypeParam>(storage, ExpectCreateNode(), ExpectCreateNode()); - ExpectedDistributedPlan expected{ - 
MakeCheckers(ExpectCreateNode(true), ExpectCreateNode(true), - ExpectSynchronize(false)), - {}}; - CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, CreateNodeExpandNode) { @@ -876,11 +597,6 @@ TYPED_TEST(TestPlanner, CreateNodeExpandNode) { PATTERN(NODE("l"))))); CheckPlan<TypeParam>(storage, ExpectCreateNode(), ExpectCreateExpand(), ExpectCreateNode()); - ExpectedDistributedPlan expected{ - MakeCheckers(ExpectCreateNode(true), ExpectCreateExpand(), - ExpectCreateNode(true), ExpectSynchronize(false)), - {}}; - CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, CreateNamedPattern) { @@ -892,11 +608,6 @@ TYPED_TEST(TestPlanner, CreateNamedPattern) { "p", NODE("n"), EDGE("r", Direction::OUT, {relationship}), NODE("m"))))); CheckPlan<TypeParam>(storage, ExpectCreateNode(), ExpectCreateExpand(), ExpectConstructNamedPath()); - ExpectedDistributedPlan expected{ - MakeCheckers(ExpectCreateNode(true), ExpectCreateExpand(), - ExpectConstructNamedPath(), ExpectSynchronize(false)), - {}}; - CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, MatchCreateExpand) { @@ -909,10 +620,6 @@ TYPED_TEST(TestPlanner, MatchCreateExpand) { CREATE(PATTERN(NODE("n"), EDGE("r", Direction::OUT, {relationship}), NODE("m"))))); CheckPlan<TypeParam>(storage, ExpectScanAll(), ExpectCreateExpand()); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), ExpectCreateExpand(), ExpectSynchronize()), - MakeCheckers(ExpectScanAll(), ExpectCreateExpand())); - CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, MatchLabeledNodes) { @@ -926,11 +633,6 @@ TYPED_TEST(TestPlanner, MatchLabeledNodes) { auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectScanAllByLabel(), ExpectProduce()); - ExpectPullRemote pull({symbol_table.at(*as_n)}); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAllByLabel(), ExpectProduce(), pull), - MakeCheckers(ExpectScanAllByLabel(), ExpectProduce())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, MatchPathReturn) { @@ -947,11 +649,6 @@ TYPED_TEST(TestPlanner, MatchPathReturn) { auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectExpand(), ExpectProduce()); - ExpectPullRemote pull({symbol_table.at(*as_n)}); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectProduce(), pull), - MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectProduce())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, MatchNamedPatternReturn) { @@ -969,13 +666,6 @@ TYPED_TEST(TestPlanner, MatchNamedPatternReturn) { auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectExpand(), ExpectConstructNamedPath(), ExpectProduce()); - ExpectPullRemote pull({symbol_table.at(*as_p)}); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectConstructNamedPath(), - ExpectProduce(), pull), - MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectConstructNamedPath(), - ExpectProduce())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, MatchNamedPatternWithPredicateReturn) { @@ -993,13 +683,6 @@ TYPED_TEST(TestPlanner, MatchNamedPatternWithPredicateReturn) { auto planner = MakePlanner<TypeParam>(dba, storage, 
symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectExpand(), ExpectConstructNamedPath(), ExpectFilter(), ExpectProduce()); - ExpectPullRemote pull({symbol_table.at(*as_p)}); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectConstructNamedPath(), - ExpectFilter(), ExpectProduce(), pull), - MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectConstructNamedPath(), - ExpectFilter(), ExpectProduce())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, OptionalMatchNamedPatternReturn) { @@ -1023,12 +706,6 @@ TYPED_TEST(TestPlanner, OptionalMatchNamedPatternReturn) { auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectOptional(optional_symbols, optional), ExpectProduce()); - auto expected = ExpectDistributed( - MakeCheckers(ExpectOptional(optional_symbols, optional), ExpectProduce(), - ExpectPullRemote({symbol_table.at(*as_p)})), - MakeCheckers(ExpectOptional(optional_symbols, optional), - ExpectProduce())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, MatchWhereReturn) { @@ -1044,11 +721,6 @@ TYPED_TEST(TestPlanner, MatchWhereReturn) { auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectFilter(), ExpectProduce()); - ExpectPullRemote pull({symbol_table.at(*as_n)}); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), ExpectFilter(), ExpectProduce(), pull), - MakeCheckers(ExpectScanAll(), ExpectFilter(), ExpectProduce())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, MatchDelete) { @@ -1056,10 +728,6 @@ TYPED_TEST(TestPlanner, MatchDelete) { AstStorage storage; QUERY(SINGLE_QUERY(MATCH(PATTERN(NODE("n"))), DELETE(IDENT("n")))); CheckPlan<TypeParam>(storage, ExpectScanAll(), ExpectDelete()); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), ExpectDelete(), ExpectSynchronize()), - MakeCheckers(ExpectScanAll(), ExpectDelete())); - CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, MatchNodeSet) { @@ -1073,12 +741,6 @@ TYPED_TEST(TestPlanner, MatchNodeSet) { SET("n", IDENT("n")), SET("n", {label}))); CheckPlan<TypeParam>(storage, ExpectScanAll(), ExpectSetProperty(), ExpectSetProperties(), ExpectSetLabels()); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), ExpectSetProperty(), ExpectSetProperties(), - ExpectSetLabels(), ExpectSynchronize()), - MakeCheckers(ExpectScanAll(), ExpectSetProperty(), ExpectSetProperties(), - ExpectSetLabels())); - CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, MatchRemove) { @@ -1091,12 +753,6 @@ TYPED_TEST(TestPlanner, MatchRemove) { REMOVE(PROPERTY_LOOKUP("n", prop)), REMOVE("n", {label}))); CheckPlan<TypeParam>(storage, ExpectScanAll(), ExpectRemoveProperty(), ExpectRemoveLabels()); - auto expected = - ExpectDistributed(MakeCheckers(ExpectScanAll(), ExpectRemoveProperty(), - ExpectRemoveLabels(), ExpectSynchronize()), - MakeCheckers(ExpectScanAll(), ExpectRemoveProperty(), - ExpectRemoveLabels())); - CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, MatchMultiPattern) { @@ -1160,25 +816,6 @@ TYPED_TEST(TestPlanner, MultiMatch) { CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectExpand(), ExpectScanAll(), ExpectExpand(), ExpectExpand(), ExpectExpandUniquenessFilter<EdgeAccessor>(), 
ExpectProduce()); - auto get_symbol = [&symbol_table](const auto *atom_node) { - return symbol_table.at(*atom_node->identifier_); - }; - ExpectPullRemote left_pull( - {get_symbol(node_n), get_symbol(edge_r), get_symbol(node_m)}); - auto left_cart = MakeCheckers(ExpectScanAll(), ExpectExpand(), left_pull); - ExpectPullRemote right_pull({get_symbol(node_j), get_symbol(edge_e), - get_symbol(node_i), get_symbol(edge_f), - get_symbol(node_h)}); - auto right_cart = - MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectExpand(), - ExpectExpandUniquenessFilter<EdgeAccessor>(), right_pull); - auto expected = ExpectDistributed( - MakeCheckers(ExpectCartesian(std::move(left_cart), std::move(right_cart)), - ExpectProduce()), - MakeCheckers(ExpectScanAll(), ExpectExpand()), - MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectExpand(), - ExpectExpandUniquenessFilter<EdgeAccessor>())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, MultiMatchSameStart) { @@ -1195,11 +832,6 @@ TYPED_TEST(TestPlanner, MultiMatchSameStart) { auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectExpand(), ExpectProduce()); - ExpectPullRemote pull({symbol_table.at(*as_n)}); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectProduce(), pull), - MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectProduce())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, MatchWithReturn) { @@ -1214,11 +846,6 @@ TYPED_TEST(TestPlanner, MatchWithReturn) { auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectProduce(), ExpectProduce()); - ExpectPullRemote pull({symbol_table.at(*as_new)}); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), ExpectProduce(), ExpectProduce(), pull), - MakeCheckers(ExpectScanAll(), ExpectProduce(), ExpectProduce())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, MatchWithWhereReturn) { @@ -1235,13 +862,6 @@ TYPED_TEST(TestPlanner, MatchWithWhereReturn) { auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectProduce(), ExpectFilter(), ExpectProduce()); - ExpectPullRemote pull({symbol_table.at(*as_new)}); - auto expected = - ExpectDistributed(MakeCheckers(ExpectScanAll(), ExpectProduce(), - ExpectFilter(), ExpectProduce(), pull), - MakeCheckers(ExpectScanAll(), ExpectProduce(), - ExpectFilter(), ExpectProduce())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, CreateMultiExpand) { @@ -1255,11 +875,6 @@ TYPED_TEST(TestPlanner, CreateMultiExpand) { PATTERN(NODE("n"), EDGE("p", Direction::OUT, {p}), NODE("l"))))); CheckPlan<TypeParam>(storage, ExpectCreateNode(), ExpectCreateExpand(), ExpectCreateExpand()); - ExpectedDistributedPlan expected{ - MakeCheckers(ExpectCreateNode(true), ExpectCreateExpand(), - ExpectCreateExpand(), ExpectSynchronize(false)), - {}}; - CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, MatchWithSumWhereReturn) { @@ -1293,20 +908,6 @@ TYPED_TEST(TestPlanner, MatchReturnSum) { auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), aggr, ExpectProduce()); - { - std::atomic<int64_t> next_plan_id{0}; - auto distributed_plan = - 
MakeDistributedPlan(planner.plan(), symbol_table, next_plan_id); - auto merge_sum = SUM(IDENT("worker_sum")); - auto master_aggr = ExpectMasterAggregate({merge_sum}, {n_prop2}); - ExpectPullRemote pull( - {symbol_table.at(*sum), symbol_table.at(*n_prop2->expression_)}); - auto expected = - ExpectDistributed(MakeCheckers(ExpectScanAll(), aggr, pull, master_aggr, - ExpectProduce(), ExpectProduce()), - MakeCheckers(ExpectScanAll(), aggr)); - CheckDistributedPlan(distributed_plan, expected); - } } TYPED_TEST(TestPlanner, CreateWithSum) { @@ -1339,11 +940,6 @@ TYPED_TEST(TestPlanner, MatchWithCreate) { PATTERN(NODE("a"), EDGE("r", Direction::OUT, {r_type}), NODE("b"))))); CheckPlan<TypeParam>(storage, ExpectScanAll(), ExpectProduce(), ExpectCreateExpand()); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), ExpectProduce(), ExpectCreateExpand(), - ExpectSynchronize()), - MakeCheckers(ExpectScanAll(), ExpectProduce(), ExpectCreateExpand())); - CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, MatchReturnSkipLimit) { @@ -1357,12 +953,6 @@ TYPED_TEST(TestPlanner, MatchReturnSkipLimit) { auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectProduce(), ExpectSkip(), ExpectLimit()); - ExpectPullRemote pull({symbol_table.at(*as_n)}); - auto expected = - ExpectDistributed(MakeCheckers(ExpectScanAll(), ExpectProduce(), pull, - ExpectSkip(), ExpectLimit()), - MakeCheckers(ExpectScanAll(), ExpectProduce())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, CreateWithSkipReturnLimit) { @@ -1383,12 +973,6 @@ TYPED_TEST(TestPlanner, CreateWithSkipReturnLimit) { // us here (but who knows if they change it again). CheckPlan(planner.plan(), symbol_table, ExpectCreateNode(), acc, ExpectProduce(), ExpectSkip(), ExpectProduce(), ExpectLimit()); - ExpectedDistributedPlan expected{ - MakeCheckers(ExpectCreateNode(true), ExpectSynchronize(true), - ExpectProduce(), ExpectSkip(), ExpectProduce(), - ExpectLimit()), - {}}; - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, CreateReturnSumSkipLimit) { @@ -1422,17 +1006,6 @@ TYPED_TEST(TestPlanner, MatchReturnOrderBy) { auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectProduce(), ExpectOrderBy()); - ExpectPullRemoteOrderBy pull_order_by( - {symbol_table.at(*as_m), symbol_table.at(*node_n->identifier_)}); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), ExpectProduce(), ExpectOrderBy(), - pull_order_by), - MakeCheckers(ExpectScanAll(), ExpectProduce(), ExpectOrderBy())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); - // Even though last operator pulls and orders by `m` and `n`, we expect only - // `m` as the output of the query execution. 
- EXPECT_THAT(planner.plan().OutputSymbols(symbol_table), - testing::UnorderedElementsAre(symbol_table.at(*as_m))); } TYPED_TEST(TestPlanner, CreateWithOrderByWhere) { @@ -1462,10 +1035,6 @@ TYPED_TEST(TestPlanner, CreateWithOrderByWhere) { CheckPlan(planner.plan(), symbol_table, ExpectCreateNode(), ExpectCreateExpand(), acc, ExpectProduce(), ExpectOrderBy(), ExpectFilter()); - auto expected = ExpectDistributed(MakeCheckers( - ExpectCreateNode(true), ExpectCreateExpand(), ExpectSynchronize(true), - ExpectProduce(), ExpectOrderBy(), ExpectFilter())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, ReturnAddSumCountOrderBy) { @@ -1477,9 +1046,6 @@ TYPED_TEST(TestPlanner, ReturnAddSumCountOrderBy) { RETURN(ADD(sum, count), AS("result"), ORDER_BY(IDENT("result"))))); auto aggr = ExpectAggregate({sum, count}, {}); CheckPlan<TypeParam>(storage, aggr, ExpectProduce(), ExpectOrderBy()); - auto expected = - ExpectDistributed(MakeCheckers(aggr, ExpectProduce(), ExpectOrderBy())); - CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, MatchMerge) { @@ -1541,11 +1107,6 @@ TYPED_TEST(TestPlanner, MatchUnwindReturn) { auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectUnwind(), ExpectProduce()); - ExpectPullRemote pull({symbol_table.at(*as_n), symbol_table.at(*as_x)}); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), ExpectUnwind(), ExpectProduce(), pull), - MakeCheckers(ExpectScanAll(), ExpectUnwind(), ExpectProduce())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, ReturnDistinctOrderBySkipLimit) { @@ -1555,10 +1116,6 @@ TYPED_TEST(TestPlanner, ReturnDistinctOrderBySkipLimit) { SKIP(LITERAL(1)), LIMIT(LITERAL(1))))); CheckPlan<TypeParam>(storage, ExpectProduce(), ExpectDistinct(), ExpectOrderBy(), ExpectSkip(), ExpectLimit()); - auto expected = ExpectDistributed( - MakeCheckers(ExpectProduce(), ExpectDistinct(), ExpectOrderBy(), - ExpectSkip(), ExpectLimit())); - CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, CreateWithDistinctSumWhereReturn) { @@ -1611,13 +1168,6 @@ TYPED_TEST(TestPlanner, MatchWhereBeforeExpand) { auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectFilter(), ExpectExpand(), ExpectProduce()); - ExpectPullRemote pull({symbol_table.at(*as_n)}); - auto expected = - ExpectDistributed(MakeCheckers(ExpectScanAll(), ExpectFilter(), - ExpectExpand(), ExpectProduce(), pull), - MakeCheckers(ExpectScanAll(), ExpectFilter(), - ExpectExpand(), ExpectProduce())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); } TYPED_TEST(TestPlanner, MultiMatchWhere) { @@ -1737,8 +1287,6 @@ TYPED_TEST(TestPlanner, FunctionAggregationReturn) { RETURN(FN("sqrt", sum), AS("result"), group_by_literal, AS("group_by")))); auto aggr = ExpectAggregate({sum}, {group_by_literal}); CheckPlan<TypeParam>(storage, aggr, ExpectProduce()); - auto expected = ExpectDistributed(MakeCheckers(aggr, ExpectProduce())); - CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, FunctionWithoutArguments) { @@ -1746,8 +1294,6 @@ TYPED_TEST(TestPlanner, FunctionWithoutArguments) { AstStorage storage; QUERY(SINGLE_QUERY(RETURN(FN("pi"), AS("pi")))); CheckPlan<TypeParam>(storage, ExpectProduce()); - auto expected = ExpectDistributed(MakeCheckers(ExpectProduce())); - 
CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, ListLiteralAggregationReturn) { @@ -1846,9 +1392,6 @@ TYPED_TEST(TestPlanner, CreateIndex) { AstStorage storage; QUERY(SINGLE_QUERY(CREATE_INDEX_ON(label, property))); CheckPlan<TypeParam>(storage, ExpectCreateIndex(label, property)); - auto expected = - ExpectDistributed(MakeCheckers(ExpectCreateIndex(label, property))); - CheckDistributedPlan<TypeParam>(storage, expected); } TYPED_TEST(TestPlanner, AtomIndexedLabelProperty) { @@ -2216,413 +1759,4 @@ TYPED_TEST(TestPlanner, ReturnAsteriskOmitsLambdaSymbols) { } } -TYPED_TEST(TestPlanner, ModifyUser) { - { - // Test CREATE USER user WITH PASSWORD 'password' - FakeDbAccessor dba; - AstStorage storage; - QUERY(SINGLE_QUERY(CREATE_USER("user", "password"))); - CheckPlan<TypeParam>(storage, ExpectModifyUser("user", true)); - auto expected = - ExpectDistributed(MakeCheckers(ExpectModifyUser("user", true))); - CheckDistributedPlan<TypeParam>(storage, expected); - } - { - // Test ALTER USER user WITH PASSWORD 'password' - FakeDbAccessor dba; - AstStorage storage; - QUERY(SINGLE_QUERY(ALTER_USER("user", "password"))); - CheckPlan<TypeParam>(storage, ExpectModifyUser("user", false)); - auto expected = - ExpectDistributed(MakeCheckers(ExpectModifyUser("user", false))); - CheckDistributedPlan<TypeParam>(storage, expected); - } -} - -TYPED_TEST(TestPlanner, DropUser) { - // Test DROP USER user1, user2, user3 - AstStorage storage; - std::vector<std::string> usernames({"user1", "user2", "user3"}); - QUERY(SINGLE_QUERY(DROP_USER(usernames))); - CheckPlan<TypeParam>(storage, ExpectDropUser(usernames)); - auto expected = ExpectDistributed(MakeCheckers(ExpectDropUser(usernames))); - CheckDistributedPlan<TypeParam>(storage, expected); -} - -TYPED_TEST(TestPlanner, DistributedAvg) { - // Test MATCH (n) RETURN AVG(n.prop) AS res - AstStorage storage; - FakeDbAccessor dba; - auto prop = dba.Property("prop"); - QUERY(SINGLE_QUERY(MATCH(PATTERN(NODE("n"))), - RETURN(AVG(PROPERTY_LOOKUP("n", prop)), AS("res")))); - auto distributed_plan = MakeDistributedPlan<TypeParam>(storage); - auto &symbol_table = distributed_plan.symbol_table; - auto worker_sum = SUM(PROPERTY_LOOKUP("n", prop)); - auto worker_count = COUNT(PROPERTY_LOOKUP("n", prop)); - { - ASSERT_EQ(distributed_plan.worker_plans.size(), 1U); - auto worker_plan = distributed_plan.worker_plans.back().second; - auto worker_aggr_op = std::dynamic_pointer_cast<Aggregate>(worker_plan); - ASSERT_TRUE(worker_aggr_op); - ASSERT_EQ(worker_aggr_op->aggregations().size(), 2U); - symbol_table[*worker_sum] = worker_aggr_op->aggregations()[0].output_sym; - symbol_table[*worker_count] = worker_aggr_op->aggregations()[1].output_sym; - } - auto worker_aggr = ExpectAggregate({worker_sum, worker_count}, {}); - auto merge_sum = SUM(IDENT("worker_sum")); - auto merge_count = SUM(IDENT("worker_count")); - auto master_aggr = ExpectMasterAggregate({merge_sum, merge_count}, {}); - ExpectPullRemote pull( - {symbol_table.at(*worker_sum), symbol_table.at(*worker_count)}); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), worker_aggr, pull, master_aggr, - ExpectProduce(), ExpectProduce()), - MakeCheckers(ExpectScanAll(), worker_aggr)); - CheckDistributedPlan(distributed_plan, expected); -} - -TYPED_TEST(TestPlanner, DistributedCollectList) { - // Test MATCH (n) RETURN COLLECT(n.prop) AS res - AstStorage storage; - FakeDbAccessor dba; - auto prop = dba.Property("prop"); - auto node_n = NODE("n"); - auto collect = 
COLLECT_LIST(PROPERTY_LOOKUP("n", prop)); - QUERY(SINGLE_QUERY(MATCH(PATTERN(node_n)), RETURN(collect, AS("res")))); - auto distributed_plan = MakeDistributedPlan<TypeParam>(storage); - auto &symbol_table = distributed_plan.symbol_table; - auto aggr = ExpectAggregate({collect}, {}); - ExpectPullRemote pull({symbol_table.at(*node_n->identifier_)}); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), pull, aggr, ExpectProduce()), - MakeCheckers(ExpectScanAll())); - CheckDistributedPlan(distributed_plan, expected); -} - -TYPED_TEST(TestPlanner, DistributedMatchCreateReturn) { - // Test MATCH (n) CREATE (m) RETURN m - AstStorage storage; - auto *ident_m = IDENT("m"); - QUERY(SINGLE_QUERY(MATCH(PATTERN(NODE("n"))), CREATE(PATTERN(NODE("m"))), - RETURN(ident_m, AS("m")))); - auto symbol_table = MakeSymbolTable(*storage.query()); - auto acc = ExpectAccumulate({symbol_table.at(*ident_m)}); - FakeDbAccessor dba; - auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); - auto expected = ExpectDistributed( - MakeCheckers(ExpectScanAll(), ExpectCreateNode(), - ExpectSynchronize({symbol_table.at(*ident_m)}), - ExpectProduce()), - MakeCheckers(ExpectScanAll(), ExpectCreateNode())); - CheckDistributedPlan(planner.plan(), symbol_table, expected); -} - -TYPED_TEST(TestPlanner, DistributedCartesianCreate) { - // Test MATCH (a), (b) CREATE (a)-[e:r]->(b) RETURN e - AstStorage storage; - FakeDbAccessor dba; - auto relationship = dba.EdgeType("r"); - auto *node_a = NODE("a"); - auto *node_b = NODE("b"); - QUERY(SINGLE_QUERY( - MATCH(PATTERN(node_a), PATTERN(node_b)), - CREATE(PATTERN(NODE("a"), EDGE("e", Direction::OUT, {relationship}), - NODE("b"))), - RETURN("e"))); - auto symbol_table = MakeSymbolTable(*storage.query()); - auto left_cart = - MakeCheckers(ExpectScanAll(), - ExpectPullRemote({symbol_table.at(*node_a->identifier_)})); - auto right_cart = - MakeCheckers(ExpectScanAll(), - ExpectPullRemote({symbol_table.at(*node_b->identifier_)})); - auto expected = ExpectDistributed( - MakeCheckers(ExpectCartesian(std::move(left_cart), std::move(right_cart)), - ExpectCreateExpand(), ExpectSynchronize(false), - ExpectProduce()), - MakeCheckers(ExpectScanAll()), MakeCheckers(ExpectScanAll())); - auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); - CheckDistributedPlan(planner.plan(), symbol_table, expected); -} - -TYPED_TEST(TestPlanner, DistributedCartesianExpand) { - // Test MATCH (a), (b)-[e]-(c) RETURN c - AstStorage storage; - auto *node_a = NODE("a"); - auto *node_b = NODE("b"); - auto *edge_e = EDGE("e"); - auto *node_c = NODE("c"); - QUERY(SINGLE_QUERY(MATCH(PATTERN(node_a), PATTERN(node_b, edge_e, node_c)), - RETURN("c"))); - auto symbol_table = MakeSymbolTable(*storage.query()); - auto sym_a = symbol_table.at(*node_a->identifier_); - auto left_cart = MakeCheckers(ExpectScanAll(), ExpectPullRemote({sym_a})); - auto sym_b = symbol_table.at(*node_b->identifier_); - auto sym_e = symbol_table.at(*edge_e->identifier_); - auto sym_c = symbol_table.at(*node_c->identifier_); - auto right_cart = MakeCheckers(ExpectScanAll(), ExpectExpand(), - ExpectPullRemote({sym_b, sym_e, sym_c})); - auto expected = ExpectDistributed( - MakeCheckers(ExpectCartesian(std::move(left_cart), std::move(right_cart)), - ExpectProduce()), - MakeCheckers(ExpectScanAll()), - MakeCheckers(ExpectScanAll(), ExpectExpand())); - FakeDbAccessor dba; - auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); - CheckDistributedPlan(planner.plan(), symbol_table, expected); -} - 
-TYPED_TEST(TestPlanner, DistributedCartesianExpandToExisting) { - // Test MATCH (a), (b)-[e]-(a) RETURN e - AstStorage storage; - auto *node_a = NODE("a"); - auto *node_b = NODE("b"); - QUERY(SINGLE_QUERY( - MATCH(PATTERN(node_a), PATTERN(node_b, EDGE("e"), NODE("a"))), - RETURN("e"))); - auto symbol_table = MakeSymbolTable(*storage.query()); - auto sym_a = symbol_table.at(*node_a->identifier_); - auto left_cart = MakeCheckers(ExpectScanAll(), ExpectPullRemote({sym_a})); - auto sym_b = symbol_table.at(*node_b->identifier_); - auto right_cart = MakeCheckers(ExpectScanAll(), ExpectPullRemote({sym_b})); - auto expected = ExpectDistributed( - MakeCheckers(ExpectCartesian(std::move(left_cart), std::move(right_cart)), - ExpectExpand(), ExpectProduce()), - MakeCheckers(ExpectScanAll()), MakeCheckers(ExpectScanAll())); - FakeDbAccessor dba; - auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); - CheckDistributedPlan(planner.plan(), symbol_table, expected); -} - -TYPED_TEST(TestPlanner, DistributedCartesianExpandFromExisting) { - // Test MATCH (a), (b), (a)-[e]-(b) RETURN e - AstStorage storage; - auto *node_a = NODE("a"); - auto *node_b = NODE("b"); - QUERY(SINGLE_QUERY(MATCH(PATTERN(node_a), PATTERN(node_b), - PATTERN(NODE("a"), EDGE("e"), NODE("b"))), - RETURN("e"))); - auto symbol_table = MakeSymbolTable(*storage.query()); - auto sym_a = symbol_table.at(*node_a->identifier_); - auto left_cart = MakeCheckers(ExpectScanAll(), ExpectPullRemote({sym_a})); - auto sym_b = symbol_table.at(*node_b->identifier_); - auto right_cart = MakeCheckers(ExpectScanAll(), ExpectPullRemote({sym_b})); - auto expected = ExpectDistributed( - MakeCheckers(ExpectCartesian(std::move(left_cart), std::move(right_cart)), - ExpectExpand(), ExpectProduce()), - MakeCheckers(ExpectScanAll()), MakeCheckers(ExpectScanAll())); - FakeDbAccessor dba; - auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); - CheckDistributedPlan(planner.plan(), symbol_table, expected); -} - -TYPED_TEST(TestPlanner, DistributedCartesianFilter) { - // Test MATCH (a), (b), (c) WHERE a = 42 AND b = a AND c = b RETURN c - AstStorage storage; - auto *node_a = NODE("a"); - auto *node_b = NODE("b"); - auto *node_c = NODE("c"); - QUERY(SINGLE_QUERY( - MATCH(PATTERN(node_a), PATTERN(node_b), PATTERN(node_c)), - WHERE(AND(AND(EQ(IDENT("a"), LITERAL(42)), EQ(IDENT("b"), IDENT("a"))), - EQ(IDENT("c"), IDENT("b")))), - RETURN("c"))); - auto symbol_table = MakeSymbolTable(*storage.query()); - auto sym_a = symbol_table.at(*node_a->identifier_); - auto sym_b = symbol_table.at(*node_b->identifier_); - auto sym_c = symbol_table.at(*node_c->identifier_); - auto left_cart = - MakeCheckers(ExpectScanAll(), ExpectFilter(), ExpectPullRemote({sym_a})); - auto mid_cart = MakeCheckers(ExpectScanAll(), ExpectPullRemote({sym_b})); - auto right_cart = MakeCheckers(ExpectScanAll(), ExpectPullRemote({sym_c})); - auto mid_right_cart = - MakeCheckers(ExpectCartesian(std::move(mid_cart), std::move(right_cart)), - ExpectFilter()); - auto expected = ExpectDistributed( - MakeCheckers( - ExpectCartesian(std::move(left_cart), std::move(mid_right_cart)), - ExpectFilter(), ExpectProduce()), - MakeCheckers(ExpectScanAll(), ExpectFilter()), - MakeCheckers(ExpectScanAll()), MakeCheckers(ExpectScanAll())); - FakeDbAccessor dba; - auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); - CheckDistributedPlan(planner.plan(), symbol_table, expected); -} - -TYPED_TEST(TestPlanner, DistributedCartesianProduce) { - // Test MATCH (a) WITH a MATCH (b) WHERE b = a 
RETURN b; - AstStorage storage; - auto *with_a = WITH("a"); - auto *node_b = NODE("b"); - QUERY(SINGLE_QUERY(MATCH(PATTERN(NODE("a"))), with_a, MATCH(PATTERN(node_b)), - WHERE(EQ(IDENT("b"), IDENT("a"))), RETURN("b"))); - auto symbol_table = MakeSymbolTable(*storage.query()); - auto sym_a = symbol_table.at(*with_a->body_.named_expressions[0]); - auto left_cart = - MakeCheckers(ExpectScanAll(), ExpectProduce(), ExpectPullRemote({sym_a})); - auto sym_b = symbol_table.at(*node_b->identifier_); - auto right_cart = MakeCheckers(ExpectScanAll(), ExpectPullRemote({sym_b})); - auto expected = ExpectDistributed( - MakeCheckers(ExpectCartesian(std::move(left_cart), std::move(right_cart)), - ExpectFilter(), ExpectProduce()), - MakeCheckers(ExpectScanAll(), ExpectProduce()), - MakeCheckers(ExpectScanAll())); - FakeDbAccessor dba; - auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); - CheckDistributedPlan(planner.plan(), symbol_table, expected); -} - -TYPED_TEST(TestPlanner, DistributedCartesianUnwind) { - // Test MATCH (a), (b) UNWIND a AS x RETURN x - AstStorage storage; - auto *node_a = NODE("a"); - auto *node_b = NODE("b"); - QUERY(SINGLE_QUERY(MATCH(PATTERN(node_a), PATTERN(node_b)), - UNWIND(IDENT("a"), AS("x")), RETURN("x"))); - auto symbol_table = MakeSymbolTable(*storage.query()); - auto sym_a = symbol_table.at(*node_a->identifier_); - auto left_cart = MakeCheckers(ExpectScanAll(), ExpectPullRemote({sym_a})); - auto sym_b = symbol_table.at(*node_b->identifier_); - auto right_cart = MakeCheckers(ExpectScanAll(), ExpectPullRemote({sym_b})); - auto expected = ExpectDistributed( - MakeCheckers(ExpectCartesian(std::move(left_cart), std::move(right_cart)), - ExpectUnwind(), ExpectProduce()), - MakeCheckers(ExpectScanAll()), MakeCheckers(ExpectScanAll())); - FakeDbAccessor dba; - auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); - CheckDistributedPlan(planner.plan(), symbol_table, expected); -} - -TYPED_TEST(TestPlanner, DistributedCartesianCreateNode) { - // Test MATCH (a) CREATE (b) WITH b MATCH (c) CREATE (d) - AstStorage storage; - auto *node_b = NODE("b"); - auto *node_c = NODE("c"); - QUERY(SINGLE_QUERY(MATCH(PATTERN(NODE("a"))), CREATE(PATTERN(node_b)), - WITH("b"), MATCH(PATTERN(node_c)), - CREATE(PATTERN(NODE("d"))))); - auto symbol_table = MakeSymbolTable(*storage.query()); - auto sym_b = symbol_table.at(*node_b->identifier_); - auto left_cart = - MakeCheckers(ExpectScanAll(), ExpectCreateNode(), - ExpectSynchronize({sym_b}, true), ExpectProduce()); - auto sym_c = symbol_table.at(*node_c->identifier_); - auto right_cart = MakeCheckers(ExpectScanAll(), ExpectPullRemote({sym_c})); - auto expected = ExpectDistributed( - MakeCheckers(ExpectCartesian(std::move(left_cart), std::move(right_cart)), - ExpectCreateNode(true), ExpectSynchronize(false)), - MakeCheckers(ExpectScanAll(), ExpectCreateNode()), - MakeCheckers(ExpectScanAll())); - FakeDbAccessor dba; - auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table); - CheckDistributedPlan(planner.plan(), symbol_table, expected); -} - -TEST(CapnpSerial, Union) { - std::vector<Symbol> left_symbols{ - Symbol("symbol", 1, true, Symbol::Type::Edge)}; - std::vector<Symbol> right_symbols{ - Symbol("symbol", 3, true, Symbol::Type::Any)}; - auto union_symbols = right_symbols; - auto union_op = std::make_unique<Union>(nullptr, nullptr, union_symbols, - left_symbols, right_symbols); - std::unique_ptr<LogicalOperator> loaded_plan; - ::capnp::MallocMessageBuilder message; - SavePlan(*union_op, &message); - AstStorage 
new_storage; - std::tie(loaded_plan, new_storage) = - LoadPlan(message.getRoot<query::plan::capnp::LogicalOperator>()); - ASSERT_TRUE(loaded_plan); - auto *loaded_op = dynamic_cast<Union *>(loaded_plan.get()); - ASSERT_TRUE(loaded_op); - EXPECT_FALSE(loaded_op->left_op()); - EXPECT_FALSE(loaded_op->right_op()); - EXPECT_EQ(loaded_op->left_symbols(), left_symbols); - EXPECT_EQ(loaded_op->right_symbols(), right_symbols); - EXPECT_EQ(loaded_op->union_symbols(), union_symbols); -} - -TEST(CapnpSerial, Cartesian) { - std::vector<Symbol> left_symbols{ - Symbol("left_symbol", 1, true, Symbol::Type::Edge)}; - std::vector<Symbol> right_symbols{ - Symbol("right_symbol", 3, true, Symbol::Type::Any)}; - auto cartesian = std::make_unique<Cartesian>(nullptr, left_symbols, nullptr, - right_symbols); - std::unique_ptr<LogicalOperator> loaded_plan; - ::capnp::MallocMessageBuilder message; - SavePlan(*cartesian, &message); - AstStorage new_storage; - std::tie(loaded_plan, new_storage) = - LoadPlan(message.getRoot<query::plan::capnp::LogicalOperator>()); - ASSERT_TRUE(loaded_plan); - auto *loaded_op = dynamic_cast<Cartesian *>(loaded_plan.get()); - ASSERT_TRUE(loaded_op); - EXPECT_FALSE(loaded_op->left_op()); - EXPECT_FALSE(loaded_op->right_op()); - EXPECT_EQ(loaded_op->left_symbols(), left_symbols); - EXPECT_EQ(loaded_op->right_symbols(), right_symbols); -} - -TEST(CapnpSerial, Synchronize) { - auto synchronize = std::make_unique<Synchronize>(nullptr, nullptr, true); - std::unique_ptr<LogicalOperator> loaded_plan; - ::capnp::MallocMessageBuilder message; - SavePlan(*synchronize, &message); - AstStorage new_storage; - std::tie(loaded_plan, new_storage) = - LoadPlan(message.getRoot<query::plan::capnp::LogicalOperator>()); - ASSERT_TRUE(loaded_plan); - auto *loaded_op = dynamic_cast<Synchronize *>(loaded_plan.get()); - ASSERT_TRUE(loaded_op); - EXPECT_FALSE(loaded_op->input()); - EXPECT_FALSE(loaded_op->pull_remote()); - EXPECT_TRUE(loaded_op->advance_command()); -} - -TEST(CapnpSerial, PullRemote) { - std::vector<Symbol> symbols{Symbol("symbol", 1, true, Symbol::Type::Edge)}; - auto pull_remote = std::make_unique<PullRemote>(nullptr, 42, symbols); - std::unique_ptr<LogicalOperator> loaded_plan; - ::capnp::MallocMessageBuilder message; - SavePlan(*pull_remote, &message); - AstStorage new_storage; - std::tie(loaded_plan, new_storage) = - LoadPlan(message.getRoot<query::plan::capnp::LogicalOperator>()); - ASSERT_TRUE(loaded_plan); - auto *loaded_op = dynamic_cast<PullRemote *>(loaded_plan.get()); - ASSERT_TRUE(loaded_op); - EXPECT_FALSE(loaded_op->input()); - EXPECT_EQ(loaded_op->plan_id(), 42); - EXPECT_EQ(loaded_op->symbols(), symbols); -} - -TEST(CapnpSerial, PullRemoteOrderBy) { - auto once = std::make_shared<Once>(); - AstStorage storage; - std::vector<Symbol> symbols{ - Symbol("my_symbol", 2, true, Symbol::Type::Vertex, 3)}; - std::vector<std::pair<query::Ordering, query::Expression *>> order_by{ - {query::Ordering::ASC, IDENT("my_symbol")}}; - auto pull_remote_order_by = - std::make_unique<PullRemoteOrderBy>(once, 42, order_by, symbols); - std::unique_ptr<LogicalOperator> loaded_plan; - ::capnp::MallocMessageBuilder message; - SavePlan(*pull_remote_order_by, &message); - AstStorage new_storage; - std::tie(loaded_plan, new_storage) = - LoadPlan(message.getRoot<query::plan::capnp::LogicalOperator>()); - ASSERT_TRUE(loaded_plan); - auto *loaded_op = dynamic_cast<PullRemoteOrderBy *>(loaded_plan.get()); - ASSERT_TRUE(loaded_op); - ASSERT_TRUE(std::dynamic_pointer_cast<Once>(loaded_op->input())); - 
EXPECT_EQ(loaded_op->plan_id(), 42); - EXPECT_EQ(loaded_op->symbols(), symbols); - ASSERT_EQ(loaded_op->order_by().size(), 1); - EXPECT_TRUE(dynamic_cast<query::Identifier *>(loaded_op->order_by()[0])); - ASSERT_EQ(loaded_op->compare().ordering().size(), 1); - EXPECT_EQ(loaded_op->compare().ordering()[0], query::Ordering::ASC); -} - } // namespace diff --git a/tests/unit/query_semantic.cpp b/tests/unit/query_semantic.cpp index 010bf57a2..f83d1b2df 100644 --- a/tests/unit/query_semantic.cpp +++ b/tests/unit/query_semantic.cpp @@ -1,8 +1,6 @@ #include <memory> #include <sstream> -#include "boost/archive/binary_iarchive.hpp" -#include "boost/archive/binary_oarchive.hpp" #include "gtest/gtest.h" #include "query/frontend/ast/ast.hpp" @@ -1087,25 +1085,3 @@ TEST_F(TestSymbolGenerator, MatchUnion) { query->Accept(symbol_generator); EXPECT_EQ(symbol_table.max_position(), 8); } - -TEST(TestSymbolTable, Serialization) { - SymbolTable original_table; - SymbolGenerator symbol_generator{original_table}; - AstStorage storage; - auto ident_a = IDENT("a"); - auto sym_a = original_table.CreateSymbol("a", true, Symbol::Type::Vertex, 0); - original_table[*ident_a] = sym_a; - auto ident_b = IDENT("b"); - auto sym_b = original_table.CreateSymbol("b", false, Symbol::Type::Edge, 1); - original_table[*ident_b] = sym_b; - std::stringstream stream; - { - boost::archive::binary_oarchive out_archive(stream); - out_archive << original_table; - } - SymbolTable serialized_table; - boost::archive::binary_iarchive in_archive(stream); - in_archive >> serialized_table; - EXPECT_EQ(serialized_table.max_position(), original_table.max_position()); - EXPECT_EQ(serialized_table.table(), original_table.table()); -} diff --git a/tests/unit/raft.cpp b/tests/unit/raft.cpp deleted file mode 100644 index 14bebefec..000000000 --- a/tests/unit/raft.cpp +++ /dev/null @@ -1,660 +0,0 @@ -#include "gtest/gtest.h" - -#include <chrono> -#include <experimental/optional> -#include <thread> - -#include "communication/raft/raft.hpp" -#include "communication/raft/storage/memory.hpp" -#include "communication/raft/test_utils.hpp" - -using namespace std::chrono_literals; - -using testing::Values; - -using namespace communication::raft; -using namespace communication::raft::test_utils; - -using communication::raft::impl::RaftMemberImpl; -using communication::raft::impl::RaftMode; - -const RaftConfig test_config1{{"a"}, 150ms, 300ms, 70ms, 30ms}; -const RaftConfig test_config2{{"a", "b"}, 150ms, 300ms, 70ms, 30ms}; -const RaftConfig test_config3{{"a", "b", "c"}, 150ms, 300ms, 70ms, 30ms}; -const RaftConfig test_config5{ - {"a", "b", "c", "d", "e"}, 150ms, 300ms, 70ms, 30ms}; - -class RaftMemberImplTest : public ::testing::Test { - public: - RaftMemberImplTest() - : storage_(1, "a", {}), member(network_, storage_, "a", test_config5) {} - - void SetLog(std::vector<LogEntry<DummyState>> log) { - storage_.log_ = std::move(log); - } - - NoOpNetworkInterface<DummyState> network_; - InMemoryStorage<DummyState> storage_; - RaftMemberImpl<DummyState> member; -}; - -TEST_F(RaftMemberImplTest, Constructor) { - EXPECT_EQ(member.mode_, RaftMode::FOLLOWER); - EXPECT_EQ(member.term_, 1); - EXPECT_EQ(*member.voted_for_, "a"); - EXPECT_EQ(member.commit_index_, 0); -} - -TEST_F(RaftMemberImplTest, CandidateOrLeaderTransitionToFollower) { - member.mode_ = RaftMode::CANDIDATE; - member.CandidateTransitionToLeader(); - - member.CandidateOrLeaderTransitionToFollower(); - EXPECT_EQ(member.mode_, RaftMode::FOLLOWER); - EXPECT_EQ(member.leader_, std::experimental::nullopt); - 
EXPECT_LT(member.next_election_time_, TimePoint::max()); -} - -TEST_F(RaftMemberImplTest, CandidateTransitionToLeader) { - member.mode_ = RaftMode::CANDIDATE; - member.CandidateTransitionToLeader(); - - EXPECT_EQ(member.mode_, RaftMode::LEADER); - EXPECT_EQ(*member.leader_, "a"); - EXPECT_EQ(member.next_election_time_, TimePoint::max()); -} - -TEST_F(RaftMemberImplTest, CandidateOrLeaderNoteTerm) { - member.mode_ = RaftMode::LEADER; - member.term_ = 5; - member.CandidateOrLeaderNoteTerm(5); - - EXPECT_EQ(member.mode_, RaftMode::LEADER); - EXPECT_EQ(member.term_, 5); - - member.CandidateOrLeaderNoteTerm(6); - EXPECT_EQ(member.mode_, RaftMode::FOLLOWER); - EXPECT_EQ(member.term_, 6); -} - -TEST_F(RaftMemberImplTest, StartNewElection) { - member.StartNewElection(); - - EXPECT_EQ(member.mode_, RaftMode::CANDIDATE); - EXPECT_EQ(member.term_, 2); - EXPECT_EQ(member.voted_for_, member.id_); -} - -TEST_F(RaftMemberImplTest, CountVotes) { - member.StartNewElection(); - EXPECT_FALSE(member.CountVotes()); - - member.peer_states_["b"]->voted_for_me = true; - EXPECT_FALSE(member.CountVotes()); - - member.peer_states_["c"]->voted_for_me = true; - EXPECT_TRUE(member.CountVotes()); -} - -TEST_F(RaftMemberImplTest, AdvanceCommitIndex) { - SetLog({{1}, {1}, {1}, {1}, {2}, {2}, {2}, {2}}); - - member.mode_ = RaftMode::LEADER; - member.term_ = 2; - - member.peer_states_["b"]->match_index = 4; - member.peer_states_["c"]->match_index = 4; - - EXPECT_EQ(member.commit_index_, 0); - member.AdvanceCommitIndex(); - EXPECT_EQ(member.commit_index_, 0); - - member.peer_states_["b"]->match_index = 4; - member.peer_states_["c"]->match_index = 4; - member.AdvanceCommitIndex(); - EXPECT_EQ(member.commit_index_, 0); - - member.peer_states_["b"]->match_index = 5; - member.AdvanceCommitIndex(); - EXPECT_EQ(member.commit_index_, 0); - - member.peer_states_["c"]->match_index = 5; - member.AdvanceCommitIndex(); - EXPECT_EQ(member.commit_index_, 5); - - member.peer_states_["d"]->match_index = 6; - member.peer_states_["e"]->match_index = 7; - member.AdvanceCommitIndex(); - EXPECT_EQ(member.commit_index_, 6); - - member.peer_states_["c"]->match_index = 8; - member.AdvanceCommitIndex(); - EXPECT_EQ(member.commit_index_, 7); - - member.peer_states_["a"]->match_index = 8; - member.AdvanceCommitIndex(); - EXPECT_EQ(member.commit_index_, 8); -} - -TEST(RequestVote, SimpleElection) { - NextReplyNetworkInterface<DummyState> network; - InMemoryStorage<DummyState> storage(1, {}, {{1}, {1}}); - RaftMemberImpl<DummyState> member(network, storage, "a", test_config5); - - member.StartNewElection(); - - std::unique_lock<std::mutex> lock(member.mutex_); - - PeerRpcReply next_reply; - next_reply.type = RpcType::REQUEST_VOTE; - - network.on_request_ = [](const PeerRpcRequest<DummyState> &request) { - ASSERT_EQ(request.type, RpcType::REQUEST_VOTE); - ASSERT_EQ(request.request_vote.candidate_term, 2); - ASSERT_EQ(request.request_vote.candidate_id, "a"); - ASSERT_EQ(request.request_vote.last_log_index, 2); - ASSERT_EQ(request.request_vote.last_log_term, 1); - }; - - /* member 'b' first voted for us */ - next_reply.request_vote.term = 2; - next_reply.request_vote.vote_granted = true; - network.next_reply_ = next_reply; - member.RequestVote("b", *member.peer_states_["b"], lock); - EXPECT_EQ(member.mode_, RaftMode::CANDIDATE); - EXPECT_TRUE(member.peer_states_["b"]->request_vote_done); - EXPECT_TRUE(member.peer_states_["b"]->voted_for_me); - - /* member 'c' didn't */ - next_reply.request_vote.vote_granted = false; - network.next_reply_ = next_reply; - 
member.RequestVote("c", *member.peer_states_["c"], lock); - EXPECT_TRUE(member.peer_states_["c"]->request_vote_done); - EXPECT_FALSE(member.peer_states_["c"]->voted_for_me); - EXPECT_EQ(member.mode_, RaftMode::CANDIDATE); - - /* but member 'd' did */ - next_reply.request_vote.vote_granted = true; - network.next_reply_ = next_reply; - member.RequestVote("d", *member.peer_states_["d"], lock); - EXPECT_TRUE(member.peer_states_["d"]->request_vote_done); - EXPECT_TRUE(member.peer_states_["d"]->voted_for_me); - EXPECT_EQ(member.mode_, RaftMode::LEADER); - - /* no-op entry should be at the end of leader's log */ - EXPECT_EQ(storage.log_.back().term, 2); - EXPECT_EQ(storage.log_.back().command, std::experimental::nullopt); -} - -TEST(AppendEntries, SimpleLogSync) { - NextReplyNetworkInterface<DummyState> network; - InMemoryStorage<DummyState> storage(3, {}, {{1}, {1}, {2}, {3}}); - RaftMemberImpl<DummyState> member(network, storage, "a", test_config2); - - member.mode_ = RaftMode::LEADER; - - std::unique_lock<std::mutex> lock(member.mutex_); - - PeerRpcReply reply; - reply.type = RpcType::APPEND_ENTRIES; - - reply.append_entries.term = 3; - reply.append_entries.success = false; - network.next_reply_ = reply; - - LogIndex expected_prev_log_index; - TermId expected_prev_log_term; - std::vector<LogEntry<DummyState>> expected_entries; - - network.on_request_ = [&](const PeerRpcRequest<DummyState> &request) { - EXPECT_EQ(request.type, RpcType::APPEND_ENTRIES); - EXPECT_EQ(request.append_entries.leader_term, 3); - EXPECT_EQ(request.append_entries.leader_id, "a"); - EXPECT_EQ(request.append_entries.prev_log_index, expected_prev_log_index); - EXPECT_EQ(request.append_entries.prev_log_term, expected_prev_log_term); - EXPECT_EQ(request.append_entries.entries, expected_entries); - }; - - /* initial state after election */ - auto &peer_state = *member.peer_states_["b"]; - peer_state.match_index = 0; - peer_state.next_index = 5; - peer_state.suppress_log_entries = true; - - /* send a heartbeat and find out logs don't match */ - expected_prev_log_index = 4; - expected_prev_log_term = 3; - expected_entries = {}; - member.AppendEntries("b", peer_state, lock); - EXPECT_EQ(peer_state.match_index, 0); - EXPECT_EQ(peer_state.next_index, 4); - EXPECT_EQ(member.commit_index_, 0); - - /* move `next_index` until we find a match, `expected_entries` will be empty - * because `suppress_log_entries` will be true */ - expected_entries = {}; - - expected_prev_log_index = 3; - expected_prev_log_term = 2; - member.AppendEntries("b", peer_state, lock); - EXPECT_EQ(peer_state.match_index, 0); - EXPECT_EQ(peer_state.next_index, 3); - EXPECT_EQ(peer_state.suppress_log_entries, true); - EXPECT_EQ(member.commit_index_, 0); - - expected_prev_log_index = 2; - expected_prev_log_term = 1; - member.AppendEntries("b", peer_state, lock); - EXPECT_EQ(peer_state.match_index, 0); - EXPECT_EQ(peer_state.next_index, 2); - EXPECT_EQ(peer_state.suppress_log_entries, true); - EXPECT_EQ(member.commit_index_, 0); - - /* we found a match */ - reply.append_entries.success = true; - network.next_reply_ = reply; - - expected_prev_log_index = 1; - expected_prev_log_term = 1; - member.AppendEntries("b", peer_state, lock); - EXPECT_EQ(peer_state.match_index, 1); - EXPECT_EQ(peer_state.next_index, 2); - EXPECT_EQ(peer_state.suppress_log_entries, false); - EXPECT_EQ(member.commit_index_, 4); - - /* now sync them */ - expected_prev_log_index = 1; - expected_prev_log_term = 1; - expected_entries = {{1}, {2}, {3}}; - member.AppendEntries("b", peer_state, lock); - 
EXPECT_EQ(peer_state.match_index, 4); - EXPECT_EQ(peer_state.next_index, 5); - EXPECT_EQ(peer_state.suppress_log_entries, false); - EXPECT_EQ(member.commit_index_, 4); - - /* heartbeat after successful log sync */ - expected_prev_log_index = 4; - expected_prev_log_term = 3; - expected_entries = {}; - member.AppendEntries("b", peer_state, lock); - EXPECT_EQ(peer_state.match_index, 4); - EXPECT_EQ(peer_state.next_index, 5); - EXPECT_EQ(member.commit_index_, 4); - - /* replicate a newly appended entry */ - storage.AppendLogEntry({3}); - - expected_prev_log_index = 4; - expected_prev_log_term = 3; - expected_entries = {{3}}; - member.AppendEntries("b", peer_state, lock); - EXPECT_EQ(peer_state.match_index, 5); - EXPECT_EQ(peer_state.next_index, 6); - EXPECT_EQ(member.commit_index_, 5); -} - -template <class TestParam> -class RaftMemberParamTest : public ::testing::TestWithParam<TestParam> { - public: - virtual void SetUp() { - /* Some checks to verify that test case is valid. */ - - /* Member's term should be greater than or equal to last log term. */ - ASSERT_GE(storage_.term_, storage_.GetLogTerm(storage_.GetLastLogIndex())); - - ASSERT_GE(peer_storage_.term_, - peer_storage_.GetLogTerm(peer_storage_.GetLastLogIndex())); - - /* If two logs match at some index, the entire prefix should match. */ - LogIndex pos = - std::min(storage_.GetLastLogIndex(), peer_storage_.GetLastLogIndex()); - - for (; pos > 0; --pos) { - if (storage_.GetLogEntry(pos) == peer_storage_.GetLogEntry(pos)) { - break; - } - } - - for (; pos > 0; --pos) { - ASSERT_EQ(storage_.GetLogEntry(pos), peer_storage_.GetLogEntry(pos)); - } - } - - RaftMemberParamTest(InMemoryStorage<DummyState> storage, - InMemoryStorage<DummyState> peer_storage) - : network_(NoOpNetworkInterface<DummyState>()), - storage_(storage), - member_(network_, storage_, "a", test_config3), - peer_storage_(peer_storage) {} - - NoOpNetworkInterface<DummyState> network_; - InMemoryStorage<DummyState> storage_; - RaftMemberImpl<DummyState> member_; - - InMemoryStorage<DummyState> peer_storage_; -}; - -struct OnRequestVoteTestParam { - TermId term; - std::experimental::optional<MemberId> voted_for; - std::vector<LogEntry<DummyState>> log; - - TermId peer_term; - std::vector<LogEntry<DummyState>> peer_log; - - bool expected_reply; -}; - -class OnRequestVoteTest : public RaftMemberParamTest<OnRequestVoteTestParam> { - public: - OnRequestVoteTest() - : RaftMemberParamTest( - InMemoryStorage<DummyState>(GetParam().term, GetParam().voted_for, - GetParam().log), - InMemoryStorage<DummyState>(GetParam().peer_term, {}, - GetParam().peer_log)) {} - virtual ~OnRequestVoteTest() {} -}; - -TEST_P(OnRequestVoteTest, RequestVoteTest) { - auto reply = member_.OnRequestVote( - {GetParam().peer_term, "b", peer_storage_.GetLastLogIndex(), - peer_storage_.GetLogTerm(peer_storage_.GetLastLogIndex())}); - - EXPECT_EQ(reply.vote_granted, GetParam().expected_reply); - - /* Our term should always be at least as large as sender's term. */ - /* If we accepted the request, our term should be equal to candidate's term - * and voted_for should be set. */ - EXPECT_EQ(reply.term, std::max(GetParam().peer_term, GetParam().term)); - EXPECT_EQ(storage_.term_, std::max(GetParam().peer_term, GetParam().term)); - EXPECT_EQ(storage_.voted_for_, - reply.vote_granted ? "b" : GetParam().voted_for); -} - -/* Member 'b' is starting an election for term 5 and sending RequestVote RPC - * to 'a'. Logs are empty so log-up-to-date check will always pass. 
*/ -INSTANTIATE_TEST_CASE_P( - TermAndVotedForCheck, OnRequestVoteTest, - Values( - /* we didn't vote for anyone in a smaller term -> accept */ - OnRequestVoteTestParam{3, {}, {}, 5, {}, true}, - /* we voted for someone in smaller term -> accept */ - OnRequestVoteTestParam{4, "c", {}, 5, {}, true}, - /* equal term but we didn't vote for anyone in it -> accept */ - OnRequestVoteTestParam{5, {}, {}, 5, {}, true}, - /* equal term but we voted for this candidate -> accept */ - OnRequestVoteTestParam{5, "b", {}, 5, {}, true}, - /* equal term but we voted for someone else -> decline */ - OnRequestVoteTestParam{5, "c", {}, 5, {}, false}, - /* larger term and haven't voted for anyone -> decline */ - OnRequestVoteTestParam{6, {}, {}, 5, {}, false}, - /* larger term and we voted for someone else -> decline */ - OnRequestVoteTestParam{6, "a", {}, 5, {}, false})); - -/* Member 'a' log: - * 1 2 3 4 5 6 7 - * | 1 | 1 | 1 | 2 | 3 | 3 | - * - * It is in term 5. - */ - -/* Member 'b' is sending RequestVote RPC to 'a' for term 8. */ -INSTANTIATE_TEST_CASE_P( - LogUpToDateCheck, OnRequestVoteTest, - Values( - /* candidate's last log term is smaller -> decline */ - OnRequestVoteTestParam{5, - {}, - {{1}, {1}, {1}, {2}, {3}, {3}}, - 8, - {{1}, {1}, {1}, {2}}, - false}, - /* candidate's last log term is smaller -> decline */ - OnRequestVoteTestParam{5, - {}, - {{1}, {1}, {1}, {2}, {3}, {3}}, - 8, - {{1}, {1}, {1}, {2}, {2}, {2}, {2}}, - false}, - /* candidate's term is equal, but our log is longer -> decline */ - OnRequestVoteTestParam{5, - {}, - {{1}, {1}, {1}, {2}, {3}, {3}}, - 8, - {{1}, {1}, {1}, {2}, {3}}, - false}, - /* equal logs -> accept */ - OnRequestVoteTestParam{5, - {}, - {{1}, {1}, {1}, {2}, {3}, {3}}, - 8, - {{1}, {1}, {1}, {2}, {3}, {3}}, - true}, - /* candidate's term is larger -> accept */ - OnRequestVoteTestParam{5, - {}, - {{1}, {1}, {1}, {2}, {3}, {3}}, - 8, - {{1}, {1}, {1}, {2}, {4}}, - true}, - /* equal terms, but candidate's log is longer -> accept */ - OnRequestVoteTestParam{5, - {}, - {{1}, {1}, {1}, {2}, {3}, {3}}, - 8, - {{1}, {1}, {1}, {2}, {3}, {3}, {3}}, - true}, - /* candidate's last log term is larger -> accept */ - OnRequestVoteTestParam{5, - {}, - {{1}, {1}, {1}, {2}, {3}, {3}}, - 8, - {{1}, {2}, {3}, {4}, {5}}, - true})); - -struct OnAppendEntriesTestParam { - TermId term; - std::vector<LogEntry<DummyState>> log; - - TermId peer_term; - std::vector<LogEntry<DummyState>> peer_log; - LogIndex peer_next_index; - - bool expected_reply; - std::vector<LogEntry<DummyState>> expected_log; -}; - -class OnAppendEntriesTest - : public RaftMemberParamTest<OnAppendEntriesTestParam> { - public: - OnAppendEntriesTest() - : RaftMemberParamTest( - InMemoryStorage<DummyState>(GetParam().term, {}, GetParam().log), - InMemoryStorage<DummyState>(GetParam().peer_term, {}, - GetParam().peer_log)) {} - virtual ~OnAppendEntriesTest() {} -}; - -TEST_P(OnAppendEntriesTest, All) { - auto last_log_index = GetParam().peer_next_index - 1; - auto last_log_term = peer_storage_.GetLogTerm(last_log_index); - auto entries = peer_storage_.GetLogSuffix(GetParam().peer_next_index); - auto reply = member_.OnAppendEntries( - {GetParam().peer_term, "b", last_log_index, last_log_term, entries, 0}); - - EXPECT_EQ(reply.success, GetParam().expected_reply); - EXPECT_EQ(reply.term, std::max(GetParam().peer_term, GetParam().term)); - EXPECT_EQ(storage_.log_, GetParam().expected_log); -} - -/* Member 'a' received AppendEntries RPC from member 'b'.
The request will - * contain no log entries, representing just a heartbeat, as it is not - * important in these scenarios. */ -INSTANTIATE_TEST_CASE_P( - TermAndLogConsistencyCheck, OnAppendEntriesTest, - Values( - /* sender has stale term -> decline */ - OnAppendEntriesTestParam{/* my term*/ 8, - {{1}, {1}, {2}}, - 7, - {{1}, {1}, {2}, {3}, {4}, {5}, {5}, {6}}, - 7, - false, - {{1}, {1}, {2}}}, - /* we're missing entries 4, 5 and 6 -> decline, but update term */ - OnAppendEntriesTestParam{4, - {{1}, {1}, {2}}, - 8, - {{1}, {1}, {2}, {3}, {4}, {5}, {5}, {6}}, - 7, - false, - {{1}, {1}, {2}}}, - /* we're missing entry 4 -> decline, but update term */ - OnAppendEntriesTestParam{5, - {{1}, {1}, {2}}, - 8, - {{1}, {1}, {2}, {3}, {4}, {5}, {5}, {6}}, - 5, - false, - {{1}, {1}, {2}}}, - /* log terms don't match at entry 4 -> decline, but update term */ - OnAppendEntriesTestParam{5, - {{1}, {1}, {2}}, - 8, - {{1}, {1}, {3}, {3}, {4}, {5}, {5}, {6}}, - 4, - false, - {{1}, {1}, {2}}}, - /* logs match -> accept and update term */ - OnAppendEntriesTestParam{5, - {{1}, {1}, {2}}, - 8, - {{1}, {1}, {2}, {3}, {4}, {5}, {5}, {6}}, - 4, - true, - {{1}, {1}, {2}, {3}, {4}, {5}, {5}, {6}}}, - /* now follow some log truncation tests */ - /* no truncation, append a single entry */ - OnAppendEntriesTestParam{ - 8, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}}, - 8, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}, - 9, - true, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}}, - /* no truncation, append multiple entries */ - OnAppendEntriesTestParam{ - 8, - {{1}, {1}, {1}, {4}}, - 8, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}, - 4, - true, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}}, - /* no truncation, leader's log is prefix of ours */ - OnAppendEntriesTestParam{ - 8, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}, {6}}, - 8, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}, - 4, - true, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}, {6}}}, - /* another one, now with entries from newer term */ - OnAppendEntriesTestParam{ - 8, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}, {7}, {7}}, - 8, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}, - 4, - true, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}, {7}, {7}}}, - /* no truncation, partial match between our log and appended entries - */ - OnAppendEntriesTestParam{ - 8, - {{1}, {1}, {1}, {4}, {4}, {5}}, - 8, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}, - 4, - true, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}}, - /* truncate suffix */ - OnAppendEntriesTestParam{ - 8, - {{1}, {1}, {1}, {4}, {4}, {4}, {4}}, - 8, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}, - 5, - true, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}}, - /* truncate suffix, with partial match between our log and appended - entries */ - OnAppendEntriesTestParam{ - 8, - {{1}, {1}, {1}, {4}, {4}, {4}, {4}}, - 8, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}, - 4, - true, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}}, - /* delete whole log */ - OnAppendEntriesTestParam{ - 8, - {{5}}, - 8, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}, - 1, - true, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}}, - /* append on empty log */ - OnAppendEntriesTestParam{ - 8, - {{}}, - 8, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}, - 1, - true, - {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}})); - -TEST(RaftMemberTest, AddCommand) { - NextReplyNetworkInterface<IntState> network; - -
std::vector<IntState::Change> changes = {{IntState::Change::Type::ADD, 5}, - {IntState::Change::Type::ADD, 10}}; - - network.on_request_ = [&network, num_calls = 0 ]( - const PeerRpcRequest<IntState> &request) mutable { - ++num_calls; - PeerRpcReply reply; - - if (num_calls == 1) { - reply.type = RpcType::REQUEST_VOTE; - reply.request_vote.term = 1; - reply.request_vote.vote_granted = true; - } else { - reply.type = RpcType::APPEND_ENTRIES; - reply.append_entries.term = 1; - reply.append_entries.success = true; - } - - network.next_reply_ = reply; - }; - - InMemoryStorage<IntState> storage(0, {}, {}); - RaftMember<IntState> member(network, storage, "a", test_config2); - - std::this_thread::sleep_for(500ms); - - member.AddCommand(changes[0], false); - member.AddCommand(changes[1], true); - - ASSERT_EQ(storage.log_.size(), 3); - EXPECT_EQ(storage.log_[0].command, std::experimental::nullopt); - EXPECT_TRUE(storage.log_[1].command && - *storage.log_[1].command == changes[0]); - EXPECT_TRUE(storage.log_[2].command && - *storage.log_[2].command == changes[1]); -} diff --git a/tests/unit/raft_storage.cpp b/tests/unit/raft_storage.cpp deleted file mode 100644 index 0d101dced..000000000 --- a/tests/unit/raft_storage.cpp +++ /dev/null @@ -1,71 +0,0 @@ -#include <experimental/optional> - -#include "gtest/gtest.h" - -#include "communication/raft/storage/file.hpp" -#include "communication/raft/test_utils.hpp" - -using communication::raft::LogEntry; -using communication::raft::SimpleFileStorage; -using communication::raft::test_utils::IntState; - -TEST(SimpleFileStorageTest, All) { - typedef LogEntry<IntState> Log; - auto GetLog = [](int term, int d) { - return Log{term, IntState::Change{IntState::Change::Type::SET, d}}; - }; - - { - SimpleFileStorage<IntState> storage(fs::path("raft_storage_test_dir")); - EXPECT_EQ(storage.GetTermAndVotedFor().first, 0); - EXPECT_EQ(storage.GetTermAndVotedFor().second, std::experimental::nullopt); - EXPECT_EQ(storage.GetLastLogIndex(), 0); - - storage.WriteTermAndVotedFor(1, "a"); - EXPECT_EQ(storage.GetTermAndVotedFor().first, 1); - EXPECT_EQ(*storage.GetTermAndVotedFor().second, "a"); - - storage.AppendLogEntry(GetLog(1, 1)); - storage.AppendLogEntry(GetLog(1, 2)); - - EXPECT_EQ(storage.GetLastLogIndex(), 2); - - EXPECT_EQ(storage.GetLogSuffix(1), - std::vector<Log>({GetLog(1, 1), GetLog(1, 2)})); - } - - { - SimpleFileStorage<IntState> storage(fs::path("raft_storage_test_dir")); - - EXPECT_EQ(storage.GetTermAndVotedFor().first, 1); - EXPECT_EQ(*storage.GetTermAndVotedFor().second, "a"); - EXPECT_EQ(storage.GetLastLogIndex(), 2); - EXPECT_EQ(storage.GetLogSuffix(1), - std::vector<Log>({GetLog(1, 1), GetLog(1, 2)})); - - storage.TruncateLogSuffix(2); - EXPECT_EQ(storage.GetLogSuffix(1), std::vector<Log>({GetLog(1, 1)})); - - storage.WriteTermAndVotedFor(2, std::experimental::nullopt); - storage.AppendLogEntry(GetLog(2, 3)); - - EXPECT_EQ(storage.GetTermAndVotedFor().first, 2); - EXPECT_EQ(storage.GetTermAndVotedFor().second, std::experimental::nullopt); - EXPECT_EQ(storage.GetLogSuffix(1), - std::vector<Log>({GetLog(1, 1), GetLog(2, 3)})); - } - - { - SimpleFileStorage<IntState> storage(fs::path("raft_storage_test_dir")); - - EXPECT_EQ(storage.GetTermAndVotedFor().first, 2); - EXPECT_EQ(storage.GetTermAndVotedFor().second, std::experimental::nullopt); - EXPECT_EQ(storage.GetLogSuffix(1), - std::vector<Log>({GetLog(1, 1), GetLog(2, 3)})); - } - - fs::remove("raft_storage_test_dir/metadata"); - fs::remove("raft_storage_test_dir/1"); - 
fs::remove("raft_storage_test_dir/2"); - fs::remove("raft_storage_test_dir"); -} diff --git a/tests/unit/rpc.cpp b/tests/unit/rpc.cpp deleted file mode 100644 index 09de7a552..000000000 --- a/tests/unit/rpc.cpp +++ /dev/null @@ -1,199 +0,0 @@ -#include <thread> - -#include "capnp/serialize.h" -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -#include "communication/rpc/client.hpp" -#include "communication/rpc/client_pool.hpp" -#include "communication/rpc/messages.hpp" -#include "communication/rpc/server.hpp" -#include "utils/timer.hpp" - -using namespace communication::rpc; -using namespace std::literals::chrono_literals; - -struct SumReq { - using Capnp = ::capnp::AnyPointer; - static const MessageType TypeInfo; - - SumReq() {} // Needed for serialization. - SumReq(int x, int y) : x(x), y(y) {} - int x; - int y; - - void Save(::capnp::AnyPointer::Builder *builder) const { - auto list_builder = builder->initAs<::capnp::List<int>>(2); - list_builder.set(0, x); - list_builder.set(1, y); - } - - void Load(const ::capnp::AnyPointer::Reader &reader) { - auto list_reader = reader.getAs<::capnp::List<int>>(); - x = list_reader[0]; - y = list_reader[1]; - } -}; - -const MessageType SumReq::TypeInfo{0, "SumReq"}; - -struct SumRes { - using Capnp = ::capnp::AnyPointer; - static const MessageType TypeInfo; - - SumRes() {} // Needed for serialization. - SumRes(int sum) : sum(sum) {} - - int sum; - - void Save(::capnp::AnyPointer::Builder *builder) const { - auto list_builder = builder->initAs<::capnp::List<int>>(1); - list_builder.set(0, sum); - } - - void Load(const ::capnp::AnyPointer::Reader &reader) { - auto list_reader = reader.getAs<::capnp::List<int>>(); - sum = list_reader[0]; - } -}; - -const MessageType SumRes::TypeInfo{1, "SumRes"}; - -using Sum = RequestResponse<SumReq, SumRes>; - -struct EchoMessage { - using Capnp = ::capnp::AnyPointer; - static const MessageType TypeInfo; - - EchoMessage() {} // Needed for serialization. 
- EchoMessage(const std::string &data) : data(data) {} - - std::string data; - - void Save(::capnp::AnyPointer::Builder *builder) const { - auto list_builder = builder->initAs<::capnp::List<::capnp::Text>>(1); - list_builder.set(0, data); - } - - void Load(const ::capnp::AnyPointer::Reader &reader) { - auto list_reader = reader.getAs<::capnp::List<::capnp::Text>>(); - data = list_reader[0]; - } -}; - -const MessageType EchoMessage::TypeInfo{2, "EchoMessage"}; - -using Echo = RequestResponse<EchoMessage, EchoMessage>; - -TEST(Rpc, Call) { - Server server({"127.0.0.1", 0}); - server.Register<Sum>([](const auto &req_reader, auto *res_builder) { - SumReq req; - req.Load(req_reader); - SumRes res(req.x + req.y); - res.Save(res_builder); - }); - std::this_thread::sleep_for(100ms); - - Client client(server.endpoint()); - auto sum = client.Call<Sum>(10, 20); - ASSERT_TRUE(sum); - EXPECT_EQ(sum->sum, 30); -} - -TEST(Rpc, Abort) { - Server server({"127.0.0.1", 0}); - server.Register<Sum>([](const auto &req_reader, auto *res_builder) { - SumReq req; - req.Load(req_reader); - std::this_thread::sleep_for(500ms); - SumRes res(req.x + req.y); - res.Save(res_builder); - }); - std::this_thread::sleep_for(100ms); - - Client client(server.endpoint()); - - std::thread thread([&client]() { - std::this_thread::sleep_for(100ms); - LOG(INFO) << "Shutting down the connection!"; - client.Abort(); - }); - - utils::Timer timer; - auto sum = client.Call<Sum>(10, 20); - EXPECT_FALSE(sum); - EXPECT_LT(timer.Elapsed(), 200ms); - - thread.join(); -} - -TEST(Rpc, ClientPool) { - Server server({"127.0.0.1", 0}); - server.Register<Sum>([](const auto &req_reader, auto *res_builder) { - SumReq req; - req.Load(req_reader); - std::this_thread::sleep_for(100ms); - SumRes res(req.x + req.y); - res.Save(res_builder); - }); - std::this_thread::sleep_for(100ms); - - Client client(server.endpoint()); - - /* these calls should take more than 400ms because we're using a regular - * client */ - auto get_sum_client = [&client](int x, int y) { - auto sum = client.Call<Sum>(x, y); - ASSERT_TRUE(sum); - EXPECT_EQ(sum->sum, x + y); - }; - - utils::Timer t1; - std::vector<std::thread> threads; - for (int i = 0; i < 4; ++i) { - threads.emplace_back(get_sum_client, 2 * i, 2 * i + 1); - } - for (int i = 0; i < 4; ++i) { - threads[i].join(); - } - threads.clear(); - - EXPECT_GE(t1.Elapsed(), 400ms); - - ClientPool pool(server.endpoint()); - - /* these calls shouldn't take much more than 100ms because they execute in - * parallel */ - auto get_sum = [&pool](int x, int y) { - auto sum = pool.Call<Sum>(x, y); - ASSERT_TRUE(sum); - EXPECT_EQ(sum->sum, x + y); - }; - - utils::Timer t2; - for (int i = 0; i < 4; ++i) { - threads.emplace_back(get_sum, 2 * i, 2 * i + 1); - } - for (int i = 0; i < 4; ++i) { - threads[i].join(); - } - EXPECT_LE(t2.Elapsed(), 200ms); -} - -TEST(Rpc, LargeMessage) { - Server server({"127.0.0.1", 0}); - server.Register<Echo>([](const auto &req_reader, auto *res_builder) { - EchoMessage res; - res.Load(req_reader); - res.Save(res_builder); - }); - std::this_thread::sleep_for(100ms); - - std::string testdata(100000, 'a'); - - Client client(server.endpoint()); - auto echo = client.Call<Echo>(testdata); - ASSERT_TRUE(echo); - EXPECT_EQ(echo->data, testdata); -} diff --git a/tests/unit/rpc_worker_clients.cpp b/tests/unit/rpc_worker_clients.cpp deleted file mode 100644 index 1153a4fb0..000000000 --- a/tests/unit/rpc_worker_clients.cpp +++ /dev/null @@ -1,146 +0,0 @@ -#include <mutex> - -#include "capnp/serialize.h" -#include
"gmock/gmock.h" -#include "gtest/gtest.h" - -#include "communication/rpc/messages.hpp" -#include "communication/rpc/server.hpp" -#include "distributed/cluster_discovery_master.hpp" -#include "distributed/cluster_discovery_worker.hpp" -#include "distributed/coordination_master.hpp" -#include "distributed/coordination_worker.hpp" -#include "distributed/rpc_worker_clients.hpp" -#include "distributed/serialization.hpp" -#include "io/network/endpoint.hpp" - -using namespace std::literals::chrono_literals; - -namespace distributed { - -struct IncrementCounterReq { - using Capnp = ::capnp::AnyPointer; - static const communication::rpc::MessageType TypeInfo; - - void Save(::capnp::AnyPointer::Builder *) const {} - - void Load(const ::capnp::AnyPointer::Reader &) {} -}; - -const communication::rpc::MessageType IncrementCounterReq::TypeInfo{ - 0, "IncrementCounterReq"}; - -struct IncrementCounterRes { - using Capnp = ::capnp::AnyPointer; - static const communication::rpc::MessageType TypeInfo; - - void Save(::capnp::AnyPointer::Builder *) const {} - - void Load(const ::capnp::AnyPointer::Reader &) {} -}; - -const communication::rpc::MessageType IncrementCounterRes::TypeInfo{ - 1, "IncrementCounterRes"}; - -using IncrementCounterRpc = - communication::rpc::RequestResponse<IncrementCounterReq, - IncrementCounterRes>; -}; // namespace distributed - -class RpcWorkerClientsTest : public ::testing::Test { - protected: - const io::network::Endpoint kLocalHost{"127.0.0.1", 0}; - const int kWorkerCount = 2; - void SetUp() override { - master_coord_->SetRecoveryInfo(std::experimental::nullopt); - for (int i = 1; i <= kWorkerCount; ++i) { - workers_server_.emplace_back( - std::make_unique<communication::rpc::Server>(kLocalHost)); - - workers_coord_.emplace_back( - std::make_unique<distributed::WorkerCoordination>( - *workers_server_.back(), master_server_.endpoint())); - - cluster_discovery_.emplace_back( - std::make_unique<distributed::ClusterDiscoveryWorker>( - *workers_server_.back(), *workers_coord_.back(), - rpc_workers_.GetClientPool(0))); - - cluster_discovery_.back()->RegisterWorker(i); - - workers_server_.back()->Register<distributed::IncrementCounterRpc>( - [this, i](const auto &req_reader, auto *res_builder) { - std::unique_lock<std::mutex> lock(mutex_); - workers_cnt_[i]++; - }); - } - } - - void TearDown() override { - std::vector<std::thread> wait_on_shutdown; - for (int i = 0; i < workers_coord_.size(); ++i) { - wait_on_shutdown.emplace_back([i, this]() { - workers_coord_[i]->WaitForShutdown(); - workers_server_[i] = nullptr; - }); - } - - std::this_thread::sleep_for(300ms); - - // Starts server shutdown and notifies the workers - master_coord_ = std::experimental::nullopt; - for (auto &worker : wait_on_shutdown) worker.join(); - } - - std::vector<std::unique_ptr<communication::rpc::Server>> workers_server_; - std::vector<std::unique_ptr<distributed::WorkerCoordination>> workers_coord_; - std::vector<std::unique_ptr<distributed::ClusterDiscoveryWorker>> - cluster_discovery_; - std::mutex mutex_; - std::unordered_map<int, int> workers_cnt_; - - communication::rpc::Server master_server_{kLocalHost}; - std::experimental::optional<distributed::MasterCoordination> master_coord_{ - master_server_.endpoint()}; - - distributed::RpcWorkerClients rpc_workers_{*master_coord_}; - distributed::ClusterDiscoveryMaster cluster_disocvery_{ - master_server_, *master_coord_, rpc_workers_}; -}; - -TEST_F(RpcWorkerClientsTest, GetWorkerIds) { - EXPECT_THAT(rpc_workers_.GetWorkerIds(), 
testing::UnorderedElementsAreArray( - master_coord_->GetWorkerIds())); -} - -TEST_F(RpcWorkerClientsTest, GetClientPool) { - auto &pool1 = rpc_workers_.GetClientPool(1); - auto &pool2 = rpc_workers_.GetClientPool(2); - EXPECT_NE(&pool1, &pool2); - EXPECT_EQ(&pool1, &rpc_workers_.GetClientPool(1)); -} - -TEST_F(RpcWorkerClientsTest, ExecuteOnWorker) { - auto execute = [](int worker_id, auto &client) -> void { - ASSERT_TRUE(client.template Call<distributed::IncrementCounterRpc>()); - }; - - rpc_workers_.ExecuteOnWorker<void>(1, execute).get(); - EXPECT_EQ(workers_cnt_[0], 0); - EXPECT_EQ(workers_cnt_[1], 1); - EXPECT_EQ(workers_cnt_[2], 0); -} - -TEST_F(RpcWorkerClientsTest, ExecuteOnWorkers) { - auto execute = [](int worker_id, auto &client) -> void { - ASSERT_TRUE(client.template Call<distributed::IncrementCounterRpc>()); - }; - - // Skip master - for (auto &future : rpc_workers_.ExecuteOnWorkers<void>(0, execute)) - future.get(); - - EXPECT_EQ(workers_cnt_[0], 0); - EXPECT_EQ(workers_cnt_[1], 1); - EXPECT_EQ(workers_cnt_[2], 1); -} diff --git a/tests/unit/serialization.cpp b/tests/unit/serialization.cpp deleted file mode 100644 index ac255efe9..000000000 --- a/tests/unit/serialization.cpp +++ /dev/null @@ -1,390 +0,0 @@ -#include <experimental/optional> -#include <sstream> - -#include "gtest/gtest.h" - -#include "boost/archive/binary_iarchive.hpp" -#include "boost/archive/binary_oarchive.hpp" -#include "capnp/message.h" -#include "utils/serialization.hpp" - -using std::experimental::optional; -using std::string_literals::operator""s; - -TEST(Serialization, Optional) { - std::stringstream ss; - - optional<int> x1 = {}; - optional<int> x2 = 42; - optional<int> y1, y2; - - { - boost::archive::binary_oarchive ar(ss); - ar << x1; - ar << x2; - } - - { - boost::archive::binary_iarchive ar(ss); - ar >> y1; - ar >> y2; - } - - EXPECT_EQ(x1, y1); - EXPECT_EQ(x2, y2); -} - -TEST(Serialization, Tuple) { - std::stringstream ss; - - auto x1 = std::make_tuple("foo"s, 42, std::experimental::make_optional(3.14)); - auto x2 = std::make_tuple(); - auto x3 = std::make_tuple(1, 2, 3, 4, 5); - - decltype(x1) y1; - decltype(x2) y2; - decltype(x3) y3; - - { - boost::archive::binary_oarchive ar(ss); - ar << x1; - ar << x2; - ar << x3; - } - - { - boost::archive::binary_iarchive ar(ss); - ar >> y1; - ar >> y2; - ar >> y3; - } - - EXPECT_EQ(x1, y1); - EXPECT_EQ(x2, y2); - EXPECT_EQ(x3, y3); -} - -void CheckOptionalInt(const std::experimental::optional<int> &x1) { - ::capnp::MallocMessageBuilder message; - std::experimental::optional<int> y1; - { - auto builder = - message.initRoot<utils::capnp::Optional<utils::capnp::BoxInt32>>(); - auto save = [](utils::capnp::BoxInt32::Builder *builder, int value) { - builder->setValue(value); - }; - utils::SaveOptional<utils::capnp::BoxInt32, int>(x1, &builder, save); - } - - { - auto reader = - message.getRoot<utils::capnp::Optional<utils::capnp::BoxInt32>>(); - auto load = [](const utils::capnp::BoxInt32::Reader &reader) -> int { - return reader.getValue(); - }; - y1 = utils::LoadOptional<utils::capnp::BoxInt32, int>(reader, load); - } - - EXPECT_EQ(x1, y1); -} - -TEST(Serialization, CapnpOptional) { - std::experimental::optional<int> x1 = {}; - std::experimental::optional<int> x2 = 42; - - CheckOptionalInt(x1); - CheckOptionalInt(x2); -} - -TEST(Serialization, CapnpOptionalNonCopyable) { - std::experimental::optional<std::unique_ptr<int>> data = - std::make_unique<int>(5); - ::capnp::MallocMessageBuilder message; - { - auto builder = message.initRoot<utils::capnp::Optional< 
- utils::capnp::UniquePtr<utils::capnp::BoxInt32>>>(); - auto save = [](auto *ptr_builder, const auto &data) { - auto save_int = [](auto *int_builder, int value) { - int_builder->setValue(value); - }; - utils::SaveUniquePtr<utils::capnp::BoxInt32, int>(data, ptr_builder, - save_int); - }; - utils::SaveOptional<utils::capnp::UniquePtr<utils::capnp::BoxInt32>, - std::unique_ptr<int>>(data, &builder, save); - } - std::experimental::optional<std::unique_ptr<int>> element; - { - auto reader = message.getRoot<utils::capnp::Optional< - utils::capnp::UniquePtr<utils::capnp::BoxInt32>>>(); - auto load = [](const auto &ptr_reader) { - auto load_int = [](const auto &int_reader) { - return new int(int_reader.getValue()); - }; - return utils::LoadUniquePtr<utils::capnp::BoxInt32, int>(ptr_reader, - load_int); - }; - element = - utils::LoadOptional<utils::capnp::UniquePtr<utils::capnp::BoxInt32>, - std::unique_ptr<int>>(reader, load); - } - EXPECT_EQ(*element.value(), 5); -} - -void CheckUniquePtrInt(const std::unique_ptr<int> &x1) { - ::capnp::MallocMessageBuilder message; - std::unique_ptr<int> y1; - { - auto builder = - message.initRoot<utils::capnp::UniquePtr<utils::capnp::BoxInt32>>(); - auto save = [](utils::capnp::BoxInt32::Builder *builder, int value) { - builder->setValue(value); - }; - utils::SaveUniquePtr<utils::capnp::BoxInt32, int>(x1, &builder, save); - } - { - auto reader = - message.getRoot<utils::capnp::UniquePtr<utils::capnp::BoxInt32>>(); - auto load = [](const auto &int_reader) { - return new int(int_reader.getValue()); - }; - y1 = utils::LoadUniquePtr<utils::capnp::BoxInt32, int>(reader, load); - } - if (!x1) - EXPECT_EQ(y1, nullptr); - else - EXPECT_EQ(*x1, *y1); -} - -TEST(Serialization, CapnpUniquePtr) { - auto x1 = std::make_unique<int>(42); - std::unique_ptr<int> x2; - - CheckUniquePtrInt(x1); - CheckUniquePtrInt(x2); -} - -TEST(Serialization, CapnpUniquePtrNonCopyable) { - std::unique_ptr<std::unique_ptr<int>> data = - std::make_unique<std::unique_ptr<int>>(std::make_unique<int>(5)); - ::capnp::MallocMessageBuilder message; - { - auto builder = message.initRoot<utils::capnp::UniquePtr< - utils::capnp::UniquePtr<utils::capnp::BoxInt32>>>(); - auto save = [](auto *ptr_builder, const auto &data) { - auto save_int = [](auto *int_builder, int value) { - int_builder->setValue(value); - }; - utils::SaveUniquePtr<utils::capnp::BoxInt32, int>(data, ptr_builder, - save_int); - }; - utils::SaveUniquePtr<utils::capnp::UniquePtr<utils::capnp::BoxInt32>, - std::unique_ptr<int>>(data, &builder, save); - } - std::unique_ptr<std::unique_ptr<int>> element; - { - auto reader = message.getRoot<utils::capnp::UniquePtr< - utils::capnp::UniquePtr<utils::capnp::BoxInt32>>>(); - auto load = [](const auto &ptr_reader) { - auto load_int = [](const auto &int_reader) { - return new int(int_reader.getValue()); - }; - return new std::unique_ptr<int>( - utils::LoadUniquePtr<utils::capnp::BoxInt32, int>(ptr_reader, - load_int)); - }; - element = - utils::LoadUniquePtr<utils::capnp::UniquePtr<utils::capnp::BoxInt32>, - std::unique_ptr<int>>(reader, load); - } - EXPECT_EQ(**element, 5); -} - -TEST(Serialization, CapnpSharedPtr) { - std::vector<int *> saved_pointers; - auto p1 = std::make_shared<int>(5); - std::shared_ptr<int> p2; - std::vector<std::shared_ptr<int>> pointers{p1, p1, p2}; - ::capnp::MallocMessageBuilder message; - { - auto builders = message.initRoot< - ::capnp::List<utils::capnp::SharedPtr<utils::capnp::BoxInt32>>>( - pointers.size()); - auto save = [](utils::capnp::BoxInt32::Builder *builder, 
int value) { - builder->setValue(value); - }; - for (size_t i = 0; i < pointers.size(); ++i) { - auto ptr_builder = builders[i]; - utils::SaveSharedPtr<utils::capnp::BoxInt32, int>( - pointers[i], &ptr_builder, save, &saved_pointers); - } - } - EXPECT_EQ(saved_pointers.size(), 1); - std::vector<std::pair<uint64_t, std::shared_ptr<int>>> loaded_pointers; - std::vector<std::shared_ptr<int>> elements; - { - auto reader = message.getRoot< - ::capnp::List<utils::capnp::SharedPtr<utils::capnp::BoxInt32>>>(); - auto load = [](const auto &int_reader) { - return new int(int_reader.getValue()); - }; - for (const auto ptr_reader : reader) { - elements.emplace_back(utils::LoadSharedPtr<utils::capnp::BoxInt32, int>( - ptr_reader, load, &loaded_pointers)); - } - } - EXPECT_EQ(loaded_pointers.size(), 1); - EXPECT_EQ(elements.size(), 3); - EXPECT_EQ(*elements[0], 5); - EXPECT_EQ(*elements[0], *elements[1]); - EXPECT_EQ(elements[2].get(), nullptr); -} - -TEST(Serialization, CapnpSharedPtrNonCopyable) { - std::shared_ptr<std::unique_ptr<int>> data = - std::make_shared<std::unique_ptr<int>>(std::make_unique<int>(5)); - std::vector<std::unique_ptr<int> *> saved_pointers; - ::capnp::MallocMessageBuilder message; - { - auto builder = message.initRoot<utils::capnp::SharedPtr< - utils::capnp::UniquePtr<utils::capnp::BoxInt32>>>(); - auto save = [](auto *ptr_builder, const auto &data) { - auto save_int = [](auto *int_builder, int value) { - int_builder->setValue(value); - }; - utils::SaveUniquePtr<utils::capnp::BoxInt32, int>(data, ptr_builder, - save_int); - }; - utils::SaveSharedPtr<utils::capnp::UniquePtr<utils::capnp::BoxInt32>, - std::unique_ptr<int>>(data, &builder, save, - &saved_pointers); - } - std::shared_ptr<std::unique_ptr<int>> element; - std::vector<std::pair<uint64_t, std::shared_ptr<std::unique_ptr<int>>>> - loaded_pointers; - { - auto reader = message.getRoot<utils::capnp::SharedPtr< - utils::capnp::UniquePtr<utils::capnp::BoxInt32>>>(); - auto load = [](const auto &ptr_reader) { - auto load_int = [](const auto &int_reader) { - return new int(int_reader.getValue()); - }; - return new std::unique_ptr<int>( - utils::LoadUniquePtr<utils::capnp::BoxInt32, int>(ptr_reader, - load_int)); - }; - element = - utils::LoadSharedPtr<utils::capnp::UniquePtr<utils::capnp::BoxInt32>, - std::unique_ptr<int>>(reader, load, - &loaded_pointers); - } - EXPECT_EQ(**element, 5); -} - -TEST(Serialization, CapnpVectorPrimitive) { - std::vector<int> data{1, 2, 3}; - ::capnp::MallocMessageBuilder message; - { - auto list_builder = message.initRoot<::capnp::List<int>>(data.size()); - utils::SaveVector<int>(data, &list_builder); - } - std::vector<int> elements; - { - auto reader = message.getRoot<::capnp::List<int>>(); - utils::LoadVector<int>(&elements, reader); - } - EXPECT_EQ(elements.size(), 3); - EXPECT_EQ(elements[0], 1); - EXPECT_EQ(elements[1], 2); - EXPECT_EQ(elements[2], 3); -} - -TEST(Serialization, CapnpVector) { - std::vector<int> data{1, 2, 3}; - ::capnp::MallocMessageBuilder message; - { - auto list_builder = - message.initRoot<::capnp::List<utils::capnp::BoxInt32>>(data.size()); - auto save = [](utils::capnp::BoxInt32::Builder *builder, int value) { - builder->setValue(value); - }; - utils::SaveVector<utils::capnp::BoxInt32, int>(data, &list_builder, save); - } - std::vector<int> elements; - { - auto reader = message.getRoot<::capnp::List<utils::capnp::BoxInt32>>(); - auto load = [](const utils::capnp::BoxInt32::Reader &reader) -> int { - return reader.getValue(); - }; - 
utils::LoadVector<utils::capnp::BoxInt32, int>(&elements, reader, load); - } - EXPECT_EQ(elements.size(), 3); - EXPECT_EQ(elements[0], 1); - EXPECT_EQ(elements[1], 2); - EXPECT_EQ(elements[2], 3); -} - -TEST(Serialization, CapnpVectorNonCopyable) { - std::vector<std::unique_ptr<int>> data; - data.emplace_back(std::make_unique<int>(5)); - data.emplace_back(std::make_unique<int>(10)); - ::capnp::MallocMessageBuilder message; - { - auto list_builder = message.initRoot< - ::capnp::List<utils::capnp::UniquePtr<utils::capnp::BoxInt32>>>( - data.size()); - auto save = [](auto *ptr_builder, const auto &data) { - auto save_int = [](auto *int_builder, int value) { - int_builder->setValue(value); - }; - utils::SaveUniquePtr<utils::capnp::BoxInt32, int>(data, ptr_builder, - save_int); - }; - utils::SaveVector<utils::capnp::UniquePtr<utils::capnp::BoxInt32>, - std::unique_ptr<int>>(data, &list_builder, save); - } - std::vector<std::unique_ptr<int>> elements; - { - auto reader = message.getRoot< - ::capnp::List<utils::capnp::UniquePtr<utils::capnp::BoxInt32>>>(); - auto load = [](const auto &ptr_reader) { - auto load_int = [](const auto &int_reader) { - return new int(int_reader.getValue()); - }; - return utils::LoadUniquePtr<utils::capnp::BoxInt32, int>(ptr_reader, - load_int); - }; - utils::LoadVector<utils::capnp::UniquePtr<utils::capnp::BoxInt32>, - std::unique_ptr<int>>(&elements, reader, load); - } - EXPECT_EQ(elements.size(), 2); - EXPECT_EQ(*elements[0], 5); - EXPECT_EQ(*elements[1], 10); -} - -TEST(Serialization, CapnpMap) { - std::map<std::string, std::string> map{{"my_key", "my_value"}, - {"other_key", "other_value"}}; - ::capnp::MallocMessageBuilder message; - { - auto map_builder = - message.initRoot<utils::capnp::Map<capnp::Text, capnp::Text>>(); - utils::SaveMap<capnp::Text, capnp::Text>( - map, &map_builder, [](auto *entry_builder, const auto &entry) { - entry_builder->setKey(entry.first); - entry_builder->setValue(entry.second); - }); - } - std::map<std::string, std::string> new_map; - { - auto map_reader = - message.getRoot<utils::capnp::Map<capnp::Text, capnp::Text>>(); - utils::LoadMap<capnp::Text, capnp::Text>( - &new_map, map_reader, [](const auto &entry_reader) { - std::string key = entry_reader.getKey(); - std::string value = entry_reader.getValue(); - return std::make_pair(key, value); - }); - } - EXPECT_EQ(new_map, map); -} diff --git a/tests/unit/transaction_engine_distributed.cpp b/tests/unit/transaction_engine_distributed.cpp deleted file mode 100644 index 22b241e78..000000000 --- a/tests/unit/transaction_engine_distributed.cpp +++ /dev/null @@ -1,150 +0,0 @@ -#include <algorithm> -#include <mutex> -#include <unordered_set> -#include <vector> - -#include "gtest/gtest.h" - -#include "communication/rpc/server.hpp" -#include "distributed/cluster_discovery_master.hpp" -#include "distributed/coordination_master.hpp" -#include "io/network/endpoint.hpp" -#include "transactions/engine_master.hpp" -#include "transactions/engine_rpc_messages.hpp" -#include "transactions/engine_worker.hpp" - -using namespace tx; -using namespace communication::rpc; -using namespace distributed; - -class WorkerEngineTest : public testing::Test { - protected: - const std::string local{"127.0.0.1"}; - - Server master_server_{{local, 0}}; - MasterCoordination master_coordination_{master_server_.endpoint()}; - RpcWorkerClients rpc_worker_clients_{master_coordination_}; - ClusterDiscoveryMaster cluster_disocvery_{ - master_server_, master_coordination_, rpc_worker_clients_}; - - MasterEngine 
master_{master_server_, rpc_worker_clients_}; - ClientPool master_client_pool{master_server_.endpoint()}; - - WorkerEngine worker_{master_client_pool}; -}; - -TEST_F(WorkerEngineTest, BeginOnWorker) { - worker_.Begin(); - auto second = worker_.Begin(); - EXPECT_EQ(master_.RunningTransaction(second->id_)->snapshot().size(), 1); -} - -TEST_F(WorkerEngineTest, AdvanceOnWorker) { - auto tx = worker_.Begin(); - auto cid = tx->cid(); - EXPECT_EQ(worker_.Advance(tx->id_), cid + 1); -} - -TEST_F(WorkerEngineTest, CommitOnWorker) { - auto tx = worker_.Begin(); - auto tx_id = tx->id_; - worker_.Commit(*tx); - EXPECT_TRUE(master_.Info(tx_id).is_committed()); -} - -TEST_F(WorkerEngineTest, AbortOnWorker) { - auto tx = worker_.Begin(); - auto tx_id = tx->id_; - worker_.Abort(*tx); - EXPECT_TRUE(master_.Info(tx_id).is_aborted()); -} - -TEST_F(WorkerEngineTest, RunningTransaction) { - master_.Begin(); - master_.Begin(); - worker_.RunningTransaction(1); - worker_.RunningTransaction(2); - int count = 0; - worker_.LocalForEachActiveTransaction([&count](Transaction &t) { - ++count; - if (t.id_ == 1) { - EXPECT_EQ(t.snapshot(), - tx::Snapshot(std::vector<tx::TransactionId>{})); - } else { - EXPECT_EQ(t.snapshot(), tx::Snapshot({1})); - } - }); - EXPECT_EQ(count, 2); -} - -TEST_F(WorkerEngineTest, Info) { - auto *tx_1 = master_.Begin(); - auto *tx_2 = master_.Begin(); - // We can't check active transactions in the worker (see comments there for - // info). - master_.Commit(*tx_1); - EXPECT_TRUE(master_.Info(1).is_committed()); - EXPECT_TRUE(worker_.Info(1).is_committed()); - master_.Abort(*tx_2); - EXPECT_TRUE(master_.Info(2).is_aborted()); - EXPECT_TRUE(worker_.Info(2).is_aborted()); -} - -TEST_F(WorkerEngineTest, GlobalGcSnapshot) { - auto *tx_1 = master_.Begin(); - master_.Begin(); - master_.Commit(*tx_1); - EXPECT_EQ(master_.GlobalGcSnapshot(), tx::Snapshot({1, 2})); - EXPECT_EQ(worker_.GlobalGcSnapshot(), master_.GlobalGcSnapshot()); -} - -TEST_F(WorkerEngineTest, GlobalActiveTransactions) { - auto *tx_1 = master_.Begin(); - master_.Begin(); - auto *tx_3 = master_.Begin(); - master_.Begin(); - master_.Commit(*tx_1); - master_.Abort(*tx_3); - EXPECT_EQ(worker_.GlobalActiveTransactions(), tx::Snapshot({2, 4})); -} - -TEST_F(WorkerEngineTest, LocalLast) { - master_.Begin(); - EXPECT_EQ(worker_.LocalLast(), 0); - worker_.RunningTransaction(1); - EXPECT_EQ(worker_.LocalLast(), 1); - master_.Begin(); - EXPECT_EQ(worker_.LocalLast(), 1); - master_.Begin(); - EXPECT_EQ(worker_.LocalLast(), 1); - master_.Begin(); - worker_.RunningTransaction(4); - EXPECT_EQ(worker_.LocalLast(), 4); -} - -TEST_F(WorkerEngineTest, LocalForEachActiveTransaction) { - master_.Begin(); - worker_.RunningTransaction(1); - master_.Begin(); - master_.Begin(); - master_.Begin(); - worker_.RunningTransaction(4); - std::unordered_set<tx::TransactionId> local; - worker_.LocalForEachActiveTransaction( - [&local](Transaction &t) { local.insert(t.id_); }); - EXPECT_EQ(local, std::unordered_set<tx::TransactionId>({1, 4})); -} - -TEST_F(WorkerEngineTest, EnsureTxIdGreater) { - ASSERT_LE(master_.Begin()->id_, 40); - worker_.EnsureNextIdGreater(42); - EXPECT_EQ(master_.Begin()->id_, 43); - EXPECT_EQ(worker_.Begin()->id_, 44); -} - -TEST_F(WorkerEngineTest, GlobalNext) { - auto tx = master_.Begin(); - EXPECT_NE(worker_.LocalLast(), worker_.GlobalLast()); - EXPECT_EQ(master_.LocalLast(), worker_.GlobalLast()); - EXPECT_EQ(worker_.GlobalLast(), tx->id_); -} diff --git a/tools/src/CMakeLists.txt b/tools/src/CMakeLists.txt index 8087c990d..5c301c4ca 100644 
--- a/tools/src/CMakeLists.txt +++ b/tools/src/CMakeLists.txt @@ -2,10 +2,6 @@ add_executable(mg_import_csv mg_import_csv/main.cpp) target_link_libraries(mg_import_csv memgraph_lib kvstore_dummy_lib) -# StatsD Target -add_executable(mg_statsd mg_statsd/main.cpp) -target_link_libraries(mg_statsd memgraph_lib kvstore_dummy_lib) - # Strip the executable in release build. string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type) if (lower_build_type STREQUAL "release") @@ -17,4 +13,4 @@ endif() install(TARGETS mg_import_csv RUNTIME DESTINATION bin) # Target for building all the tool executables. -add_custom_target(tools DEPENDS mg_import_csv mg_statsd) +add_custom_target(tools DEPENDS mg_import_csv) diff --git a/tools/src/mg_statsd/main.cpp b/tools/src/mg_statsd/main.cpp deleted file mode 100644 index dbf337dfa..000000000 --- a/tools/src/mg_statsd/main.cpp +++ /dev/null @@ -1,73 +0,0 @@ -#include "gflags/gflags.h" - -#include "communication/rpc/server.hpp" -#include "io/network/socket.hpp" -#include "stats/stats.hpp" -#include "stats/stats_rpc_messages.hpp" -#include "utils/flag_validation.hpp" - -DEFINE_string(interface, "0.0.0.0", - "Communication interface on which to listen."); -DEFINE_VALIDATED_int32(port, 2500, "Communication port on which to listen.", - FLAG_IN_RANGE(0, std::numeric_limits<uint16_t>::max())); - -DEFINE_string(graphite_address, "", "Graphite address."); -DEFINE_int32(graphite_port, 0, "Graphite port."); -DEFINE_string(prefix, "", "Prefix for all collected stats"); - -std::string GraphiteFormat(const stats::StatsReq &req) { - std::stringstream sstr; - if (!FLAGS_prefix.empty()) { - sstr << FLAGS_prefix << "." << req.metric_path; - } else { - sstr << req.metric_path; - } - for (const auto &tag : req.tags) { - sstr << ";" << tag.first << "=" << tag.second; - } - sstr << " " << req.value << " " << req.timestamp << "\n"; - return sstr.str(); -} - -int main(int argc, char *argv[]) { - gflags::ParseCommandLineFlags(&argc, &argv, true); - - communication::rpc::Server server({FLAGS_interface, (uint16_t)FLAGS_port}); - - io::network::Socket graphite_socket; - - CHECK(graphite_socket.Connect( - {FLAGS_graphite_address, (uint16_t)FLAGS_graphite_port})) - << "Failed to connect to Graphite"; - graphite_socket.SetKeepAlive(); - - server.Register<stats::StatsRpc>( - [&](const auto &req_reader, auto *res_builder) { - stats::StatsReq req; - req.Load(req_reader); - LOG(INFO) << "StatsRpc::Received"; - std::string data = GraphiteFormat(req); - graphite_socket.Write(data); - stats::StatsRes res; - res.Save(res_builder); - }); - - server.Register<stats::BatchStatsRpc>( - [&](const auto &req_reader, auto *res_builder) { - // TODO(mtomic): batching? 
- stats::BatchStatsReq req; - req.Load(req_reader); - LOG(INFO) << fmt::format("BatchStatsRpc::Received: {}", - req.requests.size()); - for (size_t i = 0; i < req.requests.size(); ++i) { - std::string data = GraphiteFormat(req.requests[i]); - graphite_socket.Write(data, i + 1 < req.requests.size()); - } - stats::BatchStatsRes res; - res.Save(res_builder); - }); - - std::this_thread::sleep_until(std::chrono::system_clock::time_point::max()); - - return 0; -} diff --git a/tools/tests/CMakeLists.txt b/tools/tests/CMakeLists.txt index 46c515f78..daeee6f92 100644 --- a/tools/tests/CMakeLists.txt +++ b/tools/tests/CMakeLists.txt @@ -3,9 +3,6 @@ include_directories(SYSTEM ${GTEST_INCLUDE_DIR}) add_executable(mg_recovery_check mg_recovery_check.cpp) target_link_libraries(mg_recovery_check memgraph_lib gtest gtest_main kvstore_dummy_lib) -add_executable(mg_statsd_client statsd/mg_statsd_client.cpp) -target_link_libraries(mg_statsd_client memgraph_lib kvstore_dummy_lib) - # Copy CSV data to CMake build dir configure_file(csv/comment_nodes.csv csv/comment_nodes.csv COPYONLY) configure_file(csv/forum_nodes.csv csv/forum_nodes.csv COPYONLY) diff --git a/tools/tests/statsd/mg_statsd_client.cpp b/tools/tests/statsd/mg_statsd_client.cpp deleted file mode 100644 index f96343236..000000000 --- a/tools/tests/statsd/mg_statsd_client.cpp +++ /dev/null @@ -1,62 +0,0 @@ -#include "gflags/gflags.h" -#include "glog/logging.h" - -#include "stats/stats.hpp" -#include "stats/stats_rpc_messages.hpp" -#include "utils/string.hpp" - -// TODO (buda): move this logic to a unit test - -bool parse_input(const std::string &s, std::string &metric_path, - std::vector<std::pair<std::string, std::string>> &tags, - double &value) { - auto words = utils::Split(s, " "); - if (words.size() < 2) { - return false; - } - - metric_path = words[0]; - - try { - value = std::stod(words.back()); - } catch (std::exception &e) { - return false; - } - - tags.clear(); - for (size_t i = 1; i < words.size() - 1; ++i) { - auto tag_value = utils::Split(words[i], "=", 1); - if (tag_value.size() != 2) { - return false; - } - // TODO(mtomic): tags probably need to be escaped before sending to graphite - tags.emplace_back(tag_value[0], tag_value[1]); - } - - return true; -} - -int main(int argc, char *argv[]) { - gflags::ParseCommandLineFlags(&argc, &argv, true); - - LOG(INFO) << "Usage: metric_path tag1=value1 ... tagn=valuen " - "metric_value"; - - stats::InitStatsLogging(); - - std::string line; - std::string metric_path; - std::vector<std::pair<std::string, std::string>> tags; - double value; - - while (true) { - std::getline(std::cin, line); - if (!parse_input(line, metric_path, tags, value)) { - LOG(ERROR) << "Invalid input"; - continue; - } - stats::LogStat(metric_path, value, tags); - } - - return 0; -}