From 53bca9fa3a801c3f6153e5f7c5d8fb664581ca07 Mon Sep 17 00:00:00 2001
From: Teon Banek <teon.banek@memgraph.io>
Date: Thu, 18 Oct 2018 10:08:14 +0200
Subject: [PATCH] Prepare release v0.10.0

Reviewers: buda, teon.banek

Subscribers: mtomic, pullbot

Differential Revision: https://phabricator.memgraph.io/D1375
---
 CMakeLists.txt                                |    8 -
 apollo_archives.yaml                          |    1 -
 apollo_build.yaml                             |   11 +-
 docs/user_technical/import-tools.md           |    3 +-
 src/CMakeLists.txt                            |   32 +-
 src/communication/bolt/client.hpp             |    2 +
 src/communication/raft/network_common.hpp     |   48 -
 src/communication/raft/raft-inl.hpp           |  699 ---------
 src/communication/raft/raft.hpp               |  277 ----
 src/communication/raft/rpc.hpp                |  117 --
 src/communication/raft/storage/file.hpp       |  239 ----
 src/communication/raft/storage/memory.hpp     |   63 -
 src/communication/raft/test_utils.hpp         |  141 --
 src/communication/rpc/client.cpp              |  116 --
 src/communication/rpc/client.hpp              |   76 -
 src/communication/rpc/client_pool.hpp         |   46 -
 src/communication/rpc/messages-inl.hpp        |  134 --
 src/communication/rpc/messages.hpp            |   74 -
 src/communication/rpc/protocol.cpp            |   92 --
 src/communication/rpc/protocol.hpp            |   55 -
 src/communication/rpc/server.cpp              |   24 -
 src/communication/rpc/server.hpp              |   86 --
 src/database/config.cpp                       |   39 -
 src/database/counters.cpp                     |   48 -
 src/database/counters.hpp                     |   24 -
 src/database/graph_db.cpp                     |  290 +---
 src/database/graph_db.hpp                     |   65 -
 src/database/graph_db_accessor.cpp            |  172 +--
 src/database/graph_db_accessor.hpp            |    8 -
 src/database/state_delta.hpp                  |   48 -
 src/database/storage_gc.hpp                   |    1 -
 src/database/storage_gc_master.hpp            |   67 -
 src/database/storage_gc_worker.hpp            |   46 -
 src/distributed/cache.cpp                     |   99 --
 src/distributed/cache.hpp                     |   62 -
 src/distributed/cluster_discovery_master.cpp  |   33 -
 src/distributed/cluster_discovery_master.hpp  |   27 -
 src/distributed/cluster_discovery_worker.cpp  |   30 -
 src/distributed/cluster_discovery_worker.hpp  |   43 -
 src/distributed/coordination.cpp              |   34 -
 src/distributed/coordination.hpp              |   36 -
 src/distributed/coordination_master.cpp       |   83 --
 src/distributed/coordination_master.hpp       |   50 -
 src/distributed/coordination_rpc_messages.hpp |   95 --
 src/distributed/coordination_worker.cpp       |   47 -
 src/distributed/coordination_worker.hpp       |   33 -
 src/distributed/data_manager.cpp              |   54 -
 src/distributed/data_manager.hpp              |   45 -
 src/distributed/data_rpc_clients.cpp          |   27 -
 src/distributed/data_rpc_clients.hpp          |   28 -
 src/distributed/data_rpc_messages.hpp         |   68 -
 src/distributed/data_rpc_server.cpp           |   29 -
 src/distributed/data_rpc_server.hpp           |   17 -
 src/distributed/durability_rpc_clients.cpp    |   25 -
 src/distributed/durability_rpc_clients.hpp    |   28 -
 src/distributed/durability_rpc_messages.hpp   |   17 -
 src/distributed/durability_rpc_server.cpp     |   18 -
 src/distributed/durability_rpc_server.hpp     |   21 -
 src/distributed/index_rpc_messages.hpp        |   32 -
 src/distributed/index_rpc_server.cpp          |   33 -
 src/distributed/index_rpc_server.hpp          |   22 -
 src/distributed/plan_consumer.cpp             |   39 -
 src/distributed/plan_consumer.hpp             |   44 -
 src/distributed/plan_dispatcher.cpp           |   35 -
 src/distributed/plan_dispatcher.hpp           |   30 -
 src/distributed/plan_rpc_messages.hpp         |   63 -
 src/distributed/produce_rpc_server.cpp        |  169 ---
 src/distributed/produce_rpc_server.hpp        |   89 --
 src/distributed/pull_produce_rpc_messages.hpp |  376 -----
 src/distributed/pull_rpc_clients.cpp          |   72 -
 src/distributed/pull_rpc_clients.hpp          |   47 -
 src/distributed/rpc_worker_clients.hpp        |  133 --
 src/distributed/serialization.hpp             |  183 ---
 src/distributed/storage_gc_rpc_messages.hpp   |   39 -
 .../transactional_cache_cleaner.hpp           |   87 --
 ...ansactional_cache_cleaner_rpc_messages.hpp |   13 -
 src/distributed/updates_rpc_clients.cpp       |  116 --
 src/distributed/updates_rpc_clients.hpp       |   76 -
 src/distributed/updates_rpc_messages.hpp      |  203 ---
 src/distributed/updates_rpc_server.cpp        |  349 -----
 src/distributed/updates_rpc_server.hpp        |  104 --
 src/durability/recovery.hpp                   |    9 -
 src/io/network/endpoint.hpp                   |   11 -
 src/memgraph_bolt.cpp                         |   72 +-
 src/query/common.hpp                          |    7 -
 src/query/frontend/ast/ast.cpp                |   61 -
 src/query/frontend/ast/ast.hpp                | 1271 -----------------
 src/query/frontend/semantic/symbol.hpp        |   14 -
 src/query/frontend/semantic/symbol_table.hpp  |   11 -
 src/query/interpreter.cpp                     |   60 +-
 src/query/interpreter.hpp                     |   13 +-
 src/query/plan/distributed.cpp                |   61 -
 src/query/plan/operator.cpp                   |  759 +---------
 src/query/plan/operator.hpp                   |  781 +---------
 src/stats/metrics.cpp                         |  105 --
 src/stats/metrics.hpp                         |  202 ---
 src/stats/stats.cpp                           |  113 --
 src/stats/stats.hpp                           |   33 -
 src/stats/stats_rpc_messages.hpp              |   62 -
 src/storage/address.hpp                       |    7 -
 src/storage/concurrent_id_mapper_master.cpp   |   46 -
 src/storage/concurrent_id_mapper_master.hpp   |   20 -
 .../concurrent_id_mapper_rpc_messages.hpp     |   29 -
 src/storage/concurrent_id_mapper_worker.cpp   |   60 -
 src/storage/concurrent_id_mapper_worker.hpp   |   34 -
 src/storage/record_accessor.cpp               |   88 +-
 src/storage/record_accessor.hpp               |    5 -
 src/storage/types.hpp                         |   30 -
 src/storage/vertex_accessor.cpp               |   14 +-
 src/transactions/commit_log.hpp               |    9 -
 src/transactions/engine_master.cpp            |   85 --
 src/transactions/engine_master.hpp            |   30 -
 src/transactions/engine_rpc_messages.hpp      |   70 -
 src/transactions/engine_single_node.cpp       |    1 -
 src/transactions/engine_worker.cpp            |  194 ---
 src/transactions/engine_worker.hpp            |   74 -
 src/transactions/snapshot.hpp                 |   10 -
 src/utils/serialization.hpp                   |  193 ---
 tests/CMakeLists.txt                          |    3 -
 tests/distributed/card_fraud/.gitignore       |    2 -
 tests/distributed/card_fraud/apollo_runs.py   |   44 -
 tests/distributed/card_fraud/card_fraud.py    |  222 ---
 tests/distributed/card_fraud/config.json      |    8 -
 .../card_fraud/generate_dataset.sh            |   18 -
 tests/distributed/common.py                   |    1 -
 tests/distributed/jail_faker.py               |    1 -
 tests/distributed/jail_service.py             |  150 --
 tests/distributed/local_runner                |   54 -
 tests/distributed/master.py                   |   81 --
 tests/distributed/raft/CMakeLists.txt         |   29 -
 tests/distributed/raft/README.md              |   13 -
 tests/distributed/raft/example_client.cpp     |   48 -
 tests/distributed/raft/example_server.cpp     |   73 -
 tests/distributed/raft/example_test.py        |   61 -
 tests/distributed/raft/messages.hpp           |   47 -
 .../clients/card_fraud_client.cpp             |   24 +-
 .../clients/long_running_common.hpp           |   21 +-
 tests/manual/card_fraud_local.cpp             |   77 -
 tests/manual/distributed_common.hpp           |   98 --
 tests/manual/distributed_repl.cpp             |   61 -
 tests/manual/query_planner.cpp                |   69 -
 tests/manual/raft_rpc.cpp                     |   60 -
 .../unit/concurrent_id_mapper_distributed.cpp |   52 -
 tests/unit/counters.cpp                       |   26 -
 tests/unit/cypher_main_visitor.cpp            |   32 +-
 tests/unit/database_master.cpp                |   11 -
 tests/unit/distributed_common.hpp             |  139 --
 tests/unit/distributed_coordination.cpp       |  183 ---
 tests/unit/distributed_data_exchange.cpp      |   90 --
 tests/unit/distributed_durability.cpp         |  117 --
 tests/unit/distributed_gc.cpp                 |   78 -
 tests/unit/distributed_graph_db.cpp           |  182 ---
 tests/unit/distributed_interpretation.cpp     |  279 ----
 tests/unit/distributed_query_plan.cpp         |  363 -----
 tests/unit/distributed_serialization.cpp      |  162 ---
 tests/unit/distributed_updates.cpp            |  560 --------
 tests/unit/durability.cpp                     |    2 +
 tests/unit/metrics.cpp                        |   90 --
 tests/unit/query_planner.cpp                  |  488 +------
 tests/unit/query_semantic.cpp                 |   24 -
 tests/unit/raft.cpp                           |  660 ---------
 tests/unit/raft_storage.cpp                   |   71 -
 tests/unit/rpc.cpp                            |  175 ---
 tests/unit/rpc_worker_clients.cpp             |  125 --
 tests/unit/serialization.cpp                  |   64 -
 tests/unit/transaction_engine_distributed.cpp |  150 --
 tools/src/CMakeLists.txt                      |    6 +-
 tools/src/mg_import_csv/main.cpp              |   20 +
 tools/src/mg_statsd/main.cpp                  |   65 -
 tools/tests/CMakeLists.txt                    |    3 -
 tools/tests/statsd/mg_statsd_client.cpp       |   69 -
 171 files changed, 158 insertions(+), 16592 deletions(-)
 delete mode 100644 src/communication/raft/network_common.hpp
 delete mode 100644 src/communication/raft/raft-inl.hpp
 delete mode 100644 src/communication/raft/raft.hpp
 delete mode 100644 src/communication/raft/rpc.hpp
 delete mode 100644 src/communication/raft/storage/file.hpp
 delete mode 100644 src/communication/raft/storage/memory.hpp
 delete mode 100644 src/communication/raft/test_utils.hpp
 delete mode 100644 src/communication/rpc/client.cpp
 delete mode 100644 src/communication/rpc/client.hpp
 delete mode 100644 src/communication/rpc/client_pool.hpp
 delete mode 100644 src/communication/rpc/messages-inl.hpp
 delete mode 100644 src/communication/rpc/messages.hpp
 delete mode 100644 src/communication/rpc/protocol.cpp
 delete mode 100644 src/communication/rpc/protocol.hpp
 delete mode 100644 src/communication/rpc/server.cpp
 delete mode 100644 src/communication/rpc/server.hpp
 delete mode 100644 src/database/storage_gc_master.hpp
 delete mode 100644 src/database/storage_gc_worker.hpp
 delete mode 100644 src/distributed/cache.cpp
 delete mode 100644 src/distributed/cache.hpp
 delete mode 100644 src/distributed/cluster_discovery_master.cpp
 delete mode 100644 src/distributed/cluster_discovery_master.hpp
 delete mode 100644 src/distributed/cluster_discovery_worker.cpp
 delete mode 100644 src/distributed/cluster_discovery_worker.hpp
 delete mode 100644 src/distributed/coordination.cpp
 delete mode 100644 src/distributed/coordination.hpp
 delete mode 100644 src/distributed/coordination_master.cpp
 delete mode 100644 src/distributed/coordination_master.hpp
 delete mode 100644 src/distributed/coordination_rpc_messages.hpp
 delete mode 100644 src/distributed/coordination_worker.cpp
 delete mode 100644 src/distributed/coordination_worker.hpp
 delete mode 100644 src/distributed/data_manager.cpp
 delete mode 100644 src/distributed/data_manager.hpp
 delete mode 100644 src/distributed/data_rpc_clients.cpp
 delete mode 100644 src/distributed/data_rpc_clients.hpp
 delete mode 100644 src/distributed/data_rpc_messages.hpp
 delete mode 100644 src/distributed/data_rpc_server.cpp
 delete mode 100644 src/distributed/data_rpc_server.hpp
 delete mode 100644 src/distributed/durability_rpc_clients.cpp
 delete mode 100644 src/distributed/durability_rpc_clients.hpp
 delete mode 100644 src/distributed/durability_rpc_messages.hpp
 delete mode 100644 src/distributed/durability_rpc_server.cpp
 delete mode 100644 src/distributed/durability_rpc_server.hpp
 delete mode 100644 src/distributed/index_rpc_messages.hpp
 delete mode 100644 src/distributed/index_rpc_server.cpp
 delete mode 100644 src/distributed/index_rpc_server.hpp
 delete mode 100644 src/distributed/plan_consumer.cpp
 delete mode 100644 src/distributed/plan_consumer.hpp
 delete mode 100644 src/distributed/plan_dispatcher.cpp
 delete mode 100644 src/distributed/plan_dispatcher.hpp
 delete mode 100644 src/distributed/plan_rpc_messages.hpp
 delete mode 100644 src/distributed/produce_rpc_server.cpp
 delete mode 100644 src/distributed/produce_rpc_server.hpp
 delete mode 100644 src/distributed/pull_produce_rpc_messages.hpp
 delete mode 100644 src/distributed/pull_rpc_clients.cpp
 delete mode 100644 src/distributed/pull_rpc_clients.hpp
 delete mode 100644 src/distributed/rpc_worker_clients.hpp
 delete mode 100644 src/distributed/serialization.hpp
 delete mode 100644 src/distributed/storage_gc_rpc_messages.hpp
 delete mode 100644 src/distributed/transactional_cache_cleaner.hpp
 delete mode 100644 src/distributed/transactional_cache_cleaner_rpc_messages.hpp
 delete mode 100644 src/distributed/updates_rpc_clients.cpp
 delete mode 100644 src/distributed/updates_rpc_clients.hpp
 delete mode 100644 src/distributed/updates_rpc_messages.hpp
 delete mode 100644 src/distributed/updates_rpc_server.cpp
 delete mode 100644 src/distributed/updates_rpc_server.hpp
 delete mode 100644 src/stats/metrics.cpp
 delete mode 100644 src/stats/metrics.hpp
 delete mode 100644 src/stats/stats.cpp
 delete mode 100644 src/stats/stats.hpp
 delete mode 100644 src/stats/stats_rpc_messages.hpp
 delete mode 100644 src/storage/concurrent_id_mapper_master.cpp
 delete mode 100644 src/storage/concurrent_id_mapper_master.hpp
 delete mode 100644 src/storage/concurrent_id_mapper_rpc_messages.hpp
 delete mode 100644 src/storage/concurrent_id_mapper_worker.cpp
 delete mode 100644 src/storage/concurrent_id_mapper_worker.hpp
 delete mode 100644 src/transactions/engine_master.cpp
 delete mode 100644 src/transactions/engine_master.hpp
 delete mode 100644 src/transactions/engine_rpc_messages.hpp
 delete mode 100644 src/transactions/engine_worker.cpp
 delete mode 100644 src/transactions/engine_worker.hpp
 delete mode 100644 src/utils/serialization.hpp
 delete mode 100644 tests/distributed/card_fraud/.gitignore
 delete mode 100755 tests/distributed/card_fraud/apollo_runs.py
 delete mode 100644 tests/distributed/card_fraud/card_fraud.py
 delete mode 100644 tests/distributed/card_fraud/config.json
 delete mode 100755 tests/distributed/card_fraud/generate_dataset.sh
 delete mode 120000 tests/distributed/common.py
 delete mode 120000 tests/distributed/jail_faker.py
 delete mode 100755 tests/distributed/jail_service.py
 delete mode 100755 tests/distributed/local_runner
 delete mode 100755 tests/distributed/master.py
 delete mode 100644 tests/distributed/raft/CMakeLists.txt
 delete mode 100644 tests/distributed/raft/README.md
 delete mode 100644 tests/distributed/raft/example_client.cpp
 delete mode 100644 tests/distributed/raft/example_server.cpp
 delete mode 100644 tests/distributed/raft/example_test.py
 delete mode 100644 tests/distributed/raft/messages.hpp
 delete mode 100644 tests/manual/card_fraud_local.cpp
 delete mode 100644 tests/manual/distributed_common.hpp
 delete mode 100644 tests/manual/distributed_repl.cpp
 delete mode 100644 tests/manual/raft_rpc.cpp
 delete mode 100644 tests/unit/concurrent_id_mapper_distributed.cpp
 delete mode 100644 tests/unit/counters.cpp
 delete mode 100644 tests/unit/database_master.cpp
 delete mode 100644 tests/unit/distributed_common.hpp
 delete mode 100644 tests/unit/distributed_coordination.cpp
 delete mode 100644 tests/unit/distributed_data_exchange.cpp
 delete mode 100644 tests/unit/distributed_durability.cpp
 delete mode 100644 tests/unit/distributed_gc.cpp
 delete mode 100644 tests/unit/distributed_graph_db.cpp
 delete mode 100644 tests/unit/distributed_interpretation.cpp
 delete mode 100644 tests/unit/distributed_query_plan.cpp
 delete mode 100644 tests/unit/distributed_serialization.cpp
 delete mode 100644 tests/unit/distributed_updates.cpp
 delete mode 100644 tests/unit/metrics.cpp
 delete mode 100644 tests/unit/raft.cpp
 delete mode 100644 tests/unit/raft_storage.cpp
 delete mode 100644 tests/unit/rpc.cpp
 delete mode 100644 tests/unit/rpc_worker_clients.cpp
 delete mode 100644 tests/unit/serialization.cpp
 delete mode 100644 tests/unit/transaction_engine_distributed.cpp
 delete mode 100644 tools/src/mg_statsd/main.cpp
 delete mode 100644 tools/tests/statsd/mg_statsd_client.cpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index e9e42697a..5130b6856 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -137,9 +137,6 @@ if (USE_READLINE)
   endif()
 endif()
 
-set(Boost_USE_STATIC_LIBS ON)
-find_package(Boost 1.62 REQUIRED COMPONENTS iostreams serialization)
-
 set(libs_dir ${CMAKE_SOURCE_DIR}/libs)
 add_subdirectory(libs EXCLUDE_FROM_ALL)
 
@@ -186,7 +183,6 @@ option(EXPERIMENTAL "Build experimental binaries" OFF)
 option(CUSTOMERS "Build customer binaries" OFF)
 option(TEST_COVERAGE "Generate coverage reports from running memgraph" OFF)
 option(TOOLS "Build tools binaries" ON)
-option(MG_COMMUNITY "Build Memgraph Community Edition" OFF)
 
 if (TEST_COVERAGE)
   string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type)
@@ -197,10 +193,6 @@ if (TEST_COVERAGE)
   set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
 endif()
 
-if (MG_COMMUNITY)
-  add_definitions(-DMG_COMMUNITY)
-endif()
-
 # Add subprojects
 include_directories(src)
 add_subdirectory(src)
diff --git a/apollo_archives.yaml b/apollo_archives.yaml
index 6f79721fc..5400128a0 100644
--- a/apollo_archives.yaml
+++ b/apollo_archives.yaml
@@ -3,7 +3,6 @@
     - build_debug/memgraph
     - build_release/memgraph
     - build_release/tools/src/mg_import_csv
-    - build_release/tools/src/mg_statsd
     - config
   filename: binaries.tar.gz
 
diff --git a/apollo_build.yaml b/apollo_build.yaml
index e06ea9048..226b778d4 100644
--- a/apollo_build.yaml
+++ b/apollo_build.yaml
@@ -33,13 +33,8 @@
     cmake -DCMAKE_BUILD_TYPE=release ..
     TIMEOUT=1000 make -j$THREADS memgraph tools memgraph__macro_benchmark memgraph__stress memgraph__manual__card_fraud_generate_snapshot
 
-    # Generate distributed card fraud dataset.
-    cd ../tests/distributed/card_fraud
-    ./generate_dataset.sh
-    cd ../../..
-
     # Checkout to parent commit and initialize.
-    cd ../parent
+    cd ../../parent
     git checkout HEAD~1
     TIMEOUT=600 ./init
 
@@ -88,7 +83,3 @@
     cd ../../docs/user_technical
     # TODO (mferencevic): uncomment this once couscous is replaced with pandoc
     #./bundle_community
-
-    # Generate distributed card fraud dataset.
-    cd ../../tests/distributed/card_fraud
-    ./generate_dataset.sh
diff --git a/docs/user_technical/import-tools.md b/docs/user_technical/import-tools.md
index 7815b0003..3bf48c2d4 100644
--- a/docs/user_technical/import-tools.md
+++ b/docs/user_technical/import-tools.md
@@ -94,7 +94,7 @@ will load the new dataset.
 Use the following command:
 
 ```
-mg_import_csv --nodes=comment_nodes.csv --nodes=forum_nodes.csv --relationships=relationships.csv
+mg_import_csv --overwrite --nodes=comment_nodes.csv --nodes=forum_nodes.csv --relationships=relationships.csv
 ```
 
 If using Docker, things are a bit more complicated. First you need to move the
@@ -110,6 +110,7 @@ Then, run the importer with the following:
 ```
 docker run -v mg_lib:/var/lib/memgraph -v mg_etc:/etc/memgraph -v mg_import:/import-data \
   --entrypoint=mg_import_csv memgraph \
+  --overwrite \
   --nodes=/import-data/comment_nodes.csv --nodes=/import-data/forum_nodes.csv \
   --relationships=/import-data/relationships.csv
 ```
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 371fcd7d4..159094f75 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -4,33 +4,12 @@
 set(memgraph_src_files
     communication/buffer.cpp
     communication/bolt/v1/decoder/decoded_value.cpp
-    communication/rpc/client.cpp
-    communication/rpc/protocol.cpp
-    communication/rpc/server.cpp
     data_structures/concurrent/skiplist_gc.cpp
     database/config.cpp
     database/counters.cpp
     database/graph_db.cpp
     database/graph_db_accessor.cpp
     database/state_delta.cpp
-    distributed/cluster_discovery_master.cpp
-    distributed/cluster_discovery_worker.cpp
-    distributed/coordination.cpp
-    distributed/coordination_master.cpp
-    distributed/coordination_worker.cpp
-    distributed/durability_rpc_clients.cpp
-    distributed/durability_rpc_server.cpp
-    distributed/index_rpc_server.cpp
-    distributed/plan_consumer.cpp
-    distributed/plan_dispatcher.cpp
-    distributed/cache.cpp
-    distributed/data_manager.cpp
-    distributed/data_rpc_clients.cpp
-    distributed/data_rpc_server.cpp
-    distributed/produce_rpc_server.cpp
-    distributed/pull_rpc_clients.cpp
-    distributed/updates_rpc_clients.cpp
-    distributed/updates_rpc_server.cpp
     durability/paths.cpp
     durability/recovery.cpp
     durability/snapshooter.cpp
@@ -46,16 +25,11 @@ set(memgraph_src_files
     query/frontend/stripped.cpp
     query/interpret/awesome_memgraph_functions.cpp
     query/interpreter.cpp
-    query/plan/distributed.cpp
     query/plan/operator.cpp
     query/plan/preprocess.cpp
     query/plan/rule_based_planner.cpp
     query/plan/variable_start_planner.cpp
     query/typed_value.cpp
-    stats/metrics.cpp
-    stats/stats.cpp
-    storage/concurrent_id_mapper_master.cpp
-    storage/concurrent_id_mapper_worker.cpp
     storage/edge_accessor.cpp
     storage/locking/record_lock.cpp
     storage/property_value.cpp
@@ -63,9 +37,7 @@ set(memgraph_src_files
     storage/vertex_accessor.cpp
     threading/sync/rwlock.cpp
     threading/thread.cpp
-    transactions/engine_master.cpp
     transactions/engine_single_node.cpp
-    transactions/engine_worker.cpp
     utils/demangle.cpp
     utils/file.cpp
     utils/network.cpp
@@ -78,9 +50,7 @@ string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type)
 
 # memgraph_lib depend on these libraries
 set(MEMGRAPH_ALL_LIBS stdc++fs Threads::Threads fmt cppitertools
-    antlr_opencypher_parser_lib dl glog gflags
-    ${Boost_IOSTREAMS_LIBRARY_RELEASE}
-    ${Boost_SERIALIZATION_LIBRARY_RELEASE})
+    antlr_opencypher_parser_lib dl glog gflags)
 
 if (USE_LTALLOC)
     list(APPEND MEMGRAPH_ALL_LIBS ltalloc)
diff --git a/src/communication/bolt/client.hpp b/src/communication/bolt/client.hpp
index c5a8773dc..a44b3be1f 100644
--- a/src/communication/bolt/client.hpp
+++ b/src/communication/bolt/client.hpp
@@ -7,6 +7,8 @@
 #include "communication/bolt/v1/encoder/chunked_encoder_buffer.hpp"
 #include "communication/bolt/v1/encoder/client_encoder.hpp"
 
+#include "communication/client.hpp"
+
 #include "query/typed_value.hpp"
 #include "utils/exceptions.hpp"
 
diff --git a/src/communication/raft/network_common.hpp b/src/communication/raft/network_common.hpp
deleted file mode 100644
index 347b98df5..000000000
--- a/src/communication/raft/network_common.hpp
+++ /dev/null
@@ -1,48 +0,0 @@
-#pragma once
-
-#include "boost/serialization/access.hpp"
-#include "boost/serialization/base_object.hpp"
-
-#include "communication/rpc/messages.hpp"
-#include "communication/raft/raft.hpp"
-
-namespace communication::raft {
-
-enum class RpcType { REQUEST_VOTE, APPEND_ENTRIES };
-
-template <class State>
-struct PeerRpcRequest : public rpc::Message {
-  RpcType type;
-  RequestVoteRequest request_vote;
-  AppendEntriesRequest<State> append_entries;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &boost::serialization::base_object<rpc::Message>(*this);
-    ar &type;
-    ar &request_vote;
-    ar &append_entries;
-  }
-};
-
-struct PeerRpcReply : public rpc::Message {
-  RpcType type;
-  RequestVoteReply request_vote;
-  AppendEntriesReply append_entries;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &boost::serialization::base_object<rpc::Message>(*this);
-    ar &type;
-    ar &request_vote;
-    ar &append_entries;
-  }
-};
-
-}  // namespace communication::raft
diff --git a/src/communication/raft/raft-inl.hpp b/src/communication/raft/raft-inl.hpp
deleted file mode 100644
index f5c52c67a..000000000
--- a/src/communication/raft/raft-inl.hpp
+++ /dev/null
@@ -1,699 +0,0 @@
-#pragma once
-
-#include <algorithm>
-
-#include "fmt/format.h"
-#include "glog/logging.h"
-
-namespace communication::raft {
-
-namespace impl {
-
-template <class State>
-RaftMemberImpl<State>::RaftMemberImpl(RaftNetworkInterface<State> &network,
-                                      RaftStorageInterface<State> &storage,
-                                      const MemberId &id,
-                                      const RaftConfig &config)
-    : network_(network), storage_(storage), id_(id), config_(config) {
-  std::lock_guard<std::mutex> lock(mutex_);
-
-  tie(term_, voted_for_) = storage_.GetTermAndVotedFor();
-
-  for (const auto &peer_id : config_.members) {
-    peer_states_[peer_id] = std::make_unique<RaftPeerState>();
-  }
-
-  SetElectionTimer();
-}
-
-template <class State>
-RaftMemberImpl<State>::~RaftMemberImpl() {
-  Stop();
-}
-
-template <class State>
-void RaftMemberImpl<State>::Stop() {
-  {
-    std::lock_guard<std::mutex> lock(mutex_);
-    if (!exiting_) {
-      LogInfo("Stopping...");
-      exiting_ = true;
-    }
-  }
-  state_changed_.notify_all();
-}
-
-template <class State>
-template <class... Args>
-void RaftMemberImpl<State>::LogInfo(const std::string &format,
-                                    Args &&... args) {
-  LOG(INFO) << fmt::format("[id = {}, term = {}] {}", id_, term_,
-                           fmt::format(format, std::forward<Args>(args)...))
-            << std::endl;
-}
-
-template <class State>
-void RaftMemberImpl<State>::TimerThreadMain() {
-  std::unique_lock<std::mutex> lock(mutex_);
-  while (!exiting_) {
-    if (Clock::now() >= next_election_time_) {
-      StartNewElection();
-    }
-    state_changed_.wait_until(lock, next_election_time_);
-  }
-}
-
-template <class State>
-void RaftMemberImpl<State>::PeerThreadMain(std::string peer_id) {
-  RaftPeerState &peer_state = *peer_states_[peer_id];
-
-  LogInfo("Peer thread started for {}", peer_id);
-
-  std::unique_lock<std::mutex> lock(mutex_);
-
-  /* This loop will either call a function that issues an RPC or wait on the
-   * condition variable. It must not do both! Lock on `mutex_` is released while
-   * waiting for RPC response, which might cause us to miss a notification on
-   * the `state_changed_` condition variable and wait indefinitely. The safest
-   * thing to do is to assume some important part of state was modified while we
-   * were waiting for the response and loop around to check. */
-  while (!exiting_) {
-    TimePoint now = Clock::now();
-    TimePoint wait_until;
-
-    if (mode_ != RaftMode::FOLLOWER && peer_state.backoff_until > now) {
-      wait_until = peer_state.backoff_until;
-    } else {
-      switch (mode_) {
-        case RaftMode::FOLLOWER:
-          wait_until = TimePoint::max();
-          break;
-        case RaftMode::CANDIDATE:
-          if (!peer_state.request_vote_done) {
-            RequestVote(peer_id, peer_state, lock);
-            continue;
-          }
-          break;
-        case RaftMode::LEADER:
-          if (peer_state.next_index <= storage_.GetLastLogIndex() ||
-              now >= peer_state.next_heartbeat_time) {
-            AppendEntries(peer_id, peer_state, lock);
-            continue;
-          } else {
-            wait_until = peer_state.next_heartbeat_time;
-          }
-          break;
-      }
-    }
-
-    state_changed_.wait_until(lock, wait_until);
-  }
-
-  LogInfo("Peer thread exiting for {}", peer_id);
-}
-
-template <class State>
-void RaftMemberImpl<State>::CandidateOrLeaderTransitionToFollower() {
-  DCHECK(mode_ != RaftMode::FOLLOWER)
-      << "`CandidateOrLeaderTransitionToFollower` called from follower mode";
-  mode_ = RaftMode::FOLLOWER;
-  leader_ = {};
-  SetElectionTimer();
-}
-
-template <class State>
-void RaftMemberImpl<State>::CandidateTransitionToLeader() {
-  DCHECK(mode_ == RaftMode::CANDIDATE)
-      << "`CandidateTransitionToLeader` called while not in candidate mode";
-  mode_ = RaftMode::LEADER;
-  leader_ = id_;
-
-  /* We don't want to trigger elections while in leader mode. */
-  next_election_time_ = TimePoint::max();
-
-  /* [Raft thesis, Section 6.4]
-   * "The Leader Completeness Property guarantees that a leader has all
-   * committed entries, but at the start of its term, it may not know which
-   * those are. To find out, it needs to commit an entry from its term. Raft
-   * handles this by having each leader commit a blank no-op entry into the log
-   * at the start of its term. As soon as this no-op entry is committed, the
-   * leader’s commit index will be at least as large as any other servers’
-   * during its term." */
-  LogEntry<State> entry;
-  entry.term = term_;
-  entry.command = std::experimental::nullopt;
-  storage_.AppendLogEntry(entry);
-}
-
-template <class State>
-bool RaftMemberImpl<State>::CandidateOrLeaderNoteTerm(const TermId new_term) {
-  DCHECK(mode_ != RaftMode::FOLLOWER)
-      << "`CandidateOrLeaderNoteTerm` called from follower mode";
-  /* [Raft thesis, Section 3.3]
-   * "Current terms are exchanged whenever servers communicate; if one server's
-   * current term is smaller than the other's, then it updates its current term
-   * to the larger value. If a candidate or leader discovers that its term is
-   * out of date, it immediately reverts to follower state." */
-  if (term_ < new_term) {
-    UpdateTermAndVotedFor(new_term, {});
-    CandidateOrLeaderTransitionToFollower();
-    return true;
-  }
-  return false;
-}
-
-template <class State>
-void RaftMemberImpl<State>::UpdateTermAndVotedFor(
-    const TermId new_term,
-    const std::experimental::optional<MemberId> &new_voted_for) {
-  term_ = new_term;
-  voted_for_ = new_voted_for;
-  leader_ = {};
-
-  storage_.WriteTermAndVotedFor(term_, voted_for_);
-}
-
-template <class State>
-void RaftMemberImpl<State>::SetElectionTimer() {
-  /* [Raft thesis, section 3.4]
-   * "Raft uses randomized election timeouts to ensure that split votes are rare
-   * and that they are resolved quickly. To prevent split votes in the first
-   * place, election timeouts are chosen randomly from a fixed interval (e.g.,
-   * 150-300 ms)." */
-  std::uniform_int_distribution<uint64_t> distribution(
-      config_.leader_timeout_min.count(), config_.leader_timeout_max.count());
-  Clock::duration wait_interval = std::chrono::milliseconds(distribution(rng_));
-  next_election_time_ = Clock::now() + wait_interval;
-}
-
-template <class State>
-void RaftMemberImpl<State>::StartNewElection() {
-  LogInfo("Starting new election");
-  /* [Raft thesis, section 3.4]
-   * "To begin an election, a follower increments its current term and
-   * transitions to candidate state.  It then votes for itself and issues
-   * RequestVote RPCs in parallel to each of the other servers in the cluster."
-   */
-  UpdateTermAndVotedFor(term_ + 1, id_);
-  mode_ = RaftMode::CANDIDATE;
-
-  /* [Raft thesis, section 3.4]
-   * "Each candidate restarts its randomized election timeout at the start of an
-   * election, and it waits for that timeout to elapse before starting the next
-   * election; this reduces the likelihood of another split vote in the new
-   * election." */
-  SetElectionTimer();
-
-  for (const auto &peer_id : config_.members) {
-    if (peer_id == id_) {
-      continue;
-    }
-    auto &peer_state = peer_states_[peer_id];
-    peer_state->request_vote_done = false;
-    peer_state->voted_for_me = false;
-    peer_state->match_index = 0;
-    peer_state->next_index = storage_.GetLastLogIndex() + 1;
-
-    /* [Raft thesis, section 3.5]
-     * "Until the leader has discovered where it and the follower's logs match,
-     * the leader can send AppendEntries with no entries (like heartbeats) to
-     * save bandwidth. Then, once the matchIndex immediately precedes the
-     * nextIndex, the leader should begin to send the actual entries." */
-    peer_state->suppress_log_entries = true;
-
-    /* [Raft thesis, section 3.4]
-     * "Once a candidate wins an election, it becomes leader. It then sends
-     * heartbeat messages to all of the other servers to establish its authority
-     * and prevent new elections."
-     *
-     * This will make the newly elected leader send heartbeats immediately.
-     */
-    peer_state->next_heartbeat_time = TimePoint::min();
-    peer_state->backoff_until = TimePoint::min();
-  }
-
-  // We already have the majority if we're in a single node cluster.
-  if (CountVotes()) {
-    LogInfo("Elected as leader.");
-    CandidateTransitionToLeader();
-  }
-
-  /* Notify peer threads to start issuing RequestVote RPCs. */
-  state_changed_.notify_all();
-}
-
-template <class State>
-bool RaftMemberImpl<State>::CountVotes() {
-  DCHECK(mode_ == RaftMode::CANDIDATE)
-      << "`CountVotes` should only be called from candidate mode";
-  int num_votes = 0;
-  for (const auto &peer_id : config_.members) {
-    if (peer_id == id_ || peer_states_[peer_id]->voted_for_me) {
-      num_votes++;
-    }
-  }
-
-  return 2 * num_votes > config_.members.size();
-}
-
-template <class State>
-void RaftMemberImpl<State>::RequestVote(const std::string &peer_id,
-                                        RaftPeerState &peer_state,
-                                        std::unique_lock<std::mutex> &lock) {
-  LogInfo("Requesting vote from {}", peer_id);
-
-  RequestVoteRequest request;
-  request.candidate_term = term_;
-  request.candidate_id = id_;
-  request.last_log_index = storage_.GetLastLogIndex();
-  request.last_log_term = storage_.GetLogTerm(request.last_log_index);
-
-  RequestVoteReply reply;
-
-  /* Release lock before issuing RPC and waiting for response. */
-  /* TODO(mtomic): Revise how this will work with RPC cancellation. */
-  lock.unlock();
-  bool ok = network_.SendRequestVote(peer_id, request, reply);
-  lock.lock();
-
-  /* TODO(mtomic): Maybe implement exponential backoff. */
-  if (!ok) {
-    peer_state.backoff_until = Clock::now() + config_.rpc_backoff;
-    return;
-  }
-
-  if (term_ != request.candidate_term || mode_ != RaftMode::CANDIDATE ||
-      exiting_) {
-    LogInfo("Ignoring RequestVote RPC reply from {}", peer_id);
-    return;
-  }
-
-  if (CandidateOrLeaderNoteTerm(reply.term)) {
-    state_changed_.notify_all();
-    return;
-  }
-
-  DCHECK(reply.term == term_) << "Stale RequestVote RPC reply";
-
-  peer_state.request_vote_done = true;
-
-  if (reply.vote_granted) {
-    peer_state.voted_for_me = true;
-    LogInfo("Got vote from {}", peer_id);
-
-    if (CountVotes()) {
-      LogInfo("Elected as leader.");
-      CandidateTransitionToLeader();
-    }
-  } else {
-    LogInfo("Vote denied from {}", peer_id);
-  }
-
-  state_changed_.notify_all();
-}
-
-template <class State>
-void RaftMemberImpl<State>::AdvanceCommitIndex() {
-  DCHECK(mode_ == RaftMode::LEADER)
-      << "`AdvanceCommitIndex` can only be called from leader mode";
-
-  std::vector<LogIndex> match_indices;
-  for (const auto &peer : peer_states_) {
-    match_indices.push_back(peer.second->match_index);
-  }
-  match_indices.push_back(storage_.GetLastLogIndex());
-  std::sort(match_indices.begin(), match_indices.end(),
-            std::greater<LogIndex>());
-  LogIndex new_commit_index_ = match_indices[(config_.members.size() - 1) / 2];
-
-  LogInfo("Trying to advance commit index {} to {}", commit_index_,
-          new_commit_index_);
-
-  /* This can happen because we reset `match_index` to 0 for every peer when
-   * elected. */
-  if (commit_index_ >= new_commit_index_) {
-    return;
-  }
-
-  /* [Raft thesis, section 3.6.2]
-   * (...) Raft never commits log entries from previous terms by counting
-   * replicas. Only log entries from the leader's current term are committed by
-   * counting replicas; once an entry from the current term has been committed
-   * in this way, then all prior entries are committed indirectly because of the
-   * Log Matching Property." */
-  if (storage_.GetLogTerm(new_commit_index_) != term_) {
-    LogInfo("Cannot commit log entry from previous term");
-    return;
-  }
-
-  commit_index_ = std::max(commit_index_, new_commit_index_);
-}
-
-template <class State>
-void RaftMemberImpl<State>::AppendEntries(const std::string &peer_id,
-                                          RaftPeerState &peer_state,
-                                          std::unique_lock<std::mutex> &lock) {
-  LogInfo("Appending entries to {}", peer_id);
-
-  AppendEntriesRequest<State> request;
-  request.leader_term = term_;
-  request.leader_id = id_;
-
-  request.prev_log_index = peer_state.next_index - 1;
-  request.prev_log_term = storage_.GetLogTerm(peer_state.next_index - 1);
-
-  if (!peer_state.suppress_log_entries &&
-      peer_state.next_index <= storage_.GetLastLogIndex()) {
-    request.entries = storage_.GetLogSuffix(peer_state.next_index);
-  } else {
-    request.entries = {};
-  }
-
-  request.leader_commit = commit_index_;
-
-  AppendEntriesReply reply;
-
-  /* Release lock before issuing RPC and waiting for response. */
-  /* TODO(mtomic): Revise how this will work with RPC cancellation. */
-  lock.unlock();
-  bool ok = network_.SendAppendEntries(peer_id, request, reply);
-  lock.lock();
-
-  /* TODO(mtomic): Maybe implement exponential backoff. */
-  if (!ok) {
-    /* There is probably something wrong with this peer, let's avoid sending log
-     * entries. */
-    peer_state.suppress_log_entries = true;
-    peer_state.backoff_until = Clock::now() + config_.rpc_backoff;
-    return;
-  }
-
-  if (term_ != request.leader_term || exiting_) {
-    return;
-  }
-
-  if (CandidateOrLeaderNoteTerm(reply.term)) {
-    state_changed_.notify_all();
-    return;
-  }
-
-  DCHECK(mode_ == RaftMode::LEADER)
-      << "Elected leader for term should never change";
-  DCHECK(reply.term == term_) << "Got stale AppendEntries reply";
-
-  if (reply.success) {
-    /* We've found a match, we can start sending log entries. */
-    peer_state.suppress_log_entries = false;
-
-    LogIndex new_match_index = request.prev_log_index + request.entries.size();
-    DCHECK(peer_state.match_index <= new_match_index)
-        << "`match_index` should increase monotonically within a term";
-    peer_state.match_index = new_match_index;
-    AdvanceCommitIndex();
-    peer_state.next_index = peer_state.match_index + 1;
-    peer_state.next_heartbeat_time = Clock::now() + config_.heartbeat_interval;
-  } else {
-    DCHECK(peer_state.next_index > 1)
-        << "Log replication should not fail for first log entry.";
-    --peer_state.next_index;
-  }
-
-  state_changed_.notify_all();
-}
-
-template <class State>
-RequestVoteReply RaftMemberImpl<State>::OnRequestVote(
-    const RequestVoteRequest &request) {
-  std::lock_guard<std::mutex> lock(mutex_);
-  LogInfo("RequestVote RPC request from {}", request.candidate_id);
-
-  RequestVoteReply reply;
-
-  /* [Raft thesis, Section 3.3]
-   * "If a server receives a request with a stale term number, it rejects the
-   * request." */
-  if (request.candidate_term < term_) {
-    reply.term = term_;
-    reply.vote_granted = false;
-    return reply;
-  }
-
-  /* [Raft thesis, Section 3.3]
-   * "Current terms are exchanged whenever servers communicate; if one server's
-   * current term is smaller than the other's, then it updates its current term
-   * to the larger value. If a candidate or leader discovers that its term is
-   * out of date, it immediately reverts to follower state." */
-  if (request.candidate_term > term_) {
-    if (mode_ != RaftMode::FOLLOWER) {
-      CandidateOrLeaderTransitionToFollower();
-    }
-    UpdateTermAndVotedFor(request.candidate_term, {});
-  }
-
-  /* [Raft thesis, Section 3.6.1]
-   * "Raft uses the voting process to prevent a candidate from winning an
-   * election unless its log contains all committed entries. (...) The
-   * RequestVote RPC implements this restriction: the RPC includes information
-   * about the candidate's log, and the voter denies its vote if its own log is
-   * more up-to-date than that of the candidate. Raft determines which of two
-   * logs is more up-to-date by comparing the index and term of the last entries
-   * in the logs. If the logs have last entries with different terms, then the
-   * log with the later term is more up-to-date. If the logs end with the same
-   * term, then whichever log is longer is more up-to-date." */
-  LogIndex my_last_log_index = storage_.GetLastLogIndex();
-  TermId my_last_log_term = storage_.GetLogTerm(my_last_log_index);
-  if (my_last_log_term > request.last_log_term ||
-      (my_last_log_term == request.last_log_term &&
-       my_last_log_index > request.last_log_index)) {
-    reply.term = term_;
-    reply.vote_granted = false;
-    return reply;
-  }
-
-  /* [Raft thesis, Section 3.4]
-   * "Each server will vote for at most one candidate in a given term, on a
-   * first-come-first-served basis."
-   */
-
-  /* We voted for someone else in this term. */
-  if (request.candidate_term == term_ && voted_for_ &&
-      *voted_for_ != request.candidate_id) {
-    reply.term = term_;
-    reply.vote_granted = false;
-    return reply;
-  }
-
-  /* Now we know we will vote for this candidate, because its term is at least
-   * as big as ours and we haven't voted for anyone else. */
-  UpdateTermAndVotedFor(request.candidate_term, request.candidate_id);
-
-  /* [Raft thesis, Section 3.4]
-   * A server remains in follower state as long as it receives valid RPCs from a
-   * leader or candidate. */
-  SetElectionTimer();
-  state_changed_.notify_all();
-
-  reply.term = request.candidate_term;
-  reply.vote_granted = true;
-  return reply;
-}
-
-template <class State>
-AppendEntriesReply RaftMemberImpl<State>::OnAppendEntries(
-    const AppendEntriesRequest<State> &request) {
-  std::lock_guard<std::mutex> lock(mutex_);
-  LogInfo("AppendEntries RPC request from {}", request.leader_id);
-
-  AppendEntriesReply reply;
-
-  /* [Raft thesis, Section 3.3]
-   * "If a server receives a request with a stale term number, it rejects the
-   * request." */
-  if (request.leader_term < term_) {
-    reply.term = term_;
-    reply.success = false;
-    return reply;
-  }
-
-  /* [Raft thesis, Section 3.3]
-   * "Current terms are exchanged whenever servers communicate; if one server's
-   * current term is smaller than the other's, then it updates its current term
-   * to the larger value. If a candidate or leader discovers that its term is
-   * out of date, it immediately reverts to follower state." */
-  if (request.leader_term > term_) {
-    if (mode_ != RaftMode::FOLLOWER) {
-      CandidateOrLeaderTransitionToFollower();
-    }
-    UpdateTermAndVotedFor(request.leader_term, {});
-  }
-
-  /* [Raft thesis, Section 3.4]
-   * "While waiting for votes, a candidate may receive an AppendEntries RPC from
-   * another server claiming to be leader. If the leader's term (included in its
-   * RPC) is at least as large as the candidate's current term, then the
-   * candidate recognizes the leader as legitimate and returns to follower
-   * state." */
-  if (mode_ == RaftMode::CANDIDATE && request.leader_term == term_) {
-    CandidateOrLeaderTransitionToFollower();
-  }
-
-  DCHECK(mode_ != RaftMode::LEADER)
-      << "Leader cannot accept `AppendEntries` RPC";
-  DCHECK(term_ == request.leader_term) << "Term should be equal to request "
-                                          "term when accepting `AppendEntries` "
-                                          "RPC";
-
-  leader_ = request.leader_id;
-
-  /* [Raft thesis, Section 3.4]
-   * A server remains in follower state as long as it receives valid RPCs from a
-   * leader or candidate. */
-  SetElectionTimer();
-  state_changed_.notify_all();
-
-  /* [Raft thesis, Section 3.5]
-   * "When sending an AppendEntries RPC, the leader includes the index and term
-   * of the entry in its log that immediately precedes the new entries. If the
-   * follower does not find an entry in its log with the same index and term,
-   * then it refuses the new entries." */
-  if (request.prev_log_index > storage_.GetLastLogIndex() ||
-      storage_.GetLogTerm(request.prev_log_index) != request.prev_log_term) {
-    reply.term = term_;
-    reply.success = false;
-    return reply;
-  }
-
-  /* [Raft thesis, Section 3.5]
-   * "To bring a follower's log into consistency with its own, the leader must
-   * find the latest log entry where the two logs agree, delete any entries in
-   * the follower's log after that point, and send the follower all of the
-   * leader's entries after that point." */
-
-  /* Entry at `request.prev_log_index` is the last entry where ours and leader's
-   * logs agree. It's time to replace the tail of the log with new entries from
-   * the leader. We have to be careful here as duplicated AppendEntries RPCs
-   * could cause data loss.
-   *
-   * There is a possibility that an old AppendEntries RPC is duplicated and
-   * received after processing newer one. For example, leader appends entry 3
-   * and then entry 4, but follower receives entry 3, then entry 4, and then
-   * entry 3 again. We have to be careful not to delete entry 4 from log when
-   * processing the last RPC. */
-  LogIndex index = request.prev_log_index;
-  auto it = request.entries.begin();
-  for (; it != request.entries.end(); ++it) {
-    ++index;
-    if (index > storage_.GetLastLogIndex()) {
-      break;
-    }
-    if (storage_.GetLogTerm(index) != it->term) {
-      LogInfo("Truncating log suffix from index {}", index);
-      DCHECK(commit_index_ < index)
-          << "Committed entries should never be truncated from the log";
-      storage_.TruncateLogSuffix(index);
-      break;
-    }
-  }
-
-  LogInfo("Appending {} out of {} logs from {}.", request.entries.end() - it,
-          request.entries.size(), request.leader_id);
-
-  for (; it != request.entries.end(); ++it) {
-    storage_.AppendLogEntry(*it);
-  }
-
-  commit_index_ = std::max(commit_index_, request.leader_commit);
-
-  /* Let's bump election timer once again, we don't want to take down the leader
-   * because of our long disk writes. */
-  SetElectionTimer();
-  state_changed_.notify_all();
-
-  reply.term = term_;
-  reply.success = true;
-  return reply;
-}
-
-template <class State>
-ClientResult RaftMemberImpl<State>::AddCommand(
-    const typename State::Change &command, bool blocking) {
-  std::unique_lock<std::mutex> lock(mutex_);
-  if (mode_ != RaftMode::LEADER) {
-    return ClientResult::NOT_LEADER;
-  }
-
-  LogEntry<State> entry;
-  entry.term = term_;
-  entry.command = command;
-  storage_.AppendLogEntry(entry);
-
-  // Entry is already replicated if this is a single node cluster.
-  AdvanceCommitIndex();
-
-  state_changed_.notify_all();
-
-  if (!blocking) {
-    return ClientResult::OK;
-  }
-
-  LogIndex index = storage_.GetLastLogIndex();
-
-  while (!exiting_ && term_ == entry.term) {
-    if (commit_index_ >= index) {
-      return ClientResult::OK;
-    }
-    state_changed_.wait(lock);
-  }
-
-  return ClientResult::NOT_LEADER;
-}
-
-}  // namespace impl
-
-template <class State>
-RaftMember<State>::RaftMember(RaftNetworkInterface<State> &network,
-                              RaftStorageInterface<State> &storage,
-                              const MemberId &id, const RaftConfig &config)
-    : network_(network), impl_(network, storage, id, config) {
-  timer_thread_ =
-      std::thread(&impl::RaftMemberImpl<State>::TimerThreadMain, &impl_);
-
-  for (const auto &peer_id : config.members) {
-    if (peer_id != id) {
-      peer_threads_.emplace_back(&impl::RaftMemberImpl<State>::PeerThreadMain,
-                                 &impl_, peer_id);
-    }
-  }
-
-  network_.Start(*this);
-}
-
-template <class State>
-RaftMember<State>::~RaftMember() {
-  impl_.Stop();
-  timer_thread_.join();
-
-  for (auto &peer_thread : peer_threads_) {
-    peer_thread.join();
-  }
-}
-
-template <class State>
-ClientResult RaftMember<State>::AddCommand(
-    const typename State::Change &command, bool blocking) {
-  return impl_.AddCommand(command, blocking);
-}
-
-template <class State>
-RequestVoteReply RaftMember<State>::OnRequestVote(
-    const RequestVoteRequest &request) {
-  return impl_.OnRequestVote(request);
-}
-
-template <class State>
-AppendEntriesReply RaftMember<State>::OnAppendEntries(
-    const AppendEntriesRequest<State> &request) {
-  return impl_.OnAppendEntries(request);
-}
-
-}  // namespace communication::raft
diff --git a/src/communication/raft/raft.hpp b/src/communication/raft/raft.hpp
deleted file mode 100644
index 0c82671f3..000000000
--- a/src/communication/raft/raft.hpp
+++ /dev/null
@@ -1,277 +0,0 @@
-#pragma once
-
-#include <chrono>
-#include <condition_variable>
-#include <experimental/optional>
-#include <map>
-#include <mutex>
-#include <random>
-#include <set>
-#include <thread>
-#include <vector>
-
-#include "boost/serialization/vector.hpp"
-#include "glog/logging.h"
-
-#include "utils/serialization.hpp"
-
-namespace communication::raft {
-
-template <class State>
-class RaftMember;
-
-enum class ClientResult { NOT_LEADER, OK };
-
-using Clock = std::chrono::system_clock;
-using TimePoint = std::chrono::system_clock::time_point;
-
-using MemberId = std::string;
-using TermId = uint64_t;
-
-using ClientId = uint64_t;
-using CommandId = uint64_t;
-
-using LogIndex = uint64_t;
-
-template <class State>
-struct LogEntry {
-  int term;
-
-  std::experimental::optional<typename State::Change> command;
-
-  bool operator==(const LogEntry &rhs) const {
-    return term == rhs.term && command == rhs.command;
-  }
-  bool operator!=(const LogEntry &rhs) const { return !(*this == rhs); }
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &term;
-    ar &command;
-  }
-};
-
-/* Raft RPC requests and replies as described in [Raft thesis, Figure 3.1]. */
-struct RequestVoteRequest {
-  TermId candidate_term;
-  MemberId candidate_id;
-  LogIndex last_log_index;
-  TermId last_log_term;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &candidate_term;
-    ar &candidate_id;
-    ar &last_log_index;
-    ar &last_log_term;
-  }
-};
-
-struct RequestVoteReply {
-  TermId term;
-  bool vote_granted;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &term;
-    ar &vote_granted;
-  }
-};
-
-template <class State>
-struct AppendEntriesRequest {
-  TermId leader_term;
-  MemberId leader_id;
-  LogIndex prev_log_index;
-  TermId prev_log_term;
-  std::vector<LogEntry<State>> entries;
-  LogIndex leader_commit;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &leader_term;
-    ar &leader_id;
-    ar &prev_log_index;
-    ar &prev_log_term;
-    ar &entries;
-    ar &leader_commit;
-  }
-};
-
-struct AppendEntriesReply {
-  TermId term;
-  bool success;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &term;
-    ar &success;
-  }
-};
-
-template <class State>
-class RaftNetworkInterface {
- public:
-  virtual ~RaftNetworkInterface() = default;
-
-  /* These functions return false if the RPC failed for some reason (e.g. cannot
-   * establish connection or request cancelled). Otherwise
-   * `reply` contains response from peer. */
-  virtual bool SendRequestVote(const MemberId &recipient,
-                               const RequestVoteRequest &request,
-                               RequestVoteReply &reply) = 0;
-
-  virtual bool SendAppendEntries(const MemberId &recipient,
-                                 const AppendEntriesRequest<State> &request,
-                                 AppendEntriesReply &reply) = 0;
-
-  /* This will be called once the RaftMember is ready to start receiving RPCs.
-   */
-  virtual void Start(RaftMember<State> &member) = 0;
-};
-
-template <class State>
-class RaftStorageInterface {
- public:
-  virtual ~RaftStorageInterface() = default;
-
-  virtual void WriteTermAndVotedFor(
-      const TermId term,
-      const std::experimental::optional<std::string> &voted_for) = 0;
-  virtual std::pair<TermId, std::experimental::optional<MemberId>>
-  GetTermAndVotedFor() = 0;
-  virtual void AppendLogEntry(const LogEntry<State> &entry) = 0;
-  virtual TermId GetLogTerm(const LogIndex index) = 0;
-  virtual LogEntry<State> GetLogEntry(const LogIndex index) = 0;
-  virtual std::vector<LogEntry<State>> GetLogSuffix(const LogIndex index) = 0;
-  virtual LogIndex GetLastLogIndex() = 0;
-  virtual void TruncateLogSuffix(const LogIndex index) = 0;
-};
-
-struct RaftConfig {
-  std::vector<MemberId> members;
-  std::chrono::milliseconds leader_timeout_min;
-  std::chrono::milliseconds leader_timeout_max;
-  std::chrono::milliseconds heartbeat_interval;
-  std::chrono::milliseconds rpc_backoff;
-};
-
-namespace impl {
-
-enum class RaftMode { FOLLOWER, CANDIDATE, LEADER };
-
-struct RaftPeerState {
-  bool request_vote_done;
-  bool voted_for_me;
-  LogIndex match_index;
-  LogIndex next_index;
-  bool suppress_log_entries;
-  Clock::time_point next_heartbeat_time;
-  Clock::time_point backoff_until;
-};
-
-template <class State>
-class RaftMemberImpl {
- public:
-  explicit RaftMemberImpl(RaftNetworkInterface<State> &network,
-                          RaftStorageInterface<State> &storage,
-                          const MemberId &id, const RaftConfig &config);
-
-  ~RaftMemberImpl();
-
-  void Stop();
-
-  void TimerThreadMain();
-  void PeerThreadMain(std::string peer_id);
-
-  void UpdateTermAndVotedFor(
-      const TermId new_term,
-      const std::experimental::optional<MemberId> &new_voted_for);
-  void CandidateOrLeaderTransitionToFollower();
-  void CandidateTransitionToLeader();
-  bool CandidateOrLeaderNoteTerm(const TermId new_term);
-
-  void StartNewElection();
-  void SetElectionTimer();
-  bool CountVotes();
-  void RequestVote(const MemberId &peer_id, RaftPeerState &peer_state,
-                   std::unique_lock<std::mutex> &lock);
-
-  void AdvanceCommitIndex();
-  void AppendEntries(const MemberId &peer_id, RaftPeerState &peer_state,
-                     std::unique_lock<std::mutex> &lock);
-
-  RequestVoteReply OnRequestVote(const RequestVoteRequest &request);
-  AppendEntriesReply OnAppendEntries(
-      const AppendEntriesRequest<State> &request);
-
-  ClientResult AddCommand(const typename State::Change &command, bool blocking);
-
-  template <class... Args>
-  void LogInfo(const std::string &, Args &&...);
-
-  RaftNetworkInterface<State> &network_;
-  RaftStorageInterface<State> &storage_;
-
-  MemberId id_;
-  RaftConfig config_;
-
-  TermId term_;
-  RaftMode mode_ = RaftMode::FOLLOWER;
-  std::experimental::optional<MemberId> voted_for_ = std::experimental::nullopt;
-  std::experimental::optional<MemberId> leader_ = std::experimental::nullopt;
-
-  TimePoint next_election_time_;
-
-  LogIndex commit_index_ = 0;
-
-  bool exiting_ = false;
-
-  std::map<std::string, std::unique_ptr<RaftPeerState>> peer_states_;
-
-  /* This mutex protects all of the internal state. */
-  std::mutex mutex_;
-
-  /* Used to notify waiting threads that some of the internal state has changed.
-   * It is notified when the following events occur:
-   *  - mode change
-   *  - election start
-   *  - `next_election_time_` update on RPC from leader or candidate
-   *  - destructor is called
-   *  - `commit_index_` is advanced
-   */
-  std::condition_variable state_changed_;
-
-  std::mt19937_64 rng_ = std::mt19937_64(std::random_device{}());
-};
-
-}  // namespace impl
-
-template <class State>
-class RaftMember final {
- public:
-  explicit RaftMember(RaftNetworkInterface<State> &network,
-                      RaftStorageInterface<State> &storage, const MemberId &id,
-                      const RaftConfig &config);
-  ~RaftMember();
-
-  ClientResult AddCommand(const typename State::Change &command, bool blocking);
-
-  RequestVoteReply OnRequestVote(const RequestVoteRequest &request);
-  AppendEntriesReply OnAppendEntries(
-      const AppendEntriesRequest<State> &request);
-
- private:
-  RaftNetworkInterface<State> &network_;
-  impl::RaftMemberImpl<State> impl_;
-
-  /* Timer thread for triggering elections. */
-  std::thread timer_thread_;
-
-  /* One thread per peer for outgoing RPCs. */
-  std::vector<std::thread> peer_threads_;
-};
-
-}  // namespace communication::raft
-
-#include "raft-inl.hpp"
diff --git a/src/communication/raft/rpc.hpp b/src/communication/raft/rpc.hpp
deleted file mode 100644
index 4dddf66d5..000000000
--- a/src/communication/raft/rpc.hpp
+++ /dev/null
@@ -1,117 +0,0 @@
-#pragma once
-
-#include <unordered_map>
-
-#include "glog/logging.h"
-
-#include "communication/raft/network_common.hpp"
-#include "communication/raft/raft.hpp"
-#include "communication/rpc/client.hpp"
-#include "communication/rpc/server.hpp"
-#include "io/network/endpoint.hpp"
-
-/* Implementation of `RaftNetworkInterface` using RPC. Raft RPC requests and
- * responses are wrapped in `PeerRpcRequest` and `PeerRpcReply`. */
-
-// TODO(mtomic): Unwrap RPCs and use separate request-response protocols instead
-// of `PeerProtocol`, or at least use a union to avoid sending unnecessary data
-// over the wire.
-
-namespace communication::raft {
-
-template <class State>
-using PeerProtocol = rpc::RequestResponse<PeerRpcRequest<State>, PeerRpcReply>;
-
-template <class State>
-class RpcNetwork : public RaftNetworkInterface<State> {
- public:
-  RpcNetwork(rpc::Server &server,
-             std::unordered_map<std::string, io::network::Endpoint> directory)
-      : server_(server), directory_(std::move(directory)) {}
-
-  virtual void Start(RaftMember<State> &member) override {
-    server_.Register<PeerProtocol<State>>(
-        [&member](const PeerRpcRequest<State> &request) {
-          auto reply = std::make_unique<PeerRpcReply>();
-          reply->type = request.type;
-          switch (request.type) {
-            case RpcType::REQUEST_VOTE:
-              reply->request_vote = member.OnRequestVote(request.request_vote);
-              break;
-            case RpcType::APPEND_ENTRIES:
-              reply->append_entries =
-                  member.OnAppendEntries(request.append_entries);
-              break;
-            default:
-              LOG(ERROR) << "Unknown RPC type: "
-                         << static_cast<int>(request.type);
-          }
-          return reply;
-        });
-  }
-
-  virtual bool SendRequestVote(const MemberId &recipient,
-                               const RequestVoteRequest &request,
-                               RequestVoteReply &reply) override {
-    PeerRpcRequest<State> req;
-    PeerRpcReply rep;
-
-    req.type = RpcType::REQUEST_VOTE;
-    req.request_vote = request;
-
-    if (!SendRpc(recipient, req, rep)) {
-      return false;
-    }
-
-    reply = rep.request_vote;
-    return true;
-  }
-
-  virtual bool SendAppendEntries(const MemberId &recipient,
-                                 const AppendEntriesRequest<State> &request,
-                                 AppendEntriesReply &reply) override {
-    PeerRpcRequest<State> req;
-    PeerRpcReply rep;
-
-    req.type = RpcType::APPEND_ENTRIES;
-    req.append_entries = request;
-
-    if (!SendRpc(recipient, req, rep)) {
-      return false;
-    }
-
-    reply = rep.append_entries;
-    return true;
-  }
-
- private:
-  bool SendRpc(const MemberId &recipient, const PeerRpcRequest<State> &request,
-               PeerRpcReply &reply) {
-    auto &client = GetClient(recipient);
-    auto response = client.template Call<PeerProtocol<State>>(request);
-
-    if (!response) {
-      return false;
-    }
-
-    reply = *response;
-    return true;
-  }
-
-  rpc::Client &GetClient(const MemberId &id) {
-    auto it = clients_.find(id);
-    if (it == clients_.end()) {
-      auto ne = directory_[id];
-      it = clients_.try_emplace(id, ne).first;
-    }
-    return it->second;
-  }
-
-  rpc::Server &server_;
-  // TODO(mtomic): how to update and distribute this?
-  std::unordered_map<MemberId, io::network::Endpoint> directory_;
-
-  std::unordered_map<MemberId, rpc::Client> clients_;
-};
-
-}  // namespace communication::raft
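A sketch of how `RpcNetwork` was wired up, assuming a static directory of member endpoints; the addresses, ports and member ids are made up, and the exact `io::network::Endpoint` constructor signature is assumed.

```cpp
// Sketch only: endpoints and member ids are illustrative.
#include <string>
#include <unordered_map>

#include "communication/raft/rpc.hpp"
#include "communication/raft/test_utils.hpp"
#include "communication/rpc/server.hpp"
#include "io/network/endpoint.hpp"

using namespace communication;
using State = raft::test_utils::DummyState;

int main() {
  // Each member runs its own RPC server...
  rpc::Server server(io::network::Endpoint("127.0.0.1", 10000));

  // ...and reaches the other members through a static directory.
  std::unordered_map<std::string, io::network::Endpoint> directory;
  directory.emplace("a", io::network::Endpoint("127.0.0.1", 10000));
  directory.emplace("b", io::network::Endpoint("127.0.0.1", 10001));
  directory.emplace("c", io::network::Endpoint("127.0.0.1", 10002));

  raft::RpcNetwork<State> network(server, directory);
  // `network` is then handed to a RaftMember; Start() registers the
  // PeerProtocol handler on `server` and dispatches incoming requests to
  // OnRequestVote / OnAppendEntries.
}
```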
diff --git a/src/communication/raft/storage/file.hpp b/src/communication/raft/storage/file.hpp
deleted file mode 100644
index e45b7011a..000000000
--- a/src/communication/raft/storage/file.hpp
+++ /dev/null
@@ -1,239 +0,0 @@
-/**
- * @file
- *
- * Raft log is stored inside a folder. Each log entry is stored in a file named
- * by its index. There is a special file named "metadata" which stores Raft
- * metadata and also the last log index, which is used on startup to identify
- * which log entry files are valid.
- */
-#pragma once
-
-#include <fcntl.h>
-
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "boost/iostreams/device/file_descriptor.hpp"
-#include "boost/iostreams/stream.hpp"
-
-#include "communication/raft/raft.hpp"
-#include "communication/raft/storage/memory.hpp"
-#include "utils/file.hpp"
-
-namespace communication::raft {
-
-struct SimpleFileStorageMetadata {
-  TermId term;
-  std::experimental::optional<MemberId> voted_for;
-  LogIndex last_log_index;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &term &voted_for &last_log_index;
-  }
-};
-
-template <class State>
-class SimpleFileStorage : public RaftStorageInterface<State> {
- public:
-  explicit SimpleFileStorage(const fs::path &parent_dir) : memory_storage_() {
-    try {
-      dir_ = utils::OpenDir(parent_dir);
-    } catch (std::system_error &e) {
-      LOG(FATAL) << fmt::format("Error opening log directory: {}", e.what());
-    }
-
-    auto md = utils::TryOpenFile(dir_, "metadata", O_RDONLY);
-    if (!md) {
-      LOG(WARNING) << fmt::format("No metadata file found in directory '{}'",
-                                  parent_dir);
-      return;
-    }
-
-    boost::iostreams::file_descriptor_source src(
-        md->Handle(),
-        boost::iostreams::file_descriptor_flags::never_close_handle);
-    boost::iostreams::stream<boost::iostreams::file_descriptor_source> is(src);
-    boost::archive::binary_iarchive iar(is);
-
-    SimpleFileStorageMetadata metadata;
-
-    try {
-      iar >> metadata;
-    } catch (boost::archive::archive_exception &e) {
-      LOG(FATAL) << "Failed to deserialize Raft metadata: " << e.what();
-    }
-
-    LOG(INFO) << fmt::format(
-        "Read term = {} and voted_for = {} from storage", metadata.term,
-        metadata.voted_for ? *metadata.voted_for : "(none)");
-
-    memory_storage_.term_ = metadata.term;
-    memory_storage_.voted_for_ = metadata.voted_for;
-    memory_storage_.log_.reserve(metadata.last_log_index);
-
-    for (LogIndex idx = 1; idx <= metadata.last_log_index; ++idx) {
-      utils::File entry_file;
-
-      try {
-        entry_file = utils::OpenFile(dir_, fmt::format("{}", idx), O_RDONLY);
-      } catch (std::system_error &e) {
-        LOG(FATAL) << fmt::format("Failed to open entry file {}: {}", idx,
-                                  e.what());
-      }
-
-      boost::iostreams::file_descriptor_source src(
-          entry_file.Handle(),
-          boost::iostreams::file_descriptor_flags::never_close_handle);
-      boost::iostreams::stream<boost::iostreams::file_descriptor_source> is(
-          src);
-      boost::archive::binary_iarchive iar(is);
-      LogEntry<State> entry;
-
-      try {
-        iar >> entry;
-        memory_storage_.log_.emplace_back(std::move(entry));
-      } catch (boost::archive::archive_exception &e) {
-        LOG(FATAL) << fmt::format("Failed to deserialize log entry {}: {}", idx,
-                                  e.what());
-      }
-    }
-
-    LOG(INFO) << fmt::format("Read {} log entries", metadata.last_log_index);
-  }
-
-  void WriteTermAndVotedFor(
-      TermId term,
-      const std::experimental::optional<MemberId> &voted_for) override {
-    memory_storage_.WriteTermAndVotedFor(term, voted_for);
-    WriteMetadata();
-
-    // Metadata file might be newly created so we have to fsync the directory.
-    try {
-      utils::Fsync(dir_);
-    } catch (std::system_error &e) {
-      LOG(FATAL) << fmt::format("Failed to fsync Raft log directory: {}",
-                                e.what());
-    }
-  }
-
-  std::pair<TermId, std::experimental::optional<MemberId>> GetTermAndVotedFor()
-      override {
-    return memory_storage_.GetTermAndVotedFor();
-  }
-
-  void AppendLogEntry(const LogEntry<State> &entry) override {
-    memory_storage_.AppendLogEntry(entry);
-
-    utils::File entry_file;
-
-    try {
-      entry_file = utils::OpenFile(
-          dir_, fmt::format("{}", memory_storage_.GetLastLogIndex()),
-          O_WRONLY | O_CREAT | O_TRUNC, 0644);
-    } catch (std::system_error &e) {
-      LOG(FATAL) << fmt::format("Failed to open log entry file: {}", e.what());
-    }
-
-    boost::iostreams::file_descriptor_sink sink(
-        entry_file.Handle(),
-        boost::iostreams::file_descriptor_flags::never_close_handle);
-    boost::iostreams::stream<boost::iostreams::file_descriptor_sink> os(sink);
-    boost::archive::binary_oarchive oar(os);
-
-    try {
-      oar << entry;
-      os.flush();
-    } catch (boost::archive::archive_exception &e) {
-      LOG(FATAL) << fmt::format("Failed to serialize log entry: {}", e.what());
-    }
-
-    try {
-      utils::Fsync(entry_file);
-    } catch (std::system_error &e) {
-      LOG(FATAL) << fmt::format("Failed to write log entry file to disk: {}",
-                                e.what());
-    }
-
-    // We update the metadata only after the log entry file is written to
-    // disk. This ensures that no file in range [1, last_log_index] is
-    // corrupted.
-    WriteMetadata();
-
-    try {
-      utils::Fsync(dir_);
-    } catch (std::system_error &e) {
-      LOG(FATAL) << fmt::format("Failed to fsync Raft log directory: {}",
-                                e.what());
-    }
-  }
-
-  TermId GetLogTerm(const LogIndex index) override {
-    return memory_storage_.GetLogTerm(index);
-  }
-
-  LogEntry<State> GetLogEntry(const LogIndex index) override {
-    return memory_storage_.GetLogEntry(index);
-  }
-
-  std::vector<LogEntry<State>> GetLogSuffix(const LogIndex index) override {
-    return memory_storage_.GetLogSuffix(index);
-  }
-
-  LogIndex GetLastLogIndex() override {
-    return memory_storage_.GetLastLogIndex();
-  }
-
-  void TruncateLogSuffix(const LogIndex index) override {
-    return memory_storage_.TruncateLogSuffix(index);
-  }
-
- private:
-  InMemoryStorage<State> memory_storage_;
-  utils::File dir_;
-
-  void WriteMetadata() {
-    // We first write data to a temporary file, ensure data is safely written
-    // to disk, and then rename the file. Since rename is an atomic operation,
-    // "metadata" file won't get corrupted in case of program crash.
-    utils::File md_tmp;
-    try {
-      md_tmp =
-          OpenFile(dir_, "metadata.new", O_WRONLY | O_CREAT | O_TRUNC, 0644);
-    } catch (std::system_error &e) {
-      LOG(FATAL) << fmt::format("Failed to open temporary metadata file: {}",
-                                e.what());
-    }
-
-    boost::iostreams::file_descriptor_sink sink(
-        md_tmp.Handle(),
-        boost::iostreams::file_descriptor_flags::never_close_handle);
-    boost::iostreams::stream<boost::iostreams::file_descriptor_sink> os(sink);
-    boost::archive::binary_oarchive oar(os);
-
-    try {
-      oar << SimpleFileStorageMetadata{
-          memory_storage_.GetTermAndVotedFor().first,
-          memory_storage_.GetTermAndVotedFor().second,
-          memory_storage_.GetLastLogIndex()};
-    } catch (boost::archive::archive_exception &e) {
-      LOG(FATAL) << "Error serializing Raft metadata";
-    }
-    os.flush();
-
-    try {
-      utils::Fsync(md_tmp);
-    } catch (std::system_error &e) {
-      LOG(FATAL) << fmt::format(
-          "Failed to write temporary metadata file to disk: {}", e.what());
-    }
-
-    try {
-      utils::Rename(dir_, "metadata.new", dir_, "metadata");
-    } catch (std::system_error &e) {
-      LOG(FATAL) << fmt::format("Failed to move temporary metadata file: {}",
-                                e.what());
-    }
-  }
-};
-
-}  // namespace communication::raft
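The metadata handling above relies on the classic write-temp-file, fsync, rename pattern. A standalone sketch of that pattern with plain POSIX calls (no dependency on the removed `utils::` helpers), ignoring short writes for brevity:

```cpp
#include <fcntl.h>
#include <unistd.h>

#include <cstdio>
#include <string>

// Atomically replaces `dir`/`name` with `data`. Returns false on any error.
bool AtomicWrite(const std::string &dir, const std::string &name,
                 const std::string &data) {
  std::string tmp_path = dir + "/" + name + ".new";
  int fd = open(tmp_path.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644);
  if (fd == -1) return false;

  // Write the whole buffer and make sure it reaches the disk.
  if (write(fd, data.data(), data.size()) !=
          static_cast<ssize_t>(data.size()) ||
      fsync(fd) == -1) {
    close(fd);
    return false;
  }
  close(fd);

  // rename() is atomic, so readers see either the old or the new file.
  if (std::rename(tmp_path.c_str(), (dir + "/" + name).c_str()) != 0)
    return false;

  // fsync the directory so the rename itself is durable.
  int dir_fd = open(dir.c_str(), O_RDONLY | O_DIRECTORY);
  if (dir_fd == -1) return false;
  bool ok = fsync(dir_fd) != -1;
  close(dir_fd);
  return ok;
}
```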
diff --git a/src/communication/raft/storage/memory.hpp b/src/communication/raft/storage/memory.hpp
deleted file mode 100644
index e280a29e9..000000000
--- a/src/communication/raft/storage/memory.hpp
+++ /dev/null
@@ -1,63 +0,0 @@
-#pragma once
-
-#include "communication/raft/raft.hpp"
-
-namespace communication::raft {
-
-template <class State>
-class InMemoryStorage : public RaftStorageInterface<State> {
- public:
-  InMemoryStorage()
-      : term_(0), voted_for_(std::experimental::nullopt), log_() {}
-
-  InMemoryStorage(const TermId term,
-                  const std::experimental::optional<std::string> &voted_for,
-                  const std::vector<LogEntry<State>> log)
-      : term_(term), voted_for_(voted_for), log_(log) {}
-
-  void WriteTermAndVotedFor(
-      const TermId term,
-      const std::experimental::optional<std::string> &voted_for) {
-    term_ = term;
-    voted_for_ = voted_for;
-  }
-
-  std::pair<TermId, std::experimental::optional<MemberId>>
-  GetTermAndVotedFor() {
-    return {term_, voted_for_};
-  }
-
-  void AppendLogEntry(const LogEntry<State> &entry) { log_.push_back(entry); }
-
-  TermId GetLogTerm(const LogIndex index) {
-    CHECK(0 <= index && index <= log_.size())
-        << "Trying to read nonexistent log entry";
-    return index > 0 ? log_[index - 1].term : 0;
-  }
-
-  LogEntry<State> GetLogEntry(const LogIndex index) {
-    CHECK(1 <= index && index <= log_.size())
-        << "Trying to get nonexistent log entry";
-    return log_[index - 1];
-  }
-
-  std::vector<LogEntry<State>> GetLogSuffix(const LogIndex index) {
-    CHECK(1 <= index && index <= log_.size())
-        << "Trying to get nonexistent log entries";
-    return std::vector<LogEntry<State>>(log_.begin() + index - 1, log_.end());
-  }
-
-  LogIndex GetLastLogIndex(void) { return log_.size(); }
-
-  void TruncateLogSuffix(const LogIndex index) {
-    CHECK(1 <= index && index <= log_.size())
-        << "Trying to remove nonexistent log entries";
-    log_.erase(log_.begin() + index - 1, log_.end());
-  }
-
-  TermId term_;
-  std::experimental::optional<MemberId> voted_for_;
-  std::vector<LogEntry<State>> log_;
-};
-
-}  // namespace communication::raft
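The bounds check in `TruncateLogSuffix` needs the explicit `&&`: a chained comparison such as `1 <= index <= log_.size()` compiles, but it is parsed as `(1 <= index) <= log_.size()` and accepts almost any index. A tiny standalone illustration:

```cpp
#include <cassert>
#include <cstddef>

int main() {
  std::size_t size = 3;
  std::size_t index = 7;  // clearly out of range

  // Parsed as (1 <= index) <= size, i.e. true (1) <= 3, which is true.
  bool chained = 1 <= index <= size;
  assert(chained);  // passes even though index > size

  // The intended check.
  bool correct = 1 <= index && index <= size;
  assert(!correct);
}
```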
diff --git a/src/communication/raft/test_utils.hpp b/src/communication/raft/test_utils.hpp
deleted file mode 100644
index 97b212030..000000000
--- a/src/communication/raft/test_utils.hpp
+++ /dev/null
@@ -1,141 +0,0 @@
-#include <functional>
-
-#include "communication/raft/network_common.hpp"
-#include "communication/raft/raft.hpp"
-
-namespace communication::raft::test_utils {
-
-struct DummyState {
-  struct Change {
-    bool operator==(const Change &) const { return true; }
-    bool operator!=(const Change &) const { return false; }
-
-    template <class TArchive>
-    void serialize(TArchive &, unsigned int) {}
-  };
-
-  template <class TArchive>
-  void serialize(TArchive &, unsigned int) {}
-};
-
-struct IntState {
-  int x;
-
-  struct Change {
-    enum Type { ADD, SUB, SET };
-    Type t;
-    int d;
-
-    bool operator==(const Change &rhs) const {
-      return t == rhs.t && d == rhs.d;
-    }
-    bool operator!=(const Change &rhs) const { return !(*this == rhs); };
-
-    template <class TArchive>
-    void serialize(TArchive &ar, unsigned int) {
-      ar &t;
-      ar &d;
-    }
-  };
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &x;
-  }
-};
-
-/* Implementations of `RaftNetworkInterface` for simpler unit testing. */
-
-/* `NoOpNetworkInterface` doesn't do anything -- it's like a server disconnected
- * from the network. */
-template <class State>
-class NoOpNetworkInterface : public RaftNetworkInterface<State> {
- public:
-  ~NoOpNetworkInterface() {}
-
-  virtual bool SendRequestVote(const MemberId &, const RequestVoteRequest &,
-                               RequestVoteReply &) override {
-    return false;
-  }
-
-  virtual bool SendAppendEntries(const MemberId &,
-                                 const AppendEntriesRequest<State> &,
-                                 AppendEntriesReply &) override {
-    return false;
-  }
-
-  virtual void Start(RaftMember<State> &) override {}
-};
-
-/* `NextReplyNetworkInterface` has two fields: `on_request_` and the optional
- * `next_reply_`. `on_request_` is a callback that will be called before
- * processing requests. If `next_reply_` is not set, the `Send*` functions
- * return false; otherwise they return that reply. */
-template <class State>
-class NextReplyNetworkInterface : public RaftNetworkInterface<State> {
- public:
-  ~NextReplyNetworkInterface() {}
-
-  virtual bool SendRequestVote(const MemberId &,
-                               const RequestVoteRequest &request,
-                               RequestVoteReply &reply) override {
-    PeerRpcRequest<State> req;
-    req.type = RpcType::REQUEST_VOTE;
-    req.request_vote = request;
-    on_request_(req);
-    if (!next_reply_) {
-      return false;
-    }
-    DCHECK(next_reply_->type == RpcType::REQUEST_VOTE)
-        << "`next_reply_` type doesn't match the request type";
-    reply = next_reply_->request_vote;
-    return true;
-  }
-
-  virtual bool SendAppendEntries(const MemberId &,
-                                 const AppendEntriesRequest<State> &request,
-                                 AppendEntriesReply &reply) override {
-    PeerRpcRequest<State> req;
-    req.type = RpcType::APPEND_ENTRIES;
-    req.append_entries = request;
-    on_request_(req);
-    if (!next_reply_) {
-      return false;
-    }
-    DCHECK(next_reply_->type == RpcType::APPEND_ENTRIES)
-        << "`next_reply_` type doesn't match the request type";
-    reply = next_reply_->append_entries;
-    return true;
-  }
-
-  virtual void Start(RaftMember<State> &) override {}
-
-  std::function<void(const PeerRpcRequest<State> &)> on_request_;
-  std::experimental::optional<PeerRpcReply> next_reply_;
-};
-
-template <class State>
-class NoOpStorageInterface : public RaftStorageInterface<State> {
- public:
-  NoOpStorageInterface() {}
-
-  void WriteTermAndVotedFor(const TermId,
-                            const std::experimental::optional<std::string> &) {}
-
-  std::pair<TermId, std::experimental::optional<MemberId>>
-  GetTermAndVotedFor() {
-    return {0, {}};
-  }
-  void AppendLogEntry(const LogEntry<State> &) {}
-  TermId GetLogTerm(const LogIndex) { return 0; }
-  LogEntry<State> GetLogEntry(const LogIndex) { assert(false); }
-  std::vector<LogEntry<State>> GetLogSuffix(const LogIndex) { return {}; }
-  LogIndex GetLastLogIndex() { return 0; }
-  void TruncateLogSuffix(const LogIndex) {}
-
-  TermId term_;
-  std::experimental::optional<MemberId> voted_for_;
-  std::vector<LogEntry<State>> log_;
-};
-
-}  // namespace communication::raft::test_utils
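A sketch of a test harness around `NextReplyNetworkInterface`: record the type of every outgoing RPC and leave `next_reply_` unset so every send fails as if the peer were unreachable. Default-constructing the request/reply structs is an assumption made only for illustration.

```cpp
#include <vector>

#include "communication/raft/test_utils.hpp"

using namespace communication::raft;
using State = test_utils::DummyState;

int main() {
  test_utils::NextReplyNetworkInterface<State> network;

  // Record every outgoing request type.
  std::vector<RpcType> sent;
  network.on_request_ = [&sent](const PeerRpcRequest<State> &request) {
    sent.push_back(request.type);
  };

  RequestVoteRequest request{};
  RequestVoteReply reply{};
  bool ok = network.SendRequestVote("b", request, reply);
  // ok == false (no canned reply was set), sent == {RpcType::REQUEST_VOTE}
  (void)ok;
}
```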
diff --git a/src/communication/rpc/client.cpp b/src/communication/rpc/client.cpp
deleted file mode 100644
index aa4498a29..000000000
--- a/src/communication/rpc/client.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-#include <chrono>
-#include <thread>
-
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "boost/serialization/access.hpp"
-#include "boost/serialization/base_object.hpp"
-#include "boost/serialization/export.hpp"
-#include "boost/serialization/unique_ptr.hpp"
-#include "gflags/gflags.h"
-
-#include "communication/rpc/client.hpp"
-
-DEFINE_HIDDEN_bool(rpc_random_latency, false,
-                   "If a random wait should happen on each RPC call, to "
-                   "simulate network latency.");
-
-namespace communication::rpc {
-
-Client::Client(const io::network::Endpoint &endpoint) : endpoint_(endpoint) {}
-
-std::unique_ptr<Message> Client::Call(const Message &request) {
-  std::lock_guard<std::mutex> guard(mutex_);
-
-  if (FLAGS_rpc_random_latency) {
-    auto microseconds = (int)(1000 * rand_(gen_));
-    std::this_thread::sleep_for(std::chrono::microseconds(microseconds));
-  }
-
-  // Check if the connection is broken (if we haven't used the client for a
-  // long time the server could have died).
-  if (client_ && client_->ErrorStatus()) {
-    client_ = std::experimental::nullopt;
-  }
-
-  // Connect to the remote server.
-  if (!client_) {
-    client_.emplace();
-    if (!client_->Connect(endpoint_)) {
-      LOG(ERROR) << "Couldn't connect to remote address " << endpoint_;
-      client_ = std::experimental::nullopt;
-      return nullptr;
-    }
-  }
-
-  // Serialize and send request.
-  std::stringstream request_stream(std::ios_base::out | std::ios_base::binary);
-  {
-    boost::archive::binary_oarchive request_archive(request_stream);
-    // Serialize reference as pointer (to serialize the derived class). The
-    // request is read in protocol.cpp.
-    request_archive << &request;
-    // Archive destructor ensures everything is written.
-  }
-
-  const std::string &request_buffer = request_stream.str();
-  CHECK(request_buffer.size() <= std::numeric_limits<MessageSize>::max())
-      << fmt::format(
-             "Trying to send message of size {}, max message size is {}",
-             request_buffer.size(), std::numeric_limits<MessageSize>::max());
-
-  MessageSize request_data_size = request_buffer.size();
-  if (!client_->Write(reinterpret_cast<uint8_t *>(&request_data_size),
-                      sizeof(MessageSize), true)) {
-    LOG(ERROR) << "Couldn't send request size to " << client_->endpoint();
-    client_ = std::experimental::nullopt;
-    return nullptr;
-  }
-
-  if (!client_->Write(request_buffer)) {
-    LOG(ERROR) << "Couldn't send request data to " << client_->endpoint();
-    client_ = std::experimental::nullopt;
-    return nullptr;
-  }
-
-  // Receive response data size.
-  if (!client_->Read(sizeof(MessageSize))) {
-    LOG(ERROR) << "Couldn't get response from " << client_->endpoint();
-    client_ = std::experimental::nullopt;
-    return nullptr;
-  }
-  MessageSize response_data_size =
-      *reinterpret_cast<MessageSize *>(client_->GetData());
-  client_->ShiftData(sizeof(MessageSize));
-
-  // Receive response data.
-  if (!client_->Read(response_data_size)) {
-    LOG(ERROR) << "Couldn't get response from " << client_->endpoint();
-    client_ = std::experimental::nullopt;
-    return nullptr;
-  }
-
-  std::unique_ptr<Message> response;
-  {
-    std::stringstream response_stream(std::ios_base::in |
-                                      std::ios_base::binary);
-    response_stream.str(std::string(reinterpret_cast<char *>(client_->GetData()),
-                                    response_data_size));
-    boost::archive::binary_iarchive response_archive(response_stream);
-    response_archive >> response;
-  }
-
-  client_->ShiftData(response_data_size);
-
-  return response;
-}
-
-void Client::Abort() {
-  if (!client_) return;
-  // We need to call Shutdown on the client to abort any pending read or
-  // write operations.
-  client_->Shutdown();
-  client_ = std::experimental::nullopt;
-}
-
-}  // namespace communication::rpc
diff --git a/src/communication/rpc/client.hpp b/src/communication/rpc/client.hpp
deleted file mode 100644
index b5712028e..000000000
--- a/src/communication/rpc/client.hpp
+++ /dev/null
@@ -1,76 +0,0 @@
-#pragma once
-
-#include <experimental/optional>
-#include <memory>
-#include <mutex>
-#include <random>
-
-#include <glog/logging.h>
-
-#include "communication/client.hpp"
-#include "communication/rpc/messages.hpp"
-#include "io/network/endpoint.hpp"
-#include "utils/demangle.hpp"
-
-namespace communication::rpc {
-
-// Client is thread safe, but it is recommended to use thread_local clients.
-class Client {
- public:
-  Client(const io::network::Endpoint &endpoint);
-
-  // The Call function can initiate only one request at a time. It blocks
-  // until there is a response; if there was an error, nullptr is returned.
-  template <typename TRequestResponse, typename... Args>
-  std::unique_ptr<typename TRequestResponse::Response> Call(Args &&... args) {
-    using Req = typename TRequestResponse::Request;
-    using Res = typename TRequestResponse::Response;
-    static_assert(std::is_base_of<Message, Req>::value,
-                  "TRequestResponse::Request must be derived from Message");
-    static_assert(std::is_base_of<Message, Res>::value,
-                  "TRequestResponse::Response must be derived from Message");
-    auto request = Req(std::forward<Args>(args)...);
-
-    if (VLOG_IS_ON(12)) {
-      auto req_type = utils::Demangle(request.type_index().name());
-      LOG(INFO) << "[RpcClient] sent " << (req_type ? req_type.value() : "");
-    }
-
-    std::unique_ptr<Message> response = Call(request);
-    auto *real_response = dynamic_cast<Res *>(response.get());
-    if (!real_response && response) {
-      // Since message_id was checked in private Call function, this means
-      // something is very wrong (probably on the server side).
-      LOG(ERROR) << "Message response was of unexpected type";
-      client_ = std::experimental::nullopt;
-      return nullptr;
-    }
-
-    if (VLOG_IS_ON(12) && response) {
-      auto res_type = utils::Demangle(response->type_index().name());
-      LOG(INFO) << "[RpcClient] received "
-                << (res_type ? res_type.value() : "");
-    }
-
-    response.release();
-    return std::unique_ptr<Res>(real_response);
-  }
-
-  // Call this function from another thread to abort a pending RPC call.
-  void Abort();
-
- private:
-  std::unique_ptr<Message> Call(const Message &request);
-
-  io::network::Endpoint endpoint_;
-  std::experimental::optional<communication::Client> client_;
-
-  std::mutex mutex_;
-
-  // Random generator for simulated network latency (enable with a flag).
-  // Distribution parameters are chosen by rule of thumb.
-  std::mt19937 gen_{std::random_device{}()};
-  std::lognormal_distribution<> rand_{0.0, 1.11};
-};
-
-}  // namespace communication::rpc
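A minimal sketch of the calling side. `EchoReq`, `EchoRes` and `EchoRpc` are hypothetical and exist only for illustration, defined with the macros from messages.hpp; real messages are additionally registered with `BOOST_CLASS_EXPORT` (see messages-inl.hpp) so they can be serialized through the `Message` base pointer.

```cpp
// Hypothetical RPC, for illustration only -- it does not exist in the tree.
#include <string>

#include "communication/rpc/client.hpp"
#include "communication/rpc/messages.hpp"
#include "io/network/endpoint.hpp"

RPC_SINGLE_MEMBER_MESSAGE(EchoReq, std::string);
RPC_SINGLE_MEMBER_MESSAGE(EchoRes, std::string);
using EchoRpc = communication::rpc::RequestResponse<EchoReq, EchoRes>;

void Example(const io::network::Endpoint &server_endpoint) {
  communication::rpc::Client client(server_endpoint);

  // Blocks until the response arrives; nullptr means a network error, and
  // the connection is re-established on the next call.
  auto response = client.Call<EchoRpc>("hello");
  if (!response) return;  // handle the failure (retry, give up, ...)

  // response->member holds whatever the server's callback put in EchoRes.
}
```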
diff --git a/src/communication/rpc/client_pool.hpp b/src/communication/rpc/client_pool.hpp
deleted file mode 100644
index dbdf23d64..000000000
--- a/src/communication/rpc/client_pool.hpp
+++ /dev/null
@@ -1,46 +0,0 @@
-#pragma once
-
-#include <mutex>
-#include <stack>
-
-#include "communication/rpc/client.hpp"
-
-namespace communication::rpc {
-
-/**
- * A simple client pool that creates new RPC clients on demand. Useful when you
- * want to send RPCs to the same server from multiple threads without them
- * blocking each other.
- */
-class ClientPool {
- public:
-  ClientPool(const io::network::Endpoint &endpoint) : endpoint_(endpoint) {}
-
-  template <typename TRequestResponse, typename... Args>
-  std::unique_ptr<typename TRequestResponse::Response> Call(Args &&... args) {
-    std::unique_ptr<Client> client;
-
-    std::unique_lock<std::mutex> lock(mutex_);
-    if (unused_clients_.empty()) {
-      client = std::make_unique<Client>(endpoint_);
-    } else {
-      client = std::move(unused_clients_.top());
-      unused_clients_.pop();
-    }
-    lock.unlock();
-
-    auto resp = client->Call<TRequestResponse>(std::forward<Args>(args)...);
-
-    lock.lock();
-    unused_clients_.push(std::move(client));
-    return resp;
-  };
-
- private:
-  io::network::Endpoint endpoint_;
-
-  std::mutex mutex_;
-  std::stack<std::unique_ptr<Client>> unused_clients_;
-};
-
-}  // namespace communication::rpc
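A sketch of sharing one `ClientPool` between threads; `EchoRpc` is the hypothetical request/response pair from the client sketch above.

```cpp
#include <thread>
#include <vector>

#include "communication/rpc/client_pool.hpp"

void Example(const io::network::Endpoint &server_endpoint) {
  communication::rpc::ClientPool pool(server_endpoint);

  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i) {
    threads.emplace_back([&pool]() {
      // Each call grabs an idle client from the pool (or creates a new one),
      // so the threads don't serialize behind a single connection.
      auto response = pool.Call<EchoRpc>("ping");
      (void)response;
    });
  }
  for (auto &thread : threads) thread.join();
}
```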
diff --git a/src/communication/rpc/messages-inl.hpp b/src/communication/rpc/messages-inl.hpp
deleted file mode 100644
index 757009313..000000000
--- a/src/communication/rpc/messages-inl.hpp
+++ /dev/null
@@ -1,134 +0,0 @@
-#pragma once
-
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "boost/serialization/export.hpp"
-
-#include "database/state_delta.hpp"
-#include "distributed/coordination_rpc_messages.hpp"
-#include "distributed/data_rpc_messages.hpp"
-#include "distributed/durability_rpc_messages.hpp"
-#include "distributed/index_rpc_messages.hpp"
-#include "distributed/plan_rpc_messages.hpp"
-#include "distributed/pull_produce_rpc_messages.hpp"
-#include "distributed/storage_gc_rpc_messages.hpp"
-#include "distributed/transactional_cache_cleaner_rpc_messages.hpp"
-#include "distributed/updates_rpc_messages.hpp"
-#include "durability/recovery.hpp"
-#include "stats/stats_rpc_messages.hpp"
-#include "storage/concurrent_id_mapper_rpc_messages.hpp"
-#include "transactions/engine_rpc_messages.hpp"
-
-#define ID_VALUE_EXPORT_BOOST_TYPE(type)      \
-  BOOST_CLASS_EXPORT(storage::type##IdReq);   \
-  BOOST_CLASS_EXPORT(storage::type##IdRes);   \
-  BOOST_CLASS_EXPORT(storage::Id##type##Req); \
-  BOOST_CLASS_EXPORT(storage::Id##type##Res);
-
-ID_VALUE_EXPORT_BOOST_TYPE(Label)
-ID_VALUE_EXPORT_BOOST_TYPE(EdgeType)
-ID_VALUE_EXPORT_BOOST_TYPE(Property)
-
-#undef ID_VALUE_EXPORT_BOOST_TYPE
-
-// Distributed transaction engine.
-BOOST_CLASS_EXPORT(tx::TxAndSnapshot);
-BOOST_CLASS_EXPORT(tx::BeginReq);
-BOOST_CLASS_EXPORT(tx::BeginRes);
-BOOST_CLASS_EXPORT(tx::AdvanceReq);
-BOOST_CLASS_EXPORT(tx::AdvanceRes);
-BOOST_CLASS_EXPORT(tx::CommitReq);
-BOOST_CLASS_EXPORT(tx::CommitRes);
-BOOST_CLASS_EXPORT(tx::AbortReq);
-BOOST_CLASS_EXPORT(tx::AbortRes);
-BOOST_CLASS_EXPORT(tx::SnapshotReq);
-BOOST_CLASS_EXPORT(tx::SnapshotRes);
-BOOST_CLASS_EXPORT(tx::CommandReq);
-BOOST_CLASS_EXPORT(tx::CommandRes);
-BOOST_CLASS_EXPORT(tx::GcSnapshotReq);
-BOOST_CLASS_EXPORT(tx::ClogInfoReq);
-BOOST_CLASS_EXPORT(tx::ClogInfoRes);
-BOOST_CLASS_EXPORT(tx::ActiveTransactionsReq);
-BOOST_CLASS_EXPORT(tx::EnsureNextIdGreaterReq);
-BOOST_CLASS_EXPORT(tx::EnsureNextIdGreaterRes);
-BOOST_CLASS_EXPORT(tx::GlobalLastReq);
-BOOST_CLASS_EXPORT(tx::GlobalLastRes);
-
-// Distributed coordination.
-BOOST_CLASS_EXPORT(durability::RecoveryInfo);
-BOOST_CLASS_EXPORT(distributed::RegisterWorkerReq);
-BOOST_CLASS_EXPORT(distributed::RegisterWorkerRes);
-BOOST_CLASS_EXPORT(distributed::ClusterDiscoveryReq);
-BOOST_CLASS_EXPORT(distributed::ClusterDiscoveryRes);
-BOOST_CLASS_EXPORT(distributed::StopWorkerReq);
-BOOST_CLASS_EXPORT(distributed::StopWorkerRes);
-
-// Distributed data exchange.
-BOOST_CLASS_EXPORT(distributed::EdgeReq);
-BOOST_CLASS_EXPORT(distributed::EdgeRes);
-BOOST_CLASS_EXPORT(distributed::VertexReq);
-BOOST_CLASS_EXPORT(distributed::VertexRes);
-BOOST_CLASS_EXPORT(distributed::TxGidPair);
-
-// Distributed plan exchange.
-BOOST_CLASS_EXPORT(distributed::DispatchPlanReq);
-BOOST_CLASS_EXPORT(distributed::DispatchPlanRes);
-BOOST_CLASS_EXPORT(distributed::RemovePlanReq);
-BOOST_CLASS_EXPORT(distributed::RemovePlanRes);
-
-// Pull.
-BOOST_CLASS_EXPORT(distributed::PullReq);
-BOOST_CLASS_EXPORT(distributed::PullRes);
-BOOST_CLASS_EXPORT(distributed::TransactionCommandAdvancedReq);
-BOOST_CLASS_EXPORT(distributed::TransactionCommandAdvancedRes);
-
-// Distributed indexes.
-BOOST_CLASS_EXPORT(distributed::BuildIndexReq);
-BOOST_CLASS_EXPORT(distributed::BuildIndexRes);
-BOOST_CLASS_EXPORT(distributed::IndexLabelPropertyTx);
-
-// Stats.
-BOOST_CLASS_EXPORT(stats::StatsReq);
-BOOST_CLASS_EXPORT(stats::StatsRes);
-BOOST_CLASS_EXPORT(stats::BatchStatsReq);
-BOOST_CLASS_EXPORT(stats::BatchStatsRes);
-
-// Updates.
-BOOST_CLASS_EXPORT(database::StateDelta);
-BOOST_CLASS_EXPORT(distributed::UpdateReq);
-BOOST_CLASS_EXPORT(distributed::UpdateRes);
-BOOST_CLASS_EXPORT(distributed::UpdateApplyReq);
-BOOST_CLASS_EXPORT(distributed::UpdateApplyRes);
-
-// Creates.
-BOOST_CLASS_EXPORT(distributed::CreateResult);
-BOOST_CLASS_EXPORT(distributed::CreateVertexReq);
-BOOST_CLASS_EXPORT(distributed::CreateVertexReqData);
-BOOST_CLASS_EXPORT(distributed::CreateVertexRes);
-BOOST_CLASS_EXPORT(distributed::CreateEdgeReqData);
-BOOST_CLASS_EXPORT(distributed::CreateEdgeReq);
-BOOST_CLASS_EXPORT(distributed::CreateEdgeRes);
-BOOST_CLASS_EXPORT(distributed::AddInEdgeReqData);
-BOOST_CLASS_EXPORT(distributed::AddInEdgeReq);
-BOOST_CLASS_EXPORT(distributed::AddInEdgeRes);
-
-// Removes.
-BOOST_CLASS_EXPORT(distributed::RemoveVertexReq);
-BOOST_CLASS_EXPORT(distributed::RemoveVertexRes);
-BOOST_CLASS_EXPORT(distributed::RemoveEdgeReq);
-BOOST_CLASS_EXPORT(distributed::RemoveEdgeRes);
-BOOST_CLASS_EXPORT(distributed::RemoveInEdgeData);
-BOOST_CLASS_EXPORT(distributed::RemoveInEdgeReq);
-BOOST_CLASS_EXPORT(distributed::RemoveInEdgeRes);
-
-// Durability
-BOOST_CLASS_EXPORT(distributed::MakeSnapshotReq);
-BOOST_CLASS_EXPORT(distributed::MakeSnapshotRes);
-
-// Storage Gc.
-BOOST_CLASS_EXPORT(distributed::GcClearedStatusReq);
-BOOST_CLASS_EXPORT(distributed::GcClearedStatusRes);
-
-// Transactional Cache Cleaner.
-BOOST_CLASS_EXPORT(distributed::WaitOnTransactionEndReq);
-BOOST_CLASS_EXPORT(distributed::WaitOnTransactionEndRes);
diff --git a/src/communication/rpc/messages.hpp b/src/communication/rpc/messages.hpp
deleted file mode 100644
index 604868350..000000000
--- a/src/communication/rpc/messages.hpp
+++ /dev/null
@@ -1,74 +0,0 @@
-#pragma once
-
-#include <memory>
-#include <type_traits>
-#include <typeindex>
-
-#include "boost/serialization/access.hpp"
-#include "boost/serialization/base_object.hpp"
-
-namespace communication::rpc {
-
-using MessageSize = uint32_t;
-
-/**
- * Base class for messages.
- */
-class Message {
- public:
-  virtual ~Message() {}
-
-  /**
-   * Run-time type identification that is used for callbacks.
-   *
-   * Warning: this works because of the virtual destructor; don't remove it
-   * from this class.
-   */
-  std::type_index type_index() const { return typeid(*this); }
-
- private:
-  friend boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &, unsigned int) {}
-};
-
-template <typename TRequest, typename TResponse>
-struct RequestResponse {
-  using Request = TRequest;
-  using Response = TResponse;
-};
-
-}  // namespace communication::rpc
-
-// RPC Pimp
-#define RPC_NO_MEMBER_MESSAGE(name)                                       \
-  struct name : public communication::rpc::Message {                      \
-    name() {}                                                             \
-                                                                          \
-   private:                                                               \
-    friend class boost::serialization::access;                            \
-                                                                          \
-    template <class TArchive>                                             \
-    void serialize(TArchive &ar, unsigned int) {                          \
-      ar &boost::serialization::base_object<communication::rpc::Message>( \
-          *this);                                                         \
-    }                                                                     \
-  }
-
-#define RPC_SINGLE_MEMBER_MESSAGE(name, type)                             \
-  struct name : public communication::rpc::Message {                      \
-    name() {}                                                             \
-    name(const type &member) : member(member) {}                          \
-    type member;                                                          \
-                                                                          \
-   private:                                                               \
-    friend class boost::serialization::access;                            \
-                                                                          \
-    template <class TArchive>                                             \
-    void serialize(TArchive &ar, unsigned int) {                          \
-      ar &boost::serialization::base_object<communication::rpc::Message>( \
-          *this);                                                         \
-      ar &member;                                                         \
-    }                                                                     \
-  }
diff --git a/src/communication/rpc/protocol.cpp b/src/communication/rpc/protocol.cpp
deleted file mode 100644
index 937532e1e..000000000
--- a/src/communication/rpc/protocol.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-#include <sstream>
-
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "boost/serialization/unique_ptr.hpp"
-#include "fmt/format.h"
-
-#include "communication/rpc/messages-inl.hpp"
-#include "communication/rpc/messages.hpp"
-#include "communication/rpc/protocol.hpp"
-#include "communication/rpc/server.hpp"
-#include "utils/demangle.hpp"
-
-namespace communication::rpc {
-
-Session::Session(Server &server, communication::InputStream &input_stream,
-                 communication::OutputStream &output_stream)
-    : server_(server),
-      input_stream_(input_stream),
-      output_stream_(output_stream) {}
-
-void Session::Execute() {
-  if (input_stream_.size() < sizeof(MessageSize)) return;
-  MessageSize request_len =
-      *reinterpret_cast<MessageSize *>(input_stream_.data());
-  uint64_t request_size = sizeof(MessageSize) + request_len;
-  input_stream_.Resize(request_size);
-  if (input_stream_.size() < request_size) return;
-
-  // Read the request message.
-  std::unique_ptr<Message> request([this, request_len]() {
-    Message *req_ptr = nullptr;
-    std::stringstream stream(std::ios_base::in | std::ios_base::binary);
-    stream.str(std::string(
-        reinterpret_cast<char *>(input_stream_.data() + sizeof(MessageSize)),
-        request_len));
-    boost::archive::binary_iarchive archive(stream);
-    // Sent from client.cpp
-    archive >> req_ptr;
-    return req_ptr;
-  }());
-  input_stream_.Shift(sizeof(MessageSize) + request_len);
-
-  auto callbacks_accessor = server_.callbacks_.access();
-  auto it = callbacks_accessor.find(request->type_index());
-  if (it == callbacks_accessor.end()) {
-    // Throw exception to close the socket and cleanup the session.
-    throw SessionException(
-        "Session trying to execute an unregistered RPC call!");
-  }
-
-  if (VLOG_IS_ON(12)) {
-    auto req_type = utils::Demangle(request->type_index().name());
-    LOG(INFO) << "[RpcServer] received " << (req_type ? req_type.value() : "");
-  }
-
-  std::unique_ptr<Message> response = it->second(*(request.get()));
-
-  if (!response) {
-    throw SessionException("Trying to send nullptr instead of message");
-  }
-
-  // Serialize and send response
-  std::stringstream stream(std::ios_base::out | std::ios_base::binary);
-  {
-    boost::archive::binary_oarchive archive(stream);
-    archive << response;
-    // Archive destructor ensures everything is written.
-  }
-
-  const std::string &buffer = stream.str();
-  if (buffer.size() > std::numeric_limits<MessageSize>::max()) {
-    throw SessionException(fmt::format(
-        "Trying to send response of size {}, max response size is {}",
-        buffer.size(), std::numeric_limits<MessageSize>::max()));
-  }
-
-  MessageSize input_stream_size = buffer.size();
-  if (!output_stream_.Write(reinterpret_cast<uint8_t *>(&input_stream_size),
-                            sizeof(MessageSize), true)) {
-    throw SessionException("Couldn't send response size!");
-  }
-  if (!output_stream_.Write(buffer)) {
-    throw SessionException("Couldn't send response data!");
-  }
-
-  if (VLOG_IS_ON(12)) {
-    auto res_type = utils::Demangle(response->type_index().name());
-    LOG(INFO) << "[RpcServer] sent " << (res_type ? res_type.value() : "");
-  }
-}
-}  // namespace communication::rpc
diff --git a/src/communication/rpc/protocol.hpp b/src/communication/rpc/protocol.hpp
deleted file mode 100644
index cbaacc7e3..000000000
--- a/src/communication/rpc/protocol.hpp
+++ /dev/null
@@ -1,55 +0,0 @@
-#pragma once
-
-#include <chrono>
-#include <cstdint>
-#include <memory>
-
-#include "communication/rpc/messages.hpp"
-#include "communication/session.hpp"
-
-/**
- * @brief Protocol
- *
- * Has classes and functions that implement the server side of our
- * RPC protocol.
- *
- * Message layout: MessageSize message_size,
- *                 message_size bytes serialized_message
- */
-namespace communication::rpc {
-
-// Forward declaration of class Server
-class Server;
-
-/**
- * This class is thrown when the Session wants to indicate that a fatal error
- * occurred during execution.
- */
-class SessionException : public utils::BasicException {
-  using utils::BasicException::BasicException;
-};
-
-/**
- * Distributed Protocol Session
- *
- * This class is responsible for handling a single client connection.
- */
-class Session {
- public:
-  Session(Server &server, communication::InputStream &input_stream,
-          communication::OutputStream &output_stream);
-
-  /**
-   * Executes the protocol after data has been read into the stream.
-   * Goes through the protocol states in order to execute commands from the
-   * client.
-   */
-  void Execute();
-
- private:
-  Server &server_;
-  communication::InputStream &input_stream_;
-  communication::OutputStream &output_stream_;
-};
-
-}  // namespace communication::rpc
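A standalone sketch of the framing described above: a `MessageSize` length prefix followed by that many bytes of the boost-serialized message, which is the same layout the client writes and `Session::Execute` parses.

```cpp
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

using MessageSize = uint32_t;

// Prepends the length prefix to an already-serialized message body.
std::vector<uint8_t> Frame(const std::string &serialized_message) {
  MessageSize size = serialized_message.size();
  std::vector<uint8_t> frame(sizeof(MessageSize) + size);
  std::memcpy(frame.data(), &size, sizeof(MessageSize));
  std::memcpy(frame.data() + sizeof(MessageSize), serialized_message.data(),
              size);
  return frame;
}
```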
diff --git a/src/communication/rpc/server.cpp b/src/communication/rpc/server.cpp
deleted file mode 100644
index 19eade72c..000000000
--- a/src/communication/rpc/server.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "boost/serialization/access.hpp"
-#include "boost/serialization/base_object.hpp"
-#include "boost/serialization/export.hpp"
-#include "boost/serialization/unique_ptr.hpp"
-
-#include "communication/rpc/server.hpp"
-
-namespace communication::rpc {
-
-Server::Server(const io::network::Endpoint &endpoint,
-               size_t workers_count)
-    : server_(endpoint, *this, -1, "RPC", workers_count) {}
-
-void Server::StopProcessingCalls() {
-  server_.Shutdown();
-  server_.AwaitShutdown();
-}
-
-const io::network::Endpoint &Server::endpoint() const {
-  return server_.endpoint();
-}
-}  // namespace communication::rpc
diff --git a/src/communication/rpc/server.hpp b/src/communication/rpc/server.hpp
deleted file mode 100644
index 100a0f0eb..000000000
--- a/src/communication/rpc/server.hpp
+++ /dev/null
@@ -1,86 +0,0 @@
-#pragma once
-
-#include <type_traits>
-#include <unordered_map>
-#include <vector>
-
-#include "communication/rpc/messages.hpp"
-#include "communication/rpc/protocol.hpp"
-#include "communication/server.hpp"
-#include "data_structures/concurrent/concurrent_map.hpp"
-#include "data_structures/queue.hpp"
-#include "io/network/endpoint.hpp"
-#include "utils/demangle.hpp"
-
-namespace communication::rpc {
-
-class Server {
- public:
-  Server(const io::network::Endpoint &endpoint,
-         size_t workers_count = std::thread::hardware_concurrency());
-  Server(const Server &) = delete;
-  Server(Server &&) = delete;
-  Server &operator=(const Server &) = delete;
-  Server &operator=(Server &&) = delete;
-
-  void StopProcessingCalls();
-
-  const io::network::Endpoint &endpoint() const;
-
-  template <typename TRequestResponse>
-  void Register(
-      std::function<std::unique_ptr<typename TRequestResponse::Response>(
-          const typename TRequestResponse::Request &)>
-          callback) {
-    static_assert(
-        std::is_base_of<Message, typename TRequestResponse::Request>::value,
-        "TRequestResponse::Request must be derived from Message");
-    static_assert(
-        std::is_base_of<Message, typename TRequestResponse::Response>::value,
-        "TRequestResponse::Response must be derived from Message");
-    auto callbacks_accessor = callbacks_.access();
-    auto got = callbacks_accessor.insert(
-        typeid(typename TRequestResponse::Request),
-        [callback = callback](const Message &base_message) {
-          const auto &message =
-              dynamic_cast<const typename TRequestResponse::Request &>(
-                  base_message);
-          return callback(message);
-        });
-    CHECK(got.second) << "Callback for that message type already registered";
-    if (VLOG_IS_ON(12)) {
-      auto req_type =
-          utils::Demangle(typeid(typename TRequestResponse::Request).name());
-      auto res_type =
-          utils::Demangle(typeid(typename TRequestResponse::Response).name());
-      LOG(INFO) << "[RpcServer] register " << (req_type ? req_type.value() : "")
-                << " -> " << (res_type ? res_type.value() : "");
-    }
-  }
-
-  template <typename TRequestResponse>
-  void UnRegister() {
-    static_assert(
-        std::is_base_of<Message, typename TRequestResponse::Request>::value,
-        "TRequestResponse::Request must be derived from Message");
-    static_assert(
-        std::is_base_of<Message, typename TRequestResponse::Response>::value,
-        "TRequestResponse::Response must be derived from Message");
-    auto callbacks_accessor = callbacks_.access();
-    auto deleted =
-        callbacks_accessor.remove(typeid(typename TRequestResponse::Request));
-    CHECK(deleted) << "Trying to remove unknown message type callback";
-  }
-
- private:
-  friend class Session;
-
-  ConcurrentMap<std::type_index,
-                std::function<std::unique_ptr<Message>(const Message &)>>
-      callbacks_;
-
-  std::mutex mutex_;
-  communication::Server<Session, Server> server_;
-};
-
-}  // namespace communication::rpc
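The server-side counterpart of the client sketch above: registering a callback for the hypothetical `EchoRpc`.

```cpp
#include <memory>

#include "communication/rpc/server.hpp"

void RegisterEcho(communication::rpc::Server &server) {
  // The callback receives the deserialized request and returns the response
  // as a unique_ptr; the session serializes it back to the caller.
  server.Register<EchoRpc>([](const EchoReq &request) {
    return std::make_unique<EchoRes>(request.member);
  });
}
```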
diff --git a/src/database/config.cpp b/src/database/config.cpp
index b376f2c42..221739532 100644
--- a/src/database/config.cpp
+++ b/src/database/config.cpp
@@ -27,35 +27,6 @@ DEFINE_int32(gc_cycle_sec, 30,
              "Amount of time between starts of two cleaning cycles in seconds. "
              "-1 to turn off.");
 
-#ifndef MG_COMMUNITY
-// Distributed master/worker flags.
-DEFINE_VALIDATED_HIDDEN_int32(worker_id, 0,
-                              "ID of a worker in a distributed system. Ignored "
-                              "in single-node.",
-                              FLAG_IN_RANGE(0, 1 << gid::kWorkerIdSize));
-DEFINE_HIDDEN_string(master_host, "0.0.0.0",
-                     "For master node indicates the host served on. For worker "
-                     "node indicates the master location.");
-DEFINE_VALIDATED_HIDDEN_int32(
-    master_port, 0,
-    "For master node the port on which to serve. For "
-    "worker node indicates the master's port.",
-    FLAG_IN_RANGE(0, std::numeric_limits<uint16_t>::max()));
-DEFINE_HIDDEN_string(worker_host, "0.0.0.0",
-                     "For worker node indicates the host served on. For master "
-                     "node this flag is not used.");
-DEFINE_VALIDATED_HIDDEN_int32(
-    worker_port, 0,
-    "For master node it's unused. For worker node "
-    "indicates the port on which to serve. If zero (default value), a port is "
-    "chosen at random. Sent to the master when registring worker node.",
-    FLAG_IN_RANGE(0, std::numeric_limits<uint16_t>::max()));
-DEFINE_VALIDATED_HIDDEN_int32(rpc_num_workers,
-                              std::max(std::thread::hardware_concurrency(), 1U),
-                              "Number of workers (RPC)",
-                              FLAG_IN_RANGE(1, INT32_MAX));
-#endif
-
 // clang-format off
 database::Config::Config()
     // Durability flags.
@@ -68,15 +39,5 @@ database::Config::Config()
       // Misc flags.
       gc_cycle_sec{FLAGS_gc_cycle_sec},
       query_execution_time_sec{FLAGS_query_execution_time_sec}
-#ifndef MG_COMMUNITY
-      ,
-      // Distributed flags.
-      rpc_num_workers{FLAGS_rpc_num_workers},
-      worker_id{FLAGS_worker_id},
-      master_endpoint{FLAGS_master_host,
-                      static_cast<uint16_t>(FLAGS_master_port)},
-      worker_endpoint{FLAGS_worker_host,
-                      static_cast<uint16_t>(FLAGS_worker_port)}
-#endif
 {}
 // clang-format on
diff --git a/src/database/counters.cpp b/src/database/counters.cpp
index 5ee380ddc..d392616d7 100644
--- a/src/database/counters.cpp
+++ b/src/database/counters.cpp
@@ -1,23 +1,7 @@
 #include "database/counters.hpp"
 
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "boost/serialization/export.hpp"
-#include "boost/serialization/utility.hpp"
-
 namespace database {
 
-RPC_SINGLE_MEMBER_MESSAGE(CountersGetReq, std::string);
-RPC_SINGLE_MEMBER_MESSAGE(CountersGetRes, int64_t);
-using CountersGetRpc =
-    communication::rpc::RequestResponse<CountersGetReq, CountersGetRes>;
-
-using CountersSetReqData = std::pair<std::string, int64_t>;
-RPC_SINGLE_MEMBER_MESSAGE(CountersSetReq, CountersSetReqData);
-RPC_NO_MEMBER_MESSAGE(CountersSetRes);
-using CountersSetRpc =
-    communication::rpc::RequestResponse<CountersSetReq, CountersSetRes>;
-
 int64_t SingleNodeCounters::Get(const std::string &name) {
   return counters_.access()
       .emplace(name, std::make_tuple(name), std::make_tuple(0))
@@ -30,36 +14,4 @@ void SingleNodeCounters::Set(const std::string &name, int64_t value) {
   if (!name_counter_pair.second) name_counter_pair.first->second.store(value);
 }
 
-MasterCounters::MasterCounters(communication::rpc::Server &server)
-    : rpc_server_(server) {
-  rpc_server_.Register<CountersGetRpc>([this](const CountersGetReq &req) {
-    return std::make_unique<CountersGetRes>(Get(req.member));
-  });
-  rpc_server_.Register<CountersSetRpc>([this](const CountersSetReq &req) {
-    Set(req.member.first, req.member.second);
-    return std::make_unique<CountersSetRes>();
-  });
-}
-
-WorkerCounters::WorkerCounters(
-    communication::rpc::ClientPool &master_client_pool)
-    : master_client_pool_(master_client_pool) {}
-
-int64_t WorkerCounters::Get(const std::string &name) {
-  auto response = master_client_pool_.Call<CountersGetRpc>(name);
-  CHECK(response) << "CountersGetRpc failed";
-  return response->member;
-}
-
-void WorkerCounters::Set(const std::string &name, int64_t value) {
-  auto response =
-      master_client_pool_.Call<CountersSetRpc>(CountersSetReqData{name, value});
-  CHECK(response) << "CountersSetRpc failed";
-}
-
 }  // namespace database
-
-BOOST_CLASS_EXPORT(database::CountersGetReq);
-BOOST_CLASS_EXPORT(database::CountersGetRes);
-BOOST_CLASS_EXPORT(database::CountersSetReq);
-BOOST_CLASS_EXPORT(database::CountersSetRes);
diff --git a/src/database/counters.hpp b/src/database/counters.hpp
index c5661107b..e125498f2 100644
--- a/src/database/counters.hpp
+++ b/src/database/counters.hpp
@@ -4,9 +4,6 @@
 #include <cstdint>
 #include <string>
 
-#include "communication/rpc/client_pool.hpp"
-#include "communication/rpc/messages.hpp"
-#include "communication/rpc/server.hpp"
 #include "data_structures/concurrent/concurrent_map.hpp"
 
 namespace database {
@@ -42,25 +39,4 @@ class SingleNodeCounters : public Counters {
   ConcurrentMap<std::string, std::atomic<int64_t>> counters_;
 };
 
-/** Implementation for distributed master. */
-class MasterCounters : public SingleNodeCounters {
- public:
-  MasterCounters(communication::rpc::Server &server);
-
- private:
-  communication::rpc::Server &rpc_server_;
-};
-
-/** Implementation for distributed worker. */
-class WorkerCounters : public Counters {
- public:
-  WorkerCounters(communication::rpc::ClientPool &master_client_pool);
-
-  int64_t Get(const std::string &name) override;
-  void Set(const std::string &name, int64_t value) override;
-
- private:
-  communication::rpc::ClientPool &master_client_pool_;
-};
-
 }  // namespace database
diff --git a/src/database/graph_db.cpp b/src/database/graph_db.cpp
index 324dfbca2..2f89ae29b 100644
--- a/src/database/graph_db.cpp
+++ b/src/database/graph_db.cpp
@@ -2,38 +2,14 @@
 
 #include "glog/logging.h"
 
-#include "communication/rpc/server.hpp"
 #include "database/graph_db.hpp"
-#include "database/storage_gc_master.hpp"
+#include "database/graph_db_accessor.hpp"
 #include "database/storage_gc_single_node.hpp"
-#include "database/storage_gc_worker.hpp"
-#include "distributed/cluster_discovery_master.hpp"
-#include "distributed/cluster_discovery_worker.hpp"
-#include "distributed/coordination_master.hpp"
-#include "distributed/coordination_worker.hpp"
-#include "distributed/data_manager.hpp"
-#include "distributed/data_rpc_clients.hpp"
-#include "distributed/data_rpc_server.hpp"
-#include "distributed/durability_rpc_clients.hpp"
-#include "distributed/durability_rpc_messages.hpp"
-#include "distributed/durability_rpc_server.hpp"
-#include "distributed/index_rpc_server.hpp"
-#include "distributed/plan_consumer.hpp"
-#include "distributed/plan_dispatcher.hpp"
-#include "distributed/produce_rpc_server.hpp"
-#include "distributed/pull_rpc_clients.hpp"
-#include "distributed/transactional_cache_cleaner.hpp"
-#include "distributed/updates_rpc_clients.hpp"
-#include "distributed/updates_rpc_server.hpp"
 #include "durability/paths.hpp"
 #include "durability/recovery.hpp"
 #include "durability/snapshooter.hpp"
-#include "storage/concurrent_id_mapper_master.hpp"
 #include "storage/concurrent_id_mapper_single_node.hpp"
-#include "storage/concurrent_id_mapper_worker.hpp"
-#include "transactions/engine_master.hpp"
 #include "transactions/engine_single_node.hpp"
-#include "transactions/engine_worker.hpp"
 #include "utils/flag_validation.hpp"
 
 using namespace storage;
@@ -69,22 +45,6 @@ class PrivateBase : public GraphDb {
     storage_ = std::make_unique<Storage>(WorkerId());
   }
 
-  distributed::PullRpcClients &pull_clients() override {
-    LOG(FATAL) << "Remote pull clients only available in master.";
-  }
-  distributed::ProduceRpcServer &produce_server() override {
-    LOG(FATAL) << "Remote produce server only available in worker.";
-  }
-  distributed::PlanConsumer &plan_consumer() override {
-    LOG(FATAL) << "Plan consumer only available in distributed worker.";
-  }
-  distributed::PlanDispatcher &plan_dispatcher() override {
-    LOG(FATAL) << "Plan dispatcher only available in distributed master.";
-  }
-  distributed::IndexRpcClients &index_rpc_clients() override {
-    LOG(FATAL) << "Index RPC clients only available in distributed master.";
-  }
-
  protected:
   std::unique_ptr<Storage> storage_ =
       std::make_unique<Storage>(config_.worker_id);
@@ -121,7 +81,6 @@ struct TypemapPack {
 class SingleNode : public PrivateBase {
  public:
   explicit SingleNode(const Config &config) : PrivateBase(config) {}
-  GraphDb::Type type() const override { return GraphDb::Type::SINGLE_NODE; }
   IMPL_GETTERS
 
   tx::SingleNodeEngine tx_engine_{&wal_};
@@ -131,27 +90,7 @@ class SingleNode : public PrivateBase {
   TypemapPack<SingleNodeConcurrentIdMapper> typemap_pack_;
   database::SingleNodeCounters counters_;
   std::vector<int> GetWorkerIds() const override { return {0}; }
-  distributed::DataRpcServer &data_server() override {
-    LOG(FATAL) << "Remote data server not available in single-node.";
-  }
-  distributed::DataRpcClients &data_clients() override {
-    LOG(FATAL) << "Remote data clients not available in single-node.";
-  }
-  distributed::PlanDispatcher &plan_dispatcher() override {
-    LOG(FATAL) << "Plan Dispatcher not available in single-node.";
-  }
-  distributed::PlanConsumer &plan_consumer() override {
-    LOG(FATAL) << "Plan Consumer not available in single-node.";
-  }
-  distributed::UpdatesRpcServer &updates_server() override {
-    LOG(FATAL) << "Remote updates server not available in single-node.";
-  }
-  distributed::UpdatesRpcClients &updates_clients() override {
-    LOG(FATAL) << "Remote updates clients not available in single-node.";
-  }
-  distributed::DataManager &data_manager() override {
-    LOG(FATAL) << "Remote data manager not available in single-node.";
-  }
+
   void ReinitializeStorage() override {
     // Release gc scheduler to stop it from touching storage
     storage_gc_ = nullptr;
@@ -161,144 +100,6 @@ class SingleNode : public PrivateBase {
   }
 };
 
-#define IMPL_DISTRIBUTED_GETTERS                                              \
-  std::vector<int> GetWorkerIds() const override {                            \
-    return coordination_.GetWorkerIds();                                      \
-  }                                                                           \
-  distributed::DataRpcServer &data_server() override { return data_server_; } \
-  distributed::DataRpcClients &data_clients() override {                      \
-    return data_clients_;                                                     \
-  }                                                                           \
-  distributed::UpdatesRpcServer &updates_server() override {                  \
-    return updates_server_;                                                   \
-  }                                                                           \
-  distributed::UpdatesRpcClients &updates_clients() override {                \
-    return updates_clients_;                                                  \
-  }                                                                           \
-  distributed::DataManager &data_manager() override { return data_manager_; }
-
-class Master : public PrivateBase {
- public:
-  explicit Master(const Config &config) : PrivateBase(config) {}
-
-  GraphDb::Type type() const override {
-    return GraphDb::Type::DISTRIBUTED_MASTER;
-  }
-
-  // Makes a local snapshot and forces the workers to do the same. The master
-  // snapshot is written only if the workers successfully created their own.
-  bool MakeSnapshot(GraphDbAccessor &accessor) override {
-    auto workers_snapshot =
-        durability_rpc_clients_.MakeSnapshot(accessor.transaction_id());
-    if (!workers_snapshot.get()) return false;
-    // This can be further optimized by creating the master snapshot at the
-    // same time as the worker snapshots, but that would force us to delete
-    // the master snapshot if it succeeds while the workers somehow fail,
-    // because we assume that every snapshot that exists on the master with
-    // some tx_id visibility also exists on the workers.
-    return PrivateBase::MakeSnapshot(accessor);
-  }
-
-  IMPL_GETTERS
-  IMPL_DISTRIBUTED_GETTERS
-  distributed::PlanDispatcher &plan_dispatcher() override {
-    return plan_dispatcher_;
-  }
-  distributed::PullRpcClients &pull_clients() override { return pull_clients_; }
-  distributed::IndexRpcClients &index_rpc_clients() override {
-    return index_rpc_clients_;
-  }
-
-  void ReinitializeStorage() override {
-    // Release gc scheduler to stop it from touching storage
-    storage_gc_ = nullptr;
-    PrivateBase::ReinitializeStorage();
-    storage_gc_ = std::make_unique<StorageGcMaster>(
-        *storage_, tx_engine_, config_.gc_cycle_sec, server_, coordination_);
-  }
-
-  communication::rpc::Server server_{
-      config_.master_endpoint, static_cast<size_t>(config_.rpc_num_workers)};
-  tx::MasterEngine tx_engine_{server_, rpc_worker_clients_, &wal_};
-  distributed::MasterCoordination coordination_{server_.endpoint()};
-  std::unique_ptr<StorageGcMaster> storage_gc_ =
-      std::make_unique<StorageGcMaster>(
-          *storage_, tx_engine_, config_.gc_cycle_sec, server_, coordination_);
-  distributed::RpcWorkerClients rpc_worker_clients_{coordination_};
-  TypemapPack<MasterConcurrentIdMapper> typemap_pack_{server_};
-  database::MasterCounters counters_{server_};
-  distributed::DurabilityRpcClients durability_rpc_clients_{
-      rpc_worker_clients_};
-  distributed::DataRpcServer data_server_{*this, server_};
-  distributed::DataRpcClients data_clients_{rpc_worker_clients_};
-  distributed::PlanDispatcher plan_dispatcher_{rpc_worker_clients_};
-  distributed::PullRpcClients pull_clients_{rpc_worker_clients_};
-  distributed::IndexRpcClients index_rpc_clients_{rpc_worker_clients_};
-  distributed::UpdatesRpcServer updates_server_{*this, server_};
-  distributed::UpdatesRpcClients updates_clients_{rpc_worker_clients_};
-  distributed::DataManager data_manager_{*this, data_clients_};
-  distributed::TransactionalCacheCleaner cache_cleaner_{
-      tx_engine_, updates_server_, data_manager_};
-  distributed::ClusterDiscoveryMaster cluster_discovery_{server_, coordination_,
-                                                         rpc_worker_clients_};
-};
-
-class Worker : public PrivateBase {
- public:
-  explicit Worker(const Config &config) : PrivateBase(config) {
-    cluster_discovery_.RegisterWorker(config.worker_id);
-  }
-
-  GraphDb::Type type() const override {
-    return GraphDb::Type::DISTRIBUTED_WORKER;
-  }
-  IMPL_GETTERS
-  IMPL_DISTRIBUTED_GETTERS
-  distributed::PlanConsumer &plan_consumer() override { return plan_consumer_; }
-  distributed::ProduceRpcServer &produce_server() override {
-    return produce_server_;
-  }
-
-  void ReinitializeStorage() override {
-    // Release gc scheduler to stop it from touching storage
-    storage_gc_ = nullptr;
-    PrivateBase::ReinitializeStorage();
-    storage_gc_ = std::make_unique<StorageGcWorker>(
-        *storage_, tx_engine_, config_.gc_cycle_sec,
-        rpc_worker_clients_.GetClientPool(0), config_.worker_id);
-  }
-
-  communication::rpc::Server server_{
-      config_.worker_endpoint, static_cast<size_t>(config_.rpc_num_workers)};
-  distributed::WorkerCoordination coordination_{server_,
-                                                config_.master_endpoint};
-  distributed::RpcWorkerClients rpc_worker_clients_{coordination_};
-  tx::WorkerEngine tx_engine_{rpc_worker_clients_.GetClientPool(0)};
-  std::unique_ptr<StorageGcWorker> storage_gc_ =
-      std::make_unique<StorageGcWorker>(
-          *storage_, tx_engine_, config_.gc_cycle_sec,
-          rpc_worker_clients_.GetClientPool(0), config_.worker_id);
-  TypemapPack<WorkerConcurrentIdMapper> typemap_pack_{
-      rpc_worker_clients_.GetClientPool(0)};
-  database::WorkerCounters counters_{rpc_worker_clients_.GetClientPool(0)};
-  distributed::DataRpcServer data_server_{*this, server_};
-  distributed::DataRpcClients data_clients_{rpc_worker_clients_};
-  distributed::PlanConsumer plan_consumer_{server_};
-  distributed::ProduceRpcServer produce_server_{*this, tx_engine_, server_,
-                                                plan_consumer_};
-  distributed::IndexRpcServer index_rpc_server_{*this, server_};
-  distributed::UpdatesRpcServer updates_server_{*this, server_};
-  distributed::UpdatesRpcClients updates_clients_{rpc_worker_clients_};
-  distributed::DataManager data_manager_{*this, data_clients_};
-  distributed::WorkerTransactionalCacheCleaner cache_cleaner_{
-      tx_engine_, server_, produce_server_, updates_server_, data_manager_};
-  distributed::DurabilityRpcServer durability_rpc_server_{*this, server_};
-  distributed::ClusterDiscoveryWorker cluster_discovery_{
-      server_, coordination_, rpc_worker_clients_.GetClientPool(0)};
-};
-
-#undef IMPL_GETTERS
-
 PublicBase::PublicBase(std::unique_ptr<PrivateBase> impl)
     : impl_(std::move(impl)) {
   if (impl_->config_.durability_enabled)
@@ -306,41 +107,18 @@ PublicBase::PublicBase(std::unique_ptr<PrivateBase> impl)
 
   // Durability recovery.
   {
-    auto db_type = impl_->type();
-
     // What we should recover.
     std::experimental::optional<durability::RecoveryInfo>
         required_recovery_info;
-    if (db_type == Type::DISTRIBUTED_WORKER) {
-      required_recovery_info = dynamic_cast<impl::Worker *>(impl_.get())
-                                   ->cluster_discovery_.recovery_info();
-    }
 
     // What we recover.
     std::experimental::optional<durability::RecoveryInfo> recovery_info;
 
     // Recover only if necessary.
-    if ((db_type != Type::DISTRIBUTED_WORKER &&
-         impl_->config_.db_recover_on_startup) ||
-        (db_type == Type::DISTRIBUTED_WORKER && required_recovery_info)) {
+    if (impl_->config_.db_recover_on_startup) {
       recovery_info = durability::Recover(impl_->config_.durability_directory,
                                           *impl_, required_recovery_info);
     }
-
-    // Post-recovery setup and checking.
-    switch (db_type) {
-      case Type::DISTRIBUTED_MASTER:
-        dynamic_cast<impl::Master *>(impl_.get())
-            ->coordination_.SetRecoveryInfo(recovery_info);
-        break;
-      case Type::DISTRIBUTED_WORKER:
-        if (required_recovery_info != recovery_info)
-          LOG(FATAL) << "Memgraph worker failed to recover the database state "
-                        "recovered on the master";
-        break;
-      case Type::SINGLE_NODE:
-        break;
-    }
   }
 
   if (impl_->config_.durability_enabled) {
@@ -374,14 +152,12 @@ PublicBase::~PublicBase() {
 
-  // If we are not a worker we can do a snapshot on exit if it's enabled. Doing
-  // this on the master forces workers to do the same through rpcs
+  // Make a snapshot on exit if it's enabled.
-  if (impl_->config_.snapshot_on_exit &&
-      impl_->type() != Type::DISTRIBUTED_WORKER) {
+  if (impl_->config_.snapshot_on_exit) {
     GraphDbAccessor dba(*this);
     MakeSnapshot(dba);
   }
 }
 
-GraphDb::Type PublicBase::type() const { return impl_->type(); }
 Storage &PublicBase::storage() { return impl_->storage(); }
 durability::WriteAheadLog &PublicBase::wal() { return impl_->wal(); }
 tx::Engine &PublicBase::tx_engine() { return impl_->tx_engine(); }
@@ -400,36 +176,6 @@ int PublicBase::WorkerId() const { return impl_->WorkerId(); }
 std::vector<int> PublicBase::GetWorkerIds() const {
   return impl_->GetWorkerIds();
 }
-distributed::DataRpcServer &PublicBase::data_server() {
-  return impl_->data_server();
-}
-distributed::DataRpcClients &PublicBase::data_clients() {
-  return impl_->data_clients();
-}
-distributed::PlanDispatcher &PublicBase::plan_dispatcher() {
-  return impl_->plan_dispatcher();
-}
-distributed::IndexRpcClients &PublicBase::index_rpc_clients() {
-  return impl_->index_rpc_clients();
-}
-distributed::PlanConsumer &PublicBase::plan_consumer() {
-  return impl_->plan_consumer();
-}
-distributed::PullRpcClients &PublicBase::pull_clients() {
-  return impl_->pull_clients();
-}
-distributed::ProduceRpcServer &PublicBase::produce_server() {
-  return impl_->produce_server();
-}
-distributed::UpdatesRpcServer &PublicBase::updates_server() {
-  return impl_->updates_server();
-}
-distributed::UpdatesRpcClients &PublicBase::updates_clients() {
-  return impl_->updates_clients();
-}
-distributed::DataManager &PublicBase::data_manager() {
-  return impl_->data_manager();
-}
 
 bool PublicBase::MakeSnapshot(GraphDbAccessor &accessor) {
   return impl_->MakeSnapshot(accessor);
@@ -457,32 +203,4 @@ MasterBase::~MasterBase() { snapshot_creator_ = nullptr; }
 
 SingleNode::SingleNode(Config config)
     : MasterBase(std::make_unique<impl::SingleNode>(config)) {}
-
-Master::Master(Config config)
-    : MasterBase(std::make_unique<impl::Master>(config)) {}
-
-io::network::Endpoint Master::endpoint() const {
-  return dynamic_cast<impl::Master *>(impl_.get())->server_.endpoint();
-}
-
-io::network::Endpoint Master::GetEndpoint(int worker_id) {
-  return dynamic_cast<impl::Master *>(impl_.get())
-      ->coordination_.GetEndpoint(worker_id);
-}
-
-Worker::Worker(Config config)
-    : PublicBase(std::make_unique<impl::Worker>(config)) {}
-
-io::network::Endpoint Worker::endpoint() const {
-  return dynamic_cast<impl::Worker *>(impl_.get())->server_.endpoint();
-}
-
-io::network::Endpoint Worker::GetEndpoint(int worker_id) {
-  return dynamic_cast<impl::Worker *>(impl_.get())
-      ->coordination_.GetEndpoint(worker_id);
-}
-
-void Worker::WaitForShutdown() {
-  dynamic_cast<impl::Worker *>(impl_.get())->coordination_.WaitForShutdown();
-}
 }  // namespace database
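
With the distributed Master and Worker implementations removed, graph_db.cpp is left with only the handle/implementation split in which PublicBase forwards every call to a PrivateBase instance it owns. A minimal sketch of that forwarding shape, using simplified hypothetical types (Impl, Handle) rather than the real GraphDb interfaces:

#include <memory>
#include <vector>

// Hypothetical stand-ins for PrivateBase and PublicBase: the handle owns the
// implementation and forwards every public call to it.
class Impl {
 public:
  int WorkerId() const { return 0; }
  std::vector<int> GetWorkerIds() const { return {0}; }
};

class Handle {
 public:
  explicit Handle(std::unique_ptr<Impl> impl) : impl_(std::move(impl)) {}
  // Public methods simply forward to the private implementation.
  int WorkerId() const { return impl_->WorkerId(); }
  std::vector<int> GetWorkerIds() const { return impl_->GetWorkerIds(); }

 private:
  std::unique_ptr<Impl> impl_;
};

The real classes forward many more members (storage, WAL, transaction engine), but the ownership and forwarding structure is the same.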
diff --git a/src/database/graph_db.hpp b/src/database/graph_db.hpp
index dc94f6cab..c74691eb0 100644
--- a/src/database/graph_db.hpp
+++ b/src/database/graph_db.hpp
@@ -14,19 +14,6 @@
 #include "transactions/engine.hpp"
 #include "utils/scheduler.hpp"
 
-namespace distributed {
-class DataRpcServer;
-class DataRpcClients;
-class PlanDispatcher;
-class PlanConsumer;
-class PullRpcClients;
-class ProduceRpcServer;
-class UpdatesRpcServer;
-class UpdatesRpcClients;
-class DataManager;
-class IndexRpcClients;
-}  // namespace distributed
-
 namespace database {
 
 /// Database configuration. Initialized from flags, but modifiable.
@@ -76,12 +63,9 @@ struct Config {
  */
 class GraphDb {
  public:
-  enum class Type { SINGLE_NODE, DISTRIBUTED_MASTER, DISTRIBUTED_WORKER };
-
   GraphDb() {}
   virtual ~GraphDb() {}
 
-  virtual Type type() const = 0;
   virtual Storage &storage() = 0;
   virtual durability::WriteAheadLog &wal() = 0;
   virtual tx::Engine &tx_engine() = 0;
@@ -94,23 +78,6 @@ class GraphDb {
   virtual int WorkerId() const = 0;
   virtual std::vector<int> GetWorkerIds() const = 0;
 
-  // Supported only in distributed master and worker, not in single-node.
-  virtual distributed::DataRpcServer &data_server() = 0;
-  virtual distributed::DataRpcClients &data_clients() = 0;
-  virtual distributed::UpdatesRpcServer &updates_server() = 0;
-  virtual distributed::UpdatesRpcClients &updates_clients() = 0;
-  virtual distributed::DataManager &data_manager() = 0;
-
-  // Supported only in distributed master.
-  virtual distributed::PullRpcClients &pull_clients() = 0;
-  virtual distributed::PlanDispatcher &plan_dispatcher() = 0;
-  virtual distributed::IndexRpcClients &index_rpc_clients() = 0;
-
-  // Supported only in distributed worker.
-  // TODO remove once end2end testing is possible.
-  virtual distributed::ProduceRpcServer &produce_server() = 0;
-  virtual distributed::PlanConsumer &plan_consumer() = 0;
-
   // Makes a snapshot from the visibility of the given accessor
   virtual bool MakeSnapshot(GraphDbAccessor &accessor) = 0;
 
@@ -136,7 +103,6 @@ class PrivateBase;
 // initialization and cleanup.
 class PublicBase : public GraphDb {
  public:
-  Type type() const override;
   Storage &storage() override;
   durability::WriteAheadLog &wal() override;
   tx::Engine &tx_engine() override;
@@ -147,16 +113,6 @@ class PublicBase : public GraphDb {
   void CollectGarbage() override;
   int WorkerId() const override;
   std::vector<int> GetWorkerIds() const override;
-  distributed::DataRpcServer &data_server() override;
-  distributed::DataRpcClients &data_clients() override;
-  distributed::PlanDispatcher &plan_dispatcher() override;
-  distributed::IndexRpcClients &index_rpc_clients() override;
-  distributed::PlanConsumer &plan_consumer() override;
-  distributed::PullRpcClients &pull_clients() override;
-  distributed::ProduceRpcServer &produce_server() override;
-  distributed::UpdatesRpcServer &updates_server() override;
-  distributed::UpdatesRpcClients &updates_clients() override;
-  distributed::DataManager &data_manager() override;
 
   bool is_accepting_transactions() const { return is_accepting_transactions_; }
   bool MakeSnapshot(GraphDbAccessor &accessor) override;
@@ -188,25 +144,4 @@ class SingleNode : public MasterBase {
  public:
   explicit SingleNode(Config config = Config());
 };
-
-class Master : public MasterBase {
- public:
-  explicit Master(Config config = Config());
-  /** Gets this master's endpoint. */
-  io::network::Endpoint endpoint() const;
-  /** Gets the endpoint of the worker with the given id. */
-  // TODO make const once Coordination::GetEndpoint is const.
-  io::network::Endpoint GetEndpoint(int worker_id);
-};
-
-class Worker : public impl::PublicBase {
- public:
-  explicit Worker(Config config = Config());
-  /** Gets this worker's endpoint. */
-  io::network::Endpoint endpoint() const;
-  /** Gets the endpoint of the worker with the given id. */
-  // TODO make const once Coordination::GetEndpoint is const.
-  io::network::Endpoint GetEndpoint(int worker_id);
-  void WaitForShutdown();
-};
 }  // namespace database
diff --git a/src/database/graph_db_accessor.cpp b/src/database/graph_db_accessor.cpp
index 9305dfd42..8c7f6efb9 100644
--- a/src/database/graph_db_accessor.cpp
+++ b/src/database/graph_db_accessor.cpp
@@ -4,9 +4,6 @@
 
 #include "database/graph_db_accessor.hpp"
 #include "database/state_delta.hpp"
-#include "distributed/data_manager.hpp"
-#include "distributed/rpc_worker_clients.hpp"
-#include "distributed/updates_rpc_clients.hpp"
 #include "storage/address_types.hpp"
 #include "storage/edge.hpp"
 #include "storage/edge_accessor.hpp"
@@ -77,26 +74,6 @@ VertexAccessor GraphDbAccessor::InsertVertex(
   return VertexAccessor(vertex_vlist, *this);
 }
 
-VertexAccessor GraphDbAccessor::InsertVertexIntoRemote(
-    int worker_id, const std::vector<storage::Label> &labels,
-    const std::unordered_map<storage::Property, query::TypedValue>
-        &properties) {
-  CHECK(worker_id != db().WorkerId())
-      << "Not allowed to call InsertVertexIntoRemote for local worker";
-
-  gid::Gid gid = db().updates_clients().CreateVertex(
-      worker_id, transaction_id(), labels, properties);
-
-  auto vertex = std::make_unique<Vertex>();
-  vertex->labels_ = labels;
-  for (auto &kv : properties) vertex->properties_.set(kv.first, kv.second);
-
-  db().data_manager()
-      .Elements<Vertex>(transaction_id())
-      .emplace(gid, nullptr, std::move(vertex));
-  return VertexAccessor({gid, worker_id}, *this);
-}
-
 std::experimental::optional<VertexAccessor> GraphDbAccessor::FindVertexOptional(
     gid::Gid gid, bool current_state) {
   VertexAccessor record_accessor(db_.storage().LocalAddress<Vertex>(gid),
@@ -129,8 +106,6 @@ EdgeAccessor GraphDbAccessor::FindEdge(gid::Gid gid, bool current_state) {
 void GraphDbAccessor::BuildIndex(storage::Label label,
                                  storage::Property property) {
   DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
-  DCHECK(db_.type() != GraphDb::Type::DISTRIBUTED_WORKER)
-      << "BuildIndex invoked on worker";
 
   db_.storage().index_build_tx_in_progress_.access().insert(transaction_.id_);
 
@@ -174,16 +149,6 @@ void GraphDbAccessor::BuildIndex(storage::Label label,
   // CreateIndex.
   GraphDbAccessor dba(db_);
 
-  std::experimental::optional<std::vector<utils::Future<bool>>>
-      index_rpc_completions;
-
-  // Notify all workers to start building an index if we are the master since
-  // they don't have to wait anymore
-  if (db_.type() == GraphDb::Type::DISTRIBUTED_MASTER) {
-    index_rpc_completions.emplace(db_.index_rpc_clients().GetBuildIndexFutures(
-        label, property, transaction_id(), this->db_.WorkerId()));
-  }
-
   // Add transaction to the build_tx_in_progress as this transaction doesn't
   // change data and shouldn't block other parallel index creations
   auto read_transaction_id = dba.transaction().id_;
@@ -198,21 +163,6 @@ void GraphDbAccessor::BuildIndex(storage::Label label,
 
   dba.PopulateIndex(key);
 
-  // Check if all workers successfully built their indexes; only after that
-  // can we set the index as built.
-  if (index_rpc_completions) {
-    // Wait first, check later - so that every thread finishes and none
-    // terminates. This could be optimized to notify the other workers to
-    // stop building indexes as soon as one of them fails.
-    for (auto &index_built : *index_rpc_completions) index_built.wait();
-    for (auto &index_built : *index_rpc_completions) {
-      if (!index_built.get()) {
-        db_.storage().label_property_index_.DeleteIndex(key);
-        throw IndexCreationOnWorkerException("Index exists on a worker");
-      }
-    }
-  }
-
   dba.EnableIndex(key);
   dba.Commit();
 }
@@ -246,7 +196,6 @@ void GraphDbAccessor::UpdateLabelIndices(storage::Label label,
                                          const VertexAccessor &vertex_accessor,
                                          const Vertex *const vertex) {
   DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
-  DCHECK(vertex_accessor.is_local()) << "Only local vertices belong in indexes";
   auto *vlist_ptr = vertex_accessor.address().local();
   db_.storage().labels_index_.Update(label, vlist_ptr, vertex);
   db_.storage().label_property_index_.UpdateOnLabel(label, vlist_ptr, vertex);
@@ -256,7 +205,6 @@ void GraphDbAccessor::UpdatePropertyIndex(
     storage::Property property, const RecordAccessor<Vertex> &vertex_accessor,
     const Vertex *const vertex) {
   DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
-  DCHECK(vertex_accessor.is_local()) << "Only local vertices belong in indexes";
   db_.storage().label_property_index_.UpdateOnProperty(
       property, vertex_accessor.address().local(), vertex);
 }
@@ -337,14 +285,6 @@ bool GraphDbAccessor::RemoveVertex(VertexAccessor &vertex_accessor,
                                    bool check_empty) {
   DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
 
-  if (!vertex_accessor.is_local()) {
-    auto address = vertex_accessor.address();
-    db().updates_clients().RemoveVertex(address.worker_id(), transaction_id(),
-                                        address.gid(), check_empty);
-    // We can't know if we are going to be able to remove vertex until deferred
-    // updates on a remote worker are executed
-    return true;
-  }
   vertex_accessor.SwitchNew();
   // it's possible the vertex was removed already in this transaction
   // due to it getting matched multiple times by some patterns
@@ -387,66 +327,37 @@ EdgeAccessor GraphDbAccessor::InsertEdge(
   storage::EdgeAddress edge_address;
 
   Vertex *from_updated;
-  if (from.is_local()) {
-    auto gid = db_.storage().edge_generator_.Next(requested_gid);
-    edge_address = new mvcc::VersionList<Edge>(
-        transaction_, gid, from.address(), to.address(), edge_type);
-    // We need to insert edge_address to edges_ before calling update since
-    // update can throw and edge_vlist will not be garbage collected if it is
-    // not in edges_ skiplist.
-    bool success =
-        db_.storage().edges_.access().insert(gid, edge_address.local()).second;
-    CHECK(success) << "Attempting to insert an edge with an existing GID: "
-                   << gid;
+  auto gid = db_.storage().edge_generator_.Next(requested_gid);
+  edge_address = new mvcc::VersionList<Edge>(transaction_, gid, from.address(),
+                                             to.address(), edge_type);
+  // We need to insert edge_address to edges_ before calling update since
+  // update can throw and edge_vlist will not be garbage collected if it is
+  // not in edges_ skiplist.
+  bool success =
+      db_.storage().edges_.access().insert(gid, edge_address.local()).second;
+  CHECK(success) << "Attempting to insert an edge with an existing GID: "
+                 << gid;
 
-    from.SwitchNew();
-    from_updated = &from.update();
+  from.SwitchNew();
+  from_updated = &from.update();
 
-    // TODO when preparing WAL for distributed, most likely never use
-    // `CREATE_EDGE`, but always have it split into 3 parts (edge insertion,
-    // in/out modification).
-    wal().Emplace(database::StateDelta::CreateEdge(
-        transaction_.id_, gid, from.gid(), to.gid(), edge_type,
-        EdgeTypeName(edge_type)));
+  // TODO when preparing WAL for distributed, most likely never use
+  // `CREATE_EDGE`, but always have it split into 3 parts (edge insertion,
+  // in/out modification).
+  wal().Emplace(database::StateDelta::CreateEdge(
+      transaction_.id_, gid, from.gid(), to.gid(), edge_type,
+      EdgeTypeName(edge_type)));
 
-  } else {
-    edge_address = db().updates_clients().CreateEdge(transaction_id(), from, to,
-                                                     edge_type);
-
-    from_updated = db().data_manager()
-                       .Elements<Vertex>(transaction_id())
-                       .FindNew(from.gid());
-
-    // Create an Edge and insert it into the Cache so we see it locally.
-    db().data_manager()
-        .Elements<Edge>(transaction_id())
-        .emplace(
-            edge_address.gid(), nullptr,
-            std::make_unique<Edge>(from.address(), to.address(), edge_type));
-  }
   from_updated->out_.emplace(
       db_.storage().LocalizedAddressIfPossible(to.address()), edge_address,
       edge_type);
 
   Vertex *to_updated;
-  if (to.is_local()) {
-    // ensure that the "to" accessor has the latest version (Switch new)
-    // WARNING: must do that after the above "from.update()" for cases when
-    // we are creating a cycle and "from" and "to" are the same vlist
-    to.SwitchNew();
-    to_updated = &to.update();
-  } else {
-    // The RPC call for the `to` side is already handled if `from` is not local.
-    if (from.is_local() ||
-        from.address().worker_id() != to.address().worker_id()) {
-      db().updates_clients().AddInEdge(
-          transaction_id(), from,
-          db().storage().GlobalizedAddress(edge_address), to, edge_type);
-    }
-    to_updated = db().data_manager()
-                     .Elements<Vertex>(transaction_id())
-                     .FindNew(to.gid());
-  }
+  // ensure that the "to" accessor has the latest version (Switch new)
+  // WARNING: must do that after the above "from.update()" for cases when
+  // we are creating a cycle and "from" and "to" are the same vlist
+  to.SwitchNew();
+  to_updated = &to.update();
   to_updated->in_.emplace(
       db_.storage().LocalizedAddressIfPossible(from.address()), edge_address,
       edge_type);
@@ -479,35 +390,16 @@ int64_t GraphDbAccessor::EdgesCount() const {
 void GraphDbAccessor::RemoveEdge(EdgeAccessor &edge, bool remove_out_edge,
                                  bool remove_in_edge) {
   DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
-  if (edge.is_local()) {
-    // it's possible the edge was removed already in this transaction
-    // due to it getting matched multiple times by some patterns
-    // we can only delete it once, so check if it's already deleted
-    edge.SwitchNew();
-    if (edge.current().is_expired_by(transaction_)) return;
-    if (remove_out_edge) edge.from().RemoveOutEdge(edge.address());
-    if (remove_in_edge) edge.to().RemoveInEdge(edge.address());
+  // it's possible the edge was removed already in this transaction
+  // due to it getting matched multiple times by some patterns
+  // we can only delete it once, so check if it's already deleted
+  edge.SwitchNew();
+  if (edge.current().is_expired_by(transaction_)) return;
+  if (remove_out_edge) edge.from().RemoveOutEdge(edge.address());
+  if (remove_in_edge) edge.to().RemoveInEdge(edge.address());
 
-    edge.address().local()->remove(edge.current_, transaction_);
-    wal().Emplace(
-        database::StateDelta::RemoveEdge(transaction_.id_, edge.gid()));
-  } else {
-    auto edge_addr = edge.GlobalAddress();
-    auto from_addr = db().storage().GlobalizedAddress(edge.from_addr());
-    CHECK(edge_addr.worker_id() == from_addr.worker_id())
-        << "Edge and it's 'from' vertex not on the same worker";
-    auto to_addr = db().storage().GlobalizedAddress(edge.to_addr());
-    db().updates_clients().RemoveEdge(transaction_id(), edge_addr.worker_id(),
-                                      edge_addr.gid(), from_addr.gid(),
-                                      to_addr);
-
-    // Another RPC is necessary only if the first did not handle vertices on
-    // both sides.
-    if (edge_addr.worker_id() != to_addr.worker_id()) {
-      db().updates_clients().RemoveInEdge(transaction_id(), to_addr.worker_id(),
-                                          to_addr.gid(), edge_addr);
-    }
-  }
+  edge.address().local()->remove(edge.current_, transaction_);
+  wal().Emplace(database::StateDelta::RemoveEdge(transaction_.id_, edge.gid()));
 }
 
 storage::Label GraphDbAccessor::Label(const std::string &label_name) {
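
The removed master-side BuildIndex path dispatched index-build requests to every worker and then waited on all of the returned futures before checking any result. A minimal sketch of that wait-all-then-check discipline, assuming plain std::async tasks in place of the removed RPC futures:

#include <future>
#include <iostream>
#include <vector>

int main() {
  // Tasks standing in for the removed per-worker index builds.
  std::vector<std::future<bool>> completions;
  for (int i = 0; i < 3; ++i)
    completions.push_back(std::async(std::launch::async, [i] { return i != 1; }));

  // Wait first, so that every task finishes and none is abandoned mid-flight,
  for (auto &index_built : completions) index_built.wait();
  // and only then inspect the outcomes.
  bool all_ok = true;
  for (auto &index_built : completions) all_ok = all_ok && index_built.get();
  std::cout << (all_ok ? "index built everywhere" : "an index build failed")
            << std::endl;
  return all_ok ? 0 : 1;
}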
diff --git a/src/database/graph_db_accessor.hpp b/src/database/graph_db_accessor.hpp
index bacb710d0..f2a961ddc 100644
--- a/src/database/graph_db_accessor.hpp
+++ b/src/database/graph_db_accessor.hpp
@@ -9,7 +9,6 @@
 #include "glog/logging.h"
 
 #include "database/graph_db.hpp"
-#include "distributed/cache.hpp"
 #include "query/typed_value.hpp"
 #include "storage/address_types.hpp"
 #include "storage/edge_accessor.hpp"
@@ -78,13 +77,6 @@ class GraphDbAccessor {
   VertexAccessor InsertVertex(std::experimental::optional<gid::Gid>
                                   requested_gid = std::experimental::nullopt);
 
-  /** Creates a new Vertex on the given worker. It is NOT allowed to call this
-   * function with this worker's id. */
-  VertexAccessor InsertVertexIntoRemote(
-      int worker_id, const std::vector<storage::Label> &labels,
-      const std::unordered_map<storage::Property, query::TypedValue>
-          &properties);
-
   /**
    * Removes the vertex of the given accessor. If the vertex has any outgoing or
    * incoming edges, it is not deleted. See `DetachRemoveVertex` if you want to
diff --git a/src/database/state_delta.hpp b/src/database/state_delta.hpp
index 7c87e9c58..bce339ff7 100644
--- a/src/database/state_delta.hpp
+++ b/src/database/state_delta.hpp
@@ -7,7 +7,6 @@
 #include "storage/address_types.hpp"
 #include "storage/gid.hpp"
 #include "storage/property_value.hpp"
-#include "utils/serialization.hpp"
 
 namespace database {
 /** Describes single change to the database state. Used for durability (WAL) and
@@ -132,52 +131,5 @@ struct StateDelta {
   storage::Label label;
   std::string label_name;
   bool check_empty;
-
- private:
-  friend class boost::serialization::access;
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &type;
-    ar &transaction_id;
-    ar &vertex_id;
-    ar &edge_id;
-    ar &edge_address;
-    ar &vertex_from_id;
-    ar &vertex_from_address;
-    ar &vertex_to_id;
-    ar &vertex_to_address;
-    ar &edge_type;
-    ar &edge_type_name;
-    ar &property;
-    ar &property_name;
-    utils::SaveTypedValue(ar, value);
-    ar &label;
-    ar &label_name;
-    ar &check_empty;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &type;
-    ar &transaction_id;
-    ar &vertex_id;
-    ar &edge_id;
-    ar &edge_address;
-    ar &vertex_from_id;
-    ar &vertex_from_address;
-    ar &vertex_to_id;
-    ar &vertex_to_address;
-    ar &edge_type;
-    ar &edge_type_name;
-    ar &property;
-    ar &property_name;
-    query::TypedValue tv;
-    utils::LoadTypedValue(ar, tv);
-    value = tv;
-    ar &label;
-    ar &label_name;
-    ar &check_empty;
-  }
 };
 }  // namespace database
diff --git a/src/database/storage_gc.hpp b/src/database/storage_gc.hpp
index 241849224..effc5db4c 100644
--- a/src/database/storage_gc.hpp
+++ b/src/database/storage_gc.hpp
@@ -6,7 +6,6 @@
 #include "data_structures/concurrent/concurrent_map.hpp"
 #include "database/storage.hpp"
 #include "mvcc/version_list.hpp"
-#include "stats/metrics.hpp"
 #include "storage/deferred_deleter.hpp"
 #include "storage/edge.hpp"
 #include "storage/garbage_collector.hpp"
diff --git a/src/database/storage_gc_master.hpp b/src/database/storage_gc_master.hpp
deleted file mode 100644
index 02b9d513b..000000000
--- a/src/database/storage_gc_master.hpp
+++ /dev/null
@@ -1,67 +0,0 @@
-#pragma once
-
-#include <mutex>
-
-#include "database/storage_gc.hpp"
-#include "distributed/coordination_master.hpp"
-#include "distributed/storage_gc_rpc_messages.hpp"
-
-namespace database {
-class StorageGcMaster : public StorageGc {
- public:
-  using StorageGc::StorageGc;
-  StorageGcMaster(Storage &storage, tx::Engine &tx_engine, int pause_sec,
-                  communication::rpc::Server &rpc_server,
-                  distributed::MasterCoordination &coordination)
-      : StorageGc(storage, tx_engine, pause_sec),
-        rpc_server_(rpc_server),
-        coordination_(coordination) {
-    rpc_server_.Register<distributed::RanLocalGcRpc>(
-        [this](const distributed::GcClearedStatusReq &req) {
-          std::unique_lock<std::mutex> lock(worker_safe_transaction_mutex_);
-          worker_safe_transaction_[req.worker_id] = req.local_oldest_active;
-          return std::make_unique<distributed::GcClearedStatusRes>();
-        });
-  }
-
-  ~StorageGcMaster() {
-    // We have to stop the scheduler before destroying this class; otherwise a
-    // task might call a method on this class that resolves to a pure virtual
-    // call, since those methods are not implemented in the base class.
-    scheduler_.Stop();
-    rpc_server_.UnRegister<distributed::RanLocalGcRpc>();
-  }
-
-  void CollectCommitLogGarbage(tx::TransactionId oldest_active) final {
-    // Workers send information about when it is safe, from their perspective,
-    // to delete every transaction older than oldest_active, i.e. no future
-    // transaction with an id greater than or equal to oldest_active will
-    // query the commit log about the state of the transactions which we are
-    // deleting.
-    auto safe_transaction = GetClogSafeTransaction(oldest_active);
-    if (safe_transaction) {
-      tx::TransactionId min_safe = *safe_transaction;
-      {
-        std::unique_lock<std::mutex> lock(worker_safe_transaction_mutex_);
-        for (auto worker_id : coordination_.GetWorkerIds()) {
-          // Skip itself
-          if (worker_id == 0) continue;
-          min_safe = std::min(min_safe, worker_safe_transaction_[worker_id]);
-        }
-      }
-      // All workers reported back at least once
-      if (min_safe > 0) {
-        tx_engine_.GarbageCollectCommitLog(min_safe);
-        LOG(INFO) << "Clearing master commit log with tx: " << min_safe;
-      }
-    }
-  }
-
-  communication::rpc::Server &rpc_server_;
-  distributed::MasterCoordination &coordination_;
-  // Mapping from worker id to the oldest active transaction which is safe
-  // for deletion from that worker's perspective.
-  std::unordered_map<int, tx::TransactionId> worker_safe_transaction_;
-  std::mutex worker_safe_transaction_mutex_;
-};
-}  // namespace database
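
For reference, the commit-log cleanup that the deleted StorageGcMaster performed boils down to tracking, per worker, the oldest transaction that is safe to collect and taking the minimum over all reports under a mutex. A hedged sketch with simplified types; SafeTxTracker and its members are illustrative, not Memgraph APIs:

#include <algorithm>
#include <cstdint>
#include <mutex>
#include <unordered_map>

using TransactionId = uint64_t;

class SafeTxTracker {
 public:
  // Called when a worker reports the oldest transaction it still observes.
  void Report(int worker_id, TransactionId safe) {
    std::lock_guard<std::mutex> guard(lock_);
    safe_per_worker_[worker_id] = safe;
  }

  // Returns the id below which it is safe to collect, or 0 if some worker has
  // not reported yet (playing the role of the "min_safe > 0" check above).
  TransactionId MinSafe(TransactionId local_safe, int worker_count) {
    std::lock_guard<std::mutex> guard(lock_);
    if (static_cast<int>(safe_per_worker_.size()) < worker_count) return 0;
    TransactionId min_safe = local_safe;
    for (const auto &kv : safe_per_worker_)
      min_safe = std::min(min_safe, kv.second);
    return min_safe;
  }

 private:
  std::mutex lock_;
  std::unordered_map<int, TransactionId> safe_per_worker_;
};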
diff --git a/src/database/storage_gc_worker.hpp b/src/database/storage_gc_worker.hpp
deleted file mode 100644
index 4d938dbb9..000000000
--- a/src/database/storage_gc_worker.hpp
+++ /dev/null
@@ -1,46 +0,0 @@
-#pragma once
-
-#include "communication/rpc/client_pool.hpp"
-#include "database/storage_gc.hpp"
-#include "distributed/storage_gc_rpc_messages.hpp"
-
-#include "transactions/engine_worker.hpp"
-#include "transactions/transaction.hpp"
-
-namespace database {
-class StorageGcWorker : public StorageGc {
- public:
-  StorageGcWorker(Storage &storage, tx::Engine &tx_engine, int pause_sec,
-                  communication::rpc::ClientPool &master_client_pool,
-                  int worker_id)
-      : StorageGc(storage, tx_engine, pause_sec),
-        master_client_pool_(master_client_pool),
-        worker_id_(worker_id) {}
-
-  ~StorageGcWorker() {
-    // We have to stop the scheduler before destroying this class; otherwise a
-    // task might call a method on this class that resolves to a pure virtual
-    // call, since those methods are not implemented in the base class.
-    scheduler_.Stop();
-  }
-
-  void CollectCommitLogGarbage(tx::TransactionId oldest_active) final {
-    // We first need to delete the transactions that we can delete, so that
-    // their locks are released as well. Otherwise a new transaction might try
-    // to acquire a lock which hasn't been released (if the transaction cache
-    // cleaner was not scheduled at this time), and then look into a commit
-    // log which no longer contains that transaction id.
-    dynamic_cast<tx::WorkerEngine &>(tx_engine_)
-        .ClearTransactionalCache(oldest_active);
-    auto safe_to_delete = GetClogSafeTransaction(oldest_active);
-    if (safe_to_delete) {
-      master_client_pool_.Call<distributed::RanLocalGcRpc>(*safe_to_delete,
-                                                           worker_id_);
-      tx_engine_.GarbageCollectCommitLog(*safe_to_delete);
-    }
-  }
-
-  communication::rpc::ClientPool &master_client_pool_;
-  int worker_id_;
-};
-}  // namespace database
diff --git a/src/distributed/cache.cpp b/src/distributed/cache.cpp
deleted file mode 100644
index dc3e7721b..000000000
--- a/src/distributed/cache.cpp
+++ /dev/null
@@ -1,99 +0,0 @@
-
-#include "glog/logging.h"
-
-#include "database/storage.hpp"
-#include "distributed/cache.hpp"
-#include "storage/edge.hpp"
-#include "storage/vertex.hpp"
-
-namespace distributed {
-
-template <typename TRecord>
-TRecord *Cache<TRecord>::FindNew(gid::Gid gid) {
-  std::lock_guard<std::mutex> guard{lock_};
-  auto found = cache_.find(gid);
-  DCHECK(found != cache_.end())
-      << "FindNew for uninitialized remote Vertex/Edge";
-  auto &pair = found->second;
-  if (!pair.second) {
-    pair.second = std::unique_ptr<TRecord>(pair.first->CloneData());
-  }
-  return pair.second.get();
-}
-
-template <typename TRecord>
-void Cache<TRecord>::FindSetOldNew(tx::TransactionId tx_id, int worker_id,
-                                   gid::Gid gid, TRecord *&old_record,
-                                   TRecord *&new_record) {
-  {
-    std::lock_guard<std::mutex> guard(lock_);
-    auto found = cache_.find(gid);
-    if (found != cache_.end()) {
-      old_record = found->second.first.get();
-      new_record = found->second.second.get();
-      return;
-    }
-  }
-
-  auto remote = data_clients_.RemoteElement<TRecord>(worker_id, tx_id, gid);
-  LocalizeAddresses(*remote);
-
-  // This logic is a bit strange because we need to make sure that no one else
-  // got a response and updated the cache before we did (which requires a
-  // lock), but we also need to return whatever entry ends up in the cache -
-  // otherwise we could get inconsistent results for remote FindSetOldNew.
-  std::lock_guard<std::mutex> guard(lock_);
-  auto it_pair = cache_.emplace(
-      gid, std::make_pair<rec_uptr, rec_uptr>(std::move(remote), nullptr));
-
-  old_record = it_pair.first->second.first.get();
-  new_record = it_pair.first->second.second.get();
-}
-
-template <typename TRecord>
-void Cache<TRecord>::emplace(gid::Gid gid, rec_uptr old_record,
-                             rec_uptr new_record) {
-  if (old_record) LocalizeAddresses(*old_record);
-  if (new_record) LocalizeAddresses(*new_record);
-
-  std::lock_guard<std::mutex> guard{lock_};
-  // We can't replace existing data because some accessors might be using
-  // it.
-  // TODO - consider if it's necessary and OK to copy just the data content.
-  auto found = cache_.find(gid);
-  if (found != cache_.end())
-    return;
-  else
-    cache_[gid] = std::make_pair(std::move(old_record), std::move(new_record));
-}
-
-template <typename TRecord>
-void Cache<TRecord>::ClearCache() {
-  std::lock_guard<std::mutex> guard{lock_};
-  cache_.clear();
-}
-
-template <>
-void Cache<Vertex>::LocalizeAddresses(Vertex &vertex) {
-  auto localize_edges = [this](auto &edges) {
-    for (auto &element : edges) {
-      element.vertex = storage_.LocalizedAddressIfPossible(element.vertex);
-      element.edge = storage_.LocalizedAddressIfPossible(element.edge);
-    }
-  };
-
-  localize_edges(vertex.in_.storage());
-  localize_edges(vertex.out_.storage());
-}
-
-template <>
-void Cache<Edge>::LocalizeAddresses(Edge &edge) {
-  edge.from_ = storage_.LocalizedAddressIfPossible(edge.from_);
-  edge.to_ = storage_.LocalizedAddressIfPossible(edge.to_);
-}
-
-template class Cache<Vertex>;
-template class Cache<Edge>;
-
-}  // namespace distributed
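
The deleted Cache::FindSetOldNew above follows a check, fetch-outside-the-lock, re-check-and-insert sequence, so a remote fetch never blocks other readers and a racing insert wins cleanly. A compact sketch of that sequence, with a hypothetical string "record" standing in for the removed RPC fetch of a remote Vertex/Edge:

#include <cstdint>
#include <mutex>
#include <string>
#include <unordered_map>

class RemoteCache {
 public:
  const std::string &Get(uint64_t gid) {
    {
      // Fast path: the record is already cached.
      std::lock_guard<std::mutex> guard(lock_);
      auto found = cache_.find(gid);
      if (found != cache_.end()) return found->second;
    }
    // Expensive fetch done without holding the lock (stand-in for the RPC).
    std::string remote = "record-" + std::to_string(gid);

    std::lock_guard<std::mutex> guard(lock_);
    // emplace keeps the existing entry if another thread inserted first, so
    // every caller observes the same cached record.
    auto it_pair = cache_.emplace(gid, std::move(remote));
    return it_pair.first->second;
  }

 private:
  std::mutex lock_;
  std::unordered_map<uint64_t, std::string> cache_;
};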
diff --git a/src/distributed/cache.hpp b/src/distributed/cache.hpp
deleted file mode 100644
index d41eb1ca2..000000000
--- a/src/distributed/cache.hpp
+++ /dev/null
@@ -1,62 +0,0 @@
-#pragma once
-
-#include <mutex>
-#include <unordered_map>
-
-#include "distributed/data_rpc_clients.hpp"
-#include "storage/gid.hpp"
-
-namespace database {
-class Storage;
-}
-
-namespace distributed {
-
-/**
- * Used for caching Vertices and Edges that are stored on another worker in a
- * distributed system. Maps global IDs to (old, new) Vertex/Edge pointer
- * pairs.  It is possible that either "old" or "new" are nullptrs, but at
- * least one must be not-null. The Cache is the owner of TRecord
- * objects it points to.
- *
- * @tparam TRecord - Edge or Vertex
- */
-template <typename TRecord>
-class Cache {
-  using rec_uptr = std::unique_ptr<TRecord>;
-
- public:
-  Cache(database::Storage &storage, distributed::DataRpcClients &data_clients)
-      : storage_(storage), data_clients_(data_clients) {}
-
-  /// Returns the new data for the given ID. Creates it (as copy of old) if
-  /// necessary.
-  TRecord *FindNew(gid::Gid gid);
-
-  /// For the Vertex/Edge with the given global ID, looks for the data visible
-  /// from the given transaction's ID and command ID, and caches it. Sets the
-  /// given pointers to point to the fetched data. Analogue to
-  /// mvcc::VersionList::find_set_old_new.
-  void FindSetOldNew(tx::TransactionId tx_id, int worker_id, gid::Gid gid,
-                     TRecord *&old_record, TRecord *&new_record);
-
-  /// Sets the given records as (old, new) data for the given gid.
-  void emplace(gid::Gid gid, rec_uptr old_record, rec_uptr new_record);
-
-  /// Removes all the data from the cache.
-  void ClearCache();
-
- private:
-  database::Storage &storage_;
-
-  std::mutex lock_;
-  distributed::DataRpcClients &data_clients_;
-  // TODO it'd be better if we had VertexData and EdgeData in here, as opposed
-  // to Vertex and Edge.
-  std::unordered_map<gid::Gid, std::pair<rec_uptr, rec_uptr>> cache_;
-
-  // Localizes all the addresses in the record.
-  void LocalizeAddresses(TRecord &record);
-};
-
-}  // namespace distributed
diff --git a/src/distributed/cluster_discovery_master.cpp b/src/distributed/cluster_discovery_master.cpp
deleted file mode 100644
index d064bbd63..000000000
--- a/src/distributed/cluster_discovery_master.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-#include "distributed/cluster_discovery_master.hpp"
-#include "communication/rpc/client_pool.hpp"
-#include "distributed/coordination_rpc_messages.hpp"
-
-namespace distributed {
-using Server = communication::rpc::Server;
-
-ClusterDiscoveryMaster::ClusterDiscoveryMaster(
-    Server &server, MasterCoordination &coordination,
-    RpcWorkerClients &rpc_worker_clients)
-    : server_(server),
-      coordination_(coordination),
-      rpc_worker_clients_(rpc_worker_clients) {
-  server_.Register<RegisterWorkerRpc>([this](const RegisterWorkerReq &req) {
-    bool registration_successful =
-        this->coordination_.RegisterWorker(req.desired_worker_id, req.endpoint);
-
-    if (registration_successful) {
-      rpc_worker_clients_.ExecuteOnWorkers<void>(
-          0, [req](communication::rpc::ClientPool &client_pool) {
-            auto result = client_pool.Call<ClusterDiscoveryRpc>(
-                req.desired_worker_id, req.endpoint);
-            CHECK(result) << "ClusterDiscoveryRpc failed";
-          });
-    }
-
-    return std::make_unique<RegisterWorkerRes>(
-        registration_successful, this->coordination_.RecoveryInfo(),
-        this->coordination_.GetWorkers());
-  });
-}
-
-}  // namespace distributed
diff --git a/src/distributed/cluster_discovery_master.hpp b/src/distributed/cluster_discovery_master.hpp
deleted file mode 100644
index cc402e357..000000000
--- a/src/distributed/cluster_discovery_master.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-#pragma once
-
-#include "communication/rpc/server.hpp"
-#include "distributed/coordination_master.hpp"
-#include "distributed/rpc_worker_clients.hpp"
-
-namespace distributed {
-using Server = communication::rpc::Server;
-
-/** Handle cluster discovery on master.
- *
- * Cluster discovery on master handles worker registration and broadcasts new
- * worker information to already registered workers, and already registered
- * worker information to the new worker.
- */
-class ClusterDiscoveryMaster final {
- public:
-  ClusterDiscoveryMaster(Server &server, MasterCoordination &coordination,
-                         RpcWorkerClients &rpc_worker_clients);
-
- private:
-  Server &server_;
-  MasterCoordination &coordination_;
-  RpcWorkerClients &rpc_worker_clients_;
-};
-
-}  // namespace distributed
diff --git a/src/distributed/cluster_discovery_worker.cpp b/src/distributed/cluster_discovery_worker.cpp
deleted file mode 100644
index 3de166cbb..000000000
--- a/src/distributed/cluster_discovery_worker.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-#include "distributed/cluster_discovery_worker.hpp"
-#include "distributed/coordination_rpc_messages.hpp"
-
-namespace distributed {
-using Server = communication::rpc::Server;
-
-ClusterDiscoveryWorker::ClusterDiscoveryWorker(
-    Server &server, WorkerCoordination &coordination,
-    communication::rpc::ClientPool &client_pool)
-    : server_(server), coordination_(coordination), client_pool_(client_pool) {
-  server_.Register<ClusterDiscoveryRpc>([this](const ClusterDiscoveryReq &req) {
-    this->coordination_.RegisterWorker(req.worker_id, req.endpoint);
-    return std::make_unique<ClusterDiscoveryRes>();
-  });
-}
-
-void ClusterDiscoveryWorker::RegisterWorker(int worker_id) {
-  auto result =
-      client_pool_.Call<RegisterWorkerRpc>(worker_id, server_.endpoint());
-  CHECK(result) << "RegisterWorkerRpc failed";
-  CHECK(result->registration_successful) << "Unable to assign requested ID ("
-                                         << worker_id << ") to worker!";
-
-  for (auto &kv : result->workers) {
-    coordination_.RegisterWorker(kv.first, kv.second);
-  }
-  recovery_info_ = result->recovery_info;
-}
-
-}  // namespace distributed
diff --git a/src/distributed/cluster_discovery_worker.hpp b/src/distributed/cluster_discovery_worker.hpp
deleted file mode 100644
index 186cf08cb..000000000
--- a/src/distributed/cluster_discovery_worker.hpp
+++ /dev/null
@@ -1,43 +0,0 @@
-#pragma once
-
-#include <experimental/optional>
-
-#include "communication/rpc/client_pool.hpp"
-#include "communication/rpc/server.hpp"
-#include "distributed/coordination_worker.hpp"
-#include "durability/recovery.hpp"
-
-namespace distributed {
-using Server = communication::rpc::Server;
-using ClientPool = communication::rpc::ClientPool;
-
-/** Handle cluster discovery on worker.
- *
- * Cluster discovery on a worker handles worker registration by sending an rpc
- * request to the master and processing the received rpc response, which
- * contains information about the other workers.
- */
-class ClusterDiscoveryWorker final {
- public:
-  ClusterDiscoveryWorker(Server &server, WorkerCoordination &coordination,
-                         ClientPool &client_pool);
-
-  /**
-   * Registers a worker with the master.
-   *
-   * @param worker_id - Desired ID. If the master can't assign the desired
-   * worker id, the worker will exit.
-   */
-  void RegisterWorker(int worker_id);
-
-  /** Returns the recovery info. Valid only after registration. */
-  auto recovery_info() const { return recovery_info_; }
-
- private:
-  Server &server_;
-  WorkerCoordination &coordination_;
-  communication::rpc::ClientPool &client_pool_;
-  std::experimental::optional<durability::RecoveryInfo> recovery_info_;
-};
-
-}  // namespace distributed
diff --git a/src/distributed/coordination.cpp b/src/distributed/coordination.cpp
deleted file mode 100644
index c112aeecf..000000000
--- a/src/distributed/coordination.cpp
+++ /dev/null
@@ -1,34 +0,0 @@
-#include "glog/logging.h"
-
-#include "distributed/coordination.hpp"
-
-namespace distributed {
-using Endpoint = io::network::Endpoint;
-
-Coordination::Coordination(const Endpoint &master_endpoint) {
-  // The master is always worker 0.
-  workers_.emplace(0, master_endpoint);
-}
-
-Endpoint Coordination::GetEndpoint(int worker_id) {
-  auto found = workers_.find(worker_id);
-  CHECK(found != workers_.end()) << "No endpoint registered for worker id: "
-                                 << worker_id;
-  return found->second;
-}
-
-std::vector<int> Coordination::GetWorkerIds() const {
-  std::vector<int> worker_ids;
-  for (auto worker : workers_) worker_ids.push_back(worker.first);
-  return worker_ids;
-}
-
-void Coordination::AddWorker(int worker_id, Endpoint endpoint) {
-  workers_.emplace(worker_id, endpoint);
-}
-
-std::unordered_map<int, Endpoint> Coordination::GetWorkers() {
-  return workers_;
-}
-
-}  // namespace distributed
diff --git a/src/distributed/coordination.hpp b/src/distributed/coordination.hpp
deleted file mode 100644
index a7018313b..000000000
--- a/src/distributed/coordination.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-#pragma once
-
-#include <unordered_map>
-#include <vector>
-
-#include "io/network/endpoint.hpp"
-
-namespace distributed {
-
-/** Coordination base class. This class is not thread safe. */
-class Coordination {
- public:
-  explicit Coordination(const io::network::Endpoint &master_endpoint);
-
-  /** Gets the endpoint for the given worker ID from the master. */
-  io::network::Endpoint GetEndpoint(int worker_id);
-
-  /** Returns all worker ids, including the master's id (0). */
-  std::vector<int> GetWorkerIds() const;
-
-  /** Gets the mapping of worker id to worker endpoint including master (worker
-   * id = 0).
-   */
-  std::unordered_map<int, io::network::Endpoint> GetWorkers();
-
- protected:
-  ~Coordination() {}
-
-  /** Adds a worker to coordination. */
-  void AddWorker(int worker_id, io::network::Endpoint endpoint);
-
- private:
-  std::unordered_map<int, io::network::Endpoint> workers_;
-};
-
-}  // namespace distributed
diff --git a/src/distributed/coordination_master.cpp b/src/distributed/coordination_master.cpp
deleted file mode 100644
index 67c3c8869..000000000
--- a/src/distributed/coordination_master.cpp
+++ /dev/null
@@ -1,83 +0,0 @@
-#include <chrono>
-#include <thread>
-
-#include "glog/logging.h"
-
-#include "communication/rpc/client.hpp"
-#include "distributed/coordination_master.hpp"
-#include "distributed/coordination_rpc_messages.hpp"
-#include "utils/network.hpp"
-
-namespace distributed {
-
-MasterCoordination::MasterCoordination(const Endpoint &master_endpoint)
-    : Coordination(master_endpoint) {}
-
-bool MasterCoordination::RegisterWorker(int desired_worker_id,
-                                        Endpoint endpoint) {
-  // Workers can't register before the recovery phase on the master is done,
-  // to ensure the whole cluster is in a consistent state.
-  while (true) {
-    {
-      std::lock_guard<std::mutex> guard(lock_);
-      if (recovery_done_) break;
-    }
-    std::this_thread::sleep_for(std::chrono::milliseconds(200));
-  }
-
-  std::lock_guard<std::mutex> guard(lock_);
-  auto workers = GetWorkers();
-  // Check if the desired worker id already exists.
-  if (workers.find(desired_worker_id) != workers.end()) {
-    LOG(WARNING) << "Unable to assign requested ID (" << desired_worker_id
-                 << ") to worker at: " << endpoint;
-    // If the desired worker ID is already assigned, return false and don't
-    // add that worker to master coordination.
-    return false;
-  }
-
-  AddWorker(desired_worker_id, endpoint);
-  return true;
-}
-
-Endpoint MasterCoordination::GetEndpoint(int worker_id) {
-  std::lock_guard<std::mutex> guard(lock_);
-  return Coordination::GetEndpoint(worker_id);
-}
-
-MasterCoordination::~MasterCoordination() {
-  using namespace std::chrono_literals;
-  std::lock_guard<std::mutex> guard(lock_);
-  auto workers = GetWorkers();
-  for (const auto &kv : workers) {
-    // Skip master (self).
-    if (kv.first == 0) continue;
-    communication::rpc::Client client(kv.second);
-    auto result = client.Call<StopWorkerRpc>();
-    CHECK(result) << "StopWorkerRpc failed for worker: " << kv.first;
-  }
-
-  // Make sure all workers have died.
-  for (const auto &kv : workers) {
-    // Skip master (self).
-    if (kv.first == 0) continue;
-    while (utils::CanEstablishConnection(kv.second))
-      std::this_thread::sleep_for(0.5s);
-  }
-}
-
-void MasterCoordination::SetRecoveryInfo(
-    std::experimental::optional<durability::RecoveryInfo> info) {
-  std::lock_guard<std::mutex> guard(lock_);
-  recovery_done_ = true;
-  recovery_info_ = info;
-}
-
-std::experimental::optional<durability::RecoveryInfo>
-MasterCoordination::RecoveryInfo() const {
-  std::lock_guard<std::mutex> guard(lock_);
-  CHECK(recovery_done_) << "RecoveryInfo requested before it's available";
-  return recovery_info_;
-}
-
-}  // namespace distributed
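
The deleted MasterCoordination::RegisterWorker above blocks new registrations until the recovery phase has finished, polling a flag under a lock. A minimal sketch of that gate, with a hypothetical name (RecoveryGate) and the same 200 ms polling interval:

#include <chrono>
#include <mutex>
#include <thread>

class RecoveryGate {
 public:
  void MarkRecoveryDone() {
    std::lock_guard<std::mutex> guard(lock_);
    recovery_done_ = true;
  }

  // Blocks the caller until recovery is done, checking every 200 ms.
  void WaitUntilRecoveryDone() {
    while (true) {
      {
        std::lock_guard<std::mutex> guard(lock_);
        if (recovery_done_) break;
      }
      std::this_thread::sleep_for(std::chrono::milliseconds(200));
    }
  }

 private:
  std::mutex lock_;
  bool recovery_done_{false};
};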
diff --git a/src/distributed/coordination_master.hpp b/src/distributed/coordination_master.hpp
deleted file mode 100644
index a42194180..000000000
--- a/src/distributed/coordination_master.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-#pragma once
-
-#include <experimental/optional>
-#include <mutex>
-#include <unordered_map>
-
-#include "distributed/coordination.hpp"
-#include "durability/recovery.hpp"
-#include "io/network/endpoint.hpp"
-
-namespace distributed {
-using Endpoint = io::network::Endpoint;
-
-/** Handles worker registration, getting of other workers' endpoints and
- * coordinated shutdown in a distributed memgraph. Master side. */
-class MasterCoordination final : public Coordination {
- public:
-  explicit MasterCoordination(const Endpoint &master_endpoint);
-
-  /** Shuts down all the workers and this master server. */
-  ~MasterCoordination();
-
-  /** Registers a new worker with this master coordination.
-   *
-   * @param desired_worker_id - The ID the worker would like to have.
-   * @return True if the desired ID for the worker is available, or false
-   * if the desired ID is already taken.
-   */
-  bool RegisterWorker(int desired_worker_id, Endpoint endpoint);
-
-  Endpoint GetEndpoint(int worker_id);
-
-  /// Sets the recovery info. nullopt indicates nothing was recovered.
-  void SetRecoveryInfo(
-      std::experimental::optional<durability::RecoveryInfo> info);
-
-  std::experimental::optional<durability::RecoveryInfo> RecoveryInfo() const;
-
- private:
-  // Most master functions aren't thread-safe.
-  mutable std::mutex lock_;
-
-  /// Durability recovery info.
-  /// Indicates if the recovery phase is done.
-  bool recovery_done_{false};
-  /// If nullopt nothing was recovered.
-  std::experimental::optional<durability::RecoveryInfo> recovery_info_;
-};
-
-}  // namespace distributed
diff --git a/src/distributed/coordination_rpc_messages.hpp b/src/distributed/coordination_rpc_messages.hpp
deleted file mode 100644
index 756114b1b..000000000
--- a/src/distributed/coordination_rpc_messages.hpp
+++ /dev/null
@@ -1,95 +0,0 @@
-#pragma once
-
-#include <experimental/optional>
-#include <unordered_map>
-
-#include "boost/serialization/access.hpp"
-#include "boost/serialization/base_object.hpp"
-#include "boost/serialization/unordered_map.hpp"
-
-#include "communication/rpc/messages.hpp"
-#include "durability/recovery.hpp"
-#include "io/network/endpoint.hpp"
-
-namespace distributed {
-
-using communication::rpc::Message;
-using Endpoint = io::network::Endpoint;
-
-struct RegisterWorkerReq : public Message {
-  // Set desired_worker_id to -1 to get an automatically assigned ID.
-  RegisterWorkerReq(int desired_worker_id, const Endpoint &endpoint)
-      : desired_worker_id(desired_worker_id), endpoint(endpoint) {}
-  int desired_worker_id;
-  Endpoint endpoint;
-
- private:
-  friend class boost::serialization::access;
-  RegisterWorkerReq() {}
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &boost::serialization::base_object<Message>(*this);
-    ar &desired_worker_id;
-    ar &endpoint;
-  }
-};
-
-struct RegisterWorkerRes : public Message {
-  RegisterWorkerRes(
-      bool registration_successful,
-      std::experimental::optional<durability::RecoveryInfo> recovery_info,
-      std::unordered_map<int, Endpoint> workers)
-      : registration_successful(registration_successful),
-        recovery_info(recovery_info),
-        workers(std::move(workers)) {}
-
-  bool registration_successful;
-  std::experimental::optional<durability::RecoveryInfo> recovery_info;
-  std::unordered_map<int, Endpoint> workers;
-
- private:
-  friend class boost::serialization::access;
-  RegisterWorkerRes() {}
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &boost::serialization::base_object<Message>(*this);
-    ar &registration_successful;
-    ar &recovery_info;
-    ar &workers;
-  }
-};
-
-struct ClusterDiscoveryReq : public Message {
-  ClusterDiscoveryReq(int worker_id, Endpoint endpoint)
-      : worker_id(worker_id), endpoint(endpoint) {}
-
-  int worker_id;
-  Endpoint endpoint;
-
- private:
-  friend class boost::serialization::access;
-  ClusterDiscoveryReq() {}
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &boost::serialization::base_object<Message>(*this);
-    ar &worker_id;
-    ar &endpoint;
-  }
-};
-
-RPC_NO_MEMBER_MESSAGE(ClusterDiscoveryRes);
-RPC_NO_MEMBER_MESSAGE(StopWorkerReq);
-RPC_NO_MEMBER_MESSAGE(StopWorkerRes);
-
-using RegisterWorkerRpc =
-    communication::rpc::RequestResponse<RegisterWorkerReq, RegisterWorkerRes>;
-using StopWorkerRpc =
-    communication::rpc::RequestResponse<StopWorkerReq, StopWorkerRes>;
-using ClusterDiscoveryRpc =
-    communication::rpc::RequestResponse<ClusterDiscoveryReq,
-                                        ClusterDiscoveryRes>;
-
-}  // namespace distributed
diff --git a/src/distributed/coordination_worker.cpp b/src/distributed/coordination_worker.cpp
deleted file mode 100644
index 4ae35a923..000000000
--- a/src/distributed/coordination_worker.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-#include <chrono>
-#include <condition_variable>
-#include <mutex>
-#include <thread>
-
-#include "glog/logging.h"
-
-#include "distributed/coordination_rpc_messages.hpp"
-#include "distributed/coordination_worker.hpp"
-
-namespace distributed {
-
-using namespace std::literals::chrono_literals;
-
-WorkerCoordination::WorkerCoordination(communication::rpc::Server &server,
-                                       const Endpoint &master_endpoint)
-    : Coordination(master_endpoint), server_(server) {}
-
-void WorkerCoordination::RegisterWorker(int worker_id, Endpoint endpoint) {
-  std::lock_guard<std::mutex> guard(lock_);
-  AddWorker(worker_id, endpoint);
-}
-
-void WorkerCoordination::WaitForShutdown() {
-  using namespace std::chrono_literals;
-  std::mutex mutex;
-  std::condition_variable cv;
-  bool shutdown = false;
-
-  server_.Register<StopWorkerRpc>([&](const StopWorkerReq &) {
-    std::unique_lock<std::mutex> lk(mutex);
-    shutdown = true;
-    lk.unlock();
-    cv.notify_one();
-    return std::make_unique<StopWorkerRes>();
-  });
-
-  std::unique_lock<std::mutex> lk(mutex);
-  cv.wait(lk, [&shutdown] { return shutdown; });
-}
-
-Endpoint WorkerCoordination::GetEndpoint(int worker_id) {
-  std::lock_guard<std::mutex> guard(lock_);
-  return Coordination::GetEndpoint(worker_id);
-}
-
-}  // namespace distributed
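
WaitForShutdown is the usual condition-variable handshake: the registered StopWorkerRpc handler flips a flag under the mutex and notifies, and the calling thread blocks on cv.wait with a predicate until that happens. A self-contained sketch of the same pattern, with a plain thread standing in for the RPC handler:

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

int main() {
  std::mutex mutex;
  std::condition_variable cv;
  bool shutdown = false;

  // Stand-in for the registered StopWorkerRpc handler.
  std::thread stop_handler([&] {
    std::unique_lock<std::mutex> lk(mutex);
    shutdown = true;
    lk.unlock();
    cv.notify_one();
  });

  // The worker's main thread blocks here until shutdown is requested.
  std::unique_lock<std::mutex> lk(mutex);
  cv.wait(lk, [&] { return shutdown; });
  std::cout << "shutdown requested, worker exiting\n";
  stop_handler.join();
}
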
diff --git a/src/distributed/coordination_worker.hpp b/src/distributed/coordination_worker.hpp
deleted file mode 100644
index d18e44e74..000000000
--- a/src/distributed/coordination_worker.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-#pragma once
-
-#include <mutex>
-#include <unordered_map>
-
-#include "communication/rpc/server.hpp"
-#include "distributed/coordination.hpp"
-
-namespace distributed {
-
-/** Handles worker registration, getting of other workers' endpoints and
- * coordinated shutdown in a distributed memgraph. Worker side. */
-class WorkerCoordination final : public Coordination {
-  using Endpoint = io::network::Endpoint;
-
- public:
-  WorkerCoordination(communication::rpc::Server &server,
-                     const Endpoint &master_endpoint);
-
-  /** Registers the worker with the given endpoint. */
-  void RegisterWorker(int worker_id, Endpoint endpoint);
-
-  /** Starts listening for a remote shutdown command (issued by the master).
-   * Blocks the calling thread until that has finished. */
-  void WaitForShutdown();
-
-  Endpoint GetEndpoint(int worker_id);
-
- private:
-  communication::rpc::Server &server_;
-  mutable std::mutex lock_;
-};
-}  // namespace distributed
diff --git a/src/distributed/data_manager.cpp b/src/distributed/data_manager.cpp
deleted file mode 100644
index 9a619d692..000000000
--- a/src/distributed/data_manager.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-#include "database/storage.hpp"
-#include "distributed/data_manager.hpp"
-
-namespace distributed {
-
-template <typename TRecord>
-Cache<TRecord> &DataManager::GetCache(CacheT<TRecord> &collection,
-                                      tx::TransactionId tx_id) {
-  auto access = collection.access();
-  auto found = access.find(tx_id);
-  if (found != access.end()) return found->second;
-
-  return access
-      .emplace(
-          tx_id, std::make_tuple(tx_id),
-          std::make_tuple(std::ref(db_.storage()), std::ref(data_clients_)))
-      .first->second;
-}
-
-template <>
-Cache<Vertex> &DataManager::Elements<Vertex>(tx::TransactionId tx_id) {
-  return GetCache(vertices_caches_, tx_id);
-}
-
-template <>
-Cache<Edge> &DataManager::Elements<Edge>(tx::TransactionId tx_id) {
-  return GetCache(edges_caches_, tx_id);
-}
-
-DataManager::DataManager(database::GraphDb &db,
-                         distributed::DataRpcClients &data_clients)
-    : db_(db), data_clients_(data_clients) {}
-
-void DataManager::ClearCacheForSingleTransaction(tx::TransactionId tx_id) {
-  Elements<Vertex>(tx_id).ClearCache();
-  Elements<Edge>(tx_id).ClearCache();
-}
-
-void DataManager::ClearTransactionalCache(tx::TransactionId oldest_active) {
-  auto vertex_access = vertices_caches_.access();
-  for (auto &kv : vertex_access) {
-    if (kv.first < oldest_active) {
-      vertex_access.remove(kv.first);
-    }
-  }
-  auto edge_access = edges_caches_.access();
-  for (auto &kv : edge_access) {
-    if (kv.first < oldest_active) {
-      edge_access.remove(kv.first);
-    }
-  }
-}
-
-}  // namespace distributed
diff --git a/src/distributed/data_manager.hpp b/src/distributed/data_manager.hpp
deleted file mode 100644
index 4f2888ac2..000000000
--- a/src/distributed/data_manager.hpp
+++ /dev/null
@@ -1,45 +0,0 @@
-#pragma once
-
-#include "data_structures/concurrent/concurrent_map.hpp"
-#include "database/graph_db.hpp"
-#include "distributed/cache.hpp"
-#include "distributed/data_rpc_clients.hpp"
-#include "transactions/type.hpp"
-
-class Vertex;
-class Edge;
-
-namespace distributed {
-
-/// Handles remote data caches for edges and vertices, per transaction.
-class DataManager {
-  template <typename TRecord>
-  using CacheT = ConcurrentMap<tx::TransactionId, Cache<TRecord>>;
-
-  // Helper, gets or inserts a data cache for the given transaction.
-  template <typename TRecord>
-  Cache<TRecord> &GetCache(CacheT<TRecord> &collection,
-                           tx::TransactionId tx_id);
-
- public:
-  DataManager(database::GraphDb &db, distributed::DataRpcClients &data_clients);
-
-  /// Gets or creates the remote vertex/edge cache for the given transaction.
-  template <typename TRecord>
-  Cache<TRecord> &Elements(tx::TransactionId tx_id);
-
-  /// Removes all the caches for a single transaction.
-  void ClearCacheForSingleTransaction(tx::TransactionId tx_id);
-
-  /// Clears the cache of local transactions that have expired. The signature of
-  /// this method is dictated by `distributed::TransactionalCacheCleaner`.
-  void ClearTransactionalCache(tx::TransactionId oldest_active);
-
- private:
-  database::GraphDb &db_;
-  DataRpcClients &data_clients_;
-  CacheT<Vertex> vertices_caches_;
-  CacheT<Edge> edges_caches_;
-};
-
-}  // namespace distributed
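
DataManager keeps one cache per transaction and, when asked to clear expired transactions, drops every cache whose transaction id is below the oldest active one. A minimal sketch of that lifecycle, assuming a std::map guarded by a mutex in place of ConcurrentMap (TxCaches and its members are illustrative names):

#include <cstdint>
#include <iostream>
#include <iterator>
#include <map>
#include <mutex>
#include <vector>

using TransactionId = std::uint64_t;

template <typename TCache>
class TxCaches {
 public:
  // Gets or default-constructs the cache for the given transaction.
  TCache &GetOrCreate(TransactionId tx_id) {
    std::lock_guard<std::mutex> guard(lock_);
    return caches_[tx_id];
  }

  // Drops caches of transactions older than the oldest active one,
  // mirroring ClearTransactionalCache.
  void ClearOlderThan(TransactionId oldest_active) {
    std::lock_guard<std::mutex> guard(lock_);
    for (auto it = caches_.begin(); it != caches_.end();)
      it = it->first < oldest_active ? caches_.erase(it) : std::next(it);
  }

  std::size_t Size() {
    std::lock_guard<std::mutex> guard(lock_);
    return caches_.size();
  }

 private:
  std::mutex lock_;
  std::map<TransactionId, TCache> caches_;
};

int main() {
  TxCaches<std::vector<int>> caches;
  caches.GetOrCreate(3).push_back(42);
  caches.GetOrCreate(7);
  caches.ClearOlderThan(5);            // removes the cache for transaction 3
  std::cout << caches.Size() << "\n";  // prints 1
}
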
diff --git a/src/distributed/data_rpc_clients.cpp b/src/distributed/data_rpc_clients.cpp
deleted file mode 100644
index 15bd008f9..000000000
--- a/src/distributed/data_rpc_clients.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-#include "distributed/data_rpc_clients.hpp"
-#include "distributed/data_rpc_messages.hpp"
-#include "storage/edge.hpp"
-#include "storage/vertex.hpp"
-
-namespace distributed {
-
-template <>
-std::unique_ptr<Edge> DataRpcClients::RemoteElement(int worker_id,
-                                                    tx::TransactionId tx_id,
-                                                    gid::Gid gid) {
-  auto response =
-      clients_.GetClientPool(worker_id).Call<EdgeRpc>(TxGidPair{tx_id, gid});
-  CHECK(response) << "EdgeRpc failed";
-  return std::move(response->name_output_);
-}
-
-template <>
-std::unique_ptr<Vertex> DataRpcClients::RemoteElement(
-    int worker_id, tx::TransactionId tx_id, gid::Gid gid) {
-  auto response =
-      clients_.GetClientPool(worker_id).Call<VertexRpc>(TxGidPair{tx_id, gid});
-  CHECK(response) << "VertexRpc failed";
-  return std::move(response->name_output_);
-}
-
-}  // namespace distributed
diff --git a/src/distributed/data_rpc_clients.hpp b/src/distributed/data_rpc_clients.hpp
deleted file mode 100644
index 087f3fc18..000000000
--- a/src/distributed/data_rpc_clients.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-#pragma once
-
-#include <mutex>
-#include <utility>
-
-#include "distributed/rpc_worker_clients.hpp"
-#include "storage/gid.hpp"
-#include "transactions/type.hpp"
-
-namespace distributed {
-
-/// Provides access to other worker's data.
-class DataRpcClients {
- public:
-  DataRpcClients(RpcWorkerClients &clients) : clients_(clients) {}
-  /// Returns a remote worker's record (vertex/edge) data for the given params.
-  /// That worker must own the vertex/edge for the given id, and that vertex
-  /// must be visible in the given transaction.
-  template <typename TRecord>
-  std::unique_ptr<TRecord> RemoteElement(int worker_id,
-                                         tx::TransactionId tx_id,
-                                         gid::Gid gid);
-
- private:
-  RpcWorkerClients &clients_;
-};
-
-}  // namespace distributed
diff --git a/src/distributed/data_rpc_messages.hpp b/src/distributed/data_rpc_messages.hpp
deleted file mode 100644
index 9a3c5840f..000000000
--- a/src/distributed/data_rpc_messages.hpp
+++ /dev/null
@@ -1,68 +0,0 @@
-#pragma once
-
-#include <memory>
-#include <string>
-
-#include "communication/rpc/messages.hpp"
-#include "distributed/serialization.hpp"
-#include "storage/edge.hpp"
-#include "storage/gid.hpp"
-#include "storage/vertex.hpp"
-#include "transactions/type.hpp"
-
-namespace distributed {
-
-struct TxGidPair {
-  tx::TransactionId tx_id;
-  gid::Gid gid;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &tx_id;
-    ar &gid;
-  }
-};
-
-#define MAKE_RESPONSE(type, name)                                           \
-  class type##Res : public communication::rpc::Message {                    \
-   public:                                                                  \
-    type##Res() {}                                                          \
-    type##Res(const type *name, int worker_id)                              \
-        : name_input_(name), worker_id_(worker_id) {}                       \
-                                                                            \
-    template <class TArchive>                                               \
-    void save(TArchive &ar, unsigned int) const {                           \
-      ar << boost::serialization::base_object<                              \
-          const communication::rpc::Message>(*this);                        \
-      Save##type(ar, *name_input_, worker_id_);                             \
-    }                                                                       \
-                                                                            \
-    template <class TArchive>                                               \
-    void load(TArchive &ar, unsigned int) {                                 \
-      ar >> boost::serialization::base_object<communication::rpc::Message>( \
-                *this);                                                     \
-      auto v = Load##type(ar);                                              \
-      v.swap(name_output_);                                                 \
-    }                                                                       \
-    BOOST_SERIALIZATION_SPLIT_MEMBER()                                      \
-                                                                            \
-    const type *name_input_;                                                \
-    int worker_id_;                                                         \
-    std::unique_ptr<type> name_output_;                                     \
-  };
-
-MAKE_RESPONSE(Vertex, vertex)
-MAKE_RESPONSE(Edge, edge)
-
-#undef MAKE_RESPONSE
-
-RPC_SINGLE_MEMBER_MESSAGE(VertexReq, TxGidPair);
-RPC_SINGLE_MEMBER_MESSAGE(EdgeReq, TxGidPair);
-
-using VertexRpc = communication::rpc::RequestResponse<VertexReq, VertexRes>;
-using EdgeRpc = communication::rpc::RequestResponse<EdgeReq, EdgeRes>;
-
-}  // namespace distributed
diff --git a/src/distributed/data_rpc_server.cpp b/src/distributed/data_rpc_server.cpp
deleted file mode 100644
index 90f32f305..000000000
--- a/src/distributed/data_rpc_server.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-#include <memory>
-
-#include "data_rpc_server.hpp"
-#include "database/graph_db_accessor.hpp"
-#include "distributed/data_rpc_messages.hpp"
-
-namespace distributed {
-
-DataRpcServer::DataRpcServer(database::GraphDb &db,
-                             communication::rpc::Server &server)
-    : db_(db), rpc_server_(server) {
-  rpc_server_.Register<VertexRpc>(
-      [this](const VertexReq &req) {
-        database::GraphDbAccessor dba(db_, req.member.tx_id);
-        auto vertex = dba.FindVertex(req.member.gid, false);
-        CHECK(vertex.GetOld())
-            << "Old record must exist when sending vertex by RPC";
-        return std::make_unique<VertexRes>(vertex.GetOld(), db_.WorkerId());
-      });
-
-  rpc_server_.Register<EdgeRpc>([this](const EdgeReq &req) {
-    database::GraphDbAccessor dba(db_, req.member.tx_id);
-    auto edge = dba.FindEdge(req.member.gid, false);
-    CHECK(edge.GetOld()) << "Old record must exist when sending edge by RPC";
-    return std::make_unique<EdgeRes>(edge.GetOld(), db_.WorkerId());
-  });
-}
-
-}  // namespace distributed
diff --git a/src/distributed/data_rpc_server.hpp b/src/distributed/data_rpc_server.hpp
deleted file mode 100644
index 91612a5cc..000000000
--- a/src/distributed/data_rpc_server.hpp
+++ /dev/null
@@ -1,17 +0,0 @@
-#pragma once
-
-#include "communication/rpc/server.hpp"
-#include "database/graph_db.hpp"
-
-namespace distributed {
-
-/// Serves this worker's data to others.
-class DataRpcServer {
- public:
-  DataRpcServer(database::GraphDb &db, communication::rpc::Server &server);
-
- private:
-  database::GraphDb &db_;
-  communication::rpc::Server &rpc_server_;
-};
-}  // namespace distributed
diff --git a/src/distributed/durability_rpc_clients.cpp b/src/distributed/durability_rpc_clients.cpp
deleted file mode 100644
index 528650db2..000000000
--- a/src/distributed/durability_rpc_clients.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-#include "distributed/durability_rpc_clients.hpp"
-
-#include "distributed/durability_rpc_messages.hpp"
-#include "transactions/transaction.hpp"
-#include "utils/future.hpp"
-
-namespace distributed {
-utils::Future<bool> DurabilityRpcClients::MakeSnapshot(tx::TransactionId tx) {
-  return utils::make_future(std::async(std::launch::async, [this, tx] {
-    auto futures = clients_.ExecuteOnWorkers<bool>(
-        0, [tx](communication::rpc::ClientPool &client_pool) {
-          auto res = client_pool.Call<MakeSnapshotRpc>(tx);
-          if (res == nullptr) return false;
-          return res->member;
-        });
-
-    bool created = true;
-    for (auto &future : futures) {
-      created &= future.get();
-    }
-
-    return created;
-  }));
-}
-}  // namespace distributed
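
MakeSnapshot fans the request out to every worker and folds the per-worker futures into a single future that is true only if every worker reported success. The same fold with standard futures only; the per-worker results are simulated and AllSucceeded is an illustrative name:

#include <future>
#include <iostream>
#include <vector>

// Waits on every per-worker future; the combined result is true only when all
// workers reported success.
std::future<bool> AllSucceeded(std::vector<std::future<bool>> futures) {
  return std::async(std::launch::async, [fs = std::move(futures)]() mutable {
    bool ok = true;
    for (auto &f : fs) ok &= f.get();
    return ok;
  });
}

int main() {
  std::vector<std::future<bool>> per_worker;
  for (bool result : {true, true, false})
    per_worker.push_back(
        std::async(std::launch::async, [result] { return result; }));
  std::cout << std::boolalpha << AllSucceeded(std::move(per_worker)).get()
            << "\n";  // prints false because one worker failed
}
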
diff --git a/src/distributed/durability_rpc_clients.hpp b/src/distributed/durability_rpc_clients.hpp
deleted file mode 100644
index 880bde3d9..000000000
--- a/src/distributed/durability_rpc_clients.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-#pragma once
-
-#include <future>
-#include <mutex>
-#include <utility>
-
-#include "distributed/rpc_worker_clients.hpp"
-#include "storage/gid.hpp"
-#include "transactions/type.hpp"
-
-namespace distributed {
-
-/// Provides the ability to trigger snapshotting on other workers.
-class DurabilityRpcClients {
- public:
-  DurabilityRpcClients(RpcWorkerClients &clients) : clients_(clients) {}
-
-  // Sends a snapshot request to workers and returns a future which becomes true
-  // if all workers successfully completed their snapshot creation, false
-  // otherwise.
-  // @param tx - transaction from which to take db snapshot
-  utils::Future<bool> MakeSnapshot(tx::TransactionId tx);
-
- private:
-  RpcWorkerClients &clients_;
-};
-
-}  // namespace distributed
diff --git a/src/distributed/durability_rpc_messages.hpp b/src/distributed/durability_rpc_messages.hpp
deleted file mode 100644
index baf147814..000000000
--- a/src/distributed/durability_rpc_messages.hpp
+++ /dev/null
@@ -1,17 +0,0 @@
-#pragma once
-
-#include "boost/serialization/access.hpp"
-#include "boost/serialization/base_object.hpp"
-
-#include "communication/rpc/messages.hpp"
-#include "transactions/transaction.hpp"
-
-namespace distributed {
-
-RPC_SINGLE_MEMBER_MESSAGE(MakeSnapshotReq, tx::TransactionId);
-RPC_SINGLE_MEMBER_MESSAGE(MakeSnapshotRes, bool);
-
-using MakeSnapshotRpc =
-    communication::rpc::RequestResponse<MakeSnapshotReq, MakeSnapshotRes>;
-
-}  // namespace distributed
diff --git a/src/distributed/durability_rpc_server.cpp b/src/distributed/durability_rpc_server.cpp
deleted file mode 100644
index 801b59b16..000000000
--- a/src/distributed/durability_rpc_server.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-#include "distributed/durability_rpc_server.hpp"
-
-#include "database/graph_db.hpp"
-#include "database/graph_db_accessor.hpp"
-#include "distributed/durability_rpc_messages.hpp"
-
-namespace distributed {
-
-DurabilityRpcServer::DurabilityRpcServer(database::GraphDb &db,
-                                         communication::rpc::Server &server)
-    : db_(db), rpc_server_(server) {
-  rpc_server_.Register<MakeSnapshotRpc>([this](const MakeSnapshotReq &req) {
-    database::GraphDbAccessor dba(this->db_, req.member);
-    return std::make_unique<MakeSnapshotRes>(this->db_.MakeSnapshot(dba));
-  });
-}
-
-}  // namespace distributed
diff --git a/src/distributed/durability_rpc_server.hpp b/src/distributed/durability_rpc_server.hpp
deleted file mode 100644
index 1373b6aec..000000000
--- a/src/distributed/durability_rpc_server.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-#pragma once
-
-#include "communication/rpc/server.hpp"
-
-namespace database {
-class GraphDb;
-};
-
-namespace distributed {
-
-class DurabilityRpcServer {
- public:
-  DurabilityRpcServer(database::GraphDb &db,
-                      communication::rpc::Server &server);
-
- private:
-  database::GraphDb &db_;
-  communication::rpc::Server &rpc_server_;
-};
-
-}  // namespace distributed
diff --git a/src/distributed/index_rpc_messages.hpp b/src/distributed/index_rpc_messages.hpp
deleted file mode 100644
index 3f9ebf321..000000000
--- a/src/distributed/index_rpc_messages.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-#pragma once
-
-#include <memory>
-#include <string>
-
-#include "communication/rpc/messages.hpp"
-#include "distributed/serialization.hpp"
-
-namespace distributed {
-
-struct IndexLabelPropertyTx {
-  storage::Label label;
-  storage::Property property;
-  tx::TransactionId tx_id;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &label;
-    ar &property;
-    ar &tx_id;
-  }
-};
-
-RPC_SINGLE_MEMBER_MESSAGE(BuildIndexReq, IndexLabelPropertyTx);
-RPC_NO_MEMBER_MESSAGE(BuildIndexRes);
-
-using BuildIndexRpc =
-    communication::rpc::RequestResponse<BuildIndexReq, BuildIndexRes>;
-}  // namespace distributed
diff --git a/src/distributed/index_rpc_server.cpp b/src/distributed/index_rpc_server.cpp
deleted file mode 100644
index 6964ebcc6..000000000
--- a/src/distributed/index_rpc_server.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-#include "database/graph_db.hpp"
-#include "database/graph_db_accessor.hpp"
-#include "distributed/index_rpc_server.hpp"
-
-namespace distributed {
-
-IndexRpcServer::IndexRpcServer(database::GraphDb &db,
-                               communication::rpc::Server &server)
-    : db_(db), rpc_server_(server) {
-  rpc_server_.Register<BuildIndexRpc>([this](const BuildIndexReq &req) {
-
-    database::LabelPropertyIndex::Key key{req.member.label,
-                                          req.member.property};
-    database::GraphDbAccessor dba(db_, req.member.tx_id);
-
-    if (db_.storage().label_property_index_.CreateIndex(key) == false) {
-      // If we are a distributed worker we just have to wait until the index
-      // (which should be in the process of being created) is created so that
-      // our return guarantees that the index has been built - this assumes
-      // that no worker thread that is creating an index will fail.
-      while (!dba.LabelPropertyIndexExists(key.label_, key.property_)) {
-        // TODO reconsider this constant, currently rule-of-thumb chosen
-        std::this_thread::sleep_for(std::chrono::microseconds(100));
-      }
-    } else {
-      dba.PopulateIndex(key);
-      dba.EnableIndex(key);
-    }
-    return std::make_unique<BuildIndexRes>();
-  });
-}
-
-}  // namespace distributed
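
When CreateIndex reports that the index already exists, the handler above only has to wait for the concurrent build to finish, polling with a short sleep until the index becomes visible. A self-contained sketch of that wait; the builder thread stands in for whichever worker thread actually creates the index:

#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

int main() {
  std::atomic<bool> index_ready{false};

  // Simulates the worker thread that actually builds the index.
  std::thread builder([&] {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    index_ready = true;
  });

  // Simulates the RPC handler that only has to wait for the build to finish.
  while (!index_ready) {
    std::this_thread::sleep_for(std::chrono::microseconds(100));
  }
  std::cout << "index visible, replying to BuildIndexRpc caller\n";
  builder.join();
}
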
diff --git a/src/distributed/index_rpc_server.hpp b/src/distributed/index_rpc_server.hpp
deleted file mode 100644
index 3aec58b2f..000000000
--- a/src/distributed/index_rpc_server.hpp
+++ /dev/null
@@ -1,22 +0,0 @@
-#pragma once
-
-namespace communication::rpc {
-class Server;
-}
-
-namespace database {
-class GraphDb;
-}
-
-namespace distributed {
-
-class IndexRpcServer {
- public:
-  IndexRpcServer(database::GraphDb &db, communication::rpc::Server &server);
-
- private:
-  database::GraphDb &db_;
-  communication::rpc::Server &rpc_server_;
-};
-
-}  // namespace distributed
diff --git a/src/distributed/plan_consumer.cpp b/src/distributed/plan_consumer.cpp
deleted file mode 100644
index 9e83b5785..000000000
--- a/src/distributed/plan_consumer.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-#include "distributed/plan_consumer.hpp"
-
-namespace distributed {
-
-PlanConsumer::PlanConsumer(communication::rpc::Server &server)
-    : server_(server) {
-  server_.Register<DistributedPlanRpc>([this](const DispatchPlanReq &req) {
-    plan_cache_.access().insert(
-        req.plan_id_,
-        std::make_unique<PlanPack>(
-            req.plan_, req.symbol_table_,
-            std::move(const_cast<DispatchPlanReq &>(req).storage_)));
-    return std::make_unique<DispatchPlanRes>();
-  });
-
-  server_.Register<RemovePlanRpc>([this](const RemovePlanReq &req) {
-    plan_cache_.access().remove(req.member);
-    return std::make_unique<RemovePlanRes>();
-  });
-}
-
-PlanConsumer::PlanPack &PlanConsumer::PlanForId(int64_t plan_id) const {
-  auto accessor = plan_cache_.access();
-  auto found = accessor.find(plan_id);
-  CHECK(found != accessor.end())
-      << "Missing plan and symbol table for plan id: " << plan_id;
-  return *found->second;
-}
-
-std::vector<int64_t> PlanConsumer::CachedPlanIds() const {
-  std::vector<int64_t> plan_ids;
-  auto access = plan_cache_.access();
-  plan_ids.reserve(access.size());
-  for (auto &kv : access) plan_ids.emplace_back(kv.first);
-
-  return plan_ids;
-}
-
-}  // namespace distributed
diff --git a/src/distributed/plan_consumer.hpp b/src/distributed/plan_consumer.hpp
deleted file mode 100644
index 0155805e4..000000000
--- a/src/distributed/plan_consumer.hpp
+++ /dev/null
@@ -1,44 +0,0 @@
-#pragma once
-
-#include <vector>
-
-#include "communication/rpc/server.hpp"
-#include "data_structures/concurrent/concurrent_map.hpp"
-#include "distributed/plan_rpc_messages.hpp"
-#include "query/frontend/semantic/symbol_table.hpp"
-#include "query/plan/operator.hpp"
-
-namespace distributed {
-
-/** Handles plan consumption from master. Creates and holds a local cache of
- * plans. Worker side. */
-class PlanConsumer {
- public:
-  struct PlanPack {
-    PlanPack(std::shared_ptr<query::plan::LogicalOperator> plan,
-             SymbolTable symbol_table, AstTreeStorage storage)
-        : plan(plan),
-          symbol_table(std::move(symbol_table)),
-          storage(std::move(storage)) {}
-
-    std::shared_ptr<query::plan::LogicalOperator> plan;
-    SymbolTable symbol_table;
-    const AstTreeStorage storage;
-  };
-
-  explicit PlanConsumer(communication::rpc::Server &server);
-
-  /** Return cached plan and symbol table for a given plan id. */
-  PlanPack &PlanForId(int64_t plan_id) const;
-
-  /** Return the ids of all the cached plans. For testing. */
-  std::vector<int64_t> CachedPlanIds() const;
-
- private:
-  communication::rpc::Server &server_;
-  // TODO remove unique_ptr. This is only to get it to work; emplacing into a
-  // ConcurrentMap is tricky.
-  mutable ConcurrentMap<int64_t, std::unique_ptr<PlanPack>> plan_cache_;
-};
-
-}  // namespace distributed
diff --git a/src/distributed/plan_dispatcher.cpp b/src/distributed/plan_dispatcher.cpp
deleted file mode 100644
index 090512303..000000000
--- a/src/distributed/plan_dispatcher.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-#include <distributed/plan_dispatcher.hpp>
-
-namespace distributed {
-
-PlanDispatcher::PlanDispatcher(RpcWorkerClients &clients) : clients_(clients) {}
-
-void PlanDispatcher::DispatchPlan(
-    int64_t plan_id, std::shared_ptr<query::plan::LogicalOperator> plan,
-    const SymbolTable &symbol_table) {
-  auto futures = clients_.ExecuteOnWorkers<void>(
-      0, [plan_id, plan,
-          symbol_table](communication::rpc::ClientPool &client_pool) {
-        auto result =
-            client_pool.Call<DistributedPlanRpc>(plan_id, plan, symbol_table);
-        CHECK(result) << "DistributedPlanRpc failed";
-      });
-
-  for (auto &future : futures) {
-    future.wait();
-  }
-}
-
-void PlanDispatcher::RemovePlan(int64_t plan_id) {
-  auto futures = clients_.ExecuteOnWorkers<void>(
-      0, [plan_id](communication::rpc::ClientPool &client_pool) {
-        auto result = client_pool.Call<RemovePlanRpc>(plan_id);
-        CHECK(result) << "Failed to remove plan from worker";
-      });
-
-  for (auto &future : futures) {
-    future.wait();
-  }
-}
-
-}  // namespace distributed
diff --git a/src/distributed/plan_dispatcher.hpp b/src/distributed/plan_dispatcher.hpp
deleted file mode 100644
index 9e2105b31..000000000
--- a/src/distributed/plan_dispatcher.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-#pragma once
-
-#include "distributed/coordination.hpp"
-#include "distributed/plan_rpc_messages.hpp"
-#include "distributed/rpc_worker_clients.hpp"
-#include "query/frontend/semantic/symbol_table.hpp"
-#include "query/plan/operator.hpp"
-
-namespace distributed {
-
-/** Handles plan dispatching to all workers. Uses MasterCoordination to
- * accomplish that. Master side.
- */
-class PlanDispatcher {
- public:
-  explicit PlanDispatcher(RpcWorkerClients &clients);
-
-  /** Dispatch a plan to all workers and wait for their acknowledgement. */
-  void DispatchPlan(int64_t plan_id,
-                    std::shared_ptr<query::plan::LogicalOperator> plan,
-                    const SymbolTable &symbol_table);
-
-  /** Remove a plan from all workers and wait for their acknowledgement. */
-  void RemovePlan(int64_t plan_id);
-
- private:
-  RpcWorkerClients &clients_;
-};
-
-}  // namespace distributed
diff --git a/src/distributed/plan_rpc_messages.hpp b/src/distributed/plan_rpc_messages.hpp
deleted file mode 100644
index 506365481..000000000
--- a/src/distributed/plan_rpc_messages.hpp
+++ /dev/null
@@ -1,63 +0,0 @@
-#pragma once
-
-#include "boost/serialization/access.hpp"
-#include "boost/serialization/base_object.hpp"
-
-#include "communication/rpc/messages.hpp"
-#include "query/frontend/ast/ast.hpp"
-#include "query/frontend/semantic/symbol_table.hpp"
-#include "query/plan/operator.hpp"
-
-namespace distributed {
-
-using communication::rpc::Message;
-using SymbolTable = query::SymbolTable;
-using AstTreeStorage = query::AstTreeStorage;
-
-struct DispatchPlanReq : public Message {
-  DispatchPlanReq() {}
-  DispatchPlanReq(int64_t plan_id,
-                  std::shared_ptr<query::plan::LogicalOperator> plan,
-                  SymbolTable symbol_table)
-
-      : plan_id_(plan_id), plan_(plan), symbol_table_(symbol_table) {}
-  int64_t plan_id_;
-  std::shared_ptr<query::plan::LogicalOperator> plan_;
-  SymbolTable symbol_table_;
-  AstTreeStorage storage_;
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<Message>(*this);
-    ar &plan_id_;
-    ar &plan_;
-    ar &symbol_table_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<Message>(*this);
-    ar &plan_id_;
-    ar &plan_;
-    ar &symbol_table_;
-    storage_ = std::move(
-        ar.template get_helper<AstTreeStorage>(AstTreeStorage::kHelperId));
-  }
-};
-
-RPC_NO_MEMBER_MESSAGE(DispatchPlanRes);
-
-using DistributedPlanRpc =
-    communication::rpc::RequestResponse<DispatchPlanReq, DispatchPlanRes>;
-
-RPC_SINGLE_MEMBER_MESSAGE(RemovePlanReq, int64_t);
-RPC_NO_MEMBER_MESSAGE(RemovePlanRes);
-using RemovePlanRpc =
-    communication::rpc::RequestResponse<RemovePlanReq, RemovePlanRes>;
-
-}  // namespace distributed
diff --git a/src/distributed/produce_rpc_server.cpp b/src/distributed/produce_rpc_server.cpp
deleted file mode 100644
index dba6d50de..000000000
--- a/src/distributed/produce_rpc_server.cpp
+++ /dev/null
@@ -1,169 +0,0 @@
-#include "distributed/produce_rpc_server.hpp"
-#include "distributed/data_manager.hpp"
-#include "distributed/pull_produce_rpc_messages.hpp"
-#include "query/common.hpp"
-#include "query/exceptions.hpp"
-#include "transactions/engine_worker.hpp"
-
-namespace distributed {
-
-ProduceRpcServer::OngoingProduce::OngoingProduce(
-    database::GraphDb &db, tx::TransactionId tx_id,
-    std::shared_ptr<query::plan::LogicalOperator> op,
-    query::SymbolTable symbol_table, Parameters parameters,
-    std::vector<query::Symbol> pull_symbols)
-    : dba_{db, tx_id},
-      context_(dba_),
-      pull_symbols_(std::move(pull_symbols)),
-      frame_(symbol_table.max_position()),
-      cursor_(op->MakeCursor(dba_)) {
-  context_.symbol_table_ = std::move(symbol_table);
-  context_.parameters_ = std::move(parameters);
-}
-
-std::pair<std::vector<query::TypedValue>, PullState>
-ProduceRpcServer::OngoingProduce::Pull() {
-  if (!accumulation_.empty()) {
-    auto results = std::move(accumulation_.back());
-    accumulation_.pop_back();
-    for (auto &element : results) {
-      try {
-        query::ReconstructTypedValue(element);
-      } catch (query::ReconstructionException &) {
-        cursor_state_ = PullState::RECONSTRUCTION_ERROR;
-        return std::make_pair(std::move(results), cursor_state_);
-      }
-    }
-
-    return std::make_pair(std::move(results), PullState::CURSOR_IN_PROGRESS);
-  }
-
-  return PullOneFromCursor();
-}
-
-PullState ProduceRpcServer::OngoingProduce::Accumulate() {
-  while (true) {
-    auto result = PullOneFromCursor();
-    if (result.second != PullState::CURSOR_IN_PROGRESS)
-      return result.second;
-    else
-      accumulation_.emplace_back(std::move(result.first));
-  }
-}
-
-std::pair<std::vector<query::TypedValue>, PullState>
-ProduceRpcServer::OngoingProduce::PullOneFromCursor() {
-  std::vector<query::TypedValue> results;
-
-  // Check if we already exhausted this cursor (or it entered an error
-  // state). This happens when we accumulate before normal pull.
-  if (cursor_state_ != PullState::CURSOR_IN_PROGRESS) {
-    return std::make_pair(results, cursor_state_);
-  }
-
-  try {
-    if (cursor_->Pull(frame_, context_)) {
-      results.reserve(pull_symbols_.size());
-      for (const auto &symbol : pull_symbols_) {
-        results.emplace_back(std::move(frame_[symbol]));
-      }
-    } else {
-      cursor_state_ = PullState::CURSOR_EXHAUSTED;
-    }
-  } catch (const mvcc::SerializationError &) {
-    cursor_state_ = PullState::SERIALIZATION_ERROR;
-  } catch (const LockTimeoutException &) {
-    cursor_state_ = PullState::LOCK_TIMEOUT_ERROR;
-  } catch (const RecordDeletedError &) {
-    cursor_state_ = PullState::UPDATE_DELETED_ERROR;
-  } catch (const query::ReconstructionException &) {
-    cursor_state_ = PullState::RECONSTRUCTION_ERROR;
-  } catch (const query::RemoveAttachedVertexException &) {
-    cursor_state_ = PullState::UNABLE_TO_DELETE_VERTEX_ERROR;
-  } catch (const query::QueryRuntimeException &) {
-    cursor_state_ = PullState::QUERY_ERROR;
-  } catch (const query::HintedAbortError &) {
-    cursor_state_ = PullState::HINTED_ABORT_ERROR;
-  }
-  return std::make_pair(std::move(results), cursor_state_);
-}
-
-ProduceRpcServer::ProduceRpcServer(
-    database::GraphDb &db, tx::Engine &tx_engine,
-    communication::rpc::Server &server,
-    const distributed::PlanConsumer &plan_consumer)
-    : db_(db),
-      produce_rpc_server_(server),
-      plan_consumer_(plan_consumer),
-      tx_engine_(tx_engine) {
-  produce_rpc_server_.Register<PullRpc>([this](const PullReq &req) {
-    return std::make_unique<PullRes>(Pull(req));
-  });
-
-  produce_rpc_server_.Register<TransactionCommandAdvancedRpc>(
-      [this](const TransactionCommandAdvancedReq &req) {
-        tx_engine_.UpdateCommand(req.member);
-        db_.data_manager().ClearCacheForSingleTransaction(req.member);
-        return std::make_unique<TransactionCommandAdvancedRes>();
-      });
-}
-
-void ProduceRpcServer::FinishAndClearOngoingProducePlans(
-    tx::TransactionId tx_id) {
-  std::lock_guard<std::mutex> guard{ongoing_produces_lock_};
-  for (auto it = ongoing_produces_.begin(); it != ongoing_produces_.end();) {
-    if (it->first.first == tx_id) {
-      it = ongoing_produces_.erase(it);
-    } else {
-      ++it;
-    }
-  }
-}
-
-ProduceRpcServer::OngoingProduce &ProduceRpcServer::GetOngoingProduce(
-    const PullReq &req) {
-  auto key_pair = std::make_pair(req.tx_id, req.plan_id);
-  std::lock_guard<std::mutex> guard{ongoing_produces_lock_};
-  auto found = ongoing_produces_.find(key_pair);
-  if (found != ongoing_produces_.end()) {
-    return found->second;
-  }
-  if (db_.type() == database::GraphDb::Type::DISTRIBUTED_WORKER) {
-    // On the worker, cache the snapshot to save one RPC.
-    dynamic_cast<tx::WorkerEngine &>(tx_engine_)
-        .RunningTransaction(req.tx_id, req.tx_snapshot);
-  }
-  auto &plan_pack = plan_consumer_.PlanForId(req.plan_id);
-  return ongoing_produces_
-      .emplace(std::piecewise_construct, std::forward_as_tuple(key_pair),
-               std::forward_as_tuple(db_, req.tx_id, plan_pack.plan,
-                                     plan_pack.symbol_table, req.params,
-                                     req.symbols))
-      .first->second;
-}
-
-PullResData ProduceRpcServer::Pull(const PullReq &req) {
-  auto &ongoing_produce = GetOngoingProduce(req);
-
-  PullResData result{db_.WorkerId(), req.send_old, req.send_new};
-  result.state_and_frames.pull_state = PullState::CURSOR_IN_PROGRESS;
-
-  if (req.accumulate) {
-    result.state_and_frames.pull_state = ongoing_produce.Accumulate();
-    // If an error occurred, we need to return that error.
-    if (result.state_and_frames.pull_state != PullState::CURSOR_EXHAUSTED) {
-      return result;
-    }
-  }
-
-  for (int i = 0; i < req.batch_size; ++i) {
-    auto pull_result = ongoing_produce.Pull();
-    result.state_and_frames.pull_state = pull_result.second;
-    if (pull_result.second != PullState::CURSOR_IN_PROGRESS) break;
-    result.state_and_frames.frames.emplace_back(std::move(pull_result.first));
-  }
-
-  return result;
-}
-
-}  // namespace distributed
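
Pull drains at most batch_size results per request and stops early as soon as the cursor leaves CURSOR_IN_PROGRESS, so a single RPC never blocks on an exhausted or failed cursor. A reduced sketch of that loop, with a hypothetical countdown cursor in place of the real plan cursor:

#include <iostream>
#include <utility>
#include <vector>

enum class PullState { CURSOR_IN_PROGRESS, CURSOR_EXHAUSTED };

// Hypothetical cursor that yields its counter until it reaches zero.
struct CountdownCursor {
  int remaining;
  std::pair<int, PullState> Pull() {
    if (remaining == 0) return {0, PullState::CURSOR_EXHAUSTED};
    return {remaining--, PullState::CURSOR_IN_PROGRESS};
  }
};

int main() {
  CountdownCursor cursor{5};
  const int batch_size = 3;
  std::vector<int> batch;
  PullState state = PullState::CURSOR_IN_PROGRESS;
  for (int i = 0; i < batch_size; ++i) {
    auto result = cursor.Pull();
    state = result.second;
    if (state != PullState::CURSOR_IN_PROGRESS) break;
    batch.push_back(result.first);
  }
  std::cout << "batch of " << batch.size()
            << ", exhausted: " << (state == PullState::CURSOR_EXHAUSTED)
            << "\n";
}
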
diff --git a/src/distributed/produce_rpc_server.hpp b/src/distributed/produce_rpc_server.hpp
deleted file mode 100644
index 88e619f97..000000000
--- a/src/distributed/produce_rpc_server.hpp
+++ /dev/null
@@ -1,89 +0,0 @@
-#pragma once
-
-#include <cstdint>
-#include <map>
-#include <mutex>
-#include <utility>
-#include <vector>
-
-#include "communication/rpc/server.hpp"
-#include "database/graph_db.hpp"
-#include "database/graph_db_accessor.hpp"
-#include "distributed/plan_consumer.hpp"
-#include "query/context.hpp"
-#include "query/frontend/semantic/symbol_table.hpp"
-#include "query/interpret/frame.hpp"
-#include "query/parameters.hpp"
-#include "query/plan/operator.hpp"
-#include "query/typed_value.hpp"
-#include "transactions/engine.hpp"
-#include "transactions/type.hpp"
-
-namespace distributed {
-
-/// Handles the execution of a plan on the worker, requested by the remote
-/// master. Assumes that (tx_id, plan_id) uniquely identifies an execution, and
-/// that there will never be parallel requests for the same execution thus
-/// identified.
-class ProduceRpcServer {
-  /// Encapsulates a Cursor execution in progress. Can be used for pulling a
-  /// single result from the execution, or pulling all and accumulating the
-  /// results. Accumulations are used for synchronizing updates in distributed
-  /// MG (see query::plan::Synchronize).
-  class OngoingProduce {
-   public:
-    OngoingProduce(database::GraphDb &db, tx::TransactionId tx_id,
-                   std::shared_ptr<query::plan::LogicalOperator> op,
-                   query::SymbolTable symbol_table, Parameters parameters,
-                   std::vector<query::Symbol> pull_symbols);
-
-    /// Returns a vector of typed values (one for each `pull_symbol`), and an
-    /// indication of the pull result. The result data is valid only if the
-    /// returned state is CURSOR_IN_PROGRESS.
-    std::pair<std::vector<query::TypedValue>, PullState> Pull();
-
-    /// Accumulates all the frames pulled from the cursor and returns
-    /// CURSOR_EXHAUSTED. If an error occurs, an appropriate value is returned.
-    PullState Accumulate();
-
-   private:
-    database::GraphDbAccessor dba_;
-    query::Context context_;
-    std::vector<query::Symbol> pull_symbols_;
-    query::Frame frame_;
-    PullState cursor_state_{PullState::CURSOR_IN_PROGRESS};
-    std::vector<std::vector<query::TypedValue>> accumulation_;
-    std::unique_ptr<query::plan::Cursor> cursor_;
-
-    /// Pulls and returns a single result from the cursor.
-    std::pair<std::vector<query::TypedValue>, PullState> PullOneFromCursor();
-  };
-
- public:
-  ProduceRpcServer(database::GraphDb &db, tx::Engine &tx_engine,
-                   communication::rpc::Server &server,
-                   const distributed::PlanConsumer &plan_consumer);
-
-  /// Finish and clear ongoing produces for all plans that are tied to a
-  /// transaction with tx_id.
-  void FinishAndClearOngoingProducePlans(tx::TransactionId tx_id);
-
- private:
-  std::mutex ongoing_produces_lock_;
-  /// Mapping of (tx id, plan id) to OngoingProduce.
-  std::map<std::pair<tx::TransactionId, int64_t>, OngoingProduce>
-      ongoing_produces_;
-  database::GraphDb &db_;
-  communication::rpc::Server &produce_rpc_server_;
-  const distributed::PlanConsumer &plan_consumer_;
-  tx::Engine &tx_engine_;
-
-  /// Gets an ongoing produce for the given pull request. Creates a new one if
-  /// there is none currently existing.
-  OngoingProduce &GetOngoingProduce(const PullReq &req);
-
-  /// Performs a single remote pull for the given request.
-  PullResData Pull(const PullReq &req);
-};
-
-}  // namespace distributed
diff --git a/src/distributed/pull_produce_rpc_messages.hpp b/src/distributed/pull_produce_rpc_messages.hpp
deleted file mode 100644
index 9be2cf380..000000000
--- a/src/distributed/pull_produce_rpc_messages.hpp
+++ /dev/null
@@ -1,376 +0,0 @@
-#pragma once
-
-#include <cstdint>
-#include <functional>
-#include <string>
-
-#include "boost/serialization/utility.hpp"
-#include "boost/serialization/vector.hpp"
-
-#include "communication/rpc/messages.hpp"
-#include "distributed/serialization.hpp"
-#include "query/frontend/semantic/symbol.hpp"
-#include "query/parameters.hpp"
-#include "storage/address_types.hpp"
-#include "transactions/type.hpp"
-#include "utils/serialization.hpp"
-
-namespace distributed {
-
-/// The default number of results returned via RPC from remote execution to the
-/// master that requested it.
-constexpr int kDefaultBatchSize = 20;
-
-/// Returned along with a batch of results in the remote-pull RPC. Indicates the
-/// state of execution on the worker.
-enum class PullState {
-  CURSOR_EXHAUSTED,
-  CURSOR_IN_PROGRESS,
-  SERIALIZATION_ERROR,
-  LOCK_TIMEOUT_ERROR,
-  UPDATE_DELETED_ERROR,
-  RECONSTRUCTION_ERROR,
-  UNABLE_TO_DELETE_VERTEX_ERROR,
-  HINTED_ABORT_ERROR,
-  QUERY_ERROR
-};
-
-struct PullReq : public communication::rpc::Message {
-  PullReq() {}
-  PullReq(tx::TransactionId tx_id, tx::Snapshot tx_snapshot, int64_t plan_id,
-          const Parameters &params, std::vector<query::Symbol> symbols,
-          bool accumulate, int batch_size, bool send_old, bool send_new)
-      : tx_id(tx_id),
-        tx_snapshot(tx_snapshot),
-        plan_id(plan_id),
-        params(params),
-        symbols(symbols),
-        accumulate(accumulate),
-        batch_size(batch_size),
-        send_old(send_old),
-        send_new(send_new) {}
-
-  tx::TransactionId tx_id;
-  tx::Snapshot tx_snapshot;
-  int64_t plan_id;
-  Parameters params;
-  std::vector<query::Symbol> symbols;
-  bool accumulate;
-  int batch_size;
-  // Indicates which of (old, new) records of a graph element should be sent.
-  bool send_old;
-  bool send_new;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void save(TArchive &ar, unsigned int) const {
-    ar << boost::serialization::base_object<communication::rpc::Message>(*this);
-    ar << tx_id;
-    ar << tx_snapshot;
-    ar << plan_id;
-    ar << params.size();
-    for (auto &kv : params) {
-      ar << kv.first;
-      // Params never contain a vertex/edge, so save a plain TypedValue.
-      utils::SaveTypedValue(ar, kv.second);
-    }
-    ar << symbols;
-    ar << accumulate;
-    ar << batch_size;
-    ar << send_old;
-    ar << send_new;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, unsigned int) {
-    ar >> boost::serialization::base_object<communication::rpc::Message>(*this);
-    ar >> tx_id;
-    ar >> tx_snapshot;
-    ar >> plan_id;
-    size_t params_size;
-    ar >> params_size;
-    for (size_t i = 0; i < params_size; ++i) {
-      int token_pos;
-      ar >> token_pos;
-      query::TypedValue param;
-      // Params never contain a vertex/edge, so load a plain TypedValue.
-      utils::LoadTypedValue(ar, param);
-      params.Add(token_pos, param);
-    }
-    ar >> symbols;
-    ar >> accumulate;
-    ar >> batch_size;
-    ar >> send_old;
-    ar >> send_new;
-  }
-  BOOST_SERIALIZATION_SPLIT_MEMBER()
-};
-
-/// The data returned to the end consumer (the Pull operator). Contains
-/// only the relevant parts of the response, ready for use.
-struct PullData {
-  PullState pull_state;
-  std::vector<std::vector<query::TypedValue>> frames;
-};
-
-/// The data of the remote pull response. Post-processing is required after
-/// deserialization to initialize Vertex/Edge typed values in the frames
-/// (possibly encapsulated in lists/maps) to their proper values. This requires
-/// a GraphDbAccessor and therefore can't be done as part of deserialization.
-///
-/// TODO - make it possible to inject a &GraphDbAccessor from the Pull
-/// layer
-/// all the way into RPC data deserialization to remove the requirement for
-/// post-processing. The current approach of holding references to parts of the
-/// frame (potentially embedded in lists/maps) is too error-prone.
-struct PullResData {
- private:
-  // Temp cache for deserialized vertices and edges. These objects are created
-  // during deserialization. They are used immediately afterwards during
-  // post-processing. The vertex/edge data ownership gets transferred to the
-  // Cache, and the `element_in_frame` reference is used to set the
-  // appropriate accessor to the appropriate value. Not used on the side that
-  // generates the response.
-  template <typename TRecord>
-  struct GraphElementData {
-    using AddressT = storage::Address<mvcc::VersionList<TRecord>>;
-    using PtrT = std::unique_ptr<TRecord>;
-
-    GraphElementData(AddressT address, PtrT old_record, PtrT new_record,
-                     query::TypedValue *element_in_frame)
-        : global_address(address),
-          old_record(std::move(old_record)),
-          new_record(std::move(new_record)),
-          element_in_frame(element_in_frame) {}
-
-    storage::Address<mvcc::VersionList<TRecord>> global_address;
-    std::unique_ptr<TRecord> old_record;
-    std::unique_ptr<TRecord> new_record;
-    // The position in frame is optional. This same structure is used for
-    // deserializing path elements, in which case the vertex/edge in question is
-    // not directly part of the frame.
-    query::TypedValue *element_in_frame;
-  };
-
-  // Same like `GraphElementData`, but for paths.
-  struct PathData {
-    PathData(query::TypedValue &path_in_frame) : path_in_frame(path_in_frame) {}
-    std::vector<GraphElementData<Vertex>> vertices;
-    std::vector<GraphElementData<Edge>> edges;
-    query::TypedValue &path_in_frame;
-  };
-
- public:
-  PullResData() {}  // Default constructor required for serialization.
-  PullResData(int worker_id, bool send_old, bool send_new)
-      : worker_id(worker_id), send_old(send_old), send_new(send_new) {}
-
-  PullResData(const PullResData &) = delete;
-  PullResData &operator=(const PullResData &) = delete;
-  PullResData(PullResData &&) = default;
-  PullResData &operator=(PullResData &&) = default;
-
-  PullData state_and_frames;
-  // Id of the worker on which the response is created, used for serializing
-  // vertices (converting local to global addresses).
-  int worker_id;
-  // Indicates which of (old, new) records of a graph element should be sent.
-  bool send_old;
-  bool send_new;
-
-  // Temporary caches used between deserialization and post-processing
-  // (transferring the ownership of this data to a Cache).
-  std::vector<GraphElementData<Vertex>> vertices;
-  std::vector<GraphElementData<Edge>> edges;
-  std::vector<PathData> paths;
-
-  /// Saves a typed value that is a vertex/edge/path.
-  template <class TArchive>
-  void SaveGraphElement(TArchive &ar, const query::TypedValue &value) const {
-    // Helper template function for storing a vertex or an edge.
-    auto save_element = [&ar, this](auto element_accessor) {
-      ar << element_accessor.GlobalAddress().raw();
-
-      // If both old and new are null, we need to reconstruct.
-      if (!(element_accessor.GetOld() || element_accessor.GetNew())) {
-        bool result = element_accessor.Reconstruct();
-        CHECK(result) << "Attempting to serialize an element not visible to "
-                         "current transaction.";
-      }
-      auto *old_rec = element_accessor.GetOld();
-      if (send_old && old_rec) {
-        ar << true;
-        distributed::SaveElement(ar, *old_rec, worker_id);
-      } else {
-        ar << false;
-      }
-      if (send_new) {
-        // Must call SwitchNew as that will trigger a potentially necessary
-        // Reconstruct.
-        element_accessor.SwitchNew();
-        auto *new_rec = element_accessor.GetNew();
-        if (new_rec) {
-          ar << true;
-          distributed::SaveElement(ar, *new_rec, worker_id);
-        } else {
-          ar << false;
-        }
-      } else {
-        ar << false;
-      }
-    };
-    switch (value.type()) {
-      case query::TypedValue::Type::Vertex:
-        save_element(value.ValueVertex());
-        break;
-      case query::TypedValue::Type::Edge:
-        save_element(value.ValueEdge());
-        break;
-      case query::TypedValue::Type::Path: {
-        auto &path = value.ValuePath();
-        ar << path.size();
-        save_element(path.vertices()[0]);
-        for (size_t i = 0; i < path.size(); ++i) {
-          save_element(path.edges()[i]);
-          save_element(path.vertices()[i + 1]);
-        }
-        break;
-      }
-      default:
-        LOG(FATAL) << "Unsupported graph element type: " << value.type();
-    }
-  }
-
-  /// Loads a typed value that is a vertex/edge/path. Part of the
-  /// deserialization process, populates the temporary data caches which are
-  /// processed later.
-  template <class TArchive>
-  void LoadGraphElement(TArchive &ar, query::TypedValue::Type type,
-                        query::TypedValue &value) {
-    auto load_edge = [](auto &ar) {
-      bool exists;
-      ar >> exists;
-      return exists ? LoadEdge(ar) : nullptr;
-    };
-    auto load_vertex = [](auto &ar) {
-      bool exists;
-      ar >> exists;
-      return exists ? LoadVertex(ar) : nullptr;
-    };
-
-    switch (type) {
-      case query::TypedValue::Type::Vertex: {
-        storage::VertexAddress::StorageT address;
-        ar >> address;
-        vertices.emplace_back(storage::VertexAddress(address), load_vertex(ar),
-                              load_vertex(ar), &value);
-        break;
-      }
-      case query::TypedValue::Type::Edge: {
-        storage::VertexAddress::StorageT address;
-        ar >> address;
-        edges.emplace_back(storage::EdgeAddress(address), load_edge(ar),
-                           load_edge(ar), &value);
-        break;
-      }
-      case query::TypedValue::Type::Path: {
-        size_t path_size;
-        ar >> path_size;
-
-        paths.emplace_back(value);
-        auto &path_data = paths.back();
-
-        storage::VertexAddress::StorageT vertex_address;
-        storage::EdgeAddress::StorageT edge_address;
-        ar >> vertex_address;
-        path_data.vertices.emplace_back(storage::VertexAddress(vertex_address),
-                                        load_vertex(ar), load_vertex(ar),
-                                        nullptr);
-        for (size_t i = 0; i < path_size; ++i) {
-          ar >> edge_address;
-          path_data.edges.emplace_back(storage::EdgeAddress(edge_address),
-                                       load_edge(ar), load_edge(ar), nullptr);
-          ar >> vertex_address;
-          path_data.vertices.emplace_back(
-              storage::VertexAddress(vertex_address), load_vertex(ar),
-              load_vertex(ar), nullptr);
-        }
-        break;
-      }
-      default:
-        LOG(FATAL) << "Unsupported graph element type: " << type;
-    }
-  }
-};
-
-class PullRes : public communication::rpc::Message {
- public:
-  PullRes() {}
-  PullRes(PullResData data) : data(std::move(data)) {}
-
-  PullResData data;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void save(TArchive &ar, unsigned int) const {
-    ar << boost::serialization::base_object<communication::rpc::Message>(*this);
-    ar << data.state_and_frames.pull_state;
-    ar << data.state_and_frames.frames.size();
-    // We need to indicate how many values are in each frame.
-    // Assume all the frames have an equal number of elements.
-    ar << (data.state_and_frames.frames.size() == 0
-               ? 0
-               : data.state_and_frames.frames[0].size());
-    for (const auto &frame : data.state_and_frames.frames)
-      for (const auto &value : frame) {
-        utils::SaveTypedValue<TArchive>(
-            ar, value, [this](TArchive &ar, const query::TypedValue &value) {
-              data.SaveGraphElement(ar, value);
-            });
-      }
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, unsigned int) {
-    ar >> boost::serialization::base_object<communication::rpc::Message>(*this);
-    ar >> data.state_and_frames.pull_state;
-    size_t frame_count;
-    ar >> frame_count;
-    data.state_and_frames.frames.reserve(frame_count);
-    size_t frame_size;
-    ar >> frame_size;
-    for (size_t i = 0; i < frame_count; ++i) {
-      data.state_and_frames.frames.emplace_back();
-      auto &current_frame = data.state_and_frames.frames.back();
-      current_frame.reserve(frame_size);
-      for (size_t j = 0; j < frame_size; ++j) {
-        current_frame.emplace_back();
-        utils::LoadTypedValue<TArchive>(
-            ar, current_frame.back(),
-            [this](TArchive &ar, query::TypedValue::TypedValue::Type type,
-                   query::TypedValue &value) {
-              data.LoadGraphElement(ar, type, value);
-            });
-      }
-    }
-  }
-  BOOST_SERIALIZATION_SPLIT_MEMBER()
-};
-
-using PullRpc = communication::rpc::RequestResponse<PullReq, PullRes>;
-
-// TODO make a separate RPC for the continuation of an existing pull, as an
-// optimization not to have to send the full PullReqData pack every
-// time.
-
-RPC_SINGLE_MEMBER_MESSAGE(TransactionCommandAdvancedReq, tx::TransactionId);
-RPC_NO_MEMBER_MESSAGE(TransactionCommandAdvancedRes);
-using TransactionCommandAdvancedRpc =
-    communication::rpc::RequestResponse<TransactionCommandAdvancedReq,
-                                        TransactionCommandAdvancedRes>;
-
-}  // namespace distributed
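
SaveGraphElement/LoadGraphElement frame every optional old/new record with a boolean presence flag so the reader knows whether a payload follows. The idea in isolation, assuming a plain byte vector instead of a Boost archive and std::optional (C++17) instead of std::experimental::optional; both helpers are illustrative:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Writes a presence flag, then the payload only when a record exists.
void SaveOptional(std::vector<std::uint8_t> &out,
                  const std::optional<std::uint8_t> &record) {
  out.push_back(record.has_value() ? 1 : 0);
  if (record) out.push_back(*record);
}

// Reads the presence flag first, then the payload only if one was written.
std::optional<std::uint8_t> LoadOptional(const std::vector<std::uint8_t> &in,
                                         std::size_t &pos) {
  bool present = in[pos++] != 0;
  if (!present) return std::nullopt;
  return in[pos++];
}

int main() {
  std::vector<std::uint8_t> archive;
  SaveOptional(archive, std::uint8_t{42});  // "old" record present
  SaveOptional(archive, std::nullopt);      // "new" record absent
  std::size_t pos = 0;
  auto old_rec = LoadOptional(archive, pos);
  auto new_rec = LoadOptional(archive, pos);
  std::cout << old_rec.has_value() << " " << new_rec.has_value() << "\n";  // 1 0
}
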
diff --git a/src/distributed/pull_rpc_clients.cpp b/src/distributed/pull_rpc_clients.cpp
deleted file mode 100644
index a6fa8b8b6..000000000
--- a/src/distributed/pull_rpc_clients.cpp
+++ /dev/null
@@ -1,72 +0,0 @@
-#include <functional>
-
-#include "distributed/data_manager.hpp"
-#include "distributed/pull_rpc_clients.hpp"
-#include "storage/edge.hpp"
-#include "storage/vertex.hpp"
-
-namespace distributed {
-
-utils::Future<PullData> PullRpcClients::Pull(
-    database::GraphDbAccessor &dba, int worker_id, int64_t plan_id,
-    const Parameters &params, const std::vector<query::Symbol> &symbols,
-    bool accumulate, int batch_size) {
-  return clients_.ExecuteOnWorker<PullData>(
-      worker_id, [&dba, plan_id, params, symbols, accumulate,
-                  batch_size](ClientPool &client_pool) {
-        auto result = client_pool.Call<PullRpc>(
-            dba.transaction_id(), dba.transaction().snapshot(), plan_id, params,
-            symbols, accumulate, batch_size, true, true);
-
-        auto handle_vertex = [&dba](auto &v) {
-          dba.db()
-              .data_manager()
-              .Elements<Vertex>(dba.transaction_id())
-              .emplace(v.global_address.gid(), std::move(v.old_record),
-                       std::move(v.new_record));
-          if (v.element_in_frame) {
-            VertexAccessor va(v.global_address, dba);
-            *v.element_in_frame = va;
-          }
-        };
-        auto handle_edge = [&dba](auto &e) {
-          dba.db()
-              .data_manager()
-              .Elements<Edge>(dba.transaction_id())
-              .emplace(e.global_address.gid(), std::move(e.old_record),
-                       std::move(e.new_record));
-          if (e.element_in_frame) {
-            EdgeAccessor ea(e.global_address, dba);
-            *e.element_in_frame = ea;
-          }
-        };
-        for (auto &v : result->data.vertices) handle_vertex(v);
-        for (auto &e : result->data.edges) handle_edge(e);
-        for (auto &p : result->data.paths) {
-          handle_vertex(p.vertices[0]);
-          p.path_in_frame =
-              query::Path(VertexAccessor(p.vertices[0].global_address, dba));
-          query::Path &path_in_frame = p.path_in_frame.ValuePath();
-          for (size_t i = 0; i < p.edges.size(); ++i) {
-            handle_edge(p.edges[i]);
-            path_in_frame.Expand(EdgeAccessor(p.edges[i].global_address, dba));
-            handle_vertex(p.vertices[i + 1]);
-            path_in_frame.Expand(
-                VertexAccessor(p.vertices[i + 1].global_address, dba));
-          }
-        }
-
-        return std::move(result->data.state_and_frames);
-      });
-}
-
-std::vector<utils::Future<void>>
-PullRpcClients::NotifyAllTransactionCommandAdvanced(
-    tx::TransactionId tx_id) {
-  return clients_.ExecuteOnWorkers<void>(0, [tx_id](auto &client) {
-    auto res = client.template Call<TransactionCommandAdvancedRpc>(tx_id);
-    CHECK(res) << "TransactionCommandAdvanceRpc failed";
-  });
-}
-
-}  // namespace distributed
diff --git a/src/distributed/pull_rpc_clients.hpp b/src/distributed/pull_rpc_clients.hpp
deleted file mode 100644
index 8be2ee3ff..000000000
--- a/src/distributed/pull_rpc_clients.hpp
+++ /dev/null
@@ -1,47 +0,0 @@
-#pragma once
-
-#include <vector>
-
-#include "database/graph_db_accessor.hpp"
-#include "distributed/pull_produce_rpc_messages.hpp"
-#include "distributed/rpc_worker_clients.hpp"
-#include "query/frontend/semantic/symbol.hpp"
-#include "query/parameters.hpp"
-#include "transactions/type.hpp"
-#include "utils/future.hpp"
-
-namespace distributed {
-
-/// Provides a means of requesting the execution of a plan on some remote
-/// worker and getting the results of that execution. The results are returned
-/// in batches and are therefore accompanied by an enum indicator of the state of
-/// remote execution.
-class PullRpcClients {
-  using ClientPool = communication::rpc::ClientPool;
-
- public:
-  PullRpcClients(RpcWorkerClients &clients) : clients_(clients) {}
-
-  /// Calls a remote pull asynchronously. IMPORTANT: take care not to call this
-  /// function for the same (tx_id, worker_id, plan_id) before the previous call
-  /// has ended.
-  ///
-  /// @todo: it might be cleaner to split Pull into {InitRemoteCursor,
-  /// Pull, RemoteAccumulate}, but that's a lot of refactoring and more
-  /// RPC calls.
-  utils::Future<PullData> Pull(database::GraphDbAccessor &dba, int worker_id,
-                               int64_t plan_id, const Parameters &params,
-                               const std::vector<query::Symbol> &symbols,
-                               bool accumulate,
-                               int batch_size = kDefaultBatchSize);
-
-  auto GetWorkerIds() { return clients_.GetWorkerIds(); }
-
-  std::vector<utils::Future<void>> NotifyAllTransactionCommandAdvanced(
-      tx::TransactionId tx_id);
-
- private:
-  RpcWorkerClients &clients_;
-};
-
-}  // namespace distributed
diff --git a/src/distributed/rpc_worker_clients.hpp b/src/distributed/rpc_worker_clients.hpp
deleted file mode 100644
index 2fada4769..000000000
--- a/src/distributed/rpc_worker_clients.hpp
+++ /dev/null
@@ -1,133 +0,0 @@
-#pragma once
-
-#include <functional>
-#include <type_traits>
-#include <unordered_map>
-
-#include "communication/rpc/client_pool.hpp"
-#include "distributed/coordination.hpp"
-#include "distributed/index_rpc_messages.hpp"
-#include "distributed/transactional_cache_cleaner_rpc_messages.hpp"
-#include "storage/types.hpp"
-#include "transactions/transaction.hpp"
-
-#include "threading/thread_pool.hpp"
-#include "utils/future.hpp"
-
-namespace distributed {
-
-/** A cache of RPC clients (of the given name/kind) per MG distributed worker.
- * Thread safe. */
-class RpcWorkerClients {
- public:
-  RpcWorkerClients(Coordination &coordination)
-      : coordination_(coordination),
-        thread_pool_(std::thread::hardware_concurrency()) {}
-
-  RpcWorkerClients(const RpcWorkerClients &) = delete;
-  RpcWorkerClients(RpcWorkerClients &&) = delete;
-  RpcWorkerClients &operator=(const RpcWorkerClients &) = delete;
-  RpcWorkerClients &operator=(RpcWorkerClients &&) = delete;
-
-  auto &GetClientPool(int worker_id) {
-    std::lock_guard<std::mutex> guard{lock_};
-    auto found = client_pools_.find(worker_id);
-    if (found != client_pools_.end()) return found->second;
-    return client_pools_
-        .emplace(std::piecewise_construct, std::forward_as_tuple(worker_id),
-                 std::forward_as_tuple(coordination_.GetEndpoint(worker_id)))
-        .first->second;
-  }
-
-  auto GetWorkerIds() { return coordination_.GetWorkerIds(); }
-
-  /** Asynchronously executes the given function on the RPC client for the
-   * given worker id. Returns a `utils::Future` of the given `execute`
-   * function's return type. */
-  template <typename TResult>
-  auto ExecuteOnWorker(
-      int worker_id,
-      std::function<TResult(communication::rpc::ClientPool &)> execute) {
-    auto &client_pool = GetClientPool(worker_id);
-    return thread_pool_.Run(execute, std::ref(client_pool));
-  }
-
-  /** Asynchronously executes the `execute` function on all worker RPC clients
-   * except the one whose id is `skip_worker_id`. Returns a vector of futures
-   * containing the results of the `execute` function. */
-  template <typename TResult>
-  auto ExecuteOnWorkers(
-      int skip_worker_id,
-      std::function<TResult(communication::rpc::ClientPool &)> execute) {
-    std::vector<utils::Future<TResult>> futures;
-    for (auto &worker_id : coordination_.GetWorkerIds()) {
-      if (worker_id == skip_worker_id) continue;
-      futures.emplace_back(std::move(ExecuteOnWorker(worker_id, execute)));
-    }
-    return futures;
-  }
-
- private:
-  // TODO make Coordination const, it's member GetEndpoint must be const too.
-  Coordination &coordination_;
-  std::unordered_map<int, communication::rpc::ClientPool> client_pools_;
-  std::mutex lock_;
-  threading::ThreadPool thread_pool_;
-};
-
-/** Wrapper class around an RPC call to build indices.
- */
-class IndexRpcClients {
- public:
-  IndexRpcClients(RpcWorkerClients &clients) : clients_(clients) {}
-
-  auto GetBuildIndexFutures(const storage::Label &label,
-                            const storage::Property &property,
-                            tx::TransactionId transaction_id,
-                            int worker_id) {
-    return clients_.ExecuteOnWorkers<bool>(
-        worker_id, [label, property, transaction_id](
-                       communication::rpc::ClientPool &client_pool) {
-          return client_pool.Call<BuildIndexRpc>(
-                     distributed::IndexLabelPropertyTx{
-                         label, property, transaction_id}) != nullptr;
-        });
-  }
-
- private:
-  RpcWorkerClients &clients_;
-};
-
-/** Join ongoing produces on all workers.
- *
- * Sends an RPC request to all workers when a transaction is ending, notifying
- * them to end all ongoing produces tied to that transaction.
- */
-class OngoingProduceJoinerRpcClients {
- public:
-  OngoingProduceJoinerRpcClients(RpcWorkerClients &clients)
-      : clients_(clients) {}
-
-  void JoinOngoingProduces(tx::TransactionId tx_id) {
-    auto futures = clients_.ExecuteOnWorkers<void>(
-        0, [tx_id](communication::rpc::ClientPool &client_pool) {
-          auto result =
-              client_pool.Call<distributed::WaitOnTransactionEndRpc>(tx_id);
-          CHECK(result)
-              << "[WaitOnTransactionEndRpc] failed to notify that transaction "
-              << tx_id << " ended";
-        });
-
-    // We need to wait for all workers to destroy pending futures to avoid using
-    // already destroyed (released) transaction objects.
-    for (auto &future : futures) {
-      future.wait();
-    }
-  }
-
- private:
-  RpcWorkerClients &clients_;
-};
-
-}  // namespace distributed
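
The header above describes a per-worker fan-out: look up the client pool for a
worker, run the callable on a thread pool, and hand back one future per worker.
As a rough, self-contained illustration of that pattern only -- std::async
replaces the project's threading::ThreadPool and the RPC client pools, and
every name below is a hypothetical stand-in rather than a Memgraph API:

    #include <functional>
    #include <future>
    #include <vector>

    // Hypothetical stand-in for communication::rpc::ClientPool (illustration only).
    struct FakeClientPool {
      int worker_id;
    };

    // Runs `execute` once per worker (skipping `skip_worker_id`) and returns
    // one future per dispatched call, mirroring the fan-out described above.
    template <typename TResult>
    std::vector<std::future<TResult>> ExecuteOnWorkersSketch(
        const std::vector<int> &worker_ids, int skip_worker_id,
        std::function<TResult(FakeClientPool &)> execute) {
      std::vector<std::future<TResult>> futures;
      for (int worker_id : worker_ids) {
        if (worker_id == skip_worker_id) continue;
        futures.emplace_back(
            std::async(std::launch::async, [worker_id, execute]() {
              FakeClientPool pool{worker_id};
              return execute(pool);
            }));
      }
      return futures;
    }
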
diff --git a/src/distributed/serialization.hpp b/src/distributed/serialization.hpp
deleted file mode 100644
index a11a5f2b2..000000000
--- a/src/distributed/serialization.hpp
+++ /dev/null
@@ -1,183 +0,0 @@
-#pragma once
-
-#include <cstdint>
-#include <memory>
-#include <vector>
-
-#include "storage/address_types.hpp"
-#include "storage/edge.hpp"
-#include "storage/types.hpp"
-#include "storage/vertex.hpp"
-#include "utils/serialization.hpp"
-
-namespace distributed {
-
-namespace impl {
-
-// Saves the given address into the given archive. Converts a local address to a
-// global one, using the given worker_id.
-template <typename TArchive, typename TAddress>
-void SaveAddress(TArchive &ar, TAddress address, int worker_id) {
-  if (address.is_local()) {
-    ar << address.local()->gid_;
-    ar << worker_id;
-  } else {
-    ar << address.gid();
-    ar << address.worker_id();
-  }
-};
-
-// Saves the given properties into the given archive.
-template <typename TArchive>
-void SaveProperties(TArchive &ar, const PropertyValueStore &props) {
-  ar << props.size();
-  for (auto &kv : props) {
-    ar << kv.first.storage();
-    utils::SaveTypedValue(ar, kv.second);
-  }
-}
-}  // namespace impl
-
-/**
- * Saves the given vertex into the given Boost archive.
- *
- * @param ar - Archive into which to serialize.
- * @param vertex - The vertex getting serialized.
- * @param worker_id - ID of the worker this is happening on. Necessary for local
- * to global address conversion.
- * @tparam TArchive - type of archive.
- */
-template <typename TArchive>
-void SaveVertex(TArchive &ar, const Vertex &vertex, int worker_id) {
-  auto save_edges = [&ar, worker_id](auto &edges) {
-    ar << edges.size();
-    for (auto &edge_struct : edges) {
-      impl::SaveAddress(ar, edge_struct.vertex, worker_id);
-      impl::SaveAddress(ar, edge_struct.edge, worker_id);
-      ar << edge_struct.edge_type.storage();
-    }
-  };
-  save_edges(vertex.out_);
-  save_edges(vertex.in_);
-
-  ar << vertex.labels_.size();
-  for (auto &label : vertex.labels_) {
-    ar << label.storage();
-  }
-
-  impl::SaveProperties(ar, vertex.properties_);
-}
-
-/**
- * Saves the given edge into the given Boost archive.
- *
- * @param ar - Archive into which to serialize.
- * @param edge - The edge getting serialized.
- * @param worker_id - ID of the worker this is happening on. Necessary for local
- * to global address conversion.
- * @tparam TArchive - type of archive.
- */
-template <typename TArchive>
-void SaveEdge(TArchive &ar, const Edge &edge, int worker_id) {
-  impl::SaveAddress(ar, edge.from_, worker_id);
-  impl::SaveAddress(ar, edge.to_, worker_id);
-  ar << edge.edge_type_.storage();
-  impl::SaveProperties(ar, edge.properties_);
-}
-
-/// Alias for `SaveEdge` allowing for param type resolution.
-template <typename TArchive>
-void SaveElement(TArchive &ar, const Edge &record, int worker_id) {
-  return SaveEdge(ar, record, worker_id);
-}
-
-/// Alias for `SaveVertex` allowing for param type resolution.
-template <typename TArchive>
-void SaveElement(TArchive &ar, const Vertex &record, int worker_id) {
-  return SaveVertex(ar, record, worker_id);
-}
-
-namespace impl {
-
-template <typename TArchive>
-storage::VertexAddress LoadVertexAddress(TArchive &ar) {
-  gid::Gid vertex_id;
-  ar >> vertex_id;
-  int worker_id;
-  ar >> worker_id;
-  return {vertex_id, worker_id};
-}
-
-template <typename TArchive>
-void LoadProperties(TArchive &ar, PropertyValueStore &store) {
-  size_t count;
-  ar >> count;
-  for (size_t i = 0; i < count; ++i) {
-    storage::Property::StorageT prop;
-    ar >> prop;
-    query::TypedValue value;
-    utils::LoadTypedValue(ar, value);
-    store.set(storage::Property(prop), static_cast<PropertyValue>(value));
-  }
-}
-
-}  // namespace impl
-
-/**
- * Loads a Vertex from the given archive and returns it.
- *
- * @param ar - The archive to load from.
- * @tparam TArchive - archive type.
- */
-template <typename TArchive>
-std::unique_ptr<Vertex> LoadVertex(TArchive &ar) {
-  auto vertex = std::make_unique<Vertex>();
-
-  auto decode_edges = [&ar](Edges &edges) {
-    size_t count;
-    ar >> count;
-    for (size_t i = 0; i < count; ++i) {
-      auto vertex_address = impl::LoadVertexAddress(ar);
-      storage::EdgeType::StorageT edge_type;
-      gid::Gid edge_id;
-      ar >> edge_id;
-      int edge_worker_id;
-      ar >> edge_worker_id;
-      ar >> edge_type;
-      edges.emplace(vertex_address, {edge_id, edge_worker_id},
-                    storage::EdgeType(edge_type));
-    }
-  };
-  decode_edges(vertex->out_);
-  decode_edges(vertex->in_);
-
-  size_t count;
-  ar >> count;
-  for (size_t i = 0; i < count; ++i) {
-    storage::Label::StorageT label;
-    ar >> label;
-    vertex->labels_.emplace_back(label);
-  }
-  impl::LoadProperties(ar, vertex->properties_);
-
-  return vertex;
-}
-
-/**
- * Loads an Edge from the given archive and returns it.
- *
- * @param ar - The archive to load from.
- * @tparam TArchive - archive type.
- */
-template <typename TArchive>
-std::unique_ptr<Edge> LoadEdge(TArchive &ar) {
-  auto from = impl::LoadVertexAddress(ar);
-  auto to = impl::LoadVertexAddress(ar);
-  storage::EdgeType::StorageT edge_type;
-  ar >> edge_type;
-  auto edge = std::make_unique<Edge>(from, to, storage::EdgeType{edge_type});
-  impl::LoadProperties(ar, edge->properties_);
-
-  return edge;
-}
-}  // namespace distributed
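
The helpers in the deleted serialization.hpp rely on one invariant: the Save*
and Load* functions must write and read fields in exactly the same order (sizes
first, then elements). A toy sketch of that invariant, with iostreams standing
in for a Boost archive and a hypothetical Record type instead of Vertex/Edge:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Hypothetical record; the real code serializes Vertex/Edge via Boost archives.
    struct Record {
      int id;
      std::vector<int> labels;
    };

    // Save writes the size first, then the elements...
    void Save(std::ostream &ar, const Record &r) {
      ar << r.id << ' ' << r.labels.size() << ' ';
      for (int label : r.labels) ar << label << ' ';
    }

    // ...and Load must read them back in exactly the same order.
    Record Load(std::istream &ar) {
      Record r;
      std::size_t count = 0;
      ar >> r.id >> count;
      for (std::size_t i = 0; i < count; ++i) {
        int label = 0;
        ar >> label;
        r.labels.push_back(label);
      }
      return r;
    }
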
diff --git a/src/distributed/storage_gc_rpc_messages.hpp b/src/distributed/storage_gc_rpc_messages.hpp
deleted file mode 100644
index 716993ede..000000000
--- a/src/distributed/storage_gc_rpc_messages.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-#pragma once
-
-#include "boost/serialization/access.hpp"
-#include "boost/serialization/base_object.hpp"
-
-#include "communication/rpc/messages.hpp"
-#include "io/network/endpoint.hpp"
-#include "transactions/transaction.hpp"
-
-namespace distributed {
-
-using communication::rpc::Message;
-using Endpoint = io::network::Endpoint;
-
-struct GcClearedStatusReq : public Message {
-  GcClearedStatusReq() {}
-  GcClearedStatusReq(tx::TransactionId local_oldest_active, int worker_id)
-      : local_oldest_active(local_oldest_active), worker_id(worker_id) {}
-
-  tx::TransactionId local_oldest_active;
-  int worker_id;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &boost::serialization::base_object<Message>(*this);
-    ar &local_oldest_active;
-    ar &worker_id;
-  }
-};
-
-RPC_NO_MEMBER_MESSAGE(GcClearedStatusRes);
-
-using RanLocalGcRpc =
-    communication::rpc::RequestResponse<GcClearedStatusReq, GcClearedStatusRes>;
-
-}  // namespace distributed
diff --git a/src/distributed/transactional_cache_cleaner.hpp b/src/distributed/transactional_cache_cleaner.hpp
deleted file mode 100644
index 4644023a1..000000000
--- a/src/distributed/transactional_cache_cleaner.hpp
+++ /dev/null
@@ -1,87 +0,0 @@
-#pragma once
-
-#include <functional>
-#include <vector>
-
-#include "communication/rpc/server.hpp"
-#include "distributed/produce_rpc_server.hpp"
-#include "distributed/transactional_cache_cleaner_rpc_messages.hpp"
-#include "transactions/engine.hpp"
-#include "transactions/engine_worker.hpp"
-#include "utils/scheduler.hpp"
-
-namespace distributed {
-
-/// Periodically calls `ClearTransactionalCache(oldest_transaction)` on all
-/// registered objects.
-class TransactionalCacheCleaner {
-  /// The wait time between two releases of local transaction objects that have
-  /// expired on the master.
-  static constexpr std::chrono::seconds kCacheReleasePeriod{1};
-
- public:
-  template <typename... T>
-  TransactionalCacheCleaner(tx::Engine &tx_engine, T &... caches)
-      : tx_engine_(tx_engine) {
-    Register(caches...);
-    cache_clearing_scheduler_.Run(
-        "DistrTxCacheGc", kCacheReleasePeriod,
-        [this]() { this->Clear(tx_engine_.GlobalGcSnapshot().back()); });
-  }
-
- protected:
-  /// Registers the given object for transactional cleaning. The object will
-  /// periodically have its `ClearTransactionalCache(tx::TransactionId)` method
-  /// called with the oldest active transaction id. Note that the ONLY guarantee
-  /// for the call param is that there are no transactions alive with an id
-  /// lower than it.
-  template <typename TCache>
-  void Register(TCache &cache) {
-    functions_.emplace_back([&cache](tx::TransactionId oldest_active) {
-      cache.ClearTransactionalCache(oldest_active);
-    });
-  }
-
- private:
-  template <typename TCache, typename... T>
-  void Register(TCache &cache, T &... caches) {
-    Register(cache);
-    Register(caches...);
-  }
-
-  void Clear(tx::TransactionId oldest_active) {
-    for (auto &f : functions_) f(oldest_active);
-  }
-
-  tx::Engine &tx_engine_;
-  std::vector<std::function<void(tx::TransactionId &oldest_active)>> functions_;
-  utils::Scheduler cache_clearing_scheduler_;
-};
-
-/// Registers a RPC server that listens for `WaitOnTransactionEnd` requests
-/// that require all ongoing produces to finish. It also periodically calls
-/// `ClearTransactionalCache` on all registered objects.
-class WorkerTransactionalCacheCleaner : public TransactionalCacheCleaner {
- public:
-  template <class... T>
-  WorkerTransactionalCacheCleaner(tx::WorkerEngine &tx_engine,
-                                  communication::rpc::Server &server,
-                                  ProduceRpcServer &produce_server,
-                                  T &... caches)
-      : TransactionalCacheCleaner(tx_engine, caches...),
-        rpc_server_(server),
-        produce_server_(produce_server) {
-    Register(tx_engine);
-    rpc_server_.Register<WaitOnTransactionEndRpc>(
-        [this](const WaitOnTransactionEndReq &req) {
-          produce_server_.FinishAndClearOngoingProducePlans(req.member);
-          return std::make_unique<WaitOnTransactionEndRes>();
-        });
-  }
-
- private:
-  communication::rpc::Server &rpc_server_;
-  ProduceRpcServer &produce_server_;
-};
-
-}  // namespace distributed
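
Stripped of the RPC wiring, the cleaner above amounts to "collect callbacks,
then invoke them periodically with the oldest active transaction id". A minimal
stand-alone sketch of that shape (a plain method call replaces utils::Scheduler;
all names are illustrative, not Memgraph APIs):

    #include <cstdint>
    #include <functional>
    #include <utility>
    #include <vector>

    // Hypothetical sketch: the real code drives Clear() from utils::Scheduler
    // and feeds it tx_engine.GlobalGcSnapshot().back().
    class CacheCleanerSketch {
     public:
      // Register a cache-clearing callback, e.g. one per distributed cache.
      void Register(std::function<void(std::uint64_t)> clear_cache) {
        functions_.push_back(std::move(clear_cache));
      }

      // Invoked periodically with the oldest active transaction id; every
      // registered cache may drop state belonging to older transactions.
      void Clear(std::uint64_t oldest_active) {
        for (auto &f : functions_) f(oldest_active);
      }

     private:
      std::vector<std::function<void(std::uint64_t)>> functions_;
    };
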
diff --git a/src/distributed/transactional_cache_cleaner_rpc_messages.hpp b/src/distributed/transactional_cache_cleaner_rpc_messages.hpp
deleted file mode 100644
index a949ae828..000000000
--- a/src/distributed/transactional_cache_cleaner_rpc_messages.hpp
+++ /dev/null
@@ -1,13 +0,0 @@
-#pragma once
-
-#include "communication/rpc/messages.hpp"
-#include "transactions/type.hpp"
-
-namespace distributed {
-
-RPC_SINGLE_MEMBER_MESSAGE(WaitOnTransactionEndReq, tx::TransactionId);
-RPC_NO_MEMBER_MESSAGE(WaitOnTransactionEndRes);
-using WaitOnTransactionEndRpc =
-    communication::rpc::RequestResponse<WaitOnTransactionEndReq,
-                                        WaitOnTransactionEndRes>;
-};
diff --git a/src/distributed/updates_rpc_clients.cpp b/src/distributed/updates_rpc_clients.cpp
deleted file mode 100644
index bd5ccf52b..000000000
--- a/src/distributed/updates_rpc_clients.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-
-#include <unordered_map>
-#include <vector>
-
-#include "distributed/updates_rpc_clients.hpp"
-#include "query/exceptions.hpp"
-
-namespace distributed {
-
-namespace {
-void RaiseIfRemoteError(UpdateResult result) {
-  switch (result) {
-    case UpdateResult::UNABLE_TO_DELETE_VERTEX_ERROR:
-      throw query::RemoveAttachedVertexException();
-    case UpdateResult::SERIALIZATION_ERROR:
-      throw mvcc::SerializationError();
-    case UpdateResult::LOCK_TIMEOUT_ERROR:
-      throw LockTimeoutException(
-          "Remote LockTimeoutError during edge creation");
-    case UpdateResult::UPDATE_DELETED_ERROR:
-      throw RecordDeletedError();
-    case UpdateResult::DONE:
-      break;
-  }
-}
-}
-
-UpdateResult UpdatesRpcClients::Update(int worker_id,
-                                       const database::StateDelta &delta) {
-  auto res = worker_clients_.GetClientPool(worker_id).Call<UpdateRpc>(delta);
-  CHECK(res) << "UpdateRpc failed on worker: " << worker_id;
-  return res->member;
-}
-
-gid::Gid UpdatesRpcClients::CreateVertex(
-    int worker_id, tx::TransactionId tx_id,
-    const std::vector<storage::Label> &labels,
-    const std::unordered_map<storage::Property, query::TypedValue>
-        &properties) {
-  auto res = worker_clients_.GetClientPool(worker_id).Call<CreateVertexRpc>(
-      CreateVertexReqData{tx_id, labels, properties});
-  CHECK(res) << "CreateVertexRpc failed on worker: " << worker_id;
-  CHECK(res->member.result == UpdateResult::DONE)
-      << "Remote Vertex creation result not UpdateResult::DONE";
-  return res->member.gid;
-}
-
-storage::EdgeAddress UpdatesRpcClients::CreateEdge(
-    tx::TransactionId tx_id, VertexAccessor &from, VertexAccessor &to,
-    storage::EdgeType edge_type) {
-  CHECK(from.address().is_remote()) << "In CreateEdge `from` must be remote";
-
-  int from_worker = from.address().worker_id();
-  auto res = worker_clients_.GetClientPool(from_worker)
-                 .Call<CreateEdgeRpc>(CreateEdgeReqData{
-                     from.gid(), to.GlobalAddress(), edge_type, tx_id});
-  CHECK(res) << "CreateEdge RPC failed on worker: " << from_worker;
-  RaiseIfRemoteError(res->member.result);
-  return {res->member.gid, from_worker};
-}
-
-void UpdatesRpcClients::AddInEdge(tx::TransactionId tx_id,
-                                  VertexAccessor &from,
-                                  storage::EdgeAddress edge_address,
-                                  VertexAccessor &to,
-                                  storage::EdgeType edge_type) {
-  CHECK(to.address().is_remote() && edge_address.is_remote() &&
-        (from.GlobalAddress().worker_id() != to.address().worker_id()))
-      << "AddInEdge should only be called when `to` is remote and "
-         "`from` is not on the same worker as `to`.";
-  auto worker_id = to.GlobalAddress().worker_id();
-  auto res = worker_clients_.GetClientPool(worker_id).Call<AddInEdgeRpc>(
-      AddInEdgeReqData{from.GlobalAddress(), edge_address, to.gid(), edge_type,
-                       tx_id});
-  CHECK(res) << "AddInEdge RPC failed on worker: " << worker_id;
-  RaiseIfRemoteError(res->member);
-}
-
-void UpdatesRpcClients::RemoveVertex(int worker_id, tx::TransactionId tx_id,
-                                     gid::Gid gid, bool check_empty) {
-  auto res = worker_clients_.GetClientPool(worker_id).Call<RemoveVertexRpc>(
-      RemoveVertexReqData{gid, tx_id, check_empty});
-  CHECK(res) << "RemoveVertex RPC failed on worker: " << worker_id;
-  RaiseIfRemoteError(res->member);
-}
-
-void UpdatesRpcClients::RemoveEdge(tx::TransactionId tx_id, int worker_id,
-                                   gid::Gid edge_gid, gid::Gid vertex_from_id,
-                                   storage::VertexAddress vertex_to_addr) {
-  auto res = worker_clients_.GetClientPool(worker_id).Call<RemoveEdgeRpc>(
-      RemoveEdgeData{tx_id, edge_gid, vertex_from_id, vertex_to_addr});
-  CHECK(res) << "RemoveEdge RPC failed on worker: " << worker_id;
-  RaiseIfRemoteError(res->member);
-}
-
-void UpdatesRpcClients::RemoveInEdge(tx::TransactionId tx_id, int worker_id,
-                                     gid::Gid vertex_id,
-                                     storage::EdgeAddress edge_address) {
-  CHECK(edge_address.is_remote()) << "RemoveInEdge edge_address is local.";
-  auto res = worker_clients_.GetClientPool(worker_id).Call<RemoveInEdgeRpc>(
-      RemoveInEdgeData{tx_id, vertex_id, edge_address});
-  CHECK(res) << "RemoveInEdge RPC failed on worker: " << worker_id;
-  RaiseIfRemoteError(res->member);
-}
-
-std::vector<utils::Future<UpdateResult>> UpdatesRpcClients::UpdateApplyAll(
-    int skip_worker_id, tx::TransactionId tx_id) {
-  return worker_clients_.ExecuteOnWorkers<UpdateResult>(
-      skip_worker_id, [tx_id](auto &client) {
-        auto res = client.template Call<UpdateApplyRpc>(tx_id);
-        CHECK(res) << "UpdateApplyRpc failed";
-        return res->member;
-      });
-}
-
-}  // namespace distributed
diff --git a/src/distributed/updates_rpc_clients.hpp b/src/distributed/updates_rpc_clients.hpp
deleted file mode 100644
index a5baf55f7..000000000
--- a/src/distributed/updates_rpc_clients.hpp
+++ /dev/null
@@ -1,76 +0,0 @@
-#pragma once
-
-#include <unordered_map>
-#include <vector>
-
-#include "database/state_delta.hpp"
-#include "distributed/rpc_worker_clients.hpp"
-#include "distributed/updates_rpc_messages.hpp"
-#include "query/typed_value.hpp"
-#include "storage/address_types.hpp"
-#include "storage/gid.hpp"
-#include "storage/types.hpp"
-#include "transactions/type.hpp"
-#include "utils/future.hpp"
-
-namespace distributed {
-
-/// Exposes the functionality to send updates to other workers (that own the
-/// graph element we are updating). Also enables us to ask a worker to apply
-/// the accumulated deferred updates, or to discard them.
-class UpdatesRpcClients {
- public:
-  explicit UpdatesRpcClients(RpcWorkerClients &clients)
-      : worker_clients_(clients) {}
-
-  /// Sends an update delta to the given worker.
-  UpdateResult Update(int worker_id, const database::StateDelta &delta);
-
-  /// Creates a vertex on the given worker and returns its id.
-  gid::Gid CreateVertex(
-      int worker_id, tx::TransactionId tx_id,
-      const std::vector<storage::Label> &labels,
-      const std::unordered_map<storage::Property, query::TypedValue>
-          &properties);
-
-  /// Creates an edge on the given worker and returns its address. If the `to`
-  /// vertex is on the same worker as `from`, then all remote CRUD will be
-  /// handled by a call to this function. Otherwise a separate call to
-  /// `AddInEdge` might be necessary. Throws all the exceptions that can
-  /// occur remotely as a result of updating a vertex.
-  storage::EdgeAddress CreateEdge(tx::TransactionId tx_id,
-                                  VertexAccessor &from, VertexAccessor &to,
-                                  storage::EdgeType edge_type);
-
-  /// Adds the edge with the given address to the `to` vertex as an incoming
-  /// edge. Only used when `to` is remote and not on the same worker as `from`.
-  void AddInEdge(tx::TransactionId tx_id, VertexAccessor &from,
-                 storage::EdgeAddress edge_address, VertexAccessor &to,
-                 storage::EdgeType edge_type);
-
-  /// Removes a vertex from the other worker.
-  void RemoveVertex(int worker_id, tx::TransactionId tx_id, gid::Gid gid,
-                    bool check_empty);
-
-  /// Removes an edge on another worker. This also handles the `from` vertex
-  /// outgoing edge, as that vertex is on the same worker as the edge. If the
-  /// `to` vertex is on the same worker, then that side is handled too by the
-  /// single RPC call, otherwise a separate call has to be made to
-  /// RemoveInEdge.
-  void RemoveEdge(tx::TransactionId tx_id, int worker_id, gid::Gid edge_gid,
-                  gid::Gid vertex_from_id,
-                  storage::VertexAddress vertex_to_addr);
-
-  void RemoveInEdge(tx::TransactionId tx_id, int worker_id,
-                    gid::Gid vertex_id, storage::EdgeAddress edge_address);
-
-  /// Calls for all the workers (except the given one) to apply their updates
-  /// and returns the future results.
-  std::vector<utils::Future<UpdateResult>> UpdateApplyAll(
-      int skip_worker_id, tx::TransactionId tx_id);
-
- private:
-  RpcWorkerClients &worker_clients_;
-};
-
-}  // namespace distributed
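
The updates client/server pair boils down to buffering per-transaction deltas
on the worker that owns the data and applying them all when told to. A
simplified, single-process sketch of that accumulate-then-apply idea --
std::string stands in for database::StateDelta, and nothing here is a Memgraph
API:

    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    using TransactionId = std::uint64_t;

    // Hypothetical sketch; no RPC, locking or real storage mutation is shown.
    class DeferredUpdatesSketch {
     public:
      // Buffer a delta for the transaction; nothing is applied yet.
      void Emplace(TransactionId tx_id, std::string delta) {
        deltas_[tx_id].push_back(std::move(delta));
      }

      // Apply (here: just count) everything buffered for the transaction and
      // drop the buffer, mirroring UpdateApplyRpc followed by cache clearing.
      std::size_t Apply(TransactionId tx_id) {
        auto it = deltas_.find(tx_id);
        if (it == deltas_.end()) return 0;
        std::size_t applied = it->second.size();
        deltas_.erase(it);
        return applied;
      }

     private:
      std::map<TransactionId, std::vector<std::string>> deltas_;
    };
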
diff --git a/src/distributed/updates_rpc_messages.hpp b/src/distributed/updates_rpc_messages.hpp
deleted file mode 100644
index 098a13696..000000000
--- a/src/distributed/updates_rpc_messages.hpp
+++ /dev/null
@@ -1,203 +0,0 @@
-#pragma once
-
-#include <unordered_map>
-
-#include "boost/serialization/vector.hpp"
-
-#include "communication/rpc/messages.hpp"
-#include "database/state_delta.hpp"
-#include "storage/address_types.hpp"
-#include "storage/gid.hpp"
-#include "transactions/type.hpp"
-#include "utils/serialization.hpp"
-
-namespace distributed {
-
-/// The result of sending or applying a deferred update to a worker.
-enum class UpdateResult {
-  DONE,
-  SERIALIZATION_ERROR,
-  LOCK_TIMEOUT_ERROR,
-  UPDATE_DELETED_ERROR,
-  UNABLE_TO_DELETE_VERTEX_ERROR
-};
-
-RPC_SINGLE_MEMBER_MESSAGE(UpdateReq, database::StateDelta);
-RPC_SINGLE_MEMBER_MESSAGE(UpdateRes, UpdateResult);
-using UpdateRpc = communication::rpc::RequestResponse<UpdateReq, UpdateRes>;
-
-RPC_SINGLE_MEMBER_MESSAGE(UpdateApplyReq, tx::TransactionId);
-RPC_SINGLE_MEMBER_MESSAGE(UpdateApplyRes, UpdateResult);
-using UpdateApplyRpc =
-    communication::rpc::RequestResponse<UpdateApplyReq, UpdateApplyRes>;
-
-struct CreateResult {
-  UpdateResult result;
-  // Only valid if creation was successful.
-  gid::Gid gid;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &result;
-    ar &gid;
-  }
-};
-
-struct CreateVertexReqData {
-  tx::TransactionId tx_id;
-  std::vector<storage::Label> labels;
-  std::unordered_map<storage::Property, query::TypedValue> properties;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void save(TArchive &ar, unsigned int) const {
-    ar << tx_id;
-    ar << labels;
-    ar << properties.size();
-    for (auto &kv : properties) {
-      ar << kv.first;
-      utils::SaveTypedValue(ar, kv.second);
-    }
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, unsigned int) {
-    ar >> tx_id;
-    ar >> labels;
-    size_t props_size;
-    ar >> props_size;
-    for (size_t i = 0; i < props_size; ++i) {
-      storage::Property p;
-      ar >> p;
-      query::TypedValue tv;
-      utils::LoadTypedValue(ar, tv);
-      properties.emplace(p, std::move(tv));
-    }
-  }
-  BOOST_SERIALIZATION_SPLIT_MEMBER()
-};
-
-RPC_SINGLE_MEMBER_MESSAGE(CreateVertexReq, CreateVertexReqData);
-RPC_SINGLE_MEMBER_MESSAGE(CreateVertexRes, CreateResult);
-using CreateVertexRpc =
-    communication::rpc::RequestResponse<CreateVertexReq, CreateVertexRes>;
-
-struct CreateEdgeReqData {
-  gid::Gid from;
-  storage::VertexAddress to;
-  storage::EdgeType edge_type;
-  tx::TransactionId tx_id;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &from;
-    ar &to;
-    ar &edge_type;
-    ar &tx_id;
-  }
-};
-
-RPC_SINGLE_MEMBER_MESSAGE(CreateEdgeReq, CreateEdgeReqData);
-RPC_SINGLE_MEMBER_MESSAGE(CreateEdgeRes, CreateResult);
-using CreateEdgeRpc =
-    communication::rpc::RequestResponse<CreateEdgeReq, CreateEdgeRes>;
-
-struct AddInEdgeReqData {
-  storage::VertexAddress from;
-  storage::EdgeAddress edge_address;
-  gid::Gid to;
-  storage::EdgeType edge_type;
-  tx::TransactionId tx_id;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &from;
-    ar &edge_address;
-    ar &to;
-    ar &edge_type;
-    ar &tx_id;
-  }
-};
-
-RPC_SINGLE_MEMBER_MESSAGE(AddInEdgeReq, AddInEdgeReqData);
-RPC_SINGLE_MEMBER_MESSAGE(AddInEdgeRes, UpdateResult);
-using AddInEdgeRpc =
-    communication::rpc::RequestResponse<AddInEdgeReq, AddInEdgeRes>;
-
-struct RemoveVertexReqData {
-  gid::Gid gid;
-  tx::TransactionId tx_id;
-  bool check_empty;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &gid;
-    ar &tx_id;
-    ar &check_empty;
-  }
-};
-
-RPC_SINGLE_MEMBER_MESSAGE(RemoveVertexReq, RemoveVertexReqData);
-RPC_SINGLE_MEMBER_MESSAGE(RemoveVertexRes, UpdateResult);
-using RemoveVertexRpc =
-    communication::rpc::RequestResponse<RemoveVertexReq, RemoveVertexRes>;
-
-struct RemoveEdgeData {
-  tx::TransactionId tx_id;
-  gid::Gid edge_id;
-  gid::Gid vertex_from_id;
-  storage::VertexAddress vertex_to_address;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &tx_id;
-    ar &edge_id;
-    ar &vertex_from_id;
-    ar &vertex_to_address;
-  }
-};
-
-RPC_SINGLE_MEMBER_MESSAGE(RemoveEdgeReq, RemoveEdgeData);
-RPC_SINGLE_MEMBER_MESSAGE(RemoveEdgeRes, UpdateResult);
-using RemoveEdgeRpc =
-    communication::rpc::RequestResponse<RemoveEdgeReq, RemoveEdgeRes>;
-
-struct RemoveInEdgeData {
-  tx::TransactionId tx_id;
-  gid::Gid vertex;
-  storage::EdgeAddress edge_address;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &tx_id;
-    ar &vertex;
-    ar &edge_address;
-  }
-};
-
-RPC_SINGLE_MEMBER_MESSAGE(RemoveInEdgeReq, RemoveInEdgeData);
-RPC_SINGLE_MEMBER_MESSAGE(RemoveInEdgeRes, UpdateResult);
-using RemoveInEdgeRpc =
-    communication::rpc::RequestResponse<RemoveInEdgeReq, RemoveInEdgeRes>;
-
-}  // namespace distributed
diff --git a/src/distributed/updates_rpc_server.cpp b/src/distributed/updates_rpc_server.cpp
deleted file mode 100644
index 2ad76cc29..000000000
--- a/src/distributed/updates_rpc_server.cpp
+++ /dev/null
@@ -1,349 +0,0 @@
-#include <utility>
-
-#include "glog/logging.h"
-
-#include "distributed/updates_rpc_server.hpp"
-#include "threading/sync/lock_timeout_exception.hpp"
-
-namespace distributed {
-
-template <typename TRecordAccessor>
-UpdateResult UpdatesRpcServer::TransactionUpdates<TRecordAccessor>::Emplace(
-    const database::StateDelta &delta) {
-  auto gid = std::is_same<TRecordAccessor, VertexAccessor>::value
-                 ? delta.vertex_id
-                 : delta.edge_id;
-  std::lock_guard<SpinLock> guard{lock_};
-  auto found = deltas_.find(gid);
-  if (found == deltas_.end()) {
-    found =
-        deltas_
-            .emplace(gid, std::make_pair(FindAccessor(gid),
-                                         std::vector<database::StateDelta>{}))
-            .first;
-  }
-
-  found->second.second.emplace_back(delta);
-
-  // TODO call `RecordAccessor::update` to force serialization errors to
-  // fail-fast (as opposed to when all the deltas get applied).
-  //
-  // This is problematic because `VersionList::update` needs to become
-  // thread-safe within the same transaction. Note that the concurrency is
-  // possible both between the owner worker interpretation thread and an RPC
-  // thread (current thread), as well as multiple RPC threads if this
-  // object's lock is released (perhaps desirable).
-  //
-  // A potential solution *might* be that `LockStore::Lock` returns a `bool`
-  // indicating if the caller was the one obtaining the lock (not the same
-  // as lock already being held by the same transaction).
-  //
-  // Another thing that needs to be done (if we do this) is ensuring that
-  // `LockStore::Take` is thread-safe when called in parallel in the same
-  // transaction. Currently it's thread-safe only when called in parallel
-  // from different transactions (only one manages to take the RecordLock).
-  //
-  // Deferring the implementation of this as it's tricky, and essentially an
-  // optimization.
-  //
-  // try {
-  //   found->second.first.update();
-  // } catch (const mvcc::SerializationError &) {
-  //   return UpdateResult::SERIALIZATION_ERROR;
-  // } catch (const RecordDeletedError &) {
-  //   return UpdateResult::UPDATE_DELETED_ERROR;
-  // } catch (const LockTimeoutException &) {
-  //   return UpdateResult::LOCK_TIMEOUT_ERROR;
-  // }
-  return UpdateResult::DONE;
-}
-
-template <typename TRecordAccessor>
-gid::Gid UpdatesRpcServer::TransactionUpdates<TRecordAccessor>::CreateVertex(
-    const std::vector<storage::Label> &labels,
-    const std::unordered_map<storage::Property, query::TypedValue>
-        &properties) {
-  auto result = db_accessor_.InsertVertex();
-  for (auto &label : labels) result.add_label(label);
-  for (auto &kv : properties) result.PropsSet(kv.first, kv.second);
-  std::lock_guard<SpinLock> guard{lock_};
-  deltas_.emplace(result.gid(),
-                  std::make_pair(result, std::vector<database::StateDelta>{}));
-  return result.gid();
-}
-
-template <typename TRecordAccessor>
-gid::Gid UpdatesRpcServer::TransactionUpdates<TRecordAccessor>::CreateEdge(
-    gid::Gid from, storage::VertexAddress to, storage::EdgeType edge_type) {
-  auto &db = db_accessor_.db();
-  auto edge = db_accessor_.InsertOnlyEdge(
-      {from, db.WorkerId()}, db.storage().LocalizedAddressIfPossible(to),
-      edge_type);
-  std::lock_guard<SpinLock> guard{lock_};
-  deltas_.emplace(edge.gid(),
-                  std::make_pair(edge, std::vector<database::StateDelta>{}));
-  return edge.gid();
-}
-
-template <typename TRecordAccessor>
-UpdateResult UpdatesRpcServer::TransactionUpdates<TRecordAccessor>::Apply() {
-  std::lock_guard<SpinLock> guard{lock_};
-  for (auto &kv : deltas_) {
-    auto &record_accessor = kv.second.first;
-    // We need to reconstruct the record as in the meantime some local
-    // update might have updated it.
-    record_accessor.Reconstruct();
-    for (database::StateDelta &delta : kv.second.second) {
-      try {
-        auto &dba = db_accessor_;
-        switch (delta.type) {
-          case database::StateDelta::Type::TRANSACTION_BEGIN:
-          case database::StateDelta::Type::TRANSACTION_COMMIT:
-          case database::StateDelta::Type::TRANSACTION_ABORT:
-          case database::StateDelta::Type::CREATE_VERTEX:
-          case database::StateDelta::Type::CREATE_EDGE:
-          case database::StateDelta::Type::BUILD_INDEX:
-            LOG(FATAL) << "Can only apply record update deltas for remote "
-                          "graph element";
-          case database::StateDelta::Type::REMOVE_VERTEX:
-            if (!db_accessor().RemoveVertex(
-                    reinterpret_cast<VertexAccessor &>(record_accessor),
-                    delta.check_empty)) {
-              return UpdateResult::UNABLE_TO_DELETE_VERTEX_ERROR;
-            }
-            break;
-          case database::StateDelta::Type::SET_PROPERTY_VERTEX:
-          case database::StateDelta::Type::SET_PROPERTY_EDGE:
-            record_accessor.PropsSet(delta.property, delta.value);
-            break;
-          case database::StateDelta::Type::ADD_LABEL:
-            reinterpret_cast<VertexAccessor &>(record_accessor)
-                .add_label(delta.label);
-            break;
-          case database::StateDelta::Type::REMOVE_LABEL:
-            reinterpret_cast<VertexAccessor &>(record_accessor)
-                .remove_label(delta.label);
-            break;
-          case database::StateDelta::Type::ADD_OUT_EDGE:
-            reinterpret_cast<Vertex &>(record_accessor.update())
-                .out_.emplace(dba.db().storage().LocalizedAddressIfPossible(
-                                  delta.vertex_to_address),
-                              dba.db().storage().LocalizedAddressIfPossible(
-                                  delta.edge_address),
-                              delta.edge_type);
-            dba.wal().Emplace(delta);
-            break;
-          case database::StateDelta::Type::ADD_IN_EDGE:
-            reinterpret_cast<Vertex &>(record_accessor.update())
-                .in_.emplace(dba.db().storage().LocalizedAddressIfPossible(
-                                 delta.vertex_from_address),
-                             dba.db().storage().LocalizedAddressIfPossible(
-                                 delta.edge_address),
-                             delta.edge_type);
-            dba.wal().Emplace(delta);
-            break;
-          case database::StateDelta::Type::REMOVE_EDGE:
-            // We only remove the edge as a result of this StateDelta,
-            // because the removal of edge from vertex in/out is performed
-            // in REMOVE_[IN/OUT]_EDGE deltas.
-            db_accessor_.RemoveEdge(
-                reinterpret_cast<EdgeAccessor &>(record_accessor), false,
-                false);
-            break;
-          case database::StateDelta::Type::REMOVE_OUT_EDGE:
-            reinterpret_cast<VertexAccessor &>(record_accessor)
-                .RemoveOutEdge(delta.edge_address);
-            break;
-          case database::StateDelta::Type::REMOVE_IN_EDGE:
-            reinterpret_cast<VertexAccessor &>(record_accessor)
-                .RemoveInEdge(delta.edge_address);
-            break;
-        }
-      } catch (const mvcc::SerializationError &) {
-        return UpdateResult::SERIALIZATION_ERROR;
-      } catch (const RecordDeletedError &) {
-        return UpdateResult::UPDATE_DELETED_ERROR;
-      } catch (const LockTimeoutException &) {
-        return UpdateResult::LOCK_TIMEOUT_ERROR;
-      }
-    }
-  }
-  return UpdateResult::DONE;
-}
-
-UpdatesRpcServer::UpdatesRpcServer(database::GraphDb &db,
-                                   communication::rpc::Server &server)
-    : db_(db) {
-  server.Register<UpdateRpc>([this](const UpdateReq &req) {
-    using DeltaType = database::StateDelta::Type;
-    auto &delta = req.member;
-    switch (delta.type) {
-      case DeltaType::SET_PROPERTY_VERTEX:
-      case DeltaType::ADD_LABEL:
-      case DeltaType::REMOVE_LABEL:
-      case database::StateDelta::Type::REMOVE_OUT_EDGE:
-      case database::StateDelta::Type::REMOVE_IN_EDGE:
-        return std::make_unique<UpdateRes>(
-            GetUpdates(vertex_updates_, delta.transaction_id).Emplace(delta));
-      case DeltaType::SET_PROPERTY_EDGE:
-        return std::make_unique<UpdateRes>(
-            GetUpdates(edge_updates_, delta.transaction_id).Emplace(delta));
-      default:
-        LOG(FATAL) << "Can't perform a remote update with delta type: "
-                   << static_cast<int>(req.member.type);
-    }
-  });
-
-  server.Register<UpdateApplyRpc>([this](const UpdateApplyReq &req) {
-    return std::make_unique<UpdateApplyRes>(Apply(req.member));
-  });
-
-  server.Register<CreateVertexRpc>([this](const CreateVertexReq &req) {
-    gid::Gid gid = GetUpdates(vertex_updates_, req.member.tx_id)
-                       .CreateVertex(req.member.labels, req.member.properties);
-    return std::make_unique<CreateVertexRes>(
-        CreateResult{UpdateResult::DONE, gid});
-  });
-
-  server.Register<CreateEdgeRpc>([this](const CreateEdgeReq &req) {
-    auto data = req.member;
-    auto creation_result = CreateEdge(data);
-
-    // If `from` and `to` are both on this worker, we handle it in this
-    // RPC call. Do it only if CreateEdge succeeded.
-    if (creation_result.result == UpdateResult::DONE &&
-        data.to.worker_id() == db_.WorkerId()) {
-      auto to_delta = database::StateDelta::AddInEdge(
-          data.tx_id, data.to.gid(), {data.from, db_.WorkerId()},
-          {creation_result.gid, db_.WorkerId()}, data.edge_type);
-      creation_result.result =
-          GetUpdates(vertex_updates_, data.tx_id).Emplace(to_delta);
-    }
-
-    return std::make_unique<CreateEdgeRes>(creation_result);
-  });
-
-  server.Register<AddInEdgeRpc>([this](const AddInEdgeReq &req) {
-    auto to_delta = database::StateDelta::AddInEdge(
-        req.member.tx_id, req.member.to, req.member.from,
-        req.member.edge_address, req.member.edge_type);
-    auto result =
-        GetUpdates(vertex_updates_, req.member.tx_id).Emplace(to_delta);
-    return std::make_unique<AddInEdgeRes>(result);
-  });
-
-  server.Register<RemoveVertexRpc>([this](const RemoveVertexReq &req) {
-    auto to_delta = database::StateDelta::RemoveVertex(
-        req.member.tx_id, req.member.gid, req.member.check_empty);
-    auto result =
-        GetUpdates(vertex_updates_, req.member.tx_id).Emplace(to_delta);
-    return std::make_unique<RemoveVertexRes>(result);
-  });
-
-  server.Register<RemoveEdgeRpc>([this](const RemoveEdgeReq &req) {
-    return std::make_unique<RemoveEdgeRes>(RemoveEdge(req.member));
-  });
-
-  server.Register<RemoveInEdgeRpc>([this](const RemoveInEdgeReq &req) {
-    auto data = req.member;
-    return std::make_unique<RemoveInEdgeRes>(
-        GetUpdates(vertex_updates_, data.tx_id)
-            .Emplace(database::StateDelta::RemoveInEdge(data.tx_id, data.vertex,
-                                                        data.edge_address)));
-  });
-}
-
-UpdateResult UpdatesRpcServer::Apply(tx::TransactionId tx_id) {
-  auto apply = [tx_id](auto &collection) {
-    auto access = collection.access();
-    auto found = access.find(tx_id);
-    if (found == access.end()) {
-      return UpdateResult::DONE;
-    }
-    auto result = found->second.Apply();
-    access.remove(tx_id);
-    return result;
-  };
-
-  auto vertex_result = apply(vertex_updates_);
-  auto edge_result = apply(edge_updates_);
-  if (vertex_result != UpdateResult::DONE) return vertex_result;
-  if (edge_result != UpdateResult::DONE) return edge_result;
-  return UpdateResult::DONE;
-}
-
-void UpdatesRpcServer::ClearTransactionalCache(
-    tx::TransactionId oldest_active) {
-  auto vertex_access = vertex_updates_.access();
-  for (auto &kv : vertex_access) {
-    if (kv.first < oldest_active) {
-      vertex_access.remove(kv.first);
-    }
-  }
-  auto edge_access = edge_updates_.access();
-  for (auto &kv : edge_access) {
-    if (kv.first < oldest_active) {
-      edge_access.remove(kv.first);
-    }
-  }
-}
-
-// Gets/creates the TransactionUpdates for the given transaction.
-template <typename TAccessor>
-UpdatesRpcServer::TransactionUpdates<TAccessor> &UpdatesRpcServer::GetUpdates(
-    MapT<TAccessor> &updates, tx::TransactionId tx_id) {
-  return updates.access()
-      .emplace(tx_id, std::make_tuple(tx_id),
-               std::make_tuple(std::ref(db_), tx_id))
-      .first->second;
-}
-
-CreateResult UpdatesRpcServer::CreateEdge(const CreateEdgeReqData &req) {
-  auto gid = GetUpdates(edge_updates_, req.tx_id)
-                 .CreateEdge(req.from, req.to, req.edge_type);
-
-  auto from_delta = database::StateDelta::AddOutEdge(
-      req.tx_id, req.from, req.to, {gid, db_.WorkerId()}, req.edge_type);
-
-  auto result = GetUpdates(vertex_updates_, req.tx_id).Emplace(from_delta);
-  return {result, gid};
-}
-
-UpdateResult UpdatesRpcServer::RemoveEdge(const RemoveEdgeData &data) {
-  // Edge removal.
-  auto deletion_delta =
-      database::StateDelta::RemoveEdge(data.tx_id, data.edge_id);
-  auto result = GetUpdates(edge_updates_, data.tx_id).Emplace(deletion_delta);
-
-  // Out-edge removal; this is always local.
-  if (result == UpdateResult::DONE) {
-    auto remove_out_delta = database::StateDelta::RemoveOutEdge(
-        data.tx_id, data.vertex_from_id, {data.edge_id, db_.WorkerId()});
-    result = GetUpdates(vertex_updates_, data.tx_id).Emplace(remove_out_delta);
-  }
-
-  // In-edge removal, which might not be local.
-  if (result == UpdateResult::DONE &&
-      data.vertex_to_address.worker_id() == db_.WorkerId()) {
-    auto remove_in_delta = database::StateDelta::RemoveInEdge(
-        data.tx_id, data.vertex_to_address.gid(),
-        {data.edge_id, db_.WorkerId()});
-    result = GetUpdates(vertex_updates_, data.tx_id).Emplace(remove_in_delta);
-  }
-
-  return result;
-}
-
-template <>
-VertexAccessor UpdatesRpcServer::TransactionUpdates<
-    VertexAccessor>::FindAccessor(gid::Gid gid) {
-  return db_accessor_.FindVertex(gid, false);
-}
-
-template <>
-EdgeAccessor UpdatesRpcServer::TransactionUpdates<EdgeAccessor>::FindAccessor(
-    gid::Gid gid) {
-  return db_accessor_.FindEdge(gid, false);
-}
-
-}  // namespace distributed
diff --git a/src/distributed/updates_rpc_server.hpp b/src/distributed/updates_rpc_server.hpp
deleted file mode 100644
index de3bef334..000000000
--- a/src/distributed/updates_rpc_server.hpp
+++ /dev/null
@@ -1,104 +0,0 @@
-#pragma once
-
-#include <unordered_map>
-#include <vector>
-
-#include "glog/logging.h"
-
-#include "communication/rpc/server.hpp"
-#include "data_structures/concurrent/concurrent_map.hpp"
-#include "database/graph_db.hpp"
-#include "database/graph_db_accessor.hpp"
-#include "database/state_delta.hpp"
-#include "distributed/updates_rpc_messages.hpp"
-#include "query/typed_value.hpp"
-#include "storage/edge_accessor.hpp"
-#include "storage/gid.hpp"
-#include "storage/types.hpp"
-#include "storage/vertex_accessor.hpp"
-#include "threading/sync/spinlock.hpp"
-#include "transactions/type.hpp"
-
-namespace distributed {
-
-/// An RPC server that accepts and holds deferred updates (deltas) until it's
-/// told to apply or discard them. The updates are organized and applied per
-/// transaction in this single updates server.
-///
-/// Attempts to get serialization and update-after-delete errors to happen as
-/// soon as possible during query execution (fail fast).
-class UpdatesRpcServer {
-  // Remote updates for one transaction.
-  template <typename TRecordAccessor>
-  class TransactionUpdates {
-   public:
-    TransactionUpdates(database::GraphDb &db, tx::TransactionId tx_id)
-        : db_accessor_(db, tx_id) {}
-
-    /// Adds a delta and returns the result. Does not modify the state (data) of
-    /// the graph element the update is for, but calls the `update` method to
-    /// fail-fast on serialization and update-after-delete errors.
-    UpdateResult Emplace(const database::StateDelta &delta);
-
-    /// Creates a new vertex and returns its gid.
-    gid::Gid CreateVertex(
-        const std::vector<storage::Label> &labels,
-        const std::unordered_map<storage::Property, query::TypedValue>
-            &properties);
-
-    /// Creates a new edge and returns its gid. Does not update the vertices at
-    /// either end of the edge.
-    gid::Gid CreateEdge(gid::Gid from, storage::VertexAddress to,
-                        storage::EdgeType edge_type);
-
-    /// Applies all the deltas on the record.
-    UpdateResult Apply();
-
-    auto &db_accessor() { return db_accessor_; }
-
-   private:
-    database::GraphDbAccessor db_accessor_;
-    std::unordered_map<
-        gid::Gid, std::pair<TRecordAccessor, std::vector<database::StateDelta>>>
-        deltas_;
-    // Multiple workers might be sending remote updates concurrently.
-    SpinLock lock_;
-
-    // Helper method specialized for [Vertex|Edge]Accessor.
-    TRecordAccessor FindAccessor(gid::Gid gid);
-  };
-
- public:
-  UpdatesRpcServer(database::GraphDb &db, communication::rpc::Server &server);
-
-  /// Applies all existing updates for the given transaction ID. If there are
-  /// no updates for that transaction, nothing happens. Clears the updates cache
-  /// after applying them, regardless of the result.
-  UpdateResult Apply(tx::TransactionId tx_id);
-
-  /// Clears the cache of local transactions that are completed. The signature
-  /// of this method is dictated by `distributed::TransactionalCacheCleaner`.
-  void ClearTransactionalCache(tx::TransactionId oldest_active);
-
- private:
-  database::GraphDb &db_;
-
-  template <typename TAccessor>
-  using MapT =
-      ConcurrentMap<tx::TransactionId, TransactionUpdates<TAccessor>>;
-  MapT<VertexAccessor> vertex_updates_;
-  MapT<EdgeAccessor> edge_updates_;
-
-  // Gets/creates the TransactionUpdates for the given transaction.
-  template <typename TAccessor>
-  TransactionUpdates<TAccessor> &GetUpdates(MapT<TAccessor> &updates,
-                                            tx::TransactionId tx_id);
-
-  // Performs edge creation for the given request.
-  CreateResult CreateEdge(const CreateEdgeReqData &req);
-
-  // Performs edge removal for the given request.
-  UpdateResult RemoveEdge(const RemoveEdgeData &data);
-};
-
-}  // namespace distributed
diff --git a/src/durability/recovery.hpp b/src/durability/recovery.hpp
index ccb8b5f28..226e21ac3 100644
--- a/src/durability/recovery.hpp
+++ b/src/durability/recovery.hpp
@@ -27,15 +27,6 @@ struct RecoveryInfo {
            max_wal_tx_id == other.max_wal_tx_id;
   }
   bool operator!=(const RecoveryInfo &other) const { return !(*this == other); }
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &snapshot_tx_id;
-    ar &max_wal_tx_id;
-  }
 };
 
 /** Reads snapshot metadata from the end of the file without messing up the
diff --git a/src/io/network/endpoint.hpp b/src/io/network/endpoint.hpp
index 5c7e8a477..64c9aaf78 100644
--- a/src/io/network/endpoint.hpp
+++ b/src/io/network/endpoint.hpp
@@ -5,8 +5,6 @@
 #include <iostream>
 #include <string>
 
-#include "boost/serialization/access.hpp"
-
 #include "utils/exceptions.hpp"
 
 namespace io::network {
@@ -29,15 +27,6 @@ class Endpoint {
   friend std::ostream &operator<<(std::ostream &os, const Endpoint &endpoint);
 
  private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &address_;
-    ar &port_;
-    ar &family_;
-  }
-
   std::string address_;
   uint16_t port_{0};
   unsigned char family_{0};
diff --git a/src/memgraph_bolt.cpp b/src/memgraph_bolt.cpp
index a1bac08c9..28d73773b 100644
--- a/src/memgraph_bolt.cpp
+++ b/src/memgraph_bolt.cpp
@@ -12,9 +12,9 @@
 #include <glog/logging.h>
 
 #include "communication/bolt/v1/session.hpp"
+#include "communication/server.hpp"
 #include "config.hpp"
 #include "database/graph_db.hpp"
-#include "stats/stats.hpp"
 #include "utils/flag_validation.hpp"
 #include "utils/signals.hpp"
 #include "utils/sysinfo/memory.hpp"
@@ -118,6 +118,7 @@ void InitSignalHandlers(const std::function<void()> &shutdown_fun) {
 int WithInit(int argc, char **argv,
              const std::function<std::string()> &get_stats_prefix,
              const std::function<void()> &memgraph_main) {
+  google::SetUsageMessage("Memgraph database server");
   gflags::SetVersionString(version_string);
 
   // Load config before parsing arguments, so that flags from the command line
@@ -132,9 +133,6 @@ int WithInit(int argc, char **argv,
   // Unhandled exception handler init.
   std::set_terminate(&utils::TerminateHandler);
 
-  stats::InitStatsLogging(get_stats_prefix());
-  utils::OnScopeExit stop_stats([] { stats::StopStatsLogging(); });
-
   // Start memory warning logger.
   utils::Scheduler mem_log_scheduler;
   if (FLAGS_memory_warning_threshold > 0) {
@@ -150,7 +148,6 @@ int WithInit(int argc, char **argv,
 }
 
 void SingleNodeMain() {
-  google::SetUsageMessage("Memgraph single-node database server");
   database::SingleNode db;
   SessionData session_data{db};
   ServerT server({FLAGS_interface, static_cast<uint16_t>(FLAGS_port)},
@@ -170,71 +167,6 @@ void SingleNodeMain() {
 
 // End common stuff for enterprise and community editions
 
-#ifdef MG_COMMUNITY
-
 int main(int argc, char **argv) {
   return WithInit(argc, argv, []() { return "memgraph"; }, SingleNodeMain);
 }
-
-#else  // enterprise edition
-
-// Distributed flags.
-DEFINE_HIDDEN_bool(
-    master, false,
-    "If this Memgraph server is the master in a distributed deployment.");
-DEFINE_HIDDEN_bool(
-    worker, false,
-    "If this Memgraph server is a worker in a distributed deployment.");
-DECLARE_int32(worker_id);
-
-void MasterMain() {
-  google::SetUsageMessage("Memgraph distributed master");
-
-  database::Master db;
-  SessionData session_data{db};
-  ServerT server({FLAGS_interface, static_cast<uint16_t>(FLAGS_port)},
-                 session_data, FLAGS_session_inactivity_timeout, "Bolt",
-                 FLAGS_num_workers);
-
-  // Handler for regular termination signals
-  auto shutdown = [&server] {
-    // Server needs to be shutdown first and then the database. This prevents a
-    // race condition when a transaction is accepted during server shutdown.
-    server.Shutdown();
-  };
-
-  InitSignalHandlers(shutdown);
-  server.AwaitShutdown();
-}
-
-void WorkerMain() {
-  google::SetUsageMessage("Memgraph distributed worker");
-  database::Worker db;
-  db.WaitForShutdown();
-}
-
-int main(int argc, char **argv) {
-  auto get_stats_prefix = [&]() -> std::string {
-    if (FLAGS_master) {
-      return "master";
-    } else if (FLAGS_worker) {
-      return fmt::format("worker-{}", FLAGS_worker_id);
-    }
-    return "memgraph";
-  };
-
-  auto memgraph_main = [&]() {
-    CHECK(!(FLAGS_master && FLAGS_worker))
-        << "Can't run Memgraph as worker and master at the same time";
-    if (FLAGS_master)
-      MasterMain();
-    else if (FLAGS_worker)
-      WorkerMain();
-    else
-      SingleNodeMain();
-  };
-
-  return WithInit(argc, argv, get_stats_prefix, memgraph_main);
-}
-
-#endif  // enterprise edition
diff --git a/src/query/common.hpp b/src/query/common.hpp
index eadd9b530..2d168983e 100644
--- a/src/query/common.hpp
+++ b/src/query/common.hpp
@@ -3,7 +3,6 @@
 #include <cstdint>
 #include <string>
 
-#include "boost/serialization/serialization.hpp"
 #include "query/frontend/ast/ast.hpp"
 #include "query/typed_value.hpp"
 
@@ -54,12 +53,6 @@ class TypedValueVectorCompare {
  private:
   std::vector<Ordering> ordering_;
 
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &ordering_;
-  }
   // Custom comparison for TypedValue objects.
   //
   // Behaves generally like Neo's ORDER BY comparison operator:
diff --git a/src/query/frontend/ast/ast.cpp b/src/query/frontend/ast/ast.cpp
index 0359c494c..424ac57d8 100644
--- a/src/query/frontend/ast/ast.cpp
+++ b/src/query/frontend/ast/ast.cpp
@@ -1,10 +1,5 @@
 #include "query/frontend/ast/ast.hpp"
 
-// Include archives before registering most derived types.
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "boost/serialization/export.hpp"
-
 namespace query {
 
 // Id for boost's archive get_helper needs to be unique among all ids. If it
@@ -43,59 +38,3 @@ ReturnBody CloneReturnBody(AstTreeStorage &storage, const ReturnBody &body) {
 }
 
 }  // namespace query
-
-BOOST_CLASS_EXPORT_IMPLEMENT(query::Query);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::SingleQuery);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::CypherUnion);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::NamedExpression);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::OrOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::XorOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::AndOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::NotOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::AdditionOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::SubtractionOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::MultiplicationOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::DivisionOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::ModOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::NotEqualOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::EqualOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::LessOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::GreaterOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::LessEqualOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::GreaterEqualOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::InListOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::ListMapIndexingOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::ListSlicingOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::IfOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::UnaryPlusOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::UnaryMinusOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::IsNullOperator);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::ListLiteral);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::MapLiteral);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::PropertyLookup);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::LabelsTest);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::Aggregation);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::Function);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::Reduce);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::All);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::Single);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::ParameterLookup);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::Create);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::Match);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::Return);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::With);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::Pattern);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::NodeAtom);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::EdgeAtom);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::Delete);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::Where);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::SetProperty);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::SetProperties);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::SetLabels);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::RemoveProperty);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::RemoveLabels);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::Merge);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::Unwind);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::Identifier);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::PrimitiveLiteral);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::CreateIndex);
diff --git a/src/query/frontend/ast/ast.hpp b/src/query/frontend/ast/ast.hpp
index 049edec2f..50dd75532 100644
--- a/src/query/frontend/ast/ast.hpp
+++ b/src/query/frontend/ast/ast.hpp
@@ -4,18 +4,11 @@
 #include <unordered_map>
 #include <vector>
 
-#include "boost/serialization/base_object.hpp"
-#include "boost/serialization/export.hpp"
-#include "boost/serialization/split_member.hpp"
-#include "boost/serialization/string.hpp"
-#include "boost/serialization/vector.hpp"
-
 #include "query/frontend/ast/ast_visitor.hpp"
 #include "query/frontend/semantic/symbol.hpp"
 #include "query/interpret/awesome_memgraph_functions.hpp"
 #include "query/typed_value.hpp"
 #include "storage/types.hpp"
-#include "utils/serialization.hpp"
 
 // Hash function for the key in pattern atom property maps.
 namespace std {
@@ -53,12 +46,6 @@ namespace query {
         expression_->Clone(storage));                                        \
   }
 
-#define SERIALIZE_USING_BASE(BaseClass)                      \
-  template <class TArchive>                                  \
-  void serialize(TArchive &ar, const unsigned int) {         \
-    ar &boost::serialization::base_object<BaseClass>(*this); \
-  }
-
 class Tree;
 
 // It would be better to call this AstTree, but we already have a class Tree,
@@ -171,13 +158,6 @@ class Tree : public ::utils::Visitable<HierarchicalTreeVisitor>,
 
  private:
   int uid_;
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &uid_;
-  }
 };
 
 // Expressions
@@ -190,10 +170,6 @@ class Expression : public Tree {
 
  protected:
   explicit Expression(int uid) : Tree(uid) {}
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(Tree);
 };
 
 class Where : public Tree {
@@ -217,27 +193,6 @@ class Where : public Tree {
  protected:
   explicit Where(int uid) : Tree(uid) {}
   Where(int uid, Expression *expression) : Tree(uid), expression_(expression) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Tree>(*this);
-    SavePointer(ar, expression_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Tree>(*this);
-    LoadPointer(ar, expression_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, Where *,
-                                                        const unsigned int);
 };
 
 class BinaryOperator : public Expression {
@@ -253,25 +208,6 @@ class BinaryOperator : public Expression {
   explicit BinaryOperator(int uid) : Expression(uid) {}
   BinaryOperator(int uid, Expression *expression1, Expression *expression2)
       : Expression(uid), expression1_(expression1), expression2_(expression2) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Expression>(*this);
-    SavePointer(ar, expression1_);
-    SavePointer(ar, expression2_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Expression>(*this);
-    LoadPointer(ar, expression1_);
-    LoadPointer(ar, expression2_);
-  }
 };
 
 class UnaryOperator : public Expression {
@@ -286,23 +222,6 @@ class UnaryOperator : public Expression {
   explicit UnaryOperator(int uid) : Expression(uid) {}
   UnaryOperator(int uid, Expression *expression)
       : Expression(uid), expression_(expression) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Expression>(*this);
-    SavePointer(ar, expression_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Expression>(*this);
-    LoadPointer(ar, expression_);
-  }
 };
 
 class OrOperator : public BinaryOperator {
@@ -320,14 +239,6 @@ class OrOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        OrOperator *,
-                                                        const unsigned int);
 };
 
 class XorOperator : public BinaryOperator {
@@ -345,14 +256,6 @@ class XorOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        XorOperator *,
-                                                        const unsigned int);
 };
 
 class AndOperator : public BinaryOperator {
@@ -370,14 +273,6 @@ class AndOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        AndOperator *,
-                                                        const unsigned int);
 };
 
 class AdditionOperator : public BinaryOperator {
@@ -395,14 +290,6 @@ class AdditionOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        AdditionOperator *,
-                                                        const unsigned int);
 };
 
 class SubtractionOperator : public BinaryOperator {
@@ -420,14 +307,6 @@ class SubtractionOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        SubtractionOperator *,
-                                                        const unsigned int);
 };
 
 class MultiplicationOperator : public BinaryOperator {
@@ -445,13 +324,6 @@ class MultiplicationOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(
-      TArchive &, MultiplicationOperator *, const unsigned int);
 };
 
 class DivisionOperator : public BinaryOperator {
@@ -469,14 +341,6 @@ class DivisionOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        DivisionOperator *,
-                                                        const unsigned int);
 };
 
 class ModOperator : public BinaryOperator {
@@ -494,14 +358,6 @@ class ModOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        ModOperator *,
-                                                        const unsigned int);
 };
 
 class NotEqualOperator : public BinaryOperator {
@@ -519,14 +375,6 @@ class NotEqualOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        NotEqualOperator *,
-                                                        const unsigned int);
 };
 
 class EqualOperator : public BinaryOperator {
@@ -544,14 +392,6 @@ class EqualOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        EqualOperator *,
-                                                        const unsigned int);
 };
 
 class LessOperator : public BinaryOperator {
@@ -569,14 +409,6 @@ class LessOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        LessOperator *,
-                                                        const unsigned int);
 };
 
 class GreaterOperator : public BinaryOperator {
@@ -594,14 +426,6 @@ class GreaterOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        GreaterOperator *,
-                                                        const unsigned int);
 };
 
 class LessEqualOperator : public BinaryOperator {
@@ -619,14 +443,6 @@ class LessEqualOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        LessEqualOperator *,
-                                                        const unsigned int);
 };
 
 class GreaterEqualOperator : public BinaryOperator {
@@ -644,14 +460,6 @@ class GreaterEqualOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        GreaterEqualOperator *,
-                                                        const unsigned int);
 };
 
 class InListOperator : public BinaryOperator {
@@ -669,14 +477,6 @@ class InListOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        InListOperator *,
-                                                        const unsigned int);
 };
 
 class ListMapIndexingOperator : public BinaryOperator {
@@ -694,13 +494,6 @@ class ListMapIndexingOperator : public BinaryOperator {
 
  protected:
   using BinaryOperator::BinaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(BinaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(
-      TArchive &, ListMapIndexingOperator *, const unsigned int);
 };
 
 class ListSlicingOperator : public Expression {
@@ -739,32 +532,6 @@ class ListSlicingOperator : public Expression {
         list_(list),
         lower_bound_(lower_bound),
         upper_bound_(upper_bound) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Expression>(*this);
-    SavePointer(ar, list_);
-    SavePointer(ar, lower_bound_);
-    SavePointer(ar, upper_bound_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Expression>(*this);
-    LoadPointer(ar, list_);
-    LoadPointer(ar, lower_bound_);
-    LoadPointer(ar, upper_bound_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        ListSlicingOperator *,
-                                                        const unsigned int);
 };
 
 class IfOperator : public Expression {
@@ -799,32 +566,6 @@ class IfOperator : public Expression {
         condition_(condition),
         then_expression_(then_expression),
         else_expression_(else_expression) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Expression>(*this);
-    SavePointer(ar, condition_);
-    SavePointer(ar, then_expression_);
-    SavePointer(ar, else_expression_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Expression>(*this);
-    LoadPointer(ar, condition_);
-    LoadPointer(ar, then_expression_);
-    LoadPointer(ar, else_expression_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        IfOperator *,
-                                                        const unsigned int);
 };
 
 class NotOperator : public UnaryOperator {
@@ -842,14 +583,6 @@ class NotOperator : public UnaryOperator {
 
  protected:
   using UnaryOperator::UnaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(UnaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        NotOperator *,
-                                                        const unsigned int);
 };
 
 class UnaryPlusOperator : public UnaryOperator {
@@ -867,14 +600,6 @@ class UnaryPlusOperator : public UnaryOperator {
 
  protected:
   using UnaryOperator::UnaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(UnaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        UnaryPlusOperator *,
-                                                        const unsigned int);
 };
 
 class UnaryMinusOperator : public UnaryOperator {
@@ -892,14 +617,6 @@ class UnaryMinusOperator : public UnaryOperator {
 
  protected:
   using UnaryOperator::UnaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(UnaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        UnaryMinusOperator *,
-                                                        const unsigned int);
 };
 
 class IsNullOperator : public UnaryOperator {
@@ -917,14 +634,6 @@ class IsNullOperator : public UnaryOperator {
 
  protected:
   using UnaryOperator::UnaryOperator;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(UnaryOperator);
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        IsNullOperator *,
-                                                        const unsigned int);
 };
 
 class BaseLiteral : public Expression {
@@ -935,10 +644,6 @@ class BaseLiteral : public Expression {
 
  protected:
   explicit BaseLiteral(int uid) : Expression(uid) {}
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(Expression);
 };
 
 class PrimitiveLiteral : public BaseLiteral {
@@ -965,30 +670,6 @@ class PrimitiveLiteral : public BaseLiteral {
   template <typename T>
   PrimitiveLiteral(int uid, T value, int token_position)
       : BaseLiteral(uid), value_(value), token_position_(token_position) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<BaseLiteral>(*this);
-    ar << token_position_;
-    utils::SaveTypedValue(ar, value_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<BaseLiteral>(*this);
-    ar >> token_position_;
-    utils::LoadTypedValue(ar, value_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        PrimitiveLiteral *,
-                                                        const unsigned int);
 };
 
 class ListLiteral : public BaseLiteral {
@@ -1018,28 +699,6 @@ class ListLiteral : public BaseLiteral {
   explicit ListLiteral(int uid) : BaseLiteral(uid) {}
   ListLiteral(int uid, const std::vector<Expression *> &elements)
       : BaseLiteral(uid), elements_(elements) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<BaseLiteral>(*this);
-    SavePointers(ar, elements_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<BaseLiteral>(*this);
-    LoadPointers(ar, elements_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        ListLiteral *,
-                                                        const unsigned int);
 };
 
 class MapLiteral : public BaseLiteral {
@@ -1072,44 +731,6 @@ class MapLiteral : public BaseLiteral {
              const std::unordered_map<std::pair<std::string, storage::Property>,
                                       Expression *> &elements)
       : BaseLiteral(uid), elements_(elements) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<BaseLiteral>(*this);
-    ar << elements_.size();
-    for (const auto &element : elements_) {
-      const auto &property = element.first;
-      ar << property.first;
-      ar << property.second;
-      SavePointer(ar, element.second);
-    }
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<BaseLiteral>(*this);
-    size_t size = 0;
-    ar >> size;
-    for (size_t i = 0; i < size; ++i) {
-      std::pair<std::string, storage::Property> property;
-      ar >> property.first;
-      ar >> property.second;
-      Expression *expression = nullptr;
-      LoadPointer(ar, expression);
-      DCHECK(expression) << "Unexpected nullptr expression serialized";
-      elements_.emplace(property, expression);
-    }
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        MapLiteral *,
-                                                        const unsigned int);
 };
 
 class Identifier : public Expression {
@@ -1130,21 +751,6 @@ class Identifier : public Expression {
   Identifier(int uid, const std::string &name) : Expression(uid), name_(name) {}
   Identifier(int uid, const std::string &name, bool user_declared)
       : Expression(uid), name_(name), user_declared_(user_declared) {}
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<Expression>(*this);
-    ar &name_;
-    ar &user_declared_;
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        Identifier *,
-                                                        const unsigned int);
 };
 
 class PropertyLookup : public Expression {
@@ -1181,32 +787,6 @@ class PropertyLookup : public Expression {
         expression_(expression),
         property_name_(property.first),
         property_(property.second) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Expression>(*this);
-    SavePointer(ar, expression_);
-    ar << property_name_;
-    ar << property_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Expression>(*this);
-    LoadPointer(ar, expression_);
-    ar >> property_name_;
-    ar >> property_;
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        PropertyLookup *,
-                                                        const unsigned int);
 };
 
 class LabelsTest : public Expression {
@@ -1232,30 +812,6 @@ class LabelsTest : public Expression {
   LabelsTest(int uid, Expression *expression,
              const std::vector<storage::Label> &labels)
       : Expression(uid), expression_(expression), labels_(labels) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Expression>(*this);
-    SavePointer(ar, expression_);
-    ar << labels_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Expression>(*this);
-    LoadPointer(ar, expression_);
-    ar >> labels_;
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        LabelsTest *,
-                                                        const unsigned int);
 };
 
 class Function : public Expression {
@@ -1301,30 +857,6 @@ class Function : public Expression {
   std::function<TypedValue(const std::vector<TypedValue> &,
                            database::GraphDbAccessor &)>
       function_;
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Expression>(*this);
-    ar << function_name_;
-    SavePointers(ar, arguments_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Expression>(*this);
-    ar >> function_name_;
-    function_ = NameToFunction(function_name_);
-    DCHECK(function_) << "Unexpected missing function: " << function_name_;
-    LoadPointers(ar, arguments_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, Function *,
-                                                        const unsigned int);
 };
 
 class Aggregation : public BinaryOperator {
@@ -1374,20 +906,6 @@ class Aggregation : public BinaryOperator {
         << "The second expression is obligatory in COLLECT_MAP and "
            "invalid otherwise";
   }
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<BinaryOperator>(*this);
-    ar &op_;
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        Aggregation *,
-                                                        const unsigned int);
 };
 
 class Reduce : public Expression {
@@ -1433,35 +951,6 @@ class Reduce : public Expression {
         identifier_(identifier),
         list_(list),
         expression_(expression) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Expression>(*this);
-    SavePointer(ar, accumulator_);
-    SavePointer(ar, initializer_);
-    SavePointer(ar, identifier_);
-    SavePointer(ar, list_);
-    SavePointer(ar, expression_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Expression>(*this);
-    LoadPointer(ar, accumulator_);
-    LoadPointer(ar, initializer_);
-    LoadPointer(ar, identifier_);
-    LoadPointer(ar, list_);
-    LoadPointer(ar, expression_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, Reduce *,
-                                                        const unsigned int);
 };
 
 // TODO: Think about representing All and Any as Reduce.
@@ -1496,31 +985,6 @@ class All : public Expression {
         identifier_(identifier),
         list_expression_(list_expression),
         where_(where) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Expression>(*this);
-    SavePointer(ar, identifier_);
-    SavePointer(ar, list_expression_);
-    SavePointer(ar, where_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Expression>(*this);
-    LoadPointer(ar, identifier_);
-    LoadPointer(ar, list_expression_);
-    LoadPointer(ar, where_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, All *,
-                                                        const unsigned int);
 };
 
 // TODO: This is pretty much copy pasted from All. Consider merging Reduce, All,
@@ -1557,31 +1021,6 @@ class Single : public Expression {
         identifier_(identifier),
         list_expression_(list_expression),
         where_(where) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Expression>(*this);
-    SavePointer(ar, identifier_);
-    SavePointer(ar, list_expression_);
-    SavePointer(ar, where_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Expression>(*this);
-    LoadPointer(ar, identifier_);
-    LoadPointer(ar, list_expression_);
-    LoadPointer(ar, where_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, Single *,
-                                                        const unsigned int);
 };
 
 class ParameterLookup : public Expression {
@@ -1604,19 +1043,6 @@ class ParameterLookup : public Expression {
   explicit ParameterLookup(int uid) : Expression(uid) {}
   ParameterLookup(int uid, int token_position)
       : Expression(uid), token_position_(token_position) {}
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<Expression>(*this);
-    ar &token_position_;
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        ParameterLookup *,
-                                                        const unsigned int);
 };
 
 class NamedExpression : public Tree {
@@ -1654,32 +1080,6 @@ class NamedExpression : public Tree {
         name_(name),
         expression_(expression),
         token_position_(token_position) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Tree>(*this);
-    ar << name_;
-    SavePointer(ar, expression_);
-    ar << token_position_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Tree>(*this);
-    ar >> name_;
-    LoadPointer(ar, expression_);
-    ar >> token_position_;
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        NamedExpression *,
-                                                        const unsigned int);
 };
 
 // Pattern atoms
@@ -1696,23 +1096,6 @@ class PatternAtom : public Tree {
   explicit PatternAtom(int uid) : Tree(uid) {}
   PatternAtom(int uid, Identifier *identifier)
       : Tree(uid), identifier_(identifier) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Tree>(*this);
-    SavePointer(ar, identifier_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Tree>(*this);
-    LoadPointer(ar, identifier_);
-  }
 };
 
 class NodeAtom : public PatternAtom {
@@ -1748,45 +1131,6 @@ class NodeAtom : public PatternAtom {
 
  protected:
   using PatternAtom::PatternAtom;
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<PatternAtom>(*this);
-    ar << labels_;
-    ar << properties_.size();
-    for (const auto &property : properties_) {
-      const auto &key = property.first;
-      ar << key.first;
-      ar << key.second;
-      SavePointer(ar, property.second);
-    }
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<PatternAtom>(*this);
-    ar >> labels_;
-    size_t size = 0;
-    ar >> size;
-    for (size_t i = 0; i < size; ++i) {
-      std::pair<std::string, storage::Property> property;
-      ar >> property.first;
-      ar >> property.second;
-      Expression *expression = nullptr;
-      LoadPointer(ar, expression);
-      DCHECK(expression) << "Unexpected nullptr expression serialized";
-      properties_.emplace(property, expression);
-    }
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, NodeAtom *,
-                                                        const unsigned int);
 };
 
 class EdgeAtom : public PatternAtom {
@@ -1905,69 +1249,6 @@ class EdgeAtom : public PatternAtom {
         type_(type),
         direction_(direction),
         edge_types_(edge_types) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<PatternAtom>(*this);
-    ar << type_;
-    ar << direction_;
-    ar << edge_types_;
-    ar << properties_.size();
-    for (const auto &property : properties_) {
-      const auto &key = property.first;
-      ar << key.first;
-      ar << key.second;
-      SavePointer(ar, property.second);
-    }
-    SavePointer(ar, lower_bound_);
-    SavePointer(ar, upper_bound_);
-    auto save_lambda = [&ar](const auto &lambda) {
-      SavePointer(ar, lambda.inner_edge);
-      SavePointer(ar, lambda.inner_node);
-      SavePointer(ar, lambda.expression);
-    };
-    save_lambda(filter_lambda_);
-    save_lambda(weight_lambda_);
-    SavePointer(ar, total_weight_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<PatternAtom>(*this);
-    ar >> type_;
-    ar >> direction_;
-    ar >> edge_types_;
-    size_t size = 0;
-    ar >> size;
-    for (size_t i = 0; i < size; ++i) {
-      std::pair<std::string, storage::Property> property;
-      ar >> property.first;
-      ar >> property.second;
-      Expression *expression = nullptr;
-      LoadPointer(ar, expression);
-      DCHECK(expression) << "Unexpected nullptr expression serialized";
-      properties_.emplace(property, expression);
-    }
-    LoadPointer(ar, lower_bound_);
-    LoadPointer(ar, upper_bound_);
-    auto load_lambda = [&ar](auto &lambda) {
-      LoadPointer(ar, lambda.inner_edge);
-      LoadPointer(ar, lambda.inner_node);
-      LoadPointer(ar, lambda.expression);
-    };
-    load_lambda(filter_lambda_);
-    load_lambda(weight_lambda_);
-    LoadPointer(ar, total_weight_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, EdgeAtom *,
-                                                        const unsigned int);
 };
 
 class Pattern : public Tree {
@@ -2001,29 +1282,6 @@ class Pattern : public Tree {
 
  protected:
   explicit Pattern(int uid) : Tree(uid) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Tree>(*this);
-    SavePointer(ar, identifier_);
-    SavePointers(ar, atoms_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Tree>(*this);
-    LoadPointer(ar, identifier_);
-    LoadPointers(ar, atoms_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, Pattern *,
-                                                        const unsigned int);
 };
 
 // Clause
@@ -2035,10 +1293,6 @@ class Clause : public Tree {
   explicit Clause(int uid) : Tree(uid) {}
 
   Clause *Clone(AstTreeStorage &storage) const override = 0;
-
- private:
-  friend class boost::serialization::access;
-  SERIALIZE_USING_BASE(Tree);
 };
 
 // SingleQuery
@@ -2069,28 +1323,6 @@ class SingleQuery : public Tree {
 
  protected:
   explicit SingleQuery(int uid) : Tree(uid) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Tree>(*this);
-    SavePointers(ar, clauses_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Tree>(*this);
-    LoadPointers(ar, clauses_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        SingleQuery *,
-                                                        const unsigned int);
 };
 
 // CypherUnion
@@ -2123,32 +1355,6 @@ class CypherUnion : public Tree {
  protected:
   explicit CypherUnion(int uid) : Tree(uid) {}
   CypherUnion(int uid, bool distinct) : Tree(uid), distinct_(distinct) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Tree>(*this);
-    SavePointer(ar, single_query_);
-    ar << distinct_;
-    ar << union_symbols_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Tree>(*this);
-    LoadPointer(ar, single_query_);
-    ar >> distinct_;
-    ar >> union_symbols_;
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        CypherUnion *,
-                                                        const unsigned int);
 };
 
 // Queries
@@ -2185,29 +1391,6 @@ class Query : public Tree {
 
  protected:
   explicit Query(int uid) : Tree(uid) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Tree>(*this);
-    SavePointer(ar, single_query_);
-    SavePointers(ar, cypher_unions_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Tree>(*this);
-    LoadPointer(ar, single_query_);
-    LoadPointers(ar, cypher_unions_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, Query *,
-                                                        const unsigned int);
 };
 
 // Clauses
@@ -2238,27 +1421,6 @@ class Create : public Clause {
 
  protected:
   explicit Create(int uid) : Clause(uid) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Clause>(*this);
-    SavePointers(ar, patterns_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Clause>(*this);
-    LoadPointers(ar, patterns_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, Create *,
-                                                        const unsigned int);
 };
 
 class Match : public Clause {
@@ -2298,31 +1460,6 @@ class Match : public Clause {
  protected:
   explicit Match(int uid) : Clause(uid) {}
   Match(int uid, bool optional) : Clause(uid), optional_(optional) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Clause>(*this);
-    SavePointers(ar, patterns_);
-    SavePointer(ar, where_);
-    ar << optional_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Clause>(*this);
-    LoadPointers(ar, patterns_);
-    LoadPointer(ar, where_);
-    ar >> optional_;
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, Match *,
-                                                        const unsigned int);
 };
 
 /// Defines the order for sorting values (ascending or descending).
@@ -2349,44 +1486,6 @@ struct ReturnBody {
 // function class member.
 ReturnBody CloneReturnBody(AstTreeStorage &storage, const ReturnBody &body);
 
-template <class TArchive>
-void serialize(TArchive &ar, ReturnBody &body,
-               const unsigned int file_version) {
-  boost::serialization::split_free(ar, body, file_version);
-}
-
-template <class TArchive>
-void save(TArchive &ar, const ReturnBody &body, const unsigned int) {
-  ar << body.distinct;
-  ar << body.all_identifiers;
-  SavePointers(ar, body.named_expressions);
-  ar << body.order_by.size();
-  for (const auto &order_by : body.order_by) {
-    ar << order_by.first;
-    SavePointer(ar, order_by.second);
-  }
-  SavePointer(ar, body.skip);
-  SavePointer(ar, body.limit);
-}
-
-template <class TArchive>
-void load(TArchive &ar, ReturnBody &body, const unsigned int) {
-  ar >> body.distinct;
-  ar >> body.all_identifiers;
-  LoadPointers(ar, body.named_expressions);
-  size_t size = 0;
-  ar >> size;
-  for (size_t i = 0; i < size; ++i) {
-    std::pair<Ordering, Expression *> order_by;
-    ar >> order_by.first;
-    LoadPointer(ar, order_by.second);
-    DCHECK(order_by.second) << "Unexpected nullptr serialized";
-    body.order_by.emplace_back(order_by);
-  }
-  LoadPointer(ar, body.skip);
-  LoadPointer(ar, body.limit);
-}
-
 class Return : public Clause {
   friend class AstTreeStorage;
 
@@ -2425,19 +1524,6 @@ class Return : public Clause {
 
  protected:
   explicit Return(int uid) : Clause(uid) {}
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<Clause>(*this);
-    ar &body_;
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, Return *,
-                                                        const unsigned int);
 };
 
 class With : public Clause {
@@ -2481,29 +1567,6 @@ class With : public Clause {
 
  protected:
   explicit With(int uid) : Clause(uid) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Clause>(*this);
-    ar << body_;
-    SavePointer(ar, where_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Clause>(*this);
-    ar >> body_;
-    LoadPointer(ar, where_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, With *,
-                                                        const unsigned int);
 };
 
 class Delete : public Clause {
@@ -2534,29 +1597,6 @@ class Delete : public Clause {
 
  protected:
   explicit Delete(int uid) : Clause(uid) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Clause>(*this);
-    SavePointers(ar, expressions_);
-    ar << detach_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Clause>(*this);
-    LoadPointers(ar, expressions_);
-    ar >> detach_;
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, Delete *,
-                                                        const unsigned int);
 };
 
 class SetProperty : public Clause {
@@ -2585,30 +1625,6 @@ class SetProperty : public Clause {
       : Clause(uid),
         property_lookup_(property_lookup),
         expression_(expression) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Clause>(*this);
-    SavePointer(ar, property_lookup_);
-    SavePointer(ar, expression_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Clause>(*this);
-    LoadPointer(ar, property_lookup_);
-    LoadPointer(ar, expression_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        SetProperty *,
-                                                        const unsigned int);
 };
 
 class SetProperties : public Clause {
@@ -2640,32 +1656,6 @@ class SetProperties : public Clause {
         identifier_(identifier),
         expression_(expression),
         update_(update) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Clause>(*this);
-    SavePointer(ar, identifier_);
-    SavePointer(ar, expression_);
-    ar << update_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Clause>(*this);
-    LoadPointer(ar, identifier_);
-    LoadPointer(ar, expression_);
-    ar >> update_;
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        SetProperties *,
-                                                        const unsigned int);
 };
 
 class SetLabels : public Clause {
@@ -2692,29 +1682,6 @@ class SetLabels : public Clause {
   SetLabels(int uid, Identifier *identifier,
             const std::vector<storage::Label> &labels)
       : Clause(uid), identifier_(identifier), labels_(labels) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Clause>(*this);
-    SavePointer(ar, identifier_);
-    ar << labels_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Clause>(*this);
-    LoadPointer(ar, identifier_);
-    ar >> labels_;
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, SetLabels *,
-                                                        const unsigned int);
 };
 
 class RemoveProperty : public Clause {
@@ -2739,28 +1706,6 @@ class RemoveProperty : public Clause {
   explicit RemoveProperty(int uid) : Clause(uid) {}
   RemoveProperty(int uid, PropertyLookup *property_lookup)
       : Clause(uid), property_lookup_(property_lookup) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Clause>(*this);
-    SavePointer(ar, property_lookup_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Clause>(*this);
-    LoadPointer(ar, property_lookup_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        RemoveProperty *,
-                                                        const unsigned int);
 };
 
 class RemoveLabels : public Clause {
@@ -2787,30 +1732,6 @@ class RemoveLabels : public Clause {
   RemoveLabels(int uid, Identifier *identifier,
                const std::vector<storage::Label> &labels)
       : Clause(uid), identifier_(identifier), labels_(labels) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Clause>(*this);
-    SavePointer(ar, identifier_);
-    ar << labels_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Clause>(*this);
-    LoadPointer(ar, identifier_);
-    ar >> labels_;
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        RemoveLabels *,
-                                                        const unsigned int);
 };
 
 class Merge : public Clause {
@@ -2859,31 +1780,6 @@ class Merge : public Clause {
 
  protected:
   explicit Merge(int uid) : Clause(uid) {}
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Clause>(*this);
-    SavePointer(ar, pattern_);
-    SavePointers(ar, on_match_);
-    SavePointers(ar, on_create_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Clause>(*this);
-    LoadPointer(ar, pattern_);
-    LoadPointers(ar, on_match_);
-    LoadPointers(ar, on_create_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, Merge *,
-                                                        const unsigned int);
 };
 
 class Unwind : public Clause {
@@ -2912,27 +1808,6 @@ class Unwind : public Clause {
     DCHECK(named_expression)
         << "Unwind cannot take nullptr for named_expression";
   }
-
- private:
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar << boost::serialization::base_object<Clause>(*this);
-    SavePointer(ar, named_expression_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar >> boost::serialization::base_object<Clause>(*this);
-    LoadPointer(ar, named_expression_);
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &, Unwind *,
-                                                        const unsigned int);
 };
 
 class CreateIndex : public Clause {
@@ -2953,155 +1828,9 @@ class CreateIndex : public Clause {
   explicit CreateIndex(int uid) : Clause(uid) {}
   CreateIndex(int uid, storage::Label label, storage::Property property)
       : Clause(uid), label_(label), property_(property) {}
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<Clause>(*this);
-    ar &label_;
-    ar &property_;
-  }
-
-  template <class TArchive>
-  friend void boost::serialization::load_construct_data(TArchive &,
-                                                        CreateIndex *,
-                                                        const unsigned int);
 };
 
 #undef CLONE_BINARY_EXPRESSION
 #undef CLONE_UNARY_EXPRESSION
-#undef SERIALIZE_USING_BASE
 
 }  // namespace query
-
-// All of the serialization cruft follows
-
-#define LOAD_AND_CONSTRUCT(DerivedClass, ...)             \
-  template <class TArchive>                               \
-  void load_construct_data(TArchive &, DerivedClass *cls, \
-                           const unsigned int) {          \
-    ::new (cls) DerivedClass(__VA_ARGS__);                \
-  }
-
-namespace boost::serialization {
-
-LOAD_AND_CONSTRUCT(query::Where, 0);
-LOAD_AND_CONSTRUCT(query::OrOperator, 0);
-LOAD_AND_CONSTRUCT(query::XorOperator, 0);
-LOAD_AND_CONSTRUCT(query::AndOperator, 0);
-LOAD_AND_CONSTRUCT(query::AdditionOperator, 0);
-LOAD_AND_CONSTRUCT(query::SubtractionOperator, 0);
-LOAD_AND_CONSTRUCT(query::MultiplicationOperator, 0);
-LOAD_AND_CONSTRUCT(query::DivisionOperator, 0);
-LOAD_AND_CONSTRUCT(query::ModOperator, 0);
-LOAD_AND_CONSTRUCT(query::NotEqualOperator, 0);
-LOAD_AND_CONSTRUCT(query::EqualOperator, 0);
-LOAD_AND_CONSTRUCT(query::LessOperator, 0);
-LOAD_AND_CONSTRUCT(query::GreaterOperator, 0);
-LOAD_AND_CONSTRUCT(query::LessEqualOperator, 0);
-LOAD_AND_CONSTRUCT(query::GreaterEqualOperator, 0);
-LOAD_AND_CONSTRUCT(query::InListOperator, 0);
-LOAD_AND_CONSTRUCT(query::ListMapIndexingOperator, 0);
-LOAD_AND_CONSTRUCT(query::ListSlicingOperator, 0, nullptr, nullptr, nullptr);
-LOAD_AND_CONSTRUCT(query::IfOperator, 0, nullptr, nullptr, nullptr);
-LOAD_AND_CONSTRUCT(query::NotOperator, 0);
-LOAD_AND_CONSTRUCT(query::UnaryPlusOperator, 0);
-LOAD_AND_CONSTRUCT(query::UnaryMinusOperator, 0);
-LOAD_AND_CONSTRUCT(query::IsNullOperator, 0);
-LOAD_AND_CONSTRUCT(query::PrimitiveLiteral, 0);
-LOAD_AND_CONSTRUCT(query::ListLiteral, 0);
-LOAD_AND_CONSTRUCT(query::MapLiteral, 0);
-LOAD_AND_CONSTRUCT(query::Identifier, 0, "");
-LOAD_AND_CONSTRUCT(query::PropertyLookup, 0, nullptr, "", storage::Property());
-LOAD_AND_CONSTRUCT(query::LabelsTest, 0, nullptr,
-                   std::vector<storage::Label>());
-LOAD_AND_CONSTRUCT(query::Function, 0);
-LOAD_AND_CONSTRUCT(query::Aggregation, 0, nullptr, nullptr,
-                   query::Aggregation::Op::COUNT);
-LOAD_AND_CONSTRUCT(query::Reduce, 0, nullptr, nullptr, nullptr, nullptr,
-                   nullptr);
-LOAD_AND_CONSTRUCT(query::All, 0, nullptr, nullptr, nullptr);
-LOAD_AND_CONSTRUCT(query::Single, 0, nullptr, nullptr, nullptr);
-LOAD_AND_CONSTRUCT(query::ParameterLookup, 0);
-LOAD_AND_CONSTRUCT(query::NamedExpression, 0);
-LOAD_AND_CONSTRUCT(query::NodeAtom, 0);
-LOAD_AND_CONSTRUCT(query::EdgeAtom, 0);
-LOAD_AND_CONSTRUCT(query::Pattern, 0);
-LOAD_AND_CONSTRUCT(query::SingleQuery, 0);
-LOAD_AND_CONSTRUCT(query::CypherUnion, 0);
-LOAD_AND_CONSTRUCT(query::Query, 0);
-LOAD_AND_CONSTRUCT(query::Create, 0);
-LOAD_AND_CONSTRUCT(query::Match, 0);
-LOAD_AND_CONSTRUCT(query::Return, 0);
-LOAD_AND_CONSTRUCT(query::With, 0);
-LOAD_AND_CONSTRUCT(query::Delete, 0);
-LOAD_AND_CONSTRUCT(query::SetProperty, 0);
-LOAD_AND_CONSTRUCT(query::SetProperties, 0);
-LOAD_AND_CONSTRUCT(query::SetLabels, 0);
-LOAD_AND_CONSTRUCT(query::RemoveProperty, 0);
-LOAD_AND_CONSTRUCT(query::RemoveLabels, 0);
-LOAD_AND_CONSTRUCT(query::Merge, 0);
-LOAD_AND_CONSTRUCT(query::Unwind, 0);
-LOAD_AND_CONSTRUCT(query::CreateIndex, 0);
-
-}  // namespace boost::serialization
-
-#undef LOAD_AND_CONSTRUCT
-
-BOOST_CLASS_EXPORT_KEY(query::Query);
-BOOST_CLASS_EXPORT_KEY(query::SingleQuery);
-BOOST_CLASS_EXPORT_KEY(query::CypherUnion);
-BOOST_CLASS_EXPORT_KEY(query::NamedExpression);
-BOOST_CLASS_EXPORT_KEY(query::OrOperator);
-BOOST_CLASS_EXPORT_KEY(query::XorOperator);
-BOOST_CLASS_EXPORT_KEY(query::AndOperator);
-BOOST_CLASS_EXPORT_KEY(query::NotOperator);
-BOOST_CLASS_EXPORT_KEY(query::AdditionOperator);
-BOOST_CLASS_EXPORT_KEY(query::SubtractionOperator);
-BOOST_CLASS_EXPORT_KEY(query::MultiplicationOperator);
-BOOST_CLASS_EXPORT_KEY(query::DivisionOperator);
-BOOST_CLASS_EXPORT_KEY(query::ModOperator);
-BOOST_CLASS_EXPORT_KEY(query::NotEqualOperator);
-BOOST_CLASS_EXPORT_KEY(query::EqualOperator);
-BOOST_CLASS_EXPORT_KEY(query::LessOperator);
-BOOST_CLASS_EXPORT_KEY(query::GreaterOperator);
-BOOST_CLASS_EXPORT_KEY(query::LessEqualOperator);
-BOOST_CLASS_EXPORT_KEY(query::GreaterEqualOperator);
-BOOST_CLASS_EXPORT_KEY(query::InListOperator);
-BOOST_CLASS_EXPORT_KEY(query::ListMapIndexingOperator);
-BOOST_CLASS_EXPORT_KEY(query::ListSlicingOperator);
-BOOST_CLASS_EXPORT_KEY(query::IfOperator);
-BOOST_CLASS_EXPORT_KEY(query::UnaryPlusOperator);
-BOOST_CLASS_EXPORT_KEY(query::UnaryMinusOperator);
-BOOST_CLASS_EXPORT_KEY(query::IsNullOperator);
-BOOST_CLASS_EXPORT_KEY(query::ListLiteral);
-BOOST_CLASS_EXPORT_KEY(query::MapLiteral);
-BOOST_CLASS_EXPORT_KEY(query::PropertyLookup);
-BOOST_CLASS_EXPORT_KEY(query::LabelsTest);
-BOOST_CLASS_EXPORT_KEY(query::Aggregation);
-BOOST_CLASS_EXPORT_KEY(query::Function);
-BOOST_CLASS_EXPORT_KEY(query::Reduce);
-BOOST_CLASS_EXPORT_KEY(query::All);
-BOOST_CLASS_EXPORT_KEY(query::Single);
-BOOST_CLASS_EXPORT_KEY(query::ParameterLookup);
-BOOST_CLASS_EXPORT_KEY(query::Create);
-BOOST_CLASS_EXPORT_KEY(query::Match);
-BOOST_CLASS_EXPORT_KEY(query::Return);
-BOOST_CLASS_EXPORT_KEY(query::With);
-BOOST_CLASS_EXPORT_KEY(query::Pattern);
-BOOST_CLASS_EXPORT_KEY(query::NodeAtom);
-BOOST_CLASS_EXPORT_KEY(query::EdgeAtom);
-BOOST_CLASS_EXPORT_KEY(query::Delete);
-BOOST_CLASS_EXPORT_KEY(query::Where);
-BOOST_CLASS_EXPORT_KEY(query::SetProperty);
-BOOST_CLASS_EXPORT_KEY(query::SetProperties);
-BOOST_CLASS_EXPORT_KEY(query::SetLabels);
-BOOST_CLASS_EXPORT_KEY(query::RemoveProperty);
-BOOST_CLASS_EXPORT_KEY(query::RemoveLabels);
-BOOST_CLASS_EXPORT_KEY(query::Merge);
-BOOST_CLASS_EXPORT_KEY(query::Unwind);
-BOOST_CLASS_EXPORT_KEY(query::Identifier);
-BOOST_CLASS_EXPORT_KEY(query::PrimitiveLiteral);
-BOOST_CLASS_EXPORT_KEY(query::CreateIndex);
diff --git a/src/query/frontend/semantic/symbol.hpp b/src/query/frontend/semantic/symbol.hpp
index a6becfd19..478614628 100644
--- a/src/query/frontend/semantic/symbol.hpp
+++ b/src/query/frontend/semantic/symbol.hpp
@@ -2,9 +2,6 @@
 
 #include <string>
 
-#include "boost/serialization/serialization.hpp"
-#include "boost/serialization/string.hpp"
-
 namespace query {
 
 class Symbol {
@@ -46,17 +43,6 @@ class Symbol {
   bool user_declared_ = true;
   Type type_ = Type::Any;
   int token_position_ = -1;
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar & name_;
-    ar & position_;
-    ar & user_declared_;
-    ar & type_;
-    ar & token_position_;
-  }
 };
 
 }  // namespace query
diff --git a/src/query/frontend/semantic/symbol_table.hpp b/src/query/frontend/semantic/symbol_table.hpp
index 0499d4979..a2297dd9c 100644
--- a/src/query/frontend/semantic/symbol_table.hpp
+++ b/src/query/frontend/semantic/symbol_table.hpp
@@ -3,9 +3,6 @@
 #include <map>
 #include <string>
 
-#include "boost/serialization/map.hpp"
-#include "boost/serialization/serialization.hpp"
-
 #include "query/frontend/ast/ast.hpp"
 #include "query/frontend/semantic/symbol.hpp"
 
@@ -33,14 +30,6 @@ class SymbolTable final {
  private:
   int position_{0};
   std::map<int, Symbol> table_;
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &position_;
-    ar &table_;
-  }
 };
 
 }  // namespace query
diff --git a/src/query/interpreter.cpp b/src/query/interpreter.cpp
index 9245ed3fc..84316d4bf 100644
--- a/src/query/interpreter.cpp
+++ b/src/query/interpreter.cpp
@@ -3,7 +3,6 @@
 #include <glog/logging.h>
 #include <limits>
 
-#include "distributed/plan_dispatcher.hpp"
 #include "query/exceptions.hpp"
 #include "query/frontend/ast/cypher_main_visitor.hpp"
 #include "query/frontend/opencypher/parser.hpp"
@@ -20,35 +19,13 @@ DEFINE_VALIDATED_int32(query_plan_cache_ttl, 60,
 
 namespace query {
 
-Interpreter::CachedPlan::CachedPlan(
-    plan::DistributedPlan distributed_plan, double cost,
-    distributed::PlanDispatcher *plan_dispatcher)
-    : distributed_plan_(std::move(distributed_plan)),
-      cost_(cost),
-      plan_dispatcher_(plan_dispatcher) {
-  if (plan_dispatcher_) {
-    for (const auto &plan_pair : distributed_plan_.worker_plans) {
-      const auto &plan_id = plan_pair.first;
-      const auto &worker_plan = plan_pair.second;
-      plan_dispatcher_->DispatchPlan(plan_id, worker_plan,
-                                     distributed_plan_.symbol_table);
-    }
-  }
-}
+Interpreter::CachedPlan::CachedPlan(plan::DistributedPlan distributed_plan,
+                                    double cost)
+    : distributed_plan_(std::move(distributed_plan)), cost_(cost) {}
 
-Interpreter::CachedPlan::~CachedPlan() {
-  if (plan_dispatcher_) {
-    for (const auto &plan_pair : distributed_plan_.worker_plans) {
-      const auto &plan_id = plan_pair.first;
-      plan_dispatcher_->RemovePlan(plan_id);
-    }
-  }
-}
+Interpreter::CachedPlan::~CachedPlan() {}
 
-Interpreter::Interpreter(database::GraphDb &db)
-    : plan_dispatcher_(db.type() == database::GraphDb::Type::DISTRIBUTED_MASTER
-                           ? &db.plan_dispatcher()
-                           : nullptr) {}
+Interpreter::Interpreter(database::GraphDb &db) {}
 
 Interpreter::Results Interpreter::operator()(
     const std::string &query, database::GraphDbAccessor &db_accessor,
@@ -134,26 +111,13 @@ std::shared_ptr<Interpreter::CachedPlan> Interpreter::QueryToPlan(
   std::tie(tmp_logical_plan, query_plan_cost_estimation) =
       MakeLogicalPlan(ast_storage, ctx);
 
-  DCHECK(ctx.db_accessor_.db().type() !=
-         database::GraphDb::Type::DISTRIBUTED_WORKER);
-  if (ctx.db_accessor_.db().type() ==
-      database::GraphDb::Type::DISTRIBUTED_MASTER) {
-    auto distributed_plan = MakeDistributedPlan(
-        *tmp_logical_plan, ctx.symbol_table_, next_plan_id_);
-    VLOG(10) << "[Interpreter] Created plan for distributed execution "
-             << next_plan_id_ - 1;
-    return std::make_shared<CachedPlan>(std::move(distributed_plan),
-                                        query_plan_cost_estimation,
-                                        plan_dispatcher_);
-  } else {
-    return std::make_shared<CachedPlan>(
-        plan::DistributedPlan{0,
-                              std::move(tmp_logical_plan),
-                              {},
-                              std::move(ast_storage),
-                              ctx.symbol_table_},
-        query_plan_cost_estimation, plan_dispatcher_);
-  }
+  return std::make_shared<CachedPlan>(
+      plan::DistributedPlan{0,
+                            std::move(tmp_logical_plan),
+                            {},
+                            std::move(ast_storage),
+                            ctx.symbol_table_},
+      query_plan_cost_estimation);
 }
 
 AstTreeStorage Interpreter::QueryToAst(const StrippedQuery &stripped,
diff --git a/src/query/interpreter.hpp b/src/query/interpreter.hpp
index 1c11d316f..9f955886d 100644
--- a/src/query/interpreter.hpp
+++ b/src/query/interpreter.hpp
@@ -16,10 +16,6 @@
 
 DECLARE_int32(query_plan_cache_ttl);
 
-namespace distributed {
-class PlanDispatcher;
-}
-
 namespace query {
 
 class Interpreter {
@@ -29,8 +25,7 @@ class Interpreter {
   class CachedPlan {
    public:
-    /// Creates a cached plan and sends it to all the workers.
+    /// Creates a cached plan.
-    CachedPlan(plan::DistributedPlan distributed_plan, double cost,
-               distributed::PlanDispatcher *plan_dispatcher);
+    CachedPlan(plan::DistributedPlan distributed_plan, double cost);
 
-    /// Removes the cached plan from all the workers.
+    /// Destroys the cached plan.
     ~CachedPlan();
@@ -49,9 +44,6 @@ class Interpreter {
     plan::DistributedPlan distributed_plan_;
     double cost_;
     utils::Timer cache_timer_;
-
-    // Optional, only available in a distributed master.
-    distributed::PlanDispatcher *plan_dispatcher_{nullptr};
   };
 
   using PlanCacheT = ConcurrentMap<HashType, std::shared_ptr<CachedPlan>>;
@@ -175,9 +167,6 @@ class Interpreter {
   // so this lock probably won't impact performance much...
   SpinLock antlr_lock_;
 
-  // Optional, not null only in a distributed master.
-  distributed::PlanDispatcher *plan_dispatcher_{nullptr};
-
   // stripped query -> CachedPlan
   std::shared_ptr<CachedPlan> QueryToPlan(const StrippedQuery &stripped,
                                           Context &ctx);
diff --git a/src/query/plan/distributed.cpp b/src/query/plan/distributed.cpp
index 95813d896..ca77755bb 100644
--- a/src/query/plan/distributed.cpp
+++ b/src/query/plan/distributed.cpp
@@ -2,12 +2,6 @@
 
 #include <memory>
 
-// TODO: Remove these includes for hacked cloning of logical operators via boost
-// serialization when proper cloning is added.
-#include <sstream>
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-
 #include "query/plan/operator.hpp"
 #include "query/plan/preprocess.hpp"
 #include "utils/exceptions.hpp"
@@ -16,22 +10,6 @@ namespace query::plan {
 
 namespace {
 
-std::pair<std::unique_ptr<LogicalOperator>, AstTreeStorage> Clone(
-    const LogicalOperator &original_plan) {
-  // TODO: Add a proper Clone method to LogicalOperator
-  std::stringstream stream;
-  {
-    boost::archive::binary_oarchive out_archive(stream);
-    out_archive << &original_plan;
-  }
-  boost::archive::binary_iarchive in_archive(stream);
-  LogicalOperator *plan_copy = nullptr;
-  in_archive >> plan_copy;
-  return {std::unique_ptr<LogicalOperator>(plan_copy),
-          std::move(in_archive.template get_helper<AstTreeStorage>(
-              AstTreeStorage::kHelperId))};
-}
-
 int64_t AddWorkerPlan(DistributedPlan &distributed_plan,
                       std::atomic<int64_t> &next_plan_id,
                       const std::shared_ptr<LogicalOperator> &worker_plan) {
@@ -749,43 +727,4 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor {
 
 }  // namespace
 
-DistributedPlan MakeDistributedPlan(const LogicalOperator &original_plan,
-                                    const SymbolTable &symbol_table,
-                                    std::atomic<int64_t> &next_plan_id) {
-  DistributedPlan distributed_plan;
-  // If we will generate multiple worker plans, we will need to increment the
-  // next_plan_id for each one.
-  distributed_plan.master_plan_id = next_plan_id++;
-  distributed_plan.symbol_table = symbol_table;
-  std::tie(distributed_plan.master_plan, distributed_plan.ast_storage) =
-      Clone(original_plan);
-  DistributedPlanner planner(distributed_plan, next_plan_id);
-  distributed_plan.master_plan->Accept(planner);
-  if (planner.ShouldSplit()) {
-    // We haven't split the plan, this means that it should be the same on
-    // master and worker. We only need to prepend PullRemote to master plan.
-    std::shared_ptr<LogicalOperator> worker_plan(
-        std::move(distributed_plan.master_plan));
-    auto pull_id = AddWorkerPlan(distributed_plan, next_plan_id, worker_plan);
-    // If the plan performs writes, we need to finish with Synchronize.
-    if (planner.NeedsSynchronize()) {
-      auto pull_remote = std::make_shared<PullRemote>(
-          nullptr, pull_id,
-          worker_plan->OutputSymbols(distributed_plan.symbol_table));
-      distributed_plan.master_plan =
-          std::make_unique<Synchronize>(worker_plan, pull_remote, false);
-    } else {
-      distributed_plan.master_plan = std::make_unique<PullRemote>(
-          worker_plan, pull_id,
-          worker_plan->OutputSymbols(distributed_plan.symbol_table));
-    }
-  } else if (planner.NeedsSynchronize()) {
-    // If the plan performs writes on master, we still need to Synchronize, even
-    // though we don't split the plan.
-    distributed_plan.master_plan = std::make_unique<Synchronize>(
-        std::move(distributed_plan.master_plan), nullptr, false);
-  }
-  return distributed_plan;
-}
-
 }  // namespace query::plan
diff --git a/src/query/plan/operator.cpp b/src/query/plan/operator.cpp
index b25894277..ef2a477cd 100644
--- a/src/query/plan/operator.cpp
+++ b/src/query/plan/operator.cpp
@@ -11,15 +11,9 @@
 #include <unordered_set>
 #include <utility>
 
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "boost/serialization/export.hpp"
 #include "glog/logging.h"
 
 #include "database/graph_db_accessor.hpp"
-#include "distributed/pull_rpc_clients.hpp"
-#include "distributed/updates_rpc_clients.hpp"
-#include "distributed/updates_rpc_server.hpp"
 #include "query/context.hpp"
 #include "query/exceptions.hpp"
 #include "query/frontend/ast/ast.hpp"
@@ -119,15 +113,6 @@ CreateNode::CreateNode(const std::shared_ptr<LogicalOperator> &input,
 
 namespace {
 
-// Returns a random worker id. Worker ID is obtained from the Db.
-int RandomWorkerId(database::GraphDb &db) {
-  thread_local std::mt19937 gen_{std::random_device{}()};
-  thread_local std::uniform_int_distribution<int> rand_;
-
-  auto worker_ids = db.GetWorkerIds();
-  return worker_ids[rand_(gen_) % worker_ids.size()];
-}
-
 // Creates a vertex on this GraphDb. Returns a reference to vertex placed on the
 // frame.
 VertexAccessor &CreateLocalVertex(NodeAtom *node_atom, Frame &frame,
@@ -146,34 +131,6 @@ VertexAccessor &CreateLocalVertex(NodeAtom *node_atom, Frame &frame,
   return frame[context.symbol_table_.at(*node_atom->identifier_)].ValueVertex();
 }
 
-// Creates a vertex on the GraphDb with the given worker_id. Can be this worker.
-VertexAccessor &CreateVertexOnWorker(int worker_id, NodeAtom *node_atom,
-                                     Frame &frame, Context &context) {
-  auto &dba = context.db_accessor_;
-
-  if (worker_id == dba.db().WorkerId())
-    return CreateLocalVertex(node_atom, frame, context);
-
-  std::unordered_map<storage::Property, query::TypedValue> properties;
-
-  // Evaluator should use the latest accessors, as modified in this query, when
-  // setting properties on new nodes.
-  ExpressionEvaluator evaluator(frame, context.parameters_,
-                                context.symbol_table_, dba, GraphView::NEW);
-  for (auto &kv : node_atom->properties_) {
-    auto value = kv.second->Accept(evaluator);
-    if (!value.IsPropertyValue()) {
-      throw QueryRuntimeException("'{}' cannot be used as a property value.",
-                                  value.type());
-    }
-    properties.emplace(kv.first.second, std::move(value));
-  }
-
-  auto new_node =
-      dba.InsertVertexIntoRemote(worker_id, node_atom->labels_, properties);
-  frame[context.symbol_table_.at(*node_atom->identifier_)] = new_node;
-  return frame[context.symbol_table_.at(*node_atom->identifier_)].ValueVertex();
-}
 }  // namespace
 
 ACCEPT_WITH_INPUT(CreateNode)
@@ -192,16 +149,11 @@ std::vector<Symbol> CreateNode::ModifiedSymbols(
 
 CreateNode::CreateNodeCursor::CreateNodeCursor(const CreateNode &self,
                                                database::GraphDbAccessor &db)
-    : self_(self), db_(db), input_cursor_(self.input_->MakeCursor(db)) {}
+    : self_(self), input_cursor_(self.input_->MakeCursor(db)) {}
 
 bool CreateNode::CreateNodeCursor::Pull(Frame &frame, Context &context) {
   if (input_cursor_->Pull(frame, context)) {
-    if (self_.on_random_worker_) {
-      CreateVertexOnWorker(RandomWorkerId(db_.db()), self_.node_atom_, frame,
-                           context);
-    } else {
-      CreateLocalVertex(self_.node_atom_, frame, context);
-    }
+    CreateLocalVertex(self_.node_atom_, frame, context);
     return true;
   }
   return false;
@@ -286,7 +238,7 @@ VertexAccessor &CreateExpand::CreateExpandCursor::OtherVertex(
     ExpectType(dest_node_symbol, dest_node_value, TypedValue::Type::Vertex);
     return dest_node_value.Value<VertexAccessor>();
   } else {
-    return CreateVertexOnWorker(worker_id, self_.node_atom_, frame, context);
+    return CreateLocalVertex(self_.node_atom_, frame, context);
   }
 }
 
@@ -414,32 +366,32 @@ std::unique_ptr<Cursor> ScanAllByLabelPropertyRange::MakeCursor(
       -> std::experimental::optional<decltype(
           db.Vertices(label_, property_, std::experimental::nullopt,
                       std::experimental::nullopt, false))> {
-        ExpressionEvaluator evaluator(frame, context.parameters_,
-                                      context.symbol_table_, db, graph_view_);
-        auto convert = [&evaluator](const auto &bound)
-            -> std::experimental::optional<utils::Bound<PropertyValue>> {
-              if (!bound) return std::experimental::nullopt;
-              auto value = bound->value()->Accept(evaluator);
-              try {
-                return std::experimental::make_optional(
-                    utils::Bound<PropertyValue>(value, bound->type()));
-              } catch (const TypedValueException &) {
-                throw QueryRuntimeException(
-                    "'{}' cannot be used as a property value.", value.type());
-              }
-            };
-        auto maybe_lower = convert(lower_bound());
-        auto maybe_upper = convert(upper_bound());
-        // If any bound is null, then the comparison would result in nulls. This
-        // is treated as not satisfying the filter, so return no vertices.
-        if (maybe_lower && maybe_lower->value().IsNull())
-          return std::experimental::nullopt;
-        if (maybe_upper && maybe_upper->value().IsNull())
-          return std::experimental::nullopt;
+    ExpressionEvaluator evaluator(frame, context.parameters_,
+                                  context.symbol_table_, db, graph_view_);
+    auto convert = [&evaluator](const auto &bound)
+        -> std::experimental::optional<utils::Bound<PropertyValue>> {
+      if (!bound) return std::experimental::nullopt;
+      auto value = bound->value()->Accept(evaluator);
+      try {
         return std::experimental::make_optional(
-            db.Vertices(label_, property_, maybe_lower, maybe_upper,
-                        graph_view_ == GraphView::NEW));
-      };
+            utils::Bound<PropertyValue>(value, bound->type()));
+      } catch (const TypedValueException &) {
+        throw QueryRuntimeException("'{}' cannot be used as a property value.",
+                                    value.type());
+      }
+    };
+    auto maybe_lower = convert(lower_bound());
+    auto maybe_upper = convert(upper_bound());
+    // If any bound is null, then the comparison would result in nulls. This
+    // is treated as not satisfying the filter, so return no vertices.
+    if (maybe_lower && maybe_lower->value().IsNull())
+      return std::experimental::nullopt;
+    if (maybe_upper && maybe_upper->value().IsNull())
+      return std::experimental::nullopt;
+    return std::experimental::make_optional(
+        db.Vertices(label_, property_, maybe_lower, maybe_upper,
+                    graph_view_ == GraphView::NEW));
+  };
   return std::make_unique<ScanAllCursor<decltype(vertices)>>(
       output_symbol_, input_->MakeCursor(db), std::move(vertices), db);
 }
@@ -462,18 +414,18 @@ std::unique_ptr<Cursor> ScanAllByLabelPropertyValue::MakeCursor(
   auto vertices = [this, &db](Frame &frame, Context &context)
       -> std::experimental::optional<decltype(
           db.Vertices(label_, property_, TypedValue::Null, false))> {
-        ExpressionEvaluator evaluator(frame, context.parameters_,
-                                      context.symbol_table_, db, graph_view_);
-        auto value = expression_->Accept(evaluator);
-        if (value.IsNull()) return std::experimental::nullopt;
-        try {
-          return std::experimental::make_optional(db.Vertices(
-              label_, property_, value, graph_view_ == GraphView::NEW));
-        } catch (const TypedValueException &) {
-          throw QueryRuntimeException(
-              "'{}' cannot be used as a property value.", value.type());
-        }
-      };
+    ExpressionEvaluator evaluator(frame, context.parameters_,
+                                  context.symbol_table_, db, graph_view_);
+    auto value = expression_->Accept(evaluator);
+    if (value.IsNull()) return std::experimental::nullopt;
+    try {
+      return std::experimental::make_optional(
+          db.Vertices(label_, property_, value, graph_view_ == GraphView::NEW));
+    } catch (const TypedValueException &) {
+      throw QueryRuntimeException("'{}' cannot be used as a property value.",
+                                  value.type());
+    }
+  };
   return std::make_unique<ScanAllCursor<decltype(vertices)>>(
       output_symbol_, input_->MakeCursor(db), std::move(vertices), db);
 }
@@ -1224,7 +1176,8 @@ class ExpandWeightedShortestPathCursor : public query::plan::Cursor {
-    // For the given (edge, vertex, weight, depth) tuple checks if they
-    // satisfy the "where" condition. if so, places them in the priority queue.
+    // For the given (edge, vertex, weight, depth) tuple, checks whether it
+    // satisfies the "where" condition. If so, places it in the priority queue.
     auto expand_pair = [this, &evaluator, &frame, &create_state](
-        EdgeAccessor edge, VertexAccessor vertex, double weight, int depth) {
+                           EdgeAccessor edge, VertexAccessor vertex,
+                           double weight, int depth) {
       SwitchAccessor(edge, self_.graph_view_);
       SwitchAccessor(vertex, self_.graph_view_);
 
@@ -2960,44 +2913,6 @@ void Union::UnionCursor::Reset() {
   right_cursor_->Reset();
 }
 
-bool PullRemote::Accept(HierarchicalLogicalOperatorVisitor &visitor) {
-  if (visitor.PreVisit(*this)) {
-    if (input_) input_->Accept(visitor);
-  }
-  return visitor.PostVisit(*this);
-}
-
-std::vector<Symbol> PullRemote::OutputSymbols(const SymbolTable &table) const {
-  return input_ ? input_->OutputSymbols(table) : std::vector<Symbol>{};
-}
-
-std::vector<Symbol> PullRemote::ModifiedSymbols(
-    const SymbolTable &table) const {
-  auto symbols = symbols_;
-  if (input_) {
-    auto input_symbols = input_->ModifiedSymbols(table);
-    symbols.insert(symbols.end(), input_symbols.begin(), input_symbols.end());
-  }
-  return symbols;
-}
-
-std::vector<Symbol> Synchronize::ModifiedSymbols(
-    const SymbolTable &table) const {
-  auto symbols = input_->ModifiedSymbols(table);
-  if (pull_remote_) {
-    auto pull_symbols = pull_remote_->ModifiedSymbols(table);
-    symbols.insert(symbols.end(), pull_symbols.begin(), pull_symbols.end());
-  }
-  return symbols;
-}
-
-bool Synchronize::Accept(HierarchicalLogicalOperatorVisitor &visitor) {
-  if (visitor.PreVisit(*this)) {
-    input_->Accept(visitor) && pull_remote_->Accept(visitor);
-  }
-  return visitor.PostVisit(*this);
-}
-
 std::vector<Symbol> Cartesian::ModifiedSymbols(const SymbolTable &table) const {
   auto symbols = left_op_->ModifiedSymbols(table);
   auto right = right_op_->ModifiedSymbols(table);
@@ -3014,407 +2929,6 @@ bool Cartesian::Accept(HierarchicalLogicalOperatorVisitor &visitor) {
 
 WITHOUT_SINGLE_INPUT(Cartesian);
 
-PullRemoteOrderBy::PullRemoteOrderBy(
-    const std::shared_ptr<LogicalOperator> &input, int64_t plan_id,
-    const std::vector<std::pair<Ordering, Expression *>> &order_by,
-    const std::vector<Symbol> &symbols)
-    : input_(input), plan_id_(plan_id), symbols_(symbols) {
-  CHECK(input_ != nullptr)
-      << "PullRemoteOrderBy should always be constructed with input!";
-  std::vector<Ordering> ordering;
-  ordering.reserve(order_by.size());
-  order_by_.reserve(order_by.size());
-  for (const auto &ordering_expression_pair : order_by) {
-    ordering.emplace_back(ordering_expression_pair.first);
-    order_by_.emplace_back(ordering_expression_pair.second);
-  }
-  compare_ = TypedValueVectorCompare(ordering);
-}
-
-ACCEPT_WITH_INPUT(PullRemoteOrderBy);
-
-std::vector<Symbol> PullRemoteOrderBy::OutputSymbols(
-    const SymbolTable &table) const {
-  return input_->OutputSymbols(table);
-}
-
-std::vector<Symbol> PullRemoteOrderBy::ModifiedSymbols(
-    const SymbolTable &table) const {
-  return input_->ModifiedSymbols(table);
-}
-
-namespace {
-
-/** Helper class that wraps remote pulling for cursors that handle results from
- * distributed workers.
- */
-class RemotePuller {
- public:
-  RemotePuller(database::GraphDbAccessor &db,
-               const std::vector<Symbol> &symbols, int64_t plan_id)
-      : db_(db), symbols_(symbols), plan_id_(plan_id) {
-    worker_ids_ = db_.db().pull_clients().GetWorkerIds();
-    // Remove master from the worker ids list.
-    worker_ids_.erase(std::find(worker_ids_.begin(), worker_ids_.end(), 0));
-  }
-
-  void Initialize(Context &context) {
-    if (!remote_pulls_initialized_) {
-      VLOG(10) << "[RemotePuller] [" << context.db_accessor_.transaction_id()
-               << "] [" << plan_id_ << "] initialized";
-      for (auto &worker_id : worker_ids_) {
-        UpdatePullForWorker(worker_id, context);
-      }
-      remote_pulls_initialized_ = true;
-    }
-  }
-
-  void Update(Context &context) {
-    // If we don't have results for a worker, check if his remote pull
-    // finished and save results locally.
-
-    auto move_frames = [this, &context](int worker_id, auto remote_results) {
-      VLOG(10) << "[RemotePuller] [" << context.db_accessor_.transaction_id()
-               << "] [" << plan_id_ << "] received results from " << worker_id;
-      remote_results_[worker_id] = std::move(remote_results.frames);
-      // Since we return and remove results from the back of the vector,
-      // reverse the results so the first to return is on the end of the
-      // vector.
-      std::reverse(remote_results_[worker_id].begin(),
-                   remote_results_[worker_id].end());
-    };
-
-    for (auto &worker_id : worker_ids_) {
-      if (!remote_results_[worker_id].empty()) continue;
-
-      auto found_it = remote_pulls_.find(worker_id);
-      if (found_it == remote_pulls_.end()) continue;
-
-      auto &remote_pull = found_it->second;
-      if (!remote_pull.IsReady()) continue;
-
-      auto remote_results = remote_pull.get();
-      switch (remote_results.pull_state) {
-        case distributed::PullState::CURSOR_EXHAUSTED:
-          VLOG(10) << "[RemotePuller] ["
-                   << context.db_accessor_.transaction_id() << "] [" << plan_id_
-                   << "] cursor exhausted from " << worker_id;
-          move_frames(worker_id, remote_results);
-          remote_pulls_.erase(found_it);
-          break;
-        case distributed::PullState::CURSOR_IN_PROGRESS:
-          VLOG(10) << "[RemotePuller] ["
-                   << context.db_accessor_.transaction_id() << "] [" << plan_id_
-                   << "] cursor in progress from " << worker_id;
-          move_frames(worker_id, remote_results);
-          UpdatePullForWorker(worker_id, context);
-          break;
-        case distributed::PullState::SERIALIZATION_ERROR:
-          throw mvcc::SerializationError(
-              "Serialization error occured during PullRemote !");
-        case distributed::PullState::LOCK_TIMEOUT_ERROR:
-          throw LockTimeoutException(
-              "LockTimeout error occured during PullRemote !");
-        case distributed::PullState::UPDATE_DELETED_ERROR:
-          throw QueryRuntimeException(
-              "RecordDeleted error ocured during PullRemote !");
-        case distributed::PullState::RECONSTRUCTION_ERROR:
-          throw query::ReconstructionException();
-        case distributed::PullState::UNABLE_TO_DELETE_VERTEX_ERROR:
-          throw RemoveAttachedVertexException();
-        case distributed::PullState::HINTED_ABORT_ERROR:
-          throw HintedAbortError();
-        case distributed::PullState::QUERY_ERROR:
-          throw QueryRuntimeException(
-              "Query runtime error occurred duing PullRemote !");
-      }
-    }
-  }
-
-  auto Workers() { return worker_ids_; }
-
-  int GetWorkerId(int worker_id_index) { return worker_ids_[worker_id_index]; }
-
-  size_t WorkerCount() { return worker_ids_.size(); }
-
-  void ClearWorkers() { worker_ids_.clear(); }
-
-  bool HasPendingPulls() { return !remote_pulls_.empty(); }
-
-  bool HasPendingPullFromWorker(int worker_id) {
-    return remote_pulls_.find(worker_id) != remote_pulls_.end();
-  }
-
-  bool HasResultsFromWorker(int worker_id) {
-    return !remote_results_[worker_id].empty();
-  }
-
-  std::vector<query::TypedValue> PopResultFromWorker(int worker_id) {
-    auto result = remote_results_[worker_id].back();
-    remote_results_[worker_id].pop_back();
-
-    // Remove the worker if we exhausted all locally stored results and there
-    // are no more pending remote pulls for that worker.
-    if (remote_results_[worker_id].empty() &&
-        remote_pulls_.find(worker_id) == remote_pulls_.end()) {
-      worker_ids_.erase(
-          std::find(worker_ids_.begin(), worker_ids_.end(), worker_id));
-    }
-
-    return result;
-  }
-
- private:
-  database::GraphDbAccessor &db_;
-  std::vector<Symbol> symbols_;
-  int64_t plan_id_;
-  std::unordered_map<int, utils::Future<distributed::PullData>> remote_pulls_;
-  std::unordered_map<int, std::vector<std::vector<query::TypedValue>>>
-      remote_results_;
-  std::vector<int> worker_ids_;
-  bool remote_pulls_initialized_ = false;
-
-  void UpdatePullForWorker(int worker_id, Context &context) {
-    remote_pulls_[worker_id] = db_.db().pull_clients().Pull(
-        db_, worker_id, plan_id_, context.parameters_, symbols_, false);
-  }
-};
-
-class PullRemoteCursor : public Cursor {
- public:
-  PullRemoteCursor(const PullRemote &self, database::GraphDbAccessor &db)
-      : self_(self),
-        input_cursor_(self.input() ? self.input()->MakeCursor(db) : nullptr),
-        remote_puller_(RemotePuller(db, self.symbols(), self.plan_id())) {}
-
-  bool Pull(Frame &frame, Context &context) override {
-    if (context.db_accessor_.should_abort()) throw HintedAbortError();
-    remote_puller_.Initialize(context);
-
-    bool have_remote_results = false;
-    while (!have_remote_results && remote_puller_.WorkerCount() > 0) {
-      if (context.db_accessor_.should_abort()) throw HintedAbortError();
-      remote_puller_.Update(context);
-
-      // Get locally stored results from workers in a round-robin fasion.
-      int num_workers = remote_puller_.WorkerCount();
-      for (int i = 0; i < num_workers; ++i) {
-        int worker_id_index =
-            (last_pulled_worker_id_index_ + i + 1) % num_workers;
-        int worker_id = remote_puller_.GetWorkerId(worker_id_index);
-
-        if (remote_puller_.HasResultsFromWorker(worker_id)) {
-          last_pulled_worker_id_index_ = worker_id_index;
-          have_remote_results = true;
-          break;
-        }
-      }
-
-      if (!have_remote_results) {
-        if (!remote_puller_.HasPendingPulls()) {
-          remote_puller_.ClearWorkers();
-          break;
-        }
-
-        // If there are no remote results available, try to pull and return
-        // local results.
-        if (input_cursor_ && input_cursor_->Pull(frame, context)) {
-          VLOG(10) << "[PullRemoteCursor] ["
-                   << context.db_accessor_.transaction_id() << "] ["
-                   << self_.plan_id() << "] producing local results ";
-          return true;
-        }
-
-        VLOG(10) << "[PullRemoteCursor] ["
-                 << context.db_accessor_.transaction_id() << "] ["
-                 << self_.plan_id() << "] no results available, sleeping ";
-        // If there aren't any local/remote results available, sleep.
-        std::this_thread::sleep_for(
-            std::chrono::microseconds(FLAGS_remote_pull_sleep_micros));
-      }
-    }
-
-    // No more remote results, make sure local results get exhausted.
-    if (!have_remote_results) {
-      if (input_cursor_ && input_cursor_->Pull(frame, context)) {
-        VLOG(10) << "[PullRemoteCursor] ["
-                 << context.db_accessor_.transaction_id() << "] ["
-                 << self_.plan_id() << "] producing local results ";
-        return true;
-      }
-      return false;
-    }
-
-    {
-      int worker_id = remote_puller_.GetWorkerId(last_pulled_worker_id_index_);
-      VLOG(10) << "[PullRemoteCursor] ["
-               << context.db_accessor_.transaction_id() << "] ["
-               << self_.plan_id() << "] producing results from worker "
-               << worker_id;
-      auto result = remote_puller_.PopResultFromWorker(worker_id);
-      for (size_t i = 0; i < self_.symbols().size(); ++i) {
-        frame[self_.symbols()[i]] = std::move(result[i]);
-      }
-    }
-    return true;
-  }
-
-  void Reset() override {
-    throw QueryRuntimeException("Unsupported: Reset during PullRemote!");
-  }
-
- private:
-  const PullRemote &self_;
-  const std::unique_ptr<Cursor> input_cursor_;
-  RemotePuller remote_puller_;
-  int last_pulled_worker_id_index_ = 0;
-};
-
-class SynchronizeCursor : public Cursor {
- public:
-  SynchronizeCursor(const Synchronize &self, database::GraphDbAccessor &db)
-      : self_(self),
-        input_cursor_(self.input()->MakeCursor(db)),
-        pull_remote_cursor_(
-            self.pull_remote() ? self.pull_remote()->MakeCursor(db) : nullptr) {
-  }
-
-  bool Pull(Frame &frame, Context &context) override {
-    if (!initial_pull_done_) {
-      InitialPull(frame, context);
-      initial_pull_done_ = true;
-    }
-    // Yield local stuff while available.
-    if (!local_frames_.empty()) {
-      VLOG(10) << "[SynchronizeCursor] ["
-               << context.db_accessor_.transaction_id()
-               << "] producing local results";
-      auto &result = local_frames_.back();
-      for (size_t i = 0; i < frame.elems().size(); ++i) {
-        if (self_.advance_command()) {
-          query::ReconstructTypedValue(result[i]);
-        }
-        frame.elems()[i] = std::move(result[i]);
-      }
-      local_frames_.resize(local_frames_.size() - 1);
-      return true;
-    }
-
-    // We're out of local stuff, yield from pull_remote if available.
-    if (pull_remote_cursor_ && pull_remote_cursor_->Pull(frame, context)) {
-      VLOG(10) << "[SynchronizeCursor] ["
-               << context.db_accessor_.transaction_id()
-               << "] producing remote results";
-      return true;
-    }
-
-    return false;
-  }
-
-  void Reset() override {
-    throw QueryRuntimeException("Unsupported: Reset during Synchronize!");
-  }
-
- private:
-  const Synchronize &self_;
-  const std::unique_ptr<Cursor> input_cursor_;
-  const std::unique_ptr<Cursor> pull_remote_cursor_;
-  bool initial_pull_done_{false};
-  std::vector<std::vector<TypedValue>> local_frames_;
-
-  void InitialPull(Frame &frame, Context &context) {
-    VLOG(10) << "[SynchronizeCursor] [" << context.db_accessor_.transaction_id()
-             << "] initial pull";
-    auto &db = context.db_accessor_.db();
-
-    // Tell all workers to accumulate, only if there is a remote pull.
-    std::vector<utils::Future<distributed::PullData>> worker_accumulations;
-    if (pull_remote_cursor_) {
-      for (auto worker_id : db.pull_clients().GetWorkerIds()) {
-        if (worker_id == db.WorkerId()) continue;
-        worker_accumulations.emplace_back(db.pull_clients().Pull(
-            context.db_accessor_, worker_id, self_.pull_remote()->plan_id(),
-            context.parameters_, self_.pull_remote()->symbols(), true, 0));
-      }
-    }
-
-    // Accumulate local results
-    while (input_cursor_->Pull(frame, context)) {
-      local_frames_.emplace_back();
-      auto &local_frame = local_frames_.back();
-      local_frame.reserve(frame.elems().size());
-      for (auto &elem : frame.elems()) {
-        local_frame.emplace_back(std::move(elem));
-      }
-    }
-
-    // Wait for all workers to finish accumulation (first sync point).
-    for (auto &accu : worker_accumulations) {
-      switch (accu.get().pull_state) {
-        case distributed::PullState::CURSOR_EXHAUSTED:
-          continue;
-        case distributed::PullState::CURSOR_IN_PROGRESS:
-          throw QueryRuntimeException(
-              "Expected exhausted cursor after remote pull accumulate");
-        case distributed::PullState::SERIALIZATION_ERROR:
-          throw mvcc::SerializationError(
-              "Failed to perform remote accumulate due to SerializationError");
-        case distributed::PullState::UPDATE_DELETED_ERROR:
-          throw QueryRuntimeException(
-              "Failed to perform remote accumulate due to RecordDeletedError");
-        case distributed::PullState::LOCK_TIMEOUT_ERROR:
-          throw LockTimeoutException(
-              "Failed to perform remote accumulate due to "
-              "LockTimeoutException");
-        case distributed::PullState::RECONSTRUCTION_ERROR:
-          throw QueryRuntimeException(
-              "Failed to perform remote accumulate due to ReconstructionError");
-        case distributed::PullState::UNABLE_TO_DELETE_VERTEX_ERROR:
-          throw RemoveAttachedVertexException();
-        case distributed::PullState::HINTED_ABORT_ERROR:
-          throw HintedAbortError();
-        case distributed::PullState::QUERY_ERROR:
-          throw QueryRuntimeException(
-              "Failed to perform remote accumulate due to Query runtime error");
-      }
-    }
-
-    if (self_.advance_command()) {
-      context.db_accessor_.AdvanceCommand();
-    }
-
-    // Make all the workers apply their deltas.
-    auto tx_id = context.db_accessor_.transaction_id();
-    auto apply_futures =
-        db.updates_clients().UpdateApplyAll(db.WorkerId(), tx_id);
-    db.updates_server().Apply(tx_id);
-    for (auto &future : apply_futures) {
-      switch (future.get()) {
-        case distributed::UpdateResult::SERIALIZATION_ERROR:
-          throw mvcc::SerializationError(
-              "Failed to apply deferred updates due to SerializationError");
-        case distributed::UpdateResult::UNABLE_TO_DELETE_VERTEX_ERROR:
-          throw RemoveAttachedVertexException();
-        case distributed::UpdateResult::UPDATE_DELETED_ERROR:
-          throw QueryRuntimeException(
-              "Failed to apply deferred updates due to RecordDeletedError");
-        case distributed::UpdateResult::LOCK_TIMEOUT_ERROR:
-          throw LockTimeoutException(
-              "Failed to apply deferred update due to LockTimeoutException");
-        case distributed::UpdateResult::DONE:
-          break;
-      }
-    }
-
-    // If the command advanced, let the workers know.
-    if (self_.advance_command()) {
-      auto futures =
-          db.pull_clients().NotifyAllTransactionCommandAdvanced(tx_id);
-      for (auto &future : futures) future.wait();
-    }
-  }
-};
-
 class CartesianCursor : public Cursor {
  public:
   CartesianCursor(const Cartesian &self, database::GraphDbAccessor &db)
@@ -3494,198 +3008,9 @@ class CartesianCursor : public Cursor {
   bool cartesian_pull_initialized_{false};
 };
 
-class PullRemoteOrderByCursor : public Cursor {
- public:
-  PullRemoteOrderByCursor(const PullRemoteOrderBy &self,
-                          database::GraphDbAccessor &db)
-      : self_(self),
-        input_(self.input()->MakeCursor(db)),
-        remote_puller_(RemotePuller(db, self.symbols(), self.plan_id())) {}
-
-  bool Pull(Frame &frame, Context &context) {
-    if (context.db_accessor_.should_abort()) throw HintedAbortError();
-    ExpressionEvaluator evaluator(frame, context.parameters_,
-                                  context.symbol_table_, context.db_accessor_);
-
-    auto evaluate_result = [this, &evaluator]() {
-      std::vector<TypedValue> order_by;
-      order_by.reserve(self_.order_by().size());
-      for (auto expression_ptr : self_.order_by()) {
-        order_by.emplace_back(expression_ptr->Accept(evaluator));
-      }
-      return order_by;
-    };
-
-    auto restore_frame = [&frame,
-                          this](const std::vector<TypedValue> &restore_from) {
-      for (size_t i = 0; i < restore_from.size(); ++i) {
-        frame[self_.symbols()[i]] = restore_from[i];
-      }
-    };
-
-    if (!merge_initialized_) {
-      VLOG(10) << "[PullRemoteOrderBy] ["
-               << context.db_accessor_.transaction_id() << "] ["
-               << self_.plan_id() << "] initialize";
-      remote_puller_.Initialize(context);
-      missing_results_from_ = remote_puller_.Workers();
-      missing_master_result_ = true;
-      merge_initialized_ = true;
-    }
-
-    if (missing_master_result_) {
-      if (input_->Pull(frame, context)) {
-        std::vector<TypedValue> output;
-        output.reserve(self_.symbols().size());
-        for (const Symbol &symbol : self_.symbols()) {
-          output.emplace_back(frame[symbol]);
-        }
-
-        merge_.push_back(MergeResultItem{std::experimental::nullopt, output,
-                                         evaluate_result()});
-      }
-      missing_master_result_ = false;
-    }
-
-    while (!missing_results_from_.empty()) {
-      if (context.db_accessor_.should_abort()) throw HintedAbortError();
-      remote_puller_.Update(context);
-
-      bool has_all_result = true;
-      for (auto &worker_id : missing_results_from_) {
-        if (!remote_puller_.HasResultsFromWorker(worker_id) &&
-            remote_puller_.HasPendingPullFromWorker(worker_id)) {
-          has_all_result = false;
-          break;
-        }
-      }
-
-      if (!has_all_result) {
-        VLOG(10) << "[PullRemoteOrderByCursor] ["
-                 << context.db_accessor_.transaction_id() << "] ["
-                 << self_.plan_id() << "] missing results, sleep";
-        // If we don't have results from all workers, sleep before continuing.
-        std::this_thread::sleep_for(
-            std::chrono::microseconds(FLAGS_remote_pull_sleep_micros));
-        continue;
-      }
-
-      for (auto &worker_id : missing_results_from_) {
-        // It is possible that the workers remote pull finished but it didn't
-        // return any results. In that case, just skip it.
-        if (!remote_puller_.HasResultsFromWorker(worker_id)) continue;
-        auto remote_result = remote_puller_.PopResultFromWorker(worker_id);
-        restore_frame(remote_result);
-        merge_.push_back(
-            MergeResultItem{worker_id, remote_result, evaluate_result()});
-      }
-
-      missing_results_from_.clear();
-    }
-
-    if (merge_.empty()) return false;
-
-    auto result_it = std::min_element(
-        merge_.begin(), merge_.end(), [this](const auto &lhs, const auto &rhs) {
-          return self_.compare()(lhs.order_by, rhs.order_by);
-        });
-
-    restore_frame(result_it->remote_result);
-
-    if (result_it->worker_id) {
-      VLOG(10) << "[PullRemoteOrderByCursor] ["
-               << context.db_accessor_.transaction_id() << "] ["
-               << self_.plan_id() << "] producing results from worker "
-               << result_it->worker_id.value();
-      missing_results_from_.push_back(result_it->worker_id.value());
-    } else {
-      VLOG(10) << "[PullRemoteOrderByCursor] ["
-               << context.db_accessor_.transaction_id() << "] ["
-               << self_.plan_id() << "] producing local results";
-      missing_master_result_ = true;
-    }
-
-    merge_.erase(result_it);
-    return true;
-  }
-
-  void Reset() {
-    throw QueryRuntimeException("Unsupported: Reset during PullRemoteOrderBy!");
-  }
-
- private:
-  struct MergeResultItem {
-    std::experimental::optional<int> worker_id;
-    std::vector<TypedValue> remote_result;
-    std::vector<TypedValue> order_by;
-  };
-
-  const PullRemoteOrderBy &self_;
-  std::unique_ptr<Cursor> input_;
-  RemotePuller remote_puller_;
-  std::vector<MergeResultItem> merge_;
-  std::vector<int> missing_results_from_;
-  bool missing_master_result_ = false;
-  bool merge_initialized_ = false;
-};
-
-}  // namespace
-
-std::unique_ptr<Cursor> PullRemote::MakeCursor(
-    database::GraphDbAccessor &db) const {
-  return std::make_unique<PullRemoteCursor>(*this, db);
-}
-
-std::unique_ptr<Cursor> Synchronize::MakeCursor(
-    database::GraphDbAccessor &db) const {
-  return std::make_unique<SynchronizeCursor>(*this, db);
-}
-
 std::unique_ptr<Cursor> Cartesian::MakeCursor(
     database::GraphDbAccessor &db) const {
   return std::make_unique<CartesianCursor>(*this, db);
 }
 
-std::unique_ptr<Cursor> PullRemoteOrderBy::MakeCursor(
-    database::GraphDbAccessor &db) const {
-  return std::make_unique<PullRemoteOrderByCursor>(*this, db);
-}
-
 }  // namespace query::plan
-
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Once);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::CreateNode);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::CreateExpand);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ScanAll);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ScanAllByLabel);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ScanAllByLabelPropertyRange);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ScanAllByLabelPropertyValue);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Expand);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ExpandVariable);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Filter);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Produce);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ConstructNamedPath);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Delete);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::SetProperty);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::SetProperties);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::SetLabels);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::RemoveProperty);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::RemoveLabels);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::ExpandUniquenessFilter<EdgeAccessor>);
-BOOST_CLASS_EXPORT_IMPLEMENT(
-    query::plan::ExpandUniquenessFilter<VertexAccessor>);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Accumulate);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Aggregate);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Skip);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Limit);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::OrderBy);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Merge);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Optional);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Unwind);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Distinct);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::CreateIndex);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Union);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::PullRemote);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Synchronize);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::Cartesian);
-BOOST_CLASS_EXPORT_IMPLEMENT(query::plan::PullRemoteOrderBy);
diff --git a/src/query/plan/operator.hpp b/src/query/plan/operator.hpp
index 12bb7cae9..f03f3d4f0 100644
--- a/src/query/plan/operator.hpp
+++ b/src/query/plan/operator.hpp
@@ -9,14 +9,6 @@
 #include <utility>
 #include <vector>
 
-#include <boost/serialization/shared_ptr_helper.hpp>
-#include "boost/serialization/base_object.hpp"
-#include "boost/serialization/export.hpp"
-#include "boost/serialization/serialization.hpp"
-#include "boost/serialization/shared_ptr.hpp"
-#include "boost/serialization/unique_ptr.hpp"
-
-#include "distributed/pull_produce_rpc_messages.hpp"
 #include "query/common.hpp"
 #include "query/frontend/ast/ast.hpp"
 #include "query/frontend/semantic/symbol.hpp"
@@ -98,10 +90,7 @@ class Unwind;
 class Distinct;
 class CreateIndex;
 class Union;
-class PullRemote;
-class Synchronize;
 class Cartesian;
-class PullRemoteOrderBy;
 
 using LogicalOperatorCompositeVisitor = ::utils::CompositeVisitor<
     Once, CreateNode, CreateExpand, ScanAll, ScanAllByLabel,
@@ -110,8 +99,7 @@ using LogicalOperatorCompositeVisitor = ::utils::CompositeVisitor<
     SetProperties, SetLabels, RemoveProperty, RemoveLabels,
     ExpandUniquenessFilter<VertexAccessor>,
     ExpandUniquenessFilter<EdgeAccessor>, Accumulate, Aggregate, Skip, Limit,
-    OrderBy, Merge, Optional, Unwind, Distinct, Union, PullRemote, Synchronize,
-    Cartesian, PullRemoteOrderBy>;
+    OrderBy, Merge, Optional, Unwind, Distinct, Union, Cartesian>;
 
 using LogicalOperatorLeafVisitor = ::utils::LeafVisitor<Once, CreateIndex>;
 
@@ -196,12 +184,6 @@ class LogicalOperator
    * NOTE: This should only be called if `HasSingleInput() == true`.
    */
   virtual void set_input(std::shared_ptr<LogicalOperator>) = 0;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &, const unsigned int) {}
 };
 
 template <class TArchive>
@@ -240,12 +222,6 @@ class Once : public LogicalOperator {
    private:
     bool did_pull_{false};
   };
-
-  friend class boost::serialization::access;
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-  }
 };
 
 /** @brief Operator for creating a node.
@@ -298,29 +274,8 @@ class CreateNode : public LogicalOperator {
 
    private:
     const CreateNode &self_;
-    database::GraphDbAccessor &db_;
     const std::unique_ptr<Cursor> input_cursor_;
   };
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    SavePointer(ar, node_atom_);
-    ar &on_random_worker_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    LoadPointer(ar, node_atom_);
-    ar &on_random_worker_;
-  }
 };
 
 /** @brief Operator for creating edges and destination nodes.
@@ -407,30 +362,6 @@ class CreateExpand : public LogicalOperator {
                     const SymbolTable &symbol_table,
                     ExpressionEvaluator &evaluator);
   };
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    SavePointer(ar, node_atom_);
-    SavePointer(ar, edge_atom_);
-    ar &input_;
-    ar &input_symbol_;
-    ar &existing_node_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    LoadPointer(ar, node_atom_);
-    LoadPointer(ar, edge_atom_);
-    ar &input_;
-    ar &input_symbol_;
-    ar &existing_node_;
-  }
 };
 
 /**
@@ -481,17 +412,6 @@ class ScanAll : public LogicalOperator {
   GraphView graph_view_;
 
   ScanAll() {}
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &output_symbol_;
-    ar &graph_view_;
-  }
 };
 
 /**
@@ -517,14 +437,6 @@ class ScanAllByLabel : public ScanAll {
   storage::Label label_;
 
   ScanAllByLabel() {}
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<ScanAll>(*this);
-    ar &label_;
-  }
 };
 
 /**
@@ -576,51 +488,6 @@ class ScanAllByLabelPropertyRange : public ScanAll {
   std::experimental::optional<Bound> upper_bound_;
 
   ScanAllByLabelPropertyRange() {}
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<ScanAll>(*this);
-    ar &label_;
-    ar &property_;
-    auto save_bound = [&ar](auto &maybe_bound) {
-      if (!maybe_bound) {
-        ar & false;
-        return;
-      }
-      ar & true;
-      auto &bound = *maybe_bound;
-      ar &bound.type();
-      SavePointer(ar, bound.value());
-    };
-    save_bound(lower_bound_);
-    save_bound(upper_bound_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<ScanAll>(*this);
-    ar &label_;
-    ar &property_;
-    auto load_bound = [&ar](auto &maybe_bound) {
-      bool has_bound = false;
-      ar &has_bound;
-      if (!has_bound) {
-        maybe_bound = std::experimental::nullopt;
-        return;
-      }
-      utils::BoundType type;
-      ar &type;
-      Expression *value;
-      LoadPointer(ar, value);
-      maybe_bound = std::experimental::make_optional(Bound(value, type));
-    };
-    load_bound(lower_bound_);
-    load_bound(upper_bound_);
-  }
 };
 
 /**
@@ -663,26 +530,6 @@ class ScanAllByLabelPropertyValue : public ScanAll {
   Expression *expression_;
 
   ScanAllByLabelPropertyValue() {}
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<ScanAll>(*this);
-    ar &label_;
-    ar &property_;
-    SavePointer(ar, expression_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<ScanAll>(*this);
-    ar &label_;
-    ar &property_;
-    LoadPointer(ar, expression_);
-  }
 };
 
 /**
@@ -770,21 +617,6 @@ class ExpandCommon {
   bool HandleExistingNode(const VertexAccessor &new_node, Frame &frame) const;
 
   ExpandCommon() {}
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &node_symbol_;
-    ar &edge_symbol_;
-    ar &direction_;
-    ar &edge_types_;
-    ar &input_;
-    ar &input_symbol_;
-    ar &existing_node_;
-    ar &graph_view_;
-  }
 };
 
 /**
@@ -853,15 +685,6 @@ class Expand : public LogicalOperator, public ExpandCommon {
 
     bool InitEdges(Frame &, Context &);
   };
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &boost::serialization::base_object<ExpandCommon>(*this);
-  }
 };
 
 /**
@@ -897,22 +720,6 @@ class ExpandVariable : public LogicalOperator, public ExpandCommon {
     Symbol inner_node_symbol;
     // Expression used in lambda during expansion.
     Expression *expression;
-
-    BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-    template <class TArchive>
-    void save(TArchive &ar, const unsigned int) const {
-      ar &inner_edge_symbol;
-      ar &inner_node_symbol;
-      SavePointer(ar, expression);
-    }
-
-    template <class TArchive>
-    void load(TArchive &ar, const unsigned int) {
-      ar &inner_edge_symbol;
-      ar &inner_node_symbol;
-      LoadPointer(ar, expression);
-    }
   };
 
   /**
@@ -977,36 +784,6 @@ class ExpandVariable : public LogicalOperator, public ExpandCommon {
   std::experimental::optional<Symbol> total_weight_;
 
   ExpandVariable() {}
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &boost::serialization::base_object<ExpandCommon>(*this);
-    ar &type_;
-    ar &is_reverse_;
-    SavePointer(ar, lower_bound_);
-    SavePointer(ar, upper_bound_);
-    ar &filter_lambda_;
-    ar &weight_lambda_;
-    ar &total_weight_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &boost::serialization::base_object<ExpandCommon>(*this);
-    ar &type_;
-    ar &is_reverse_;
-    LoadPointer(ar, lower_bound_);
-    LoadPointer(ar, upper_bound_);
-    ar &filter_lambda_;
-    ar &weight_lambda_;
-    ar &total_weight_;
-  }
 };
 
 /**
@@ -1040,16 +817,6 @@ class ConstructNamedPath : public LogicalOperator {
   std::vector<Symbol> path_elements_;
 
   ConstructNamedPath() {}
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &path_symbol_;
-    ar &path_elements_;
-  }
 };
 
 /**
@@ -1092,24 +859,6 @@ class Filter : public LogicalOperator {
     database::GraphDbAccessor &db_;
     const std::unique_ptr<Cursor> input_cursor_;
   };
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    SavePointer(ar, expression_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    LoadPointer(ar, expression_);
-  }
 };
 
 /**
@@ -1158,23 +907,6 @@ class Produce : public LogicalOperator {
   };
 
   Produce() {}
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    SavePointers(ar, named_expressions_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    LoadPointers(ar, named_expressions_);
-  }
 };
 
 /**
@@ -1219,26 +951,6 @@ class Delete : public LogicalOperator {
     database::GraphDbAccessor &db_;
     const std::unique_ptr<Cursor> input_cursor_;
   };
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    SavePointers(ar, expressions_);
-    ar &detach_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    LoadPointers(ar, expressions_);
-    ar &detach_;
-  }
 };
 
 /**
@@ -1280,26 +992,6 @@ class SetProperty : public LogicalOperator {
     database::GraphDbAccessor &db_;
     const std::unique_ptr<Cursor> input_cursor_;
   };
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    SavePointer(ar, lhs_);
-    SavePointer(ar, rhs_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    LoadPointer(ar, lhs_);
-    LoadPointer(ar, rhs_);
-  }
 };
 
 /**
@@ -1367,28 +1059,6 @@ class SetProperties : public LogicalOperator {
     template <typename TRecordAccessor>
     void Set(TRecordAccessor &record, const TypedValue &rhs) const;
   };
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &input_symbol_;
-    SavePointer(ar, rhs_);
-    ar &op_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &input_symbol_;
-    LoadPointer(ar, rhs_);
-    ar &op_;
-  }
 };
 
 /**
@@ -1429,16 +1099,6 @@ class SetLabels : public LogicalOperator {
     const SetLabels &self_;
     const std::unique_ptr<Cursor> input_cursor_;
   };
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &input_symbol_;
-    ar &labels_;
-  }
 };
 
 /**
@@ -1478,24 +1138,6 @@ class RemoveProperty : public LogicalOperator {
     database::GraphDbAccessor &db_;
     const std::unique_ptr<Cursor> input_cursor_;
   };
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    SavePointer(ar, lhs_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    LoadPointer(ar, lhs_);
-  }
 };
 
 /**
@@ -1536,16 +1178,6 @@ class RemoveLabels : public LogicalOperator {
     const RemoveLabels &self_;
     const std::unique_ptr<Cursor> input_cursor_;
   };
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &input_symbol_;
-    ar &labels_;
-  }
 };
 
 /**
@@ -1606,16 +1238,6 @@ class ExpandUniquenessFilter : public LogicalOperator {
     const ExpandUniquenessFilter &self_;
     const std::unique_ptr<Cursor> input_cursor_;
   };
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &expand_symbol_;
-    ar &previous_symbols_;
-  }
 };
 
 /** @brief Pulls everything from the input before passing it through.
@@ -1684,16 +1306,6 @@ class Accumulate : public LogicalOperator {
     decltype(cache_.begin()) cache_it_ = cache_.begin();
     bool pulled_all_input_{false};
   };
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &symbols_;
-    ar &advance_command_;
-  }
 };
 
 /**
@@ -1730,27 +1342,6 @@ class Aggregate : public LogicalOperator {
     Expression *key;
     Aggregation::Op op;
     Symbol output_sym;
-
-   private:
-    friend class boost::serialization::access;
-
-    BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-    template <class TArchive>
-    void save(TArchive &ar, const unsigned int) const {
-      SavePointer(ar, value);
-      SavePointer(ar, key);
-      ar &op;
-      ar &output_sym;
-    }
-
-    template <class TArchive>
-    void load(TArchive &ar, const unsigned int) {
-      LoadPointer(ar, value);
-      LoadPointer(ar, key);
-      ar &op;
-      ar &output_sym;
-    }
   };
 
   Aggregate(const std::shared_ptr<LogicalOperator> &input,
@@ -1859,28 +1450,6 @@ class Aggregate : public LogicalOperator {
      * an appropriate exception is thrown. */
     void EnsureOkForAvgSum(const TypedValue &value) const;
   };
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &aggregations_;
-    SavePointers(ar, group_by_);
-    ar &remember_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &aggregations_;
-    LoadPointers(ar, group_by_);
-    ar &remember_;
-  }
 };
 
 /** @brief Skips a number of Pulls from the input op.
@@ -1931,24 +1500,6 @@ class Skip : public LogicalOperator {
     int to_skip_{-1};
     int skipped_{0};
   };
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    SavePointer(ar, expression_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    LoadPointer(ar, expression_);
-  }
 };
 
 /** @brief Limits the number of Pulls from the input op.
@@ -2002,24 +1553,6 @@ class Limit : public LogicalOperator {
     int limit_{-1};
     int pulled_{0};
   };
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    SavePointer(ar, expression_);
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    LoadPointer(ar, expression_);
-  }
 };
 
 /** @brief Logical operator for ordering (sorting) results.
@@ -2082,28 +1615,6 @@ class OrderBy : public LogicalOperator {
     // iterator over the cache_, maintains state between Pulls
     decltype(cache_.begin()) cache_it_ = cache_.begin();
   };
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &compare_;
-    SavePointers(ar, order_by_);
-    ar &output_symbols_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &compare_;
-    LoadPointers(ar, order_by_);
-    ar &output_symbols_;
-  }
 };
 
 /**
@@ -2166,16 +1677,6 @@ class Merge : public LogicalOperator {
     //  - previous Pull from this cursor exhausted the merge_match_cursor
     bool pull_input_{true};
   };
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &merge_match_;
-    ar &merge_create_;
-  }
 };
 
 /**
@@ -2230,16 +1731,6 @@ class Optional : public LogicalOperator {
     //  - previous Pull from this cursor exhausted the optional_cursor_
     bool pull_input_{true};
   };
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &optional_;
-    ar &optional_symbols_;
-  }
 };
 
 /**
@@ -2287,26 +1778,6 @@ class Unwind : public LogicalOperator {
     // current position in input_value_
     std::vector<TypedValue>::iterator input_value_it_ = input_value_.end();
   };
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    SavePointer(ar, input_expression_);
-    ar &output_symbol_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    LoadPointer(ar, input_expression_);
-    ar &output_symbol_;
-  }
 };
 
 /**
@@ -2358,15 +1829,6 @@ class Distinct : public LogicalOperator {
         TypedValueVectorEqual>
         seen_rows_;
   };
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &value_symbols_;
-  }
 };
 
 /**
@@ -2399,15 +1861,6 @@ class CreateIndex : public LogicalOperator {
   storage::Property property_;
 
   CreateIndex() {}
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &label_;
-    ar &property_;
-  }
 };
 
 /**
@@ -2450,126 +1903,6 @@ class Union : public LogicalOperator {
     const Union &self_;
     const std::unique_ptr<Cursor> left_cursor_, right_cursor_;
   };
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &left_op_;
-    ar &right_op_;
-    ar &union_symbols_;
-    ar &left_symbols_;
-    ar &right_symbols_;
-  }
-};
-
-/**
- * An operator in distributed Memgraph that yields both local and remote (from
- * other workers) frames. Obtaining remote frames is done through RPC calls to
- * `distributed::ProduceRpcServer`s running on all the workers.
- *
- * This operator aims to yield results as fast as possible and lose minimal
- * time on data transfer. It gives no guarantees on result order.
- */
-class PullRemote : public LogicalOperator {
- public:
-  PullRemote(const std::shared_ptr<LogicalOperator> &input, int64_t plan_id,
-             const std::vector<Symbol> &symbols)
-      : input_(input), plan_id_(plan_id), symbols_(symbols) {}
-  bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override;
-  std::unique_ptr<Cursor> MakeCursor(
-      database::GraphDbAccessor &db) const override;
-  std::vector<Symbol> OutputSymbols(const SymbolTable &) const override;
-  std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override;
-
-  bool HasSingleInput() const override { return true; }
-  std::shared_ptr<LogicalOperator> input() const override { return input_; }
-  void set_input(std::shared_ptr<LogicalOperator> input) override {
-    input_ = input;
-  }
-
-  const auto &symbols() const { return symbols_; }
-  auto plan_id() const { return plan_id_; }
-
- private:
-  std::shared_ptr<LogicalOperator> input_;
-  int64_t plan_id_ = 0;
-  std::vector<Symbol> symbols_;
-
-  PullRemote() {}
-
-  friend class boost::serialization::access;
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &plan_id_;
-    ar &symbols_;
-  }
-};
-
-/**
- * Operator used to synchronize stages of plan execution between the master and
- * all the workers. Synchronization is necessary in queries that update the
- * graph state because updates (as well as creations and deletions) are deferred
- * to avoid multithreaded modification of graph element data (as it's not
- * thread-safe).
- *
- * Logic of the synchronize operator is:
- *
- * 1. If there is a PullRemote, tell all the workers to pull on that plan and
- *    accumulate results without sending them to the master. This is async.
- * 2. Accumulate local results, in parallel with 1. getting executed on workers.
- * 3. Wait till the master and all the workers are done accumulating.
- * 4. Advance the command, if necessary.
- * 5. Tell all the workers to apply their updates. This is async.
- * 6. Apply local updates, in parallel with 5. on the workers.
- * 7. Notify workers that the command has advanced, if necessary.
- * 8. Yield all the results, first local, then from PullRemote if available.
- */
-class Synchronize : public LogicalOperator {
- public:
-  Synchronize(const std::shared_ptr<LogicalOperator> &input,
-              const std::shared_ptr<PullRemote> &pull_remote,
-              bool advance_command)
-      : input_(input),
-        pull_remote_(pull_remote),
-        advance_command_(advance_command) {}
-  bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override;
-  std::unique_ptr<Cursor> MakeCursor(
-      database::GraphDbAccessor &db) const override;
-  std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override;
-
-  std::vector<Symbol> OutputSymbols(
-      const SymbolTable &symbol_table) const override {
-    return input_->OutputSymbols(symbol_table);
-  }
-
-  bool HasSingleInput() const override { return true; }
-  std::shared_ptr<LogicalOperator> input() const override { return input_; }
-  void set_input(std::shared_ptr<LogicalOperator> input) override {
-    input_ = input;
-  }
-
-  auto pull_remote() const { return pull_remote_; }
-  auto advance_command() const { return advance_command_; }
-
- private:
-  std::shared_ptr<LogicalOperator> input_;
-  std::shared_ptr<PullRemote> pull_remote_;
-  bool advance_command_ = false;
-
-  Synchronize() {}
-
-  friend class boost::serialization::access;
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &pull_remote_;
-    ar &advance_command_;
-  }
 };
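To make the eight-step flow described in the Synchronize comment above concrete, here is a minimal standalone sketch of the same control flow using std::async. The helper functions are hypothetical stand-ins rather than Memgraph APIs, and command advancement (steps 4 and 7) is left out.

#include <future>
#include <iostream>
#include <vector>

// Hypothetical stand-ins that only model the control flow: remote and local
// accumulation run in parallel, and deferred updates are applied only after
// both sides have finished accumulating.
std::vector<int> AccumulateOnWorkers() { return {1, 2, 3}; }  // steps 1 and 3
std::vector<int> AccumulateLocally() { return {4, 5}; }       // step 2
void ApplyUpdatesOnWorkers() {}                               // step 5
void ApplyUpdatesLocally() {}                                 // step 6

int main() {
  // Steps 1-2: start remote accumulation asynchronously and accumulate local
  // results in parallel with it.
  auto remote = std::async(std::launch::async, AccumulateOnWorkers);
  auto local = AccumulateLocally();

  // Step 3: wait until both the master and the workers are done.
  auto remote_results = remote.get();

  // Steps 5-6: apply the deferred updates, again with remote work in parallel.
  auto applied = std::async(std::launch::async, ApplyUpdatesOnWorkers);
  ApplyUpdatesLocally();
  applied.get();

  // Step 8: yield local results first, then the remote ones.
  for (int r : local) std::cout << r << '\n';
  for (int r : remote_results) std::cout << r << '\n';
}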
 
 /** Operator for producing a Cartesian product from 2 input branches */
@@ -2606,119 +1939,7 @@ class Cartesian : public LogicalOperator {
   std::vector<Symbol> right_symbols_;
 
   Cartesian() {}
-
-  friend class boost::serialization::access;
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &left_op_;
-    ar &left_symbols_;
-    ar &right_op_;
-    ar &right_symbols_;
-  }
-};
-
-/**
- * Operator that merges distributed OrderBy operators.
- *
- * Instead of using a regular OrderBy on master (which would collect all remote
- * results and order them), we can have each worker do an OrderBy locally and
- * have the master rely on the fact that the results are ordered and merge them,
- * holding only one result from each worker at a time.
- */
-class PullRemoteOrderBy : public LogicalOperator {
- public:
-  PullRemoteOrderBy(
-      const std::shared_ptr<LogicalOperator> &input, int64_t plan_id,
-      const std::vector<std::pair<Ordering, Expression *>> &order_by,
-      const std::vector<Symbol> &symbols);
-  bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override;
-  std::unique_ptr<Cursor> MakeCursor(
-      database::GraphDbAccessor &db) const override;
-
-  std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override;
-  std::vector<Symbol> OutputSymbols(const SymbolTable &) const override;
-
-  bool HasSingleInput() const override { return true; }
-  std::shared_ptr<LogicalOperator> input() const override { return input_; }
-  void set_input(std::shared_ptr<LogicalOperator> input) override {
-    input_ = input;
-  }
-
-  auto plan_id() const { return plan_id_; }
-  const auto &symbols() const { return symbols_; }
-  auto order_by() const { return order_by_; }
-  const auto &compare() const { return compare_; }
-
- private:
-  std::shared_ptr<LogicalOperator> input_;
-  int64_t plan_id_ = 0;
-  std::vector<Symbol> symbols_;
-  std::vector<Expression *> order_by_;
-  TypedValueVectorCompare compare_;
-
-  PullRemoteOrderBy() {}
-
-  friend class boost::serialization::access;
-
-  BOOST_SERIALIZATION_SPLIT_MEMBER();
-
-  template <class TArchive>
-  void save(TArchive &ar, const unsigned int) const {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &plan_id_;
-    ar &symbols_;
-    SavePointers(ar, order_by_);
-    ar &compare_;
-  }
-
-  template <class TArchive>
-  void load(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<LogicalOperator>(*this);
-    ar &input_;
-    ar &plan_id_;
-    ar &symbols_;
-    LoadPointers(ar, order_by_);
-    ar &compare_;
-  }
 };
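The merge strategy described for PullRemoteOrderBy above is an ordinary k-way merge of already sorted streams: the master keeps at most one pending result per worker and repeatedly takes the smallest one. A self-contained sketch in plain C++, with no Memgraph types involved:

#include <functional>
#include <iostream>
#include <queue>
#include <tuple>
#include <vector>

// Each inner vector plays the role of one worker's locally ordered results.
std::vector<int> KWayMerge(const std::vector<std::vector<int>> &workers) {
  // Min-heap of (value, worker index, position within that worker's stream).
  using Entry = std::tuple<int, size_t, size_t>;
  std::priority_queue<Entry, std::vector<Entry>, std::greater<Entry>> heap;
  for (size_t w = 0; w < workers.size(); ++w)
    if (!workers[w].empty()) heap.emplace(workers[w][0], w, 0);

  std::vector<int> merged;
  while (!heap.empty()) {
    auto [value, w, i] = heap.top();
    heap.pop();
    merged.push_back(value);
    // Only after consuming a worker's current head do we fetch its next
    // result, so at most one result per worker is held at any time.
    if (i + 1 < workers[w].size()) heap.emplace(workers[w][i + 1], w, i + 1);
  }
  return merged;
}

int main() {
  for (int v : KWayMerge({{1, 4, 7}, {2, 5}, {3, 6, 8}})) std::cout << v << ' ';
  std::cout << '\n';  // prints: 1 2 3 4 5 6 7 8
}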
 
 }  // namespace plan
 }  // namespace query
-
-BOOST_CLASS_EXPORT_KEY(query::plan::Once);
-BOOST_CLASS_EXPORT_KEY(query::plan::CreateNode);
-BOOST_CLASS_EXPORT_KEY(query::plan::CreateExpand);
-BOOST_CLASS_EXPORT_KEY(query::plan::ScanAll);
-BOOST_CLASS_EXPORT_KEY(query::plan::ScanAllByLabel);
-BOOST_CLASS_EXPORT_KEY(query::plan::ScanAllByLabelPropertyRange);
-BOOST_CLASS_EXPORT_KEY(query::plan::ScanAllByLabelPropertyValue);
-BOOST_CLASS_EXPORT_KEY(query::plan::Expand);
-BOOST_CLASS_EXPORT_KEY(query::plan::ExpandVariable);
-BOOST_CLASS_EXPORT_KEY(query::plan::Filter);
-BOOST_CLASS_EXPORT_KEY(query::plan::Produce);
-BOOST_CLASS_EXPORT_KEY(query::plan::ConstructNamedPath);
-BOOST_CLASS_EXPORT_KEY(query::plan::Delete);
-BOOST_CLASS_EXPORT_KEY(query::plan::SetProperty);
-BOOST_CLASS_EXPORT_KEY(query::plan::SetProperties);
-BOOST_CLASS_EXPORT_KEY(query::plan::SetLabels);
-BOOST_CLASS_EXPORT_KEY(query::plan::RemoveProperty);
-BOOST_CLASS_EXPORT_KEY(query::plan::RemoveLabels);
-BOOST_CLASS_EXPORT_KEY(query::plan::ExpandUniquenessFilter<EdgeAccessor>);
-BOOST_CLASS_EXPORT_KEY(query::plan::ExpandUniquenessFilter<VertexAccessor>);
-BOOST_CLASS_EXPORT_KEY(query::plan::Accumulate);
-BOOST_CLASS_EXPORT_KEY(query::plan::Aggregate);
-BOOST_CLASS_EXPORT_KEY(query::plan::Skip);
-BOOST_CLASS_EXPORT_KEY(query::plan::Limit);
-BOOST_CLASS_EXPORT_KEY(query::plan::OrderBy);
-BOOST_CLASS_EXPORT_KEY(query::plan::Merge);
-BOOST_CLASS_EXPORT_KEY(query::plan::Optional);
-BOOST_CLASS_EXPORT_KEY(query::plan::Unwind);
-BOOST_CLASS_EXPORT_KEY(query::plan::Distinct);
-BOOST_CLASS_EXPORT_KEY(query::plan::CreateIndex);
-BOOST_CLASS_EXPORT_KEY(query::plan::Union);
-BOOST_CLASS_EXPORT_KEY(query::plan::PullRemote);
-BOOST_CLASS_EXPORT_KEY(query::plan::Synchronize);
-BOOST_CLASS_EXPORT_KEY(query::plan::Cartesian);
-BOOST_CLASS_EXPORT_KEY(query::plan::PullRemoteOrderBy);
diff --git a/src/stats/metrics.cpp b/src/stats/metrics.cpp
deleted file mode 100644
index 69552f45b..000000000
--- a/src/stats/metrics.cpp
+++ /dev/null
@@ -1,105 +0,0 @@
-#include "stats/metrics.hpp"
-
-#include <tuple>
-
-#include "fmt/format.h"
-#include "glog/logging.h"
-
-namespace stats {
-
-std::mutex &MetricsMutex() {
-  static std::mutex mutex;
-  return mutex;
-}
-
-std::map<std::string, std::unique_ptr<Metric>> &AccessMetrics() {
-  static std::map<std::string, std::unique_ptr<Metric>> metrics;
-  MetricsMutex().lock();
-  return metrics;
-}
-
-void ReleaseMetrics() { MetricsMutex().unlock(); }
-
-Metric::Metric(int64_t start_value) : value_(start_value) {}
-
-Counter::Counter(int64_t start_value) : Metric(start_value) {}
-
-void Counter::Bump(int64_t delta) { value_ += delta; }
-
-std::experimental::optional<int64_t> Counter::Flush() { return value_; }
-
-int64_t Counter::Value() { return value_; }
-
-Gauge::Gauge(int64_t start_value) : Metric(start_value) {}
-
-void Gauge::Set(int64_t value) { value_ = value; }
-
-std::experimental::optional<int64_t> Gauge::Flush() { return value_; }
-
-IntervalMin::IntervalMin(int64_t start_value) : Metric(start_value) {}
-
-void IntervalMin::Add(int64_t value) {
-  int64_t curr = value_;
-  while (curr > value && !value_.compare_exchange_weak(curr, value))
-    ;
-}
-
-std::experimental::optional<int64_t> IntervalMin::Flush() {
-  int64_t curr = value_;
-  value_.compare_exchange_weak(curr, std::numeric_limits<int64_t>::max());
-  return curr == std::numeric_limits<int64_t>::max()
-             ? std::experimental::nullopt
-             : std::experimental::make_optional(curr);
-}
-
-IntervalMax::IntervalMax(int64_t start_value) : Metric(start_value) {}
-
-void IntervalMax::Add(int64_t value) {
-  int64_t curr = value_;
-  while (curr < value && !value_.compare_exchange_weak(curr, value))
-    ;
-}
-
-std::experimental::optional<int64_t> IntervalMax::Flush() {
-  int64_t curr = value_;
-  value_.compare_exchange_weak(curr, std::numeric_limits<int64_t>::min());
-  return curr == std::numeric_limits<int64_t>::min()
-             ? std::experimental::nullopt
-             : std::experimental::make_optional(curr);
-}
-
-template <class T>
-T &GetMetric(const std::string &name, int64_t start_value) {
-  auto &metrics = AccessMetrics();
-  auto it = metrics.find(name);
-  if (it == metrics.end()) {
-    auto got = metrics.emplace(name, std::make_unique<T>(start_value));
-    CHECK(got.second) << "Failed to create metric " << name;
-    it = got.first;
-  }
-  ReleaseMetrics();
-  auto *ptr = dynamic_cast<T *>(it->second.get());
-  if (!ptr) {
-    LOG(FATAL) << fmt::format("GetMetric({}) called with invalid metric type",
-                              name);
-  }
-  return *ptr;
-}
-
-Counter &GetCounter(const std::string &name, int64_t start_value) {
-  return GetMetric<Counter>(name, start_value);
-}
-
-Gauge &GetGauge(const std::string &name, int64_t start_value) {
-  return GetMetric<Gauge>(name, start_value);
-}
-
-IntervalMin &GetIntervalMin(const std::string &name) {
-  return GetMetric<IntervalMin>(name, std::numeric_limits<int64_t>::max());
-}
-
-IntervalMax &GetIntervalMax(const std::string &name) {
-  return GetMetric<IntervalMax>(name, std::numeric_limits<int64_t>::min());
-}
-
-}  // namespace stats
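IntervalMin::Add and IntervalMax::Add above maintain a running minimum or maximum without locks by retrying a compare-and-swap until the stored value no longer needs updating. The same pattern as a standalone example (standard C++ only; the thread count and values are arbitrary):

#include <atomic>
#include <cstdint>
#include <iostream>
#include <limits>
#include <thread>
#include <vector>

// Lock-free running maximum: retry the CAS until either our value is no
// longer larger than the stored one, or we manage to install it.
void AtomicMax(std::atomic<int64_t> &stored, int64_t value) {
  int64_t curr = stored.load();
  while (curr < value && !stored.compare_exchange_weak(curr, value)) {
    // compare_exchange_weak reloads `curr` on failure, so each iteration
    // re-checks against the freshest stored value.
  }
}

int main() {
  std::atomic<int64_t> max{std::numeric_limits<int64_t>::min()};
  std::vector<std::thread> threads;
  for (int t = 0; t < 4; ++t)
    threads.emplace_back([&max, t] {
      for (int64_t v = t; v < 1000; v += 4) AtomicMax(max, v);
    });
  for (auto &th : threads) th.join();
  std::cout << max.load() << '\n';  // prints: 999
}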
diff --git a/src/stats/metrics.hpp b/src/stats/metrics.hpp
deleted file mode 100644
index c13bcff18..000000000
--- a/src/stats/metrics.hpp
+++ /dev/null
@@ -1,202 +0,0 @@
-/**
- * @file
- *
- * This file contains some metrics types that can be aggregated on client side
- * and periodically flushed to StatsD.
- */
-#pragma once
-
-#include <atomic>
-#include <experimental/optional>
-#include <map>
-#include <memory>
-#include <mutex>
-#include <string>
-
-#include "fmt/format.h"
-
-namespace stats {
-
-// TODO(mtomic): it would probably be nice to have a Value method for every
-// metric type; however, there is no use case for this yet
-
-/**
- * Abstract base class for all metrics.
- */
-class Metric {
- public:
-  /**
-   * Constructs a metric to be exported to StatsD.
-   *
-   * @param start_value initial value
-   */
-  explicit Metric(int64_t start_value = 0);
-
-  virtual ~Metric() {}
-
-  /**
-   * The metric refresh thread periodically calls this function. It should
-   * return the metric value aggregated since the last flush call, or nullopt
-   * if there were no updates.
-   */
-  virtual std::experimental::optional<int64_t> Flush() = 0;
-
- protected:
-  std::atomic<int64_t> value_;
-};
-
-/**
- * A simple counter.
- */
-class Counter : public Metric {
- public:
-  explicit Counter(int64_t start_value = 0);
-
-  /**
-   * Change counter value by delta.
-   *
-   * @param delta value change
-   */
-  void Bump(int64_t delta = 1);
-
-  /** Returns the current value of the counter. **/
-  std::experimental::optional<int64_t> Flush() override;
-
-  /** Returns the current value of the counter. **/
-  int64_t Value();
-
-  friend Counter &GetCounter(const std::string &name);
-};
-
-/**
- * To be used instead of the Counter constructor. If a counter with this name
- * doesn't exist, it will be initialized with start_value.
- *
- * @param name        counter name
- * @param start_value start value
- */
-Counter &GetCounter(const std::string &name, int64_t start_value = 0);
-
-/**
- * A simple gauge. Gauge value is explicitly set, instead of being added to or
- * subtracted from.
- */
-class Gauge : public Metric {
- public:
-  explicit Gauge(int64_t start_value = 0);
-
-  /**
-   * Set gauge value.
-   *
-   * @param value value to be set
-   */
-  void Set(int64_t value);
-
-  /** Returns the current gauge value. **/
-  std::experimental::optional<int64_t> Flush() override;
-};
-
-/**
- * To be used instead of the Gauge constructor. If a gauge with this name
- * doesn't exist, it will be initialized with start_value.
- *
- * @param name        gauge name
- * @param start_value start value
- */
-Gauge &GetGauge(const std::string &name, int64_t start_value = 0);
-
-/**
- * Aggregates minimum between two flush periods.
- */
-class IntervalMin : public Metric {
- public:
-  explicit IntervalMin(int64_t start_value);
-
-  /**
-   * Add another value into the minimum computation.
-   *
-   * @param value value to be added
-   */
-  void Add(int64_t value);
-
-  /**
-   * Returns the minimum value encountered since the last flush period,
-   * or nullopt if no values were added.
-   */
-  std::experimental::optional<int64_t> Flush() override;
-};
-
-/**
- * To be used instead of IntervalMin constructor.
- *
- * @param name        interval min name
- */
-IntervalMin &GetIntervalMin(const std::string &name);
-
-/**
- * Aggregates maximum between two flush periods.
- */
-class IntervalMax : public Metric {
- public:
-  explicit IntervalMax(int64_t start_value);
-
-  /**
-   * Add another value into the maximum computation.
-   */
-  void Add(int64_t value);
-
-  /**
-   * Returns the maximum value encountered since the last flush period,
-   * or nullopt if no values were added.
-   */
-  std::experimental::optional<int64_t> Flush() override;
-};
-
-/**
- * To be used instead of IntervalMax constructor.
- *
- * @param name        interval max name
- */
-IntervalMax &GetIntervalMax(const std::string &name);
-
-/**
- * A stopwatch utility. It exports 4 metrics: total time measured since the
- * beginning of the program, total number of time intervals measured, and the
- * minimum and maximum time interval measured since the last metric flush.
- * Metrics exported by the stopwatch will be named
- * [name].{total_time|count|min|max}.
- *
- * @param name timed event name
- * @param f Callable, an action to be performed.
- */
-template <class Function>
-int64_t Stopwatch(const std::string &name, Function f) {
-  auto &total_time = GetCounter(fmt::format("{}.total_time", name));
-  auto &count = GetCounter(fmt::format("{}.count", name));
-  auto &min = GetIntervalMin(fmt::format("{}.min", name));
-  auto &max = GetIntervalMax(fmt::format("{}.max", name));
-  auto start = std::chrono::system_clock::now();
-  f();
-  auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(
-                      std::chrono::system_clock::now() - start)
-                      .count();
-  total_time.Bump(duration);
-  count.Bump();
-  min.Add(duration);
-  max.Add(duration);
-  return duration;
-}
-
-/**
- * Access internal metric list. You probably don't want to use this,
- * but if you do, make sure to call ReleaseMetrics when you're done.
- */
-std::map<std::string, std::unique_ptr<Metric>> &AccessMetrics();
-
-/**
- * Releases internal lock on metric list.
- */
-void ReleaseMetrics();
-
-}  // namespace stats
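For context on what is being removed, a short usage sketch of the metrics API declared in the header above; the metric names here are made up for illustration:

#include "stats/metrics.hpp"  // the header deleted by this patch

void HandleRequest() { /* ... */ }

void Example() {
  // Counters and gauges are fetched (and lazily created) by name.
  auto &requests = stats::GetCounter("server.requests");
  requests.Bump();  // default delta of 1

  stats::GetGauge("server.active_sessions").Set(42);

  // Stopwatch times the callable and exports the
  // query.{total_time|count|min|max} metrics.
  stats::Stopwatch("query", [] { HandleRequest(); });
}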
diff --git a/src/stats/stats.cpp b/src/stats/stats.cpp
deleted file mode 100644
index 2abc28c63..000000000
--- a/src/stats/stats.cpp
+++ /dev/null
@@ -1,113 +0,0 @@
-#include "stats/stats.hpp"
-
-#include "glog/logging.h"
-
-#include "communication/rpc/client.hpp"
-#include "data_structures/concurrent/push_queue.hpp"
-#include "utils/thread.hpp"
-
-#include "stats/stats_rpc_messages.hpp"
-
-DEFINE_HIDDEN_string(statsd_address, "", "Stats server IP address");
-DEFINE_HIDDEN_int32(statsd_port, 2500, "Stats server port");
-DEFINE_HIDDEN_int32(statsd_flush_interval, 500,
-                    "Stats flush interval (in milliseconds)");
-
-namespace stats {
-
-std::string statsd_prefix = "";
-std::thread stats_dispatch_thread;
-std::thread counter_refresh_thread;
-std::atomic<bool> stats_running{false};
-ConcurrentPushQueue<StatsReq> stats_queue;
-
-void RefreshMetrics() {
-  LOG(INFO) << "Metrics flush thread started";
-  utils::ThreadSetName("Stats refresh");
-  while (stats_running) {
-    auto &metrics = AccessMetrics();
-    for (auto &kv : metrics) {
-      auto value = kv.second->Flush();
-      if (value) {
-        LogStat(kv.first, *value);
-      }
-    }
-    ReleaseMetrics();
-    // TODO(mtomic): hardcoded sleep time
-    std::this_thread::sleep_for(std::chrono::seconds(1));
-  }
-  LOG(INFO) << "Metrics flush thread stopped";
-}
-
-void StatsDispatchMain(const io::network::Endpoint &endpoint) {
-  // TODO(mtomic): we probably want to batch based on request size and MTU
-  const int MAX_BATCH_SIZE = 100;
-
-  LOG(INFO) << "Stats dispatcher thread started";
-  utils::ThreadSetName("Stats dispatcher");
-
-  communication::rpc::Client client(endpoint);
-
-  BatchStatsReq batch_request;
-  batch_request.requests.reserve(MAX_BATCH_SIZE);
-
-  while (stats_running) {
-    auto last = stats_queue.begin();
-    size_t sent = 0, total = 0;
-
-    auto flush_batch = [&] {
-      if (client.Call<BatchStatsRpc>(batch_request)) {
-        sent += batch_request.requests.size();
-      }
-      total += batch_request.requests.size();
-      batch_request.requests.clear();
-    };
-
-    for (auto it = last; it != stats_queue.end(); it++) {
-      batch_request.requests.emplace_back(std::move(*it));
-      if (batch_request.requests.size() == MAX_BATCH_SIZE) {
-        flush_batch();
-      }
-    }
-
-    if (!batch_request.requests.empty()) {
-      flush_batch();
-    }
-
-    VLOG(30) << fmt::format("Sent {} out of {} events from queue.", sent,
-                            total);
-    last.delete_tail();
-    std::this_thread::sleep_for(
-        std::chrono::milliseconds(FLAGS_statsd_flush_interval));
-  }
-}
-
-void LogStat(const std::string &metric_path, double value,
-             const std::vector<std::pair<std::string, std::string>> &tags) {
-  if (stats_running) {
-    stats_queue.push(statsd_prefix + metric_path, tags, value);
-  }
-}
-
-void InitStatsLogging(std::string prefix) {
-  if (!prefix.empty()) {
-    statsd_prefix = prefix + ".";
-  }
-  if (FLAGS_statsd_address != "") {
-    stats_running = true;
-    stats_dispatch_thread = std::thread(
-        StatsDispatchMain, io::network::Endpoint{FLAGS_statsd_address,
-                                                 (uint16_t)FLAGS_statsd_port});
-    counter_refresh_thread = std::thread(RefreshMetrics);
-  }
-}
-
-void StopStatsLogging() {
-  if (stats_running) {
-    stats_running = false;
-    stats_dispatch_thread.join();
-    counter_refresh_thread.join();
-  }
-}
-
-}  // namespace stats
diff --git a/src/stats/stats.hpp b/src/stats/stats.hpp
deleted file mode 100644
index b3dd2f703..000000000
--- a/src/stats/stats.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-/// @file
-
-#pragma once
-
-#include <thread>
-#include <vector>
-
-#include "gflags/gflags.h"
-
-#include "stats/metrics.hpp"
-
-namespace stats {
-
-/**
- * Start sending metrics to StatsD server.
- *
- * @param prefix prefix to prepend to exported keys
- */
-void InitStatsLogging(std::string prefix = "");
-
-/**
- * Stop sending metrics to StatsD server. This should be called before exiting
- * program.
- */
-void StopStatsLogging();
-
-/**
- * Send a value to StatsD with current timestamp.
- */
-void LogStat(const std::string &metric_path, double value,
-             const std::vector<std::pair<std::string, std::string>> &tags = {});
-
-}  // namespace stats
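Likewise, a sketch of how the removed stats logging lifecycle was meant to be used, based on the declarations above; the prefix, key, and tag are illustrative only:

#include "stats/stats.hpp"  // the header deleted by this patch

int main() {
  // Background dispatch/refresh threads start only if --statsd_address is set.
  stats::InitStatsLogging("memgraph.master");

  // The key is sent as "memgraph.master.bolt.connections" with a timestamp.
  stats::LogStat("bolt.connections", 1, {{"worker", "0"}});

  // Join the background threads before exiting.
  stats::StopStatsLogging();
  return 0;
}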
diff --git a/src/stats/stats_rpc_messages.hpp b/src/stats/stats_rpc_messages.hpp
deleted file mode 100644
index b5106097c..000000000
--- a/src/stats/stats_rpc_messages.hpp
+++ /dev/null
@@ -1,62 +0,0 @@
-#pragma once
-
-#include "boost/serialization/access.hpp"
-#include "boost/serialization/base_object.hpp"
-#include "boost/serialization/string.hpp"
-#include "boost/serialization/utility.hpp"
-#include "boost/serialization/vector.hpp"
-
-#include "communication/rpc/messages.hpp"
-#include "utils/timestamp.hpp"
-
-namespace stats {
-
-struct StatsReq : public communication::rpc::Message {
-  StatsReq() {}
-  StatsReq(std::string metric_path,
-           std::vector<std::pair<std::string, std::string>> tags, double value)
-      : metric_path(metric_path),
-        tags(tags),
-        value(value),
-        timestamp(utils::Timestamp::Now().SecSinceTheEpoch()) {}
-
-  std::string metric_path;
-  std::vector<std::pair<std::string, std::string>> tags;
-  double value;
-  uint64_t timestamp;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &boost::serialization::base_object<communication::rpc::Message>(*this);
-    ar &metric_path &tags &value &timestamp;
-  }
-};
-
-RPC_NO_MEMBER_MESSAGE(StatsRes);
-
-struct BatchStatsReq : public communication::rpc::Message {
-  BatchStatsReq() {}
-  explicit BatchStatsReq(std::vector<StatsReq> requests) : requests(requests) {}
-
-  std::vector<StatsReq> requests;
-
- private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &boost::serialization::base_object<communication::rpc::Message>(*this);
-    ar &requests;
-  }
-};
-
-RPC_NO_MEMBER_MESSAGE(BatchStatsRes);
-
-using StatsRpc = communication::rpc::RequestResponse<StatsReq, StatsRes>;
-using BatchStatsRpc =
-    communication::rpc::RequestResponse<BatchStatsReq, BatchStatsRes>;
-
-}  // namespace stats
diff --git a/src/storage/address.hpp b/src/storage/address.hpp
index 93e83dcba..8d4d9cf75 100644
--- a/src/storage/address.hpp
+++ b/src/storage/address.hpp
@@ -2,7 +2,6 @@
 
 #include <cstdint>
 
-#include "boost/serialization/access.hpp"
 #include "glog/logging.h"
 
 #include "storage/gid.hpp"
@@ -91,11 +90,5 @@ class Address {
 
  private:
   StorageT storage_{0};
-
-  friend class boost::serialization::access;
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &storage_;
-  }
 };
 }  // namespace storage
diff --git a/src/storage/concurrent_id_mapper_master.cpp b/src/storage/concurrent_id_mapper_master.cpp
deleted file mode 100644
index d8df45f5a..000000000
--- a/src/storage/concurrent_id_mapper_master.cpp
+++ /dev/null
@@ -1,46 +0,0 @@
-#include "glog/logging.h"
-
-#include "storage/concurrent_id_mapper_master.hpp"
-#include "storage/concurrent_id_mapper_rpc_messages.hpp"
-#include "storage/types.hpp"
-
-namespace storage {
-
-namespace {
-template <typename TId>
-void RegisterRpc(MasterConcurrentIdMapper<TId> &mapper,
-                 communication::rpc::Server &rpc_server);
-#define ID_VALUE_RPC_CALLS(type)                                              \
-  template <>                                                                 \
-  void RegisterRpc<type>(MasterConcurrentIdMapper<type> & mapper,             \
-                         communication::rpc::Server & rpc_server) {           \
-    rpc_server.Register<type##IdRpc>([&mapper](const type##IdReq &req) {      \
-      return std::make_unique<type##IdRes>(mapper.value_to_id(req.member));   \
-    });                                                                       \
-    rpc_server.Register<Id##type##Rpc>([&mapper](const Id##type##Req &req) {  \
-      return std::make_unique<Id##type##Res>(mapper.id_to_value(req.member)); \
-    });                                                                       \
-  }
-
-using namespace storage;
-ID_VALUE_RPC_CALLS(Label)
-ID_VALUE_RPC_CALLS(EdgeType)
-ID_VALUE_RPC_CALLS(Property)
-#undef ID_VALUE_RPC_CALLS
-}  // namespace
-
-template <typename TId>
-MasterConcurrentIdMapper<TId>::MasterConcurrentIdMapper(
-    communication::rpc::Server &server)
-    // We have to make sure our rpc server name is unique with regards to type.
-    // Otherwise we will try to reuse the same rpc server name for different
-    // types (Label/EdgeType/Property)
-    : rpc_server_(server) {
-  RegisterRpc(*this, rpc_server_);
-}
-
-template class MasterConcurrentIdMapper<Label>;
-template class MasterConcurrentIdMapper<EdgeType>;
-template class MasterConcurrentIdMapper<Property>;
-
-}  // namespace storage
diff --git a/src/storage/concurrent_id_mapper_master.hpp b/src/storage/concurrent_id_mapper_master.hpp
deleted file mode 100644
index 563b634fa..000000000
--- a/src/storage/concurrent_id_mapper_master.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-#pragma once
-
-#include <experimental/optional>
-
-#include "communication/rpc/server.hpp"
-#include "data_structures/concurrent/concurrent_map.hpp"
-#include "storage/concurrent_id_mapper_single_node.hpp"
-
-namespace storage {
-
-/** Master implementation of ConcurrentIdMapper. */
-template <typename TId>
-class MasterConcurrentIdMapper : public SingleNodeConcurrentIdMapper<TId> {
- public:
-  explicit MasterConcurrentIdMapper(communication::rpc::Server &server);
-
- private:
-  communication::rpc::Server &rpc_server_;
-};
-}  // namespace storage
diff --git a/src/storage/concurrent_id_mapper_rpc_messages.hpp b/src/storage/concurrent_id_mapper_rpc_messages.hpp
deleted file mode 100644
index 06e1d7f87..000000000
--- a/src/storage/concurrent_id_mapper_rpc_messages.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-#pragma once
-
-#include <chrono>
-
-#include "communication/rpc/messages.hpp"
-#include "storage/types.hpp"
-#include "transactions/commit_log.hpp"
-#include "transactions/snapshot.hpp"
-#include "transactions/type.hpp"
-
-namespace storage {
-
-#define ID_VALUE_RPC(type)                                           \
-  RPC_SINGLE_MEMBER_MESSAGE(type##IdReq, std::string);               \
-  RPC_SINGLE_MEMBER_MESSAGE(type##IdRes, storage::type);             \
-  using type##IdRpc =                                                \
-      communication::rpc::RequestResponse<type##IdReq, type##IdRes>; \
-  RPC_SINGLE_MEMBER_MESSAGE(Id##type##Req, storage::type);           \
-  RPC_SINGLE_MEMBER_MESSAGE(Id##type##Res, std::string);             \
-  using Id##type##Rpc =                                              \
-      communication::rpc::RequestResponse<Id##type##Req, Id##type##Res>;
-
-ID_VALUE_RPC(Label)
-ID_VALUE_RPC(EdgeType)
-ID_VALUE_RPC(Property)
-
-#undef ID_VALUE_RPC
-
-}  // namespace storage
diff --git a/src/storage/concurrent_id_mapper_worker.cpp b/src/storage/concurrent_id_mapper_worker.cpp
deleted file mode 100644
index 85902702c..000000000
--- a/src/storage/concurrent_id_mapper_worker.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-#include "glog/logging.h"
-
-#include "concurrent_id_mapper_worker.hpp"
-#include "storage/concurrent_id_mapper_rpc_messages.hpp"
-#include "storage/types.hpp"
-
-namespace storage {
-
-#define ID_VALUE_RPC_CALLS(type)                                      \
-  template <>                                                         \
-  type WorkerConcurrentIdMapper<type>::RpcValueToId(                  \
-      const std::string &value) {                                     \
-    auto response = master_client_pool_.Call<type##IdRpc>(value);     \
-    CHECK(response) << (#type "IdRpc failed");                        \
-    return response->member;                                          \
-  }                                                                   \
-                                                                      \
-  template <>                                                         \
-  std::string WorkerConcurrentIdMapper<type>::RpcIdToValue(type id) { \
-    auto response = master_client_pool_.Call<Id##type##Rpc>(id);      \
-    CHECK(response) << ("Id" #type "Rpc failed");                     \
-    return response->member;                                          \
-  }
-
-using namespace storage;
-ID_VALUE_RPC_CALLS(Label)
-ID_VALUE_RPC_CALLS(EdgeType)
-ID_VALUE_RPC_CALLS(Property)
-
-#undef ID_VALUE_RPC_CALLS
-
-template <typename TId>
-WorkerConcurrentIdMapper<TId>::WorkerConcurrentIdMapper(
-    communication::rpc::ClientPool &master_client_pool)
-    : master_client_pool_(master_client_pool) {}
-
-template <typename TId>
-TId WorkerConcurrentIdMapper<TId>::value_to_id(const std::string &value) {
-  auto accessor = value_to_id_cache_.access();
-  auto found = accessor.find(value);
-  if (found != accessor.end()) return found->second;
-
-  TId id = RpcValueToId(value);
-  accessor.insert(value, id);
-  return id;
-}
-
-template <typename TId>
-const std::string &WorkerConcurrentIdMapper<TId>::id_to_value(const TId &id) {
-  auto accessor = id_to_value_cache_.access();
-  auto found = accessor.find(id);
-  if (found != accessor.end()) return found->second;
-  std::string value = RpcIdToValue(id);
-  return accessor.insert(id, value).first->second;
-}
-
-template class WorkerConcurrentIdMapper<Label>;
-template class WorkerConcurrentIdMapper<EdgeType>;
-template class WorkerConcurrentIdMapper<Property>;
-}  // namespace storage
diff --git a/src/storage/concurrent_id_mapper_worker.hpp b/src/storage/concurrent_id_mapper_worker.hpp
deleted file mode 100644
index 5a45299a8..000000000
--- a/src/storage/concurrent_id_mapper_worker.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-#pragma once
-
-#include "communication/rpc/client_pool.hpp"
-#include "data_structures/concurrent/concurrent_map.hpp"
-#include "io/network/endpoint.hpp"
-#include "storage/concurrent_id_mapper.hpp"
-
-namespace storage {
-
-/** Worker implementation of ConcurrentIdMapper. */
-template <typename TId>
-class WorkerConcurrentIdMapper : public ConcurrentIdMapper<TId> {
-  // Makes an appropriate RPC call for the current TId type and the given value.
-  TId RpcValueToId(const std::string &value);
-
-  // Makes an appropriate RPC call for the current TId type and the given value.
-  std::string RpcIdToValue(TId id);
-
- public:
-  WorkerConcurrentIdMapper(communication::rpc::ClientPool &master_client_pool);
-
-  TId value_to_id(const std::string &value) override;
-  const std::string &id_to_value(const TId &id) override;
-
- private:
-  // Sources of truth for the mappings are on the master, not on this worker. We
-  // keep the caches.
-  ConcurrentMap<std::string, TId> value_to_id_cache_;
-  ConcurrentMap<TId, std::string> id_to_value_cache_;
-
-  // Communication to the concurrent ID master.
-  communication::rpc::ClientPool &master_client_pool_;
-};
-}  // namespace storage
diff --git a/src/storage/record_accessor.cpp b/src/storage/record_accessor.cpp
index 5e1d90416..988c4a4ec 100644
--- a/src/storage/record_accessor.cpp
+++ b/src/storage/record_accessor.cpp
@@ -2,8 +2,6 @@
 
 #include "database/graph_db_accessor.hpp"
 #include "database/state_delta.hpp"
-#include "distributed/data_manager.hpp"
-#include "distributed/updates_rpc_clients.hpp"
 #include "query/exceptions.hpp"
 #include "storage/edge.hpp"
 #include "storage/record_accessor.hpp"
@@ -20,8 +18,7 @@ RecordAccessor<TRecord>::RecordAccessor(AddressT address,
 }
 
 template <typename TRecord>
-PropertyValue RecordAccessor<TRecord>::PropsAt(
-    storage::Property key) const {
+PropertyValue RecordAccessor<TRecord>::PropsAt(storage::Property key) const {
   return current().properties_.at(key);
 }
 
@@ -32,9 +29,7 @@ void RecordAccessor<Vertex>::PropsSet(storage::Property key,
   auto delta = StateDelta::PropsSetVertex(dba.transaction_id(), gid(), key,
                                           dba.PropertyName(key), value);
   update().properties_.set(key, value);
-  if (is_local()) {
-    dba.UpdatePropertyIndex(key, *this, &update());
-  }
+  dba.UpdatePropertyIndex(key, *this, &update());
   ProcessDelta(delta);
 }
 
@@ -96,7 +91,7 @@ database::GraphDbAccessor &RecordAccessor<TRecord>::db_accessor() const {
 
 template <typename TRecord>
 gid::Gid RecordAccessor<TRecord>::gid() const {
-  return is_local() ? address_.local()->gid_ : address_.gid();
+  return address_.local()->gid_;
 }
 
 template <typename TRecord>
@@ -108,26 +103,20 @@ typename RecordAccessor<TRecord>::AddressT RecordAccessor<TRecord>::address()
 template <typename TRecord>
 typename RecordAccessor<TRecord>::AddressT
 RecordAccessor<TRecord>::GlobalAddress() const {
-  return is_local() ? storage::Address<mvcc::VersionList<TRecord>>(
-                          gid(), db_accessor_->db_.WorkerId())
-                    : address_;
+  return storage::Address<mvcc::VersionList<TRecord>>(
+      gid(), db_accessor_->db_.WorkerId());
 }
 
 template <typename TRecord>
 RecordAccessor<TRecord> &RecordAccessor<TRecord>::SwitchNew() {
-  if (is_local()) {
-    if (!new_) {
-      // if new_ is not set yet, look for it
-      // we can just Reconstruct the pointers, old_ will get initialized
-      // to the same value as it has now, and the amount of work is the
-      // same as just looking for a new_ record
-      if (!Reconstruct())
-        DLOG(FATAL)
-            << "RecordAccessor::SwitchNew - accessor invalid after Reconstruct";
-    }
-  } else {
-    // A remote record only sees local updates, until the command is advanced.
-    // So this does nothing, as the old/new switch happens below.
+  if (!new_) {
+    // if new_ is not set yet, look for it
+    // we can just Reconstruct the pointers, old_ will get initialized
+    // to the same value as it has now, and the amount of work is the
+    // same as just looking for a new_ record
+    if (!Reconstruct())
+      DLOG(FATAL)
+          << "RecordAccessor::SwitchNew - accessor invalid after Reconstruct";
   }
   current_ = new_ ? new_ : old_;
   return *this;
@@ -142,19 +131,7 @@ RecordAccessor<TRecord> &RecordAccessor<TRecord>::SwitchOld() {
 template <typename TRecord>
 bool RecordAccessor<TRecord>::Reconstruct() const {
   auto &dba = db_accessor();
-  if (is_local()) {
-    address_.local()->find_set_old_new(dba.transaction(), old_, new_);
-  } else {
-    // It's not possible that we have a global address for a graph element
-    // that's local, because that is resolved in the constructor.
-    // TODO in write queries it's possible the command has been advanced and
-    // we need to invalidate the Cache and really get the latest stuff.
-    // But only do that after the command has been advanced.
-    auto &cache = dba.db().data_manager().template Elements<TRecord>(
-        dba.transaction_id());
-    cache.FindSetOldNew(dba.transaction().id_, address_.worker_id(),
-                        address_.gid(), old_, new_);
-  }
+  address_.local()->find_set_old_new(dba.transaction(), old_, new_);
   current_ = old_ ? old_ : new_;
   return old_ != nullptr || new_ != nullptr;
 }
@@ -176,13 +153,8 @@ TRecord &RecordAccessor<TRecord>::update() const {
 
   if (new_) return *new_;
 
-  if (is_local()) {
-    new_ = address_.local()->update(t);
-  } else {
-    auto &cache = dba.db().data_manager().template Elements<TRecord>(
-        dba.transaction_id());
-    new_ = cache.FindNew(address_.gid());
-  }
+  new_ = address_.local()->update(t);
+
   DCHECK(new_ != nullptr) << "RecordAccessor.new_ is null after update";
   return *new_;
 }
@@ -196,36 +168,10 @@ const TRecord &RecordAccessor<TRecord>::current() const {
   return *current_;
 }
 
-template <typename TRecord>
-void RecordAccessor<TRecord>::SendDelta(
-    const database::StateDelta &delta) const {
-  DCHECK(!is_local())
-      << "Only a delta created on a remote accessor should be sent";
-
-  auto result =
-      db_accessor().db().updates_clients().Update(address().worker_id(), delta);
-  switch (result) {
-    case distributed::UpdateResult::DONE:
-      break;
-    case distributed::UpdateResult::UNABLE_TO_DELETE_VERTEX_ERROR:
-      throw query::RemoveAttachedVertexException();
-    case distributed::UpdateResult::SERIALIZATION_ERROR:
-      throw mvcc::SerializationError();
-    case distributed::UpdateResult::UPDATE_DELETED_ERROR:
-      throw RecordDeletedError();
-    case distributed::UpdateResult::LOCK_TIMEOUT_ERROR:
-      throw LockTimeoutException("Lock timeout on remote worker");
-  }
-}
-
 template <typename TRecord>
 void RecordAccessor<TRecord>::ProcessDelta(
     const database::StateDelta &delta) const {
-  if (is_local()) {
-    db_accessor().wal().Emplace(delta);
-  } else {
-    SendDelta(delta);
-  }
+  db_accessor().wal().Emplace(delta);
 }
 
 template class RecordAccessor<Vertex>;
diff --git a/src/storage/record_accessor.hpp b/src/storage/record_accessor.hpp
index 017074db4..4a21e2895 100644
--- a/src/storage/record_accessor.hpp
+++ b/src/storage/record_accessor.hpp
@@ -147,11 +147,6 @@ class RecordAccessor : public utils::TotalOrdering<RecordAccessor<TRecord>> {
   }
 
  protected:
-  /**
-   * Sends delta for remote processing.
-   */
-  void SendDelta(const database::StateDelta &delta) const;
-
   /**
    * Processes delta by either adding it to WAL, or by sending it remotely.
    */
diff --git a/src/storage/types.hpp b/src/storage/types.hpp
index f97f93d09..cf0d52212 100644
--- a/src/storage/types.hpp
+++ b/src/storage/types.hpp
@@ -3,8 +3,6 @@
 #include <cstdint>
 #include <functional>
 
-#include "boost/serialization/base_object.hpp"
-
 #include "utils/total_ordering.hpp"
 
 namespace storage {
@@ -33,47 +31,19 @@ class Common : public utils::TotalOrdering<TSpecificType> {
   };
 
  private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &storage_;
-  }
-
   StorageT storage_{0};
 };
 
 class Label : public Common<Label> {
   using Common::Common;
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<Common<Label>>(*this);
-  }
 };
 
 class EdgeType : public Common<EdgeType> {
   using Common::Common;
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<Common<EdgeType>>(*this);
-  }
 };
 
 class Property : public Common<Property> {
   using Common::Common;
-
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, const unsigned int) {
-    ar &boost::serialization::base_object<Common<Property>>(*this);
-  }
 };
 };  // namespace storage
 
diff --git a/src/storage/vertex_accessor.cpp b/src/storage/vertex_accessor.cpp
index 4793ee655..5fcee862c 100644
--- a/src/storage/vertex_accessor.cpp
+++ b/src/storage/vertex_accessor.cpp
@@ -18,13 +18,9 @@ void VertexAccessor::add_label(storage::Label label) {
   // not a duplicate label, add it
   if (!utils::Contains(vertex.labels_, label)) {
     vertex.labels_.emplace_back(label);
-    if (is_local()) {
-      dba.wal().Emplace(delta);
-      dba.UpdateLabelIndices(label, *this, &vertex);
-    }
+    dba.wal().Emplace(delta);
+    dba.UpdateLabelIndices(label, *this, &vertex);
   }
-
-  if (!is_local()) SendDelta(delta);
 }
 
 void VertexAccessor::remove_label(storage::Label label) {
@@ -37,12 +33,8 @@ void VertexAccessor::remove_label(storage::Label label) {
     auto found = std::find(labels.begin(), labels.end(), delta.label);
     std::swap(*found, labels.back());
     labels.pop_back();
-    if (is_local()) {
-      dba.wal().Emplace(delta);
-    }
+    dba.wal().Emplace(delta);
   }
-
-  if (!is_local()) SendDelta(delta);
 }
 
 bool VertexAccessor::has_label(storage::Label label) const {
diff --git a/src/transactions/commit_log.hpp b/src/transactions/commit_log.hpp
index e446f59a0..ae611655b 100644
--- a/src/transactions/commit_log.hpp
+++ b/src/transactions/commit_log.hpp
@@ -1,7 +1,5 @@
 #pragma once
 
-#include "boost/serialization/access.hpp"
-
 #include "data_structures/bitset/dynamic_bitset.hpp"
 #include "type.hpp"
 
@@ -58,13 +56,6 @@ class CommitLog {
     operator uint8_t() const { return flags_; }
 
    private:
-    friend class boost::serialization::access;
-
-    template <class TArchive>
-    void serialize(TArchive &ar, unsigned int) {
-      ar &flags_;
-    }
-
     uint8_t flags_{0};
   };
 
diff --git a/src/transactions/engine_master.cpp b/src/transactions/engine_master.cpp
deleted file mode 100644
index 5f984b486..000000000
--- a/src/transactions/engine_master.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-#include <limits>
-#include <mutex>
-
-#include "glog/logging.h"
-
-#include "database/state_delta.hpp"
-#include "transactions/engine_master.hpp"
-#include "transactions/engine_rpc_messages.hpp"
-
-namespace tx {
-
-MasterEngine::MasterEngine(communication::rpc::Server &server,
-                           distributed::RpcWorkerClients &rpc_worker_clients,
-                           durability::WriteAheadLog *wal)
-    : SingleNodeEngine(wal),
-      rpc_server_(server),
-      ongoing_produce_joiner_(rpc_worker_clients) {
-  rpc_server_.Register<BeginRpc>([this](const BeginReq &) {
-    auto tx = Begin();
-    return std::make_unique<BeginRes>(TxAndSnapshot{tx->id_, tx->snapshot()});
-  });
-
-  rpc_server_.Register<AdvanceRpc>([this](const AdvanceReq &req) {
-    return std::make_unique<AdvanceRes>(Advance(req.member));
-  });
-
-  rpc_server_.Register<CommitRpc>([this](const CommitReq &req) {
-    Commit(*RunningTransaction(req.member));
-    return std::make_unique<CommitRes>();
-  });
-
-  rpc_server_.Register<AbortRpc>([this](const AbortReq &req) {
-    Abort(*RunningTransaction(req.member));
-    return std::make_unique<AbortRes>();
-  });
-
-  rpc_server_.Register<SnapshotRpc>([this](const SnapshotReq &req) {
-    // It is guaranteed that the Worker will not be requesting this for a
-    // transaction that's done, and that there are no race conditions here.
-    return std::make_unique<SnapshotRes>(
-        RunningTransaction(req.member)->snapshot());
-  });
-
-  rpc_server_.Register<CommandRpc>([this](const CommandReq &req) {
-    // It is guaranteed that the Worker will not be requesting this for a
-    // transaction that's done, and that there are no race conditions here.
-    return std::make_unique<CommandRes>(RunningTransaction(req.member)->cid());
-  });
-
-  rpc_server_.Register<GcSnapshotRpc>(
-      [this](const communication::rpc::Message &) {
-        return std::make_unique<SnapshotRes>(GlobalGcSnapshot());
-      });
-
-  rpc_server_.Register<ClogInfoRpc>([this](const ClogInfoReq &req) {
-    return std::make_unique<ClogInfoRes>(Info(req.member));
-  });
-
-  rpc_server_.Register<ActiveTransactionsRpc>(
-      [this](const communication::rpc::Message &) {
-        return std::make_unique<SnapshotRes>(GlobalActiveTransactions());
-      });
-
-  rpc_server_.Register<EnsureNextIdGreaterRpc>(
-      [this](const EnsureNextIdGreaterReq &req) {
-        EnsureNextIdGreater(req.member);
-        return std::make_unique<EnsureNextIdGreaterRes>();
-      });
-
-  rpc_server_.Register<GlobalLastRpc>([this](const GlobalLastReq &) {
-    return std::make_unique<GlobalLastRes>(GlobalLast());
-  });
-}
-
-void MasterEngine::Commit(const Transaction &t) {
-  ongoing_produce_joiner_.JoinOngoingProduces(t.id_);
-  SingleNodeEngine::Commit(t);
-}
-
-void MasterEngine::Abort(const Transaction &t) {
-  ongoing_produce_joiner_.JoinOngoingProduces(t.id_);
-  SingleNodeEngine::Abort(t);
-}
-
-}  // namespace tx
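For readers skimming this removal, here is a minimal, self-contained sketch (hypothetical class and method names, no RPC or WAL) of the override pattern the deleted MasterEngine::Commit/Abort relied on: run the distributed coordination step first, then delegate to the single-node base implementation.

```cpp
#include <cstdint>
#include <iostream>

using TransactionId = uint64_t;
struct Transaction {
  TransactionId id_;
};

class SingleNodeEngine {
 public:
  virtual ~SingleNodeEngine() = default;
  virtual void Commit(const Transaction &t) {
    std::cout << "single-node commit of tx " << t.id_ << "\n";
  }
};

class MasterEngine : public SingleNodeEngine {
 public:
  void Commit(const Transaction &t) override {
    JoinOngoingProduces(t.id_);   // wait for remote work tied to this tx
    SingleNodeEngine::Commit(t);  // then perform the regular commit
  }

 private:
  void JoinOngoingProduces(TransactionId id) {
    std::cout << "joining ongoing produces for tx " << id << "\n";
  }
};

int main() {
  MasterEngine engine;
  engine.Commit(Transaction{42});
  return 0;
}
```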
diff --git a/src/transactions/engine_master.hpp b/src/transactions/engine_master.hpp
deleted file mode 100644
index 5dc9b9e95..000000000
--- a/src/transactions/engine_master.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-#pragma once
-
-#include "communication/rpc/server.hpp"
-#include "distributed/rpc_worker_clients.hpp"
-#include "transactions/engine_single_node.hpp"
-
-namespace tx {
-
-/** Distributed master transaction engine. Has complete engine functionality and
- * exposes an RPC server to be used by distributed Workers. */
-class MasterEngine : public SingleNodeEngine {
- public:
-  /**
-   * @param server - Required. Used for rpc::Server construction.
-   * @param rpc_worker_clients - Required. Used for
-   * OngoingProduceJoinerRpcClients construction.
-   * @param wal - Optional. If present, the Engine will write tx
-   * Begin/Commit/Abort atomically (while under lock).
-   */
-  MasterEngine(communication::rpc::Server &server,
-               distributed::RpcWorkerClients &rpc_worker_clients,
-               durability::WriteAheadLog *wal = nullptr);
-  void Commit(const Transaction &t) override;
-  void Abort(const Transaction &t) override;
-
- private:
-  communication::rpc::Server &rpc_server_;
-  distributed::OngoingProduceJoinerRpcClients ongoing_produce_joiner_;
-};
-}  // namespace tx
diff --git a/src/transactions/engine_rpc_messages.hpp b/src/transactions/engine_rpc_messages.hpp
deleted file mode 100644
index 9f948813c..000000000
--- a/src/transactions/engine_rpc_messages.hpp
+++ /dev/null
@@ -1,70 +0,0 @@
-#pragma once
-
-#include "communication/rpc/messages.hpp"
-#include "transactions/commit_log.hpp"
-#include "transactions/snapshot.hpp"
-#include "transactions/type.hpp"
-
-namespace tx {
-
-RPC_NO_MEMBER_MESSAGE(BeginReq);
-struct TxAndSnapshot {
-  TransactionId tx_id;
-  Snapshot snapshot;
-
- private:
-  friend class boost::serialization::access;
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &tx_id;
-    ar &snapshot;
-  }
-};
-RPC_SINGLE_MEMBER_MESSAGE(BeginRes, TxAndSnapshot);
-using BeginRpc = communication::rpc::RequestResponse<BeginReq, BeginRes>;
-
-RPC_SINGLE_MEMBER_MESSAGE(AdvanceReq, TransactionId);
-RPC_SINGLE_MEMBER_MESSAGE(AdvanceRes, CommandId);
-using AdvanceRpc = communication::rpc::RequestResponse<AdvanceReq, AdvanceRes>;
-
-RPC_SINGLE_MEMBER_MESSAGE(CommitReq, TransactionId);
-RPC_NO_MEMBER_MESSAGE(CommitRes);
-using CommitRpc = communication::rpc::RequestResponse<CommitReq, CommitRes>;
-
-RPC_SINGLE_MEMBER_MESSAGE(AbortReq, TransactionId);
-RPC_NO_MEMBER_MESSAGE(AbortRes);
-using AbortRpc = communication::rpc::RequestResponse<AbortReq, AbortRes>;
-
-RPC_SINGLE_MEMBER_MESSAGE(SnapshotReq, TransactionId);
-RPC_SINGLE_MEMBER_MESSAGE(SnapshotRes, Snapshot);
-using SnapshotRpc =
-    communication::rpc::RequestResponse<SnapshotReq, SnapshotRes>;
-
-RPC_SINGLE_MEMBER_MESSAGE(CommandReq, TransactionId);
-RPC_SINGLE_MEMBER_MESSAGE(CommandRes, CommandId);
-using CommandRpc = communication::rpc::RequestResponse<CommandReq, CommandRes>;
-
-RPC_NO_MEMBER_MESSAGE(GcSnapshotReq);
-using GcSnapshotRpc =
-    communication::rpc::RequestResponse<GcSnapshotReq, SnapshotRes>;
-
-RPC_SINGLE_MEMBER_MESSAGE(ClogInfoReq, TransactionId);
-RPC_SINGLE_MEMBER_MESSAGE(ClogInfoRes, CommitLog::Info);
-using ClogInfoRpc =
-    communication::rpc::RequestResponse<ClogInfoReq, ClogInfoRes>;
-
-RPC_NO_MEMBER_MESSAGE(ActiveTransactionsReq);
-using ActiveTransactionsRpc =
-    communication::rpc::RequestResponse<ActiveTransactionsReq, SnapshotRes>;
-
-RPC_SINGLE_MEMBER_MESSAGE(EnsureNextIdGreaterReq, TransactionId);
-RPC_NO_MEMBER_MESSAGE(EnsureNextIdGreaterRes);
-using EnsureNextIdGreaterRpc =
-    communication::rpc::RequestResponse<EnsureNextIdGreaterReq,
-                                        EnsureNextIdGreaterRes>;
-
-RPC_NO_MEMBER_MESSAGE(GlobalLastReq);
-RPC_SINGLE_MEMBER_MESSAGE(GlobalLastRes, TransactionId);
-using GlobalLastRpc =
-    communication::rpc::RequestResponse<GlobalLastReq, GlobalLastRes>;
-}  // namespace tx
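As a rough guide to the macros used in the header above (RPC_NO_MEMBER_MESSAGE, RPC_SINGLE_MEMBER_MESSAGE), the following is a simplified, hypothetical sketch of what a request/response pair such as CommitReq/CommitRes amounts to; the RequestResponse template and constructors shown here are illustrative only, not the project's actual macro expansion.

```cpp
#include <cstdint>

using TransactionId = uint64_t;

// Ties a request type and a response type together, as the deleted
// `using ...Rpc = RequestResponse<Req, Res>` aliases did.
template <class TRequest, class TResponse>
struct RequestResponse {
  using Request = TRequest;
  using Response = TResponse;
};

// Roughly what RPC_SINGLE_MEMBER_MESSAGE(CommitReq, TransactionId) declares:
// a message carrying a single payload member.
struct CommitReq {
  CommitReq() = default;
  explicit CommitReq(TransactionId member) : member(member) {}
  TransactionId member{0};
};

// Roughly what RPC_NO_MEMBER_MESSAGE(CommitRes) declares: an empty message.
struct CommitRes {};

using CommitRpc = RequestResponse<CommitReq, CommitRes>;

int main() {
  CommitRpc::Request req(17);  // request to commit transaction 17
  return req.member == 17 ? 0 : 1;
}
```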
diff --git a/src/transactions/engine_single_node.cpp b/src/transactions/engine_single_node.cpp
index dacc4c233..2c30f2b14 100644
--- a/src/transactions/engine_single_node.cpp
+++ b/src/transactions/engine_single_node.cpp
@@ -4,7 +4,6 @@
 #include "glog/logging.h"
 
 #include "database/state_delta.hpp"
-#include "transactions/engine_rpc_messages.hpp"
 #include "transactions/engine_single_node.hpp"
 
 namespace tx {
diff --git a/src/transactions/engine_worker.cpp b/src/transactions/engine_worker.cpp
deleted file mode 100644
index 2066d60e6..000000000
--- a/src/transactions/engine_worker.cpp
+++ /dev/null
@@ -1,194 +0,0 @@
-#include <chrono>
-
-#include "glog/logging.h"
-
-#include "transactions/engine_rpc_messages.hpp"
-#include "transactions/engine_worker.hpp"
-#include "utils/atomic.hpp"
-
-namespace tx {
-
-WorkerEngine::WorkerEngine(communication::rpc::ClientPool &master_client_pool)
-    : master_client_pool_(master_client_pool) {}
-
-WorkerEngine::~WorkerEngine() {
-  for (auto &kv : active_.access()) {
-    delete kv.second;
-  }
-}
-
-Transaction *WorkerEngine::Begin() {
-  auto res = master_client_pool_.Call<BeginRpc>();
-  CHECK(res) << "BeginRpc failed";
-  auto &data = res->member;
-  UpdateOldestActive(data.snapshot, data.tx_id);
-  Transaction *tx = new Transaction(data.tx_id, data.snapshot, *this);
-  auto insertion = active_.access().insert(data.tx_id, tx);
-  CHECK(insertion.second) << "Failed to start creation from worker";
-  VLOG(11) << "[Tx] Starting worker transaction " << data.tx_id;
-  return tx;
-}
-
-CommandId WorkerEngine::Advance(TransactionId tx_id) {
-  auto res = master_client_pool_.Call<AdvanceRpc>(tx_id);
-  CHECK(res) << "AdvanceRpc failed";
-  auto access = active_.access();
-  auto found = access.find(tx_id);
-  CHECK(found != access.end())
-      << "Can't advance a transaction not in local cache";
-  found->second->cid_ = res->member;
-  return res->member;
-}
-
-CommandId WorkerEngine::UpdateCommand(TransactionId tx_id) {
-  auto res = master_client_pool_.Call<CommandRpc>(tx_id);
-  CHECK(res) << "CommandRpc failed";
-  auto cmd_id = res->member;
-
-  // Assume there is no concurrent work being done on this worker in the given
-  // transaction. This assumption is sound because command advancing needs to be
-  // done in a synchronized fashion, while no workers are executing in that
-  // transaction. That assumption lets us freely modify the command id in the
-  // cached transaction object, and ensures there are no race conditions on
-  // caching a transaction object if it wasn't cached already.
-
-  auto access = active_.access();
-  auto found = access.find(tx_id);
-  if (found != access.end()) {
-    found->second->cid_ = cmd_id;
-  }
-  return cmd_id;
-}
-
-void WorkerEngine::Commit(const Transaction &t) {
-  auto res = master_client_pool_.Call<CommitRpc>(t.id_);
-  CHECK(res) << "CommitRpc failed";
-  VLOG(11) << "[Tx] Commiting worker transaction " << t.id_;
-  ClearSingleTransaction(t.id_);
-}
-
-void WorkerEngine::Abort(const Transaction &t) {
-  auto res = master_client_pool_.Call<AbortRpc>(t.id_);
-  CHECK(res) << "AbortRpc failed";
-  VLOG(11) << "[Tx] Aborting worker transaction " << t.id_;
-  ClearSingleTransaction(t.id_);
-}
-
-CommitLog::Info WorkerEngine::Info(TransactionId tid) const {
-  auto info = clog_.fetch_info(tid);
-  // If we don't know the transaction to be committed or aborted, ask the
-  // master about it and update the local commit log.
-  if (!(info.is_aborted() || info.is_committed())) {
-    // @review: this version of Call is just used because Info has no
-    // default constructor.
-    auto res = master_client_pool_.Call<ClogInfoRpc>(tid);
-    CHECK(res) << "ClogInfoRpc failed";
-    info = res->member;
-    if (!info.is_active()) {
-      if (info.is_committed()) clog_.set_committed(tid);
-      if (info.is_aborted()) clog_.set_aborted(tid);
-      ClearSingleTransaction(tid);
-    }
-  }
-
-  return info;
-}
-
-Snapshot WorkerEngine::GlobalGcSnapshot() {
-  auto res = master_client_pool_.Call<GcSnapshotRpc>();
-  CHECK(res) << "GcSnapshotRpc failed";
-  auto snapshot = std::move(res->member);
-  UpdateOldestActive(snapshot, local_last_.load());
-  return snapshot;
-}
-
-Snapshot WorkerEngine::GlobalActiveTransactions() {
-  auto res = master_client_pool_.Call<ActiveTransactionsRpc>();
-  CHECK(res) << "ActiveTransactionsRpc failed";
-  auto snapshot = std::move(res->member);
-  UpdateOldestActive(snapshot, local_last_.load());
-  return snapshot;
-}
-
-TransactionId WorkerEngine::LocalLast() const { return local_last_; }
-TransactionId WorkerEngine::GlobalLast() const {
-  auto res = master_client_pool_.Call<GlobalLastRpc>();
-  CHECK(res) << "GlobalLastRpc failed";
-  return res->member;
-}
-
-void WorkerEngine::LocalForEachActiveTransaction(
-    std::function<void(Transaction &)> f) {
-  for (auto pair : active_.access()) f(*pair.second);
-}
-
-TransactionId WorkerEngine::LocalOldestActive() const {
-  return oldest_active_;
-}
-
-Transaction *WorkerEngine::RunningTransaction(TransactionId tx_id) {
-  auto accessor = active_.access();
-  auto found = accessor.find(tx_id);
-  if (found != accessor.end()) return found->second;
-
-  auto res = master_client_pool_.Call<SnapshotRpc>(tx_id);
-  CHECK(res) << "SnapshotRpc failed";
-  auto snapshot = std::move(res->member);
-  UpdateOldestActive(snapshot, local_last_.load());
-  return RunningTransaction(tx_id, snapshot);
-}
-
-Transaction *WorkerEngine::RunningTransaction(TransactionId tx_id,
-                                              const Snapshot &snapshot) {
-  auto accessor = active_.access();
-  auto found = accessor.find(tx_id);
-  if (found != accessor.end()) return found->second;
-
-  auto new_tx = new Transaction(tx_id, snapshot, *this);
-  auto insertion = accessor.insert(tx_id, new_tx);
-  if (!insertion.second) delete new_tx;
-  utils::EnsureAtomicGe(local_last_, tx_id);
-  return insertion.first->second;
-}
-
-void WorkerEngine::ClearTransactionalCache(
-    TransactionId oldest_active) const {
-  auto access = active_.access();
-  for (auto kv : access) {
-    if (kv.first < oldest_active) {
-      auto transaction_ptr = kv.second;
-      if (access.remove(kv.first)) {
-        delete transaction_ptr;
-      }
-    }
-  }
-}
-
-void WorkerEngine::ClearSingleTransaction(TransactionId tx_id) const {
-  auto access = active_.access();
-  auto found = access.find(tx_id);
-  if (found != access.end()) {
-    auto transaction_ptr = found->second;
-    if (access.remove(found->first)) {
-      delete transaction_ptr;
-    }
-  }
-}
-
-void WorkerEngine::UpdateOldestActive(const Snapshot &snapshot,
-                                      TransactionId alternative) {
-  if (snapshot.empty()) {
-    oldest_active_.store(std::max(alternative, oldest_active_.load()));
-  } else {
-    oldest_active_.store(snapshot.front());
-  }
-}
-
-void WorkerEngine::EnsureNextIdGreater(TransactionId tx_id) {
-  master_client_pool_.Call<EnsureNextIdGreaterRpc>(tx_id);
-}
-
-void WorkerEngine::GarbageCollectCommitLog(TransactionId tx_id) {
-  clog_.garbage_collect_older(tx_id);
-}
-}  // namespace tx
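The cache-or-fetch logic in the deleted RunningTransaction is the core of the worker engine; below is a simplified sketch of that pattern, with a plain std::map and a caller-supplied fetch callback standing in for the concurrent map and the SnapshotRpc call, so it is single-threaded only.

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <memory>
#include <vector>

using TransactionId = uint64_t;
using Snapshot = std::vector<TransactionId>;

struct Transaction {
  TransactionId id;
  Snapshot snapshot;
};

class WorkerCache {
 public:
  // `fetch_snapshot` stands in for the SnapshotRpc round trip to the master.
  template <class FetchSnapshot>
  Transaction *RunningTransaction(TransactionId tx_id,
                                  FetchSnapshot fetch_snapshot) {
    auto found = active_.find(tx_id);
    if (found != active_.end()) return found->second.get();  // cache hit
    Snapshot snapshot = fetch_snapshot(tx_id);                // remote fetch
    auto inserted = active_.emplace(
        tx_id, std::make_unique<Transaction>(Transaction{tx_id, snapshot}));
    return inserted.first->second.get();
  }

 private:
  std::map<TransactionId, std::unique_ptr<Transaction>> active_;
};

int main() {
  WorkerCache cache;
  auto fetch = [](TransactionId id) { return Snapshot{id}; };
  Transaction *tx = cache.RunningTransaction(7, fetch);
  std::cout << tx->id << "\n";  // prints 7; a second call hits the cache
  return 0;
}
```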
diff --git a/src/transactions/engine_worker.hpp b/src/transactions/engine_worker.hpp
deleted file mode 100644
index 1c4aad26c..000000000
--- a/src/transactions/engine_worker.hpp
+++ /dev/null
@@ -1,74 +0,0 @@
-#pragma once
-
-#include <atomic>
-
-#include "communication/rpc/client_pool.hpp"
-#include "data_structures/concurrent/concurrent_map.hpp"
-#include "io/network/endpoint.hpp"
-#include "transactions/commit_log.hpp"
-#include "transactions/engine.hpp"
-#include "transactions/transaction.hpp"
-
-namespace tx {
-
-/** Distributed worker transaction engine. Connects to a MasterEngine (single
- * source of truth) to obtain transactional info. Caches most info locally. Can
- * begin/advance/end transactions on the master. */
-class WorkerEngine : public Engine {
- public:
-  /// The wait time between two releases of local transaction objects that have
-  /// expired on the master.
-  static constexpr std::chrono::seconds kCacheReleasePeriod{1};
-
-  explicit WorkerEngine(communication::rpc::ClientPool &master_client_pool);
-  ~WorkerEngine();
-
-  Transaction *Begin() override;
-  CommandId Advance(TransactionId id) override;
-  CommandId UpdateCommand(TransactionId id) override;
-  void Commit(const Transaction &t) override;
-  void Abort(const Transaction &t) override;
-  CommitLog::Info Info(TransactionId tid) const override;
-  Snapshot GlobalGcSnapshot() override;
-  Snapshot GlobalActiveTransactions() override;
-  TransactionId GlobalLast() const override;
-  TransactionId LocalLast() const override;
-  void LocalForEachActiveTransaction(
-      std::function<void(Transaction &)> f) override;
-  TransactionId LocalOldestActive() const override;
-  Transaction *RunningTransaction(TransactionId tx_id) override;
-
-  // Caches the transaction for the given info and returns a pointer to it.
-  Transaction *RunningTransaction(TransactionId tx_id,
-                                  const Snapshot &snapshot);
-
-  void EnsureNextIdGreater(TransactionId tx_id) override;
-  void GarbageCollectCommitLog(tx::TransactionId tx_id) override;
-
-  /// Clears the cache of local transactions that have expired. The signature of
-  /// this method is dictated by `distributed::TransactionalCacheCleaner`.
-  void ClearTransactionalCache(TransactionId oldest_active) const;
-
- private:
-  // Local caches.
-  mutable ConcurrentMap<TransactionId, Transaction *> active_;
-  std::atomic<TransactionId> local_last_{0};
-  // Mutable because just getting info can cause a cache fill.
-  mutable CommitLog clog_;
-
-  // Communication to the transactional master.
-  communication::rpc::ClientPool &master_client_pool_;
-
-  // Used for clearing of caches of transactions that have expired.
-  // Initialize the oldest_active_ with 1 because there's never a tx with id=0
-  std::atomic<TransactionId> oldest_active_{1};
-
-  // Removes a single transaction from the cache, if present.
-  void ClearSingleTransaction(TransactionId tx_Id) const;
-
-  // Updates the oldest active transaction to the one from the snapshot. If the
-  // snapshot is empty, it's set to the given alternative.
-  void UpdateOldestActive(const Snapshot &snapshot,
-                          TransactionId alternative);
-};
-}  // namespace tx
diff --git a/src/transactions/snapshot.hpp b/src/transactions/snapshot.hpp
index 3cc1ca0d6..51520c33d 100644
--- a/src/transactions/snapshot.hpp
+++ b/src/transactions/snapshot.hpp
@@ -4,9 +4,6 @@
 #include <iostream>
 #include <vector>
 
-#include "boost/serialization/access.hpp"
-#include "boost/serialization/vector.hpp"
-
 #include "glog/logging.h"
 #include "transactions/type.hpp"
 #include "utils/algorithm.hpp"
@@ -87,13 +84,6 @@ class Snapshot {
   }
 
  private:
-  friend class boost::serialization::access;
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &transaction_ids_;
-  }
-
   std::vector<TransactionId> transaction_ids_;
 };
 }  // namespace tx
diff --git a/src/utils/serialization.hpp b/src/utils/serialization.hpp
deleted file mode 100644
index 4bf2a6be5..000000000
--- a/src/utils/serialization.hpp
+++ /dev/null
@@ -1,193 +0,0 @@
-#pragma once
-
-#include <experimental/optional>
-
-#include "boost/serialization/split_free.hpp"
-#include "query/typed_value.hpp"
-#include "storage/edge.hpp"
-#include "storage/vertex.hpp"
-#include "utils/exceptions.hpp"
-
-namespace boost::serialization {
-
-namespace {
-
-template <size_t idx, class TArchive, class... Elements>
-void tuple_serialization_helper(TArchive &ar, std::tuple<Elements...> &tup) {
-  if constexpr (idx < sizeof...(Elements)) {
-    ar &std::get<idx>(tup);
-    tuple_serialization_helper<idx + 1, TArchive, Elements...>(ar, tup);
-  }
-}
-
-}  // namespace
-
-template <class TArchive, class... Elements>
-inline void serialize(TArchive &ar, std::tuple<Elements...> &tup,
-                      unsigned int) {
-  tuple_serialization_helper<0, TArchive, Elements...>(ar, tup);
-}
-
-template <class TArchive, class T>
-inline void serialize(TArchive &ar, std::experimental::optional<T> &opt,
-                      unsigned int version) {
-  split_free(ar, opt, version);
-}
-
-template <class TArchive, class T>
-void save(TArchive &ar, const std::experimental::optional<T> &opt,
-          unsigned int) {
-  ar << static_cast<bool>(opt);
-  if (opt) {
-    ar << *opt;
-  }
-}
-
-template <class TArchive, class T>
-void load(TArchive &ar, std::experimental::optional<T> &opt, unsigned int) {
-  bool has_value;
-  ar >> has_value;
-  if (has_value) {
-    T tmp;
-    ar >> tmp;
-    opt = std::move(tmp);
-  } else {
-    opt = std::experimental::nullopt;
-  }
-}
-
-}  // namespace boost::serialization
-
-namespace utils {
-
-/**
- * Saves the given value into the given Boost archive. The optional
- * `save_graph_element` function is called if the given `value` is a
- * [Vertex|Edge|Path]. If that function is not provided, and `value` is one of
- * those, an exception is thrown.
- */
-template <class TArchive>
-void SaveTypedValue(
-    TArchive &ar, const query::TypedValue &value,
-    std::function<void(TArchive &ar, const query::TypedValue &value)>
-        save_graph_element = nullptr) {
-  ar << value.type();
-  switch (value.type()) {
-    case query::TypedValue::Type::Null:
-      return;
-    case query::TypedValue::Type::Bool:
-      ar << value.Value<bool>();
-      return;
-    case query::TypedValue::Type::Int:
-      ar << value.Value<int64_t>();
-      return;
-    case query::TypedValue::Type::Double:
-      ar << value.Value<double>();
-      return;
-    case query::TypedValue::Type::String:
-      ar << value.Value<std::string>();
-      return;
-    case query::TypedValue::Type::List: {
-      const auto &values = value.Value<std::vector<query::TypedValue>>();
-      ar << values.size();
-      for (const auto &v : values) {
-        SaveTypedValue(ar, v, save_graph_element);
-      }
-      return;
-    }
-    case query::TypedValue::Type::Map: {
-      const auto &map = value.Value<std::map<std::string, query::TypedValue>>();
-      ar << map.size();
-      for (const auto &key_value : map) {
-        ar << key_value.first;
-        SaveTypedValue(ar, key_value.second, save_graph_element);
-      }
-      return;
-    }
-    case query::TypedValue::Type::Vertex:
-    case query::TypedValue::Type::Edge:
-    case query::TypedValue::Type::Path:
-      if (save_graph_element) {
-        save_graph_element(ar, value);
-      } else {
-        throw utils::BasicException("Unable to archive TypedValue of type: {}",
-                                    value.type());
-      }
-  }
-}
-
-/** Loads a typed value into the given reference from the given archive. The
- * optional `load_graph_element` function is called if a [Vertex|Edge|Path]
- * TypedValue should be unarchived. If that function is not provided, and
- * `value` is one of those, an exception is thrown.
- */
-template <class TArchive>
-void LoadTypedValue(TArchive &ar, query::TypedValue &value,
-                    std::function<void(TArchive &ar, query::TypedValue::Type,
-                                       query::TypedValue &)>
-                        load_graph_element = nullptr) {
-  query::TypedValue::Type type = query::TypedValue::Type::Null;
-  ar >> type;
-  switch (type) {
-    case query::TypedValue::Type::Null:
-      return;
-    case query::TypedValue::Type::Bool: {
-      bool v;
-      ar >> v;
-      value = v;
-      return;
-    }
-    case query::TypedValue::Type::Int: {
-      int64_t v;
-      ar >> v;
-      value = v;
-      return;
-    }
-    case query::TypedValue::Type::Double: {
-      double v;
-      ar >> v;
-      value = v;
-      return;
-    }
-    case query::TypedValue::Type::String: {
-      std::string v;
-      ar >> v;
-      value = v;
-      return;
-    }
-    case query::TypedValue::Type::List: {
-      value = std::vector<query::TypedValue>{};
-      auto &list = value.ValueList();
-      size_t size;
-      ar >> size;
-      list.reserve(size);
-      for (size_t i = 0; i < size; ++i) {
-        list.emplace_back();
-        LoadTypedValue(ar, list.back(), load_graph_element);
-      }
-      return;
-    }
-    case query::TypedValue::Type::Map: {
-      value = std::map<std::string, query::TypedValue>{};
-      auto &map = value.ValueMap();
-      size_t size;
-      ar >> size;
-      for (size_t i = 0; i < size; ++i) {
-        std::string key;
-        ar >> key;
-        LoadTypedValue(ar, map[key], load_graph_element);
-      }
-      return;
-    }
-    case query::TypedValue::Type::Vertex:
-    case query::TypedValue::Type::Edge:
-    case query::TypedValue::Type::Path:
-      if (load_graph_element) {
-        load_graph_element(ar, type, value);
-      } else {
-        throw utils::BasicException(
-            "Unexpected TypedValue type '{}' when loading from archive", type);
-      }
-  }
-}
-}  // namespace utils
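The optional save/load overloads removed above all follow one pattern: write a presence flag, then the value only if present. A minimal illustration of that scheme, assuming plain iostreams instead of Boost archives and std::optional instead of std::experimental::optional:

```cpp
#include <iostream>
#include <optional>
#include <sstream>

template <class T>
void Save(std::ostream &out, const std::optional<T> &opt) {
  out << static_cast<bool>(opt) << ' ';  // presence flag first
  if (opt) out << *opt << ' ';           // value only when present
}

template <class T>
void Load(std::istream &in, std::optional<T> &opt) {
  bool has_value = false;
  in >> has_value;
  if (has_value) {
    T tmp;
    in >> tmp;
    opt = std::move(tmp);
  } else {
    opt = std::nullopt;
  }
}

int main() {
  std::stringstream stream;
  Save(stream, std::optional<int>{42});
  std::optional<int> restored;
  Load(stream, restored);
  std::cout << (restored ? *restored : -1) << "\n";  // prints 42
  return 0;
}
```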
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index fa866032e..5218f7314 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -24,6 +24,3 @@ add_subdirectory(unit)
 
 # property based test binaries
 add_subdirectory(property_based)
-
-# raft binaries
-add_subdirectory(distributed/raft)
diff --git a/tests/distributed/card_fraud/.gitignore b/tests/distributed/card_fraud/.gitignore
deleted file mode 100644
index 6a72bbd8f..000000000
--- a/tests/distributed/card_fraud/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-output
-snapshots
diff --git a/tests/distributed/card_fraud/apollo_runs.py b/tests/distributed/card_fraud/apollo_runs.py
deleted file mode 100755
index 60c8aa543..000000000
--- a/tests/distributed/card_fraud/apollo_runs.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python3
-import json
-import os
-import re
-import subprocess
-
-from card_fraud import NUM_MACHINES, BINARIES
-
-# paths
-SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
-WORKSPACE_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", "..", "..", ".."))
-OUTPUT_DIR_REL = os.path.join(os.path.relpath(SCRIPT_DIR, WORKSPACE_DIR), "output")
-
-# generate runs
-runs = []
-
-binaries = list(map(lambda x: os.path.join("..", "..", "build_release", x), BINARIES))
-
-for i in range(NUM_MACHINES):
-    name = "master" if i == 0 else "worker" + str(i)
-    additional = ["master.py"] if i == 0 else []
-    outfile_paths = ["\\./" + OUTPUT_DIR_REL + "/.+"] if i == 0 else []
-    if i == 0:
-        cmd = "master.py --machines-num {0} --test-suite card_fraud " \
-                "--test card_fraud".format(NUM_MACHINES)
-    else:
-        cmd = "jail_service.py"
-    runs.append({
-        "name": "distributed__card_fraud__" + name,
-        "cd": "..",
-        "supervisor": cmd,
-        "infiles": binaries + [
-            "common.py",
-            "jail_service.py",
-            "card_fraud/card_fraud.py",
-            "card_fraud/snapshots/worker_" + str(i),
-        ] + additional,
-        "outfile_paths": outfile_paths,
-        "parallel_run": "distributed__card_fraud",
-        "slave_group": "remote_4c32g",
-        "enable_network": True,
-    })
-
-print(json.dumps(runs, indent=4, sort_keys=True))
diff --git a/tests/distributed/card_fraud/card_fraud.py b/tests/distributed/card_fraud/card_fraud.py
deleted file mode 100644
index c92f03864..000000000
--- a/tests/distributed/card_fraud/card_fraud.py
+++ /dev/null
@@ -1,222 +0,0 @@
-import json
-import os
-import time
-
-# to change the size of the cluster, just change this parameter
-NUM_MACHINES = 3
-
-# test setup
-SCENARIOS = ["point_lookup", "create_tx"]
-DURATION = 300
-WORKERS = 6
-
-# constants
-SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
-MEMGRAPH_BINARY = "memgraph"
-CLIENT_BINARY   = "tests/macro_benchmark/card_fraud_client"
-BINARIES = [MEMGRAPH_BINARY, CLIENT_BINARY]
-
-# wrappers
-class WorkerWrapper:
-    def __init__(self, address, worker):
-        self._address = address
-        self._worker = worker
-        self._tid = worker.get_jail()
-
-    def get_address(self):
-        return self._address
-
-    def __getattr__(self, name):
-        if name in ["allocate_file", "read_file", "store_label"]:
-            return getattr(self._worker, name)
-        def func(*args, **kwargs):
-            args = [self._tid] + list(args)
-            return getattr(self._worker, name)(*args, **kwargs)
-        return func
-
-class MgCluster:
-    def __init__(self, machine_ids, workers):
-        # create wrappers
-        self._master = WorkerWrapper(os.environ[machine_ids[0]],
-                workers[machine_ids[0]])
-        self._workers = []
-        for machine_id in machine_ids[1:]:
-            self._workers.append(WorkerWrapper(os.environ[machine_id],
-                    workers[machine_id]))
-
-    def start(self):
-        # start memgraph master
-        self._master.start(MEMGRAPH_BINARY, [
-            "--master",
-            "--master-host", self._master.get_address(),
-            "--master-port", "10000",
-            "--durability-directory", os.path.join(SCRIPT_DIR, "snapshots",
-                                                   "worker_0"),
-            "--db-recover-on-startup",
-            "--query-vertex-count-to-expand-existing", "-1",
-            "--num-workers", str(WORKERS),
-            "--rpc-num-workers", str(WORKERS),
-        ])
-
-        # sleep to allow the master to startup
-        time.sleep(5)
-
-        # start memgraph workers
-        for i, worker in enumerate(self._workers, start=1):
-            worker.start(MEMGRAPH_BINARY, [
-                "--worker", "--worker-id", str(i),
-                "--worker-host", worker.get_address(),
-                "--worker-port", str(10000 + i),
-                "--master-host", self._master.get_address(),
-                "--master-port", "10000",
-                "--durability-directory", os.path.join(SCRIPT_DIR, "snapshots",
-                                                       "worker_" + str(i)),
-                "--db-recover-on-startup",
-                "--num-workers", str(WORKERS),
-                "--rpc-num-workers", str(WORKERS),
-            ])
-
-        # sleep to allow the workers to startup
-        time.sleep(5)
-
-        # store initial usage
-        self._usage_start = [self._master.get_usage()]
-        for worker in self._workers:
-            self._usage_start.append(worker.get_usage())
-        self._usage_start_time = time.time()
-
-    def get_master_address(self):
-        return self._master.get_address()
-
-    def check_status(self):
-        if not self._master.check_status():
-            return False
-        for worker in self._workers:
-            if not worker.check_status():
-                return False
-        return True
-
-    def stop(self):
-        # store final usage
-        self._usage_stop = [self._master.get_usage()]
-        for worker in self._workers:
-            self._usage_stop.append(worker.get_usage())
-        self._usage_stop_time = time.time()
-
-        # stop the master
-        self._master.stop()
-
-        # wait to allow the master and workers to die
-        time.sleep(5)
-
-        # stop the workers
-        for worker in self._workers:
-            worker.stop()
-
-        # wait to allow the workers to die
-        time.sleep(5)
-
-    def get_usage(self):
-        ret = []
-        tdelta = self._usage_stop_time - self._usage_start_time
-        for val_start, val_stop in zip(self._usage_start, self._usage_stop):
-            data = {
-                "cpu": (val_stop["cpu"] - val_start["cpu"]) / tdelta,
-                "memory": val_stop["max_memory"] / 1024,
-                "threads": val_stop["max_threads"],
-                "network": {}
-            }
-            net_start = val_start["network"]["eth0"]
-            net_stop = val_stop["network"]["eth0"]
-            for i in ["bytes", "packets"]:
-                data["network"][i] = {}
-                for j in ["rx", "tx"]:
-                    data["network"][i][j] = (net_stop[i][j] -
-                            net_start[i][j]) / tdelta
-            ret.append(data)
-        return ret
-
-    def store_label(self, label):
-        self._master.store_label(label)
-        for worker in self._workers:
-            worker.store_label(label)
-
-def write_scenario_summary(scenario, throughput, usage, output):
-    output.write("Scenario **{}** throughput !!{:.2f}!! queries/s.\n\n".format(
-            scenario, throughput))
-    headers = ["Memgraph", "CPU", "Max memory", "Max threads",
-            "Network RX", "Network TX"]
-    output.write("<table>\n<tr>")
-    for header in headers:
-        output.write("<th>{}</th>".format(header))
-    output.write("</tr>\n")
-    for i, current in enumerate(usage):
-        name = "master" if i == 0 else "worker" + str(i)
-        output.write("<tr><td>{}</td>".format(name))
-        for key, unit in [("cpu", "s/s"), ("memory", "MiB"), ("threads", "")]:
-            fmt = ".2f" if key != "threads" else ""
-            output.write(("<td>{:" + fmt + "} {}</td>").format(
-                    current[key], unit).strip())
-        for key in ["rx", "tx"]:
-            output.write("<td>{:.2f} packets/s</td>".format(
-                    current["network"]["packets"][key]))
-        output.write("</tr>\n")
-    output.write("</table>\n\n")
-
-# main test function
-def run(machine_ids, workers):
-    # create output directory
-    output_dir = os.path.join(SCRIPT_DIR, "output")
-    if not os.path.exists(output_dir):
-        os.mkdir(output_dir)
-
-    # create memgraph cluster and client
-    mg_cluster = MgCluster(machine_ids, workers)
-    mg_client = WorkerWrapper(os.environ[machine_ids[0]],
-            workers[machine_ids[0]])
-
-    # execute the tests
-    stats = {}
-    for scenario in SCENARIOS:
-        output_file = os.path.join(output_dir, scenario + ".json")
-
-        print("Starting memgraph cluster")
-        mg_cluster.store_label("Start: cluster")
-        mg_cluster.start()
-
-        print("Starting client scenario:", scenario)
-        mg_cluster.store_label("Start: " + scenario)
-        mg_client.start(CLIENT_BINARY, [
-            "--address", mg_cluster.get_master_address(),
-            "--group", "card_fraud",
-            "--scenario", scenario,
-            "--duration", str(DURATION),
-            "--num-workers", str(WORKERS),
-            "--output", output_file,
-        ])
-
-        # wait for the client to terminate and check the cluster status
-        while mg_client.check_status():
-            assert mg_cluster.check_status(), "The memgraph cluster has died!"
-            time.sleep(2)
-
-        # stop everything
-        mg_client.wait()
-        mg_cluster.store_label("Stop: " + scenario)
-        mg_cluster.stop()
-        mg_cluster.store_label("Stop: cluster")
-
-        # process the stats
-        data = json.loads(list(filter(lambda x: x.strip(),
-                open(output_file).read().split("\n")))[-1])
-        throughput = data["num_executed_queries"] / data["elapsed_time"]
-        usage = mg_cluster.get_usage()
-        stats[scenario] = (throughput, usage)
-
-    # dump the stats
-    stats_file = open(os.path.join(output_dir, ".card_fraud_summary"), "w")
-    stats_file.write("==== Distributed card fraud summary: ====\n\n")
-    for scenario in SCENARIOS:
-        throughput, usage = stats[scenario]
-        write_scenario_summary(scenario, throughput, usage, stats_file)
-    stats_file.close()
diff --git a/tests/distributed/card_fraud/config.json b/tests/distributed/card_fraud/config.json
deleted file mode 100644
index c3047ec41..000000000
--- a/tests/distributed/card_fraud/config.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "cards_per_worker" : 10000,
-  "pos_per_worker" : 1000,
-  "transactions_per_worker" : 50000,
-  "compromised_pos_probability" : 0.2,
-  "fraud_reported_probability" : 0.1,
-  "hop_probability" : 0.1
-}
diff --git a/tests/distributed/card_fraud/generate_dataset.sh b/tests/distributed/card_fraud/generate_dataset.sh
deleted file mode 100755
index b79ee3023..000000000
--- a/tests/distributed/card_fraud/generate_dataset.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-cd $script_dir
-
-output_dir=snapshots
-
-if [ -d $output_dir ]; then
-    rm -rf $output_dir
-fi
-
-NUM_MACHINES="$( cat card_fraud.py | grep -m1 "NUM_MACHINES" | tail -c 2 )"
-
-build_dir=../../../build_release
-if [ ! -d $build_dir ]; then
-    build_dir=../../../build
-fi
-$build_dir/tests/manual/card_fraud_generate_snapshot --config config.json --num-workers $NUM_MACHINES --dir $output_dir
diff --git a/tests/distributed/common.py b/tests/distributed/common.py
deleted file mode 120000
index 0968c4fd8..000000000
--- a/tests/distributed/common.py
+++ /dev/null
@@ -1 +0,0 @@
-../macro_benchmark/common.py
\ No newline at end of file
diff --git a/tests/distributed/jail_faker.py b/tests/distributed/jail_faker.py
deleted file mode 120000
index ee550ff0c..000000000
--- a/tests/distributed/jail_faker.py
+++ /dev/null
@@ -1 +0,0 @@
-../macro_benchmark/jail_faker.py
\ No newline at end of file
diff --git a/tests/distributed/jail_service.py b/tests/distributed/jail_service.py
deleted file mode 100755
index 357022501..000000000
--- a/tests/distributed/jail_service.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env python3
-import logging
-import os
-import signal
-import subprocess
-import sys
-import tempfile
-import traceback
-import uuid
-import xmlrpc.client
-
-# workaround for xmlrpc max/min integer size
-xmlrpc.client.MAXINT = 2**100
-xmlrpc.client.MININT = -2**100
-
-from common import get_absolute_path
-from xmlrpc.server import SimpleXMLRPCServer
-
-try:
-    import jail
-except:
-    import jail_faker as jail
-
-
-class XMLRPCServer(SimpleXMLRPCServer):
-    def _dispatch(self, method, params):
-        try:
-            return super()._dispatch(method, params)
-        except:
-            traceback.print_exc()
-            raise
-
-
-class JailService:
-    """
-    Knows how to start and stop binaries
-    """
-    def __init__(self):
-        logging.basicConfig(level=logging.INFO)
-        self.log = logging.getLogger("JailService")
-        self.log.info("Initializing Jail Service")
-        self.processes = {}
-        self._generated_filenames = []
-        self.tempdir = tempfile.TemporaryDirectory()
-
-    def _get_proc(self, tid):
-        if tid not in self.processes:
-            raise Exception(
-                "Binary with tid {tid} does not exist".format(tid=tid))
-        return self.processes[tid]
-
-    def start(self, tid, binary_name, binary_args=None):
-        self.log.info("Starting Binary: {binary}".format(binary=binary_name))
-        self.log.info("With args: {args}".format(args=binary_args))
-        # find executable path
-        binary = get_absolute_path(binary_name, "build")
-        if not os.path.exists(binary):
-            # Apollo builds both debug and release binaries on diff
-            # so we need to use the release binary if the debug one
-            # doesn't exist
-            binary = get_absolute_path(binary_name, "build_release")
-
-        # fetch process
-        proc = self._get_proc(tid)
-
-        # start binary
-        proc.run(binary, args=binary_args, timeout=600)
-
-        msg = "Binary {binary} successfully started with tid {tid}".format(
-            binary=binary_name, tid=proc._tid)
-        self.log.info(msg)
-
-    def check_status(self, tid):
-        proc = self._get_proc(tid)
-        status = proc.get_status()
-        if status is None: return True
-        assert status == 0, "The binary exited with a non-zero status!"
-        return False
-
-    def get_usage(self, tid):
-        usage = self._get_proc(tid).get_usage()
-        usage.update({"network": jail.get_network_usage()})
-        return usage
-
-    def wait(self, tid):
-        proc = self._get_proc(tid)
-        proc.wait()
-
-    def stop(self, tid):
-        self.log.info("Stopping binary with tid {tid}".format(tid=tid))
-        proc = self._get_proc(tid)
-        try:
-            proc.send_signal(jail.SIGTERM)
-        except Exception:
-            pass
-        proc.wait()
-        self.log.info("Binary with tid {tid} stopped".format(tid=tid))
-
-    def allocate_file(self, extension=""):
-        if extension != "" and not extension.startswith("."):
-            extension = "." + extension
-        tmp_name = str(uuid.uuid4())
-        while tmp_name in self._generated_filenames:
-            tmp_name = str(uuid.uuid4())
-        self._generated_filenames.append(tmp_name)
-        absolute_path = os.path.join(self.tempdir.name, tmp_name + extension)
-        return absolute_path
-
-    def read_file(self, absolute_path):
-        with open(absolute_path, "rb") as handle:
-            return xmlrpc.client.Binary(handle.read())
-
-    def get_jail(self):
-        proc = jail.get_process()
-        self.processes[proc._tid] = proc
-        return proc._tid
-
-    def store_label(self, label):
-        jail.store_label(label)
-
-    def shutdown(self):
-        self.log.info("Stopping Jail Service")
-        os._exit(0)
-
-
-def main():
-    # set port dynamically
-    port = os.environ["CURRENT_MACHINE"][len("MACHINE"):]
-    port = 8000 + (int(port) * 100)
-    interface = os.environ[os.environ["CURRENT_MACHINE"]]
-
-    # init server
-    server = XMLRPCServer((interface, port), allow_none=True, logRequests=False)
-    server.register_introspection_functions()
-    server.register_instance(JailService())
-
-    # signal handler
-    def signal_sigterm(signum, frame):
-        server.server_close()
-        sys.exit()
-
-    try:
-        signal.signal(signal.SIGTERM, signal_sigterm)
-        server.serve_forever()
-    except KeyboardInterrupt:
-        server.server_close()
-
-
-if __name__ == "__main__":
-    main()
diff --git a/tests/distributed/local_runner b/tests/distributed/local_runner
deleted file mode 100755
index 922a73a05..000000000
--- a/tests/distributed/local_runner
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash -e
-
-script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-
-cd $script_dir
-
-if [[ $# -ne 2 ]]; then
-    echo "Invalid number of arguments"
-    echo "Usage: ./local_runner {test_suite} {test_name}"
-    exit 1
-fi
-
-test_suite=$1
-test_name=$2
-test_path="${test_suite}/${test_name}.py"
-
-if [ ! -f ${test_path} ]; then
-    echo "Test ${test_name}.py does not exist"
-    echo "Usage: ./local_runner {test_suite} {test_name}"
-    exit 1
-fi
-
-NUM_MACHINES="$(cat $test_path | grep -m1 "NUM_MACHINES" | tail -c 2)"
-
-# Define machine ips
-for i in `seq 1 $NUM_MACHINES`;
-do
-  export "MACHINE${i}"="127.0.0.1"
-done
-
-# Run workers
-for i in `seq 2 $NUM_MACHINES`;
-do
-    CURRENT_MACHINE="MACHINE$i" ./jail_service.py &
-    pids[$i]=$!
-done
-
-quit()
-{
-    # Stop workers
-    sleep 1
-    for i in `seq 2 $NUM_MACHINES`;
-    do
-        kill ${pids[$i]}
-    done
-}
-
-trap 'quit' INT
-
-# Run master with test
-args="--machines-num $NUM_MACHINES --test-suite $test_suite --test $test_name"
-CURRENT_MACHINE="MACHINE1" ./master.py $args || quit
-
-quit
diff --git a/tests/distributed/master.py b/tests/distributed/master.py
deleted file mode 100755
index 41909b610..000000000
--- a/tests/distributed/master.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python3
-import atexit
-import importlib
-import logging
-import os
-import signal
-import subprocess
-import time
-import xmlrpc.client
-
-# workaround for xmlrpc max/min integer size
-xmlrpc.client.MAXINT = 2**100
-xmlrpc.client.MININT = -2**100
-
-from argparse import ArgumentParser
-from jail_service import JailService
-
-
-def parse_args():
-    """
-    Parse command line arguments
-    """
-    argp = ArgumentParser(description=__doc__)
-    argp.add_argument("--test-suite", default="raft",
-                      help="Tests suite")
-    argp.add_argument("--test", default="example_test",
-                      help="Test specification in python module")
-    argp.add_argument("--machines-num", default="4",
-                      help="Number of machines in cluster")
-    return argp.parse_args()
-
-
-def wait_for_server(interface, port, delay=0.1):
-    cmd = ["nc", "-z", "-w", "1", interface, port]
-    while subprocess.call(cmd) != 0:
-        time.sleep(0.01)
-    time.sleep(delay)
-
-
-def main(args):
-    workers = {}
-    machine_ids = []
-    machines_num = int(args.machines_num)
-
-    # initialize workers
-    for i in range(machines_num):
-        id = i + 1
-        machine_id = "MACHINE{id}".format(id=id)
-        machine_ids.append(machine_id)
-        machine_interface = os.environ[machine_id]
-        machine_port = 8000 + id * 100
-
-        if (id == 1):
-            worker = JailService()
-        else:
-            host = "http://{interface}:{port}".format(
-                interface=machine_interface,
-                port=str(machine_port))
-            worker = xmlrpc.client.ServerProxy(host)
-            wait_for_server(machine_interface, str(machine_port))
-
-        workers[machine_id] = worker
-
-    # cleanup at exit
-    @atexit.register
-    def cleanup():
-        for machine_id in machine_ids[1:]:
-            try:
-                workers[machine_id].shutdown()
-            except ConnectionRefusedError:
-                pass
-
-    # run test
-    test = importlib.import_module(
-        "{suite}.{test}".format(suite=args.test_suite, test=args.test))
-    test.run(machine_ids, workers)
-
-
-if __name__ == "__main__":
-    args = parse_args()
-    main(args)
diff --git a/tests/distributed/raft/CMakeLists.txt b/tests/distributed/raft/CMakeLists.txt
deleted file mode 100644
index c3624024a..000000000
--- a/tests/distributed/raft/CMakeLists.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-# set current directory name as a test type
-get_filename_component(test_type ${CMAKE_CURRENT_SOURCE_DIR} NAME)
-
-# get all cpp abs file names recursively starting from current directory
-file(GLOB_RECURSE test_type_cpps *.cpp)
-message(STATUS "Available ${test_type} cpp files are: ${test_type_cpps}")
-
-# for each cpp file build binary and register test
-foreach(test_cpp ${test_type_cpps})
-
-    # get exec name (remove extension from the abs path)
-    get_filename_component(exec_name ${test_cpp} NAME_WE)
-
-    set(target_name memgraph__${test_type}__${exec_name})
-
-    # build exec file
-    add_executable(${target_name} ${test_cpp})
-
-    # OUTPUT_NAME sets the real name of a target when it is built and can be
-    # used to help create two targets of the same name even though CMake
-    # requires unique logical target names
-    set_target_properties(${target_name} PROPERTIES OUTPUT_NAME ${exec_name})
-
-    # link libraries
-    target_link_libraries(${target_name} memgraph_lib)
-
-    set(output_path ${CMAKE_BINARY_DIR}/test_results/unit/${target_name}.xml)
-
-endforeach()
diff --git a/tests/distributed/raft/README.md b/tests/distributed/raft/README.md
deleted file mode 100644
index b7dbe11f2..000000000
--- a/tests/distributed/raft/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Raft Tests
-
-To run a test locally, execute the following command:
-
-```
-./local_runner {test_suite} {test_name}
-```
-
-Every test has to be defined as a Python module
-that exposes a ```run(machine_ids, workers)```
-method. Each test must also define a constant
-```NUM_MACHINES``` which specifies how many workers
-to run in the cluster.
diff --git a/tests/distributed/raft/example_client.cpp b/tests/distributed/raft/example_client.cpp
deleted file mode 100644
index fed7620bb..000000000
--- a/tests/distributed/raft/example_client.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-#include <ctime>
-#include <random>
-#include <thread>
-
-#include <fmt/format.h>
-#include <gflags/gflags.h>
-#include <glog/logging.h>
-
-#include "communication/rpc/client.hpp"
-#include "io/network/endpoint.hpp"
-#include "messages.hpp"
-#include "utils/network.hpp"
-
-using namespace communication::rpc;
-using namespace std::literals::chrono_literals;
-
-DEFINE_string(server_interface, "127.0.0.1",
-              "Server interface on which to communicate.");
-DEFINE_int32(server_port, 8010, "Server port on which to communicate.");
-
-int main(int argc, char **argv) {
-  google::SetUsageMessage("Raft RPC Client");
-
-  gflags::ParseCommandLineFlags(&argc, &argv, true);
-  google::InitGoogleLogging(argv[0]);
-
-  // Initialize client.
-  Client client(io::network::Endpoint(
-      utils::ResolveHostname(FLAGS_server_interface), FLAGS_server_port));
-
-  // Try to send 100 values to server.
-  // If requests timeout, try to resend it.
-  // Log output on server should contain all values once
-  // in correct order.
-  for (int i = 1; i <= 100; ++i) {
-    LOG(INFO) << fmt::format("Apennding value: {}", i);
-    auto result_tuple = client.Call<AppendEntry>(i);
-    if (!result_tuple) {
-      LOG(INFO) << "Request unsuccessful";
-      // Try to resend value
-      --i;
-    } else {
-      LOG(INFO) << fmt::format("Appended value: {}", i);
-    }
-  }
-
-  return 0;
-}
diff --git a/tests/distributed/raft/example_server.cpp b/tests/distributed/raft/example_server.cpp
deleted file mode 100644
index 72c4b18c6..000000000
--- a/tests/distributed/raft/example_server.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-#include <fstream>
-#include <thread>
-
-#include <fmt/format.h>
-#include <gflags/gflags.h>
-#include <glog/logging.h>
-
-#include "communication/rpc/server.hpp"
-#include "messages.hpp"
-#include "utils/signals.hpp"
-#include "utils/terminate_handler.hpp"
-
-using namespace communication::rpc;
-using namespace std::literals::chrono_literals;
-
-DEFINE_string(interface, "127.0.0.1",
-              "Communication interface on which to listen.");
-DEFINE_string(port, "10000", "Communication port on which to listen.");
-DEFINE_string(log, "log.txt", "Entries log file");
-
-volatile sig_atomic_t is_shutting_down = 0;
-
-int main(int argc, char **argv) {
-  google::SetUsageMessage("Raft RPC Server");
-
-  gflags::ParseCommandLineFlags(&argc, &argv, true);
-  google::InitGoogleLogging(argv[0]);
-
-  // Unhandled exception handler init.
-  std::set_terminate(&utils::TerminateHandler);
-
-  Server server(io::network::Endpoint(FLAGS_interface, stoul(FLAGS_port)));
-  std::ofstream log(FLAGS_log, std::ios_base::app);
-
-  // Handler for regular termination signals.
-  auto shutdown = [&log]() {
-    if (is_shutting_down) return;
-    is_shutting_down = 1;
-    log.close();
-    exit(0);
-  };
-
-  // Prevent handling shutdown inside a shutdown. For example, SIGINT handler
-  // being interrupted by SIGTERM before is_shutting_down is set, thus causing
-  // double shutdown.
-  sigset_t block_shutdown_signals;
-  sigemptyset(&block_shutdown_signals);
-  sigaddset(&block_shutdown_signals, SIGTERM);
-  sigaddset(&block_shutdown_signals, SIGINT);
-
-  CHECK(utils::SignalHandler::RegisterHandler(utils::Signal::Terminate,
-                                              shutdown, block_shutdown_signals))
-      << "Unable to register SIGTERM handler!";
-  CHECK(utils::SignalHandler::RegisterHandler(utils::Signal::Interupt, shutdown,
-                                              block_shutdown_signals))
-      << "Unable to register SIGINT handler!";
-
-  // Example callback.
-  server.Register<AppendEntry>([&log](const AppendEntryReq &request) {
-    log << request.val << std::endl;
-    log.flush();
-    LOG(INFO) << fmt::format("AppendEntry: {}", request.val);
-    return std::make_unique<AppendEntryRes>(200, FLAGS_interface,
-                                            stol(FLAGS_port));
-  });
-
-  LOG(INFO) << "Raft RPC server started";
-  // Sleep until shutdown detected.
-  std::this_thread::sleep_until(
-      std::chrono::time_point<std::chrono::system_clock>::max());
-
-  return 0;
-}
diff --git a/tests/distributed/raft/example_test.py b/tests/distributed/raft/example_test.py
deleted file mode 100644
index 06525ce0c..000000000
--- a/tests/distributed/raft/example_test.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import logging
-import os
-import time
-import xmlrpc.client
-
-NUM_MACHINES = 2
-
-# binaries to run
-CLIENT_BINARY = "tests/distributed/raft/example_client"
-SERVER_BINARY = "tests/distributed/raft/example_server"
-
-
-def run(machine_ids, workers):
-    logging.basicConfig(level=logging.INFO)
-    log = logging.getLogger("example_test")
-    log.info("Start")
-
-    # define interfaces and ports for binaries
-    server_interface = os.environ[machine_ids[1]]
-    server_port = str(10000)
-    client_interface = os.environ[machine_ids[0]]
-    client_port = str(10010)
-
-    # start binaries
-    log_abs_path = workers[machine_ids[1]].allocate_file()
-    server_tid = workers[machine_ids[1]].get_jail()
-    server_args = ["--interface", server_interface]
-    server_args += ["--port", server_port]
-    server_args += ["--log", log_abs_path]
-    workers[machine_ids[1]].start(server_tid, SERVER_BINARY, server_args)
-
-    client_tid = workers[machine_ids[0]].get_jail()
-    client_args = ["--interface", client_interface]
-    client_args += ["--port", client_port]
-    client_args += ["--server-interface", server_interface]
-    client_args += ["--server-port", server_port]
-    workers[machine_ids[0]].start(client_tid, CLIENT_BINARY, client_args)
-
-    # crash server
-    workers[machine_ids[1]].stop(server_tid)
-    time.sleep(5)
-    workers[machine_ids[1]].start(server_tid, SERVER_BINARY, server_args)
-
-    # wait for test to finish
-    time.sleep(5)
-
-    # stop binaries
-    workers[machine_ids[0]].stop(client_tid)
-    workers[machine_ids[1]].stop(server_tid)
-
-    # fetch log
-    result = workers[machine_ids[1]].read_file(log_abs_path)
-    if result is not None:
-        local_log = "local_log.txt"
-        result = result.data.decode('ascii')
-        if result.splitlines() == ["{}".format(x) for x in range(1, 101)]:
-            log.warn("Test successful")
-        else:
-            raise Exception("Test failed")
-
-    log.info("End")
diff --git a/tests/distributed/raft/messages.hpp b/tests/distributed/raft/messages.hpp
deleted file mode 100644
index 5cb33f3f3..000000000
--- a/tests/distributed/raft/messages.hpp
+++ /dev/null
@@ -1,47 +0,0 @@
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "boost/serialization/base_object.hpp"
-
-#include "boost/serialization/export.hpp"
-#include "communication/rpc/messages.hpp"
-
-using boost::serialization::base_object;
-using communication::rpc::Message;
-using namespace communication::rpc;
-
-struct AppendEntryReq : public Message {
-  AppendEntryReq() {}
-  explicit AppendEntryReq(int val) : val(val) {}
-  int val;
-
- private:
-  friend class boost::serialization::access;
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &base_object<Message>(*this);
-    ar &val;
-  }
-};
-BOOST_CLASS_EXPORT(AppendEntryReq);
-
-struct AppendEntryRes : public Message {
-  AppendEntryRes() {}
-  AppendEntryRes(int status, std::string interface, uint16_t port)
-      : status(status), interface(interface), port(port) {}
-  int status;
-  std::string interface;
-  uint16_t port;
-
- private:
-  friend class boost::serialization::access;
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &base_object<Message>(*this);
-    ar &status;
-    ar &interface;
-    ar &port;
-  }
-};
-BOOST_CLASS_EXPORT(AppendEntryRes);
-
-using AppendEntry = RequestResponse<AppendEntryReq, AppendEntryRes>;
diff --git a/tests/macro_benchmark/clients/card_fraud_client.cpp b/tests/macro_benchmark/clients/card_fraud_client.cpp
index 7971a1def..1388e26c5 100644
--- a/tests/macro_benchmark/clients/card_fraud_client.cpp
+++ b/tests/macro_benchmark/clients/card_fraud_client.cpp
@@ -5,21 +5,10 @@
 
 #include "gflags/gflags.h"
 
-#include "stats/stats.hpp"
-#include "stats/stats_rpc_messages.hpp"
 #include "threading/sync/rwlock.hpp"
 
 #include "long_running_common.hpp"
 
-// TODO(mtomic): this sucks but I don't know a different way to make it work
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "boost/serialization/export.hpp"
-BOOST_CLASS_EXPORT(stats::StatsReq);
-BOOST_CLASS_EXPORT(stats::StatsRes);
-BOOST_CLASS_EXPORT(stats::BatchStatsReq);
-BOOST_CLASS_EXPORT(stats::BatchStatsRes);
-
 std::atomic<int64_t> num_pos;
 std::atomic<int64_t> num_cards;
 std::atomic<int64_t> num_transactions;
@@ -31,12 +20,12 @@ DEFINE_string(config, "", "test config");
 
 enum class Role { WORKER, ANALYTIC, CLEANUP };
 
-stats::Gauge &num_vertices = stats::GetGauge("vertices");
-stats::Gauge &num_edges = stats::GetGauge("edges");
+std::atomic<int64_t> num_vertices{0};
+std::atomic<int64_t> num_edges{0};
 
 void UpdateStats() {
-  num_vertices.Set(num_pos + num_cards + num_transactions);
-  num_edges.Set(2 * num_transactions);
+  num_vertices = num_pos + num_cards + num_transactions;
+  num_edges = 2 * num_transactions;
 }
 
 int64_t NumNodesWithLabel(Client &client, std::string label) {
@@ -340,9 +329,6 @@ int main(int argc, char **argv) {
   gflags::ParseCommandLineFlags(&argc, &argv, true);
   google::InitGoogleLogging(argv[0]);
 
-  stats::InitStatsLogging(
-      fmt::format("client.long_running.{}.{}", FLAGS_group, FLAGS_scenario));
-
   Endpoint endpoint(FLAGS_address, FLAGS_port);
   Client client;
   if (!client.Connect(endpoint, FLAGS_username, FLAGS_password)) {
@@ -389,7 +375,5 @@ int main(int argc, char **argv) {
 
   RunMultithreadedTest(clients);
 
-  stats::StopStatsLogging();
-
   return 0;
 }
diff --git a/tests/macro_benchmark/clients/long_running_common.hpp b/tests/macro_benchmark/clients/long_running_common.hpp
index f110a413a..d9d759f2f 100644
--- a/tests/macro_benchmark/clients/long_running_common.hpp
+++ b/tests/macro_benchmark/clients/long_running_common.hpp
@@ -1,9 +1,9 @@
 #pragma once
 
+#include <memory>
+
 #include "json/json.hpp"
 
-#include "stats/metrics.hpp"
-#include "stats/stats.hpp"
 #include "utils/network.hpp"
 #include "utils/timer.hpp"
 
@@ -22,8 +22,8 @@ DEFINE_int32(duration, 30, "Number of seconds to execute benchmark");
 DEFINE_string(group, "unknown", "Test group name");
 DEFINE_string(scenario, "unknown", "Test scenario name");
 
-auto &executed_queries = stats::GetCounter("executed_queries");
-auto &serialization_errors = stats::GetCounter("serialization_errors");
+std::atomic<uint64_t> executed_queries{0};
+std::atomic<uint64_t> serialization_errors{0};
 
 class TestClient {
  public:
@@ -70,7 +70,7 @@ class TestClient {
       std::tie(result, retries) =
           ExecuteNTimesTillSuccess(client_, query, params, MAX_RETRIES);
     } catch (const utils::BasicException &e) {
-      serialization_errors.Bump(MAX_RETRIES);
+      serialization_errors += MAX_RETRIES;
       return std::experimental::nullopt;
     }
     auto wall_time = timer.Elapsed();
@@ -84,8 +84,8 @@ class TestClient {
         stats_[query].push_back(std::move(metadata));
       }
     }
-    executed_queries.Bump();
-    serialization_errors.Bump(retries);
+    executed_queries += 1;
+    serialization_errors += retries;
     return result;
   }
 
@@ -167,15 +167,10 @@ void RunMultithreadedTest(std::vector<std::unique_ptr<TestClient>> &clients) {
                       .first;
         it->second = (it->second.ValueDouble() * old_count + stat.second) /
                      (old_count + new_count);
-        stats::LogStat(
-            fmt::format("queries.{}.{}", query_stats.first, stat.first),
-            (stat.second / new_count));
       }
-      stats::LogStat(fmt::format("queries.{}.count", query_stats.first),
-                     new_count);
     }
 
-    out << "{\"num_executed_queries\": " << executed_queries.Value() << ", "
+    out << "{\"num_executed_queries\": " << executed_queries << ", "
         << "\"elapsed_time\": " << timer.Elapsed().count()
         << ", \"queries\": [";
     utils::PrintIterable(
diff --git a/tests/manual/card_fraud_local.cpp b/tests/manual/card_fraud_local.cpp
deleted file mode 100644
index 4685dc1cc..000000000
--- a/tests/manual/card_fraud_local.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-#include <atomic>
-#include <random>
-#include <thread>
-#include <vector>
-
-#include "gflags/gflags.h"
-
-#include "distributed_common.hpp"
-
-DEFINE_int32(num_tx_creators, 3, "Number of threads creating transactions");
-DEFINE_int32(tx_per_thread, 1000, "Number of transactions each thread creates");
-
-int main(int argc, char *argv[]) {
-  gflags::ParseCommandLineFlags(&argc, &argv, true);
-
-  Cluster cluster(5);
-
-  cluster.Execute("CREATE INDEX ON :Card(id)");
-  cluster.Execute("CREATE INDEX ON :Transaction(id)");
-  cluster.Execute("CREATE INDEX ON :Pos(id)");
-
-  int kCardCount = 20000;
-  int kPosCount = 20000;
-
-  cluster.Execute("UNWIND range(0, $card_count) AS id CREATE (:Card {id:id})",
-                  {{"card_count", kCardCount - 1}});
-  cluster.Execute("UNWIND range(0, $pos_count) AS id CREATE (:Pos {id:id})",
-                  {{"pos_count", kPosCount - 1}});
-
-  CheckResults(cluster.Execute("MATCH (:Pos) RETURN count(1)"), {{kPosCount}},
-               "Failed to create POS");
-  CheckResults(cluster.Execute("MATCH (:Card) RETURN count(1)"), {{kCardCount}},
-               "Failed to create Cards");
-
-  std::atomic<int> tx_counter{0};
-  auto create_tx = [&cluster, kCardCount, kPosCount, &tx_counter](int count) {
-    std::mt19937 rand_dev{std::random_device{}()};
-    std::uniform_int_distribution<> int_dist;
-
-    auto rint = [&rand_dev, &int_dist](int upper) {
-      return int_dist(rand_dev) % upper;
-    };
-
-    for (int i = 0; i < count; ++i) {
-      try {
-        auto res = cluster.Execute(
-            "MATCH (p:Pos {id: $pos}), (c:Card {id: $card}) "
-            "CREATE (p)<-[:At]-(:Transaction {id : $tx})-[:Using]->(c) "
-            "RETURN count(1)",
-            {{"pos", rint(kPosCount)},
-             {"card", rint(kCardCount)},
-             {"tx", tx_counter++}});
-        CheckResults(res, {{1}}, "Transaction creation");
-      } catch (LockTimeoutException &) {
-        --i;
-      } catch (mvcc::SerializationError &) {
-        --i;
-      }
-      if (i > 0 && i % 200 == 0)
-        LOG(INFO) << "Created " << i << " transactions";
-    }
-  };
-
-  LOG(INFO) << "Creating " << FLAGS_num_tx_creators * FLAGS_tx_per_thread
-            << " transactions in " << FLAGS_num_tx_creators << " threads";
-  std::vector<std::thread> tx_creators;
-  for (int i = 0; i < FLAGS_num_tx_creators; ++i)
-    tx_creators.emplace_back(create_tx, FLAGS_tx_per_thread);
-  for (auto &t : tx_creators) t.join();
-
-  CheckResults(cluster.Execute("MATCH (:Transaction) RETURN count(1)"),
-               {{FLAGS_num_tx_creators * FLAGS_tx_per_thread}},
-               "Failed to create Transactions");
-
-  LOG(INFO) << "Test terminated successfully";
-  return 0;
-}
diff --git a/tests/manual/distributed_common.hpp b/tests/manual/distributed_common.hpp
deleted file mode 100644
index a33d5acd5..000000000
--- a/tests/manual/distributed_common.hpp
+++ /dev/null
@@ -1,98 +0,0 @@
-#pragma once
-
-#include <chrono>
-#include <vector>
-
-#include "communication/result_stream_faker.hpp"
-#include "database/graph_db_accessor.hpp"
-#include "query/interpreter.hpp"
-#include "query/typed_value.hpp"
-
-class WorkerInThread {
- public:
-  explicit WorkerInThread(database::Config config) : worker_(config) {
-    thread_ = std::thread([this, config] { worker_.WaitForShutdown(); });
-  }
-
-  ~WorkerInThread() {
-    if (thread_.joinable()) thread_.join();
-  }
-
-  database::Worker worker_;
-  std::thread thread_;
-};
-
-class Cluster {
-  const std::chrono::microseconds kInitTime{200};
-  const std::string kLocal = "127.0.0.1";
-
- public:
-  Cluster(int worker_count) {
-    database::Config masterconfig;
-    masterconfig.master_endpoint = {kLocal, 0};
-    master_ = std::make_unique<database::Master>(masterconfig);
-    interpreter_ = std::make_unique<query::Interpreter>(*master_);
-    std::this_thread::sleep_for(kInitTime);
-
-    auto worker_config = [this](int worker_id) {
-      database::Config config;
-      config.worker_id = worker_id;
-      config.master_endpoint = master_->endpoint();
-      config.worker_endpoint = {kLocal, 0};
-      return config;
-    };
-
-    for (int i = 0; i < worker_count; ++i) {
-      workers_.emplace_back(
-          std::make_unique<WorkerInThread>(worker_config(i + 1)));
-      std::this_thread::sleep_for(kInitTime);
-    }
-  }
-
-  void Stop() {
-    interpreter_ = nullptr;
-    master_ = nullptr;
-    workers_.clear();
-  }
-
-  ~Cluster() {
-    if (master_) Stop();
-  }
-
-  auto Execute(const std::string &query,
-               std::map<std::string, query::TypedValue> params = {}) {
-    database::GraphDbAccessor dba(*master_);
-    ResultStreamFaker result;
-    interpreter_->operator()(query, dba, params, false).PullAll(result);
-    dba.Commit();
-    return result.GetResults();
-  };
-
- private:
-  std::unique_ptr<database::Master> master_;
-  std::vector<std::unique_ptr<WorkerInThread>> workers_;
-  std::unique_ptr<query::Interpreter> interpreter_;
-};
-
-void CheckResults(
-    const std::vector<std::vector<query::TypedValue>> &results,
-    const std::vector<std::vector<query::TypedValue>> &expected_rows,
-    const std::string &msg) {
-  query::TypedValue::BoolEqual equality;
-  CHECK(results.size() == expected_rows.size())
-      << msg << " (expected " << expected_rows.size() << " rows "
-      << ", got " << results.size() << ")";
-  for (size_t row_id = 0; row_id < results.size(); ++row_id) {
-    auto &result = results[row_id];
-    auto &expected = expected_rows[row_id];
-    CHECK(result.size() == expected.size())
-        << msg << " (expected " << expected.size() << " elements in row "
-        << row_id << ", got " << result.size() << ")";
-    for (size_t col_id = 0; col_id < result.size(); ++col_id) {
-      CHECK(equality(result[col_id], expected[col_id]))
-          << msg << " (expected value '" << expected[col_id] << "' got '"
-          << result[col_id] << "' in row " << row_id << " col " << col_id
-          << ")";
-    }
-  }
-}
diff --git a/tests/manual/distributed_repl.cpp b/tests/manual/distributed_repl.cpp
deleted file mode 100644
index 17853e43b..000000000
--- a/tests/manual/distributed_repl.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-#include <chrono>
-#include <iostream>
-#include <memory>
-#include <thread>
-
-#include <gflags/gflags.h>
-#include <glog/logging.h>
-
-#include "database/graph_db.hpp"
-#include "query/interpreter.hpp"
-#include "query/repl.hpp"
-#include "utils/flag_validation.hpp"
-
-DEFINE_VALIDATED_int32(worker_count, 1,
-                       "The number of worker nodes in cluster.",
-                       FLAG_IN_RANGE(1, 1000));
-DECLARE_int32(min_log_level);
-
-const std::string kLocal = "127.0.0.1";
-
-class WorkerInThread {
- public:
-  explicit WorkerInThread(database::Config config) : worker_(config) {
-    thread_ = std::thread([this, config] { worker_.WaitForShutdown(); });
-  }
-
-  ~WorkerInThread() {
-    if (thread_.joinable()) thread_.join();
-  }
-
-  database::Worker worker_;
-  std::thread thread_;
-};
-
-int main(int argc, char *argv[]) {
-  gflags::ParseCommandLineFlags(&argc, &argv, true);
-  FLAGS_min_log_level = google::ERROR;
-  google::InitGoogleLogging(argv[0]);
-
-  // Start the master
-  database::Config master_config;
-  master_config.master_endpoint = {kLocal, 0};
-  auto master = std::make_unique<database::Master>(master_config);
-  // Allow the master to get initialized before making workers.
-  std::this_thread::sleep_for(std::chrono::milliseconds(250));
-
-  std::vector<std::unique_ptr<WorkerInThread>> workers;
-  for (int i = 0; i < FLAGS_worker_count; ++i) {
-    database::Config config;
-    config.worker_id = i + 1;
-    config.master_endpoint = master->endpoint();
-    config.worker_endpoint = {kLocal, 0};
-    workers.emplace_back(std::make_unique<WorkerInThread>(config));
-  }
-
-  // Start the REPL
-  query::Repl(*master);
-
-  master = nullptr;
-  return 0;
-}
diff --git a/tests/manual/query_planner.cpp b/tests/manual/query_planner.cpp
index a1851e8d5..645ec7760 100644
--- a/tests/manual/query_planner.cpp
+++ b/tests/manual/query_planner.cpp
@@ -514,53 +514,12 @@ class PlanPrinter : public query::plan::HierarchicalLogicalOperatorVisitor {
     return true;
   }
 
-  bool PreVisit(query::plan::PullRemote &op) override {
-    WithPrintLn([&op](auto &out) {
-      out << "* PullRemote [" << op.plan_id() << "] {";
-      utils::PrintIterable(
-          out, op.symbols(), ", ",
-          [](auto &out, const auto &sym) { out << sym.name(); });
-      out << "}";
-    });
-    WithPrintLn([](auto &out) { out << "|\\"; });
-    ++depth_;
-    WithPrintLn([](auto &out) { out << "* workers"; });
-    --depth_;
-    return true;
-  }
-
-  bool PreVisit(query::plan::Synchronize &op) override {
-    WithPrintLn([&op](auto &out) {
-      out << "* Synchronize";
-      if (op.advance_command()) out << " (ADV CMD)";
-    });
-    if (op.pull_remote()) Branch(*op.pull_remote());
-    op.input()->Accept(*this);
-    return false;
-  }
-
   bool PreVisit(query::plan::Cartesian &op) override {
     WithPrintLn([](auto &out) { out << "* Cartesian"; });
     Branch(*op.right_op());
     op.left_op()->Accept(*this);
     return false;
   }
-
-  bool PreVisit(query::plan::PullRemoteOrderBy &op) override {
-    WithPrintLn([&op](auto &out) {
-      out << "* PullRemoteOrderBy {";
-      utils::PrintIterable(
-          out, op.symbols(), ", ",
-          [](auto &out, const auto &sym) { out << sym.name(); });
-      out << "}";
-    });
-
-    WithPrintLn([](auto &out) { out << "|\\"; });
-    ++depth_;
-    WithPrintLn([](auto &out) { out << "* workers"; });
-    --depth_;
-    return true;
-  }
 #undef PRE_VISIT
 
  private:
@@ -649,39 +608,11 @@ DEFCOMMAND(Show) {
   plan->Accept(printer);
 }
 
-DEFCOMMAND(ShowDistributed) {
-  int64_t plan_ix = 0;
-  std::stringstream ss(args[0]);
-  ss >> plan_ix;
-  if (ss.fail() || !ss.eof() || plan_ix >= plans.size()) return;
-  const auto &plan = plans[plan_ix].first;
-  std::atomic<int64_t> plan_id{0};
-  auto distributed_plan = MakeDistributedPlan(*plan, symbol_table, plan_id);
-  {
-    std::cout << "---- Master Plan ---- " << std::endl;
-    PlanPrinter printer(dba);
-    distributed_plan.master_plan->Accept(printer);
-    std::cout << std::endl;
-  }
-  for (size_t i = 0; i < distributed_plan.worker_plans.size(); ++i) {
-    int64_t id;
-    std::shared_ptr<query::plan::LogicalOperator> worker_plan;
-    std::tie(id, worker_plan) = distributed_plan.worker_plans[i];
-    std::cout << "---- Worker Plan #" << id << " ---- " << std::endl;
-    PlanPrinter printer(dba);
-    worker_plan->Accept(printer);
-    std::cout << std::endl;
-  }
-}
-
 DEFCOMMAND(Help);
 
 std::map<std::string, Command> commands = {
     {"top", {TopCommand, 1, "Show top N plans"}},
     {"show", {ShowCommand, 1, "Show the Nth plan"}},
-    {"show-distributed",
-     {ShowDistributedCommand, 1,
-      "Show the Nth plan as for distributed execution"}},
     {"help", {HelpCommand, 0, "Show available commands"}},
 };
 
diff --git a/tests/manual/raft_rpc.cpp b/tests/manual/raft_rpc.cpp
deleted file mode 100644
index c423acd8c..000000000
--- a/tests/manual/raft_rpc.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-#include "boost/serialization/export.hpp"
-
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "boost/archive/text_iarchive.hpp"
-#include "boost/archive/text_oarchive.hpp"
-#include "boost/serialization/export.hpp"
-
-#include "communication/raft/rpc.hpp"
-#include "communication/raft/storage/file.hpp"
-#include "communication/raft/test_utils.hpp"
-
-using namespace std::literals::chrono_literals;
-
-namespace raft = communication::raft;
-
-using io::network::Endpoint;
-using raft::RaftConfig;
-using raft::RpcNetwork;
-using raft::test_utils::DummyState;
-
-DEFINE_string(member_id, "", "id of Raft member");
-DEFINE_string(log_dir, "", "Raft log directory");
-
-BOOST_CLASS_EXPORT(raft::PeerRpcReply);
-BOOST_CLASS_EXPORT(raft::PeerRpcRequest<DummyState>);
-
-/* Start cluster members with:
- * ./raft_rpc --member-id a --log-dir a_log
- * ./raft_rpc --member-id b --log-dir b_log
- * ./raft_rpc --member-id c --log-dir c_log
- *
- * Enjoy democracy!
- */
-
-int main(int argc, char *argv[]) {
-  google::InitGoogleLogging(argv[0]);
-  gflags::ParseCommandLineFlags(&argc, &argv, true);
-
-  std::unordered_map<std::string, Endpoint> directory = {
-      {"a", Endpoint("127.0.0.1", 12345)},
-      {"b", Endpoint("127.0.0.1", 12346)},
-      {"c", Endpoint("127.0.0.1", 12347)}};
-
-  communication::rpc::Server server(directory[FLAGS_member_id]);
-  RpcNetwork<DummyState> network(server, directory);
-  raft::SimpleFileStorage<DummyState> storage(FLAGS_log_dir);
-
-  raft::RaftConfig config{{"a", "b", "c"}, 150ms, 300ms, 70ms, 30ms};
-
-  {
-    raft::RaftMember<DummyState> raft_member(network, storage, FLAGS_member_id,
-                                             config);
-    while (true) {
-      continue;
-    }
-  }
-
-  return 0;
-}
diff --git a/tests/unit/concurrent_id_mapper_distributed.cpp b/tests/unit/concurrent_id_mapper_distributed.cpp
deleted file mode 100644
index 9f0dc8629..000000000
--- a/tests/unit/concurrent_id_mapper_distributed.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-#include <experimental/optional>
-
-#include "gtest/gtest.h"
-
-#include "communication/rpc/server.hpp"
-#include "storage/concurrent_id_mapper_master.hpp"
-#include "storage/concurrent_id_mapper_worker.hpp"
-#include "storage/types.hpp"
-
-template <typename TId>
-class DistributedConcurrentIdMapperTest : public ::testing::Test {
-  const std::string kLocal{"127.0.0.1"};
-
- protected:
-  communication::rpc::Server master_server_{{kLocal, 0}};
-  std::experimental::optional<communication::rpc::ClientPool>
-      master_client_pool_;
-  std::experimental::optional<storage::MasterConcurrentIdMapper<TId>>
-      master_mapper_;
-  std::experimental::optional<storage::WorkerConcurrentIdMapper<TId>>
-      worker_mapper_;
-
-  void SetUp() override {
-    master_client_pool_.emplace(master_server_.endpoint());
-    master_mapper_.emplace(master_server_);
-    worker_mapper_.emplace(master_client_pool_.value());
-  }
-  void TearDown() override {
-    worker_mapper_ = std::experimental::nullopt;
-    master_mapper_ = std::experimental::nullopt;
-    master_client_pool_ = std::experimental::nullopt;
-  }
-};
-
-typedef ::testing::Types<storage::Label, storage::EdgeType, storage::Property>
-    GraphDbTestTypes;
-TYPED_TEST_CASE(DistributedConcurrentIdMapperTest, GraphDbTestTypes);
-
-TYPED_TEST(DistributedConcurrentIdMapperTest, Basic) {
-  auto &master = this->master_mapper_.value();
-  auto &worker = this->worker_mapper_.value();
-
-  auto id1 = master.value_to_id("v1");
-  EXPECT_EQ(worker.id_to_value(id1), "v1");
-  EXPECT_EQ(worker.value_to_id("v1"), id1);
-
-  auto id2 = worker.value_to_id("v2");
-  EXPECT_EQ(master.id_to_value(id2), "v2");
-  EXPECT_EQ(master.value_to_id("v2"), id2);
-
-  EXPECT_NE(id1, id2);
-}
diff --git a/tests/unit/counters.cpp b/tests/unit/counters.cpp
deleted file mode 100644
index fad665443..000000000
--- a/tests/unit/counters.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-#include "gtest/gtest.h"
-
-#include "communication/rpc/server.hpp"
-#include "database/counters.hpp"
-
-const std::string kLocal = "127.0.0.1";
-
-TEST(CountersDistributed, All) {
-  communication::rpc::Server master_server({kLocal, 0});
-  database::MasterCounters master(master_server);
-  communication::rpc::ClientPool master_client_pool(master_server.endpoint());
-
-  database::WorkerCounters w1(master_client_pool);
-  database::WorkerCounters w2(master_client_pool);
-
-  EXPECT_EQ(w1.Get("a"), 0);
-  EXPECT_EQ(w1.Get("a"), 1);
-  EXPECT_EQ(w2.Get("a"), 2);
-  EXPECT_EQ(w1.Get("a"), 3);
-  EXPECT_EQ(master.Get("a"), 4);
-
-  EXPECT_EQ(master.Get("b"), 0);
-  EXPECT_EQ(w2.Get("b"), 1);
-  w1.Set("b", 42);
-  EXPECT_EQ(w2.Get("b"), 42);
-}
diff --git a/tests/unit/cypher_main_visitor.cpp b/tests/unit/cypher_main_visitor.cpp
index d47219b68..ecb9f5b57 100644
--- a/tests/unit/cypher_main_visitor.cpp
+++ b/tests/unit/cypher_main_visitor.cpp
@@ -6,8 +6,6 @@
 #include <vector>
 
 #include "antlr4-runtime.h"
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
@@ -110,39 +108,11 @@ class CachedAstGenerator : public Base {
   Query *query_;
 };
 
-// This generator serializes the parsed ast and uses the deserialized one.
-class SerializedAstGenerator : public Base {
- public:
-  SerializedAstGenerator(const std::string &query)
-      : Base(query),
-        storage_([&]() {
-          ::frontend::opencypher::Parser parser(query);
-          CypherMainVisitor visitor(context_);
-          visitor.visit(parser.tree());
-          std::stringstream stream;
-          {
-            boost::archive::binary_oarchive out_archive(stream);
-            out_archive << *visitor.query();
-          }
-          AstTreeStorage new_ast;
-          {
-            boost::archive::binary_iarchive in_archive(stream);
-            new_ast.Load(in_archive);
-          }
-          return new_ast;
-        }()),
-        query_(storage_.query()) {}
-
-  AstTreeStorage storage_;
-  Query *query_;
-};
-
 template <typename T>
 class CypherMainVisitorTest : public ::testing::Test {};
 
 typedef ::testing::Types<AstGenerator, OriginalAfterCloningAstGenerator,
-                         ClonedAstGenerator, CachedAstGenerator,
-                         SerializedAstGenerator>
+                         ClonedAstGenerator, CachedAstGenerator>
     AstGeneratorTypes;
 TYPED_TEST_CASE(CypherMainVisitorTest, AstGeneratorTypes);
 
diff --git a/tests/unit/database_master.cpp b/tests/unit/database_master.cpp
deleted file mode 100644
index 24cb62664..000000000
--- a/tests/unit/database_master.cpp
+++ /dev/null
@@ -1,11 +0,0 @@
-#include "gtest/gtest.h"
-
-#include "config.hpp"
-#include "database/graph_db.hpp"
-
-TEST(DatabaseMaster, Instantiate) {
-  database::Config config;
-  config.master_endpoint = io::network::Endpoint("127.0.0.1", 0);
-  config.worker_id = 0;
-  database::Master master(config);
-}
diff --git a/tests/unit/distributed_common.hpp b/tests/unit/distributed_common.hpp
deleted file mode 100644
index 0101fc1fa..000000000
--- a/tests/unit/distributed_common.hpp
+++ /dev/null
@@ -1,139 +0,0 @@
-#include <experimental/filesystem>
-#include <memory>
-#include <thread>
-
-#include <gtest/gtest.h>
-
-#include "database/graph_db.hpp"
-#include "database/graph_db_accessor.hpp"
-#include "distributed/updates_rpc_server.hpp"
-#include "storage/address_types.hpp"
-#include "transactions/engine_master.hpp"
-
-namespace fs = std::experimental::filesystem;
-
-class DistributedGraphDbTest : public ::testing::Test {
-  const std::string kLocal = "127.0.0.1";
-  const int kWorkerCount = 2;
-
-  class WorkerInThread {
-   public:
-    explicit WorkerInThread(database::Config config) : worker_(config) {
-      thread_ = std::thread([this, config] { worker_.WaitForShutdown(); });
-    }
-
-    ~WorkerInThread() {
-      if (thread_.joinable()) thread_.join();
-    }
-
-    database::Worker worker_;
-    std::thread thread_;
-  };
-
- protected:
-  virtual int QueryExecutionTimeSec(int) { return 180; }
-
-  void Initialize(
-      std::function<database::Config(database::Config config)> modify_config) {
-    const auto kInitTime = 200ms;
-
-    database::Config master_config;
-    master_config.master_endpoint = {kLocal, 0};
-    master_config.query_execution_time_sec = QueryExecutionTimeSec(0);
-    master_config.durability_directory = tmp_dir_;
-    master_ = std::make_unique<database::Master>(modify_config(master_config));
-    std::this_thread::sleep_for(kInitTime);
-
-    auto worker_config = [this](int worker_id) {
-      database::Config config;
-      config.worker_id = worker_id;
-      config.master_endpoint = master_->endpoint();
-      config.durability_directory = tmp_dir_;
-      config.worker_endpoint = {kLocal, 0};
-      config.query_execution_time_sec = QueryExecutionTimeSec(worker_id);
-      return config;
-    };
-
-    for (int i = 0; i < kWorkerCount; ++i) {
-      workers_.emplace_back(std::make_unique<WorkerInThread>(
-          modify_config(worker_config(i + 1))));
-      std::this_thread::sleep_for(kInitTime);
-    }
-  }
-
-  void SetUp() override {
-    Initialize([](database::Config config) { return config; });
-  }
-
-  void ShutDown() {
-    // Kill master first because it will expect a shutdown response from the
-    // workers.
-    auto t = std::thread([this]() { master_ = nullptr; });
-    workers_.clear();
-    if (t.joinable()) t.join();
-  }
-
-  void CleanDurability() {
-    if (fs::exists(tmp_dir_)) fs::remove_all(tmp_dir_);
-  }
-
-  void TearDown() override {
-    ShutDown();
-    CleanDurability();
-  }
-
-  database::Master &master() { return *master_; }
-  auto &master_tx_engine() {
-    return dynamic_cast<tx::MasterEngine &>(master_->tx_engine());
-  }
-
-  database::Worker &worker(int worker_id) {
-    return workers_[worker_id - 1]->worker_;
-  }
-
-  /// Inserts a vertex and returns its global address. Does it in a new
-  /// transaction.
-  storage::VertexAddress InsertVertex(database::GraphDb &db) {
-    database::GraphDbAccessor dba{db};
-    auto r_val = dba.InsertVertex().GlobalAddress();
-    dba.Commit();
-    return r_val;
-  }
-
-  /// Inserts an edge (on the 'from' side) and returns its global address.
-  auto InsertEdge(storage::VertexAddress from_addr,
-                  storage::VertexAddress to_addr,
-                  const std::string &edge_type_name) {
-    CHECK(from_addr.is_remote() && to_addr.is_remote())
-        << "Distributed test InsertEdge only takes global addresses";
-    database::GraphDbAccessor dba{master()};
-    VertexAccessor from{from_addr, dba};
-    VertexAccessor to{to_addr, dba};
-    auto r_val =
-        dba.InsertEdge(from, to, dba.EdgeType(edge_type_name)).GlobalAddress();
-    master().updates_server().Apply(dba.transaction_id());
-    worker(1).updates_server().Apply(dba.transaction_id());
-    worker(2).updates_server().Apply(dba.transaction_id());
-    dba.Commit();
-    return r_val;
-  }
-
-  auto VertexCount(database::GraphDb &db) {
-    database::GraphDbAccessor dba{db};
-    auto vertices = dba.Vertices(false);
-    return std::distance(vertices.begin(), vertices.end());
-  };
-
-  auto EdgeCount(database::GraphDb &db) {
-    database::GraphDbAccessor dba(db);
-    auto edges = dba.Edges(false);
-    return std::distance(edges.begin(), edges.end());
-  };
-
-  fs::path tmp_dir_ = fs::temp_directory_path() /
-                      ("MG_test_unit_durability" + std::to_string(getpid()));
-
- private:
-  std::unique_ptr<database::Master> master_;
-  std::vector<std::unique_ptr<WorkerInThread>> workers_;
-};
diff --git a/tests/unit/distributed_coordination.cpp b/tests/unit/distributed_coordination.cpp
deleted file mode 100644
index 396100e08..000000000
--- a/tests/unit/distributed_coordination.cpp
+++ /dev/null
@@ -1,183 +0,0 @@
-#include <atomic>
-#include <experimental/optional>
-#include <memory>
-#include <thread>
-#include <unordered_set>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-
-#include "communication/rpc/client_pool.hpp"
-#include "communication/rpc/server.hpp"
-#include "distributed/cluster_discovery_master.hpp"
-#include "distributed/cluster_discovery_worker.hpp"
-#include "distributed/coordination_master.hpp"
-#include "distributed/coordination_worker.hpp"
-#include "distributed/rpc_worker_clients.hpp"
-#include "io/network/endpoint.hpp"
-
-using communication::rpc::ClientPool;
-using communication::rpc::Server;
-using namespace distributed;
-using namespace std::literals::chrono_literals;
-
-const int kWorkerCount = 5;
-const std::string kLocal = "127.0.0.1";
-
-class WorkerCoordinationInThread {
-  struct Worker {
-    Worker(Endpoint master_endpoint) : master_endpoint(master_endpoint) {}
-    Endpoint master_endpoint;
-    Server server{{kLocal, 0}};
-    WorkerCoordination coord{server, master_endpoint};
-    ClientPool client_pool{master_endpoint};
-    ClusterDiscoveryWorker discovery{server, coord, client_pool};
-    std::atomic<int> worker_id_{0};
-  };
-
- public:
-  WorkerCoordinationInThread(io::network::Endpoint master_endpoint,
-                             int desired_id = -1) {
-    std::atomic<bool> init_done{false};
-    worker_thread_ =
-        std::thread([this, master_endpoint, desired_id, &init_done] {
-          worker.emplace(master_endpoint);
-          worker->discovery.RegisterWorker(desired_id);
-          worker->worker_id_ = desired_id;
-          init_done = true;
-          worker->coord.WaitForShutdown();
-          worker = std::experimental::nullopt;
-        });
-
-    while (!init_done) std::this_thread::sleep_for(10ms);
-  }
-
-  int worker_id() const { return worker->worker_id_; }
-  auto endpoint() const { return worker->server.endpoint(); }
-  auto worker_endpoint(int worker_id) {
-    return worker->coord.GetEndpoint(worker_id);
-  }
-  auto worker_ids() { return worker->coord.GetWorkerIds(); }
-  void join() { worker_thread_.join(); }
-
- private:
-  std::thread worker_thread_;
-  std::experimental::optional<Worker> worker;
-};
-
-TEST(Distributed, Coordination) {
-  Server master_server({kLocal, 0});
-  std::vector<std::unique_ptr<WorkerCoordinationInThread>> workers;
-  {
-    MasterCoordination master_coord(master_server.endpoint());
-    master_coord.SetRecoveryInfo(std::experimental::nullopt);
-    RpcWorkerClients rpc_worker_clients(master_coord);
-    ClusterDiscoveryMaster master_discovery_(master_server, master_coord,
-                                             rpc_worker_clients);
-
-    for (int i = 1; i <= kWorkerCount; ++i)
-      workers.emplace_back(std::make_unique<WorkerCoordinationInThread>(
-          master_server.endpoint(), i));
-
-    // Expect that all workers have a different ID.
-    std::unordered_set<int> worker_ids;
-    for (const auto &w : workers) worker_ids.insert(w->worker_id());
-    ASSERT_EQ(worker_ids.size(), kWorkerCount);
-
-    // Check endpoints.
-    for (auto &w1 : workers) {
-      for (auto &w2 : workers) {
-        EXPECT_EQ(w1->worker_endpoint(w2->worker_id()), w2->endpoint());
-      }
-    }
-  }  // Coordinated shutdown.
-
-  for (auto &worker : workers) worker->join();
-}
-
-TEST(Distributed, DesiredAndUniqueId) {
-  Server master_server({kLocal, 0});
-  std::vector<std::unique_ptr<WorkerCoordinationInThread>> workers;
-  {
-    MasterCoordination master_coord(master_server.endpoint());
-    master_coord.SetRecoveryInfo(std::experimental::nullopt);
-    RpcWorkerClients rpc_worker_clients(master_coord);
-    ClusterDiscoveryMaster master_discovery_(master_server, master_coord,
-                                             rpc_worker_clients);
-
-    workers.emplace_back(std::make_unique<WorkerCoordinationInThread>(
-        master_server.endpoint(), 42));
-    EXPECT_EQ(workers[0]->worker_id(), 42);
-
-    EXPECT_DEATH(
-        workers.emplace_back(std::make_unique<WorkerCoordinationInThread>(
-            master_server.endpoint(), 42)),
-        "");
-  }
-
-  for (auto &worker : workers) worker->join();
-}
-
-TEST(Distributed, CoordinationWorkersId) {
-  Server master_server({kLocal, 0});
-  std::vector<std::unique_ptr<WorkerCoordinationInThread>> workers;
-  {
-    MasterCoordination master_coord(master_server.endpoint());
-    master_coord.SetRecoveryInfo(std::experimental::nullopt);
-    RpcWorkerClients rpc_worker_clients(master_coord);
-    ClusterDiscoveryMaster master_discovery_(master_server, master_coord,
-                                             rpc_worker_clients);
-
-    workers.emplace_back(std::make_unique<WorkerCoordinationInThread>(
-        master_server.endpoint(), 42));
-    workers.emplace_back(std::make_unique<WorkerCoordinationInThread>(
-        master_server.endpoint(), 43));
-
-    std::vector<int> ids;
-    ids.push_back(0);
-
-    for (auto &worker : workers) ids.push_back(worker->worker_id());
-    EXPECT_THAT(master_coord.GetWorkerIds(),
-                testing::UnorderedElementsAreArray(ids));
-  }
-
-  for (auto &worker : workers) worker->join();
-}
-
-TEST(Distributed, ClusterDiscovery) {
-  Server master_server({kLocal, 0});
-  std::vector<std::unique_ptr<WorkerCoordinationInThread>> workers;
-  {
-    MasterCoordination master_coord(master_server.endpoint());
-    master_coord.SetRecoveryInfo(std::experimental::nullopt);
-    RpcWorkerClients rpc_worker_clients(master_coord);
-    ClusterDiscoveryMaster master_discovery_(master_server, master_coord,
-                                             rpc_worker_clients);
-    std::vector<int> ids;
-    int worker_count = 10;
-
-    ids.push_back(0);
-    for (int i = 1; i <= worker_count; ++i) {
-      workers.emplace_back(std::make_unique<WorkerCoordinationInThread>(
-          master_server.endpoint(), i));
-
-      ids.push_back(i);
-    }
-
-    EXPECT_THAT(master_coord.GetWorkerIds(),
-                testing::UnorderedElementsAreArray(ids));
-    for (auto &worker : workers) {
-      EXPECT_THAT(worker->worker_ids(),
-                  testing::UnorderedElementsAreArray(ids));
-    }
-  }
-
-  for (auto &worker : workers) worker->join();
-}
-
-int main(int argc, char **argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
-  return RUN_ALL_TESTS();
-}
diff --git a/tests/unit/distributed_data_exchange.cpp b/tests/unit/distributed_data_exchange.cpp
deleted file mode 100644
index ee979088e..000000000
--- a/tests/unit/distributed_data_exchange.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-#include <unordered_map>
-
-#include "gtest/gtest.h"
-
-#include "database/graph_db_accessor.hpp"
-#include "storage/edge_accessor.hpp"
-#include "storage/vertex_accessor.hpp"
-
-#include "distributed_common.hpp"
-
-using namespace database;
-
-TEST_F(DistributedGraphDbTest, RemoteDataGetting) {
-  // Only old data is visible remotely, so create and commit some data.
-  gid::Gid v1_id, v2_id, e1_id;
-
-  {
-    GraphDbAccessor dba{master()};
-    auto v1 = dba.InsertVertex();
-    auto v2 = dba.InsertVertex();
-    auto e1 = dba.InsertEdge(v1, v2, dba.EdgeType("et"));
-
-    // Set some data so we see we're getting the right stuff.
-    v1.PropsSet(dba.Property("p1"), 42);
-    v1.add_label(dba.Label("label"));
-    v2.PropsSet(dba.Property("p2"), "value");
-    e1.PropsSet(dba.Property("p3"), true);
-
-    v1_id = v1.gid();
-    v2_id = v2.gid();
-    e1_id = e1.gid();
-
-    dba.Commit();
-  }
-
-  // The master must start a transaction before workers can work in it.
-  GraphDbAccessor master_dba{master()};
-
-  {
-    GraphDbAccessor w1_dba{worker(1), master_dba.transaction_id()};
-    VertexAccessor v1_in_w1{{v1_id, 0}, w1_dba};
-    EXPECT_NE(v1_in_w1.GetOld(), nullptr);
-    EXPECT_EQ(v1_in_w1.GetNew(), nullptr);
-    EXPECT_EQ(v1_in_w1.PropsAt(w1_dba.Property("p1")).Value<int64_t>(), 42);
-    EXPECT_TRUE(v1_in_w1.has_label(w1_dba.Label("label")));
-  }
-
-  {
-    GraphDbAccessor w2_dba{worker(2), master_dba.transaction_id()};
-    VertexAccessor v2_in_w2{{v2_id, 0}, w2_dba};
-    EXPECT_NE(v2_in_w2.GetOld(), nullptr);
-    EXPECT_EQ(v2_in_w2.GetNew(), nullptr);
-    EXPECT_EQ(v2_in_w2.PropsAt(w2_dba.Property("p2")).Value<std::string>(),
-              "value");
-    EXPECT_FALSE(v2_in_w2.has_label(w2_dba.Label("label")));
-
-    VertexAccessor v1_in_w2{{v1_id, 0}, w2_dba};
-    EdgeAccessor e1_in_w2{{e1_id, 0}, w2_dba};
-    EXPECT_EQ(e1_in_w2.from(), v1_in_w2);
-    EXPECT_EQ(e1_in_w2.to(), v2_in_w2);
-    EXPECT_EQ(e1_in_w2.EdgeType(), w2_dba.EdgeType("et"));
-    EXPECT_EQ(e1_in_w2.PropsAt(w2_dba.Property("p3")).Value<bool>(), true);
-  }
-}
-
-TEST_F(DistributedGraphDbTest, RemoteExpansion) {
-  // Model (v1)-->(v2), where each vertex is on one worker.
-  auto from = InsertVertex(worker(1));
-  auto to = InsertVertex(worker(2));
-  InsertEdge(from, to, "et");
-  {
-    // Expand on the master for three hops. Collect vertex gids.
-    GraphDbAccessor dba{master()};
-    std::vector<VertexAccessor> visited;
-
-    auto expand = [](auto &v) {
-      for (auto e : v.out()) return e.to();
-      for (auto e : v.in()) return e.from();
-      CHECK(false) << "No edge in vertex";
-    };
-
-    // Do a few hops back and forth, all on the master.
-    VertexAccessor v{from, dba};
-    for (int i = 0; i < 5; ++i) {
-      v = expand(v);
-      EXPECT_FALSE(v.address().is_local());
-      EXPECT_EQ(v.address(), i % 2 ? from : to);
-    }
-  }
-}
diff --git a/tests/unit/distributed_durability.cpp b/tests/unit/distributed_durability.cpp
deleted file mode 100644
index b422c5f18..000000000
--- a/tests/unit/distributed_durability.cpp
+++ /dev/null
@@ -1,117 +0,0 @@
-#include "distributed_common.hpp"
-
-#include "database/graph_db_accessor.hpp"
-#include "durability/snapshooter.hpp"
-
-class DistributedDurability : public DistributedGraphDbTest {
- public:
-  void AddVertices() {
-    AddVertex(master(), "master");
-    AddVertex(worker(1), "worker1");
-    AddVertex(worker(2), "worker2");
-  }
-  void CheckVertices(int expected_count) {
-    CheckVertex(master(), expected_count, "master");
-    CheckVertex(worker(1), expected_count, "worker1");
-    CheckVertex(worker(2), expected_count, "worker2");
-  }
-  void RestartWithRecovery() {
-    ShutDown();
-    Initialize([](database::Config config) {
-      config.db_recover_on_startup = true;
-      return config;
-    });
-  }
-
- private:
-  void AddVertex(database::GraphDb &db, const std::string &label) {
-    database::GraphDbAccessor dba(db);
-    auto vertex = dba.InsertVertex();
-    vertex.add_label(dba.Label(label));
-    dba.Commit();
-  }
-
-  void CheckVertex(database::GraphDb &db, int expected_count,
-                   const std::string &label) {
-    database::GraphDbAccessor dba(db);
-    auto it = dba.Vertices(false);
-    std::vector<VertexAccessor> vertices{it.begin(), it.end()};
-    EXPECT_EQ(vertices.size(), expected_count);
-    for (auto &vertex : vertices) {
-      ASSERT_EQ(vertex.labels().size(), 1);
-      EXPECT_EQ(vertex.labels()[0], dba.Label(label));
-    }
-  }
-};
-
-TEST_F(DistributedDurability, MakeSnapshot) {
-  // Create a graph with 3 nodes with 3 labels, one on each and make a snapshot
-  // of it
-  {
-    AddVertices();
-    database::GraphDbAccessor dba(master());
-    master().MakeSnapshot(dba);
-  }
-  // Recover the graph and check if it's the same as before
-  {
-    RestartWithRecovery();
-    CheckVertices(1);
-  }
-}
-
-TEST_F(DistributedDurability, SnapshotOnExit) {
-  {
-    TearDown();
-    Initialize([](database::Config config) {
-      config.snapshot_on_exit = true;
-      return config;
-    });
-    AddVertices();
-  }
-  // Recover the graph and check if it's the same as before
-  {
-    RestartWithRecovery();
-    CheckVertices(1);
-  }
-}
-
-TEST_F(DistributedDurability, RecoveryFromSameSnapshot) {
-  {
-    AddVertices();
-    // Make snapshot on one worker, expect it won't recover from that.
-    database::GraphDbAccessor dba(worker(1));
-    worker(1).MakeSnapshot(dba);
-  }
-  {
-    RestartWithRecovery();
-    CheckVertices(0);
-    AddVertices();
-    database::GraphDbAccessor dba(master());
-    master().MakeSnapshot(dba);
-  }
-  {
-    RestartWithRecovery();
-    CheckVertices(1);
-    AddVertices();
-    CheckVertices(2);
-    // Make snapshot on one worker, expect it won't recover from that.
-    database::GraphDbAccessor dba(worker(1));
-    worker(1).MakeSnapshot(dba);
-  }
-  {
-    RestartWithRecovery();
-    CheckVertices(1);
-  }
-}
-
-TEST_F(DistributedDurability, RecoveryFailure) {
-  {
-    AddVertices();
-    // Make a snapshot on the master without the right snapshots on workers.
-    database::GraphDbAccessor dba(master());
-    bool status = durability::MakeSnapshot(master(), dba, tmp_dir_, 100);
-    ASSERT_TRUE(status);
-  }
-  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
-  EXPECT_DEATH(RestartWithRecovery(), "worker failed to recover");
-}
diff --git a/tests/unit/distributed_gc.cpp b/tests/unit/distributed_gc.cpp
deleted file mode 100644
index 53fd5b5c0..000000000
--- a/tests/unit/distributed_gc.cpp
+++ /dev/null
@@ -1,78 +0,0 @@
-#include <gtest/gtest.h>
-
-#include "distributed_common.hpp"
-
-TEST_F(DistributedGraphDbTest, GarbageCollect) {
-  database::GraphDbAccessor dba{master()};
-  auto tx = dba.transaction_id();
-  dba.Commit();
-
-  // Create multiple transactions so that the commit log can be cleared
-  for (int i = 0; i < tx::CommitLog::kBitsetBlockSize; ++i) {
-    database::GraphDbAccessor dba{master()};
-  }
-
-  master().CollectGarbage();
-  worker(1).CollectGarbage();
-  worker(2).CollectGarbage();
-  EXPECT_EQ(master().tx_engine().Info(tx).is_committed(), true);
-
-  database::GraphDbAccessor dba2{master()};
-  auto tx_last = dba2.transaction_id();
-  dba2.Commit();
-
-  worker(1).CollectGarbage();
-  worker(2).CollectGarbage();
-  master().CollectGarbage();
-
-  EXPECT_DEATH(master().tx_engine().Info(tx), "chunk is nullptr");
-  EXPECT_DEATH(worker(1).tx_engine().Info(tx), "chunk is nullptr");
-  EXPECT_DEATH(worker(2).tx_engine().Info(tx), "chunk is nullptr");
-  EXPECT_EQ(master().tx_engine().Info(tx_last).is_committed(), true);
-  EXPECT_EQ(worker(1).tx_engine().Info(tx_last).is_committed(), true);
-  EXPECT_EQ(worker(2).tx_engine().Info(tx_last).is_committed(), true);
-}
-
-TEST_F(DistributedGraphDbTest, GarbageCollectBlocked) {
-  database::GraphDbAccessor dba{master()};
-  auto tx = dba.transaction_id();
-  dba.Commit();
-
-  // Block garbage collection because this is a still alive transaction on the
-  // worker
-  database::GraphDbAccessor dba3{worker(1)};
-
-  // Create multiple transactions so that the commit log can be cleared
-  for (int i = 0; i < tx::CommitLog::kBitsetBlockSize; ++i) {
-    database::GraphDbAccessor dba{master()};
-  }
-
-  // Query for a large id so that the commit log new block is created
-  master().tx_engine().Info(tx::CommitLog::kBitsetBlockSize);
-
-  master().CollectGarbage();
-  worker(1).CollectGarbage();
-  worker(2).CollectGarbage();
-  EXPECT_EQ(master().tx_engine().Info(tx).is_committed(), true);
-
-  database::GraphDbAccessor dba2{master()};
-  auto tx_last = dba2.transaction_id();
-  dba2.Commit();
-
-  worker(1).CollectGarbage();
-  worker(2).CollectGarbage();
-  master().CollectGarbage();
-
-  EXPECT_EQ(master().tx_engine().Info(tx).is_committed(), true);
-  EXPECT_EQ(worker(1).tx_engine().Info(tx).is_committed(), true);
-  EXPECT_EQ(worker(2).tx_engine().Info(tx).is_committed(), true);
-  EXPECT_EQ(master().tx_engine().Info(tx_last).is_committed(), true);
-  EXPECT_EQ(worker(1).tx_engine().Info(tx_last).is_committed(), true);
-  EXPECT_EQ(worker(2).tx_engine().Info(tx_last).is_committed(), true);
-}
-
-int main(int argc, char **argv) {
-  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/tests/unit/distributed_graph_db.cpp b/tests/unit/distributed_graph_db.cpp
deleted file mode 100644
index 308b0157a..000000000
--- a/tests/unit/distributed_graph_db.cpp
+++ /dev/null
@@ -1,182 +0,0 @@
-#include <memory>
-#include <thread>
-#include <unordered_set>
-
-#include "gtest/gtest.h"
-
-#include "database/graph_db.hpp"
-#include "distributed/coordination.hpp"
-#include "distributed/coordination_master.hpp"
-#include "distributed/coordination_worker.hpp"
-#include "distributed/data_rpc_clients.hpp"
-#include "distributed/data_rpc_server.hpp"
-#include "distributed/plan_consumer.hpp"
-#include "distributed/plan_dispatcher.hpp"
-#include "distributed/pull_rpc_clients.hpp"
-#include "distributed_common.hpp"
-#include "io/network/endpoint.hpp"
-#include "query/frontend/ast/ast.hpp"
-#include "query/frontend/ast/cypher_main_visitor.hpp"
-#include "query/frontend/semantic/symbol_generator.hpp"
-#include "query/frontend/semantic/symbol_table.hpp"
-#include "query/interpreter.hpp"
-#include "query/plan/planner.hpp"
-#include "query/typed_value.hpp"
-#include "query_common.hpp"
-#include "query_plan_common.hpp"
-#include "transactions/engine_master.hpp"
-
-using namespace distributed;
-using namespace database;
-
-TEST_F(DistributedGraphDbTest, Coordination) {
-  EXPECT_NE(master().endpoint().port(), 0);
-  EXPECT_NE(worker(1).endpoint().port(), 0);
-  EXPECT_NE(worker(2).endpoint().port(), 0);
-
-  EXPECT_EQ(master().GetEndpoint(1), worker(1).endpoint());
-  EXPECT_EQ(master().GetEndpoint(2), worker(2).endpoint());
-  EXPECT_EQ(worker(1).GetEndpoint(0), master().endpoint());
-  EXPECT_EQ(worker(1).GetEndpoint(2), worker(2).endpoint());
-  EXPECT_EQ(worker(2).GetEndpoint(0), master().endpoint());
-  EXPECT_EQ(worker(2).GetEndpoint(1), worker(1).endpoint());
-}
-
-TEST_F(DistributedGraphDbTest, TxEngine) {
-  auto *tx1 = master_tx_engine().Begin();
-  auto *tx2 = master_tx_engine().Begin();
-  EXPECT_EQ(tx2->snapshot().size(), 1);
-  EXPECT_EQ(
-      worker(1).tx_engine().RunningTransaction(tx1->id_)->snapshot().size(), 0);
-  EXPECT_EQ(worker(2).tx_engine().RunningTransaction(tx2->id_)->snapshot(),
-            tx2->snapshot());
-
-  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
-  EXPECT_DEATH(worker(2).tx_engine().RunningTransaction(123), "");
-}
-
-template <typename TType>
-using mapper_vec =
-    std::vector<std::reference_wrapper<storage::ConcurrentIdMapper<TType>>>;
-
-TEST_F(DistributedGraphDbTest, StorageTypes) {
-  auto test_mappers = [](auto mappers, auto ids) {
-    for (size_t i = 0; i < mappers.size(); ++i) {
-      ids.emplace_back(
-          mappers[i].get().value_to_id("value" + std::to_string(i)));
-    }
-    EXPECT_GT(ids.size(), 0);
-    for (size_t i = 0; i < mappers.size(); ++i) {
-      for (size_t j = 0; j < ids.size(); ++j) {
-        EXPECT_EQ(mappers[i].get().id_to_value(ids[j]),
-                  "value" + std::to_string(j));
-      }
-    }
-  };
-
-  test_mappers(mapper_vec<storage::Label>{master().label_mapper(),
-                                          worker(1).label_mapper(),
-                                          worker(2).label_mapper()},
-               std::vector<storage::Label>{});
-  test_mappers(mapper_vec<storage::EdgeType>{master().edge_type_mapper(),
-                                             worker(1).edge_type_mapper(),
-                                             worker(2).edge_type_mapper()},
-               std::vector<storage::EdgeType>{});
-  test_mappers(mapper_vec<storage::Property>{master().property_mapper(),
-                                             worker(1).property_mapper(),
-                                             worker(2).property_mapper()},
-               std::vector<storage::Property>{});
-}
-
-TEST_F(DistributedGraphDbTest, Counters) {
-  EXPECT_EQ(master().counters().Get("a"), 0);
-  EXPECT_EQ(worker(1).counters().Get("a"), 1);
-  EXPECT_EQ(worker(2).counters().Get("a"), 2);
-
-  EXPECT_EQ(worker(1).counters().Get("b"), 0);
-  EXPECT_EQ(worker(2).counters().Get("b"), 1);
-  EXPECT_EQ(master().counters().Get("b"), 2);
-}
-
-TEST_F(DistributedGraphDbTest, DispatchPlan) {
-  auto kRPCWaitTime = 600ms;
-  int64_t plan_id = 5;
-  SymbolTable symbol_table;
-  AstTreeStorage storage;
-
-  auto scan_all = MakeScanAll(storage, symbol_table, "n");
-
-  master().plan_dispatcher().DispatchPlan(plan_id, scan_all.op_, symbol_table);
-  std::this_thread::sleep_for(kRPCWaitTime);
-
-  auto check_for_worker = [plan_id, &symbol_table](auto &worker) {
-    auto &cached = worker.plan_consumer().PlanForId(plan_id);
-    EXPECT_NE(dynamic_cast<query::plan::ScanAll *>(cached.plan.get()), nullptr);
-    EXPECT_EQ(cached.symbol_table.max_position(), symbol_table.max_position());
-    EXPECT_EQ(cached.symbol_table.table(), symbol_table.table());
-  };
-  check_for_worker(worker(1));
-  check_for_worker(worker(2));
-
-  master().plan_dispatcher().RemovePlan(plan_id);
-  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
-  EXPECT_DEATH(check_for_worker(worker(1)), "Missing plan*");
-}
-
-TEST_F(DistributedGraphDbTest, BuildIndexDistributed) {
-  storage::Label label;
-  storage::Property property;
-
-  {
-    GraphDbAccessor dba0{master()};
-    label = dba0.Label("label");
-    property = dba0.Property("property");
-    auto tx_id = dba0.transaction_id();
-
-    GraphDbAccessor dba1{worker(1), tx_id};
-    GraphDbAccessor dba2{worker(2), tx_id};
-    auto add_vertex = [label, property](GraphDbAccessor &dba) {
-      auto vertex = dba.InsertVertex();
-      vertex.add_label(label);
-      vertex.PropsSet(property, 1);
-    };
-    for (int i = 0; i < 100; ++i) add_vertex(dba0);
-    for (int i = 0; i < 50; ++i) add_vertex(dba1);
-    for (int i = 0; i < 300; ++i) add_vertex(dba2);
-    dba0.Commit();
-  }
-
-  {
-    GraphDbAccessor dba{master()};
-    dba.BuildIndex(label, property);
-    EXPECT_TRUE(dba.LabelPropertyIndexExists(label, property));
-    EXPECT_EQ(CountIterable(dba.Vertices(label, property, false)), 100);
-  }
-
-  GraphDbAccessor dba_master{master()};
-
-  {
-    GraphDbAccessor dba{worker(1), dba_master.transaction_id()};
-    EXPECT_TRUE(dba.LabelPropertyIndexExists(label, property));
-    EXPECT_EQ(CountIterable(dba.Vertices(label, property, false)), 50);
-  }
-
-  {
-    GraphDbAccessor dba{worker(2), dba_master.transaction_id()};
-    EXPECT_TRUE(dba.LabelPropertyIndexExists(label, property));
-    EXPECT_EQ(CountIterable(dba.Vertices(label, property, false)), 300);
-  }
-}
-
-TEST_F(DistributedGraphDbTest, WorkerOwnedDbAccessors) {
-  GraphDbAccessor dba_w1(worker(1));
-  auto v = dba_w1.InsertVertex();
-  auto prop = dba_w1.Property("p");
-  v.PropsSet(prop, 42);
-  auto v_ga = v.GlobalAddress();
-  dba_w1.Commit();
-
-  GraphDbAccessor dba_w2(worker(2));
-  VertexAccessor v_in_w2{v_ga, dba_w2};
-  EXPECT_EQ(v_in_w2.PropsAt(prop).Value<int64_t>(), 42);
-}
diff --git a/tests/unit/distributed_interpretation.cpp b/tests/unit/distributed_interpretation.cpp
deleted file mode 100644
index c6f2aefb8..000000000
--- a/tests/unit/distributed_interpretation.cpp
+++ /dev/null
@@ -1,279 +0,0 @@
-#include <chrono>
-#include <experimental/optional>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-
-#include "database/graph_db.hpp"
-#include "distributed/plan_consumer.hpp"
-#include "distributed_common.hpp"
-#include "query/interpreter.hpp"
-#include "query_common.hpp"
-#include "query_plan_common.hpp"
-#include "utils/timer.hpp"
-
-// We use this to ensure a cached plan is removed from the concurrent map and
-// properly destructed.
-DECLARE_int32(skiplist_gc_interval);
-
-using namespace distributed;
-using namespace database;
-
-class DistributedInterpretationTest : public DistributedGraphDbTest {
- protected:
-  void SetUp() override {
-    DistributedGraphDbTest::SetUp();
-    interpreter_.emplace(master());
-  }
-
-  void TearDown() override {
-    interpreter_ = std::experimental::nullopt;
-    DistributedGraphDbTest::TearDown();
-  }
-
-  auto Run(const std::string &query) {
-    std::map<std::string, query::TypedValue> params = {};
-    GraphDbAccessor dba(master());
-    ResultStreamFaker result;
-    interpreter_.value()(query, dba, params, false).PullAll(result);
-    dba.Commit();
-    return result.GetResults();
-  }
-
- private:
-  std::experimental::optional<query::Interpreter> interpreter_;
-};
-
-TEST_F(DistributedInterpretationTest, PullTest) {
-  auto results = Run("OPTIONAL MATCH(n) UNWIND(RANGE(0, 20)) AS X RETURN 1");
-  ASSERT_EQ(results.size(), 3 * 21);
-
-  for (auto result : results) {
-    ASSERT_EQ(result.size(), 1U);
-    ASSERT_EQ(result[0].ValueInt(), 1);
-  }
-}
-
-TEST_F(DistributedInterpretationTest, PullNoResultsTest) {
-  auto results = Run("MATCH (n) RETURN n");
-  ASSERT_EQ(results.size(), 0U);
-}
-
-TEST_F(DistributedInterpretationTest, CreateExpand) {
-  InsertVertex(master());
-  InsertVertex(worker(1));
-  InsertVertex(worker(1));
-  InsertVertex(worker(2));
-  InsertVertex(worker(2));
-  InsertVertex(worker(2));
-
-  Run("MATCH (n) CREATE (n)-[:T]->(m) RETURN n");
-
-  EXPECT_EQ(VertexCount(master()), 2);
-  EXPECT_EQ(VertexCount(worker(1)), 4);
-  EXPECT_EQ(VertexCount(worker(2)), 6);
-}
-
-TEST_F(DistributedInterpretationTest, RemoteExpandTest2) {
-  // Make a fully connected graph with vertices scattered across master and
-  // worker storage.
-  // Vertex count is low, because test gets exponentially slower. The expected
-  // result size is ~ vertices^3, and then that is compared at the end in no
-  // particular order which causes O(result_size^2) comparisons.
-  int verts_per_storage = 3;
-  std::vector<storage::VertexAddress> vertices;
-  vertices.reserve(verts_per_storage * 3);
-  auto add_vertices = [this, &vertices, &verts_per_storage](auto &db) {
-    for (int i = 0; i < verts_per_storage; ++i)
-      vertices.push_back(InsertVertex(db));
-  };
-  add_vertices(master());
-  add_vertices(worker(1));
-  add_vertices(worker(2));
-  auto get_edge_type = [](int v1, int v2) {
-    return std::to_string(v1) + "-" + std::to_string(v2);
-  };
-  std::vector<std::string> edge_types;
-  edge_types.reserve(vertices.size() * vertices.size());
-  for (size_t i = 0; i < vertices.size(); ++i) {
-    for (size_t j = 0; j < vertices.size(); ++j) {
-      auto edge_type = get_edge_type(i, j);
-      edge_types.push_back(edge_type);
-      InsertEdge(vertices[i], vertices[j], edge_type);
-    }
-  }
-
-  auto results = Run("MATCH (n)-[r1]-(m)-[r2]-(l) RETURN type(r1), type(r2)");
-  // We expect the number of results to be:
-  size_t expected_result_size =
-      // pick (n)
-      vertices.size() *
-      // pick both directed edges to other (m) and a
-      // single edge to (m) which equals (n), hence -1
-      (2 * vertices.size() - 1) *
-      // Pick as before, but exclude the previously taken edge, hence another -1
-      (2 * vertices.size() - 1 - 1);
-  std::vector<std::vector<std::string>> expected;
-  expected.reserve(expected_result_size);
-  for (size_t n = 0; n < vertices.size(); ++n) {
-    for (size_t m = 0; m < vertices.size(); ++m) {
-      std::vector<std::string> r1s{get_edge_type(n, m)};
-      if (n != m) r1s.push_back(get_edge_type(m, n));
-      for (size_t l = 0; l < vertices.size(); ++l) {
-        std::vector<std::string> r2s{get_edge_type(m, l)};
-        if (m != l) r2s.push_back(get_edge_type(l, m));
-        for (const auto &r1 : r1s) {
-          for (const auto &r2 : r2s) {
-            if (r1 == r2) continue;
-            expected.push_back({r1, r2});
-          }
-        }
-      }
-    }
-  }
-  ASSERT_EQ(expected.size(), expected_result_size);
-  ASSERT_EQ(results.size(), expected_result_size);
-  std::vector<std::vector<std::string>> got;
-  got.reserve(results.size());
-  for (const auto &res : results) {
-    std::vector<std::string> row;
-    row.reserve(res.size());
-    for (const auto &col : res) {
-      row.push_back(col.Value<std::string>());
-    }
-    got.push_back(row);
-  }
-  ASSERT_THAT(got, testing::UnorderedElementsAreArray(expected));
-}
-
-TEST_F(DistributedInterpretationTest, Cartesian) {
-  // Create some data on the master and both workers.
-  storage::Property prop;
-  {
-    GraphDbAccessor dba{master()};
-    auto tx_id = dba.transaction_id();
-    GraphDbAccessor dba1{worker(1), tx_id};
-    GraphDbAccessor dba2{worker(2), tx_id};
-    prop = dba.Property("prop");
-    auto add_data = [prop](GraphDbAccessor &dba, int value) {
-      dba.InsertVertex().PropsSet(prop, value);
-    };
-
-    for (int i = 0; i < 10; ++i) add_data(dba, i);
-    for (int i = 10; i < 20; ++i) add_data(dba1, i);
-    for (int i = 20; i < 30; ++i) add_data(dba2, i);
-
-    dba.Commit();
-  }
-
-  std::vector<std::vector<int64_t>> expected;
-  for (int64_t i = 0; i < 30; ++i)
-    for (int64_t j = 0; j < 30; ++j) expected.push_back({i, j});
-
-  auto results = Run("MATCH (n), (m) RETURN n.prop, m.prop;");
-
-  size_t expected_result_size = 30 * 30;
-  ASSERT_EQ(expected.size(), expected_result_size);
-  ASSERT_EQ(results.size(), expected_result_size);
-
-  std::vector<std::vector<int64_t>> got;
-  got.reserve(results.size());
-  for (const auto &res : results) {
-    std::vector<int64_t> row;
-    row.reserve(res.size());
-    for (const auto &col : res) {
-      row.push_back(col.Value<int64_t>());
-    }
-    got.push_back(row);
-  }
-
-  ASSERT_THAT(got, testing::UnorderedElementsAreArray(expected));
-}
-
-class TestQueryWaitsOnFutures : public DistributedInterpretationTest {
- protected:
-  int QueryExecutionTimeSec(int worker_id) override {
-    return worker_id == 2 ? 3 : 1;
-  }
-};
-
-TEST_F(TestQueryWaitsOnFutures, Test) {
-  const int kVertexCount = 10;
-  auto make_fully_connected = [](database::GraphDb &db) {
-    database::GraphDbAccessor dba(db);
-    std::vector<VertexAccessor> vertices;
-    for (int i = 0; i < kVertexCount; ++i)
-      vertices.emplace_back(dba.InsertVertex());
-    auto et = dba.EdgeType("et");
-    for (auto &from : vertices)
-      for (auto &to : vertices) dba.InsertEdge(from, to, et);
-    dba.Commit();
-  };
-
-  make_fully_connected(worker(1));
-  ASSERT_EQ(VertexCount(worker(1)), kVertexCount);
-  ASSERT_EQ(EdgeCount(worker(1)), kVertexCount * kVertexCount);
-
-  {
-    utils::Timer timer;
-    try {
-      Run("MATCH ()--()--()--()--()--()--() RETURN count(1)");
-    } catch (...) {
-    }
-    double seconds = timer.Elapsed().count();
-    EXPECT_GT(seconds, 1);
-    EXPECT_LT(seconds, 2);
-  }
-
-  make_fully_connected(worker(2));
-  ASSERT_EQ(VertexCount(worker(2)), kVertexCount);
-  ASSERT_EQ(EdgeCount(worker(2)), kVertexCount * kVertexCount);
-
-  {
-    utils::Timer timer;
-    try {
-      Run("MATCH ()--()--()--()--()--()--() RETURN count(1)");
-    } catch (...) {
-    }
-    double seconds = timer.Elapsed().count();
-    EXPECT_GT(seconds, 3);
-  }
-}
-
-TEST_F(DistributedInterpretationTest, PlanExpiration) {
-  FLAGS_query_plan_cache_ttl = 1;
-  Run("MATCH (n) RETURN n");
-  auto ids1 = worker(1).plan_consumer().CachedPlanIds();
-  ASSERT_EQ(ids1.size(), 1);
-  // Sleep so the cached plan becomes invalid.
-  std::this_thread::sleep_for(std::chrono::milliseconds(1100));
-  Run("MATCH (n) RETURN n");
-  // Sleep so the invalidated plan (removed from cache which is a concurrent
-  // map) gets destructed and thus remote caches cleared.
-  std::this_thread::sleep_for(std::chrono::milliseconds(1500));
-  auto ids2 = worker(1).plan_consumer().CachedPlanIds();
-  ASSERT_EQ(ids2.size(), 1);
-  EXPECT_NE(ids1, ids2);
-}
-
-TEST_F(DistributedInterpretationTest, ConcurrentPlanExpiration) {
-  FLAGS_query_plan_cache_ttl = 1;
-  auto count_vertices = [this]() {
-    utils::Timer timer;
-    while (timer.Elapsed() < 3s) {
-      Run("MATCH () RETURN count(1)");
-    }
-  };
-  std::vector<std::thread> counters;
-  for (size_t i = 0; i < std::thread::hardware_concurrency(); ++i)
-    counters.emplace_back(count_vertices);
-  for (auto &t : counters) t.join();
-}
-
-int main(int argc, char **argv) {
-  google::InitGoogleLogging(argv[0]);
-  ::testing::InitGoogleTest(&argc, argv);
-  gflags::ParseCommandLineFlags(&argc, &argv, true);
-  FLAGS_skiplist_gc_interval = 1;
-  return RUN_ALL_TESTS();
-}
diff --git a/tests/unit/distributed_query_plan.cpp b/tests/unit/distributed_query_plan.cpp
deleted file mode 100644
index b3a5a9f91..000000000
--- a/tests/unit/distributed_query_plan.cpp
+++ /dev/null
@@ -1,363 +0,0 @@
-#include <memory>
-#include <thread>
-#include <unordered_set>
-
-#include "gtest/gtest.h"
-
-#include "database/graph_db.hpp"
-#include "distributed/coordination.hpp"
-#include "distributed/coordination_master.hpp"
-#include "distributed/coordination_worker.hpp"
-#include "distributed/data_rpc_clients.hpp"
-#include "distributed/data_rpc_server.hpp"
-#include "distributed/plan_consumer.hpp"
-#include "distributed/plan_dispatcher.hpp"
-#include "distributed/pull_rpc_clients.hpp"
-#include "distributed_common.hpp"
-#include "io/network/endpoint.hpp"
-#include "query/frontend/ast/ast.hpp"
-#include "query/frontend/ast/cypher_main_visitor.hpp"
-#include "query/frontend/semantic/symbol_generator.hpp"
-#include "query/frontend/semantic/symbol_table.hpp"
-#include "query/interpreter.hpp"
-#include "query/plan/planner.hpp"
-#include "query/typed_value.hpp"
-#include "query_common.hpp"
-#include "query_plan_common.hpp"
-#include "transactions/engine_master.hpp"
-
-DECLARE_int32(query_execution_time_sec);
-
-using namespace distributed;
-using namespace database;
-
-TEST_F(DistributedGraphDbTest, PullProduceRpc) {
-  GraphDbAccessor dba{master()};
-  Context ctx{dba};
-  SymbolGenerator symbol_generator{ctx.symbol_table_};
-  AstTreeStorage storage;
-
-  // Query plan for: UNWIND [42, true, "bla", 1, 2] as x RETURN x
-  using namespace query;
-  auto list =
-      LIST(LITERAL(42), LITERAL(true), LITERAL("bla"), LITERAL(1), LITERAL(2));
-  auto x = ctx.symbol_table_.CreateSymbol("x", true);
-  auto unwind = std::make_shared<plan::Unwind>(nullptr, list, x);
-  auto x_expr = IDENT("x");
-  ctx.symbol_table_[*x_expr] = x;
-  auto x_ne = NEXPR("x", x_expr);
-  ctx.symbol_table_[*x_ne] = ctx.symbol_table_.CreateSymbol("x_ne", true);
-  auto produce = MakeProduce(unwind, x_ne);
-
-  // Test that the plan works locally.
-  auto results = CollectProduce(produce.get(), ctx.symbol_table_, dba);
-  ASSERT_EQ(results.size(), 5);
-
-  const int plan_id = 42;
-  master().plan_dispatcher().DispatchPlan(plan_id, produce, ctx.symbol_table_);
-
-  Parameters params;
-  std::vector<query::Symbol> symbols{ctx.symbol_table_[*x_ne]};
-  auto remote_pull = [this, &params, &symbols](GraphDbAccessor &dba,
-                                               int worker_id) {
-    return master().pull_clients().Pull(dba, worker_id, plan_id, params,
-                                        symbols, false, 3);
-  };
-  auto expect_first_batch = [](auto &batch) {
-    EXPECT_EQ(batch.pull_state, distributed::PullState::CURSOR_IN_PROGRESS);
-    ASSERT_EQ(batch.frames.size(), 3);
-    ASSERT_EQ(batch.frames[0].size(), 1);
-    EXPECT_EQ(batch.frames[0][0].ValueInt(), 42);
-    EXPECT_EQ(batch.frames[1][0].ValueBool(), true);
-    EXPECT_EQ(batch.frames[2][0].ValueString(), "bla");
-  };
-  auto expect_second_batch = [](auto &batch) {
-    EXPECT_EQ(batch.pull_state, distributed::PullState::CURSOR_EXHAUSTED);
-    ASSERT_EQ(batch.frames.size(), 2);
-    ASSERT_EQ(batch.frames[0].size(), 1);
-    EXPECT_EQ(batch.frames[0][0].ValueInt(), 1);
-    EXPECT_EQ(batch.frames[1][0].ValueInt(), 2);
-  };
-
-  GraphDbAccessor dba_1{master()};
-  GraphDbAccessor dba_2{master()};
-  for (int worker_id : {1, 2}) {
-    // TODO flor, proper test async here.
-    auto tx1_batch1 = remote_pull(dba_1, worker_id).get();
-    expect_first_batch(tx1_batch1);
-    auto tx2_batch1 = remote_pull(dba_2, worker_id).get();
-    expect_first_batch(tx2_batch1);
-    auto tx2_batch2 = remote_pull(dba_2, worker_id).get();
-    expect_second_batch(tx2_batch2);
-    auto tx1_batch2 = remote_pull(dba_1, worker_id).get();
-    expect_second_batch(tx1_batch2);
-  }
-}
-
-TEST_F(DistributedGraphDbTest, PullProduceRpcWithGraphElements) {
-  // Create some data on the master and both workers. Each edge (3 of them)
-  // and vertex (6 of them) is uniquely identified by its worker ID and
-  // sequence ID, so we can check that we retrieved all of them.
-  storage::Property prop;
-  {
-    GraphDbAccessor dba{master()};
-    prop = dba.Property("prop");
-    auto create_data = [prop](GraphDbAccessor &dba, int worker_id) {
-      auto v1 = dba.InsertVertex();
-      v1.PropsSet(prop, worker_id * 10);
-      auto v2 = dba.InsertVertex();
-      v2.PropsSet(prop, worker_id * 10 + 1);
-      auto e12 = dba.InsertEdge(v1, v2, dba.EdgeType("et"));
-      e12.PropsSet(prop, worker_id * 10 + 2);
-    };
-    create_data(dba, 0);
-    GraphDbAccessor dba_w1{worker(1), dba.transaction_id()};
-    create_data(dba_w1, 1);
-    GraphDbAccessor dba_w2{worker(2), dba.transaction_id()};
-    create_data(dba_w2, 2);
-    dba.Commit();
-  }
-
-  GraphDbAccessor dba{master()};
-  Context ctx{dba};
-  SymbolGenerator symbol_generator{ctx.symbol_table_};
-  AstTreeStorage storage;
-
-  // Query plan for: MATCH p = (n)-[r]->(m) return [n, r], m, p
-  // Use this query to test that graph elements are transferred correctly in
-  // collections too.
-  auto n = MakeScanAll(storage, ctx.symbol_table_, "n");
-  auto r_m =
-      MakeExpand(storage, ctx.symbol_table_, n.op_, n.sym_, "r",
-                 EdgeAtom::Direction::OUT, {}, "m", false, GraphView::OLD);
-  auto p_sym = ctx.symbol_table_.CreateSymbol("p", true);
-  auto p = std::make_shared<query::plan::ConstructNamedPath>(
-      r_m.op_, p_sym,
-      std::vector<Symbol>{n.sym_, r_m.edge_sym_, r_m.node_sym_});
-  auto return_n = IDENT("n");
-  ctx.symbol_table_[*return_n] = n.sym_;
-  auto return_r = IDENT("r");
-  ctx.symbol_table_[*return_r] = r_m.edge_sym_;
-  auto return_n_r = NEXPR("[n, r]", LIST(return_n, return_r));
-  ctx.symbol_table_[*return_n_r] = ctx.symbol_table_.CreateSymbol("", true);
-  auto return_m = NEXPR("m", IDENT("m"));
-  ctx.symbol_table_[*return_m->expression_] = r_m.node_sym_;
-  ctx.symbol_table_[*return_m] = ctx.symbol_table_.CreateSymbol("", true);
-  auto return_p = NEXPR("p", IDENT("p"));
-  ctx.symbol_table_[*return_p->expression_] = p_sym;
-  ctx.symbol_table_[*return_p] = ctx.symbol_table_.CreateSymbol("", true);
-  auto produce = MakeProduce(p, return_n_r, return_m, return_p);
-
-  auto check_result = [prop](
-      int worker_id,
-      const std::vector<std::vector<query::TypedValue>> &frames) {
-    int offset = worker_id * 10;
-    ASSERT_EQ(frames.size(), 1);
-    auto &row = frames[0];
-    ASSERT_EQ(row.size(), 3);
-    auto &list = row[0].ValueList();
-    ASSERT_EQ(list.size(), 2);
-    ASSERT_EQ(list[0].ValueVertex().PropsAt(prop).Value<int64_t>(), offset);
-    ASSERT_EQ(list[1].ValueEdge().PropsAt(prop).Value<int64_t>(), offset + 2);
-    ASSERT_EQ(row[1].ValueVertex().PropsAt(prop).Value<int64_t>(), offset + 1);
-    auto &path = row[2].ValuePath();
-    ASSERT_EQ(path.size(), 1);
-    ASSERT_EQ(path.vertices()[0].PropsAt(prop).Value<int64_t>(), offset);
-    ASSERT_EQ(path.edges()[0].PropsAt(prop).Value<int64_t>(), offset + 2);
-    ASSERT_EQ(path.vertices()[1].PropsAt(prop).Value<int64_t>(), offset + 1);
-  };
-
-  // Test that the plan works locally.
-  auto results = CollectProduce(produce.get(), ctx.symbol_table_, dba);
-  check_result(0, results);
-
-  const int plan_id = 42;
-  master().plan_dispatcher().DispatchPlan(plan_id, produce, ctx.symbol_table_);
-
-  Parameters params;
-  std::vector<query::Symbol> symbols{ctx.symbol_table_[*return_n_r],
-                                     ctx.symbol_table_[*return_m], p_sym};
-  auto remote_pull = [this, &params, &symbols](GraphDbAccessor &dba,
-                                               int worker_id) {
-    return master().pull_clients().Pull(dba, worker_id, plan_id, params,
-                                        symbols, false, 3);
-  };
-  auto future_w1_results = remote_pull(dba, 1);
-  auto future_w2_results = remote_pull(dba, 2);
-  check_result(1, future_w1_results.get().frames);
-  check_result(2, future_w2_results.get().frames);
-}
-
-TEST_F(DistributedGraphDbTest, Synchronize) {
-  auto from = InsertVertex(worker(1));
-  auto to = InsertVertex(worker(2));
-  InsertEdge(from, to, "et");
-
-  // Query: MATCH (n)--(m) SET m.prop = 42 RETURN n.prop
-  // This query ensures that a remote update gets applied and that the local
-  // records get reconstructed.
-  auto &db = master();
-  GraphDbAccessor dba{db};
-  Context ctx{dba};
-  SymbolGenerator symbol_generator{ctx.symbol_table_};
-  AstTreeStorage storage;
-  // MATCH
-  auto n = MakeScanAll(storage, ctx.symbol_table_, "n");
-  auto r_m =
-      MakeExpand(storage, ctx.symbol_table_, n.op_, n.sym_, "r",
-                 EdgeAtom::Direction::BOTH, {}, "m", false, GraphView::OLD);
-
-  // SET
-  auto literal = LITERAL(42);
-  auto prop = PROPERTY_PAIR("prop");
-  auto m_p = PROPERTY_LOOKUP("m", prop);
-  ctx.symbol_table_[*m_p->expression_] = r_m.node_sym_;
-  auto set_m_p = std::make_shared<plan::SetProperty>(r_m.op_, m_p, literal);
-
-  const int plan_id = 42;
-  master().plan_dispatcher().DispatchPlan(plan_id, set_m_p, ctx.symbol_table_);
-
-  // Master-side PullRemote, Synchronize
-  auto pull_remote = std::make_shared<query::plan::PullRemote>(
-      nullptr, plan_id, std::vector<Symbol>{n.sym_});
-  auto synchronize =
-      std::make_shared<query::plan::Synchronize>(set_m_p, pull_remote, true);
-
-  // RETURN
-  auto n_p =
-      storage.Create<PropertyLookup>(storage.Create<Identifier>("n"), prop);
-  ctx.symbol_table_[*n_p->expression_] = n.sym_;
-  auto return_n_p = NEXPR("n.prop", n_p);
-  auto return_n_p_sym = ctx.symbol_table_.CreateSymbol("n.p", true);
-  ctx.symbol_table_[*return_n_p] = return_n_p_sym;
-  auto produce = MakeProduce(synchronize, return_n_p);
-
-  auto results = CollectProduce(produce.get(), ctx.symbol_table_, dba);
-  ASSERT_EQ(results.size(), 2);
-  ASSERT_EQ(results[0].size(), 1);
-  EXPECT_EQ(results[0][0].ValueInt(), 42);
-  ASSERT_EQ(results[1].size(), 1);
-  EXPECT_EQ(results[1][0].ValueInt(), 42);
-
-  // TODO test without advance command?
-}
-
-TEST_F(DistributedGraphDbTest, Create) {
-  // Query: UNWIND range(0, 1000) as x CREATE ()
-  auto &db = master();
-  GraphDbAccessor dba{db};
-  Context ctx{dba};
-  SymbolGenerator symbol_generator{ctx.symbol_table_};
-  AstTreeStorage storage;
-  auto range = FN("range", LITERAL(0), LITERAL(1000));
-  auto x = ctx.symbol_table_.CreateSymbol("x", true);
-  auto unwind = std::make_shared<plan::Unwind>(nullptr, range, x);
-  auto node = NODE("n");
-  ctx.symbol_table_[*node->identifier_] =
-      ctx.symbol_table_.CreateSymbol("n", true);
-  auto create = std::make_shared<query::plan::CreateNode>(unwind, node, true);
-  PullAll(create, dba, ctx.symbol_table_);
-  dba.Commit();
-
-  EXPECT_GT(VertexCount(master()), 200);
-  EXPECT_GT(VertexCount(worker(1)), 200);
-  EXPECT_GT(VertexCount(worker(2)), 200);
-}
-
-TEST_F(DistributedGraphDbTest, PullRemoteOrderBy) {
-  // Create some data on the master and both workers.
-  storage::Property prop;
-  {
-    GraphDbAccessor dba{master()};
-    auto tx_id = dba.transaction_id();
-    GraphDbAccessor dba1{worker(1), tx_id};
-    GraphDbAccessor dba2{worker(2), tx_id};
-    prop = dba.Property("prop");
-    auto add_data = [prop](GraphDbAccessor &dba, int value) {
-      dba.InsertVertex().PropsSet(prop, value);
-    };
-
-    std::vector<int> data;
-    for (int i = 0; i < 300; ++i) data.push_back(i);
-    std::random_shuffle(data.begin(), data.end());
-
-    for (int i = 0; i < 100; ++i) add_data(dba, data[i]);
-    for (int i = 100; i < 200; ++i) add_data(dba1, data[i]);
-    for (int i = 200; i < 300; ++i) add_data(dba2, data[i]);
-
-    dba.Commit();
-  }
-
-  auto &db = master();
-  GraphDbAccessor dba{db};
-  Context ctx{dba};
-  SymbolGenerator symbol_generator{ctx.symbol_table_};
-  AstTreeStorage storage;
-
-  // Query plan for:  MATCH (n) RETURN n.prop ORDER BY n.prop;
-  auto n = MakeScanAll(storage, ctx.symbol_table_, "n");
-  auto n_p = PROPERTY_LOOKUP("n", prop);
-  ctx.symbol_table_[*n_p->expression_] = n.sym_;
-  auto order_by = std::make_shared<plan::OrderBy>(
-      n.op_,
-      std::vector<std::pair<Ordering, Expression *>>{{Ordering::ASC, n_p}},
-      std::vector<Symbol>{n.sym_});
-
-  const int plan_id = 42;
-  master().plan_dispatcher().DispatchPlan(plan_id, order_by, ctx.symbol_table_);
-
-  auto pull_remote_order_by = std::make_shared<plan::PullRemoteOrderBy>(
-      order_by, plan_id,
-      std::vector<std::pair<Ordering, Expression *>>{{Ordering::ASC, n_p}},
-      std::vector<Symbol>{n.sym_});
-
-  auto n_p_ne = NEXPR("n.prop", n_p);
-  ctx.symbol_table_[*n_p_ne] = ctx.symbol_table_.CreateSymbol("n.prop", true);
-  auto produce = MakeProduce(pull_remote_order_by, n_p_ne);
-  auto results = CollectProduce(produce.get(), ctx.symbol_table_, dba);
-
-  ASSERT_EQ(results.size(), 300);
-  for (int j = 0; j < 300; ++j) {
-    EXPECT_TRUE(TypedValue::BoolEqual{}(results[j][0], j));
-  }
-}
-
-class DistributedTransactionTimeout : public DistributedGraphDbTest {
- protected:
-  int QueryExecutionTimeSec(int) override { return 1; }
-};
-
-TEST_F(DistributedTransactionTimeout, Timeout) {
-  InsertVertex(worker(1));
-  InsertVertex(worker(1));
-
-  GraphDbAccessor dba{master()};
-  Context ctx{dba};
-  SymbolGenerator symbol_generator{ctx.symbol_table_};
-  AstTreeStorage storage;
-
-  // Make distributed plan for MATCH (n) RETURN n
-  auto scan_all = MakeScanAll(storage, ctx.symbol_table_, "n");
-  auto output = NEXPR("n", IDENT("n"));
-  auto produce = MakeProduce(scan_all.op_, output);
-  ctx.symbol_table_[*output->expression_] = scan_all.sym_;
-  ctx.symbol_table_[*output] =
-      ctx.symbol_table_.CreateSymbol("named_expression_1", true);
-
-  const int plan_id = 42;
-  master().plan_dispatcher().DispatchPlan(plan_id, produce, ctx.symbol_table_);
-
-  Parameters params;
-  std::vector<query::Symbol> symbols{ctx.symbol_table_[*output]};
-  auto remote_pull = [this, &params, &symbols, &dba]() {
-    return master()
-        .pull_clients()
-        .Pull(dba, 1, plan_id, params, symbols, false, 1)
-        .get()
-        .pull_state;
-  };
-  ASSERT_EQ(remote_pull(), distributed::PullState::CURSOR_IN_PROGRESS);
-  // Sleep past the execution timeout so the remote reports a hinted abort error.
-  std::this_thread::sleep_for(2s);
-  EXPECT_EQ(remote_pull(), distributed::PullState::HINTED_ABORT_ERROR);
-}
diff --git a/tests/unit/distributed_serialization.cpp b/tests/unit/distributed_serialization.cpp
deleted file mode 100644
index 4f9cb83ef..000000000
--- a/tests/unit/distributed_serialization.cpp
+++ /dev/null
@@ -1,162 +0,0 @@
-#include <gtest/gtest.h>
-#include <sstream>
-
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-
-#include "distributed/serialization.hpp"
-#include "mvcc/version_list.hpp"
-#include "query/typed_value.hpp"
-#include "storage/edge.hpp"
-#include "storage/property_value_store.hpp"
-#include "storage/types.hpp"
-#include "storage/vertex.hpp"
-#include "transactions/engine_single_node.hpp"
-
-using namespace storage;
-
-template <typename TAddress>
-TAddress ToGlobal(const TAddress &address, int worker_id) {
-  if (address.is_remote()) return address;
-  return TAddress{address.local()->gid_, worker_id};
-}
-
-#define CHECK_RETURN(condition)     \
-  {                                 \
-    if (!(condition)) return false; \
-  }
-
-bool CheckEdges(const Edges &e1, int w1, const Edges &e2, int w2) {
-  CHECK_RETURN(e1.size() == e2.size());
-  auto e1_it = e1.begin();
-  for (auto e2_it = e2.begin(); e2_it != e2.end(); ++e1_it, ++e2_it) {
-    CHECK_RETURN(ToGlobal(e1_it->vertex, w1) == ToGlobal(e2_it->vertex, w2));
-    CHECK_RETURN(ToGlobal(e1_it->edge, w1) == ToGlobal(e2_it->edge, w2));
-    CHECK_RETURN(e1_it->edge_type == e2_it->edge_type);
-  }
-  return true;
-}
-
-bool CheckProperties(const PropertyValueStore &p1,
-                     const PropertyValueStore &p2) {
-  CHECK_RETURN(p1.size() == p2.size());
-  auto p1_it = p1.begin();
-  for (auto p2_it = p2.begin(); p2_it != p2.end(); ++p1_it, ++p2_it) {
-    CHECK_RETURN(p1_it->first == p2_it->first);
-    auto tv =
-        query::TypedValue(p1_it->second) == query::TypedValue(p2_it->second);
-    CHECK_RETURN(tv.IsBool());
-    CHECK_RETURN(tv.ValueBool());
-  }
-  return true;
-}
-
-bool CheckVertex(const Vertex &v1, int w1, const Vertex &v2, int w2) {
-  CHECK_RETURN(CheckEdges(v1.in_, w1, v2.in_, w2));
-  CHECK_RETURN(CheckEdges(v1.out_, w1, v2.out_, w2));
-  CHECK_RETURN(v1.labels_ == v2.labels_);
-  CHECK_RETURN(CheckProperties(v1.properties_, v2.properties_));
-  return true;
-}
-
-bool CheckEdge(const Edge &e1, int w1, const Edge &e2, int w2) {
-  CHECK_RETURN(ToGlobal(e1.from_, w1) == ToGlobal(e2.from_, w2));
-  CHECK_RETURN(ToGlobal(e1.to_, w1) == ToGlobal(e2.to_, w2));
-  CHECK_RETURN(e1.edge_type_ == e2.edge_type_);
-  CHECK_RETURN(CheckProperties(e1.properties_, e2.properties_));
-  return true;
-}
-
-#undef CHECK_RETURN
-
-#define SAVE_AND_LOAD(type, name, element)        \
-  std::unique_ptr<type> name;                     \
-  {                                               \
-    std::ostringstream ostream;                   \
-    boost::archive::binary_oarchive oar{ostream}; \
-    distributed::Save##type(oar, element, 0);     \
-    std::istringstream istream{ostream.str()};    \
-    boost::archive::binary_iarchive iar{istream}; \
-    name = distributed::Load##type(iar);          \
-  }
-
-TEST(DistributedSerialization, Empty) {
-  Vertex v;
-  int w_id{0};
-  SAVE_AND_LOAD(Vertex, v_recovered, v)
-  EXPECT_TRUE(CheckVertex(v, w_id, *v_recovered, w_id));
-}
-
-#define UPDATE_AND_CHECK(type, x, action)        \
-  {                                              \
-    SAVE_AND_LOAD(type, before, x)               \
-    EXPECT_TRUE(Check##type(x, 0, *before, 0));  \
-    action;                                      \
-    EXPECT_FALSE(Check##type(x, 0, *before, 0)); \
-    SAVE_AND_LOAD(type, after, x)                \
-    EXPECT_TRUE(Check##type(x, 0, *after, 0));   \
-  }
-
-#define UPDATE_AND_CHECK_V(v, action) UPDATE_AND_CHECK(Vertex, v, action)
-#define UPDATE_AND_CHECK_E(e, action) UPDATE_AND_CHECK(Edge, e, action)
-
-TEST(DistributedSerialization, VertexLabels) {
-  Vertex v;
-  UPDATE_AND_CHECK_V(v, v.labels_.emplace_back(Label(1)));
-  UPDATE_AND_CHECK_V(v, v.labels_.emplace_back(Label(2)));
-  UPDATE_AND_CHECK_V(v, v.labels_.resize(1));
-  UPDATE_AND_CHECK_V(v, v.labels_.clear());
-}
-
-TEST(DistributedSerialization, VertexProperties) {
-  Vertex v;
-  UPDATE_AND_CHECK_V(v, v.properties_.set(Property(1), true));
-  UPDATE_AND_CHECK_V(v, v.properties_.set(Property(1), "string"));
-  UPDATE_AND_CHECK_V(v, v.properties_.set(Property(2), 42));
-  UPDATE_AND_CHECK_V(v, v.properties_.erase(Property(1)));
-  UPDATE_AND_CHECK_V(v, v.properties_.clear());
-}
-
-class DistributedSerializationMvcc : public ::testing::Test {
- protected:
-  tx::SingleNodeEngine engine;
-  tx::Transaction *tx = engine.Begin();
-  mvcc::VersionList<Vertex> v1_vlist{*tx, 0};
-  Vertex &v1 = *v1_vlist.Oldest();
-  mvcc::VersionList<Vertex> v2_vlist{*tx, 1};
-  Vertex &v2 = *v2_vlist.Oldest();
-  mvcc::VersionList<Edge> e1_vlist{*tx, 0, &v1_vlist, &v2_vlist, EdgeType(0)};
-  Edge &e1 = *e1_vlist.Oldest();
-  mvcc::VersionList<Edge> e2_vlist{*tx, 1, &v2_vlist, &v1_vlist, EdgeType(2)};
-  Edge &e2 = *e2_vlist.Oldest();
-};
-
-TEST_F(DistributedSerializationMvcc, VertexEdges) {
-  UPDATE_AND_CHECK_V(v1, v1.out_.emplace(&v2_vlist, &e1_vlist, EdgeType(0)));
-  UPDATE_AND_CHECK_V(v2, v2.in_.emplace(&v1_vlist, &e1_vlist, EdgeType(0)));
-  UPDATE_AND_CHECK_V(v1, v1.in_.emplace(&v2_vlist, &e2_vlist, EdgeType(2)));
-  UPDATE_AND_CHECK_V(v2, v2.out_.emplace(&v1_vlist, &e2_vlist, EdgeType(2)));
-}
-
-TEST_F(DistributedSerializationMvcc, EdgeFromAndTo) {
-  UPDATE_AND_CHECK_E(e1, e1.from_ = &v2_vlist);
-  UPDATE_AND_CHECK_E(e1, e1.to_ = &v1_vlist);
-}
-
-TEST_F(DistributedSerializationMvcc, EdgeType) {
-  UPDATE_AND_CHECK_E(e1, e1.edge_type_ = EdgeType(123));
-  UPDATE_AND_CHECK_E(e1, e1.edge_type_ = EdgeType(55));
-}
-
-TEST_F(DistributedSerializationMvcc, EdgeProperties) {
-  UPDATE_AND_CHECK_E(e1, e1.properties_.set(Property(1), true));
-  UPDATE_AND_CHECK_E(e1, e1.properties_.set(Property(1), "string"));
-  UPDATE_AND_CHECK_E(e1, e1.properties_.set(Property(2), 42));
-  UPDATE_AND_CHECK_E(e1, e1.properties_.erase(Property(1)));
-  UPDATE_AND_CHECK_E(e1, e1.properties_.clear());
-}
-
-#undef UPDATE_AND_CHECK_E
-#undef UPDATE_AND_CHECK_V
-#undef UPDATE_AND_CHECK
-#undef SAVE_AND_LOAD
diff --git a/tests/unit/distributed_updates.cpp b/tests/unit/distributed_updates.cpp
deleted file mode 100644
index 14cd82f61..000000000
--- a/tests/unit/distributed_updates.cpp
+++ /dev/null
@@ -1,560 +0,0 @@
-#include <functional>
-#include <unordered_map>
-
-#include <gtest/gtest.h>
-
-#include "database/graph_db_accessor.hpp"
-#include "distributed/updates_rpc_clients.hpp"
-#include "distributed/updates_rpc_server.hpp"
-#include "query/typed_value.hpp"
-#include "storage/property_value.hpp"
-
-#include "distributed_common.hpp"
-
-class DistributedUpdateTest : public DistributedGraphDbTest {
- protected:
-  std::unique_ptr<database::GraphDbAccessor> dba1;
-  std::unique_ptr<database::GraphDbAccessor> dba2;
-  storage::Label label;
-  std::unique_ptr<VertexAccessor> v1_dba1;
-  std::unique_ptr<VertexAccessor> v1_dba2;
-
-  void SetUp() override {
-    DistributedGraphDbTest::SetUp();
-
-    database::GraphDbAccessor dba_tx1{worker(1)};
-    auto v = dba_tx1.InsertVertex();
-    auto v_ga = v.GlobalAddress();
-    dba_tx1.Commit();
-
-    dba1 = std::make_unique<database::GraphDbAccessor>(worker(1));
-    dba2 = std::make_unique<database::GraphDbAccessor>(worker(2),
-                                                       dba1->transaction_id());
-
-    v1_dba1 = std::make_unique<VertexAccessor>(v_ga, *dba1);
-    v1_dba2 = std::make_unique<VertexAccessor>(v_ga, *dba2);
-    ASSERT_FALSE(v1_dba2->address().is_local());
-    label = dba1->Label("l");
-    v1_dba2->add_label(label);
-  }
-
-  void TearDown() override {
-    dba2 = nullptr;
-    dba1 = nullptr;
-    DistributedGraphDbTest::TearDown();
-  }
-};
-
-#define EXPECT_LABEL(var, old_result, new_result) \
-  {                                               \
-    var->SwitchOld();                             \
-    EXPECT_EQ(var->has_label(label), old_result); \
-    var->SwitchNew();                             \
-    EXPECT_EQ(var->has_label(label), new_result); \
-  }
-
-TEST_F(DistributedUpdateTest, UpdateLocalOnly) {
-  EXPECT_LABEL(v1_dba2, false, true);
-  EXPECT_LABEL(v1_dba1, false, false);
-}
-
-TEST_F(DistributedUpdateTest, UpdateApply) {
-  EXPECT_LABEL(v1_dba1, false, false);
-  worker(1).updates_server().Apply(dba1->transaction_id());
-  EXPECT_LABEL(v1_dba1, false, true);
-}
-
-#undef EXPECT_LABEL
-
-TEST_F(DistributedGraphDbTest, CreateVertex) {
-  gid::Gid gid;
-  {
-    database::GraphDbAccessor dba{worker(1)};
-    auto v = dba.InsertVertexIntoRemote(2, {}, {});
-    gid = v.gid();
-    dba.Commit();
-  }
-  {
-    database::GraphDbAccessor dba{worker(2)};
-    auto v = dba.FindVertexOptional(gid, false);
-    ASSERT_TRUE(v);
-  }
-}
-
-TEST_F(DistributedGraphDbTest, CreateVertexWithUpdate) {
-  gid::Gid gid;
-  storage::Property prop;
-  {
-    database::GraphDbAccessor dba{worker(1)};
-    auto v = dba.InsertVertexIntoRemote(2, {}, {});
-    gid = v.gid();
-    prop = dba.Property("prop");
-    v.PropsSet(prop, 42);
-    worker(2).updates_server().Apply(dba.transaction_id());
-    dba.Commit();
-  }
-  {
-    database::GraphDbAccessor dba{worker(2)};
-    auto v = dba.FindVertexOptional(gid, false);
-    ASSERT_TRUE(v);
-    EXPECT_EQ(v->PropsAt(prop).Value<int64_t>(), 42);
-  }
-}
-
-TEST_F(DistributedGraphDbTest, CreateVertexWithData) {
-  gid::Gid gid;
-  storage::Label l1;
-  storage::Label l2;
-  storage::Property prop;
-  {
-    database::GraphDbAccessor dba{worker(1)};
-    l1 = dba.Label("l1");
-    l2 = dba.Label("l2");
-    prop = dba.Property("prop");
-    auto v = dba.InsertVertexIntoRemote(2, {l1, l2}, {{prop, 42}});
-    gid = v.gid();
-
-    // Check local visibility before commit.
-    EXPECT_TRUE(v.has_label(l1));
-    EXPECT_TRUE(v.has_label(l2));
-    EXPECT_EQ(v.PropsAt(prop).Value<int64_t>(), 42);
-
-    worker(2).updates_server().Apply(dba.transaction_id());
-    dba.Commit();
-  }
-  {
-    database::GraphDbAccessor dba{worker(2)};
-    auto v = dba.FindVertexOptional(gid, false);
-    ASSERT_TRUE(v);
-    // Check remote data after commit.
-    EXPECT_TRUE(v->has_label(l1));
-    EXPECT_TRUE(v->has_label(l2));
-    EXPECT_EQ(v->PropsAt(prop).Value<int64_t>(), 42);
-  }
-}
-
-// Checks whether expiring a local record for a local update before applying a
-// remote update delta causes a problem.
-TEST_F(DistributedGraphDbTest, UpdateVertexRemoteAndLocal) {
-  gid::Gid gid;
-  storage::Label l1;
-  storage::Label l2;
-  {
-    database::GraphDbAccessor dba{worker(1)};
-    auto v = dba.InsertVertex();
-    gid = v.gid();
-    l1 = dba.Label("label1");
-    l2 = dba.Label("label2");
-    dba.Commit();
-  }
-  {
-    database::GraphDbAccessor dba0{master()};
-    database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()};
-    auto v_local = dba1.FindVertex(gid, false);
-    auto v_remote = VertexAccessor(storage::VertexAddress(gid, 1), dba0);
-
-    v_remote.add_label(l2);
-    v_local.add_label(l1);
-
-    auto result = worker(1).updates_server().Apply(dba0.transaction_id());
-    EXPECT_EQ(result, distributed::UpdateResult::DONE);
-  }
-}
-
-TEST_F(DistributedGraphDbTest, AddSameLabelRemoteAndLocal) {
-  auto v_address = InsertVertex(worker(1));
-  {
-    database::GraphDbAccessor dba0{master()};
-    database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()};
-    auto v_local = dba1.FindVertex(v_address.gid(), false);
-    auto v_remote = VertexAccessor(v_address, dba0);
-    auto l1 = dba1.Label("label");
-    v_remote.add_label(l1);
-    v_local.add_label(l1);
-    worker(1).updates_server().Apply(dba0.transaction_id());
-    dba0.Commit();
-  }
-  {
-    database::GraphDbAccessor dba0{master()};
-    database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()};
-    auto v = dba1.FindVertex(v_address.gid(), false);
-    EXPECT_EQ(v.labels().size(), 1);
-  }
-}
-
-TEST_F(DistributedGraphDbTest, IndexGetsUpdatedRemotely) {
-  storage::VertexAddress v_remote = InsertVertex(worker(1));
-  storage::Label label;
-  {
-    database::GraphDbAccessor dba0{master()};
-    label = dba0.Label("label");
-    VertexAccessor va(v_remote, dba0);
-    va.add_label(label);
-    worker(1).updates_server().Apply(dba0.transaction_id());
-    dba0.Commit();
-  }
-  {
-    database::GraphDbAccessor dba1{worker(1)};
-    auto vertices = dba1.Vertices(label, false);
-    EXPECT_EQ(std::distance(vertices.begin(), vertices.end()), 1);
-  }
-}
-
-TEST_F(DistributedGraphDbTest, DeleteVertexRemoteCommit) {
-  auto v_address = InsertVertex(worker(1));
-  database::GraphDbAccessor dba0{master()};
-  database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()};
-  auto v_remote = VertexAccessor(v_address, dba0);
-  dba0.RemoveVertex(v_remote);
-  EXPECT_TRUE(dba1.FindVertexOptional(v_address.gid(), true));
-  EXPECT_EQ(worker(1).updates_server().Apply(dba0.transaction_id()),
-            distributed::UpdateResult::DONE);
-  EXPECT_FALSE(dba1.FindVertexOptional(v_address.gid(), true));
-}
-
-TEST_F(DistributedGraphDbTest, DeleteVertexRemoteBothDelete) {
-  auto v_address = InsertVertex(worker(1));
-  {
-    database::GraphDbAccessor dba0{master()};
-    database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()};
-    auto v_local = dba1.FindVertex(v_address.gid(), false);
-    auto v_remote = VertexAccessor(v_address, dba0);
-    EXPECT_TRUE(dba1.RemoveVertex(v_local));
-    EXPECT_TRUE(dba0.RemoveVertex(v_remote));
-    EXPECT_EQ(worker(1).updates_server().Apply(dba0.transaction_id()),
-              distributed::UpdateResult::DONE);
-    EXPECT_FALSE(dba1.FindVertexOptional(v_address.gid(), true));
-  }
-}
-
-TEST_F(DistributedGraphDbTest, DeleteVertexRemoteStillConnected) {
-  auto v_address = InsertVertex(worker(1));
-  auto e_address = InsertEdge(v_address, v_address, "edge");
-
-  {
-    database::GraphDbAccessor dba0{master()};
-    database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()};
-    auto v_remote = VertexAccessor(v_address, dba0);
-    dba0.RemoveVertex(v_remote);
-    EXPECT_EQ(worker(1).updates_server().Apply(dba0.transaction_id()),
-              distributed::UpdateResult::UNABLE_TO_DELETE_VERTEX_ERROR);
-    EXPECT_TRUE(dba1.FindVertexOptional(v_address.gid(), true));
-  }
-  {
-    database::GraphDbAccessor dba0{master()};
-    database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()};
-    auto e_local = dba1.FindEdge(e_address.gid(), false);
-    auto v_local = dba1.FindVertex(v_address.gid(), false);
-    auto v_remote = VertexAccessor(v_address, dba0);
-
-    dba1.RemoveEdge(e_local);
-    dba0.RemoveVertex(v_remote);
-
-    EXPECT_EQ(worker(1).updates_server().Apply(dba0.transaction_id()),
-              distributed::UpdateResult::DONE);
-    EXPECT_FALSE(dba1.FindVertexOptional(v_address.gid(), true));
-  }
-}
-
-class DistributedDetachDeleteTest : public DistributedGraphDbTest {
- protected:
-  storage::VertexAddress w1_a;
-  storage::VertexAddress w1_b;
-  storage::VertexAddress w2_a;
-
-  void SetUp() override {
-    DistributedGraphDbTest::SetUp();
-    w1_a = InsertVertex(worker(1));
-    w1_b = InsertVertex(worker(1));
-    w2_a = InsertVertex(worker(2));
-  }
-
-  template <typename TF>
-  void Run(storage::VertexAddress v_address, TF check_func) {
-    for (int i : {0, 1, 2}) {
-      database::GraphDbAccessor dba0{master()};
-      database::GraphDbAccessor dba1{worker(1), dba0.transaction_id()};
-      database::GraphDbAccessor dba2{worker(2), dba0.transaction_id()};
-
-      std::vector<std::reference_wrapper<database::GraphDbAccessor>> dba;
-      dba.emplace_back(dba0);
-      dba.emplace_back(dba1);
-      dba.emplace_back(dba2);
-
-      auto &accessor = dba[i].get();
-      auto v_accessor = VertexAccessor(v_address, accessor);
-      accessor.DetachRemoveVertex(v_accessor);
-
-      for (auto db_accessor : dba) {
-        ASSERT_EQ(db_accessor.get().db().updates_server().Apply(
-                      dba[0].get().transaction_id()),
-                  distributed::UpdateResult::DONE);
-      }
-
-      check_func(dba);
-    }
-  }
-};
-
-TEST_F(DistributedDetachDeleteTest, VertexCycle) {
-  auto e_address = InsertEdge(w1_a, w1_a, "edge");
-  Run(w1_a,
-      [this, e_address](
-          std::vector<std::reference_wrapper<database::GraphDbAccessor>> &dba) {
-        EXPECT_FALSE(dba[1].get().FindVertexOptional(w1_a.gid(), true));
-        EXPECT_FALSE(dba[1].get().FindEdgeOptional(e_address.gid(), true));
-      });
-}
-
-TEST_F(DistributedDetachDeleteTest, TwoVerticesDifferentWorkers) {
-  auto e_address = InsertEdge(w1_a, w2_a, "edge");
-
-  // Delete from
-  Run(w1_a,
-      [this, e_address](
-          std::vector<std::reference_wrapper<database::GraphDbAccessor>> &dba) {
-        EXPECT_FALSE(dba[1].get().FindVertexOptional(w1_a.gid(), true));
-        EXPECT_TRUE(dba[2].get().FindVertexOptional(w2_a.gid(), true));
-        EXPECT_FALSE(dba[1].get().FindEdgeOptional(e_address.gid(), true));
-      });
-
-  // Delete to
-  Run(w2_a,
-      [this, e_address](
-          std::vector<std::reference_wrapper<database::GraphDbAccessor>> &dba) {
-        EXPECT_TRUE(dba[1].get().FindVertexOptional(w1_a.gid(), true));
-        EXPECT_FALSE(dba[2].get().FindVertexOptional(w2_a.gid(), true));
-        EXPECT_FALSE(dba[1].get().FindEdgeOptional(e_address.gid(), true));
-      });
-}
-
-TEST_F(DistributedDetachDeleteTest, TwoVerticesSameWorkers) {
-  auto e_address = InsertEdge(w1_a, w1_b, "edge");
-
-  // Delete from
-  Run(w1_a,
-      [this, e_address](
-          std::vector<std::reference_wrapper<database::GraphDbAccessor>> &dba) {
-        EXPECT_FALSE(dba[1].get().FindVertexOptional(w1_a.gid(), true));
-        EXPECT_TRUE(dba[1].get().FindVertexOptional(w1_b.gid(), true));
-        EXPECT_FALSE(dba[1].get().FindEdgeOptional(e_address.gid(), true));
-      });
-
-  // Delete to
-  Run(w1_b,
-      [this, e_address](
-          std::vector<std::reference_wrapper<database::GraphDbAccessor>> &dba) {
-        EXPECT_TRUE(dba[1].get().FindVertexOptional(w1_a.gid(), true));
-        EXPECT_FALSE(dba[1].get().FindVertexOptional(w1_b.gid(), true));
-        EXPECT_FALSE(dba[1].get().FindEdgeOptional(e_address.gid(), true));
-      });
-}
-
-class DistributedEdgeCreateTest : public DistributedGraphDbTest {
- protected:
-  storage::VertexAddress w1_a;
-  storage::VertexAddress w1_b;
-  storage::VertexAddress w2_a;
-  std::unordered_map<std::string, PropertyValue> props{{"p1", 42},
-                                                       {"p2", true}};
-  storage::EdgeAddress e_ga;
-
-  void SetUp() override {
-    DistributedGraphDbTest::SetUp();
-    w1_a = InsertVertex(worker(1));
-    w1_b = InsertVertex(worker(1));
-    w2_a = InsertVertex(worker(2));
-  }
-
-  void CreateEdge(database::GraphDb &creator, storage::VertexAddress from_addr,
-                  storage::VertexAddress to_addr) {
-    CHECK(from_addr.is_remote() && to_addr.is_remote())
-        << "Local address given to CreateEdge";
-    database::GraphDbAccessor dba{creator};
-    auto edge_type = dba.EdgeType("et");
-    VertexAccessor v1{from_addr, dba};
-    VertexAccessor v2{to_addr, dba};
-    auto edge = dba.InsertEdge(v1, v2, edge_type);
-    e_ga = edge.GlobalAddress();
-
-    for (auto &kv : props) edge.PropsSet(dba.Property(kv.first), kv.second);
-
-    master().updates_server().Apply(dba.transaction_id());
-    worker(1).updates_server().Apply(dba.transaction_id());
-    worker(2).updates_server().Apply(dba.transaction_id());
-    dba.Commit();
-  }
-
-  void CheckState(database::GraphDb &db, bool edge_is_local,
-                  storage::VertexAddress from_addr,
-                  storage::VertexAddress to_addr) {
-    database::GraphDbAccessor dba{db};
-
-    // Check edge data.
-    {
-      EdgeAccessor edge{e_ga, dba};
-      EXPECT_EQ(edge.address().is_local(), edge_is_local);
-      EXPECT_EQ(edge.GlobalAddress(), e_ga);
-      auto from = edge.from();
-      EXPECT_EQ(from.GlobalAddress(), from_addr);
-      auto to = edge.to();
-      EXPECT_EQ(to.GlobalAddress(), to_addr);
-
-      EXPECT_EQ(edge.Properties().size(), props.size());
-      for (auto &kv : props) {
-        auto equality = edge.PropsAt(dba.Property(kv.first)) ==
-                        query::TypedValue(kv.second);
-        EXPECT_TRUE(equality.IsBool() && equality.ValueBool());
-      }
-    }
-
-    auto edges = [](auto iterable) {
-      std::vector<EdgeAccessor> res;
-      for (auto edge : iterable) res.emplace_back(edge);
-      return res;
-    };
-
-    // Check `from` data.
-    {
-      VertexAccessor from{from_addr, dba};
-      ASSERT_EQ(edges(from.out()).size(), 1);
-      EXPECT_EQ(edges(from.out())[0].GlobalAddress(), e_ga);
-      // In case of a cycle there is exactly 1 edge among the `in` edges.
-      EXPECT_EQ(edges(from.in()).size(), from_addr == to_addr);
-    }
-
-    // Check `to` data.
-    {
-      VertexAccessor to{to_addr, dba};
-      // In case of a cycle there is exactly 1 edge among the `out` edges.
-      EXPECT_EQ(edges(to.out()).size(), from_addr == to_addr);
-      ASSERT_EQ(edges(to.in()).size(), 1);
-      EXPECT_EQ(edges(to.in())[0].GlobalAddress(), e_ga);
-    }
-  }
-
-  void CheckAll(storage::VertexAddress from_addr,
-                storage::VertexAddress to_addr) {
-    int edge_worker = from_addr.worker_id();
-    EXPECT_EQ(EdgeCount(master()), edge_worker == 0);
-    EXPECT_EQ(EdgeCount(worker(1)), edge_worker == 1);
-    EXPECT_EQ(EdgeCount(worker(2)), edge_worker == 2);
-    CheckState(master(), edge_worker == 0, from_addr, to_addr);
-    CheckState(worker(1), edge_worker == 1, from_addr, to_addr);
-    CheckState(worker(2), edge_worker == 2, from_addr, to_addr);
-  }
-};
-
-TEST_F(DistributedEdgeCreateTest, LocalRemote) {
-  CreateEdge(worker(1), w1_a, w2_a);
-  CheckAll(w1_a, w2_a);
-}
-
-TEST_F(DistributedEdgeCreateTest, RemoteLocal) {
-  CreateEdge(worker(2), w1_a, w2_a);
-  CheckAll(w1_a, w2_a);
-}
-
-TEST_F(DistributedEdgeCreateTest, RemoteRemoteDifferentWorkers) {
-  CreateEdge(master(), w1_a, w2_a);
-  CheckAll(w1_a, w2_a);
-}
-
-TEST_F(DistributedEdgeCreateTest, RemoteRemoteSameWorkers) {
-  CreateEdge(master(), w1_a, w1_b);
-  CheckAll(w1_a, w1_b);
-}
-
-TEST_F(DistributedEdgeCreateTest, RemoteRemoteCycle) {
-  CreateEdge(master(), w1_a, w1_a);
-  CheckAll(w1_a, w1_a);
-}
-
-class DistributedEdgeRemoveTest : public DistributedGraphDbTest {
- protected:
-  storage::VertexAddress from_addr;
-  storage::VertexAddress to_addr;
-  storage::EdgeAddress edge_addr;
-
-  void Create(database::GraphDb &from_db, database::GraphDb &to_db) {
-    from_addr = InsertVertex(from_db);
-    to_addr = InsertVertex(to_db);
-    edge_addr = InsertEdge(from_addr, to_addr, "edge_type");
-  }
-
-  void Delete(database::GraphDb &db) {
-    database::GraphDbAccessor dba{db};
-    EdgeAccessor edge{edge_addr, dba};
-    dba.RemoveEdge(edge);
-    master().updates_server().Apply(dba.transaction_id());
-    worker(1).updates_server().Apply(dba.transaction_id());
-    worker(2).updates_server().Apply(dba.transaction_id());
-    dba.Commit();
-  }
-
-  template <typename TIterable>
-  auto Size(TIterable iterable) {
-    return std::distance(iterable.begin(), iterable.end());
-  };
-
-  void CheckCreation() {
-    auto wid = from_addr.worker_id();
-    ASSERT_TRUE(wid >= 0 && wid < 3);
-    ASSERT_EQ(EdgeCount(master()), wid == 0);
-    ASSERT_EQ(EdgeCount(worker(1)), wid == 1);
-    ASSERT_EQ(EdgeCount(worker(2)), wid == 2);
-
-    database::GraphDbAccessor dba{master()};
-    VertexAccessor from{from_addr, dba};
-    EXPECT_EQ(Size(from.out()), 1);
-    EXPECT_EQ(Size(from.in()), 0);
-
-    VertexAccessor to{to_addr, dba};
-    EXPECT_EQ(Size(to.out()), 0);
-    EXPECT_EQ(Size(to.in()), 1);
-  }
-
-  void CheckDeletion() {
-    EXPECT_EQ(EdgeCount(master()), 0);
-    EXPECT_EQ(EdgeCount(worker(1)), 0);
-    EXPECT_EQ(EdgeCount(worker(2)), 0);
-
-    database::GraphDbAccessor dba{master()};
-
-    VertexAccessor from{from_addr, dba};
-    EXPECT_EQ(Size(from.out()), 0);
-    EXPECT_EQ(Size(from.in()), 0);
-
-    VertexAccessor to{to_addr, dba};
-    EXPECT_EQ(Size(to.out()), 0);
-    EXPECT_EQ(Size(to.in()), 0);
-  }
-};
-
-TEST_F(DistributedEdgeRemoveTest, DifferentVertexOwnersRemoteDelete) {
-  Create(worker(1), worker(2));
-  CheckCreation();
-  Delete(master());
-  CheckDeletion();
-}
-
-TEST_F(DistributedEdgeRemoveTest, DifferentVertexOwnersFromDelete) {
-  Create(worker(1), worker(2));
-  CheckCreation();
-  Delete(worker(1));
-  CheckDeletion();
-}
-
-TEST_F(DistributedEdgeRemoveTest, DifferentVertexOwnersToDelete) {
-  Create(worker(1), worker(2));
-  CheckCreation();
-  Delete(worker(2));
-  CheckDeletion();
-}
-
-TEST_F(DistributedEdgeRemoveTest, SameVertexOwnersRemoteDelete) {
-  Create(worker(1), worker(1));
-  CheckCreation();
-  Delete(worker(2));
-  CheckDeletion();
-}
diff --git a/tests/unit/durability.cpp b/tests/unit/durability.cpp
index 8c539b4ea..35442437c 100644
--- a/tests/unit/durability.cpp
+++ b/tests/unit/durability.cpp
@@ -27,6 +27,8 @@ DECLARE_int32(wal_rotate_deltas_count);
 
 namespace fs = std::experimental::filesystem;
 
+using namespace std::literals::chrono_literals;
+
 // Helper class for performing random CRUD ops on a database.
 class DbGenerator {
   static constexpr int kLabelCount = 3;
diff --git a/tests/unit/metrics.cpp b/tests/unit/metrics.cpp
deleted file mode 100644
index 25fd15a7f..000000000
--- a/tests/unit/metrics.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-#include "stats/metrics.hpp"
-
-#include <thread>
-
-#include "gtest/gtest.h"
-
-using namespace std::chrono_literals;
-
-using namespace stats;
-
-TEST(Metrics, Counter) {
-  Counter &x = GetCounter("counter");
-  EXPECT_EQ(*x.Flush(), 0);
-  EXPECT_EQ(x.Value(), 0);
-  x.Bump();
-  EXPECT_EQ(*x.Flush(), 1);
-  EXPECT_EQ(x.Value(), 1);
-
-  Counter &y = GetCounter("counter");
-  EXPECT_EQ(*y.Flush(), 1);
-  EXPECT_EQ(y.Value(), 1);
-
-  y.Bump(5);
-  EXPECT_EQ(*x.Flush(), 6);
-  EXPECT_EQ(x.Value(), 6);
-  EXPECT_EQ(*y.Flush(), 6);
-  EXPECT_EQ(y.Value(), 6);
-}
-
-TEST(Metrics, Gauge) {
-  Gauge &x = GetGauge("gauge");
-  EXPECT_EQ(*x.Flush(), 0);
-  x.Set(1);
-  EXPECT_EQ(*x.Flush(), 1);
-
-  Gauge &y = GetGauge("gauge");
-  EXPECT_EQ(*y.Flush(), 1);
-
-  x.Set(2);
-  EXPECT_EQ(*x.Flush(), 2);
-  EXPECT_EQ(*y.Flush(), 2);
-}
-
-TEST(Metrics, IntervalMin) {
-  IntervalMin &x = GetIntervalMin("min");
-  EXPECT_EQ(x.Flush(), std::experimental::nullopt);
-  x.Add(5);
-  x.Add(3);
-  EXPECT_EQ(*x.Flush(), 3);
-  EXPECT_EQ(x.Flush(), std::experimental::nullopt);
-  x.Add(3);
-  x.Add(5);
-  EXPECT_EQ(*x.Flush(), 3);
-  EXPECT_EQ(x.Flush(), std::experimental::nullopt);
-}
-
-TEST(Metrics, IntervalMax) {
-  IntervalMax &x = GetIntervalMax("max");
-  EXPECT_EQ(x.Flush(), std::experimental::nullopt);
-  x.Add(5);
-  x.Add(3);
-  EXPECT_EQ(*x.Flush(), 5);
-  EXPECT_EQ(x.Flush(), std::experimental::nullopt);
-  x.Add(3);
-  x.Add(5);
-  EXPECT_EQ(*x.Flush(), 5);
-  EXPECT_EQ(x.Flush(), std::experimental::nullopt);
-}
-
-TEST(Metrics, Stopwatch) {
-  auto d1 = Stopwatch("stopwatch", [] { std::this_thread::sleep_for(150ms); });
-  EXPECT_TRUE(140 <= d1 && d1 <= 160);
-
-  auto d2 = Stopwatch("stopwatch", [] { std::this_thread::sleep_for(300ms); });
-  EXPECT_TRUE(290 <= d2 && d2 <= 310);
-
-  Counter &total_time = GetCounter("stopwatch.total_time");
-  Counter &count = GetCounter("stopwatch.count");
-  IntervalMin &min = GetIntervalMin("stopwatch.min");
-  IntervalMax &max = GetIntervalMax("stopwatch.max");
-
-  EXPECT_TRUE(430 <= total_time.Value() && total_time.Value() <= 470);
-  EXPECT_EQ(count.Value(), 2);
-
-  auto m = *min.Flush();
-  EXPECT_TRUE(140 <= m && m <= 160);
-
-  auto M = *max.Flush();
-  EXPECT_TRUE(290 <= M && M <= 310);
-}
diff --git a/tests/unit/query_planner.cpp b/tests/unit/query_planner.cpp
index 245e10de2..4daa5ef34 100644
--- a/tests/unit/query_planner.cpp
+++ b/tests/unit/query_planner.cpp
@@ -5,8 +5,6 @@
 #include <typeinfo>
 #include <unordered_set>
 
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
@@ -113,20 +111,10 @@ class PlanChecker : public HierarchicalLogicalOperatorVisitor {
     return true;
   }
 
-  PRE_VISIT(PullRemote);
-
-  bool PreVisit(Synchronize &op) override {
-    CheckOp(op);
-    op.input()->Accept(*this);
-    return false;
-  }
-
   bool PreVisit(Cartesian &op) override {
     CheckOp(op);
     return false;
   }
-
-  PRE_VISIT(PullRemoteOrderBy);
 #undef PRE_VISIT
 
   std::list<BaseOpChecker *> checkers_;
@@ -250,12 +238,6 @@ class ExpectAggregate : public OpChecker<Aggregate> {
   std::unordered_set<query::Expression *> group_by_;
 };
 
-auto ExpectMasterAggregate(
-    const std::vector<query::Aggregation *> &aggregations,
-    const std::unordered_set<query::Expression *> &group_by) {
-  return ExpectAggregate(true, aggregations, group_by);
-}
-
 class ExpectMerge : public OpChecker<Merge> {
  public:
   ExpectMerge(const std::list<BaseOpChecker *> &on_match,
@@ -375,45 +357,6 @@ class ExpectCreateIndex : public OpChecker<CreateIndex> {
   storage::Property property_;
 };
 
-class ExpectPullRemote : public OpChecker<PullRemote> {
- public:
-  ExpectPullRemote() {}
-  ExpectPullRemote(const std::vector<Symbol> &symbols) : symbols_(symbols) {}
-
-  void ExpectOp(PullRemote &op, const SymbolTable &) override {
-    EXPECT_THAT(op.symbols(), testing::UnorderedElementsAreArray(symbols_));
-  }
-
- private:
-  std::vector<Symbol> symbols_;
-};
-
-class ExpectSynchronize : public OpChecker<Synchronize> {
- public:
-  explicit ExpectSynchronize(bool advance_command)
-      : has_pull_(false), advance_command_(advance_command) {}
-  ExpectSynchronize(const std::vector<Symbol> &symbols = {},
-                    bool advance_command = false)
-      : expect_pull_(symbols),
-        has_pull_(true),
-        advance_command_(advance_command) {}
-
-  void ExpectOp(Synchronize &op, const SymbolTable &symbol_table) override {
-    if (has_pull_) {
-      ASSERT_TRUE(op.pull_remote());
-      expect_pull_.ExpectOp(*op.pull_remote(), symbol_table);
-    } else {
-      EXPECT_FALSE(op.pull_remote());
-    }
-    EXPECT_EQ(op.advance_command(), advance_command_);
-  }
-
- private:
-  ExpectPullRemote expect_pull_;
-  bool has_pull_ = true;
-  bool advance_command_ = false;
-};
-
 class ExpectCartesian : public OpChecker<Cartesian> {
  public:
   ExpectCartesian(const std::list<std::unique_ptr<BaseOpChecker>> &left,
@@ -447,19 +390,6 @@ class ExpectCreateNode : public OpChecker<CreateNode> {
   bool on_random_worker_ = false;
 };
 
-class ExpectPullRemoteOrderBy : public OpChecker<PullRemoteOrderBy> {
- public:
-  ExpectPullRemoteOrderBy(const std::vector<Symbol> symbols)
-      : symbols_(symbols) {}
-
-  void ExpectOp(PullRemoteOrderBy &op, const SymbolTable &) override {
-    EXPECT_THAT(op.symbols(), testing::UnorderedElementsAreArray(symbols_));
-  }
-
- private:
-  std::vector<Symbol> symbols_;
-};
-
 auto MakeSymbolTable(query::Query &query) {
   SymbolTable symbol_table;
   SymbolGenerator symbol_generator(symbol_table);
@@ -481,30 +411,6 @@ class Planner {
   std::unique_ptr<LogicalOperator> plan_;
 };
 
-class SerializedPlanner {
- public:
-  SerializedPlanner(std::vector<SingleQueryPart> single_query_parts,
-                    PlanningContext<database::GraphDbAccessor> &context) {
-    std::stringstream stream;
-    {
-      auto original_plan = MakeLogicalPlanForSingleQuery<RuleBasedPlanner>(
-          single_query_parts, context);
-      boost::archive::binary_oarchive out_archive(stream);
-      out_archive << original_plan;
-    }
-    {
-      boost::archive::binary_iarchive in_archive(stream);
-      std::tie(plan_, ast_storage_) = LoadPlan(in_archive);
-    }
-  }
-
-  auto &plan() { return *plan_; }
-
- private:
-  AstTreeStorage ast_storage_;
-  std::unique_ptr<LogicalOperator> plan_;
-};
-
 template <class TPlanner>
 TPlanner MakePlanner(database::MasterBase &master_db, AstTreeStorage &storage,
                      SymbolTable &symbol_table) {
@@ -532,57 +438,6 @@ auto CheckPlan(AstTreeStorage &storage, TChecker... checker) {
   CheckPlan(planner.plan(), symbol_table, checker...);
 }
 
-struct ExpectedDistributedPlan {
-  std::list<std::unique_ptr<BaseOpChecker>> master_checkers;
-  std::vector<std::list<std::unique_ptr<BaseOpChecker>>> worker_checkers;
-};
-
-template <class TPlanner>
-DistributedPlan MakeDistributedPlan(query::AstTreeStorage &storage) {
-  database::Master db;
-  auto symbol_table = MakeSymbolTable(*storage.query());
-  auto planner = MakePlanner<TPlanner>(db, storage, symbol_table);
-  std::atomic<int64_t> next_plan_id{0};
-  return MakeDistributedPlan(planner.plan(), symbol_table, next_plan_id);
-}
-
-void CheckDistributedPlan(DistributedPlan &distributed_plan,
-                          ExpectedDistributedPlan &expected) {
-  PlanChecker plan_checker(expected.master_checkers,
-                           distributed_plan.symbol_table);
-  distributed_plan.master_plan->Accept(plan_checker);
-  EXPECT_TRUE(plan_checker.checkers_.empty());
-  if (expected.worker_checkers.empty()) {
-    EXPECT_TRUE(distributed_plan.worker_plans.empty());
-  } else {
-    ASSERT_EQ(distributed_plan.worker_plans.size(),
-              expected.worker_checkers.size());
-    for (size_t i = 0; i < expected.worker_checkers.size(); ++i) {
-      PlanChecker plan_checker(expected.worker_checkers[i],
-                               distributed_plan.symbol_table);
-      auto worker_plan = distributed_plan.worker_plans[i].second;
-      worker_plan->Accept(plan_checker);
-      EXPECT_TRUE(plan_checker.checkers_.empty());
-    }
-  }
-}
-
-void CheckDistributedPlan(const LogicalOperator &plan,
-                          const SymbolTable &symbol_table,
-                          ExpectedDistributedPlan &expected_distributed_plan) {
-  std::atomic<int64_t> next_plan_id{0};
-  auto distributed_plan = MakeDistributedPlan(plan, symbol_table, next_plan_id);
-  EXPECT_EQ(next_plan_id - 1, distributed_plan.worker_plans.size());
-  CheckDistributedPlan(distributed_plan, expected_distributed_plan);
-}
-
-template <class TPlanner>
-void CheckDistributedPlan(AstTreeStorage &storage,
-                          ExpectedDistributedPlan &expected_distributed_plan) {
-  auto distributed_plan = MakeDistributedPlan<TPlanner>(storage);
-  CheckDistributedPlan(distributed_plan, expected_distributed_plan);
-}
-
 template <class T>
 std::list<std::unique_ptr<BaseOpChecker>> MakeCheckers(T arg) {
   std::list<std::unique_ptr<BaseOpChecker>> l;
@@ -597,47 +452,10 @@ std::list<std::unique_ptr<BaseOpChecker>> MakeCheckers(T arg, Rest &&... rest) {
   return std::move(l);
 }
 
-ExpectedDistributedPlan ExpectDistributed(
-    std::list<std::unique_ptr<BaseOpChecker>> master_checker) {
-  return ExpectedDistributedPlan{std::move(master_checker)};
-}
-
-ExpectedDistributedPlan ExpectDistributed(
-    std::list<std::unique_ptr<BaseOpChecker>> master_checker,
-    std::list<std::unique_ptr<BaseOpChecker>> worker_checker) {
-  ExpectedDistributedPlan expected{std::move(master_checker)};
-  expected.worker_checkers.emplace_back(std::move(worker_checker));
-  return expected;
-}
-
-void AddWorkerCheckers(
-    ExpectedDistributedPlan &expected,
-    std::list<std::unique_ptr<BaseOpChecker>> worker_checker) {
-  expected.worker_checkers.emplace_back(std::move(worker_checker));
-}
-
-template <class... Rest>
-void AddWorkerCheckers(ExpectedDistributedPlan &expected,
-                       std::list<std::unique_ptr<BaseOpChecker>> worker_checker,
-                       Rest &&... rest) {
-  expected.worker_checkers.emplace_back(std::move(worker_checker));
-  AddWorkerCheckers(expected, std::forward<Rest>(rest)...);
-}
-
-template <class... Rest>
-ExpectedDistributedPlan ExpectDistributed(
-    std::list<std::unique_ptr<BaseOpChecker>> master_checker,
-    std::list<std::unique_ptr<BaseOpChecker>> worker_checker, Rest &&... rest) {
-  ExpectedDistributedPlan expected{std::move(master_checker)};
-  expected.worker_checkers.emplace_back(std::move(worker_checker));
-  AddWorkerCheckers(expected, std::forward<Rest>(rest)...);
-  return expected;
-}
-
 template <class T>
 class TestPlanner : public ::testing::Test {};
 
-using PlannerTypes = ::testing::Types<Planner, SerializedPlanner>;
+using PlannerTypes = ::testing::Types<Planner>;
 
 TYPED_TEST_CASE(TestPlanner, PlannerTypes);
 
@@ -647,14 +465,9 @@ TYPED_TEST(TestPlanner, MatchNodeReturn) {
   auto *as_n = NEXPR("n", IDENT("n"));
   QUERY(SINGLE_QUERY(MATCH(PATTERN(NODE("n"))), RETURN(as_n)));
   auto symbol_table = MakeSymbolTable(*storage.query());
-  database::Master db;
+  database::SingleNode db;
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectProduce());
-  ExpectPullRemote pull({symbol_table.at(*as_n)});
-  auto expected =
-      ExpectDistributed(MakeCheckers(ExpectScanAll(), ExpectProduce(), pull),
-                        MakeCheckers(ExpectScanAll(), ExpectProduce()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, CreateNodeReturn) {
@@ -669,14 +482,6 @@ TYPED_TEST(TestPlanner, CreateNodeReturn) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectCreateNode(), acc,
             ExpectProduce());
-  {
-    auto expected = ExpectDistributed(MakeCheckers(
-        ExpectCreateNode(true), ExpectSynchronize(false), ExpectProduce()));
-    std::atomic<int64_t> next_plan_id{0};
-    auto distributed_plan =
-        MakeDistributedPlan(planner.plan(), symbol_table, next_plan_id);
-    CheckDistributedPlan(distributed_plan, expected);
-  }
 }
 
 TYPED_TEST(TestPlanner, CreateExpand) {
@@ -688,11 +493,6 @@ TYPED_TEST(TestPlanner, CreateExpand) {
   QUERY(SINGLE_QUERY(CREATE(PATTERN(
       NODE("n"), EDGE("r", Direction::OUT, {relationship}), NODE("m")))));
   CheckPlan<TypeParam>(storage, ExpectCreateNode(), ExpectCreateExpand());
-  ExpectedDistributedPlan expected{
-      MakeCheckers(ExpectCreateNode(true), ExpectCreateExpand(),
-                   ExpectSynchronize(false)),
-      {}};
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, CreateMultipleNode) {
@@ -700,11 +500,6 @@ TYPED_TEST(TestPlanner, CreateMultipleNode) {
   AstTreeStorage storage;
   QUERY(SINGLE_QUERY(CREATE(PATTERN(NODE("n")), PATTERN(NODE("m")))));
   CheckPlan<TypeParam>(storage, ExpectCreateNode(), ExpectCreateNode());
-  ExpectedDistributedPlan expected{
-      MakeCheckers(ExpectCreateNode(true), ExpectCreateNode(true),
-                   ExpectSynchronize(false)),
-      {}};
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, CreateNodeExpandNode) {
@@ -718,11 +513,6 @@ TYPED_TEST(TestPlanner, CreateNodeExpandNode) {
       PATTERN(NODE("l")))));
   CheckPlan<TypeParam>(storage, ExpectCreateNode(), ExpectCreateExpand(),
                        ExpectCreateNode());
-  ExpectedDistributedPlan expected{
-      MakeCheckers(ExpectCreateNode(true), ExpectCreateExpand(),
-                   ExpectCreateNode(true), ExpectSynchronize(false)),
-      {}};
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, CreateNamedPattern) {
@@ -735,11 +525,6 @@ TYPED_TEST(TestPlanner, CreateNamedPattern) {
       "p", NODE("n"), EDGE("r", Direction::OUT, {relationship}), NODE("m")))));
   CheckPlan<TypeParam>(storage, ExpectCreateNode(), ExpectCreateExpand(),
                        ExpectConstructNamedPath());
-  ExpectedDistributedPlan expected{
-      MakeCheckers(ExpectCreateNode(true), ExpectCreateExpand(),
-                   ExpectConstructNamedPath(), ExpectSynchronize(false)),
-      {}};
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchCreateExpand) {
@@ -753,10 +538,6 @@ TYPED_TEST(TestPlanner, MatchCreateExpand) {
       CREATE(PATTERN(NODE("n"), EDGE("r", Direction::OUT, {relationship}),
                      NODE("m")))));
   CheckPlan<TypeParam>(storage, ExpectScanAll(), ExpectCreateExpand());
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), ExpectCreateExpand(), ExpectSynchronize()),
-      MakeCheckers(ExpectScanAll(), ExpectCreateExpand()));
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchLabeledNodes) {
@@ -771,11 +552,6 @@ TYPED_TEST(TestPlanner, MatchLabeledNodes) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectScanAllByLabel(),
             ExpectProduce());
-  ExpectPullRemote pull({symbol_table.at(*as_n)});
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAllByLabel(), ExpectProduce(), pull),
-      MakeCheckers(ExpectScanAllByLabel(), ExpectProduce()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchPathReturn) {
@@ -793,11 +569,6 @@ TYPED_TEST(TestPlanner, MatchPathReturn) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectExpand(),
             ExpectProduce());
-  ExpectPullRemote pull({symbol_table.at(*as_n)});
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectProduce(), pull),
-      MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectProduce()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchNamedPatternReturn) {
@@ -816,13 +587,6 @@ TYPED_TEST(TestPlanner, MatchNamedPatternReturn) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectExpand(),
             ExpectConstructNamedPath(), ExpectProduce());
-  ExpectPullRemote pull({symbol_table.at(*as_p)});
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectConstructNamedPath(),
-                   ExpectProduce(), pull),
-      MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectConstructNamedPath(),
-                   ExpectProduce()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchNamedPatternWithPredicateReturn) {
@@ -841,13 +605,6 @@ TYPED_TEST(TestPlanner, MatchNamedPatternWithPredicateReturn) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectExpand(),
             ExpectConstructNamedPath(), ExpectFilter(), ExpectProduce());
-  ExpectPullRemote pull({symbol_table.at(*as_p)});
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectConstructNamedPath(),
-                   ExpectFilter(), ExpectProduce(), pull),
-      MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectConstructNamedPath(),
-                   ExpectFilter(), ExpectProduce()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, OptionalMatchNamedPatternReturn) {
@@ -871,12 +628,6 @@ TYPED_TEST(TestPlanner, OptionalMatchNamedPatternReturn) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table,
             ExpectOptional(optional_symbols, optional), ExpectProduce());
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectOptional(optional_symbols, optional), ExpectProduce(),
-                   ExpectPullRemote({symbol_table.at(*as_p)})),
-      MakeCheckers(ExpectOptional(optional_symbols, optional),
-                   ExpectProduce()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchWhereReturn) {
@@ -893,11 +644,6 @@ TYPED_TEST(TestPlanner, MatchWhereReturn) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectFilter(),
             ExpectProduce());
-  ExpectPullRemote pull({symbol_table.at(*as_n)});
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), ExpectFilter(), ExpectProduce(), pull),
-      MakeCheckers(ExpectScanAll(), ExpectFilter(), ExpectProduce()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchDelete) {
@@ -905,10 +651,6 @@ TYPED_TEST(TestPlanner, MatchDelete) {
   AstTreeStorage storage;
   QUERY(SINGLE_QUERY(MATCH(PATTERN(NODE("n"))), DELETE(IDENT("n"))));
   CheckPlan<TypeParam>(storage, ExpectScanAll(), ExpectDelete());
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), ExpectDelete(), ExpectSynchronize()),
-      MakeCheckers(ExpectScanAll(), ExpectDelete()));
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchNodeSet) {
@@ -923,12 +665,6 @@ TYPED_TEST(TestPlanner, MatchNodeSet) {
                      SET("n", IDENT("n")), SET("n", {label})));
   CheckPlan<TypeParam>(storage, ExpectScanAll(), ExpectSetProperty(),
                        ExpectSetProperties(), ExpectSetLabels());
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), ExpectSetProperty(), ExpectSetProperties(),
-                   ExpectSetLabels(), ExpectSynchronize()),
-      MakeCheckers(ExpectScanAll(), ExpectSetProperty(), ExpectSetProperties(),
-                   ExpectSetLabels()));
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchRemove) {
@@ -942,12 +678,6 @@ TYPED_TEST(TestPlanner, MatchRemove) {
                      REMOVE(PROPERTY_LOOKUP("n", prop)), REMOVE("n", {label})));
   CheckPlan<TypeParam>(storage, ExpectScanAll(), ExpectRemoveProperty(),
                        ExpectRemoveLabels());
-  auto expected =
-      ExpectDistributed(MakeCheckers(ExpectScanAll(), ExpectRemoveProperty(),
-                                     ExpectRemoveLabels(), ExpectSynchronize()),
-                        MakeCheckers(ExpectScanAll(), ExpectRemoveProperty(),
-                                     ExpectRemoveLabels()));
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchMultiPattern) {
@@ -1011,25 +741,6 @@ TYPED_TEST(TestPlanner, MultiMatch) {
   CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectExpand(),
             ExpectScanAll(), ExpectExpand(), ExpectExpand(),
             ExpectExpandUniquenessFilter<EdgeAccessor>(), ExpectProduce());
-  auto get_symbol = [&symbol_table](const auto *atom_node) {
-    return symbol_table.at(*atom_node->identifier_);
-  };
-  ExpectPullRemote left_pull(
-      {get_symbol(node_n), get_symbol(edge_r), get_symbol(node_m)});
-  auto left_cart = MakeCheckers(ExpectScanAll(), ExpectExpand(), left_pull);
-  ExpectPullRemote right_pull({get_symbol(node_j), get_symbol(edge_e),
-                               get_symbol(node_i), get_symbol(edge_f),
-                               get_symbol(node_h)});
-  auto right_cart =
-      MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectExpand(),
-                   ExpectExpandUniquenessFilter<EdgeAccessor>(), right_pull);
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectCartesian(std::move(left_cart), std::move(right_cart)),
-                   ExpectProduce()),
-      MakeCheckers(ExpectScanAll(), ExpectExpand()),
-      MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectExpand(),
-                   ExpectExpandUniquenessFilter<EdgeAccessor>()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, MultiMatchSameStart) {
@@ -1046,11 +757,6 @@ TYPED_TEST(TestPlanner, MultiMatchSameStart) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectExpand(),
             ExpectProduce());
-  ExpectPullRemote pull({symbol_table.at(*as_n)});
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectProduce(), pull),
-      MakeCheckers(ExpectScanAll(), ExpectExpand(), ExpectProduce()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchWithReturn) {
@@ -1065,11 +771,6 @@ TYPED_TEST(TestPlanner, MatchWithReturn) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectProduce(),
             ExpectProduce());
-  ExpectPullRemote pull({symbol_table.at(*as_new)});
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), ExpectProduce(), ExpectProduce(), pull),
-      MakeCheckers(ExpectScanAll(), ExpectProduce(), ExpectProduce()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchWithWhereReturn) {
@@ -1087,13 +788,6 @@ TYPED_TEST(TestPlanner, MatchWithWhereReturn) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectProduce(),
             ExpectFilter(), ExpectProduce());
-  ExpectPullRemote pull({symbol_table.at(*as_new)});
-  auto expected =
-      ExpectDistributed(MakeCheckers(ExpectScanAll(), ExpectProduce(),
-                                     ExpectFilter(), ExpectProduce(), pull),
-                        MakeCheckers(ExpectScanAll(), ExpectProduce(),
-                                     ExpectFilter(), ExpectProduce()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, CreateMultiExpand) {
@@ -1108,11 +802,6 @@ TYPED_TEST(TestPlanner, CreateMultiExpand) {
              PATTERN(NODE("n"), EDGE("p", Direction::OUT, {p}), NODE("l")))));
   CheckPlan<TypeParam>(storage, ExpectCreateNode(), ExpectCreateExpand(),
                        ExpectCreateExpand());
-  ExpectedDistributedPlan expected{
-      MakeCheckers(ExpectCreateNode(true), ExpectCreateExpand(),
-                   ExpectCreateExpand(), ExpectSynchronize(false)),
-      {}};
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchWithSumWhereReturn) {
@@ -1148,20 +837,6 @@ TYPED_TEST(TestPlanner, MatchReturnSum) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), aggr,
             ExpectProduce());
-  {
-    std::atomic<int64_t> next_plan_id{0};
-    auto distributed_plan =
-        MakeDistributedPlan(planner.plan(), symbol_table, next_plan_id);
-    auto merge_sum = SUM(IDENT("worker_sum"));
-    auto master_aggr = ExpectMasterAggregate({merge_sum}, {n_prop2});
-    ExpectPullRemote pull(
-        {symbol_table.at(*sum), symbol_table.at(*n_prop2->expression_)});
-    auto expected =
-        ExpectDistributed(MakeCheckers(ExpectScanAll(), aggr, pull, master_aggr,
-                                       ExpectProduce(), ExpectProduce()),
-                          MakeCheckers(ExpectScanAll(), aggr));
-    CheckDistributedPlan(distributed_plan, expected);
-  }
 }
 
 TYPED_TEST(TestPlanner, CreateWithSum) {
@@ -1196,11 +871,6 @@ TYPED_TEST(TestPlanner, MatchWithCreate) {
           PATTERN(NODE("a"), EDGE("r", Direction::OUT, {r_type}), NODE("b")))));
   CheckPlan<TypeParam>(storage, ExpectScanAll(), ExpectProduce(),
                        ExpectCreateExpand());
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), ExpectProduce(), ExpectCreateExpand(),
-                   ExpectSynchronize()),
-      MakeCheckers(ExpectScanAll(), ExpectProduce(), ExpectCreateExpand()));
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchReturnSkipLimit) {
@@ -1214,12 +884,6 @@ TYPED_TEST(TestPlanner, MatchReturnSkipLimit) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectProduce(),
             ExpectSkip(), ExpectLimit());
-  ExpectPullRemote pull({symbol_table.at(*as_n)});
-  auto expected =
-      ExpectDistributed(MakeCheckers(ExpectScanAll(), ExpectProduce(), pull,
-                                     ExpectSkip(), ExpectLimit()),
-                        MakeCheckers(ExpectScanAll(), ExpectProduce()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, CreateWithSkipReturnLimit) {
@@ -1240,12 +904,6 @@ TYPED_TEST(TestPlanner, CreateWithSkipReturnLimit) {
   // us here (but who knows if they change it again).
   CheckPlan(planner.plan(), symbol_table, ExpectCreateNode(), acc,
             ExpectProduce(), ExpectSkip(), ExpectProduce(), ExpectLimit());
-  ExpectedDistributedPlan expected{
-      MakeCheckers(ExpectCreateNode(true), ExpectSynchronize(true),
-                   ExpectProduce(), ExpectSkip(), ExpectProduce(),
-                   ExpectLimit()),
-      {}};
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, CreateReturnSumSkipLimit) {
@@ -1281,17 +939,6 @@ TYPED_TEST(TestPlanner, MatchReturnOrderBy) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectProduce(),
             ExpectOrderBy());
-  ExpectPullRemoteOrderBy pull_order_by(
-      {symbol_table.at(*as_m), symbol_table.at(*node_n->identifier_)});
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), ExpectProduce(), ExpectOrderBy(),
-                   pull_order_by),
-      MakeCheckers(ExpectScanAll(), ExpectProduce(), ExpectOrderBy()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
-  // Even though last operator pulls and orders by `m` and `n`, we expect only
-  // `m` as the output of the query execution.
-  EXPECT_THAT(planner.plan().OutputSymbols(symbol_table),
-              testing::UnorderedElementsAre(symbol_table.at(*as_m)));
 }
 
 TYPED_TEST(TestPlanner, CreateWithOrderByWhere) {
@@ -1322,10 +969,6 @@ TYPED_TEST(TestPlanner, CreateWithOrderByWhere) {
   CheckPlan(planner.plan(), symbol_table, ExpectCreateNode(),
             ExpectCreateExpand(), acc, ExpectProduce(), ExpectOrderBy(),
             ExpectFilter());
-  auto expected = ExpectDistributed(MakeCheckers(
-      ExpectCreateNode(true), ExpectCreateExpand(), ExpectSynchronize(true),
-      ExpectProduce(), ExpectOrderBy(), ExpectFilter()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, ReturnAddSumCountOrderBy) {
@@ -1337,9 +980,6 @@ TYPED_TEST(TestPlanner, ReturnAddSumCountOrderBy) {
       RETURN(ADD(sum, count), AS("result"), ORDER_BY(IDENT("result")))));
   auto aggr = ExpectAggregate({sum, count}, {});
   CheckPlan<TypeParam>(storage, aggr, ExpectProduce(), ExpectOrderBy());
-  auto expected =
-      ExpectDistributed(MakeCheckers(aggr, ExpectProduce(), ExpectOrderBy()));
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, MatchMerge) {
@@ -1403,11 +1043,6 @@ TYPED_TEST(TestPlanner, MatchUnwindReturn) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectUnwind(),
             ExpectProduce());
-  ExpectPullRemote pull({symbol_table.at(*as_n), symbol_table.at(*as_x)});
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), ExpectUnwind(), ExpectProduce(), pull),
-      MakeCheckers(ExpectScanAll(), ExpectUnwind(), ExpectProduce()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, ReturnDistinctOrderBySkipLimit) {
@@ -1417,10 +1052,6 @@ TYPED_TEST(TestPlanner, ReturnDistinctOrderBySkipLimit) {
                                      SKIP(LITERAL(1)), LIMIT(LITERAL(1)))));
   CheckPlan<TypeParam>(storage, ExpectProduce(), ExpectDistinct(),
                        ExpectOrderBy(), ExpectSkip(), ExpectLimit());
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectProduce(), ExpectDistinct(), ExpectOrderBy(),
-                   ExpectSkip(), ExpectLimit()));
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, CreateWithDistinctSumWhereReturn) {
@@ -1476,13 +1107,6 @@ TYPED_TEST(TestPlanner, MatchWhereBeforeExpand) {
   auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
   CheckPlan(planner.plan(), symbol_table, ExpectScanAll(), ExpectFilter(),
             ExpectExpand(), ExpectProduce());
-  ExpectPullRemote pull({symbol_table.at(*as_n)});
-  auto expected =
-      ExpectDistributed(MakeCheckers(ExpectScanAll(), ExpectFilter(),
-                                     ExpectExpand(), ExpectProduce(), pull),
-                        MakeCheckers(ExpectScanAll(), ExpectFilter(),
-                                     ExpectExpand(), ExpectProduce()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
 TYPED_TEST(TestPlanner, MultiMatchWhere) {
@@ -1607,8 +1231,6 @@ TYPED_TEST(TestPlanner, FunctionAggregationReturn) {
       RETURN(FN("sqrt", sum), AS("result"), group_by_literal, AS("group_by"))));
   auto aggr = ExpectAggregate({sum}, {group_by_literal});
   CheckPlan<TypeParam>(storage, aggr, ExpectProduce());
-  auto expected = ExpectDistributed(MakeCheckers(aggr, ExpectProduce()));
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, FunctionWithoutArguments) {
@@ -1616,8 +1238,6 @@ TYPED_TEST(TestPlanner, FunctionWithoutArguments) {
   AstTreeStorage storage;
   QUERY(SINGLE_QUERY(RETURN(FN("pi"), AS("pi"))));
   CheckPlan<TypeParam>(storage, ExpectProduce());
-  auto expected = ExpectDistributed(MakeCheckers(ExpectProduce()));
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, ListLiteralAggregationReturn) {
@@ -1718,9 +1338,6 @@ TYPED_TEST(TestPlanner, CreateIndex) {
   AstTreeStorage storage;
   QUERY(SINGLE_QUERY(CREATE_INDEX_ON(label, property)));
   CheckPlan<TypeParam>(storage, ExpectCreateIndex(label, property));
-  auto expected =
-      ExpectDistributed(MakeCheckers(ExpectCreateIndex(label, property)));
-  CheckDistributedPlan<TypeParam>(storage, expected);
 }
 
 TYPED_TEST(TestPlanner, AtomIndexedLabelProperty) {
@@ -2133,105 +1750,4 @@ TYPED_TEST(TestPlanner, ReturnAsteriskOmitsLambdaSymbols) {
     EXPECT_TRUE(utils::Contains(outputs, name));
   }
 }
-
-TYPED_TEST(TestPlanner, DistributedAvg) {
-  // Test MATCH (n) RETURN AVG(n.prop) AS res
-  AstTreeStorage storage;
-  database::Master db;
-  database::GraphDbAccessor dba(db);
-  auto prop = dba.Property("prop");
-  QUERY(SINGLE_QUERY(MATCH(PATTERN(NODE("n"))),
-                     RETURN(AVG(PROPERTY_LOOKUP("n", prop)), AS("res"))));
-  auto distributed_plan = MakeDistributedPlan<TypeParam>(storage);
-  auto &symbol_table = distributed_plan.symbol_table;
-  auto worker_sum = SUM(PROPERTY_LOOKUP("n", prop));
-  auto worker_count = COUNT(PROPERTY_LOOKUP("n", prop));
-  {
-    ASSERT_EQ(distributed_plan.worker_plans.size(), 1U);
-    auto worker_plan = distributed_plan.worker_plans.back().second;
-    auto worker_aggr_op = std::dynamic_pointer_cast<Aggregate>(worker_plan);
-    ASSERT_TRUE(worker_aggr_op);
-    ASSERT_EQ(worker_aggr_op->aggregations().size(), 2U);
-    symbol_table[*worker_sum] = worker_aggr_op->aggregations()[0].output_sym;
-    symbol_table[*worker_count] = worker_aggr_op->aggregations()[1].output_sym;
-  }
-  auto worker_aggr = ExpectAggregate({worker_sum, worker_count}, {});
-  auto merge_sum = SUM(IDENT("worker_sum"));
-  auto merge_count = SUM(IDENT("worker_count"));
-  auto master_aggr = ExpectMasterAggregate({merge_sum, merge_count}, {});
-  ExpectPullRemote pull(
-      {symbol_table.at(*worker_sum), symbol_table.at(*worker_count)});
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), worker_aggr, pull, master_aggr,
-                   ExpectProduce(), ExpectProduce()),
-      MakeCheckers(ExpectScanAll(), worker_aggr));
-  CheckDistributedPlan(distributed_plan, expected);
-}
-
-TYPED_TEST(TestPlanner, DistributedCollectList) {
-  // Test MATCH (n) RETURN COLLECT(n.prop) AS res
-  AstTreeStorage storage;
-  database::Master db;
-  database::GraphDbAccessor dba(db);
-  auto prop = dba.Property("prop");
-  auto node_n = NODE("n");
-  auto collect = COLLECT_LIST(PROPERTY_LOOKUP("n", prop));
-  QUERY(SINGLE_QUERY(MATCH(PATTERN(node_n)), RETURN(collect, AS("res"))));
-  auto distributed_plan = MakeDistributedPlan<TypeParam>(storage);
-  auto &symbol_table = distributed_plan.symbol_table;
-  auto aggr = ExpectAggregate({collect}, {});
-  ExpectPullRemote pull({symbol_table.at(*node_n->identifier_)});
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), pull, aggr, ExpectProduce()),
-      MakeCheckers(ExpectScanAll()));
-  CheckDistributedPlan(distributed_plan, expected);
-}
-
-TYPED_TEST(TestPlanner, DistributedMatchCreateReturn) {
-  // Test MATCH (n) CREATE (m) RETURN m
-  AstTreeStorage storage;
-  auto *ident_m = IDENT("m");
-  QUERY(SINGLE_QUERY(MATCH(PATTERN(NODE("n"))), CREATE(PATTERN(NODE("m"))),
-                     RETURN(ident_m, AS("m"))));
-  auto symbol_table = MakeSymbolTable(*storage.query());
-  auto acc = ExpectAccumulate({symbol_table.at(*ident_m)});
-  database::Master db;
-  auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectScanAll(), ExpectCreateNode(),
-                   ExpectSynchronize({symbol_table.at(*ident_m)}),
-                   ExpectProduce()),
-      MakeCheckers(ExpectScanAll(), ExpectCreateNode()));
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
-}
-
-TYPED_TEST(TestPlanner, DistributedCartesianCreate) {
-  // Test MATCH (a), (b) CREATE (a)-[e:r]->(b) RETURN e
-  AstTreeStorage storage;
-  database::Master db;
-  database::GraphDbAccessor dba(db);
-  auto relationship = dba.EdgeType("r");
-  auto *node_a = NODE("a");
-  auto *node_b = NODE("b");
-  QUERY(SINGLE_QUERY(
-      MATCH(PATTERN(node_a), PATTERN(node_b)),
-      CREATE(PATTERN(NODE("a"), EDGE("e", Direction::OUT, {relationship}),
-                     NODE("b"))),
-      RETURN("e")));
-  auto symbol_table = MakeSymbolTable(*storage.query());
-  auto left_cart =
-      MakeCheckers(ExpectScanAll(),
-                   ExpectPullRemote({symbol_table.at(*node_a->identifier_)}));
-  auto right_cart =
-      MakeCheckers(ExpectScanAll(),
-                   ExpectPullRemote({symbol_table.at(*node_b->identifier_)}));
-  auto expected = ExpectDistributed(
-      MakeCheckers(ExpectCartesian(std::move(left_cart), std::move(right_cart)),
-                   ExpectCreateExpand(), ExpectSynchronize(false),
-                   ExpectProduce()),
-      MakeCheckers(ExpectScanAll()), MakeCheckers(ExpectScanAll()));
-  auto planner = MakePlanner<TypeParam>(db, storage, symbol_table);
-  CheckDistributedPlan(planner.plan(), symbol_table, expected);
-}
-
 }  // namespace
diff --git a/tests/unit/query_semantic.cpp b/tests/unit/query_semantic.cpp
index 790e720b1..832977a83 100644
--- a/tests/unit/query_semantic.cpp
+++ b/tests/unit/query_semantic.cpp
@@ -1,8 +1,6 @@
 #include <memory>
 #include <sstream>
 
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
 #include "gtest/gtest.h"
 
 #include "query/frontend/ast/ast.hpp"
@@ -1087,25 +1085,3 @@ TEST_F(TestSymbolGenerator, MatchUnion) {
   query->Accept(symbol_generator);
   EXPECT_EQ(symbol_table.max_position(), 8);
 }
-
-TEST(TestSymbolTable, Serialization) {
-  SymbolTable original_table;
-  SymbolGenerator symbol_generator{original_table};
-  AstTreeStorage storage;
-  auto ident_a = IDENT("a");
-  auto sym_a = original_table.CreateSymbol("a", true, Symbol::Type::Vertex, 0);
-  original_table[*ident_a] = sym_a;
-  auto ident_b = IDENT("b");
-  auto sym_b = original_table.CreateSymbol("b", false, Symbol::Type::Edge, 1);
-  original_table[*ident_b] = sym_b;
-  std::stringstream stream;
-  {
-    boost::archive::binary_oarchive out_archive(stream);
-    out_archive << original_table;
-  }
-  SymbolTable serialized_table;
-  boost::archive::binary_iarchive in_archive(stream);
-  in_archive >> serialized_table;
-  EXPECT_EQ(serialized_table.max_position(), original_table.max_position());
-  EXPECT_EQ(serialized_table.table(), original_table.table());
-}
diff --git a/tests/unit/raft.cpp b/tests/unit/raft.cpp
deleted file mode 100644
index 14bebefec..000000000
--- a/tests/unit/raft.cpp
+++ /dev/null
@@ -1,660 +0,0 @@
-#include "gtest/gtest.h"
-
-#include <chrono>
-#include <experimental/optional>
-#include <thread>
-
-#include "communication/raft/raft.hpp"
-#include "communication/raft/storage/memory.hpp"
-#include "communication/raft/test_utils.hpp"
-
-using namespace std::chrono_literals;
-
-using testing::Values;
-
-using namespace communication::raft;
-using namespace communication::raft::test_utils;
-
-using communication::raft::impl::RaftMemberImpl;
-using communication::raft::impl::RaftMode;
-
-const RaftConfig test_config1{{"a"}, 150ms, 300ms, 70ms, 30ms};
-const RaftConfig test_config2{{"a", "b"}, 150ms, 300ms, 70ms, 30ms};
-const RaftConfig test_config3{{"a", "b", "c"}, 150ms, 300ms, 70ms, 30ms};
-const RaftConfig test_config5{
-    {"a", "b", "c", "d", "e"}, 150ms, 300ms, 70ms, 30ms};
-
-class RaftMemberImplTest : public ::testing::Test {
- public:
-  RaftMemberImplTest()
-      : storage_(1, "a", {}), member(network_, storage_, "a", test_config5) {}
-
-  void SetLog(std::vector<LogEntry<DummyState>> log) {
-    storage_.log_ = std::move(log);
-  }
-
-  NoOpNetworkInterface<DummyState> network_;
-  InMemoryStorage<DummyState> storage_;
-  RaftMemberImpl<DummyState> member;
-};
-
-TEST_F(RaftMemberImplTest, Constructor) {
-  EXPECT_EQ(member.mode_, RaftMode::FOLLOWER);
-  EXPECT_EQ(member.term_, 1);
-  EXPECT_EQ(*member.voted_for_, "a");
-  EXPECT_EQ(member.commit_index_, 0);
-}
-
-TEST_F(RaftMemberImplTest, CandidateOrLeaderTransitionToFollower) {
-  member.mode_ = RaftMode::CANDIDATE;
-  member.CandidateTransitionToLeader();
-
-  member.CandidateOrLeaderTransitionToFollower();
-  EXPECT_EQ(member.mode_, RaftMode::FOLLOWER);
-  EXPECT_EQ(member.leader_, std::experimental::nullopt);
-  EXPECT_LT(member.next_election_time_, TimePoint::max());
-}
-
-TEST_F(RaftMemberImplTest, CandidateTransitionToLeader) {
-  member.mode_ = RaftMode::CANDIDATE;
-  member.CandidateTransitionToLeader();
-
-  EXPECT_EQ(member.mode_, RaftMode::LEADER);
-  EXPECT_EQ(*member.leader_, "a");
-  EXPECT_EQ(member.next_election_time_, TimePoint::max());
-}
-
-TEST_F(RaftMemberImplTest, CandidateOrLeaderNoteTerm) {
-  member.mode_ = RaftMode::LEADER;
-  member.term_ = 5;
-  member.CandidateOrLeaderNoteTerm(5);
-
-  EXPECT_EQ(member.mode_, RaftMode::LEADER);
-  EXPECT_EQ(member.term_, 5);
-
-  member.CandidateOrLeaderNoteTerm(6);
-  EXPECT_EQ(member.mode_, RaftMode::FOLLOWER);
-  EXPECT_EQ(member.term_, 6);
-}
-
-TEST_F(RaftMemberImplTest, StartNewElection) {
-  member.StartNewElection();
-
-  EXPECT_EQ(member.mode_, RaftMode::CANDIDATE);
-  EXPECT_EQ(member.term_, 2);
-  EXPECT_EQ(member.voted_for_, member.id_);
-}
-
-TEST_F(RaftMemberImplTest, CountVotes) {
-  member.StartNewElection();
-  EXPECT_FALSE(member.CountVotes());
-
-  member.peer_states_["b"]->voted_for_me = true;
-  EXPECT_FALSE(member.CountVotes());
-
-  member.peer_states_["c"]->voted_for_me = true;
-  EXPECT_TRUE(member.CountVotes());
-}
-
-TEST_F(RaftMemberImplTest, AdvanceCommitIndex) {
-  SetLog({{1}, {1}, {1}, {1}, {2}, {2}, {2}, {2}});
-
-  member.mode_ = RaftMode::LEADER;
-  member.term_ = 2;
-
-  member.peer_states_["b"]->match_index = 4;
-  member.peer_states_["c"]->match_index = 4;
-
-  EXPECT_EQ(member.commit_index_, 0);
-  member.AdvanceCommitIndex();
-  EXPECT_EQ(member.commit_index_, 0);
-
-  member.peer_states_["b"]->match_index = 4;
-  member.peer_states_["c"]->match_index = 4;
-  member.AdvanceCommitIndex();
-  EXPECT_EQ(member.commit_index_, 0);
-
-  member.peer_states_["b"]->match_index = 5;
-  member.AdvanceCommitIndex();
-  EXPECT_EQ(member.commit_index_, 0);
-
-  member.peer_states_["c"]->match_index = 5;
-  member.AdvanceCommitIndex();
-  EXPECT_EQ(member.commit_index_, 5);
-
-  member.peer_states_["d"]->match_index = 6;
-  member.peer_states_["e"]->match_index = 7;
-  member.AdvanceCommitIndex();
-  EXPECT_EQ(member.commit_index_, 6);
-
-  member.peer_states_["c"]->match_index = 8;
-  member.AdvanceCommitIndex();
-  EXPECT_EQ(member.commit_index_, 7);
-
-  member.peer_states_["a"]->match_index = 8;
-  member.AdvanceCommitIndex();
-  EXPECT_EQ(member.commit_index_, 8);
-}
-
-TEST(RequestVote, SimpleElection) {
-  NextReplyNetworkInterface<DummyState> network;
-  InMemoryStorage<DummyState> storage(1, {}, {{1}, {1}});
-  RaftMemberImpl<DummyState> member(network, storage, "a", test_config5);
-
-  member.StartNewElection();
-
-  std::unique_lock<std::mutex> lock(member.mutex_);
-
-  PeerRpcReply next_reply;
-  next_reply.type = RpcType::REQUEST_VOTE;
-
-  network.on_request_ = [](const PeerRpcRequest<DummyState> &request) {
-    ASSERT_EQ(request.type, RpcType::REQUEST_VOTE);
-    ASSERT_EQ(request.request_vote.candidate_term, 2);
-    ASSERT_EQ(request.request_vote.candidate_id, "a");
-    ASSERT_EQ(request.request_vote.last_log_index, 2);
-    ASSERT_EQ(request.request_vote.last_log_term, 1);
-  };
-
-  /* member 'b' first voted for us */
-  next_reply.request_vote.term = 2;
-  next_reply.request_vote.vote_granted = true;
-  network.next_reply_ = next_reply;
-  member.RequestVote("b", *member.peer_states_["b"], lock);
-  EXPECT_EQ(member.mode_, RaftMode::CANDIDATE);
-  EXPECT_TRUE(member.peer_states_["b"]->request_vote_done);
-  EXPECT_TRUE(member.peer_states_["b"]->voted_for_me);
-
-  /* member 'c' didn't */
-  next_reply.request_vote.vote_granted = false;
-  network.next_reply_ = next_reply;
-  member.RequestVote("c", *member.peer_states_["c"], lock);
-  EXPECT_TRUE(member.peer_states_["c"]->request_vote_done);
-  EXPECT_FALSE(member.peer_states_["c"]->voted_for_me);
-  EXPECT_EQ(member.mode_, RaftMode::CANDIDATE);
-
-  /* but member 'd' did */
-  next_reply.request_vote.vote_granted = true;
-  network.next_reply_ = next_reply;
-  member.RequestVote("d", *member.peer_states_["d"], lock);
-  EXPECT_TRUE(member.peer_states_["d"]->request_vote_done);
-  EXPECT_TRUE(member.peer_states_["d"]->voted_for_me);
-  EXPECT_EQ(member.mode_, RaftMode::LEADER);
-
-  /* no-op entry should be at the end of leader's log */
-  EXPECT_EQ(storage.log_.back().term, 2);
-  EXPECT_EQ(storage.log_.back().command, std::experimental::nullopt);
-}
-
-TEST(AppendEntries, SimpleLogSync) {
-  NextReplyNetworkInterface<DummyState> network;
-  InMemoryStorage<DummyState> storage(3, {}, {{1}, {1}, {2}, {3}});
-  RaftMemberImpl<DummyState> member(network, storage, "a", test_config2);
-
-  member.mode_ = RaftMode::LEADER;
-
-  std::unique_lock<std::mutex> lock(member.mutex_);
-
-  PeerRpcReply reply;
-  reply.type = RpcType::APPEND_ENTRIES;
-
-  reply.append_entries.term = 3;
-  reply.append_entries.success = false;
-  network.next_reply_ = reply;
-
-  LogIndex expected_prev_log_index;
-  TermId expected_prev_log_term;
-  std::vector<LogEntry<DummyState>> expected_entries;
-
-  network.on_request_ = [&](const PeerRpcRequest<DummyState> &request) {
-    EXPECT_EQ(request.type, RpcType::APPEND_ENTRIES);
-    EXPECT_EQ(request.append_entries.leader_term, 3);
-    EXPECT_EQ(request.append_entries.leader_id, "a");
-    EXPECT_EQ(request.append_entries.prev_log_index, expected_prev_log_index);
-    EXPECT_EQ(request.append_entries.prev_log_term, expected_prev_log_term);
-    EXPECT_EQ(request.append_entries.entries, expected_entries);
-  };
-
-  /* initial state after election */
-  auto &peer_state = *member.peer_states_["b"];
-  peer_state.match_index = 0;
-  peer_state.next_index = 5;
-  peer_state.suppress_log_entries = true;
-
-  /* send a heartbeat and find out logs don't match */
-  expected_prev_log_index = 4;
-  expected_prev_log_term = 3;
-  expected_entries = {};
-  member.AppendEntries("b", peer_state, lock);
-  EXPECT_EQ(peer_state.match_index, 0);
-  EXPECT_EQ(peer_state.next_index, 4);
-  EXPECT_EQ(member.commit_index_, 0);
-
-  /* move `next_index` until we find a match, `expected_entries` will be empty
-   * because `suppress_log_entries` will be true */
-  expected_entries = {};
-
-  expected_prev_log_index = 3;
-  expected_prev_log_term = 2;
-  member.AppendEntries("b", peer_state, lock);
-  EXPECT_EQ(peer_state.match_index, 0);
-  EXPECT_EQ(peer_state.next_index, 3);
-  EXPECT_EQ(peer_state.suppress_log_entries, true);
-  EXPECT_EQ(member.commit_index_, 0);
-
-  expected_prev_log_index = 2;
-  expected_prev_log_term = 1;
-  member.AppendEntries("b", peer_state, lock);
-  EXPECT_EQ(peer_state.match_index, 0);
-  EXPECT_EQ(peer_state.next_index, 2);
-  EXPECT_EQ(peer_state.suppress_log_entries, true);
-  EXPECT_EQ(member.commit_index_, 0);
-
-  /* we found a match */
-  reply.append_entries.success = true;
-  network.next_reply_ = reply;
-
-  expected_prev_log_index = 1;
-  expected_prev_log_term = 1;
-  member.AppendEntries("b", peer_state, lock);
-  EXPECT_EQ(peer_state.match_index, 1);
-  EXPECT_EQ(peer_state.next_index, 2);
-  EXPECT_EQ(peer_state.suppress_log_entries, false);
-  EXPECT_EQ(member.commit_index_, 4);
-
-  /* now sync them */
-  expected_prev_log_index = 1;
-  expected_prev_log_term = 1;
-  expected_entries = {{1}, {2}, {3}};
-  member.AppendEntries("b", peer_state, lock);
-  EXPECT_EQ(peer_state.match_index, 4);
-  EXPECT_EQ(peer_state.next_index, 5);
-  EXPECT_EQ(peer_state.suppress_log_entries, false);
-  EXPECT_EQ(member.commit_index_, 4);
-
-  /* heartbeat after successful log sync */
-  expected_prev_log_index = 4;
-  expected_prev_log_term = 3;
-  expected_entries = {};
-  member.AppendEntries("b", peer_state, lock);
-  EXPECT_EQ(peer_state.match_index, 4);
-  EXPECT_EQ(peer_state.next_index, 5);
-  EXPECT_EQ(member.commit_index_, 4);
-
-  /* replicate a newly appended entry */
-  storage.AppendLogEntry({3});
-
-  expected_prev_log_index = 4;
-  expected_prev_log_term = 3;
-  expected_entries = {{3}};
-  member.AppendEntries("b", peer_state, lock);
-  EXPECT_EQ(peer_state.match_index, 5);
-  EXPECT_EQ(peer_state.next_index, 6);
-  EXPECT_EQ(member.commit_index_, 5);
-}
-
-template <class TestParam>
-class RaftMemberParamTest : public ::testing::TestWithParam<TestParam> {
- public:
-  virtual void SetUp() {
-    /* Some checks to verify that the test case is valid. */
-
-    /* Member's term should be greater than or equal to last log term. */
-    ASSERT_GE(storage_.term_, storage_.GetLogTerm(storage_.GetLastLogIndex()));
-
-    ASSERT_GE(peer_storage_.term_,
-              peer_storage_.GetLogTerm(peer_storage_.GetLastLogIndex()));
-
-    /* If two logs match at some index, the entire prefix should match. */
-    LogIndex pos =
-        std::min(storage_.GetLastLogIndex(), peer_storage_.GetLastLogIndex());
-
-    for (; pos > 0; --pos) {
-      if (storage_.GetLogEntry(pos) == peer_storage_.GetLogEntry(pos)) {
-        break;
-      }
-    }
-
-    for (; pos > 0; --pos) {
-      ASSERT_EQ(storage_.GetLogEntry(pos), peer_storage_.GetLogEntry(pos));
-    }
-  }
-
-  RaftMemberParamTest(InMemoryStorage<DummyState> storage,
-                      InMemoryStorage<DummyState> peer_storage)
-      : network_(NoOpNetworkInterface<DummyState>()),
-        storage_(storage),
-        member_(network_, storage_, "a", test_config3),
-        peer_storage_(peer_storage) {}
-
-  NoOpNetworkInterface<DummyState> network_;
-  InMemoryStorage<DummyState> storage_;
-  RaftMemberImpl<DummyState> member_;
-
-  InMemoryStorage<DummyState> peer_storage_;
-};
-
-struct OnRequestVoteTestParam {
-  TermId term;
-  std::experimental::optional<MemberId> voted_for;
-  std::vector<LogEntry<DummyState>> log;
-
-  TermId peer_term;
-  std::vector<LogEntry<DummyState>> peer_log;
-
-  bool expected_reply;
-};
-
-class OnRequestVoteTest : public RaftMemberParamTest<OnRequestVoteTestParam> {
- public:
-  OnRequestVoteTest()
-      : RaftMemberParamTest(
-            InMemoryStorage<DummyState>(GetParam().term, GetParam().voted_for,
-                                        GetParam().log),
-            InMemoryStorage<DummyState>(GetParam().peer_term, {},
-                                        GetParam().peer_log)) {}
-  virtual ~OnRequestVoteTest() {}
-};
-
-TEST_P(OnRequestVoteTest, RequestVoteTest) {
-  auto reply = member_.OnRequestVote(
-      {GetParam().peer_term, "b", peer_storage_.GetLastLogIndex(),
-       peer_storage_.GetLogTerm(peer_storage_.GetLastLogIndex())});
-
-  EXPECT_EQ(reply.vote_granted, GetParam().expected_reply);
-
-  /* Our term should always be at least as large as sender's term. */
-  /* If we accepted the request, our term should be equal to candidate's term
-   * and voted_for should be set. */
-  EXPECT_EQ(reply.term, std::max(GetParam().peer_term, GetParam().term));
-  EXPECT_EQ(storage_.term_, std::max(GetParam().peer_term, GetParam().term));
-  EXPECT_EQ(storage_.voted_for_,
-            reply.vote_granted ? "b" : GetParam().voted_for);
-}
-
-/* Member 'b' is starting an election for term 5 and sending RequestVote RPC
- * to 'a'. Logs are empty so log-up-to-date check will always pass. */
-INSTANTIATE_TEST_CASE_P(
-    TermAndVotedForCheck, OnRequestVoteTest,
-    Values(
-        /* we didn't vote for anyone in a smaller term -> accept */
-        OnRequestVoteTestParam{3, {}, {}, 5, {}, true},
-        /* we voted for someone in smaller term -> accept */
-        OnRequestVoteTestParam{4, "c", {}, 5, {}, true},
-        /* equal term but we didn't vote for anyone in it -> accept */
-        OnRequestVoteTestParam{5, {}, {}, 5, {}, true},
-        /* equal term but we voted for this candidate -> accept */
-        OnRequestVoteTestParam{5, "b", {}, 5, {}, true},
-        /* equal term but we voted for someone else -> decline */
-        OnRequestVoteTestParam{5, "c", {}, 5, {}, false},
-        /* larger term and haven't voted for anyone -> decline */
-        OnRequestVoteTestParam{6, {}, {}, 5, {}, false},
-        /* larger term and we voted for someone else -> decline */
-        OnRequestVoteTestParam{6, "a", {}, 5, {}, false}));
-
-/* Member 'a' log:
- *     1   2   3   4   5   6   7
- *   | 1 | 1 | 1 | 2 | 3 | 3 |
- *
- * It is in term 5.
- */
-
-/* Member 'b' is sending RequestVote RPC to 'a' for term 8.  */
-INSTANTIATE_TEST_CASE_P(
-    LogUpToDateCheck, OnRequestVoteTest,
-    Values(
-        /* candidate's last log term is smaller -> decline */
-        OnRequestVoteTestParam{5,
-                               {},
-                               {{1}, {1}, {1}, {2}, {3}, {3}},
-                               8,
-                               {{1}, {1}, {1}, {2}},
-                               false},
-        /* candidate's last log term is smaller -> decline */
-        OnRequestVoteTestParam{5,
-                               {},
-                               {{1}, {1}, {1}, {2}, {3}, {3}},
-                               8,
-                               {{1}, {1}, {1}, {2}, {2}, {2}, {2}},
-                               false},
-        /* candidate's term is equal, but our log is longer -> decline */
-        OnRequestVoteTestParam{5,
-                               {},
-                               {{1}, {1}, {1}, {2}, {3}, {3}},
-                               8,
-                               {{1}, {1}, {1}, {2}, {3}},
-                               false},
-        /* equal logs -> accept */
-        OnRequestVoteTestParam{5,
-                               {},
-                               {{1}, {1}, {1}, {2}, {3}, {3}},
-                               8,
-                               {{1}, {1}, {1}, {2}, {3}, {3}},
-                               true},
-        /* candidate's term is larger -> accept */
-        OnRequestVoteTestParam{5,
-                               {},
-                               {{1}, {1}, {1}, {2}, {3}, {3}},
-                               8,
-                               {{1}, {1}, {1}, {2}, {4}},
-                               true},
-        /* equal terms, but candidate's log is longer -> accept */
-        OnRequestVoteTestParam{5,
-                               {},
-                               {{1}, {1}, {1}, {2}, {3}, {3}},
-                               8,
-                               {{1}, {1}, {1}, {2}, {3}, {3}, {3}},
-                               true},
-        /* candidate's last log term is larger -> accept */
-        OnRequestVoteTestParam{5,
-                               {},
-                               {{1}, {1}, {1}, {2}, {3}, {3}},
-                               8,
-                               {{1}, {2}, {3}, {4}, {5}},
-                               true}));
-
-struct OnAppendEntriesTestParam {
-  TermId term;
-  std::vector<LogEntry<DummyState>> log;
-
-  TermId peer_term;
-  std::vector<LogEntry<DummyState>> peer_log;
-  LogIndex peer_next_index;
-
-  bool expected_reply;
-  std::vector<LogEntry<DummyState>> expected_log;
-};
-
-class OnAppendEntriesTest
-    : public RaftMemberParamTest<OnAppendEntriesTestParam> {
- public:
-  OnAppendEntriesTest()
-      : RaftMemberParamTest(
-            InMemoryStorage<DummyState>(GetParam().term, {}, GetParam().log),
-            InMemoryStorage<DummyState>(GetParam().peer_term, {},
-                                        GetParam().peer_log)) {}
-  virtual ~OnAppendEntriesTest() {}
-};
-
-TEST_P(OnAppendEntriesTest, All) {
-  auto last_log_index = GetParam().peer_next_index - 1;
-  auto last_log_term = peer_storage_.GetLogTerm(last_log_index);
-  auto entries = peer_storage_.GetLogSuffix(GetParam().peer_next_index);
-  auto reply = member_.OnAppendEntries(
-      {GetParam().peer_term, "b", last_log_index, last_log_term, entries, 0});
-
-  EXPECT_EQ(reply.success, GetParam().expected_reply);
-  EXPECT_EQ(reply.term, std::max(GetParam().peer_term, GetParam().term));
-  EXPECT_EQ(storage_.log_, GetParam().expected_log);
-}
-
-/* Member 'a' received AppendEntries RPC from member 'b'. The request will
- * contain no log entries, representing just a heartbeat, as it is not
- * important in these scenarios. */
-INSTANTIATE_TEST_CASE_P(
-    TermAndLogConsistencyCheck, OnAppendEntriesTest,
-    Values(
-        /* sender has stale term -> decline */
-        OnAppendEntriesTestParam{/* my term*/ 8,
-                                 {{1}, {1}, {2}},
-                                 7,
-                                 {{1}, {1}, {2}, {3}, {4}, {5}, {5}, {6}},
-                                 7,
-                                 false,
-                                 {{1}, {1}, {2}}},
-        /* we're missing entries 4, 5 and 6 -> decline, but update term */
-        OnAppendEntriesTestParam{4,
-                                 {{1}, {1}, {2}},
-                                 8,
-                                 {{1}, {1}, {2}, {3}, {4}, {5}, {5}, {6}},
-                                 7,
-                                 false,
-                                 {{1}, {1}, {2}}},
-        /* we're missing entry 4 -> decline, but update term */
-        OnAppendEntriesTestParam{5,
-                                 {{1}, {1}, {2}},
-                                 8,
-                                 {{1}, {1}, {2}, {3}, {4}, {5}, {5}, {6}},
-                                 5,
-                                 false,
-                                 {{1}, {1}, {2}}},
-        /* log terms don't match at entry 4 -> decline, but update term */
-        OnAppendEntriesTestParam{5,
-                                 {{1}, {1}, {2}},
-                                 8,
-                                 {{1}, {1}, {3}, {3}, {4}, {5}, {5}, {6}},
-                                 4,
-                                 false,
-                                 {{1}, {1}, {2}}},
-        /* logs match -> accept and update term */
-        OnAppendEntriesTestParam{5,
-                                 {{1}, {1}, {2}},
-                                 8,
-                                 {{1}, {1}, {2}, {3}, {4}, {5}, {5}, {6}},
-                                 4,
-                                 true,
-                                 {{1}, {1}, {2}, {3}, {4}, {5}, {5}, {6}}},
-        /* now follow some log truncation tests */
-        /* no truncation, append a single entry */
-        OnAppendEntriesTestParam{
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}},
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}},
-            9,
-            true,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}},
-        /* no truncation, append multiple entries */
-        OnAppendEntriesTestParam{
-            8,
-            {{1}, {1}, {1}, {4}},
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}},
-            4,
-            true,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}},
-        /* no truncation, leader's log is prefix of ours */
-        OnAppendEntriesTestParam{
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}, {6}},
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}},
-            4,
-            true,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}, {6}}},
-        /* another one, now with entries from newer term */
-        OnAppendEntriesTestParam{
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}, {7}, {7}},
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}},
-            4,
-            true,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}, {7}, {7}}},
-        /* no truncation, partial match between our log and appended entries
-         */
-        OnAppendEntriesTestParam{
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {5}},
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}},
-            4,
-            true,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}},
-        /* truncate suffix */
-        OnAppendEntriesTestParam{
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {4}, {4}},
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}},
-            5,
-            true,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}},
-        /* truncate suffix, with partial match between our log and appended
-           entries */
-        OnAppendEntriesTestParam{
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {4}, {4}},
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}},
-            4,
-            true,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}},
-        /* delete whole log */
-        OnAppendEntriesTestParam{
-            8,
-            {{5}},
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}},
-            1,
-            true,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}},
-        /* append on empty log */
-        OnAppendEntriesTestParam{
-            8,
-            {{}},
-            8,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}},
-            1,
-            true,
-            {{1}, {1}, {1}, {4}, {4}, {5}, {5}, {6}, {6}, {6}}}));
-
-TEST(RaftMemberTest, AddCommand) {
-  NextReplyNetworkInterface<IntState> network;
-
-  std::vector<IntState::Change> changes = {{IntState::Change::Type::ADD, 5},
-                                           {IntState::Change::Type::ADD, 10}};
-
-  network.on_request_ = [&network, num_calls = 0 ](
-      const PeerRpcRequest<IntState> &request) mutable {
-    ++num_calls;
-    PeerRpcReply reply;
-
-    if (num_calls == 1) {
-      reply.type = RpcType::REQUEST_VOTE;
-      reply.request_vote.term = 1;
-      reply.request_vote.vote_granted = true;
-    } else {
-      reply.type = RpcType::APPEND_ENTRIES;
-      reply.append_entries.term = 1;
-      reply.append_entries.success = true;
-    }
-
-    network.next_reply_ = reply;
-  };
-
-  InMemoryStorage<IntState> storage(0, {}, {});
-  RaftMember<IntState> member(network, storage, "a", test_config2);
-
-  std::this_thread::sleep_for(500ms);
-
-  member.AddCommand(changes[0], false);
-  member.AddCommand(changes[1], true);
-
-  ASSERT_EQ(storage.log_.size(), 3);
-  EXPECT_EQ(storage.log_[0].command, std::experimental::nullopt);
-  EXPECT_TRUE(storage.log_[1].command &&
-              *storage.log_[1].command == changes[0]);
-  EXPECT_TRUE(storage.log_[2].command &&
-              *storage.log_[2].command == changes[1]);
-}
diff --git a/tests/unit/raft_storage.cpp b/tests/unit/raft_storage.cpp
deleted file mode 100644
index 0d101dced..000000000
--- a/tests/unit/raft_storage.cpp
+++ /dev/null
@@ -1,71 +0,0 @@
-#include <experimental/optional>
-
-#include "gtest/gtest.h"
-
-#include "communication/raft/storage/file.hpp"
-#include "communication/raft/test_utils.hpp"
-
-using communication::raft::LogEntry;
-using communication::raft::SimpleFileStorage;
-using communication::raft::test_utils::IntState;
-
-TEST(SimpleFileStorageTest, All) {
-  typedef LogEntry<IntState> Log;
-  auto GetLog = [](int term, int d) {
-    return Log{term, IntState::Change{IntState::Change::Type::SET, d}};
-  };
-
-  {
-    SimpleFileStorage<IntState> storage(fs::path("raft_storage_test_dir"));
-    EXPECT_EQ(storage.GetTermAndVotedFor().first, 0);
-    EXPECT_EQ(storage.GetTermAndVotedFor().second, std::experimental::nullopt);
-    EXPECT_EQ(storage.GetLastLogIndex(), 0);
-
-    storage.WriteTermAndVotedFor(1, "a");
-    EXPECT_EQ(storage.GetTermAndVotedFor().first, 1);
-    EXPECT_EQ(*storage.GetTermAndVotedFor().second, "a");
-
-    storage.AppendLogEntry(GetLog(1, 1));
-    storage.AppendLogEntry(GetLog(1, 2));
-
-    EXPECT_EQ(storage.GetLastLogIndex(), 2);
-
-    EXPECT_EQ(storage.GetLogSuffix(1),
-              std::vector<Log>({GetLog(1, 1), GetLog(1, 2)}));
-  }
-
-  {
-    SimpleFileStorage<IntState> storage(fs::path("raft_storage_test_dir"));
-
-    EXPECT_EQ(storage.GetTermAndVotedFor().first, 1);
-    EXPECT_EQ(*storage.GetTermAndVotedFor().second, "a");
-    EXPECT_EQ(storage.GetLastLogIndex(), 2);
-    EXPECT_EQ(storage.GetLogSuffix(1),
-              std::vector<Log>({GetLog(1, 1), GetLog(1, 2)}));
-
-    storage.TruncateLogSuffix(2);
-    EXPECT_EQ(storage.GetLogSuffix(1), std::vector<Log>({GetLog(1, 1)}));
-
-    storage.WriteTermAndVotedFor(2, std::experimental::nullopt);
-    storage.AppendLogEntry(GetLog(2, 3));
-
-    EXPECT_EQ(storage.GetTermAndVotedFor().first, 2);
-    EXPECT_EQ(storage.GetTermAndVotedFor().second, std::experimental::nullopt);
-    EXPECT_EQ(storage.GetLogSuffix(1),
-              std::vector<Log>({GetLog(1, 1), GetLog(2, 3)}));
-  }
-
-  {
-    SimpleFileStorage<IntState> storage(fs::path("raft_storage_test_dir"));
-
-    EXPECT_EQ(storage.GetTermAndVotedFor().first, 2);
-    EXPECT_EQ(storage.GetTermAndVotedFor().second, std::experimental::nullopt);
-    EXPECT_EQ(storage.GetLogSuffix(1),
-              std::vector<Log>({GetLog(1, 1), GetLog(2, 3)}));
-  }
-
-  fs::remove("raft_storage_test_dir/metadata");
-  fs::remove("raft_storage_test_dir/1");
-  fs::remove("raft_storage_test_dir/2");
-  fs::remove("raft_storage_test_dir");
-}
diff --git a/tests/unit/rpc.cpp b/tests/unit/rpc.cpp
deleted file mode 100644
index d22bbeee5..000000000
--- a/tests/unit/rpc.cpp
+++ /dev/null
@@ -1,175 +0,0 @@
-#include <thread>
-
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "boost/archive/text_iarchive.hpp"
-#include "boost/archive/text_oarchive.hpp"
-#include "boost/serialization/access.hpp"
-#include "boost/serialization/base_object.hpp"
-#include "boost/serialization/export.hpp"
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-
-#include "communication/rpc/client.hpp"
-#include "communication/rpc/client_pool.hpp"
-#include "communication/rpc/messages.hpp"
-#include "communication/rpc/server.hpp"
-#include "utils/timer.hpp"
-
-using namespace communication::rpc;
-using namespace std::literals::chrono_literals;
-
-struct SumReq : public Message {
-  SumReq(int x, int y) : x(x), y(y) {}
-  int x;
-  int y;
-
- private:
-  friend class boost::serialization::access;
-  SumReq() {}  // Needed for serialization.
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &boost::serialization::base_object<Message>(*this);
-    ar &x;
-    ar &y;
-  }
-};
-BOOST_CLASS_EXPORT(SumReq);
-
-struct SumRes : public Message {
-  SumRes(int sum) : sum(sum) {}
-  int sum;
-
- private:
-  friend class boost::serialization::access;
-  SumRes() {}  // Needed for serialization.
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &boost::serialization::base_object<Message>(*this);
-    ar &sum;
-  }
-};
-BOOST_CLASS_EXPORT(SumRes);
-using Sum = RequestResponse<SumReq, SumRes>;
-
-struct EchoMessage : public Message {
-  EchoMessage(const std::string &data) : data(data) {}
-  std::string data;
-
- private:
-  friend class boost::serialization::access;
-  EchoMessage() {}  // Needed for serialization.
-
-  template <class TArchive>
-  void serialize(TArchive &ar, unsigned int) {
-    ar &boost::serialization::base_object<Message>(*this);
-    ar &data;
-  }
-};
-BOOST_CLASS_EXPORT(EchoMessage);
-
-using Echo = RequestResponse<EchoMessage, EchoMessage>;
-
-TEST(Rpc, Call) {
-  Server server({"127.0.0.1", 0});
-  server.Register<Sum>([](const SumReq &request) {
-    return std::make_unique<SumRes>(request.x + request.y);
-  });
-  std::this_thread::sleep_for(100ms);
-
-  Client client(server.endpoint());
-  auto sum = client.Call<Sum>(10, 20);
-  ASSERT_TRUE(sum);
-  EXPECT_EQ(sum->sum, 30);
-}
-
-TEST(Rpc, Abort) {
-  Server server({"127.0.0.1", 0});
-  server.Register<Sum>([](const SumReq &request) {
-    std::this_thread::sleep_for(500ms);
-    return std::make_unique<SumRes>(request.x + request.y);
-  });
-  std::this_thread::sleep_for(100ms);
-
-  Client client(server.endpoint());
-
-  std::thread thread([&client]() {
-    std::this_thread::sleep_for(100ms);
-    LOG(INFO) << "Shutting down the connection!";
-    client.Abort();
-  });
-
-  utils::Timer timer;
-  auto sum = client.Call<Sum>(10, 20);
-  EXPECT_FALSE(sum);
-  EXPECT_LT(timer.Elapsed(), 200ms);
-
-  thread.join();
-}
-
-TEST(Rpc, ClientPool) {
-  Server server({"127.0.0.1", 0});
-  server.Register<Sum>([](const SumReq &request) {
-    std::this_thread::sleep_for(100ms);
-    return std::make_unique<SumRes>(request.x + request.y);
-  });
-  std::this_thread::sleep_for(100ms);
-
-  Client client(server.endpoint());
-
-  /* these calls should take more than 400ms because we're using a regular
-   * client */
-  auto get_sum_client = [&client](int x, int y) {
-    auto sum = client.Call<Sum>(x, y);
-    ASSERT_TRUE(sum);
-    EXPECT_EQ(sum->sum, x + y);
-  };
-
-  utils::Timer t1;
-  std::vector<std::thread> threads;
-  for (int i = 0; i < 4; ++i) {
-    threads.emplace_back(get_sum_client, 2 * i, 2 * i + 1);
-  }
-  for (int i = 0; i < 4; ++i) {
-    threads[i].join();
-  }
-  threads.clear();
-
-  EXPECT_GE(t1.Elapsed(), 400ms);
-
-  ClientPool pool(server.endpoint());
-
-  /* these calls shouldn't take much more than 100ms because they execute in
-   * parallel */
-  auto get_sum = [&pool](int x, int y) {
-    auto sum = pool.Call<Sum>(x, y);
-    ASSERT_TRUE(sum);
-    EXPECT_EQ(sum->sum, x + y);
-  };
-
-  utils::Timer t2;
-  for (int i = 0; i < 4; ++i) {
-    threads.emplace_back(get_sum, 2 * i, 2 * i + 1);
-  }
-  for (int i = 0; i < 4; ++i) {
-    threads[i].join();
-  }
-  EXPECT_LE(t2.Elapsed(), 200ms);
-}
-
-TEST(Rpc, LargeMessage) {
-  Server server({"127.0.0.1", 0});
-  server.Register<Echo>([](const EchoMessage &request) {
-    return std::make_unique<EchoMessage>(request.data);
-  });
-  std::this_thread::sleep_for(100ms);
-
-  std::string testdata(100000, 'a');
-
-  Client client(server.endpoint());
-  auto echo = client.Call<Echo>(testdata);
-  ASSERT_TRUE(echo);
-  EXPECT_EQ(echo->data, testdata);
-}
diff --git a/tests/unit/rpc_worker_clients.cpp b/tests/unit/rpc_worker_clients.cpp
deleted file mode 100644
index db4ba1cb7..000000000
--- a/tests/unit/rpc_worker_clients.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "boost/serialization/export.hpp"
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-
-#include "communication/rpc/messages.hpp"
-#include "communication/rpc/server.hpp"
-#include "distributed/cluster_discovery_master.hpp"
-#include "distributed/cluster_discovery_worker.hpp"
-#include "distributed/coordination_master.hpp"
-#include "distributed/coordination_worker.hpp"
-#include "distributed/rpc_worker_clients.hpp"
-#include "distributed/serialization.hpp"
-#include "io/network/endpoint.hpp"
-
-namespace distributed {
-
-RPC_NO_MEMBER_MESSAGE(IncrementCounterReq);
-RPC_NO_MEMBER_MESSAGE(IncrementCounterRes);
-
-using IncrementCounterRpc =
-    communication::rpc::RequestResponse<IncrementCounterReq,
-                                        IncrementCounterRes>;
-};  // namespace distributed
-
-BOOST_CLASS_EXPORT(distributed::IncrementCounterReq);
-BOOST_CLASS_EXPORT(distributed::IncrementCounterRes);
-
-class RpcWorkerClientsTest : public ::testing::Test {
- protected:
-  const io::network::Endpoint kLocalHost{"127.0.0.1", 0};
-  const int kWorkerCount = 2;
-  void SetUp() override {
-    master_coord_->SetRecoveryInfo(std::experimental::nullopt);
-    for (int i = 1; i <= kWorkerCount; ++i) {
-      workers_server_.emplace_back(
-          std::make_unique<communication::rpc::Server>(kLocalHost));
-
-      workers_coord_.emplace_back(
-          std::make_unique<distributed::WorkerCoordination>(
-              *workers_server_.back(), master_server_.endpoint()));
-
-      cluster_discovery_.emplace_back(
-          std::make_unique<distributed::ClusterDiscoveryWorker>(
-              *workers_server_.back(), *workers_coord_.back(),
-              rpc_workers_.GetClientPool(0)));
-
-      cluster_discovery_.back()->RegisterWorker(i);
-
-      workers_server_.back()->Register<distributed::IncrementCounterRpc>(
-          [this, i](const distributed::IncrementCounterReq &) {
-            workers_cnt_[i]++;
-            return std::make_unique<distributed::IncrementCounterRes>();
-          });
-    }
-  }
-
-  void TearDown() override {
-    std::vector<std::thread> wait_on_shutdown;
-    for (int i = 0; i < workers_coord_.size(); ++i) {
-      wait_on_shutdown.emplace_back([i, this]() {
-        workers_coord_[i]->WaitForShutdown();
-        workers_server_[i] = nullptr;
-      });
-    }
-
-    std::this_thread::sleep_for(300ms);
-
-    // Starts server shutdown and notifies the workers
-    master_coord_ = std::experimental::nullopt;
-    for (auto &worker : wait_on_shutdown) worker.join();
-  }
-
-  std::vector<std::unique_ptr<communication::rpc::Server>> workers_server_;
-  std::vector<std::unique_ptr<distributed::WorkerCoordination>> workers_coord_;
-  std::vector<std::unique_ptr<distributed::ClusterDiscoveryWorker>>
-      cluster_discovery_;
-  std::unordered_map<int, int> workers_cnt_;
-
-  communication::rpc::Server master_server_{kLocalHost};
-  std::experimental::optional<distributed::MasterCoordination> master_coord_{
-      master_server_.endpoint()};
-
-  distributed::RpcWorkerClients rpc_workers_{*master_coord_};
-  distributed::ClusterDiscoveryMaster cluster_disocvery_{
-      master_server_, *master_coord_, rpc_workers_};
-};
-
-TEST_F(RpcWorkerClientsTest, GetWorkerIds) {
-  EXPECT_THAT(rpc_workers_.GetWorkerIds(), testing::UnorderedElementsAreArray(
-                                               master_coord_->GetWorkerIds()));
-}
-
-TEST_F(RpcWorkerClientsTest, GetClientPool) {
-  auto &pool1 = rpc_workers_.GetClientPool(1);
-  auto &pool2 = rpc_workers_.GetClientPool(2);
-  EXPECT_NE(&pool1, &pool2);
-  EXPECT_EQ(&pool1, &rpc_workers_.GetClientPool(1));
-}
-
-TEST_F(RpcWorkerClientsTest, ExecuteOnWorker) {
-  auto execute = [](auto &client) -> void {
-    ASSERT_TRUE(client.template Call<distributed::IncrementCounterRpc>());
-  };
-
-  rpc_workers_.ExecuteOnWorker<void>(1, execute).get();
-  EXPECT_EQ(workers_cnt_[0], 0);
-  EXPECT_EQ(workers_cnt_[1], 1);
-  EXPECT_EQ(workers_cnt_[2], 0);
-}
-
-TEST_F(RpcWorkerClientsTest, ExecuteOnWorkers) {
-  auto execute = [](auto &client) -> void {
-    ASSERT_TRUE(client.template Call<distributed::IncrementCounterRpc>());
-  };
-
-  // Skip master
-  for (auto &future : rpc_workers_.ExecuteOnWorkers<void>(0, execute))
-    future.get();
-
-  EXPECT_EQ(workers_cnt_[0], 0);
-  EXPECT_EQ(workers_cnt_[1], 1);
-  EXPECT_EQ(workers_cnt_[2], 1);
-}
diff --git a/tests/unit/serialization.cpp b/tests/unit/serialization.cpp
deleted file mode 100644
index d8e07918e..000000000
--- a/tests/unit/serialization.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-#include <experimental/optional>
-#include <sstream>
-
-#include "gtest/gtest.h"
-
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "utils/serialization.hpp"
-
-using std::experimental::optional;
-using std::string_literals::operator""s;
-
-TEST(Serialization, Optional) {
-  std::stringstream ss;
-
-  optional<int> x1 = {};
-  optional<int> x2 = 42;
-  optional<int> y1, y2;
-
-  {
-    boost::archive::binary_oarchive ar(ss);
-    ar << x1;
-    ar << x2;
-  }
-
-  {
-    boost::archive::binary_iarchive ar(ss);
-    ar >> y1;
-    ar >> y2;
-  }
-
-  EXPECT_EQ(x1, y1);
-  EXPECT_EQ(x2, y2);
-}
-
-TEST(Serialization, Tuple) {
-  std::stringstream ss;
-
-  auto x1 = std::make_tuple("foo"s, 42, std::experimental::make_optional(3.14));
-  auto x2 = std::make_tuple();
-  auto x3 = std::make_tuple(1, 2, 3, 4, 5);
-
-  decltype(x1) y1;
-  decltype(x2) y2;
-  decltype(x3) y3;
-
-  {
-    boost::archive::binary_oarchive ar(ss);
-    ar << x1;
-    ar << x2;
-    ar << x3;
-  }
-
-  {
-    boost::archive::binary_iarchive ar(ss);
-    ar >> y1;
-    ar >> y2;
-    ar >> y3;
-  }
-
-  EXPECT_EQ(x1, y1);
-  EXPECT_EQ(x2, y2);
-  EXPECT_EQ(x3, y3);
-}
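
The deleted tests above exercised the std::experimental::optional and std::tuple support that "utils/serialization.hpp" layered on top of Boost.Serialization. The header itself is not shown in this patch, so the following is only a sketch of how the optional overloads are commonly written as a non-intrusive save/load pair in the boost::serialization namespace; the names and structure are assumptions, not the original code.

#include <experimental/optional>
#include <utility>

#include "boost/serialization/split_free.hpp"

namespace boost {
namespace serialization {

// Save: write a presence flag, then the value if one is present.
template <class TArchive, class T>
void save(TArchive &ar, const std::experimental::optional<T> &opt,
          const unsigned int) {
  bool has_value = static_cast<bool>(opt);
  ar << has_value;
  if (has_value) ar << *opt;
}

// Load: read the flag, then either read the value or clear the optional.
template <class TArchive, class T>
void load(TArchive &ar, std::experimental::optional<T> &opt,
          const unsigned int) {
  bool has_value;
  ar >> has_value;
  if (has_value) {
    T value;
    ar >> value;
    opt = std::move(value);
  } else {
    opt = std::experimental::nullopt;
  }
}

// Dispatch to save or load depending on the archive direction.
template <class TArchive, class T>
void serialize(TArchive &ar, std::experimental::optional<T> &opt,
               const unsigned int version) {
  split_free(ar, opt, version);
}

}  // namespace serialization
}  // namespace boost

With an overload set like this in scope, "ar << x1" and "ar >> y1" in the deleted test compile for optional<int> just as for any other serializable type.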
diff --git a/tests/unit/transaction_engine_distributed.cpp b/tests/unit/transaction_engine_distributed.cpp
deleted file mode 100644
index 22b241e78..000000000
--- a/tests/unit/transaction_engine_distributed.cpp
+++ /dev/null
@@ -1,150 +0,0 @@
-#include <algorithm>
-#include <mutex>
-#include <unordered_set>
-#include <vector>
-
-#include "gtest/gtest.h"
-
-#include "communication/rpc/server.hpp"
-#include "distributed/cluster_discovery_master.hpp"
-#include "distributed/coordination_master.hpp"
-#include "io/network/endpoint.hpp"
-#include "transactions/engine_master.hpp"
-#include "transactions/engine_rpc_messages.hpp"
-#include "transactions/engine_worker.hpp"
-
-using namespace tx;
-using namespace communication::rpc;
-using namespace distributed;
-
-class WorkerEngineTest : public testing::Test {
- protected:
-  const std::string local{"127.0.0.1"};
-
-  Server master_server_{{local, 0}};
-  MasterCoordination master_coordination_{master_server_.endpoint()};
-  RpcWorkerClients rpc_worker_clients_{master_coordination_};
-  ClusterDiscoveryMaster cluster_disocvery_{
-      master_server_, master_coordination_, rpc_worker_clients_};
-
-  MasterEngine master_{master_server_, rpc_worker_clients_};
-  ClientPool master_client_pool{master_server_.endpoint()};
-
-  WorkerEngine worker_{master_client_pool};
-};
-
-TEST_F(WorkerEngineTest, BeginOnWorker) {
-  worker_.Begin();
-  auto second = worker_.Begin();
-  EXPECT_EQ(master_.RunningTransaction(second->id_)->snapshot().size(), 1);
-}
-
-TEST_F(WorkerEngineTest, AdvanceOnWorker) {
-  auto tx = worker_.Begin();
-  auto cid = tx->cid();
-  EXPECT_EQ(worker_.Advance(tx->id_), cid + 1);
-}
-
-TEST_F(WorkerEngineTest, CommitOnWorker) {
-  auto tx = worker_.Begin();
-  auto tx_id = tx->id_;
-  worker_.Commit(*tx);
-  EXPECT_TRUE(master_.Info(tx_id).is_committed());
-}
-
-TEST_F(WorkerEngineTest, AbortOnWorker) {
-  auto tx = worker_.Begin();
-  auto tx_id = tx->id_;
-  worker_.Abort(*tx);
-  EXPECT_TRUE(master_.Info(tx_id).is_aborted());
-}
-
-TEST_F(WorkerEngineTest, RunningTransaction) {
-  master_.Begin();
-  master_.Begin();
-  worker_.RunningTransaction(1);
-  worker_.RunningTransaction(2);
-  int count = 0;
-  worker_.LocalForEachActiveTransaction([&count](Transaction &t) {
-    ++count;
-    if (t.id_ == 1) {
-      EXPECT_EQ(t.snapshot(),
-                tx::Snapshot(std::vector<tx::TransactionId>{}));
-    } else {
-      EXPECT_EQ(t.snapshot(), tx::Snapshot({1}));
-    }
-  });
-  EXPECT_EQ(count, 2);
-}
-
-TEST_F(WorkerEngineTest, Info) {
-  auto *tx_1 = master_.Begin();
-  auto *tx_2 = master_.Begin();
-  // We can't check active transactions in the worker (see comments there for
-  // info).
-  master_.Commit(*tx_1);
-  EXPECT_TRUE(master_.Info(1).is_committed());
-  EXPECT_TRUE(worker_.Info(1).is_committed());
-  master_.Abort(*tx_2);
-  EXPECT_TRUE(master_.Info(2).is_aborted());
-  EXPECT_TRUE(worker_.Info(2).is_aborted());
-}
-
-TEST_F(WorkerEngineTest, GlobalGcSnapshot) {
-  auto *tx_1 = master_.Begin();
-  master_.Begin();
-  master_.Commit(*tx_1);
-  EXPECT_EQ(master_.GlobalGcSnapshot(), tx::Snapshot({1, 2}));
-  EXPECT_EQ(worker_.GlobalGcSnapshot(), master_.GlobalGcSnapshot());
-}
-
-TEST_F(WorkerEngineTest, GlobalActiveTransactions) {
-  auto *tx_1 = master_.Begin();
-  master_.Begin();
-  auto *tx_3 = master_.Begin();
-  master_.Begin();
-  master_.Commit(*tx_1);
-  master_.Abort(*tx_3);
-  EXPECT_EQ(worker_.GlobalActiveTransactions(), tx::Snapshot({2, 4}));
-}
-
-TEST_F(WorkerEngineTest, LocalLast) {
-  master_.Begin();
-  EXPECT_EQ(worker_.LocalLast(), 0);
-  worker_.RunningTransaction(1);
-  EXPECT_EQ(worker_.LocalLast(), 1);
-  master_.Begin();
-  EXPECT_EQ(worker_.LocalLast(), 1);
-  master_.Begin();
-  EXPECT_EQ(worker_.LocalLast(), 1);
-  master_.Begin();
-  worker_.RunningTransaction(4);
-  EXPECT_EQ(worker_.LocalLast(), 4);
-}
-
-TEST_F(WorkerEngineTest, LocalForEachActiveTransaction) {
-  master_.Begin();
-  worker_.RunningTransaction(1);
-  master_.Begin();
-  master_.Begin();
-  master_.Begin();
-  worker_.RunningTransaction(4);
-  std::unordered_set<tx::TransactionId> local;
-  worker_.LocalForEachActiveTransaction(
-      [&local](Transaction &t) { local.insert(t.id_); });
-  EXPECT_EQ(local, std::unordered_set<tx::TransactionId>({1, 4}));
-}
-
-TEST_F(WorkerEngineTest, EnsureTxIdGreater) {
-  ASSERT_LE(master_.Begin()->id_, 40);
-  worker_.EnsureNextIdGreater(42);
-  EXPECT_EQ(master_.Begin()->id_, 43);
-  EXPECT_EQ(worker_.Begin()->id_, 44);
-}
-
-TEST_F(WorkerEngineTest, GlobalNext) {
-  auto tx = master_.Begin();
-  EXPECT_NE(worker_.LocalLast(), worker_.GlobalLast());
-  EXPECT_EQ(master_.LocalLast(), worker_.GlobalLast());
-  EXPECT_EQ(worker_.GlobalLast(), tx->id_);
-}
diff --git a/tools/src/CMakeLists.txt b/tools/src/CMakeLists.txt
index 6de12f1fb..a76923bd7 100644
--- a/tools/src/CMakeLists.txt
+++ b/tools/src/CMakeLists.txt
@@ -2,10 +2,6 @@
 add_executable(mg_import_csv mg_import_csv/main.cpp)
 target_link_libraries(mg_import_csv memgraph_lib)
 
-# StatsD Target
-add_executable(mg_statsd mg_statsd/main.cpp)
-target_link_libraries(mg_statsd memgraph_lib)
-
 # Strip the executable in release build.
 string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type)
 if (lower_build_type STREQUAL "release")
@@ -17,4 +13,4 @@ endif()
 install(TARGETS mg_import_csv RUNTIME DESTINATION bin)
 
 # Target for building all the tool executables.
-add_custom_target(tools DEPENDS mg_import_csv mg_statsd)
+add_custom_target(tools DEPENDS mg_import_csv)
diff --git a/tools/src/mg_import_csv/main.cpp b/tools/src/mg_import_csv/main.cpp
index f197c0fae..77272b2ff 100644
--- a/tools/src/mg_import_csv/main.cpp
+++ b/tools/src/mg_import_csv/main.cpp
@@ -470,6 +470,26 @@ std::string GetOutputPath() {
   } catch (const std::experimental::filesystem::filesystem_error &error) {
     LOG(FATAL) << error.what();
   }
+  // TODO: Remove this hack which deletes WAL files just to make snapshot
+  // recovery work. The newest snapshot without accompanying WAL files should
+  // be detected by memgraph and recovered correctly (or an error reported).
+  try {
+    auto wal_dir = durability_dir + "/wal";
+    if (std::experimental::filesystem::exists(wal_dir)) {
+      for (const auto &wal_file :
+           std::experimental::filesystem::directory_iterator(wal_dir)) {
+        if (!FLAGS_overwrite) {
+          LOG(FATAL) << "Durability directory isn't empty. Pass --overwrite to "
+                        "remove the old recovery data";
+        }
+        break;
+      }
+      LOG(WARNING) << "Removing old recovery data!";
+      std::experimental::filesystem::remove_all(wal_dir);
+    }
+  } catch (const std::experimental::filesystem::filesystem_error &error) {
+    LOG(FATAL) << error.what();
+  }
   int worker_id = 0;
   // TODO(dgleich): Remove this transaction id hack
   return std::string(
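
The hunk above makes mg_import_csv delete the whole WAL directory so that recovery from the freshly written snapshot succeeds. The TODO asks for memgraph itself to detect this situation instead. Purely as an illustrative sketch, not part of this patch: the check could compare modification times of the newest snapshot and the newest WAL file. The directory layout under durability_dir and the helper name SnapshotNewerThanWal are assumptions.

#include <algorithm>
#include <experimental/filesystem>
#include <string>

namespace fs = std::experimental::filesystem;

// Hypothetical helper: true when the newest snapshot is newer than every WAL
// file (or no WAL files exist), i.e. the case the hack above works around by
// deleting the WAL directory.
bool SnapshotNewerThanWal(const std::string &durability_dir) {
  auto newest_in = [](const fs::path &dir) {
    fs::file_time_type newest{};
    if (fs::exists(dir))
      for (const auto &file : fs::directory_iterator(dir))
        newest = std::max(newest, fs::last_write_time(file));
    return newest;
  };
  auto newest_snapshot = newest_in(fs::path(durability_dir) / "snapshots");
  auto newest_wal = newest_in(fs::path(durability_dir) / "wal");
  return newest_snapshot > newest_wal;
}

Given such a check, recovery could either proceed from the snapshot and ignore the stale WAL, or refuse and report the mismatch, instead of the import tool removing the files behind the user's back.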
diff --git a/tools/src/mg_statsd/main.cpp b/tools/src/mg_statsd/main.cpp
deleted file mode 100644
index 4bc68bceb..000000000
--- a/tools/src/mg_statsd/main.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-#include "gflags/gflags.h"
-
-#include "communication/rpc/server.hpp"
-#include "io/network/socket.hpp"
-#include "stats/stats.hpp"
-#include "stats/stats_rpc_messages.hpp"
-#include "utils/flag_validation.hpp"
-
-DEFINE_string(interface, "0.0.0.0",
-              "Communication interface on which to listen.");
-DEFINE_VALIDATED_int32(port, 2500, "Communication port on which to listen.",
-                       FLAG_IN_RANGE(0, std::numeric_limits<uint16_t>::max()));
-
-DEFINE_string(graphite_address, "", "Graphite address.");
-DEFINE_int32(graphite_port, 0, "Graphite port.");
-DEFINE_string(prefix, "", "Prefix for all collected stats");
-
-std::string GraphiteFormat(const stats::StatsReq &req) {
-  std::stringstream sstr;
-  if (!FLAGS_prefix.empty()) {
-    sstr << FLAGS_prefix << "." << req.metric_path;
-  } else {
-    sstr << req.metric_path;
-  }
-  for (const auto &tag : req.tags) {
-    sstr << ";" << tag.first << "=" << tag.second;
-  }
-  sstr << " " << req.value << " " << req.timestamp << "\n";
-  return sstr.str();
-}
-
-int main(int argc, char *argv[]) {
-  gflags::ParseCommandLineFlags(&argc, &argv, true);
-
-  communication::rpc::Server server({FLAGS_interface, (uint16_t)FLAGS_port});
-
-  io::network::Socket graphite_socket;
-
-  CHECK(graphite_socket.Connect(
-      {FLAGS_graphite_address, (uint16_t)FLAGS_graphite_port}))
-      << "Failed to connect to Graphite";
-  graphite_socket.SetKeepAlive();
-
-  server.Register<stats::StatsRpc>([&](const stats::StatsReq &req) {
-    LOG(INFO) << "StatsRpc::Received";
-    std::string data = GraphiteFormat(req);
-    graphite_socket.Write(data);
-    return std::make_unique<stats::StatsRes>();
-  });
-
-  server.Register<stats::BatchStatsRpc>([&](const stats::BatchStatsReq &req) {
-    // TODO(mtomic): batching?
-    LOG(INFO) << fmt::format("BatchStatsRpc::Received: {}",
-                             req.requests.size());
-    for (size_t i = 0; i < req.requests.size(); ++i) {
-      std::string data = GraphiteFormat(req.requests[i]);
-      graphite_socket.Write(data, i + 1 < req.requests.size());
-    }
-    return std::make_unique<stats::BatchStatsRes>();
-  });
-
-  std::this_thread::sleep_until(std::chrono::system_clock::time_point::max());
-
-  return 0;
-}
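
For reference, GraphiteFormat in the removed tool emitted the Graphite plaintext protocol: an optional prefix joined to the metric path with a dot, ";"-separated tag=value pairs, then the value and timestamp separated by spaces. With --prefix memgraph, a request carrying metric path "query.count", tag worker=1, value 42 and timestamp 1539852494 (all example values) would therefore be written to the socket as:

memgraph.query.count;worker=1 42 1539852494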
diff --git a/tools/tests/CMakeLists.txt b/tools/tests/CMakeLists.txt
index 4c6302039..672c30f82 100644
--- a/tools/tests/CMakeLists.txt
+++ b/tools/tests/CMakeLists.txt
@@ -3,9 +3,6 @@ include_directories(SYSTEM ${GTEST_INCLUDE_DIR})
 add_executable(mg_recovery_check mg_recovery_check.cpp)
 target_link_libraries(mg_recovery_check memgraph_lib gtest gtest_main)
 
-add_executable(mg_statsd_client statsd/mg_statsd_client.cpp)
-target_link_libraries(mg_statsd_client memgraph_lib)
-
 # Copy CSV data to CMake build dir
 configure_file(csv/comment_nodes.csv csv/comment_nodes.csv COPYONLY)
 configure_file(csv/forum_nodes.csv csv/forum_nodes.csv COPYONLY)
diff --git a/tools/tests/statsd/mg_statsd_client.cpp b/tools/tests/statsd/mg_statsd_client.cpp
deleted file mode 100644
index 778417724..000000000
--- a/tools/tests/statsd/mg_statsd_client.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-#include "gflags/gflags.h"
-#include "glog/logging.h"
-
-#include "stats/stats.hpp"
-#include "stats/stats_rpc_messages.hpp"
-#include "utils/string.hpp"
-
-// TODO (buda): move this logic to a unit test
-
-// TODO (mtomic): This is a hack. I don't know a better way to make this work.
-#include "boost/archive/binary_iarchive.hpp"
-#include "boost/archive/binary_oarchive.hpp"
-#include "boost/serialization/export.hpp"
-BOOST_CLASS_EXPORT(stats::StatsReq);
-BOOST_CLASS_EXPORT(stats::StatsRes);
-
-bool parse_input(const std::string &s, std::string &metric_path,
-                 std::vector<std::pair<std::string, std::string>> &tags,
-                 double &value) {
-  auto words = utils::Split(s, " ");
-  if (words.size() < 2) {
-    return false;
-  }
-
-  metric_path = words[0];
-
-  try {
-    value = std::stod(words.back());
-  } catch (std::exception &e) {
-    return false;
-  }
-
-  tags.clear();
-  for (size_t i = 1; i < words.size() - 1; ++i) {
-    auto tag_value = utils::Split(words[i], "=", 1);
-    if (tag_value.size() != 2) {
-      return false;
-    }
-    // TODO(mtomic): tags probably need to be escaped before sending to graphite
-    tags.emplace_back(tag_value[0], tag_value[1]);
-  }
-
-  return true;
-}
-
-int main(int argc, char *argv[]) {
-  gflags::ParseCommandLineFlags(&argc, &argv, true);
-
-  LOG(INFO) << "Usage: metric_path tag1=value1 ... tagn=valuen "
-               "metric_value";
-
-  stats::InitStatsLogging();
-
-  std::string line;
-  std::string metric_path;
-  std::vector<std::pair<std::string, std::string>> tags;
-  double value;
-
-  while (true) {
-    std::getline(std::cin, line);
-    if (!parse_input(line, metric_path, tags, value)) {
-      LOG(ERROR) << "Invalid input";
-      continue;
-    }
-    stats::LogStat(metric_path, value, tags);
-  }
-
-  return 0;
-}