diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4bfd4dfca..db39a4547 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -42,10 +42,6 @@ endif()
 
 project(memgraph LANGUAGES C CXX)
 
-# NOTE: once in a while this needs to be toggled to check headers are
-#       correct and PCH isn't masking any include issues
-set(CMAKE_DISABLE_PRECOMPILE_HEADERS OFF)
-
 #TODO: upgrade to cmake 3.24 + CheckIPOSupported
 #cmake_policy(SET CMP0138 NEW)
 #include(CheckIPOSupported)
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 8a5634f4c..ba8784b19 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -143,7 +143,7 @@ install(CODE "file(MAKE_DIRECTORY \$ENV{DESTDIR}/var/log/memgraph
 # Memgraph CSV Import Tool Executable
 # ----------------------------------------------------------------------------
 add_executable(mg_import_csv mg_import_csv.cpp)
-target_link_libraries(mg_import_csv mg-storage-v2)
+target_link_libraries(mg_import_csv mg-storage-v2 mg-dbms)
 
 # Strip the executable in release build.
 if(lower_build_type STREQUAL "release")
diff --git a/src/communication/CMakeLists.txt b/src/communication/CMakeLists.txt
index 965a9d7ec..b40426ee4 100644
--- a/src/communication/CMakeLists.txt
+++ b/src/communication/CMakeLists.txt
@@ -25,4 +25,3 @@ target_link_libraries(mg-communication Boost::headers Threads::Threads mg-utils
 find_package(OpenSSL REQUIRED)
 target_link_libraries(mg-communication ${OPENSSL_LIBRARIES})
 target_include_directories(mg-communication SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR})
-target_precompile_headers(mg-communication INTERFACE http/server.hpp <boost/beast/websocket.hpp> bolt/v1/session.hpp)
diff --git a/src/communication/bolt/v1/states/handlers.hpp b/src/communication/bolt/v1/states/handlers.hpp
index 0b4d84324..26c995719 100644
--- a/src/communication/bolt/v1/states/handlers.hpp
+++ b/src/communication/bolt/v1/states/handlers.hpp
@@ -209,7 +209,11 @@ State HandleRunV1(TSession &session, const State state, const Marker marker) {
 
   DMG_ASSERT(!session.encoder_buffer_.HasData(), "There should be no data to write in this state");
 
+#ifdef MG_ENTERPRISE
   spdlog::debug("[Run - {}] '{}'", session.GetCurrentDB(), query.ValueString());
+#else
+  spdlog::debug("[Run] '{}'", query.ValueString());
+#endif
 
   // Increment number of queries in the metrics
   IncrementQueryMetrics(session);
@@ -276,7 +280,11 @@ State HandleRunV4(TSession &session, const State state, const Marker marker) {
     return HandleFailure(session, e);
   }
 
+#ifdef MG_ENTERPRISE
   spdlog::debug("[Run - {}] '{}'", session.GetCurrentDB(), query.ValueString());
+#else
+  spdlog::debug("[Run] '{}'", query.ValueString());
+#endif
 
   // Increment number of queries in the metrics
   IncrementQueryMetrics(session);
diff --git a/src/dbms/CMakeLists.txt b/src/dbms/CMakeLists.txt
index 8796790e4..8ec1e0972 100644
--- a/src/dbms/CMakeLists.txt
+++ b/src/dbms/CMakeLists.txt
@@ -1,3 +1,3 @@
 
-add_library(mg-dbms STATIC database.cpp)
+add_library(mg-dbms STATIC database.cpp replication_handler.cpp inmemory/replication_handlers.cpp)
 target_link_libraries(mg-dbms mg-utils mg-storage-v2 mg-query)
diff --git a/src/dbms/database.cpp b/src/dbms/database.cpp
index 77d9087ef..b0862f913 100644
--- a/src/dbms/database.cpp
+++ b/src/dbms/database.cpp
@@ -10,6 +10,8 @@
 // licenses/APL.txt.
 
 #include "dbms/database.hpp"
+#include "dbms/inmemory/storage_helper.hpp"
+#include "dbms/replication_handler.hpp"
 #include "flags/storage_mode.hpp"
 #include "storage/v2/disk/storage.hpp"
 #include "storage/v2/inmemory/storage.hpp"
@@ -19,14 +21,15 @@ template struct memgraph::utils::Gatekeeper<memgraph::dbms::Database>;
 
 namespace memgraph::dbms {
 
-Database::Database(const storage::Config &config)
+Database::Database(storage::Config config, const replication::ReplicationState &repl_state)
     : trigger_store_(config.durability.storage_directory / "triggers"),
-      streams_{config.durability.storage_directory / "streams"} {
+      streams_{config.durability.storage_directory / "streams"},
+      repl_state_(&repl_state) {
   if (config.storage_mode == memgraph::storage::StorageMode::ON_DISK_TRANSACTIONAL || config.force_on_disk ||
       utils::DirExists(config.disk.main_storage_directory)) {
-    storage_ = std::make_unique<storage::DiskStorage>(config);
+    storage_ = std::make_unique<storage::DiskStorage>(std::move(config));
   } else {
-    storage_ = std::make_unique<storage::InMemoryStorage>(config, config.storage_mode);
+    storage_ = dbms::CreateInMemoryStorage(std::move(config), repl_state);
   }
 }
 
diff --git a/src/dbms/database.hpp b/src/dbms/database.hpp
index 22f477f2c..d2a36368b 100644
--- a/src/dbms/database.hpp
+++ b/src/dbms/database.hpp
@@ -46,7 +46,7 @@ class Database {
    *
    * @param config storage configuration
    */
-  explicit Database(const storage::Config &config);
+  explicit Database(storage::Config config, const replication::ReplicationState &repl_state);
 
   /**
    * @brief Returns the raw storage pointer.
@@ -56,6 +56,7 @@ class Database {
    * @return storage::Storage*
    */
   storage::Storage *storage() { return storage_.get(); }
+  storage::Storage const *storage() const { return storage_.get(); }
 
   /**
    * @brief Storage's Accessor
@@ -65,12 +66,12 @@ class Database {
    */
   std::unique_ptr<storage::Storage::Accessor> Access(
       std::optional<storage::IsolationLevel> override_isolation_level = {}) {
-    return storage_->Access(override_isolation_level);
+    return storage_->Access(override_isolation_level, repl_state_->IsMain());
   }
 
   std::unique_ptr<storage::Storage::Accessor> UniqueAccess(
       std::optional<storage::IsolationLevel> override_isolation_level = {}) {
-    return storage_->UniqueAccess(override_isolation_level);
+    return storage_->UniqueAccess(override_isolation_level, repl_state_->IsMain());
   }
 
   /**
@@ -157,6 +158,8 @@ class Database {
 
   // TODO: Move to a better place
   utils::SkipList<query::PlanCacheEntry> plan_cache_;  //!< Plan cache associated with the storage
+
+  const replication::ReplicationState *repl_state_;
 };
 
 }  // namespace memgraph::dbms
diff --git a/src/dbms/database_handler.hpp b/src/dbms/database_handler.hpp
index 4f142a341..a6b3b563b 100644
--- a/src/dbms/database_handler.hpp
+++ b/src/dbms/database_handler.hpp
@@ -51,7 +51,8 @@ class DatabaseHandler : public Handler<Database> {
    * @param config Storage configuration
    * @return HandlerT::NewResult
    */
-  HandlerT::NewResult New(std::string_view name, storage::Config config) {
+  HandlerT::NewResult New(std::string_view name, storage::Config config,
+                          const replication::ReplicationState &repl_state) {
     // Control that no one is using the same data directory
     if (std::any_of(begin(), end(), [&](auto &elem) {
           auto db_acc = elem.second.access();
@@ -62,7 +63,7 @@ class DatabaseHandler : public Handler<Database> {
       return NewError::EXISTS;
     }
     config.name = name;  // Set storage id via config
-    return HandlerT::New(std::piecewise_construct, name, config);
+    return HandlerT::New(std::piecewise_construct, name, config, repl_state);
   }
 
   /**
@@ -93,5 +94,4 @@ class DatabaseHandler : public Handler<Database> {
 };
 
 }  // namespace memgraph::dbms
-
 #endif
diff --git a/src/dbms/dbms_handler.hpp b/src/dbms/dbms_handler.hpp
index 124ee0ccf..990420bf8 100644
--- a/src/dbms/dbms_handler.hpp
+++ b/src/dbms/dbms_handler.hpp
@@ -26,7 +26,9 @@
 #include "auth/auth.hpp"
 #include "constants.hpp"
 #include "dbms/database.hpp"
+#ifdef MG_ENTERPRISE
 #include "dbms/database_handler.hpp"
+#endif
 #include "global.hpp"
 #include "query/config.hpp"
 #include "query/interpreter_context.hpp"
@@ -81,32 +83,35 @@ static inline nlohmann::json ToJson(const Statistics &stats) {
   return res;
 }
 
-#ifdef MG_ENTERPRISE
-
-using DeleteResult = utils::BasicResult<DeleteError>;
-
 /**
  * @brief Multi-database session contexts handler.
  */
 class DbmsHandler {
  public:
   using LockT = utils::RWLock;
+#ifdef MG_ENTERPRISE
+
   using NewResultT = utils::BasicResult<NewError, DatabaseAccess>;
+  using DeleteResult = utils::BasicResult<DeleteError>;
 
   /**
    * @brief Initialize the handler.
    *
-   * @param configs storage and interpreter configurations
+   * @param configs storage configuration
    * @param auth pointer to the global authenticator
    * @param recovery_on_startup restore databases (and its content) and authentication data
    * @param delete_on_drop when dropping delete any associated directories on disk
    */
-  DbmsHandler(storage::Config config, auto *auth, bool recovery_on_startup, bool delete_on_drop)
-      : lock_{utils::RWLock::Priority::READ}, default_config_{std::move(config)}, delete_on_drop_(delete_on_drop) {
+  DbmsHandler(storage::Config config, const replication::ReplicationState &repl_state, auto *auth,
+              bool recovery_on_startup, bool delete_on_drop)
+      : lock_{utils::RWLock::Priority::READ},
+        default_config_{std::move(config)},
+        repl_state_(repl_state),
+        delete_on_drop_(delete_on_drop) {
     // TODO: Decouple storage config from dbms config
     // TODO: Save individual db configs inside the kvstore and restore from there
-    storage::UpdatePaths(*default_config_, default_config_->durability.storage_directory / "databases");
-    const auto &db_dir = default_config_->durability.storage_directory;
+    storage::UpdatePaths(default_config_, default_config_.durability.storage_directory / "databases");
+    const auto &db_dir = default_config_.durability.storage_directory;
     const auto durability_dir = db_dir / ".durability";
     utils::EnsureDirOrDie(db_dir);
     utils::EnsureDirOrDie(durability_dir);
@@ -114,7 +119,6 @@ class DbmsHandler {
 
     // Generate the default database
     MG_ASSERT(!NewDefault_().HasError(), "Failed while creating the default DB.");
-
     // Recover previous databases
     if (recovery_on_startup) {
       for (const auto &[name, _] : *durability_) {
@@ -132,7 +136,21 @@ class DbmsHandler {
       }
     }
   }
+#else
+  /**
+   * @brief Initialize the handler. A single database is supported in community edition.
+   *
+   * @param configs storage configuration
+   */
+  DbmsHandler(storage::Config config, const replication::ReplicationState &repl_state)
+      : db_gatekeeper_{[&] {
+                         config.name = kDefaultDB;
+                         return std::move(config);
+                       }(),
+                       repl_state} {}
+#endif
 
+#ifdef MG_ENTERPRISE
   /**
    * @brief Create a new Database associated with the "name" database
    *
@@ -151,11 +169,24 @@ class DbmsHandler {
    * @return DatabaseAccess
    * @throw UnknownDatabaseException if database not found
    */
-  DatabaseAccess Get(std::string_view name) {
+  DatabaseAccess Get(std::string_view name = kDefaultDB) {
     std::shared_lock<LockT> rd(lock_);
     return Get_(name);
   }
+#else
+  /**
+   * @brief Get the context associated with the default database
+   *
+   * @return DatabaseAccess
+   */
+  DatabaseAccess Get() {
+    auto acc = db_gatekeeper_.access();
+    MG_ASSERT(acc, "Failed to get default database!");
+    return *acc;
+  }
+#endif
 
+#ifdef MG_ENTERPRISE
   /**
    * @brief Delete database.
    *
@@ -201,6 +232,7 @@ class DbmsHandler {
 
     return {};  // Success
   }
+#endif
 
   /**
    * @brief Return all active databases.
@@ -208,8 +240,12 @@ class DbmsHandler {
    * @return std::vector<std::string>
    */
   std::vector<std::string> All() const {
+#ifdef MG_ENTERPRISE
     std::shared_lock<LockT> rd(lock_);
     return db_handler_.All();
+#else
+    return {db_gatekeeper_.access()->get()->id()};
+#endif
   }
 
   /**
@@ -220,24 +256,30 @@ class DbmsHandler {
   Statistics Stats() {
     Statistics stats{};
     // TODO: Handle overflow?
+#ifdef MG_ENTERPRISE
     std::shared_lock<LockT> rd(lock_);
     for (auto &[_, db_gk] : db_handler_) {
+#else
+    {
+      auto &db_gk = db_gatekeeper_;
+#endif
       auto db_acc_opt = db_gk.access();
-      if (!db_acc_opt) continue;
-      auto &db_acc = *db_acc_opt;
-      const auto &info = db_acc->GetInfo();
-      const auto &storage_info = info.storage_info;
-      stats.num_vertex += storage_info.vertex_count;
-      stats.num_edges += storage_info.edge_count;
-      stats.triggers += info.triggers;
-      stats.streams += info.streams;
-      ++stats.num_databases;
-      stats.indices += storage_info.label_indices + storage_info.label_property_indices;
-      stats.constraints += storage_info.existence_constraints + storage_info.unique_constraints;
-      ++stats.storage_modes[(int)storage_info.storage_mode];
-      ++stats.isolation_levels[(int)storage_info.isolation_level];
-      stats.snapshot_enabled += storage_info.durability_snapshot_enabled;
-      stats.wal_enabled += storage_info.durability_wal_enabled;
+      if (db_acc_opt) {
+        auto &db_acc = *db_acc_opt;
+        const auto &info = db_acc->GetInfo();
+        const auto &storage_info = info.storage_info;
+        stats.num_vertex += storage_info.vertex_count;
+        stats.num_edges += storage_info.edge_count;
+        stats.triggers += info.triggers;
+        stats.streams += info.streams;
+        ++stats.num_databases;
+        stats.indices += storage_info.label_indices + storage_info.label_property_indices;
+        stats.constraints += storage_info.existence_constraints + storage_info.unique_constraints;
+        ++stats.storage_modes[(int)storage_info.storage_mode];
+        ++stats.isolation_levels[(int)storage_info.isolation_level];
+        stats.snapshot_enabled += storage_info.durability_snapshot_enabled;
+        stats.wal_enabled += storage_info.durability_wal_enabled;
+      }
     }
     return stats;
   }
@@ -249,13 +291,19 @@ class DbmsHandler {
    */
   std::vector<DatabaseInfo> Info() {
     std::vector<DatabaseInfo> res;
-    res.reserve(std::distance(db_handler_.cbegin(), db_handler_.cend()));
+#ifdef MG_ENTERPRISE
     std::shared_lock<LockT> rd(lock_);
+    res.reserve(std::distance(db_handler_.cbegin(), db_handler_.cend()));
     for (auto &[_, db_gk] : db_handler_) {
+#else
+    {
+      auto &db_gk = db_gatekeeper_;
+#endif
       auto db_acc_opt = db_gk.access();
-      if (!db_acc_opt) continue;
-      auto &db_acc = *db_acc_opt;
-      res.push_back(db_acc->GetInfo());
+      if (db_acc_opt) {
+        auto &db_acc = *db_acc_opt;
+        res.push_back(db_acc->GetInfo());
+      }
     }
     return res;
   }
@@ -267,15 +315,21 @@ class DbmsHandler {
    * @param ic global InterpreterContext
    */
   void RestoreTriggers(query::InterpreterContext *ic) {
+#ifdef MG_ENTERPRISE
     std::lock_guard<LockT> wr(lock_);
     for (auto &[_, db_gk] : db_handler_) {
+#else
+    {
+      auto &db_gk = db_gatekeeper_;
+#endif
       auto db_acc_opt = db_gk.access();
-      if (!db_acc_opt) continue;
-      auto &db_acc = *db_acc_opt;
-      spdlog::debug("Restoring trigger for database \"{}\"", db_acc->id());
-      auto storage_accessor = db_acc->Access();
-      auto dba = memgraph::query::DbAccessor{storage_accessor.get()};
-      db_acc->trigger_store()->RestoreTriggers(&ic->ast_cache, &dba, ic->config.query, ic->auth_checker);
+      if (db_acc_opt) {
+        auto &db_acc = *db_acc_opt;
+        spdlog::debug("Restoring trigger for database \"{}\"", db_acc->id());
+        auto storage_accessor = db_acc->Access();
+        auto dba = memgraph::query::DbAccessor{storage_accessor.get()};
+        db_acc->trigger_store()->RestoreTriggers(&ic->ast_cache, &dba, ic->config.query, ic->auth_checker);
+      }
     }
   }
 
@@ -286,17 +340,67 @@ class DbmsHandler {
    * @param ic global InterpreterContext
    */
   void RestoreStreams(query::InterpreterContext *ic) {
+#ifdef MG_ENTERPRISE
     std::lock_guard<LockT> wr(lock_);
     for (auto &[_, db_gk] : db_handler_) {
+#else
+    {
+      auto &db_gk = db_gatekeeper_;
+#endif
       auto db_acc = db_gk.access();
-      if (!db_acc) continue;
-      auto *db = db_acc->get();
-      spdlog::debug("Restoring streams for database \"{}\"", db->id());
-      db->streams()->RestoreStreams(*db_acc, ic);
+      if (db_acc) {
+        auto *db = db_acc->get();
+        spdlog::debug("Restoring streams for database \"{}\"", db->id());
+        db->streams()->RestoreStreams(*db_acc, ic);
+      }
     }
   }
 
+  /**
+   * @brief Apply a callable to every active database.
+   *
+   * @param f callable invoked with a Database* for each accessible (non-defunct) database
+   */
+  void ForEach(auto f) {
+#ifdef MG_ENTERPRISE
+    std::shared_lock<LockT> rd(lock_);
+    for (auto &[_, db_gk] : db_handler_) {
+#else
+    {
+      auto &db_gk = db_gatekeeper_;
+#endif
+      auto db_acc = db_gk.access();
+      if (db_acc) {  // This isn't an error, just a defunct db
+        f(db_acc->get());
+      }
+    }
+  }
+
+  /**
+   * @brief Apply a callable to databases until it first succeeds.
+   *
+   * @param f callable taking a Database* and returning bool; iteration stops at the first true result
+   */
+  void ForOne(auto f) {
+#ifdef MG_ENTERPRISE
+    std::shared_lock<LockT> rd(lock_);
+    for (auto &[_, db_gk] : db_handler_) {
+      auto db_acc = db_gk.access();
+      if (db_acc) {                   // This isn't an error, just a defunct db
+        if (f(db_acc->get())) break;  // Run until the first successful one
+      }
+    }
+#else
+    {
+      auto db_acc = db_gatekeeper_.access();
+      MG_ASSERT(db_acc, "Should always have the database");
+      f(db_acc->get());
+    }
+#endif
+  }
+
  private:
+#ifdef MG_ENTERPRISE
   /**
    * @brief return the storage directory of the associated database
    *
@@ -328,13 +432,9 @@ class DbmsHandler {
    * @return NewResultT context on success, error on failure
    */
   NewResultT New_(const std::string &name, std::filesystem::path storage_subdir) {
-    if (default_config_) {
-      auto config_copy = *default_config_;
-      storage::UpdatePaths(config_copy, default_config_->durability.storage_directory / storage_subdir);
-      return New_(name, config_copy);
-    }
-    spdlog::info("Trying to generate session context without any configurations.");
-    return NewError::NO_CONFIGS;
+    auto config_copy = default_config_;
+    storage::UpdatePaths(config_copy, default_config_.durability.storage_directory / storage_subdir);
+    return New_(name, config_copy);
   }
 
   /**
@@ -351,7 +451,7 @@ class DbmsHandler {
       return NewError::DEFUNCT;
     }
 
-    auto new_db = db_handler_.New(name, storage_config);
+    auto new_db = db_handler_.New(name, storage_config, repl_state_);
     if (new_db.HasValue()) {
       // Success
       if (durability_) durability_->Put(name, "ok");  // TODO: Serialize the configuration?
@@ -436,14 +536,16 @@ class DbmsHandler {
     throw UnknownDatabaseException("Tried to retrieve an unknown database \"{}\".", name);
   }
 
-  // Should storage objects ever be deleted?
-  mutable LockT lock_;                             //!< protective lock
-  DatabaseHandler db_handler_;                     //!< multi-tenancy storage handler
-  std::optional<storage::Config> default_config_;  //!< Storage configuration used when creating new databases
-  std::unique_ptr<kvstore::KVStore> durability_;   //!< list of active dbs (pointer so we can postpone its creation)
-  std::set<std::string> defunct_dbs_;              //!< Databases that are in an unknown state due to various failures
-  bool delete_on_drop_;                            //!< Flag defining if dropping storage also deletes its directory
-};
+  mutable LockT lock_;                               //!< protective lock
+  storage::Config default_config_;                   //!< Storage configuration used when creating new databases
+  const replication::ReplicationState &repl_state_;  //!< Global replication state
+  DatabaseHandler db_handler_;                       //!< multi-tenancy storage handler
+  std::unique_ptr<kvstore::KVStore> durability_;     //!< list of active dbs (pointer so we can postpone its creation)
+  bool delete_on_drop_;                              //!< Flag defining if dropping storage also deletes its directory
+  std::set<std::string> defunct_dbs_;                //!< Databases that are in an unknown state due to various failures
+#else
+  mutable utils::Gatekeeper<Database> db_gatekeeper_;  //!< Single databases gatekeeper
 #endif
+};
 
 }  // namespace memgraph::dbms
diff --git a/src/dbms/global.hpp b/src/dbms/global.hpp
index 1047a31c0..031705745 100644
--- a/src/dbms/global.hpp
+++ b/src/dbms/global.hpp
@@ -19,6 +19,7 @@
 
 namespace memgraph::dbms {
 
+#ifdef MG_ENTERPRISE
 enum class DeleteError : uint8_t {
   DEFAULT_DB,
   USING,
@@ -34,11 +35,7 @@ enum class NewError : uint8_t {
   GENERIC,
 };
 
-enum class SetForResult : uint8_t {
-  SUCCESS,
-  ALREADY_SET,
-  FAIL,
-};
+#endif
 
 /**
  * UnknownSession Exception
diff --git a/src/dbms/handler.hpp b/src/dbms/handler.hpp
index 6558fee85..a7622e6b2 100644
--- a/src/dbms/handler.hpp
+++ b/src/dbms/handler.hpp
@@ -49,7 +49,7 @@ class Handler {
    * @return NewResult
    */
   template <typename... Args>
-  NewResult New(std::piecewise_construct_t /* marker */, std::string_view name, Args... args) {
+  NewResult New(std::piecewise_construct_t /* marker */, std::string_view name, Args &&...args) {
     // Make sure the emplace will succeed, since we don't want to create temporary objects that could break something
     if (!Has(name)) {
       auto [itr, _] = items_.emplace(std::piecewise_construct, std::forward_as_tuple(name),
diff --git a/src/storage/v2/inmemory/replication/replication_server.cpp b/src/dbms/inmemory/replication_handlers.cpp
similarity index 61%
rename from src/storage/v2/inmemory/replication/replication_server.cpp
rename to src/dbms/inmemory/replication_handlers.cpp
index d92bc750b..b50163c55 100644
--- a/src/storage/v2/inmemory/replication/replication_server.cpp
+++ b/src/dbms/inmemory/replication_handlers.cpp
@@ -9,7 +9,11 @@
 // by the Apache License, Version 2.0, included in the file
 // licenses/APL.txt.
 
-#include "storage/v2/inmemory/replication/replication_server.hpp"
+#include "dbms/inmemory/replication_handlers.hpp"
+#include "dbms/constants.hpp"
+#include "dbms/dbms_handler.hpp"
+#include "replication/replication_server.hpp"
+#include "spdlog/spdlog.h"
 #include "storage/v2/durability/durability.hpp"
 #include "storage/v2/durability/snapshot.hpp"
 #include "storage/v2/durability/version.hpp"
@@ -17,9 +21,20 @@
 #include "storage/v2/inmemory/storage.hpp"
 #include "storage/v2/inmemory/unique_constraints.hpp"
 
-namespace memgraph::storage {
+using memgraph::storage::Delta;
+using memgraph::storage::EdgeAccessor;
+using memgraph::storage::EdgeRef;
+using memgraph::storage::EdgeTypeId;
+using memgraph::storage::LabelIndexStats;
+using memgraph::storage::LabelPropertyIndexStats;
+using memgraph::storage::PropertyId;
+using memgraph::storage::UniqueConstraints;
+using memgraph::storage::View;
+using memgraph::storage::durability::WalDeltaData;
+
+namespace memgraph::dbms {
 namespace {
-std::pair<uint64_t, durability::WalDeltaData> ReadDelta(durability::BaseDecoder *decoder) {
+std::pair<uint64_t, WalDeltaData> ReadDelta(storage::durability::BaseDecoder *decoder) {
   try {
     auto timestamp = ReadWalDeltaHeader(decoder);
     SPDLOG_INFO("       Timestamp {}", timestamp);
@@ -27,78 +42,114 @@ std::pair<uint64_t, durability::WalDeltaData> ReadDelta(durability::BaseDecoder
     return {timestamp, delta};
   } catch (const slk::SlkReaderException &) {
     throw utils::BasicException("Missing data!");
-  } catch (const durability::RecoveryFailure &) {
+  } catch (const storage::durability::RecoveryFailure &) {
     throw utils::BasicException("Invalid data!");
   }
 };
+
+std::optional<DatabaseAccess> GetDatabaseAccessor(dbms::DbmsHandler *dbms_handler, std::string_view db_name) {
+  try {
+#ifdef MG_ENTERPRISE
+    auto acc = dbms_handler->Get(db_name);
+#else
+    if (db_name != dbms::kDefaultDB) {
+      spdlog::warn("Trying to replicate a non-default database on a community replica.");
+      return std::nullopt;
+    }
+    auto acc = dbms_handler->Get();
+#endif
+    if (!acc) {
+      spdlog::error("Failed to get access to {}", db_name);
+      return std::nullopt;
+    }
+    auto *inmem_storage = dynamic_cast<storage::InMemoryStorage *>(acc.get()->storage());
+    if (!inmem_storage || inmem_storage->storage_mode_ != storage::StorageMode::IN_MEMORY_TRANSACTIONAL) {
+      spdlog::error("Database \"{}\" is not IN_MEMORY_TRANSACTIONAL.", db_name);
+      return std::nullopt;
+    }
+    return std::optional{std::move(acc)};
+  } catch (const dbms::UnknownDatabaseException &e) {
+    spdlog::warn("No database \"{}\" on replica!", db_name);
+    return std::nullopt;
+  }
+}
 }  // namespace
 
-InMemoryReplicationServer::InMemoryReplicationServer(InMemoryStorage *storage,
-                                                     const memgraph::replication::ReplicationServerConfig &config,
-                                                     memgraph::replication::ReplicationEpoch *repl_epoch)
-    : ReplicationServer{config}, storage_(storage), repl_epoch_{repl_epoch} {
-  rpc_server_.Register<replication::HeartbeatRpc>([this](auto *req_reader, auto *res_builder) {
+void InMemoryReplicationHandlers::Register(dbms::DbmsHandler *dbms_handler, replication::ReplicationServer &server) {
+  server.rpc_server_.Register<storage::replication::HeartbeatRpc>([dbms_handler](auto *req_reader, auto *res_builder) {
     spdlog::debug("Received HeartbeatRpc");
-    this->HeartbeatHandler(req_reader, res_builder);
+    InMemoryReplicationHandlers::HeartbeatHandler(dbms_handler, req_reader, res_builder);
   });
-
-  rpc_server_.Register<replication::AppendDeltasRpc>([this](auto *req_reader, auto *res_builder) {
-    spdlog::debug("Received AppendDeltasRpc");
-    this->AppendDeltasHandler(req_reader, res_builder);
-  });
-  rpc_server_.Register<replication::SnapshotRpc>([this](auto *req_reader, auto *res_builder) {
+  server.rpc_server_.Register<storage::replication::AppendDeltasRpc>(
+      [dbms_handler](auto *req_reader, auto *res_builder) {
+        spdlog::debug("Received AppendDeltasRpc");
+        InMemoryReplicationHandlers::AppendDeltasHandler(dbms_handler, req_reader, res_builder);
+      });
+  server.rpc_server_.Register<storage::replication::SnapshotRpc>([dbms_handler](auto *req_reader, auto *res_builder) {
     spdlog::debug("Received SnapshotRpc");
-    this->SnapshotHandler(req_reader, res_builder);
+    InMemoryReplicationHandlers::SnapshotHandler(dbms_handler, req_reader, res_builder);
   });
-  rpc_server_.Register<replication::WalFilesRpc>([this](auto *req_reader, auto *res_builder) {
+  server.rpc_server_.Register<storage::replication::WalFilesRpc>([dbms_handler](auto *req_reader, auto *res_builder) {
     spdlog::debug("Received WalFilesRpc");
-    this->WalFilesHandler(req_reader, res_builder);
+    InMemoryReplicationHandlers::WalFilesHandler(dbms_handler, req_reader, res_builder);
   });
-  rpc_server_.Register<replication::CurrentWalRpc>([this](auto *req_reader, auto *res_builder) {
+  server.rpc_server_.Register<storage::replication::CurrentWalRpc>([dbms_handler](auto *req_reader, auto *res_builder) {
     spdlog::debug("Received CurrentWalRpc");
-    this->CurrentWalHandler(req_reader, res_builder);
+    InMemoryReplicationHandlers::CurrentWalHandler(dbms_handler, req_reader, res_builder);
   });
-  rpc_server_.Register<replication::TimestampRpc>([this](auto *req_reader, auto *res_builder) {
+  server.rpc_server_.Register<storage::replication::TimestampRpc>([dbms_handler](auto *req_reader, auto *res_builder) {
     spdlog::debug("Received TimestampRpc");
-    this->TimestampHandler(req_reader, res_builder);
+    InMemoryReplicationHandlers::TimestampHandler(dbms_handler, req_reader, res_builder);
   });
 }
 
-void InMemoryReplicationServer::HeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder) {
-  replication::HeartbeatReq req;
+void InMemoryReplicationHandlers::HeartbeatHandler(dbms::DbmsHandler *dbms_handler, slk::Reader *req_reader,
+                                                   slk::Builder *res_builder) {
+  storage::replication::HeartbeatReq req;
   slk::Load(&req, req_reader);
-  replication::HeartbeatRes res{true, storage_->repl_storage_state_.last_commit_timestamp_.load(),
-                                std::string{repl_epoch_->id()}};
+  auto const db_acc = GetDatabaseAccessor(dbms_handler, req.db_name);
+  if (!db_acc) return;
+
+  // TODO: this handler is agnostic of InMemory, move to be reused by on-disk
+  auto const *storage = db_acc->get()->storage();
+  storage::replication::HeartbeatRes res{storage->id(), true,
+                                         storage->repl_storage_state_.last_commit_timestamp_.load(),
+                                         std::string{storage->repl_storage_state_.epoch_.id()}};
   slk::Save(res, res_builder);
 }
 
-void InMemoryReplicationServer::AppendDeltasHandler(slk::Reader *req_reader, slk::Builder *res_builder) {
-  replication::AppendDeltasReq req;
+void InMemoryReplicationHandlers::AppendDeltasHandler(dbms::DbmsHandler *dbms_handler, slk::Reader *req_reader,
+                                                      slk::Builder *res_builder) {
+  storage::replication::AppendDeltasReq req;
   slk::Load(&req, req_reader);
+  auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name);
+  if (!db_acc) return;
 
-  replication::Decoder decoder(req_reader);
+  storage::replication::Decoder decoder(req_reader);
 
   auto maybe_epoch_id = decoder.ReadString();
   MG_ASSERT(maybe_epoch_id, "Invalid replication message");
 
-  auto &repl_storage_state = storage_->repl_storage_state_;
-  if (*maybe_epoch_id != repl_epoch_->id()) {
-    auto prev_epoch = repl_epoch_->SetEpoch(*maybe_epoch_id);
+  auto *storage = static_cast<storage::InMemoryStorage *>(db_acc->get()->storage());
+  auto &repl_storage_state = storage->repl_storage_state_;
+  if (*maybe_epoch_id != storage->repl_storage_state_.epoch_.id()) {
+    auto prev_epoch = storage->repl_storage_state_.epoch_.SetEpoch(*maybe_epoch_id);
     repl_storage_state.AddEpochToHistoryForce(prev_epoch);
   }
 
-  if (storage_->wal_file_) {
-    if (req.seq_num > storage_->wal_file_->SequenceNumber() || *maybe_epoch_id != repl_epoch_->id()) {
-      storage_->wal_file_->FinalizeWal();
-      storage_->wal_file_.reset();
-      storage_->wal_seq_num_ = req.seq_num;
+  if (storage->wal_file_) {
+    if (req.seq_num > storage->wal_file_->SequenceNumber() ||
+        *maybe_epoch_id != storage->repl_storage_state_.epoch_.id()) {
+      storage->wal_file_->FinalizeWal();
+      storage->wal_file_.reset();
+      storage->wal_seq_num_ = req.seq_num;
       spdlog::trace("Finalized WAL file");
     } else {
-      MG_ASSERT(storage_->wal_file_->SequenceNumber() == req.seq_num, "Invalid sequence number of current wal file");
-      storage_->wal_seq_num_ = req.seq_num + 1;
+      MG_ASSERT(storage->wal_file_->SequenceNumber() == req.seq_num, "Invalid sequence number of current wal file");
+      storage->wal_seq_num_ = req.seq_num + 1;
     }
   } else {
-    storage_->wal_seq_num_ = req.seq_num;
+    storage->wal_seq_num_ = req.seq_num;
   }
 
   if (req.previous_commit_timestamp != repl_storage_state.last_commit_timestamp_.load()) {
@@ -107,144 +158,161 @@ void InMemoryReplicationServer::AppendDeltasHandler(slk::Reader *req_reader, slk
     while (!transaction_complete) {
       SPDLOG_INFO("Skipping delta");
       const auto [timestamp, delta] = ReadDelta(&decoder);
-      transaction_complete = durability::IsWalDeltaDataTypeTransactionEnd(
-          delta.type, durability::kVersion);  // TODO: Check if we are always using the latest version when replicating
+      transaction_complete = storage::durability::IsWalDeltaDataTypeTransactionEnd(
+          delta.type,
+          storage::durability::kVersion);  // TODO: Check if we are always using the latest version when replicating
     }
 
-    replication::AppendDeltasRes res{false, repl_storage_state.last_commit_timestamp_.load()};
+    storage::replication::AppendDeltasRes res{storage->id(), false, repl_storage_state.last_commit_timestamp_.load()};
     slk::Save(res, res_builder);
     return;
   }
 
-  ReadAndApplyDelta(storage_, &decoder,
-                    durability::kVersion);  // TODO: Check if we are always using the latest version when replicating
+  ReadAndApplyDelta(
+      storage, &decoder,
+      storage::durability::kVersion);  // TODO: Check if we are always using the latest version when replicating
 
-  replication::AppendDeltasRes res{true, repl_storage_state.last_commit_timestamp_.load()};
+  storage::replication::AppendDeltasRes res{storage->id(), true, repl_storage_state.last_commit_timestamp_.load()};
   slk::Save(res, res_builder);
   spdlog::debug("Replication recovery from append deltas finished, replica is now up to date!");
 }
 
-void InMemoryReplicationServer::SnapshotHandler(slk::Reader *req_reader, slk::Builder *res_builder) {
-  replication::SnapshotReq req;
+void InMemoryReplicationHandlers::SnapshotHandler(dbms::DbmsHandler *dbms_handler, slk::Reader *req_reader,
+                                                  slk::Builder *res_builder) {
+  storage::replication::SnapshotReq req;
   slk::Load(&req, req_reader);
+  auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name);
+  if (!db_acc) return;
 
-  replication::Decoder decoder(req_reader);
+  storage::replication::Decoder decoder(req_reader);
 
-  utils::EnsureDirOrDie(storage_->snapshot_directory_);
+  auto *storage = static_cast<storage::InMemoryStorage *>(db_acc->get()->storage());
+  utils::EnsureDirOrDie(storage->snapshot_directory_);
 
-  const auto maybe_snapshot_path = decoder.ReadFile(storage_->snapshot_directory_);
+  const auto maybe_snapshot_path = decoder.ReadFile(storage->snapshot_directory_);
   MG_ASSERT(maybe_snapshot_path, "Failed to load snapshot!");
   spdlog::info("Received snapshot saved to {}", *maybe_snapshot_path);
 
-  auto storage_guard = std::unique_lock{storage_->main_lock_};
+  auto storage_guard = std::unique_lock{storage->main_lock_};
   spdlog::trace("Clearing database since recovering from snapshot.");
   // Clear the database
-  storage_->vertices_.clear();
-  storage_->edges_.clear();
+  storage->vertices_.clear();
+  storage->edges_.clear();
 
-  storage_->constraints_.existence_constraints_ = std::make_unique<ExistenceConstraints>();
-  storage_->constraints_.unique_constraints_ = std::make_unique<InMemoryUniqueConstraints>();
-  storage_->indices_.label_index_ = std::make_unique<InMemoryLabelIndex>();
-  storage_->indices_.label_property_index_ = std::make_unique<InMemoryLabelPropertyIndex>();
+  storage->constraints_.existence_constraints_ = std::make_unique<storage::ExistenceConstraints>();
+  storage->constraints_.unique_constraints_ = std::make_unique<storage::InMemoryUniqueConstraints>();
+  storage->indices_.label_index_ = std::make_unique<storage::InMemoryLabelIndex>();
+  storage->indices_.label_property_index_ = std::make_unique<storage::InMemoryLabelPropertyIndex>();
   try {
     spdlog::debug("Loading snapshot");
-    auto recovered_snapshot = durability::LoadSnapshot(
-        *maybe_snapshot_path, &storage_->vertices_, &storage_->edges_, &storage_->repl_storage_state_.history,
-        storage_->name_id_mapper_.get(), &storage_->edge_count_, storage_->config_);
+    auto recovered_snapshot = storage::durability::LoadSnapshot(
+        *maybe_snapshot_path, &storage->vertices_, &storage->edges_, &storage->repl_storage_state_.history,
+        storage->name_id_mapper_.get(), &storage->edge_count_, storage->config_);
     spdlog::debug("Snapshot loaded successfully");
     // If this step is present it should always be the first step of
     // the recovery so we use the UUID we read from snasphost
-    storage_->uuid_ = std::move(recovered_snapshot.snapshot_info.uuid);
-    repl_epoch_->SetEpoch(std::move(recovered_snapshot.snapshot_info.epoch_id));
+    storage->uuid_ = std::move(recovered_snapshot.snapshot_info.uuid);
+    storage->repl_storage_state_.epoch_.SetEpoch(std::move(recovered_snapshot.snapshot_info.epoch_id));
     const auto &recovery_info = recovered_snapshot.recovery_info;
-    storage_->vertex_id_ = recovery_info.next_vertex_id;
-    storage_->edge_id_ = recovery_info.next_edge_id;
-    storage_->timestamp_ = std::max(storage_->timestamp_, recovery_info.next_timestamp);
+    storage->vertex_id_ = recovery_info.next_vertex_id;
+    storage->edge_id_ = recovery_info.next_edge_id;
+    storage->timestamp_ = std::max(storage->timestamp_, recovery_info.next_timestamp);
 
     spdlog::trace("Recovering indices and constraints from snapshot.");
-    durability::RecoverIndicesAndConstraints(recovered_snapshot.indices_constraints, &storage_->indices_,
-                                             &storage_->constraints_, &storage_->vertices_);
-  } catch (const durability::RecoveryFailure &e) {
+    storage::durability::RecoverIndicesAndConstraints(recovered_snapshot.indices_constraints, &storage->indices_,
+                                                      &storage->constraints_, &storage->vertices_);
+  } catch (const storage::durability::RecoveryFailure &e) {
     LOG_FATAL("Couldn't load the snapshot because of: {}", e.what());
   }
   storage_guard.unlock();
 
-  replication::SnapshotRes res{true, storage_->repl_storage_state_.last_commit_timestamp_.load()};
+  storage::replication::SnapshotRes res{storage->id(), true,
+                                        storage->repl_storage_state_.last_commit_timestamp_.load()};
   slk::Save(res, res_builder);
 
   spdlog::trace("Deleting old snapshot files due to snapshot recovery.");
   // Delete other durability files
-  auto snapshot_files = durability::GetSnapshotFiles(storage_->snapshot_directory_, storage_->uuid_);
+  auto snapshot_files = storage::durability::GetSnapshotFiles(storage->snapshot_directory_, storage->uuid_);
   for (const auto &[path, uuid, _] : snapshot_files) {
     if (path != *maybe_snapshot_path) {
       spdlog::trace("Deleting snapshot file {}", path);
-      storage_->file_retainer_.DeleteFile(path);
+      storage->file_retainer_.DeleteFile(path);
     }
   }
 
   spdlog::trace("Deleting old WAL files due to snapshot recovery.");
-  auto wal_files = durability::GetWalFiles(storage_->wal_directory_, storage_->uuid_);
+  auto wal_files = storage::durability::GetWalFiles(storage->wal_directory_, storage->uuid_);
   if (wal_files) {
     for (const auto &wal_file : *wal_files) {
       spdlog::trace("Deleting WAL file {}", wal_file.path);
-      storage_->file_retainer_.DeleteFile(wal_file.path);
+      storage->file_retainer_.DeleteFile(wal_file.path);
     }
 
-    storage_->wal_file_.reset();
+    storage->wal_file_.reset();
   }
   spdlog::debug("Replication recovery from snapshot finished!");
 }
 
-void InMemoryReplicationServer::WalFilesHandler(slk::Reader *req_reader, slk::Builder *res_builder) {
-  replication::WalFilesReq req;
+void InMemoryReplicationHandlers::WalFilesHandler(dbms::DbmsHandler *dbms_handler, slk::Reader *req_reader,
+                                                  slk::Builder *res_builder) {
+  storage::replication::WalFilesReq req;
   slk::Load(&req, req_reader);
+  auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name);
+  if (!db_acc) return;
 
   const auto wal_file_number = req.file_number;
   spdlog::debug("Received WAL files: {}", wal_file_number);
 
-  replication::Decoder decoder(req_reader);
+  storage::replication::Decoder decoder(req_reader);
 
-  utils::EnsureDirOrDie(storage_->wal_directory_);
+  auto *storage = static_cast<storage::InMemoryStorage *>(db_acc->get()->storage());
+  utils::EnsureDirOrDie(storage->wal_directory_);
 
   for (auto i = 0; i < wal_file_number; ++i) {
-    LoadWal(storage_, *repl_epoch_, &decoder);
+    LoadWal(storage, &decoder);
   }
 
-  replication::WalFilesRes res{true, storage_->repl_storage_state_.last_commit_timestamp_.load()};
+  storage::replication::WalFilesRes res{storage->id(), true,
+                                        storage->repl_storage_state_.last_commit_timestamp_.load()};
   slk::Save(res, res_builder);
   spdlog::debug("Replication recovery from WAL files ended successfully, replica is now up to date!");
 }
 
-void InMemoryReplicationServer::CurrentWalHandler(slk::Reader *req_reader, slk::Builder *res_builder) {
-  replication::CurrentWalReq req;
+void InMemoryReplicationHandlers::CurrentWalHandler(dbms::DbmsHandler *dbms_handler, slk::Reader *req_reader,
+                                                    slk::Builder *res_builder) {
+  storage::replication::CurrentWalReq req;
   slk::Load(&req, req_reader);
+  auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name);
+  if (!db_acc) return;
 
-  replication::Decoder decoder(req_reader);
+  storage::replication::Decoder decoder(req_reader);
 
-  utils::EnsureDirOrDie(storage_->wal_directory_);
+  auto *storage = static_cast<storage::InMemoryStorage *>(db_acc->get()->storage());
+  utils::EnsureDirOrDie(storage->wal_directory_);
 
-  LoadWal(storage_, *repl_epoch_, &decoder);
+  LoadWal(storage, &decoder);
 
-  replication::CurrentWalRes res{true, storage_->repl_storage_state_.last_commit_timestamp_.load()};
+  storage::replication::CurrentWalRes res{storage->id(), true,
+                                          storage->repl_storage_state_.last_commit_timestamp_.load()};
   slk::Save(res, res_builder);
   spdlog::debug("Replication recovery from current WAL ended successfully, replica is now up to date!");
 }
 
-void InMemoryReplicationServer::LoadWal(InMemoryStorage *storage, memgraph::replication::ReplicationEpoch &epoch,
-                                        replication::Decoder *decoder) {
-  const auto temp_wal_directory = std::filesystem::temp_directory_path() / "memgraph" / durability::kWalDirectory;
+void InMemoryReplicationHandlers::LoadWal(storage::InMemoryStorage *storage, storage::replication::Decoder *decoder) {
+  const auto temp_wal_directory =
+      std::filesystem::temp_directory_path() / "memgraph" / storage::durability::kWalDirectory;
   utils::EnsureDir(temp_wal_directory);
   auto maybe_wal_path = decoder->ReadFile(temp_wal_directory);
   MG_ASSERT(maybe_wal_path, "Failed to load WAL!");
   spdlog::trace("Received WAL saved to {}", *maybe_wal_path);
   try {
-    auto wal_info = durability::ReadWalInfo(*maybe_wal_path);
+    auto wal_info = storage::durability::ReadWalInfo(*maybe_wal_path);
     if (wal_info.seq_num == 0) {
       storage->uuid_ = wal_info.uuid;
     }
-
-    if (wal_info.epoch_id != epoch.id()) {
-      auto prev_epoch = epoch.SetEpoch(wal_info.epoch_id);
+    auto &replica_epoch = storage->repl_storage_state_.epoch_;
+    if (wal_info.epoch_id != replica_epoch.id()) {
+      auto prev_epoch = replica_epoch.SetEpoch(wal_info.epoch_id);
       storage->repl_storage_state_.AddEpochToHistoryForce(prev_epoch);
     }
 
@@ -259,11 +327,12 @@ void InMemoryReplicationServer::LoadWal(InMemoryStorage *storage, memgraph::repl
       storage->wal_seq_num_ = wal_info.seq_num;
     }
     spdlog::trace("Loading WAL deltas from {}", *maybe_wal_path);
-    durability::Decoder wal;
-    const auto version = wal.Initialize(*maybe_wal_path, durability::kWalMagic);
+    storage::durability::Decoder wal;
+    const auto version = wal.Initialize(*maybe_wal_path, storage::durability::kWalMagic);
     spdlog::debug("WAL file {} loaded successfully", *maybe_wal_path);
-    if (!version) throw durability::RecoveryFailure("Couldn't read WAL magic and/or version!");
-    if (!durability::IsVersionSupported(*version)) throw durability::RecoveryFailure("Invalid WAL version!");
+    if (!version) throw storage::durability::RecoveryFailure("Couldn't read WAL magic and/or version!");
+    if (!storage::durability::IsVersionSupported(*version))
+      throw storage::durability::RecoveryFailure("Invalid WAL version!");
     wal.SetPosition(wal_info.offset_deltas);
 
     for (size_t i = 0; i < wal_info.num_deltas;) {
@@ -271,38 +340,46 @@ void InMemoryReplicationServer::LoadWal(InMemoryStorage *storage, memgraph::repl
     }
 
     spdlog::debug("Replication from current WAL successful!");
-  } catch (const durability::RecoveryFailure &e) {
+  } catch (const storage::durability::RecoveryFailure &e) {
     LOG_FATAL("Couldn't recover WAL deltas from {} because of: {}", *maybe_wal_path, e.what());
   }
 }
 
-void InMemoryReplicationServer::TimestampHandler(slk::Reader *req_reader, slk::Builder *res_builder) {
-  replication::TimestampReq req;
+void InMemoryReplicationHandlers::TimestampHandler(dbms::DbmsHandler *dbms_handler, slk::Reader *req_reader,
+                                                   slk::Builder *res_builder) {
+  storage::replication::TimestampReq req;
   slk::Load(&req, req_reader);
+  auto const db_acc = GetDatabaseAccessor(dbms_handler, req.db_name);
+  if (!db_acc) return;
 
-  replication::TimestampRes res{true, storage_->repl_storage_state_.last_commit_timestamp_.load()};
+  // TODO: this handler is agnostic of InMemory, move to be reused by on-disk
+  auto const *storage = db_acc->get()->storage();
+  storage::replication::TimestampRes res{storage->id(), true,
+                                         storage->repl_storage_state_.last_commit_timestamp_.load()};
   slk::Save(res, res_builder);
 }
 
-uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage, durability::BaseDecoder *decoder,
-                                                      const uint64_t version) {
+uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage *storage,
+                                                        storage::durability::BaseDecoder *decoder,
+                                                        const uint64_t version) {
   auto edge_acc = storage->edges_.access();
   auto vertex_acc = storage->vertices_.access();
 
   constexpr bool kUniqueAccess = true;
+  constexpr bool kSharedAccess = false;
 
-  std::optional<std::pair<uint64_t, InMemoryStorage::ReplicationAccessor>> commit_timestamp_and_accessor;
+  std::optional<std::pair<uint64_t, storage::InMemoryStorage::ReplicationAccessor>> commit_timestamp_and_accessor;
   auto get_transaction = [storage, &commit_timestamp_and_accessor](uint64_t commit_timestamp,
-                                                                   bool unique = !kUniqueAccess) {
+                                                                   bool unique = kSharedAccess) {
     if (!commit_timestamp_and_accessor) {
-      std::unique_ptr<Storage::Accessor> acc = nullptr;
+      std::unique_ptr<storage::Storage::Accessor> acc = nullptr;
       if (unique) {
-        acc = storage->UniqueAccess(std::nullopt);
+        acc = storage->UniqueAccess(std::nullopt, false /*not main*/);
       } else {
-        acc = storage->Access(std::nullopt);
+        acc = storage->Access(std::nullopt, false /*not main*/);
       }
-      auto inmem_acc = std::unique_ptr<InMemoryStorage::InMemoryAccessor>(
-          static_cast<InMemoryStorage::InMemoryAccessor *>(acc.release()));
+      auto inmem_acc = std::unique_ptr<storage::InMemoryStorage::InMemoryAccessor>(
+          static_cast<storage::InMemoryStorage::InMemoryAccessor *>(acc.release()));
       commit_timestamp_and_accessor.emplace(commit_timestamp, std::move(*inmem_acc));
     } else if (commit_timestamp_and_accessor->first != commit_timestamp) {
       throw utils::BasicException("Received more than one transaction!");
@@ -319,7 +396,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
       max_commit_timestamp = timestamp;
     }
 
-    transaction_complete = durability::IsWalDeltaDataTypeTransactionEnd(delta.type, version);
+    transaction_complete = storage::durability::IsWalDeltaDataTypeTransactionEnd(delta.type, version);
 
     if (timestamp < storage->timestamp_) {
       continue;
@@ -327,13 +404,13 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
 
     SPDLOG_INFO("  Delta {}", applied_deltas);
     switch (delta.type) {
-      case durability::WalDeltaData::Type::VERTEX_CREATE: {
+      case WalDeltaData::Type::VERTEX_CREATE: {
         spdlog::trace("       Create vertex {}", delta.vertex_create_delete.gid.AsUint());
         auto *transaction = get_transaction(timestamp);
         transaction->CreateVertexEx(delta.vertex_create_delete.gid);
         break;
       }
-      case durability::WalDeltaData::Type::VERTEX_DELETE: {
+      case WalDeltaData::Type::VERTEX_DELETE: {
         spdlog::trace("       Delete vertex {}", delta.vertex_create_delete.gid.AsUint());
         auto *transaction = get_transaction(timestamp);
         auto vertex = transaction->FindVertex(delta.vertex_create_delete.gid, View::NEW);
@@ -342,7 +419,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
         if (ret.HasError() || !ret.GetValue()) throw utils::BasicException("Invalid transaction!");
         break;
       }
-      case durability::WalDeltaData::Type::VERTEX_ADD_LABEL: {
+      case WalDeltaData::Type::VERTEX_ADD_LABEL: {
         spdlog::trace("       Vertex {} add label {}", delta.vertex_add_remove_label.gid.AsUint(),
                       delta.vertex_add_remove_label.label);
         auto *transaction = get_transaction(timestamp);
@@ -352,7 +429,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
         if (ret.HasError() || !ret.GetValue()) throw utils::BasicException("Invalid transaction!");
         break;
       }
-      case durability::WalDeltaData::Type::VERTEX_REMOVE_LABEL: {
+      case WalDeltaData::Type::VERTEX_REMOVE_LABEL: {
         spdlog::trace("       Vertex {} remove label {}", delta.vertex_add_remove_label.gid.AsUint(),
                       delta.vertex_add_remove_label.label);
         auto *transaction = get_transaction(timestamp);
@@ -362,7 +439,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
         if (ret.HasError() || !ret.GetValue()) throw utils::BasicException("Invalid transaction!");
         break;
       }
-      case durability::WalDeltaData::Type::VERTEX_SET_PROPERTY: {
+      case WalDeltaData::Type::VERTEX_SET_PROPERTY: {
         spdlog::trace("       Vertex {} set property {} to {}", delta.vertex_edge_set_property.gid.AsUint(),
                       delta.vertex_edge_set_property.property, delta.vertex_edge_set_property.value);
         auto *transaction = get_transaction(timestamp);
@@ -373,7 +450,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
         if (ret.HasError()) throw utils::BasicException("Invalid transaction!");
         break;
       }
-      case durability::WalDeltaData::Type::EDGE_CREATE: {
+      case WalDeltaData::Type::EDGE_CREATE: {
         spdlog::trace("       Create edge {} of type {} from vertex {} to vertex {}",
                       delta.edge_create_delete.gid.AsUint(), delta.edge_create_delete.edge_type,
                       delta.edge_create_delete.from_vertex.AsUint(), delta.edge_create_delete.to_vertex.AsUint());
@@ -388,7 +465,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
         if (edge.HasError()) throw utils::BasicException("Invalid transaction!");
         break;
       }
-      case durability::WalDeltaData::Type::EDGE_DELETE: {
+      case WalDeltaData::Type::EDGE_DELETE: {
         spdlog::trace("       Delete edge {} of type {} from vertex {} to vertex {}",
                       delta.edge_create_delete.gid.AsUint(), delta.edge_create_delete.edge_type,
                       delta.edge_create_delete.from_vertex.AsUint(), delta.edge_create_delete.to_vertex.AsUint());
@@ -406,7 +483,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
         if (ret.HasError()) throw utils::BasicException("Invalid transaction!");
         break;
       }
-      case durability::WalDeltaData::Type::EDGE_SET_PROPERTY: {
+      case WalDeltaData::Type::EDGE_SET_PROPERTY: {
         spdlog::trace("       Edge {} set property {} to {}", delta.vertex_edge_set_property.gid.AsUint(),
                       delta.vertex_edge_set_property.property, delta.vertex_edge_set_property.value);
         if (!storage->config_.items.properties_on_edges)
@@ -469,17 +546,18 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
         break;
       }
 
-      case durability::WalDeltaData::Type::TRANSACTION_END: {
+      case WalDeltaData::Type::TRANSACTION_END: {
         spdlog::trace("       Transaction end");
         if (!commit_timestamp_and_accessor || commit_timestamp_and_accessor->first != timestamp)
           throw utils::BasicException("Invalid commit data!");
-        auto ret = commit_timestamp_and_accessor->second.Commit(commit_timestamp_and_accessor->first);
+        auto ret =
+            commit_timestamp_and_accessor->second.Commit(commit_timestamp_and_accessor->first, false /* not main */);
         if (ret.HasError()) throw utils::BasicException("Invalid transaction!");
         commit_timestamp_and_accessor = std::nullopt;
         break;
       }
 
-      case durability::WalDeltaData::Type::LABEL_INDEX_CREATE: {
+      case WalDeltaData::Type::LABEL_INDEX_CREATE: {
         spdlog::trace("       Create label index on :{}", delta.operation_label.label);
         // Need to send the timestamp
         auto *transaction = get_transaction(timestamp, kUniqueAccess);
@@ -487,14 +565,14 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
           throw utils::BasicException("Invalid transaction!");
         break;
       }
-      case durability::WalDeltaData::Type::LABEL_INDEX_DROP: {
+      case WalDeltaData::Type::LABEL_INDEX_DROP: {
         spdlog::trace("       Drop label index on :{}", delta.operation_label.label);
         auto *transaction = get_transaction(timestamp, kUniqueAccess);
         if (transaction->DropIndex(storage->NameToLabel(delta.operation_label.label)).HasError())
           throw utils::BasicException("Invalid transaction!");
         break;
       }
-      case durability::WalDeltaData::Type::LABEL_INDEX_STATS_SET: {
+      case WalDeltaData::Type::LABEL_INDEX_STATS_SET: {
         spdlog::trace("       Set label index statistics on :{}", delta.operation_label_stats.label);
         // Need to send the timestamp
         auto *transaction = get_transaction(timestamp);
@@ -506,7 +584,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
         transaction->SetIndexStats(label, stats);
         break;
       }
-      case durability::WalDeltaData::Type::LABEL_INDEX_STATS_CLEAR: {
+      case WalDeltaData::Type::LABEL_INDEX_STATS_CLEAR: {
         const auto &info = delta.operation_label;
         spdlog::trace("       Clear label index statistics on :{}", info.label);
         // Need to send the timestamp
@@ -514,7 +592,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
         transaction->DeleteLabelIndexStats(storage->NameToLabel(info.label));
         break;
       }
-      case durability::WalDeltaData::Type::LABEL_PROPERTY_INDEX_CREATE: {
+      case WalDeltaData::Type::LABEL_PROPERTY_INDEX_CREATE: {
         spdlog::trace("       Create label+property index on :{} ({})", delta.operation_label_property.label,
                       delta.operation_label_property.property);
         auto *transaction = get_transaction(timestamp, kUniqueAccess);
@@ -525,7 +603,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
           throw utils::BasicException("Invalid transaction!");
         break;
       }
-      case durability::WalDeltaData::Type::LABEL_PROPERTY_INDEX_DROP: {
+      case WalDeltaData::Type::LABEL_PROPERTY_INDEX_DROP: {
         spdlog::trace("       Drop label+property index on :{} ({})", delta.operation_label_property.label,
                       delta.operation_label_property.property);
         auto *transaction = get_transaction(timestamp, kUniqueAccess);
@@ -536,7 +614,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
           throw utils::BasicException("Invalid transaction!");
         break;
       }
-      case durability::WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_SET: {
+      case WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_SET: {
         const auto &info = delta.operation_label_property_stats;
         spdlog::trace("       Set label-property index statistics on :{}", info.label);
         // Need to send the timestamp
@@ -550,7 +628,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
         transaction->SetIndexStats(label, property, stats);
         break;
       }
-      case durability::WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_CLEAR: {
+      case WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_CLEAR: {
         const auto &info = delta.operation_label;
         spdlog::trace("       Clear label-property index statistics on :{}", info.label);
         // Need to send the timestamp
@@ -558,7 +636,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
         transaction->DeleteLabelPropertyIndexStats(storage->NameToLabel(info.label));
         break;
       }
-      case durability::WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE: {
+      case WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE: {
         spdlog::trace("       Create existence constraint on :{} ({})", delta.operation_label_property.label,
                       delta.operation_label_property.property);
         auto *transaction = get_transaction(timestamp, kUniqueAccess);
@@ -568,7 +646,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
         if (ret.HasError()) throw utils::BasicException("Invalid transaction!");
         break;
       }
-      case durability::WalDeltaData::Type::EXISTENCE_CONSTRAINT_DROP: {
+      case WalDeltaData::Type::EXISTENCE_CONSTRAINT_DROP: {
         spdlog::trace("       Drop existence constraint on :{} ({})", delta.operation_label_property.label,
                       delta.operation_label_property.property);
         auto *transaction = get_transaction(timestamp, kUniqueAccess);
@@ -579,7 +657,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
           throw utils::BasicException("Invalid transaction!");
         break;
       }
-      case durability::WalDeltaData::Type::UNIQUE_CONSTRAINT_CREATE: {
+      case WalDeltaData::Type::UNIQUE_CONSTRAINT_CREATE: {
         std::stringstream ss;
         utils::PrintIterable(ss, delta.operation_label_properties.properties);
         spdlog::trace("       Create unique constraint on :{} ({})", delta.operation_label_properties.label, ss.str());
@@ -594,7 +672,7 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
           throw utils::BasicException("Invalid transaction!");
         break;
       }
-      case durability::WalDeltaData::Type::UNIQUE_CONSTRAINT_DROP: {
+      case WalDeltaData::Type::UNIQUE_CONSTRAINT_DROP: {
         std::stringstream ss;
         utils::PrintIterable(ss, delta.operation_label_properties.properties);
         spdlog::trace("       Drop unique constraint on :{} ({})", delta.operation_label_properties.label, ss.str());
@@ -621,4 +699,4 @@ uint64_t InMemoryReplicationServer::ReadAndApplyDelta(InMemoryStorage *storage,
   return applied_deltas;
 }
 
-}  // namespace memgraph::storage
+}  // namespace memgraph::dbms
diff --git a/src/dbms/inmemory/replication_handlers.hpp b/src/dbms/inmemory/replication_handlers.hpp
new file mode 100644
index 000000000..fc76d2b3a
--- /dev/null
+++ b/src/dbms/inmemory/replication_handlers.hpp
@@ -0,0 +1,49 @@
+// Copyright 2023 Memgraph Ltd.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+// License, and you may not use this file except in compliance with the Business Source License.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+#pragma once
+
+#include "replication/replication_server.hpp"
+#include "replication/state.hpp"
+#include "storage/v2/replication/serialization.hpp"
+
+namespace memgraph::storage {
+class InMemoryStorage;
+}
+namespace memgraph::dbms {
+
+class DbmsHandler;
+
+class InMemoryReplicationHandlers {
+ public:
+  static void Register(dbms::DbmsHandler *dbms_handler, replication::ReplicationServer &server);
+
+ private:
+  // RPC handlers
+  static void HeartbeatHandler(dbms::DbmsHandler *dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder);
+
+  static void AppendDeltasHandler(dbms::DbmsHandler *dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder);
+
+  static void SnapshotHandler(dbms::DbmsHandler *dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder);
+
+  static void WalFilesHandler(dbms::DbmsHandler *dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder);
+
+  static void CurrentWalHandler(dbms::DbmsHandler *dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder);
+
+  static void TimestampHandler(dbms::DbmsHandler *dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder);
+
+  static void LoadWal(storage::InMemoryStorage *storage, storage::replication::Decoder *decoder);
+
+  static uint64_t ReadAndApplyDelta(storage::InMemoryStorage *storage, storage::durability::BaseDecoder *decoder,
+                                    uint64_t version);
+};
+
+}  // namespace memgraph::dbms
diff --git a/src/dbms/inmemory/storage_helper.hpp b/src/dbms/inmemory/storage_helper.hpp
new file mode 100644
index 000000000..347c16928
--- /dev/null
+++ b/src/dbms/inmemory/storage_helper.hpp
@@ -0,0 +1,67 @@
+// Copyright 2023 Memgraph Ltd.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+// License, and you may not use this file except in compliance with the Business Source License.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+#pragma once
+
+#include <variant>
+
+#include "dbms/constants.hpp"
+#include "dbms/replication_handler.hpp"
+#include "replication/state.hpp"
+#include "storage/v2/config.hpp"
+#include "storage/v2/inmemory/storage.hpp"
+#include "storage/v2/storage.hpp"
+
+namespace memgraph::dbms {
+
+#ifdef MG_EXPERIMENTAL_REPLICATION_MULTITENANCY
+constexpr bool allow_mt_repl = true;
+#else
+constexpr bool allow_mt_repl = false;
+#endif
+
+inline std::unique_ptr<storage::Storage> CreateInMemoryStorage(
+    storage::Config config, const ::memgraph::replication::ReplicationState &repl_state) {
+  const auto wal_mode = config.durability.snapshot_wal_mode;
+  const auto name = config.name;
+  auto storage = std::make_unique<storage::InMemoryStorage>(std::move(config));
+
+  // Connect replication state and storage
+  storage->CreateSnapshotHandler(
+      [storage = storage.get(), &repl_state]() -> utils::BasicResult<storage::InMemoryStorage::CreateSnapshotError> {
+        if (repl_state.IsReplica()) {
+          return storage::InMemoryStorage::CreateSnapshotError::DisabledForReplica;
+        }
+        return storage->CreateSnapshot();
+      });
+
+  if (allow_mt_repl || name == dbms::kDefaultDB) {
+    // Handle global replication state
+    spdlog::info("Replication configuration will be stored and will be automatically restored in case of a crash.");
+    // RECOVER REPLICA CONNECTIONS
+    memgraph::dbms::RestoreReplication(repl_state, *storage);
+  } else if (const ::memgraph::replication::RoleMainData *data =
+                 std::get_if<::memgraph::replication::RoleMainData>(&repl_state.ReplicationData());
+             data && !data->registered_replicas_.empty()) {
+    spdlog::warn("Multi-tenant replication is currently not supported!");
+  }
+
+  if (wal_mode == storage::Config::Durability::SnapshotWalMode::DISABLED && repl_state.IsMain()) {
+    spdlog::warn(
+        "The instance has the MAIN replication role, but durability logs and snapshots are disabled. Please consider "
+        "enabling durability by using --storage-snapshot-interval-sec and --storage-wal-enabled flags because "
+        "without write-ahead logs this instance is not replicating any data.");
+  }
+
+  return std::move(storage);
+}
+
+}  // namespace memgraph::dbms
diff --git a/src/dbms/replication_handler.cpp b/src/dbms/replication_handler.cpp
new file mode 100644
index 000000000..cff93fd6b
--- /dev/null
+++ b/src/dbms/replication_handler.cpp
@@ -0,0 +1,234 @@
+// Copyright 2023 Memgraph Ltd.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+// License, and you may not use this file except in compliance with the Business Source License.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+#include "dbms/replication_handler.hpp"
+
+#include "dbms/constants.hpp"
+#include "dbms/dbms_handler.hpp"
+#include "dbms/inmemory/replication_handlers.hpp"
+#include "dbms/inmemory/storage_helper.hpp"
+#include "replication/state.hpp"
+
+using memgraph::replication::ReplicationClientConfig;
+using memgraph::replication::ReplicationState;
+using memgraph::replication::RoleMainData;
+using memgraph::replication::RoleReplicaData;
+
+namespace memgraph::dbms {
+
+namespace {
+
+std::string RegisterReplicaErrorToString(RegisterReplicaError error) {
+  switch (error) {
+    using enum RegisterReplicaError;
+    case NAME_EXISTS:
+      return "NAME_EXISTS";
+    case END_POINT_EXISTS:
+      return "END_POINT_EXISTS";
+    case CONNECTION_FAILED:
+      return "CONNECTION_FAILED";
+    case COULD_NOT_BE_PERSISTED:
+      return "COULD_NOT_BE_PERSISTED";
+  }
+}
+}  // namespace
+
+bool ReplicationHandler::SetReplicationRoleMain() {
+  auto const main_handler = [](RoleMainData const &) {
+    // If we are already MAIN, we don't want to change anything
+    return false;
+  };
+  auto const replica_handler = [this](RoleReplicaData const &) {
+    // STEP 1) bring down all REPLICA servers
+    dbms_handler_.ForEach([](Database *db) {
+      auto *storage = db->storage();
+      // Remember old epoch + storage timestamp association
+      storage->PrepareForNewEpoch();
+    });
+
+    // STEP 2) Change to MAIN
+    // TODO: restore replication servers if false?
+    if (!repl_state_.SetReplicationRoleMain()) {
+      // TODO: Handle recovery on failure???
+      return false;
+    }
+
+    // STEP 3) We are now MAIN, update storage local epoch
+    dbms_handler_.ForEach([&](Database *db) {
+      auto *storage = db->storage();
+      storage->repl_storage_state_.epoch_ = std::get<RoleMainData>(std::as_const(repl_state_).ReplicationData()).epoch_;
+    });
+
+    return true;
+  };
+
+  // TODO: under lock
+  return std::visit(utils::Overloaded{main_handler, replica_handler}, repl_state_.ReplicationData());
+}
+
+bool ReplicationHandler::SetReplicationRoleReplica(const memgraph::replication::ReplicationServerConfig &config) {
+  // We don't want to restart the server if we're already a REPLICA
+  if (repl_state_.IsReplica()) {
+    return false;
+  }
+
+  // Remove registered replicas
+  dbms_handler_.ForEach([&](Database *db) {
+    auto *storage = db->storage();
+    storage->repl_storage_state_.replication_clients_.WithLock([](auto &clients) { clients.clear(); });
+  });
+
+  // Creates the server
+  repl_state_.SetReplicationRoleReplica(config);
+
+  // Start
+  const auto success =
+      std::visit(utils::Overloaded{[](auto) {
+                                     // ASSERT
+                                     return false;
+                                   },
+                                   [this](RoleReplicaData const &data) {
+                                     // Register handlers
+                                     InMemoryReplicationHandlers::Register(&dbms_handler_, *data.server);
+                                     if (!data.server->Start()) {
+                                       spdlog::error("Unable to start the replication server.");
+                                       return false;
+                                     }
+                                     return true;
+                                   }},
+                 repl_state_.ReplicationData());
+  // TODO Handle error (restore to main?)
+  return success;
+}
+
+auto ReplicationHandler::RegisterReplica(const memgraph::replication::ReplicationClientConfig &config)
+    -> memgraph::utils::BasicResult<RegisterReplicaError> {
+  MG_ASSERT(repl_state_.IsMain(), "Only main instance can register a replica!");
+
+  auto res = repl_state_.RegisterReplica(config);
+  switch (res) {
+    case memgraph::replication::RegisterReplicaError::NOT_MAIN:
+      MG_ASSERT(false, "Only main instance can register a replica!");
+      return {};
+    case memgraph::replication::RegisterReplicaError::NAME_EXISTS:
+      return memgraph::dbms::RegisterReplicaError::NAME_EXISTS;
+    case memgraph::replication::RegisterReplicaError::END_POINT_EXISTS:
+      return memgraph::dbms::RegisterReplicaError::END_POINT_EXISTS;
+    case memgraph::replication::RegisterReplicaError::COULD_NOT_BE_PERSISTED:
+      return memgraph::dbms::RegisterReplicaError::COULD_NOT_BE_PERSISTED;
+    case memgraph::replication::RegisterReplicaError::SUCCESS:
+      break;
+  }
+
+  bool all_clients_good = true;
+
+  if (!allow_mt_repl && dbms_handler_.All().size() > 1) {
+    spdlog::warn("Multi-tenant replication is currently not supported!");
+  }
+
+  dbms_handler_.ForEach([&](Database *db) {
+    auto *storage = db->storage();
+    if (!allow_mt_repl && storage->id() != kDefaultDB) {
+      return;
+    }
+    // TODO: ATM only IN_MEMORY_TRANSACTIONAL, fix other modes
+    if (storage->storage_mode_ != storage::StorageMode::IN_MEMORY_TRANSACTIONAL) return;
+
+    all_clients_good &=
+        storage->repl_storage_state_.replication_clients_.WithLock([storage, &config](auto &clients) -> bool {
+          auto client = storage->CreateReplicationClient(config, &storage->repl_storage_state_.epoch_);
+          client->Start();
+
+          if (client->State() == storage::replication::ReplicaState::INVALID) {
+            return false;
+          }
+          clients.push_back(std::move(client));
+          return true;
+        });
+  });
+  if (!all_clients_good) return RegisterReplicaError::CONNECTION_FAILED;  // TODO: this can fail for 1 or many storages; decide how to handle partial failure
+  return {};
+}
+
+auto ReplicationHandler::UnregisterReplica(std::string_view name) -> UnregisterReplicaResult {
+  auto const replica_handler = [](RoleReplicaData const &) -> UnregisterReplicaResult {
+    return UnregisterReplicaResult::NOT_MAIN;
+  };
+  auto const main_handler = [this, name](RoleMainData &mainData) -> UnregisterReplicaResult {
+    if (!repl_state_.TryPersistUnregisterReplica(name)) {
+      return UnregisterReplicaResult::COULD_NOT_BE_PERSISTED;
+    }
+    auto const n_unregistered =
+        std::erase_if(mainData.registered_replicas_,
+                      [&](ReplicationClientConfig const &registered_config) { return registered_config.name == name; });
+
+    dbms_handler_.ForEach([&](Database *db) {
+      db->storage()->repl_storage_state_.replication_clients_.WithLock(
+          [&](auto &clients) { std::erase_if(clients, [&](const auto &client) { return client->Name() == name; }); });
+    });
+
+    return n_unregistered != 0 ? UnregisterReplicaResult::SUCCESS : UnregisterReplicaResult::CAN_NOT_UNREGISTER;
+  };
+
+  return std::visit(utils::Overloaded{main_handler, replica_handler}, repl_state_.ReplicationData());
+}
+
+auto ReplicationHandler::GetRole() const -> memgraph::replication::ReplicationRole { return repl_state_.GetRole(); }
+
+bool ReplicationHandler::IsMain() const { return repl_state_.IsMain(); }
+
+bool ReplicationHandler::IsReplica() const { return repl_state_.IsReplica(); }
+
+void RestoreReplication(const replication::ReplicationState &repl_state, storage::Storage &storage) {
+  spdlog::info("Restoring replication role.");
+
+  /// MAIN
+  auto const recover_main = [&storage](RoleMainData const &mainData) {
+    for (const auto &config : mainData.registered_replicas_) {
+      spdlog::info("Replica {} restoration started for {}.", config.name, storage.id());
+
+      auto register_replica = [&storage](const memgraph::replication::ReplicationClientConfig &config)
+          -> memgraph::utils::BasicResult<RegisterReplicaError> {
+        return storage.repl_storage_state_.replication_clients_.WithLock(
+            [&storage, &config](auto &clients) -> utils::BasicResult<RegisterReplicaError> {
+              auto client = storage.CreateReplicationClient(config, &storage.repl_storage_state_.epoch_);
+              client->Start();
+
+              if (client->State() == storage::replication::ReplicaState::INVALID) {
+                spdlog::warn("Connection failed when registering replica {}. Replica will still be registered.",
+                             client->Name());
+              }
+              clients.push_back(std::move(client));
+              return {};
+            });
+      };
+
+      auto ret = register_replica(config);
+      if (ret.HasError()) {
+        MG_ASSERT(RegisterReplicaError::CONNECTION_FAILED != ret.GetError());
+        LOG_FATAL("Failure when restoring replica {}: {}.", config.name, RegisterReplicaErrorToString(ret.GetError()));
+      }
+      spdlog::info("Replica {} restored for {}.", config.name, storage.id());
+    }
+    spdlog::info("Replication role restored to MAIN.");
+  };
+
+  /// REPLICA
+  auto const recover_replica = [](RoleReplicaData const &data) { /*nothing to do*/ };
+
+  std::visit(
+      utils::Overloaded{
+          recover_main,
+          recover_replica,
+      },
+      std::as_const(repl_state).ReplicationData());
+}
+}  // namespace memgraph::dbms
diff --git a/src/storage/v2/replication/replication_handler.hpp b/src/dbms/replication_handler.hpp
similarity index 77%
rename from src/storage/v2/replication/replication_handler.hpp
rename to src/dbms/replication_handler.hpp
index 797f76b54..e50c47969 100644
--- a/src/storage/v2/replication/replication_handler.hpp
+++ b/src/dbms/replication_handler.hpp
@@ -12,6 +12,7 @@
 #pragma once
 
 #include "replication/role.hpp"
+#include "storage/v2/storage.hpp"
 #include "utils/result.hpp"
 
 // BEGIN fwd declares
@@ -20,14 +21,10 @@ struct ReplicationState;
 struct ReplicationServerConfig;
 struct ReplicationClientConfig;
 }  // namespace memgraph::replication
-namespace memgraph::storage {
-class Storage;
-}
-// END fwd declares
 
-namespace memgraph::storage {
+namespace memgraph::dbms {
+class DbmsHandler;
 
-enum class RegistrationMode : std::uint8_t { MUST_BE_INSTANTLY_VALID, RESTORE };
 enum class RegisterReplicaError : uint8_t { NAME_EXISTS, END_POINT_EXISTS, CONNECTION_FAILED, COULD_NOT_BE_PERSISTED };
 enum class UnregisterReplicaResult : uint8_t {
   NOT_MAIN,
@@ -39,8 +36,8 @@ enum class UnregisterReplicaResult : uint8_t {
 /// A handler type that keep in sync current ReplicationState and the MAIN/REPLICA-ness of Storage
 /// TODO: extend to do multiple storages
 struct ReplicationHandler {
-  ReplicationHandler(memgraph::replication::ReplicationState &replState, Storage &storage)
-      : repl_state_(replState), storage_(storage) {}
+  ReplicationHandler(memgraph::replication::ReplicationState &replState, DbmsHandler &dbms_handler)
+      : repl_state_(replState), dbms_handler_(dbms_handler) {}
 
   // as REPLICA, become MAIN
   bool SetReplicationRoleMain();
@@ -49,16 +46,12 @@ struct ReplicationHandler {
   bool SetReplicationRoleReplica(const memgraph::replication::ReplicationServerConfig &config);
 
   // as MAIN, define and connect to REPLICAs
-  auto RegisterReplica(RegistrationMode registration_mode, const memgraph::replication::ReplicationClientConfig &config)
+  auto RegisterReplica(const memgraph::replication::ReplicationClientConfig &config)
       -> utils::BasicResult<RegisterReplicaError>;
 
   // as MAIN, remove a REPLICA connection
   auto UnregisterReplica(std::string_view name) -> UnregisterReplicaResult;
 
-  // Generic restoration
-  // TODO: decouple storage restoration from epoch restoration
-  void RestoreReplication();
-
   // Helper pass-through (TODO: remove)
   auto GetRole() const -> memgraph::replication::ReplicationRole;
   bool IsMain() const;
@@ -66,6 +59,11 @@ struct ReplicationHandler {
 
  private:
   memgraph::replication::ReplicationState &repl_state_;
-  Storage &storage_;
+  DbmsHandler &dbms_handler_;
 };
-}  // namespace memgraph::storage
+
+/// Restores the replication clients of a single storage from the persisted ReplicationState.
+/// TODO: decouple storage restoration from epoch restoration
+void RestoreReplication(const replication::ReplicationState &repl_state, storage::Storage &storage);
+
+}  // namespace memgraph::dbms
diff --git a/src/glue/CMakeLists.txt b/src/glue/CMakeLists.txt
index 5940e607c..da287179f 100644
--- a/src/glue/CMakeLists.txt
+++ b/src/glue/CMakeLists.txt
@@ -8,4 +8,3 @@ target_sources(mg-glue PRIVATE auth.cpp
                                MonitoringServerT.cpp
                                run_id.cpp)
 target_link_libraries(mg-glue mg-query mg-auth mg-audit mg-flags)
-target_precompile_headers(mg-glue INTERFACE auth_checker.hpp auth_handler.hpp)
diff --git a/src/glue/run_id.hpp b/src/glue/run_id.hpp
index 6616c49bd..fc003ff35 100644
--- a/src/glue/run_id.hpp
+++ b/src/glue/run_id.hpp
@@ -11,6 +11,8 @@
 
 #pragma once
 
+#include <string>
+
 namespace memgraph::glue {
 extern const std::string run_id_;
 }  // namespace memgraph::glue
diff --git a/src/license/license_sender.cpp b/src/license/license_sender.cpp
index 1feabbb48..c2149d9cf 100644
--- a/src/license/license_sender.cpp
+++ b/src/license/license_sender.cpp
@@ -52,7 +52,7 @@ void LicenseInfoSender::SendData() {
               {"valid", license_info->is_valid},
               {"physical_memory_size", memory_info.memory},
               {"swap_memory_size", memory_info.swap},
-              {"memory_usage", memory_res},
+              {"memory_used", memory_res},
               {"runtime_memory_limit", memory_limit_},
               {"license_memory_limit", license_info->license.memory_limit},
               {"timestamp", utils::Timestamp::Now().SecWithNsecSinceTheEpoch()}};
diff --git a/src/memgraph.cpp b/src/memgraph.cpp
index dc4b21577..983dd61f9 100644
--- a/src/memgraph.cpp
+++ b/src/memgraph.cpp
@@ -9,11 +9,13 @@
 // by the Apache License, Version 2.0, included in the file
 // licenses/APL.txt.
 
+#include <cstdint>
 #include "audit/log.hpp"
 #include "communication/metrics.hpp"
 #include "communication/websocket/auth.hpp"
 #include "communication/websocket/server.hpp"
 #include "dbms/constants.hpp"
+#include "dbms/inmemory/replication_handlers.hpp"
 #include "flags/all.hpp"
 #include "flags/run_time_configurable.hpp"
 #include "glue/MonitoringServerT.hpp"
@@ -43,9 +45,11 @@
 #include "query/auth_query_handler.hpp"
 #include "query/interpreter_context.hpp"
 
+namespace {
 constexpr const char *kMgUser = "MEMGRAPH_USER";
 constexpr const char *kMgPassword = "MEMGRAPH_PASSWORD";
 constexpr const char *kMgPassfile = "MEMGRAPH_PASSFILE";
+constexpr uint64_t kMgVmMaxMapCount = 262144;
 
 // TODO: move elsewhere so that we can remove need of interpreter.hpp
 void InitFromCypherlFile(memgraph::query::InterpreterContext &ctx, memgraph::dbms::DatabaseAccess &db_acc,
@@ -107,6 +111,7 @@ void InitSignalHandlers(const std::function<void()> &shutdown_fun) {
                                                             block_shutdown_signals),
             "Unable to register SIGINT handler!");
 }
+}  // namespace
 
 int main(int argc, char **argv) {
   memgraph::memory::SetHooks();
@@ -204,6 +209,17 @@ int main(int argc, char **argv) {
   std::cout << "You are running Memgraph v" << gflags::VersionString() << std::endl;
   std::cout << "To get started with Memgraph, visit https://memgr.ph/start" << std::endl;
 
+  const auto vm_max_map_count = memgraph::utils::GetVmMaxMapCount();
+  if (vm_max_map_count.has_value()) {
+    if (vm_max_map_count.value() < kMgVmMaxMapCount) {
+      std::cout << "Max virtual memory areas vm.max_map_count " << vm_max_map_count.value()
+                << " is too low, increase to at least " << kMgVmMaxMapCount << std::endl;
+    }
+  } else {
+    std::cout << "Can't get info on vm.max_map_count, check whether it is too low, vm.max_map_count is at least "
+              << kMgVmMaxMapCount << std::endl;
+  }
+
   auto data_directory = std::filesystem::path(FLAGS_data_directory);
 
   memgraph::utils::EnsureDirOrDie(data_directory);
@@ -352,22 +368,35 @@ int main(int argc, char **argv) {
   std::unique_ptr<memgraph::query::AuthChecker> auth_checker;
   auth_glue(&auth_, auth_handler, auth_checker);
 
+  memgraph::replication::ReplicationState repl_state(ReplicationStateRootPath(db_config));
+
+  memgraph::dbms::DbmsHandler dbms_handler(db_config, repl_state
 #ifdef MG_ENTERPRISE
-  memgraph::dbms::DbmsHandler new_handler(db_config, &auth_, FLAGS_data_recovery_on_startup,
-                                          FLAGS_storage_delete_on_drop);
-  auto db_acc = new_handler.Get(memgraph::dbms::kDefaultDB);
-  memgraph::query::InterpreterContext interpreter_context_(interp_config, &new_handler, auth_handler.get(),
-                                                           auth_checker.get());
-#else
-  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gatekeeper{db_config};
-  auto db_acc_opt = db_gatekeeper.access();
-  MG_ASSERT(db_acc_opt, "Failed to access the main database");
-  auto &db_acc = *db_acc_opt;
-  memgraph::query::InterpreterContext interpreter_context_(interp_config, &db_gatekeeper, auth_handler.get(),
-                                                           auth_checker.get());
+                                           ,
+                                           &auth_, FLAGS_data_recovery_on_startup, FLAGS_storage_delete_on_drop
 #endif
+  );
+  auto db_acc = dbms_handler.Get();
+  memgraph::query::InterpreterContext interpreter_context_(interp_config, &dbms_handler, &repl_state,
+                                                           auth_handler.get(), auth_checker.get());
   MG_ASSERT(db_acc, "Failed to access the main database");
 
+  // TODO: Move it somewhere better
+  // Startup replication state (if recovered at startup)
+  MG_ASSERT(std::visit(memgraph::utils::Overloaded{[](memgraph::replication::RoleMainData const &) { return true; },
+                                                   [&](memgraph::replication::RoleReplicaData const &data) {
+                                                     // Register handlers
+                                                     memgraph::dbms::InMemoryReplicationHandlers::Register(
+                                                         &dbms_handler, *data.server);
+                                                     if (!data.server->Start()) {
+                                                       spdlog::error("Unable to start the replication server.");
+                                                       return false;
+                                                     }
+                                                     return true;
+                                                   }},
+                       repl_state.ReplicationData()),
+            "Replica recovery failure!");
+
   memgraph::query::procedure::gModuleRegistry.SetModulesDirectory(memgraph::flags::ParseQueryModulesDirectory(),
                                                                   FLAGS_data_directory);
   memgraph::query::procedure::gModuleRegistry.UnloadAndLoadModulesFromDirectories();
@@ -388,8 +417,8 @@ int main(int argc, char **argv) {
   }
 
 #ifdef MG_ENTERPRISE
-  new_handler.RestoreTriggers(&interpreter_context_);
-  new_handler.RestoreStreams(&interpreter_context_);
+  dbms_handler.RestoreTriggers(&interpreter_context_);
+  dbms_handler.RestoreStreams(&interpreter_context_);
 #else
   {
     // Triggers can execute query procedures, so we need to reload the modules first and then
@@ -432,11 +461,10 @@ int main(int argc, char **argv) {
   if (FLAGS_telemetry_enabled) {
     telemetry.emplace(telemetry_server, data_directory / "telemetry", memgraph::glue::run_id_, machine_id,
                       service_name == "BoltS", FLAGS_data_directory, std::chrono::minutes(10));
+    telemetry->AddStorageCollector(dbms_handler, auth_);
 #ifdef MG_ENTERPRISE
-    telemetry->AddStorageCollector(new_handler, auth_);
-    telemetry->AddDatabaseCollector(new_handler);
+    telemetry->AddDatabaseCollector(dbms_handler);
 #else
-    telemetry->AddStorageCollector(db_gatekeeper, auth_);
     telemetry->AddDatabaseCollector();
 #endif
     telemetry->AddClientCollector();
@@ -496,17 +524,16 @@ int main(int argc, char **argv) {
 
   if (!FLAGS_init_data_file.empty()) {
     spdlog::info("Running init data file.");
+    auto db_acc = dbms_handler.Get();
+    MG_ASSERT(db_acc, "Failed to gain access to the main database");
 #ifdef MG_ENTERPRISE
-    auto db_acc = new_handler.Get(memgraph::dbms::kDefaultDB);
     if (memgraph::license::global_license_checker.IsEnterpriseValidFast()) {
       InitFromCypherlFile(interpreter_context_, db_acc, FLAGS_init_data_file, &audit_log);
     } else {
       InitFromCypherlFile(interpreter_context_, db_acc, FLAGS_init_data_file);
     }
 #else
-    auto db_acc_2 = db_gatekeeper.access();
-    MG_ASSERT(db_acc_2, "Failed to gain access to the main database");
-    InitFromCypherlFile(interpreter_context_, *db_acc_2, FLAGS_init_data_file);
+    InitFromCypherlFile(interpreter_context_, db_acc, FLAGS_init_data_file);
 #endif
   }
 
diff --git a/src/mg_import_csv.cpp b/src/mg_import_csv.cpp
index 11f71bb41..e0a82584b 100644
--- a/src/mg_import_csv.cpp
+++ b/src/mg_import_csv.cpp
@@ -19,7 +19,9 @@
 #include <regex>
 #include <unordered_map>
 
+#include "dbms/inmemory/storage_helper.hpp"
 #include "helpers.hpp"
+#include "replication/state.hpp"
 #include "storage/v2/config.hpp"
 #include "storage/v2/edge_accessor.hpp"
 #include "storage/v2/inmemory/storage.hpp"
@@ -702,14 +704,16 @@ int main(int argc, char *argv[]) {
   }
 
   std::unordered_map<NodeId, memgraph::storage::Gid> node_id_map;
-  auto store = std::make_unique<memgraph::storage::InMemoryStorage>(memgraph::storage::Config{
+  memgraph::storage::Config config{
 
       .items = {.properties_on_edges = FLAGS_storage_properties_on_edges},
       .durability = {.storage_directory = FLAGS_data_directory,
                      .recover_on_startup = false,
                      .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::DISABLED,
                      .snapshot_on_exit = true},
-  });
+  };
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  auto store = memgraph::dbms::CreateInMemoryStorage(config, repl_state);
 
   memgraph::utils::Timer load_timer;
 
diff --git a/src/query/interpreter.cpp b/src/query/interpreter.cpp
index c3e2a9ac8..1d854765a 100644
--- a/src/query/interpreter.cpp
+++ b/src/query/interpreter.cpp
@@ -37,6 +37,7 @@
 #include "dbms/database.hpp"
 #include "dbms/dbms_handler.hpp"
 #include "dbms/global.hpp"
+#include "dbms/inmemory/storage_helper.hpp"
 #include "flags/run_time_configurable.hpp"
 #include "glue/communication.hpp"
 #include "license/license.hpp"
@@ -94,16 +95,17 @@
 #include "utils/on_scope_exit.hpp"
 #include "utils/readable_size.hpp"
 #include "utils/settings.hpp"
+#include "utils/stat.hpp"
 #include "utils/string.hpp"
 #include "utils/tsc.hpp"
 #include "utils/typeinfo.hpp"
 #include "utils/variant_helpers.hpp"
 
 #include "dbms/dbms_handler.hpp"
+#include "dbms/replication_handler.hpp"
 #include "query/auth_query_handler.hpp"
 #include "query/interpreter_context.hpp"
 #include "replication/state.hpp"
-#include "storage/v2/replication/replication_handler.hpp"
 
 namespace memgraph::metrics {
 extern Event ReadQuery;
@@ -270,7 +272,8 @@ inline auto convertToReplicationMode(const ReplicationQuery::SyncMode &sync_mode
 
 class ReplQueryHandler final : public query::ReplicationQueryHandler {
  public:
-  explicit ReplQueryHandler(storage::Storage *db) : db_(db), handler_{db_->repl_state_, *db_} {}
+  explicit ReplQueryHandler(dbms::DbmsHandler *dbms_handler, memgraph::replication::ReplicationState *repl_state)
+      : dbms_handler_(dbms_handler), handler_{*repl_state, *dbms_handler} {}
 
   /// @throw QueryRuntimeException if an error ocurred.
   void SetReplicationRole(ReplicationQuery::ReplicationRole replication_role, std::optional<int64_t> port) override {
@@ -314,10 +317,6 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler {
       throw QueryRuntimeException("Replica can't register another replica!");
     }
 
-    if (name == memgraph::replication::kReservedReplicationRoleName) {
-      throw QueryRuntimeException("This replica name is reserved and can not be used as replica name!");
-    }
-
     auto repl_mode = convertToReplicationMode(sync_mode);
 
     auto maybe_ip_and_port =
@@ -330,8 +329,7 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler {
                                                          .port = port,
                                                          .replica_check_frequency = replica_check_frequency,
                                                          .ssl = std::nullopt};
-      using storage::RegistrationMode;
-      auto ret = handler_.RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID, config);
+      auto ret = handler_.RegisterReplica(config);
       if (ret.HasError()) {
         throw QueryRuntimeException(fmt::format("Couldn't register replica '{}'!", name));
       }
@@ -344,7 +342,7 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler {
   void DropReplica(std::string_view replica_name) override {
     auto const result = handler_.UnregisterReplica(replica_name);
     switch (result) {
-      using enum memgraph::storage::UnregisterReplicaResult;
+      using enum memgraph::dbms::UnregisterReplicaResult;
       case NOT_MAIN:
         throw QueryRuntimeException("Replica can't unregister a replica!");
       case COULD_NOT_BE_PERSISTED:
@@ -358,13 +356,22 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler {
 
   using Replica = ReplicationQueryHandler::Replica;
   std::vector<Replica> ShowReplicas() const override {
-    auto const &replState = db_->repl_state_;
-    if (replState.IsReplica()) {
+    if (handler_.IsReplica()) {
       // replica can't show registered replicas (it shouldn't have any)
       throw QueryRuntimeException("Replica can't show registered replicas (it shouldn't have any)!");
     }
 
-    auto repl_infos = db_->ReplicasInfo();
+    // TODO: Combine results? Have a single place with clients???
+    //       Also authentication checks (replica + database visibility)
+    std::vector<storage::ReplicaInfo> repl_infos{};
+    dbms_handler_->ForOne([&repl_infos](dbms::Database *db) -> bool {
+      auto infos = db->storage()->ReplicasInfo();
+      if (!infos.empty()) {
+        repl_infos = std::move(infos);
+        return true;
+      }
+      return false;
+    });
     std::vector<Replica> replicas;
     replicas.reserve(repl_infos.size());
 
@@ -408,8 +415,8 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler {
   }
 
  private:
-  storage::Storage *db_;
-  storage::ReplicationHandler handler_;
+  dbms::DbmsHandler *dbms_handler_;
+  dbms::ReplicationHandler handler_;
 };
 
 /// returns false if the replication role can't be set
@@ -418,7 +425,7 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler {
 Callback HandleAuthQuery(AuthQuery *auth_query, InterpreterContext *interpreter_context, const Parameters &parameters) {
   AuthQueryHandler *auth = interpreter_context->auth;
 #ifdef MG_ENTERPRISE
-  auto *db_handler = interpreter_context->db_handler;
+  auto *db_handler = interpreter_context->dbms_handler;
 #endif
   // TODO: MemoryResource for EvaluationContext, it should probably be passed as
   // the argument to Callback.
@@ -702,8 +709,10 @@ Callback HandleAuthQuery(AuthQuery *auth_query, InterpreterContext *interpreter_
   }
 }  // namespace
 
-Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &parameters, storage::Storage *storage,
-                                const query::InterpreterConfig &config, std::vector<Notification> *notifications) {
+Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &parameters,
+                                dbms::DbmsHandler *dbms_handler, const query::InterpreterConfig &config,
+                                std::vector<Notification> *notifications,
+                                memgraph::replication::ReplicationState *repl_state) {
   // TODO: MemoryResource for EvaluationContext, it should probably be passed as
   // the argument to Callback.
   EvaluationContext evaluation_context;
@@ -723,7 +732,8 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
         notifications->emplace_back(SeverityLevel::WARNING, NotificationCode::REPLICA_PORT_WARNING,
                                     "Be careful the replication port must be different from the memgraph port!");
       }
-      callback.fn = [handler = ReplQueryHandler{storage}, role = repl_query->role_, maybe_port]() mutable {
+      callback.fn = [handler = ReplQueryHandler{dbms_handler, repl_state}, role = repl_query->role_,
+                     maybe_port]() mutable {
         handler.SetReplicationRole(role, maybe_port);
         return std::vector<std::vector<TypedValue>>();
       };
@@ -735,7 +745,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
     }
     case ReplicationQuery::Action::SHOW_REPLICATION_ROLE: {
       callback.header = {"replication role"};
-      callback.fn = [handler = ReplQueryHandler{storage}] {
+      callback.fn = [handler = ReplQueryHandler{dbms_handler, repl_state}] {
         auto mode = handler.ShowReplicationRole();
         switch (mode) {
           case ReplicationQuery::ReplicationRole::MAIN: {
@@ -754,7 +764,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
       auto socket_address = repl_query->socket_address_->Accept(evaluator);
       const auto replica_check_frequency = config.replication_replica_check_frequency;
 
-      callback.fn = [handler = ReplQueryHandler{storage}, name, socket_address, sync_mode,
+      callback.fn = [handler = ReplQueryHandler{dbms_handler, repl_state}, name, socket_address, sync_mode,
                      replica_check_frequency]() mutable {
         handler.RegisterReplica(name, std::string(socket_address.ValueString()), sync_mode, replica_check_frequency);
         return std::vector<std::vector<TypedValue>>();
@@ -765,7 +775,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
     }
     case ReplicationQuery::Action::DROP_REPLICA: {
       const auto &name = repl_query->replica_name_;
-      callback.fn = [handler = ReplQueryHandler{storage}, name]() mutable {
+      callback.fn = [handler = ReplQueryHandler{dbms_handler, repl_state}, name]() mutable {
         handler.DropReplica(name);
         return std::vector<std::vector<TypedValue>>();
       };
@@ -777,7 +787,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
       callback.header = {
           "name", "socket_address", "sync_mode", "current_timestamp_of_replica", "number_of_timestamp_behind_master",
           "state"};
-      callback.fn = [handler = ReplQueryHandler{storage}, replica_nfields = callback.header.size()] {
+      callback.fn = [handler = ReplQueryHandler{dbms_handler, repl_state}, replica_nfields = callback.header.size()] {
         const auto &replicas = handler.ShowReplicas();
         auto typed_replicas = std::vector<std::vector<TypedValue>>{};
         typed_replicas.reserve(replicas.size());
@@ -1399,42 +1409,16 @@ std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::Pull(AnyStream *strea
 
 using RWType = plan::ReadWriteTypeChecker::RWType;
 
-bool IsWriteQueryOnMainMemoryReplica(storage::Storage *storage,
-                                     const query::plan::ReadWriteTypeChecker::RWType query_type) {
-  if (auto storage_mode = storage->GetStorageMode(); storage_mode == storage::StorageMode::IN_MEMORY_ANALYTICAL ||
-                                                     storage_mode == storage::StorageMode::IN_MEMORY_TRANSACTIONAL) {
-    auto const &replState = storage->repl_state_;
-    return replState.IsReplica() && (query_type == RWType::W || query_type == RWType::RW);
-  }
-  return false;
-}
-
-bool IsReplica(storage::Storage *storage) {
-  if (auto storage_mode = storage->GetStorageMode(); storage_mode == storage::StorageMode::IN_MEMORY_ANALYTICAL ||
-                                                     storage_mode == storage::StorageMode::IN_MEMORY_TRANSACTIONAL) {
-    auto const &replState = storage->repl_state_;
-    return replState.IsReplica();
-  }
-  return false;
+bool IsQueryWrite(const query::plan::ReadWriteTypeChecker::RWType query_type) {
+  return query_type == RWType::W || query_type == RWType::RW;
 }
 
 }  // namespace
 
-#ifdef MG_ENTERPRISE
-InterpreterContext::InterpreterContext(InterpreterConfig interpreter_config, memgraph::dbms::DbmsHandler *handler,
-                                       query::AuthQueryHandler *ah, query::AuthChecker *ac)
-    : db_handler(handler), config(interpreter_config), auth(ah), auth_checker(ac) {}
-#else
-InterpreterContext::InterpreterContext(InterpreterConfig interpreter_config,
-                                       memgraph::utils::Gatekeeper<memgraph::dbms::Database> *db_gatekeeper,
-                                       query::AuthQueryHandler *ah, query::AuthChecker *ac)
-    : db_gatekeeper(db_gatekeeper), config(interpreter_config), auth(ah), auth_checker(ac) {}
-#endif
-
 Interpreter::Interpreter(InterpreterContext *interpreter_context) : interpreter_context_(interpreter_context) {
   MG_ASSERT(interpreter_context_, "Interpreter context must not be NULL");
 #ifndef MG_ENTERPRISE
-  auto db_acc = interpreter_context_->db_gatekeeper->access();
+  auto db_acc = interpreter_context_->dbms_handler->Get();
   MG_ASSERT(db_acc, "Database accessor needs to be valid");
   current_db_.db_acc_ = std::move(db_acc);
 #endif
@@ -2284,21 +2268,16 @@ PreparedQuery PrepareAuthQuery(ParsedQuery parsed_query, bool in_explicit_transa
 }
 
 PreparedQuery PrepareReplicationQuery(ParsedQuery parsed_query, bool in_explicit_transaction,
-                                      std::vector<Notification> *notifications, CurrentDB &current_db,
-                                      const InterpreterConfig &config) {
+                                      std::vector<Notification> *notifications, dbms::DbmsHandler &dbms_handler,
+                                      const InterpreterConfig &config,
+                                      memgraph::replication::ReplicationState *repl_state) {
   if (in_explicit_transaction) {
     throw ReplicationModificationInMulticommandTxException();
   }
 
-  MG_ASSERT(current_db.db_acc_, "Replication query expects a current DB");
-  storage::Storage *storage = current_db.db_acc_->get()->storage();
-
-  if (storage->GetStorageMode() == storage::StorageMode::ON_DISK_TRANSACTIONAL) {
-    throw ReplicationDisabledOnDiskStorage();
-  }
-
   auto *replication_query = utils::Downcast<ReplicationQuery>(parsed_query.query);
-  auto callback = HandleReplicationQuery(replication_query, parsed_query.parameters, storage, config, notifications);
+  auto callback = HandleReplicationQuery(replication_query, parsed_query.parameters, &dbms_handler, config,
+                                         notifications, repl_state);
 
   return PreparedQuery{callback.header, std::move(parsed_query.required_privileges),
                        [callback_fn = std::move(callback.fn), pull_plan = std::shared_ptr<PullPlanVector>{nullptr}](
@@ -2771,7 +2750,8 @@ PreparedQuery PrepareStorageModeQuery(ParsedQuery parsed_query, const bool in_ex
           "transactions using 'SHOW TRANSACTIONS' query and ensure no other transactions are active.");
     }
 
-    callback = [requested_mode, storage = db_acc->storage()]() -> std::function<void()> {
+    callback = [requested_mode,
+                storage = static_cast<storage::InMemoryStorage *>(db_acc->storage())]() -> std::function<void()> {
       // SetStorageMode will probably be handled at the Database level
       return [storage, requested_mode] { storage->SetStorageMode(requested_mode); };
     }();
@@ -2834,15 +2814,11 @@ PreparedQuery PrepareCreateSnapshotQuery(ParsedQuery parsed_query, bool in_expli
       std::move(parsed_query.required_privileges),
       [storage](AnyStream * /*stream*/, std::optional<int> /*n*/) -> std::optional<QueryHandlerResult> {
         auto *mem_storage = static_cast<storage::InMemoryStorage *>(storage);
-        if (auto maybe_error = mem_storage->CreateSnapshot(storage->repl_state_, {}); maybe_error.HasError()) {
+        if (auto maybe_error = mem_storage->CreateSnapshot(); maybe_error.HasError()) {
           switch (maybe_error.GetError()) {
             case storage::InMemoryStorage::CreateSnapshotError::DisabledForReplica:
               throw utils::BasicException(
                   "Failed to create a snapshot. Replica instances are not allowed to create them.");
-            case storage::InMemoryStorage::CreateSnapshotError::DisabledForAnalyticsPeriodicCommit:
-              spdlog::warn(utils::MessageWithLink("Periodic snapshots are disabled for analytical mode.",
-                                                  "https://memgr.ph/replication"));
-              break;
             case storage::InMemoryStorage::CreateSnapshotError::ReachedMaxNumTries:
               spdlog::warn("Failed to create snapshot. Reached max number of tries. Please contact support");
               break;
@@ -3146,11 +3122,15 @@ PreparedQuery PrepareSystemInfoQuery(ParsedQuery parsed_query, bool in_explicit_
       handler = [storage = current_db.db_acc_->get()->storage(), interpreter_isolation_level,
                  next_transaction_isolation_level] {
         auto info = storage->GetBaseInfo();
+        const auto vm_max_map_count = utils::GetVmMaxMapCount();
+        const int64_t vm_max_map_count_storage_info =
+            vm_max_map_count.has_value() ? vm_max_map_count.value() : memgraph::utils::VM_MAX_MAP_COUNT_DEFAULT;
         std::vector<std::vector<TypedValue>> results{
             {TypedValue("name"), TypedValue(storage->id())},
             {TypedValue("vertex_count"), TypedValue(static_cast<int64_t>(info.vertex_count))},
             {TypedValue("edge_count"), TypedValue(static_cast<int64_t>(info.edge_count))},
             {TypedValue("average_degree"), TypedValue(info.average_degree)},
+            {TypedValue("vm_max_map_count"), TypedValue(vm_max_map_count_storage_info)},
             {TypedValue("memory_res"), TypedValue(utils::GetReadableSize(static_cast<double>(info.memory_res)))},
             {TypedValue("disk_usage"), TypedValue(utils::GetReadableSize(static_cast<double>(info.disk_usage)))},
             {TypedValue("memory_tracked"),
@@ -3407,22 +3387,23 @@ PreparedQuery PrepareConstraintQuery(ParsedQuery parsed_query, bool in_explicit_
 
 PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &current_db,
                                         InterpreterContext *interpreter_context,
-                                        std::optional<std::function<void(std::string_view)>> on_change_cb) {
+                                        std::optional<std::function<void(std::string_view)>> on_change_cb,
+                                        memgraph::replication::ReplicationState *repl_state) {
 #ifdef MG_ENTERPRISE
   if (!license::global_license_checker.IsEnterpriseValidFast()) {
     throw QueryException("Trying to use enterprise feature without a valid license.");
   }
   // TODO: Remove once replicas support multi-tenant replication
   if (!current_db.db_acc_) throw DatabaseContextRequiredException("Multi database queries require a defined database.");
-  if (IsReplica(current_db.db_acc_->get()->storage())) {
-    throw QueryException("Query forbidden on the replica!");
-  }
 
   auto *query = utils::Downcast<MultiDatabaseQuery>(parsed_query.query);
-  auto *db_handler = interpreter_context->db_handler;
+  auto *db_handler = interpreter_context->dbms_handler;
 
   switch (query->action_) {
     case MultiDatabaseQuery::Action::CREATE:
+      if (repl_state->IsReplica()) {
+        throw QueryException("Query forbidden on the replica!");
+      }
       return PreparedQuery{
           {"STATUS"},
           std::move(parsed_query.required_privileges),
@@ -3465,6 +3446,9 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur
       if (current_db.in_explicit_db_) {
         throw QueryException("Database switching is prohibited if session explicitly defines the used database");
       }
+      if (!dbms::allow_mt_repl && repl_state->IsReplica()) {
+        throw QueryException("Query forbidden on the replica!");
+      }
       return PreparedQuery{{"STATUS"},
                            std::move(parsed_query.required_privileges),
                            [db_name = query->db_name_, db_handler, &current_db, on_change_cb](
@@ -3496,6 +3480,9 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur
                            query->db_name_};
 
     case MultiDatabaseQuery::Action::DROP:
+      if (repl_state->IsReplica()) {
+        throw QueryException("Query forbidden on the replica!");
+      }
       return PreparedQuery{
           {"STATUS"},
           std::move(parsed_query.required_privileges),
@@ -3503,11 +3490,9 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur
               AnyStream *stream, std::optional<int> n) -> std::optional<QueryHandlerResult> {
             std::vector<std::vector<TypedValue>> status;
 
-            memgraph::dbms::DeleteResult success{};
-
             try {
               // Remove database
-              success = db_handler->Delete(db_name);
+              auto success = db_handler->Delete(db_name);
               if (!success.HasError()) {
                 // Remove from auth
                 auth->DeleteDatabase(db_name);
@@ -3556,14 +3541,9 @@ PreparedQuery PrepareShowDatabasesQuery(ParsedQuery parsed_query, CurrentDB &cur
   if (!license::global_license_checker.IsEnterpriseValidFast()) {
     throw QueryException("Trying to use enterprise feature without a valid license.");
   }
-  // TODO: Remove once replicas support multi-tenant replication
-  auto &replState = storage->repl_state_;
-  if (replState.IsReplica()) {
-    throw QueryException("SHOW DATABASES forbidden on the replica!");
-  }
 
   // TODO pick directly from ic
-  auto *db_handler = interpreter_context->db_handler;
+  auto *db_handler = interpreter_context->dbms_handler;
   AuthQueryHandler *auth = interpreter_context->auth;
 
   Callback callback;
@@ -3667,7 +3647,7 @@ void Interpreter::RollbackTransaction() {
 void Interpreter::SetCurrentDB(std::string_view db_name, bool in_explicit_db) {
   // Can throw
   // do we lock here?
-  current_db_.SetCurrentDB(interpreter_context_->db_handler->Get(db_name), in_explicit_db);
+  current_db_.SetCurrentDB(interpreter_context_->dbms_handler->Get(db_name), in_explicit_db);
 }
 #endif
 
@@ -3822,9 +3802,9 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
                                               &query_execution->notifications, current_db_);
     } else if (utils::Downcast<ReplicationQuery>(parsed_query.query)) {
       /// TODO: make replication DB agnostic
-      prepared_query =
-          PrepareReplicationQuery(std::move(parsed_query), in_explicit_transaction_, &query_execution->notifications,
-                                  current_db_, interpreter_context_->config);
+      prepared_query = PrepareReplicationQuery(std::move(parsed_query), in_explicit_transaction_,
+                                               &query_execution->notifications, *interpreter_context_->dbms_handler,
+                                               interpreter_context_->config, interpreter_context_->repl_state);
     } else if (utils::Downcast<LockPathQuery>(parsed_query.query)) {
       prepared_query = PrepareLockPathQuery(std::move(parsed_query), in_explicit_transaction_, current_db_);
     } else if (utils::Downcast<FreeMemoryQuery>(parsed_query.query)) {
@@ -3864,8 +3844,8 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
         throw MultiDatabaseQueryInMulticommandTxException();
       }
       /// SYSTEM (Replication) + INTERPRETER
-      prepared_query =
-          PrepareMultiDatabaseQuery(std::move(parsed_query), current_db_, interpreter_context_, on_change_);
+      prepared_query = PrepareMultiDatabaseQuery(std::move(parsed_query), current_db_, interpreter_context_, on_change_,
+                                                 interpreter_context_->repl_state);
     } else if (utils::Downcast<ShowDatabasesQuery>(parsed_query.query)) {
       /// SYSTEM PURE ("SHOW DATABASES")
       /// INTERPRETER (TODO: "SHOW DATABASE")
@@ -3887,7 +3867,7 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
 
     UpdateTypeCount(rw_type);
 
-    if (IsWriteQueryOnMainMemoryReplica(current_db_.db_acc_->get()->storage(), rw_type)) {
+    if (interpreter_context_->repl_state->IsReplica() && IsQueryWrite(rw_type)) {
       query_execution = nullptr;
       throw QueryException("Write query forbidden on the replica!");
     }
@@ -4116,7 +4096,8 @@ void Interpreter::Commit() {
 
   auto commit_confirmed_by_all_sync_repplicas = true;
 
-  auto maybe_commit_error = current_db_.db_transactional_accessor_->Commit();
+  auto maybe_commit_error =
+      current_db_.db_transactional_accessor_->Commit(std::nullopt, interpreter_context_->repl_state->IsMain());
   if (maybe_commit_error.HasError()) {
     const auto &error = maybe_commit_error.GetError();
 
diff --git a/src/query/interpreter_context.cpp b/src/query/interpreter_context.cpp
index c65b43505..75d734645 100644
--- a/src/query/interpreter_context.cpp
+++ b/src/query/interpreter_context.cpp
@@ -13,6 +13,12 @@
 
 #include "query/interpreter.hpp"
 namespace memgraph::query {
+
+InterpreterContext::InterpreterContext(InterpreterConfig interpreter_config, dbms::DbmsHandler *dbms_handler,
+                                       replication::ReplicationState *rs, query::AuthQueryHandler *ah,
+                                       query::AuthChecker *ac)
+    : dbms_handler(dbms_handler), config(interpreter_config), repl_state(rs), auth(ah), auth_checker(ac) {}
+
 std::vector<std::vector<TypedValue>> InterpreterContext::TerminateTransactions(
     std::vector<std::string> maybe_kill_transaction_ids, const std::optional<std::string> &username,
     std::function<bool(std::string const &)> privilege_checker) {
diff --git a/src/query/interpreter_context.hpp b/src/query/interpreter_context.hpp
index 829fe9c31..af8648376 100644
--- a/src/query/interpreter_context.hpp
+++ b/src/query/interpreter_context.hpp
@@ -21,17 +21,14 @@
 #include "query/config.hpp"
 #include "query/cypher_query_interpreter.hpp"
 #include "query/typed_value.hpp"
+#include "replication/state.hpp"
 #include "utils/gatekeeper.hpp"
 #include "utils/skip_list.hpp"
 #include "utils/spin_lock.hpp"
 #include "utils/synchronized.hpp"
 
 namespace memgraph::dbms {
-#ifdef MG_ENTERPRISE
 class DbmsHandler;
-#else
-class Database;
-#endif
 }  // namespace memgraph::dbms
 
 namespace memgraph::query {
@@ -48,20 +45,10 @@ class Interpreter;
  *
  */
 struct InterpreterContext {
-#ifdef MG_ENTERPRISE
-  InterpreterContext(InterpreterConfig interpreter_config, memgraph::dbms::DbmsHandler *db_handler,
-                     AuthQueryHandler *ah = nullptr, AuthChecker *ac = nullptr);
-#else
-  InterpreterContext(InterpreterConfig interpreter_config,
-                     memgraph::utils::Gatekeeper<memgraph::dbms::Database> *db_gatekeeper,
-                     query::AuthQueryHandler *ah = nullptr, query::AuthChecker *ac = nullptr);
-#endif
+  InterpreterContext(InterpreterConfig interpreter_config, dbms::DbmsHandler *dbms_handler,
+                     replication::ReplicationState *rs, AuthQueryHandler *ah = nullptr, AuthChecker *ac = nullptr);
 
-#ifdef MG_ENTERPRISE
-  memgraph::dbms::DbmsHandler *db_handler;
-#else
-  memgraph::utils::Gatekeeper<memgraph::dbms::Database> *db_gatekeeper;
-#endif
+  memgraph::dbms::DbmsHandler *dbms_handler;
 
   // Internal
   const InterpreterConfig config;
@@ -69,6 +56,7 @@ struct InterpreterContext {
   memgraph::utils::SkipList<QueryCacheEntry> ast_cache;
 
   // GLOBAL
+  memgraph::replication::ReplicationState *repl_state;
   AuthQueryHandler *auth;
   AuthChecker *auth_checker;
 
diff --git a/src/query/plan/operator.cpp b/src/query/plan/operator.cpp
index 68d894d34..e4f96b684 100644
--- a/src/query/plan/operator.cpp
+++ b/src/query/plan/operator.cpp
@@ -2896,6 +2896,8 @@ void SetPropertiesOnRecord(TRecordAccessor *record, const TypedValue &rhs, SetPr
 
   auto update_props = [&, record](PropertiesMap &new_properties) {
     auto updated_properties = UpdatePropertiesChecked(record, new_properties);
+    // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
+    context->execution_stats[ExecutionStats::Key::UPDATED_PROPERTIES] += new_properties.size();
 
     if (should_register_change) {
       for (const auto &[id, old_value, new_value] : updated_properties) {
diff --git a/src/replication/CMakeLists.txt b/src/replication/CMakeLists.txt
index 4170cd6a8..772ae5591 100644
--- a/src/replication/CMakeLists.txt
+++ b/src/replication/CMakeLists.txt
@@ -8,17 +8,19 @@ target_sources(mg-replication
         include/replication/mode.hpp
         include/replication/role.hpp
         include/replication/status.hpp
+        include/replication/replication_server.hpp
 
         PRIVATE
         state.cpp
         epoch.cpp
         config.cpp
         status.cpp
+        replication_server.cpp
 )
 target_include_directories(mg-replication PUBLIC include)
 
 find_package(fmt REQUIRED)
 target_link_libraries(mg-replication
-    PUBLIC mg::utils mg::kvstore lib::json
+    PUBLIC mg::utils mg::kvstore lib::json mg::rpc mg::slk
     PRIVATE fmt::fmt
 )
diff --git a/src/replication/include/replication/config.hpp b/src/replication/include/replication/config.hpp
index 975fb6b4d..ca0cd8f16 100644
--- a/src/replication/include/replication/config.hpp
+++ b/src/replication/include/replication/config.hpp
@@ -21,13 +21,12 @@ namespace memgraph::replication {
 
 inline constexpr uint16_t kDefaultReplicationPort = 10000;
 inline constexpr auto *kDefaultReplicationServerIp = "0.0.0.0";
-inline constexpr auto *kReservedReplicationRoleName{"__replication_role"};
 
 struct ReplicationClientConfig {
   std::string name;
-  ReplicationMode mode;
+  ReplicationMode mode{};
   std::string ip_address;
-  uint16_t port;
+  uint16_t port{};
 
   // The default delay between main checking/pinging replicas is 1s because
   // that seems like a reasonable timeframe in which main should notice a
@@ -42,18 +41,23 @@ struct ReplicationClientConfig {
   };
 
   std::optional<SSL> ssl;
+
+  friend bool operator==(ReplicationClientConfig const &, ReplicationClientConfig const &) = default;
 };
 
 struct ReplicationServerConfig {
   std::string ip_address;
-  uint16_t port;
+  uint16_t port{};
   struct SSL {
     std::string key_file;
     std::string cert_file;
     std::string ca_file;
-    bool verify_peer;
+    bool verify_peer{};
+    friend bool operator==(SSL const &, SSL const &) = default;
   };
 
   std::optional<SSL> ssl;
+
+  friend bool operator==(ReplicationServerConfig const &, ReplicationServerConfig const &) = default;
 };
 }  // namespace memgraph::replication
diff --git a/src/replication/include/replication/epoch.hpp b/src/replication/include/replication/epoch.hpp
index 9cf04d11c..5252a1dfb 100644
--- a/src/replication/include/replication/epoch.hpp
+++ b/src/replication/include/replication/epoch.hpp
@@ -19,16 +19,21 @@ namespace memgraph::replication {
 
 struct ReplicationEpoch {
   ReplicationEpoch() : id_(memgraph::utils::GenerateUUID()) {}
-  ReplicationEpoch(ReplicationEpoch const &) = delete;
-  ReplicationEpoch(ReplicationEpoch &&) = delete;
-  ReplicationEpoch &operator=(ReplicationEpoch const &) = delete;
-  ReplicationEpoch &operator=(ReplicationEpoch &&) = delete;
+  explicit ReplicationEpoch(std::string explicit_id) : id_(std::move(explicit_id)) {}
+  ReplicationEpoch(ReplicationEpoch const &) = default;  // TODO: passkey idiom
+  ReplicationEpoch(ReplicationEpoch &&) = default;
+  ReplicationEpoch &operator=(ReplicationEpoch const &) = default;  // TODO: passkey idiom
+  ReplicationEpoch &operator=(ReplicationEpoch &&) = default;
 
   auto id() const -> std::string_view { return id_; }
 
-  auto NewEpoch() -> std::string { return std::exchange(id_, memgraph::utils::GenerateUUID()); }
+  // TODO: passkey idiom
+  friend struct ReplicationState;
+
   auto SetEpoch(std::string new_epoch) -> std::string { return std::exchange(id_, std::move(new_epoch)); }
 
+  friend bool operator==(ReplicationEpoch const &, ReplicationEpoch const &) = default;
+
  private:
   // UUID to distinguish different main instance runs for replication process
   // on SAME storage.
diff --git a/src/storage/v2/replication/replication_server.hpp b/src/replication/include/replication/replication_server.hpp
similarity index 51%
rename from src/storage/v2/replication/replication_server.hpp
rename to src/replication/include/replication/replication_server.hpp
index 0d7415396..032312dcc 100644
--- a/src/storage/v2/replication/replication_server.hpp
+++ b/src/replication/include/replication/replication_server.hpp
@@ -14,9 +14,32 @@
 #include "replication/config.hpp"
 #include "rpc/server.hpp"
 #include "slk/streams.hpp"
-#include "storage/v2/replication/global.hpp"
 
-namespace memgraph::storage {
+namespace memgraph::replication {
+
+struct FrequentHeartbeatReq {
+  static const utils::TypeInfo kType;                            // TODO: make constexpr?
+  static const utils::TypeInfo &GetTypeInfo() { return kType; }  // TODO(review): clarify why both kType and GetTypeInfo() are needed
+
+  static void Load(FrequentHeartbeatReq *self, memgraph::slk::Reader *reader);
+  static void Save(const FrequentHeartbeatReq &self, memgraph::slk::Builder *builder);
+  FrequentHeartbeatReq() {}
+};
+
+struct FrequentHeartbeatRes {
+  static const utils::TypeInfo kType;
+  static const utils::TypeInfo &GetTypeInfo() { return kType; }
+
+  static void Load(FrequentHeartbeatRes *self, memgraph::slk::Reader *reader);
+  static void Save(const FrequentHeartbeatRes &self, memgraph::slk::Builder *builder);
+  FrequentHeartbeatRes() {}
+  explicit FrequentHeartbeatRes(bool success) : success(success) {}
+
+  bool success;
+};
+
+// TODO: move to own header
+using FrequentHeartbeatRpc = rpc::RequestResponse<FrequentHeartbeatReq, FrequentHeartbeatRes>;
 
 class ReplicationServer {
  public:
@@ -31,10 +54,10 @@ class ReplicationServer {
   bool Start();
 
  protected:
-  static void FrequentHeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder);
-
   communication::ServerContext rpc_server_context_;
-  rpc::Server rpc_server_;
+
+ public:
+  rpc::Server rpc_server_;  // TODO: Interface or something
 };
 
-}  // namespace memgraph::storage
+}  // namespace memgraph::replication
diff --git a/src/replication/include/replication/state.hpp b/src/replication/include/replication/state.hpp
index ac5b2841c..0460d0a9d 100644
--- a/src/replication/include/replication/state.hpp
+++ b/src/replication/include/replication/state.hpp
@@ -21,48 +21,70 @@
 #include "replication/epoch.hpp"
 #include "replication/mode.hpp"
 #include "replication/role.hpp"
+#include "replication_server.hpp"
+#include "status.hpp"
 #include "utils/result.hpp"
 
 namespace memgraph::replication {
 
 enum class RolePersisted : uint8_t { UNKNOWN_OR_NO, YES };
 
+enum class RegisterReplicaError : uint8_t { NAME_EXISTS, END_POINT_EXISTS, COULD_NOT_BE_PERSISTED, NOT_MAIN, SUCCESS };
+
+struct RoleMainData {
+  ReplicationEpoch epoch_;
+  std::vector<ReplicationClientConfig> registered_replicas_;
+};
+
+struct RoleReplicaData {
+  ReplicationServerConfig config;
+  std::unique_ptr<ReplicationServer> server;
+};
+
 struct ReplicationState {
-  ReplicationState(std::optional<std::filesystem::path> durability_dir);
+  explicit ReplicationState(std::optional<std::filesystem::path> durability_dir);
 
   ReplicationState(ReplicationState const &) = delete;
   ReplicationState(ReplicationState &&) = delete;
   ReplicationState &operator=(ReplicationState const &) = delete;
   ReplicationState &operator=(ReplicationState &&) = delete;
 
-  void SetRole(ReplicationRole role) { return replication_role_.store(role); }
-  auto GetRole() const -> ReplicationRole { return replication_role_.load(); }
-  bool IsMain() const { return replication_role_ == ReplicationRole::MAIN; }
-  bool IsReplica() const { return replication_role_ == ReplicationRole::REPLICA; }
-
-  auto GetEpoch() const -> const ReplicationEpoch & { return epoch_; }
-  auto GetEpoch() -> ReplicationEpoch & { return epoch_; }
-
   enum class FetchReplicationError : uint8_t {
     NOTHING_FETCHED,
     PARSE_ERROR,
   };
-  using ReplicationDataReplica = ReplicationServerConfig;
-  using ReplicationDataMain = std::vector<ReplicationClientConfig>;
-  using ReplicationData = std::variant<ReplicationDataMain, ReplicationDataReplica>;
-  using FetchReplicationResult = utils::BasicResult<FetchReplicationError, ReplicationData>;
-  auto FetchReplicationData() -> FetchReplicationResult;
+
+  using ReplicationData_t = std::variant<RoleMainData, RoleReplicaData>;
+  using FetchReplicationResult_t = utils::BasicResult<FetchReplicationError, ReplicationData_t>;
+  auto FetchReplicationData() -> FetchReplicationResult_t;
+
+  auto GetRole() const -> ReplicationRole {
+    return std::holds_alternative<RoleReplicaData>(replication_data_) ? ReplicationRole::REPLICA
+                                                                      : ReplicationRole::MAIN;
+  }
+  bool IsMain() const { return GetRole() == ReplicationRole::MAIN; }
+  bool IsReplica() const { return GetRole() == ReplicationRole::REPLICA; }
 
   bool ShouldPersist() const { return nullptr != durability_; }
-  bool TryPersistRoleMain();
+  bool TryPersistRoleMain(std::string new_epoch);
   bool TryPersistRoleReplica(const ReplicationServerConfig &config);
-  bool TryPersistUnregisterReplica(std::string_view &name);
+  bool TryPersistUnregisterReplica(std::string_view name);
   bool TryPersistRegisteredReplica(const ReplicationClientConfig &config);
 
+  // TODO: locked access
+  auto ReplicationData() -> ReplicationData_t & { return replication_data_; }
+  auto ReplicationData() const -> ReplicationData_t const & { return replication_data_; }
+  auto RegisterReplica(const ReplicationClientConfig &config) -> RegisterReplicaError;
+
+  bool SetReplicationRoleMain();
+
+  bool SetReplicationRoleReplica(const ReplicationServerConfig &config);
+
  private:
-  ReplicationEpoch epoch_;
-  std::atomic<ReplicationRole> replication_role_{ReplicationRole::MAIN};
+  bool HandleVersionMigration(durability::ReplicationRoleEntry &data) const;
+
   std::unique_ptr<kvstore::KVStore> durability_;
+  ReplicationData_t replication_data_;
   std::atomic<RolePersisted> role_persisted = RolePersisted::UNKNOWN_OR_NO;
 };
 
diff --git a/src/replication/include/replication/status.hpp b/src/replication/include/replication/status.hpp
index b158cf919..943db423a 100644
--- a/src/replication/include/replication/status.hpp
+++ b/src/replication/include/replication/status.hpp
@@ -15,25 +15,56 @@
 #include <cstdint>
 #include <optional>
 #include <string>
+#include <variant>
 
 #include "json/json.hpp"
 
 #include "replication/config.hpp"
+#include "replication/epoch.hpp"
 #include "replication/role.hpp"
 
-namespace memgraph::replication {
-struct ReplicationStatus {
-  std::string name;
-  std::string ip_address;
-  uint16_t port;
-  ReplicationMode sync_mode;
-  std::chrono::seconds replica_check_frequency;
-  std::optional<ReplicationClientConfig::SSL> ssl;
-  std::optional<ReplicationRole> role;
+namespace memgraph::replication::durability {
 
-  friend bool operator==(const ReplicationStatus &, const ReplicationStatus &) = default;
+// Keys
+constexpr auto *kReplicationRoleName{"__replication_role"};
+constexpr auto *kReplicationReplicaPrefix{"__replication_replica:"};  // introduced in V2
+
+enum class DurabilityVersion : uint8_t {
+  V1,  // no distinct key for replicas
+  V2,  // current version; introduces the epoch and the replica key prefix
 };
 
-nlohmann::json ReplicationStatusToJSON(ReplicationStatus &&status);
-std::optional<ReplicationStatus> JSONToReplicationStatus(nlohmann::json &&data);
-}  // namespace memgraph::replication
+// fragment of key: "__replication_role"
+struct MainRole {
+  ReplicationEpoch epoch{};
+  friend bool operator==(MainRole const &, MainRole const &) = default;
+};
+
+// fragment of key: "__replication_role"
+struct ReplicaRole {
+  ReplicationServerConfig config;
+  friend bool operator==(ReplicaRole const &, ReplicaRole const &) = default;
+};
+
+// from key: "__replication_role"
+struct ReplicationRoleEntry {
+  DurabilityVersion version =
+      DurabilityVersion::V2;  // if not latest then migration required for kReplicationReplicaPrefix
+  std::variant<MainRole, ReplicaRole> role;
+
+  friend bool operator==(ReplicationRoleEntry const &, ReplicationRoleEntry const &) = default;
+};
+
+// from key: "__replication_replica:"
+struct ReplicationReplicaEntry {
+  ReplicationClientConfig config;
+  friend bool operator==(ReplicationReplicaEntry const &, ReplicationReplicaEntry const &) = default;
+};
+
+void to_json(nlohmann::json &j, const ReplicationRoleEntry &p);
+void from_json(const nlohmann::json &j, ReplicationRoleEntry &p);
+
+void to_json(nlohmann::json &j, const ReplicationReplicaEntry &p);
+void from_json(const nlohmann::json &j, ReplicationReplicaEntry &p);
+
+}  // namespace memgraph::replication::durability
diff --git a/src/replication/replication_server.cpp b/src/replication/replication_server.cpp
new file mode 100644
index 000000000..7d0ff3cc2
--- /dev/null
+++ b/src/replication/replication_server.cpp
@@ -0,0 +1,96 @@
+// Copyright 2023 Memgraph Ltd.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+// License, and you may not use this file except in compliance with the Business Source License.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+#include "replication/replication_server.hpp"
+#include "rpc/messages.hpp"
+#include "slk/serialization.hpp"
+#include "slk/streams.hpp"
+
+namespace memgraph::slk {
+
+// Serialize code for FrequentHeartbeatRes
+void Save(const memgraph::replication::FrequentHeartbeatRes &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self.success, builder);
+}
+void Load(memgraph::replication::FrequentHeartbeatRes *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(&self->success, reader);
+}
+
+// Serialize code for FrequentHeartbeatReq
+void Save(const memgraph::replication::FrequentHeartbeatReq &self, memgraph::slk::Builder *builder) {}
+void Load(memgraph::replication::FrequentHeartbeatReq *self, memgraph::slk::Reader *reader) {}
+
+}  // namespace memgraph::slk
+
+namespace memgraph::replication {
+namespace {
+
+auto CreateServerContext(const memgraph::replication::ReplicationServerConfig &config) -> communication::ServerContext {
+  return (config.ssl) ? communication::ServerContext{config.ssl->key_file, config.ssl->cert_file, config.ssl->ca_file,
+                                                     config.ssl->verify_peer}
+                      : communication::ServerContext{};
+}
+
+void FrequentHeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder) {
+  FrequentHeartbeatReq req;
+  memgraph::slk::Load(&req, req_reader);
+  FrequentHeartbeatRes res{true};
+  memgraph::slk::Save(res, res_builder);
+}
+
+// NOTE: The replication server must have a single thread for processing
+// because there is no need for more processing threads - each replica can
+// have only a single main server. Also, the single-threaded guarantee
+// simplifies the rest of the implementation.
+constexpr auto kReplicationServerThreads = 1;
+}  // namespace
+
+constexpr utils::TypeInfo FrequentHeartbeatReq::kType{utils::TypeId::REP_FREQUENT_HEARTBEAT_REQ, "FrequentHeartbeatReq",
+                                                      nullptr};
+
+constexpr utils::TypeInfo FrequentHeartbeatRes::kType{utils::TypeId::REP_FREQUENT_HEARTBEAT_RES, "FrequentHeartbeatRes",
+                                                      nullptr};
+
+void FrequentHeartbeatReq::Save(const FrequentHeartbeatReq &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self, builder);
+}
+void FrequentHeartbeatReq::Load(FrequentHeartbeatReq *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(self, reader);
+}
+void FrequentHeartbeatRes::Save(const FrequentHeartbeatRes &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self, builder);
+}
+void FrequentHeartbeatRes::Load(FrequentHeartbeatRes *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(self, reader);
+}
+
+ReplicationServer::ReplicationServer(const memgraph::replication::ReplicationServerConfig &config)
+    : rpc_server_context_{CreateServerContext(config)},
+      rpc_server_{io::network::Endpoint{config.ip_address, config.port}, &rpc_server_context_,
+                  kReplicationServerThreads} {
+  rpc_server_.Register<FrequentHeartbeatRpc>([](auto *req_reader, auto *res_builder) {
+    spdlog::debug("Received FrequentHeartbeatRpc");
+    FrequentHeartbeatHandler(req_reader, res_builder);
+  });
+}
+
+ReplicationServer::~ReplicationServer() {
+  if (rpc_server_.IsRunning()) {
+    auto const &endpoint = rpc_server_.endpoint();
+    spdlog::trace("Closing replication server on {}:{}", endpoint.address, endpoint.port);
+    rpc_server_.Shutdown();
+  }
+  rpc_server_.AwaitShutdown();
+}
+
+bool ReplicationServer::Start() { return rpc_server_.Start(); }
+
+}  // namespace memgraph::replication
diff --git a/src/replication/state.cpp b/src/replication/state.cpp
index 45984d317..4551eba7e 100644
--- a/src/replication/state.cpp
+++ b/src/replication/state.cpp
@@ -11,134 +11,265 @@
 
 #include "replication/state.hpp"
 
-#include "replication/status.hpp"  //TODO: don't use status for durability
+#include "replication/replication_server.hpp"
+#include "replication/status.hpp"
 #include "utils/file.hpp"
+#include "utils/variant_helpers.hpp"
 
 constexpr auto kReplicationDirectory = std::string_view{"replication"};
 
 namespace memgraph::replication {
 
+auto BuildReplicaKey(std::string_view name) -> std::string {
+  auto key = std::string{durability::kReplicationReplicaPrefix};
+  key.append(name);
+  return key;
+}
+
 ReplicationState::ReplicationState(std::optional<std::filesystem::path> durability_dir) {
   if (!durability_dir) return;
   auto repl_dir = *std::move(durability_dir);
   repl_dir /= kReplicationDirectory;
   utils::EnsureDirOrDie(repl_dir);
   durability_ = std::make_unique<kvstore::KVStore>(std::move(repl_dir));
+
+  auto replicationData = FetchReplicationData();
+  if (replicationData.HasError()) {
+    switch (replicationData.GetError()) {
+      using enum ReplicationState::FetchReplicationError;
+      case NOTHING_FETCHED: {
+        spdlog::debug("Cannot find data needed for restore replication role in persisted metadata.");
+        replication_data_ = RoleMainData{};
+        return;
+      }
+      case PARSE_ERROR: {
+        LOG_FATAL("Cannot parse previously saved configuration of replication role.");
+        return;
+      }
+    }
+  }
+  replication_data_ = std::move(replicationData).GetValue();
 }
+
 bool ReplicationState::TryPersistRoleReplica(const ReplicationServerConfig &config) {
   if (!ShouldPersist()) return true;
-  // Only thing that matters here is the role saved as REPLICA and the listening port
-  auto data = ReplicationStatusToJSON(ReplicationStatus{.name = kReservedReplicationRoleName,
-                                                        .ip_address = config.ip_address,
-                                                        .port = config.port,
-                                                        .sync_mode = ReplicationMode::SYNC,
-                                                        .replica_check_frequency = std::chrono::seconds(0),
-                                                        .ssl = std::nullopt,
-                                                        .role = ReplicationRole::REPLICA});
 
-  if (durability_->Put(kReservedReplicationRoleName, data.dump())) {
-    role_persisted = RolePersisted::YES;
-    return true;
+  auto data = durability::ReplicationRoleEntry{.role = durability::ReplicaRole{
+                                                   .config = config,
+                                               }};
+
+  if (!durability_->Put(durability::kReplicationRoleName, nlohmann::json(data).dump())) {
+    spdlog::error("Error when saving REPLICA replication role in settings.");
+    return false;
   }
-  spdlog::error("Error when saving REPLICA replication role in settings.");
-  return false;
-}
-bool ReplicationState::TryPersistRoleMain() {
-  if (!ShouldPersist()) return true;
-  // Only thing that matters here is the role saved as MAIN
-  auto data = ReplicationStatusToJSON(ReplicationStatus{.name = kReservedReplicationRoleName,
-                                                        .ip_address = "",
-                                                        .port = 0,
-                                                        .sync_mode = ReplicationMode::SYNC,
-                                                        .replica_check_frequency = std::chrono::seconds(0),
-                                                        .ssl = std::nullopt,
-                                                        .role = ReplicationRole::MAIN});
+  role_persisted = RolePersisted::YES;
 
-  if (durability_->Put(kReservedReplicationRoleName, data.dump())) {
+  // Cleanup: remove registered replicas (assume the deletes succeed)
+  // NOTE: the alternative would be to recover these registered replicas on the REPLICA -> MAIN transition
+  auto b = durability_->begin(durability::kReplicationReplicaPrefix);
+  auto e = durability_->end(durability::kReplicationReplicaPrefix);
+  for (; b != e; ++b) {
+    durability_->Delete(b->first);
+  }
+
+  return true;
+}
+
+bool ReplicationState::TryPersistRoleMain(std::string new_epoch) {
+  if (!ShouldPersist()) return true;
+
+  auto data =
+      durability::ReplicationRoleEntry{.role = durability::MainRole{.epoch = ReplicationEpoch{std::move(new_epoch)}}};
+
+  if (durability_->Put(durability::kReplicationRoleName, nlohmann::json(data).dump())) {
     role_persisted = RolePersisted::YES;
     return true;
   }
   spdlog::error("Error when saving MAIN replication role in settings.");
   return false;
 }
-bool ReplicationState::TryPersistUnregisterReplica(std::string_view &name) {
+
+bool ReplicationState::TryPersistUnregisterReplica(std::string_view name) {
   if (!ShouldPersist()) return true;
-  if (durability_->Delete(name)) return true;
+
+  auto key = BuildReplicaKey(name);
+
+  if (durability_->Delete(key)) return true;
   spdlog::error("Error when removing replica {} from settings.", name);
   return false;
 }
-auto ReplicationState::FetchReplicationData() -> FetchReplicationResult {
+
+// TODO: FetchEpochData (agnostic of FetchReplicationData, but should be done before)
+
+auto ReplicationState::FetchReplicationData() -> FetchReplicationResult_t {
   if (!ShouldPersist()) return FetchReplicationError::NOTHING_FETCHED;
-  const auto replication_data = durability_->Get(kReservedReplicationRoleName);
+  const auto replication_data = durability_->Get(durability::kReplicationRoleName);
   if (!replication_data.has_value()) {
     return FetchReplicationError::NOTHING_FETCHED;
   }
 
-  const auto maybe_replication_status = JSONToReplicationStatus(nlohmann::json::parse(*replication_data));
-  if (!maybe_replication_status.has_value()) {
+  auto json = nlohmann::json::parse(*replication_data, nullptr, false);
+  if (json.is_discarded()) {
     return FetchReplicationError::PARSE_ERROR;
   }
+  try {
+    durability::ReplicationRoleEntry data = json.get<durability::ReplicationRoleEntry>();
 
-  // To get here this must be the case
-  role_persisted = memgraph::replication::RolePersisted::YES;
-
-  const auto replication_status = *maybe_replication_status;
-  auto role = replication_status.role.value_or(ReplicationRole::MAIN);
-  switch (role) {
-    case ReplicationRole::REPLICA: {
-      return {ReplicationServerConfig{
-          .ip_address = kDefaultReplicationServerIp,
-          .port = replication_status.port,
-      }};
+    if (!HandleVersionMigration(data)) {
+      return FetchReplicationError::PARSE_ERROR;
     }
-    case ReplicationRole::MAIN: {
-      auto res = ReplicationState::ReplicationDataMain{};
-      res.reserve(durability_->Size() - 1);
-      for (const auto &[replica_name, replica_data] : *durability_) {
-        if (replica_name == kReservedReplicationRoleName) {
+
+    // To get here this must be the case
+    role_persisted = memgraph::replication::RolePersisted::YES;
+
+    return std::visit(
+        utils::Overloaded{
+            [&](durability::MainRole &&r) -> FetchReplicationResult_t {
+              auto res = RoleMainData{
+                  .epoch_ = std::move(r.epoch),
+              };
+              auto b = durability_->begin(durability::kReplicationReplicaPrefix);
+              auto e = durability_->end(durability::kReplicationReplicaPrefix);
+              res.registered_replicas_.reserve(durability_->Size(durability::kReplicationReplicaPrefix));
+              for (; b != e; ++b) {
+                auto const &[replica_name, replica_data] = *b;
+                auto json = nlohmann::json::parse(replica_data, nullptr, false);
+                if (json.is_discarded()) return FetchReplicationError::PARSE_ERROR;
+                try {
+                  durability::ReplicationReplicaEntry data = json.get<durability::ReplicationReplicaEntry>();
+                  auto key_name = std::string_view{replica_name}.substr(strlen(durability::kReplicationReplicaPrefix));
+                  if (key_name != data.config.name) {
+                    return FetchReplicationError::PARSE_ERROR;
+                  }
+                  res.registered_replicas_.emplace_back(std::move(data.config));
+                } catch (...) {
+                  return FetchReplicationError::PARSE_ERROR;
+                }
+              }
+              return {std::move(res)};
+            },
+            [&](durability::ReplicaRole &&r) -> FetchReplicationResult_t {
+              return {RoleReplicaData{r.config, std::make_unique<ReplicationServer>(r.config)}};
+            },
+        },
+        std::move(data.role));
+  } catch (...) {
+    return FetchReplicationError::PARSE_ERROR;
+  }
+}
+
+bool ReplicationState::HandleVersionMigration(durability::ReplicationRoleEntry &data) const {
+  switch (data.version) {
+    case durability::DurabilityVersion::V1: {
+      // For each replica config, change key to use the prefix
+      std::map<std::string, std::string> to_put;
+      std::vector<std::string> to_delete;
+      for (auto [old_key, old_data] : *durability_) {
+        // skip reserved keys
+        if (old_key == durability::kReplicationRoleName) {
           continue;
         }
 
-        const auto maybe_replica_status = JSONToReplicationStatus(nlohmann::json::parse(replica_data));
-        if (!maybe_replica_status.has_value()) {
-          return FetchReplicationError::PARSE_ERROR;
-        }
+        // Turn old data to new data
+        auto old_json = nlohmann::json::parse(old_data, nullptr, false);
+        if (old_json.is_discarded()) return false;  // Can not read old_data as json
+        try {
+          durability::ReplicationReplicaEntry new_data = old_json.get<durability::ReplicationReplicaEntry>();
 
-        auto replica_status = *maybe_replica_status;
-        if (replica_status.name != replica_name) {
-          return FetchReplicationError::PARSE_ERROR;
+          // Migrate to using new key
+          to_put.emplace(BuildReplicaKey(old_key), nlohmann::json(new_data).dump());
+        } catch (...) {
+          return false;  // Can not parse as ReplicationReplicaEntry
         }
-        res.emplace_back(ReplicationClientConfig{
-            .name = replica_status.name,
-            .mode = replica_status.sync_mode,
-            .ip_address = replica_status.ip_address,
-            .port = replica_status.port,
-            .replica_check_frequency = replica_status.replica_check_frequency,
-            .ssl = replica_status.ssl,
-        });
+        to_delete.push_back(std::move(old_key));
       }
-      return {std::move(res)};
+      // Set version
+      data.version = durability::DurabilityVersion::V2;
+      // Re-serialise (to include version + epoch)
+      to_put.emplace(durability::kReplicationRoleName, nlohmann::json(data).dump());
+      if (!durability_->PutAndDeleteMultiple(to_put, to_delete)) return false;  // some reason couldn't persist
+      [[fallthrough]];
+    }
+    case durability::DurabilityVersion::V2: {
+      // do nothing - add code if V3 ever happens
+      break;
     }
   }
+  return true;
 }
+
 bool ReplicationState::TryPersistRegisteredReplica(const ReplicationClientConfig &config) {
   if (!ShouldPersist()) return true;
 
   // If any replicas are persisted then Role must be persisted
   if (role_persisted != RolePersisted::YES) {
     DMG_ASSERT(IsMain(), "MAIN is expected");
-    if (!TryPersistRoleMain()) return false;
+    auto epoch_str = std::string(std::get<RoleMainData>(replication_data_).epoch_.id());
+    if (!TryPersistRoleMain(std::move(epoch_str))) return false;
   }
 
-  auto data = ReplicationStatusToJSON(ReplicationStatus{.name = config.name,
-                                                        .ip_address = config.ip_address,
-                                                        .port = config.port,
-                                                        .sync_mode = config.mode,
-                                                        .replica_check_frequency = config.replica_check_frequency,
-                                                        .ssl = config.ssl,
-                                                        .role = ReplicationRole::REPLICA});
-  if (durability_->Put(config.name, data.dump())) return true;
+  auto data = durability::ReplicationReplicaEntry{.config = config};
+
+  auto key = BuildReplicaKey(config.name);
+  if (durability_->Put(key, nlohmann::json(data).dump())) return true;
   spdlog::error("Error when saving replica {} in settings.", config.name);
   return false;
 }
+
+bool ReplicationState::SetReplicationRoleMain() {
+  auto new_epoch = utils::GenerateUUID();
+  if (!TryPersistRoleMain(new_epoch)) {
+    return false;
+  }
+  replication_data_ = RoleMainData{.epoch_ = ReplicationEpoch{new_epoch}};
+  return true;
+}
+
+bool ReplicationState::SetReplicationRoleReplica(const ReplicationServerConfig &config) {
+  if (!TryPersistRoleReplica(config)) {
+    return false;
+  }
+  replication_data_ = RoleReplicaData{config, std::make_unique<ReplicationServer>(config)};
+  return true;
+}
+
+auto ReplicationState::RegisterReplica(const ReplicationClientConfig &config) -> RegisterReplicaError {
+  auto const replica_handler = [](RoleReplicaData const &) -> RegisterReplicaError {
+    return RegisterReplicaError::NOT_MAIN;
+  };
+  auto const main_handler = [this, &config](RoleMainData &mainData) -> RegisterReplicaError {
+    // name check
+    auto name_check = [&config](auto const &replicas) {
+      auto name_matches = [&name = config.name](ReplicationClientConfig const &registered_config) {
+        return registered_config.name == name;
+      };
+      return std::any_of(replicas.begin(), replicas.end(), name_matches);
+    };
+    if (name_check(mainData.registered_replicas_)) {
+      return RegisterReplicaError::NAME_EXISTS;
+    }
+
+    // endpoint check
+    auto endpoint_check = [&](auto const &replicas) {
+      auto endpoint_matches = [&config](ReplicationClientConfig const &registered_config) {
+        return registered_config.ip_address == config.ip_address && registered_config.port == config.port;
+      };
+      return std::any_of(replicas.begin(), replicas.end(), endpoint_matches);
+    };
+    if (endpoint_check(mainData.registered_replicas_)) {
+      return RegisterReplicaError::END_POINT_EXISTS;
+    }
+
+    // Durability
+    if (!TryPersistRegisteredReplica(config)) {
+      return RegisterReplicaError::COULD_NOT_BE_PERSISTED;
+    }
+
+    // set
+    mainData.registered_replicas_.emplace_back(config);
+    return RegisterReplicaError::SUCCESS;
+  };
+
+  return std::visit(utils::Overloaded{main_handler, replica_handler}, replication_data_);
+}
 }  // namespace memgraph::replication
diff --git a/src/replication/status.cpp b/src/replication/status.cpp
index 711b1f955..06d67cc66 100644
--- a/src/replication/status.cpp
+++ b/src/replication/status.cpp
@@ -12,6 +12,9 @@
 
 #include "fmt/format.h"
 #include "utils/logging.hpp"
+#include "utils/variant_helpers.hpp"
+
+namespace memgraph::replication::durability {
 
 constexpr auto *kReplicaName = "replica_name";
 constexpr auto *kIpAddress = "replica_ip_address";
@@ -21,71 +24,87 @@ constexpr auto *kCheckFrequency = "replica_check_frequency";
 constexpr auto *kSSLKeyFile = "replica_ssl_key_file";
 constexpr auto *kSSLCertFile = "replica_ssl_cert_file";
 constexpr auto *kReplicationRole = "replication_role";
+constexpr auto *kEpoch = "epoch";
+constexpr auto *kVersion = "durability_version";
 
-namespace memgraph::replication {
-
-nlohmann::json ReplicationStatusToJSON(ReplicationStatus &&status) {
-  auto data = nlohmann::json::object();
-
-  data[kReplicaName] = std::move(status.name);
-  data[kIpAddress] = std::move(status.ip_address);
-  data[kPort] = status.port;
-  data[kSyncMode] = status.sync_mode;
-
-  data[kCheckFrequency] = status.replica_check_frequency.count();
-
-  if (status.ssl.has_value()) {
-    data[kSSLKeyFile] = std::move(status.ssl->key_file);
-    data[kSSLCertFile] = std::move(status.ssl->cert_file);
-  } else {
-    data[kSSLKeyFile] = nullptr;
-    data[kSSLCertFile] = nullptr;
-  }
-
-  if (status.role.has_value()) {
-    data[kReplicationRole] = *status.role;
-  }
-
-  return data;
-}
-std::optional<ReplicationStatus> JSONToReplicationStatus(nlohmann::json &&data) {
-  ReplicationStatus replica_status;
-
-  const auto get_failed_message = [](const std::string_view message, const std::string_view nested_message) {
-    return fmt::format("Failed to deserialize replica's configuration: {} : {}", message, nested_message);
+void to_json(nlohmann::json &j, const ReplicationRoleEntry &p) {
+  auto processMAIN = [&](MainRole const &main) {
+    j = nlohmann::json{{kVersion, p.version}, {kReplicationRole, ReplicationRole::MAIN}, {kEpoch, main.epoch.id()}};
   };
-
-  try {
-    data.at(kReplicaName).get_to(replica_status.name);
-    data.at(kIpAddress).get_to(replica_status.ip_address);
-    data.at(kPort).get_to(replica_status.port);
-    data.at(kSyncMode).get_to(replica_status.sync_mode);
-
-    replica_status.replica_check_frequency = std::chrono::seconds(data.at(kCheckFrequency));
-
-    const auto &key_file = data.at(kSSLKeyFile);
-    const auto &cert_file = data.at(kSSLCertFile);
-
-    MG_ASSERT(key_file.is_null() == cert_file.is_null());
-
-    if (!key_file.is_null()) {
-      replica_status.ssl = ReplicationClientConfig::SSL{};
-      data.at(kSSLKeyFile).get_to(replica_status.ssl->key_file);
-      data.at(kSSLCertFile).get_to(replica_status.ssl->cert_file);
-    }
-
-    if (data.find(kReplicationRole) != data.end()) {
-      replica_status.role = ReplicationRole::MAIN;
-      data.at(kReplicationRole).get_to(replica_status.role.value());
-    }
-  } catch (const nlohmann::json::type_error &exception) {
-    spdlog::error(get_failed_message("Invalid type conversion", exception.what()));
-    return std::nullopt;
-  } catch (const nlohmann::json::out_of_range &exception) {
-    spdlog::error(get_failed_message("Non existing field", exception.what()));
-    return std::nullopt;
-  }
-
-  return replica_status;
+  auto processREPLICA = [&](ReplicaRole const &replica) {
+    j = nlohmann::json{
+        {kVersion, p.version},
+        {kReplicationRole, ReplicationRole::REPLICA},
+        {kIpAddress, replica.config.ip_address},
+        {kPort, replica.config.port}
+        // TODO: SSL
+    };
+  };
+  std::visit(utils::Overloaded{processMAIN, processREPLICA}, p.role);
 }
-}  // namespace memgraph::replication
+
+void from_json(const nlohmann::json &j, ReplicationRoleEntry &p) {
+  // This value did not exist in V1, hence default DurabilityVersion::V1
+  DurabilityVersion version = j.value(kVersion, DurabilityVersion::V1);
+  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+  ReplicationRole role;
+  j.at(kReplicationRole).get_to(role);
+  switch (role) {
+    case ReplicationRole::MAIN: {
+      auto json_epoch = j.value(kEpoch, std::string{});
+      auto epoch = ReplicationEpoch{};
+      if (!json_epoch.empty()) epoch.SetEpoch(json_epoch);
+      p = ReplicationRoleEntry{.version = version, .role = MainRole{.epoch = std::move(epoch)}};
+      break;
+    }
+    case ReplicationRole::REPLICA: {
+      std::string ip_address;
+      // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+      uint16_t port;
+      j.at(kIpAddress).get_to(ip_address);
+      j.at(kPort).get_to(port);
+      auto config = ReplicationServerConfig{.ip_address = std::move(ip_address), .port = port};
+      p = ReplicationRoleEntry{.version = version, .role = ReplicaRole{.config = std::move(config)}};
+      break;
+    }
+  }
+}
+
+void to_json(nlohmann::json &j, const ReplicationReplicaEntry &p) {
+  auto common = nlohmann::json{{kReplicaName, p.config.name},
+                               {kIpAddress, p.config.ip_address},
+                               {kPort, p.config.port},
+                               {kSyncMode, p.config.mode},
+                               {kCheckFrequency, p.config.replica_check_frequency.count()}};
+
+  if (p.config.ssl.has_value()) {
+    common[kSSLKeyFile] = p.config.ssl->key_file;
+    common[kSSLCertFile] = p.config.ssl->cert_file;
+  } else {
+    common[kSSLKeyFile] = nullptr;
+    common[kSSLCertFile] = nullptr;
+  }
+  j = std::move(common);
+}
+void from_json(const nlohmann::json &j, ReplicationReplicaEntry &p) {
+  const auto &key_file = j.at(kSSLKeyFile);
+  const auto &cert_file = j.at(kSSLCertFile);
+
+  MG_ASSERT(key_file.is_null() == cert_file.is_null());
+
+  auto seconds = j.at(kCheckFrequency).get<std::chrono::seconds::rep>();
+  auto config = ReplicationClientConfig{
+      .name = j.at(kReplicaName).get<std::string>(),
+      .mode = j.at(kSyncMode).get<ReplicationMode>(),
+      .ip_address = j.at(kIpAddress).get<std::string>(),
+      .port = j.at(kPort).get<uint16_t>(),
+      .replica_check_frequency = std::chrono::seconds{seconds},
+  };
+  if (!key_file.is_null()) {
+    config.ssl = ReplicationClientConfig::SSL{};
+    key_file.get_to(config.ssl->key_file);
+    cert_file.get_to(config.ssl->cert_file);
+  }
+  p = ReplicationReplicaEntry{.config = std::move(config)};
+}
+}  // namespace memgraph::replication::durability
diff --git a/src/rpc/CMakeLists.txt b/src/rpc/CMakeLists.txt
index cdfa42455..4558e91f6 100644
--- a/src/rpc/CMakeLists.txt
+++ b/src/rpc/CMakeLists.txt
@@ -7,5 +7,6 @@ find_package(fmt REQUIRED)
 find_package(gflags REQUIRED)
 
 add_library(mg-rpc STATIC ${rpc_src_files})
+add_library(mg::rpc ALIAS mg-rpc)
 target_link_libraries(mg-rpc Threads::Threads mg-communication mg-utils mg-io fmt::fmt gflags)
 target_link_libraries(mg-rpc mg-slk)
diff --git a/src/rpc/client.hpp b/src/rpc/client.hpp
index 1f9f33b50..bd98afe89 100644
--- a/src/rpc/client.hpp
+++ b/src/rpc/client.hpp
@@ -19,6 +19,7 @@
 #include "io/network/endpoint.hpp"
 #include "rpc/exceptions.hpp"
 #include "rpc/messages.hpp"
+#include "rpc/version.hpp"
 #include "slk/serialization.hpp"
 #include "slk/streams.hpp"
 #include "utils/logging.hpp"
@@ -43,7 +44,7 @@ class Client {
         : self_(self),
           guard_(std::move(guard)),
           req_builder_([self](const uint8_t *data, size_t size, bool have_more) {
-            if (!self->client_->Write(data, size, have_more)) throw RpcFailedException(self->endpoint_);
+            if (!self->client_->Write(data, size, have_more)) throw GenericRpcFailedException();
           }),
           res_load_(res_load) {}
 
@@ -69,11 +70,11 @@ class Client {
       while (true) {
         auto ret = slk::CheckStreamComplete(self_->client_->GetData(), self_->client_->GetDataSize());
         if (ret.status == slk::StreamStatus::INVALID) {
-          throw RpcFailedException(self_->endpoint_);
+          throw GenericRpcFailedException();
         } else if (ret.status == slk::StreamStatus::PARTIAL) {
           if (!self_->client_->Read(ret.stream_size - self_->client_->GetDataSize(),
                                     /* exactly_len = */ false)) {
-            throw RpcFailedException(self_->endpoint_);
+            throw GenericRpcFailedException();
           }
         } else {
           response_data_size = ret.stream_size;
@@ -88,11 +89,22 @@ class Client {
       utils::TypeId res_id{utils::TypeId::UNKNOWN};
       slk::Load(&res_id, &res_reader);
 
+      // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+      rpc::Version version;
+      slk::Load(&version, &res_reader);
+
+      if (version != rpc::current_version) {
+        // V1 is the version that introduced versioning itself; there is no backwards
+        // compatibility, because providing it for pre-versioning releases is impossible.
+        // Future versions may require a mechanism for graceful version handling.
+        throw VersionMismatchRpcFailedException();
+      }
+
       // Check the response ID.
       if (res_id != res_type.id && res_id != utils::TypeId::UNKNOWN) {
         spdlog::error("Message response was of unexpected type");
         self_->client_ = std::nullopt;
-        throw RpcFailedException(self_->endpoint_);
+        throw GenericRpcFailedException();
       }
 
       SPDLOG_TRACE("[RpcClient] received {}", res_type.name);
@@ -153,7 +165,7 @@ class Client {
       if (!client_->Connect(endpoint_)) {
         SPDLOG_ERROR("Couldn't connect to remote address {}", endpoint_);
         client_ = std::nullopt;
-        throw RpcFailedException(endpoint_);
+        throw GenericRpcFailedException();
       }
     }
 
@@ -162,6 +174,8 @@ class Client {
 
     // Build and send the request.
     slk::Save(req_type.id, handler.GetBuilder());
+    slk::Save(rpc::current_version, handler.GetBuilder());
+
     TRequestResponse::Request::Save(request, handler.GetBuilder());
 
     // Return the handler to the user.
diff --git a/src/rpc/exceptions.hpp b/src/rpc/exceptions.hpp
index f6666baeb..b0eb6c329 100644
--- a/src/rpc/exceptions.hpp
+++ b/src/rpc/exceptions.hpp
@@ -19,19 +19,30 @@ namespace memgraph::rpc {
 /// `utils::BasicException` is used for transient errors that should be reported
 /// to the user and `utils::StacktraceException` is used for fatal errors.
 /// This exception always requires explicit handling.
-class RpcFailedException final : public utils::BasicException {
+class RpcFailedException : public utils::BasicException {
  public:
-  RpcFailedException(const io::network::Endpoint &endpoint)
-      : utils::BasicException::BasicException(
-            "Couldn't communicate with the cluster! Please contact your "
-            "database administrator."),
-        endpoint_(endpoint) {}
-
-  /// Returns the endpoint associated with the error.
-  const io::network::Endpoint &endpoint() const { return endpoint_; }
-  SPECIALIZE_GET_EXCEPTION_NAME(RpcFailedException)
-
- private:
-  io::network::Endpoint endpoint_;
+  RpcFailedException(std::string_view msg) : utils::BasicException(msg) {}
+  SPECIALIZE_GET_EXCEPTION_NAME(RpcFailedException);
 };
+
+class VersionMismatchRpcFailedException : public RpcFailedException {
+ public:
+  VersionMismatchRpcFailedException()
+      : RpcFailedException(
+            "Couldn't communicate with the cluster! There was a version mismatch. "
+            "Please contact your database administrator.") {}
+
+  SPECIALIZE_GET_EXCEPTION_NAME(VersionMismatchRpcFailedException);
+};
+
+class GenericRpcFailedException : public RpcFailedException {
+ public:
+  GenericRpcFailedException()
+      : RpcFailedException(
+            "Couldn't communicate with the cluster! Please contact your "
+            "database administrator.") {}
+
+  SPECIALIZE_GET_EXCEPTION_NAME(GenericRpcFailedException);
+};
+
 }  // namespace memgraph::rpc
diff --git a/src/rpc/protocol.cpp b/src/rpc/protocol.cpp
index ac74d754b..933daaa7f 100644
--- a/src/rpc/protocol.cpp
+++ b/src/rpc/protocol.cpp
@@ -13,6 +13,7 @@
 
 #include "rpc/messages.hpp"
 #include "rpc/server.hpp"
+#include "rpc/version.hpp"
 #include "slk/serialization.hpp"
 #include "slk/streams.hpp"
 #include "utils/on_scope_exit.hpp"
@@ -44,6 +45,16 @@ void Session::Execute() {
   // Load the request ID.
   utils::TypeId req_id{utils::TypeId::UNKNOWN};
   slk::Load(&req_id, &req_reader);
+  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+  rpc::Version version;
+  slk::Load(&version, &req_reader);
+
+  if (version != rpc::current_version) {
+    // V1 is the version RPC versioning was introduced with; there is absolutely no
+    // backwards compatibility, because it's impossible to provide it for pre-versioning releases.
+    // Future versions may require a mechanism for graceful version handling.
+    throw SessionException("Session trying to execute an RPC call of an incorrect version!");
+  }
 
   // Access to `callbacks_` and `extended_callbacks_` is done here without
   // acquiring the `mutex_` because we don't allow RPC registration after the
@@ -62,10 +73,12 @@ void Session::Execute() {
     }
     SPDLOG_TRACE("[RpcServer] received {}", extended_it->second.req_type.name);
     slk::Save(extended_it->second.res_type.id, &res_builder);
+    slk::Save(rpc::current_version, &res_builder);
     extended_it->second.callback(endpoint_, &req_reader, &res_builder);
   } else {
     SPDLOG_TRACE("[RpcServer] received {}", it->second.req_type.name);
     slk::Save(it->second.res_type.id, &res_builder);
+    slk::Save(rpc::current_version, &res_builder);
     it->second.callback(&req_reader, &res_builder);
   }
 
diff --git a/src/rpc/version.hpp b/src/rpc/version.hpp
new file mode 100644
index 000000000..29e7f8d3a
--- /dev/null
+++ b/src/rpc/version.hpp
@@ -0,0 +1,27 @@
+// Copyright 2023 Memgraph Ltd.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+// License, and you may not use this file except in compliance with the Business Source License.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+#pragma once
+
+#include <cstdint>
+
+namespace memgraph::rpc {
+
+using Version = uint64_t;
+
+// RPC versioning was introduced in 2.13.
+// We start the versioning with an unusual number to radically reduce the
+// probability of an accidental match/conformance with pre-2.13 versions.
+constexpr auto v1 = Version{2023'10'30'0'2'13};
+
+constexpr auto current_version = v1;
+
+}  // namespace memgraph::rpc
diff --git a/src/slk/CMakeLists.txt b/src/slk/CMakeLists.txt
index 81570e1d9..ba427b3de 100644
--- a/src/slk/CMakeLists.txt
+++ b/src/slk/CMakeLists.txt
@@ -4,5 +4,6 @@ set(slk_src_files
 find_package(gflags REQUIRED)
 
 add_library(mg-slk STATIC ${slk_src_files})
+add_library(mg::slk ALIAS mg-slk)
 target_link_libraries(mg-slk gflags)
 target_link_libraries(mg-slk mg-utils)
diff --git a/src/storage/v2/CMakeLists.txt b/src/storage/v2/CMakeLists.txt
index 1a3776624..147684c54 100644
--- a/src/storage/v2/CMakeLists.txt
+++ b/src/storage/v2/CMakeLists.txt
@@ -35,24 +35,10 @@ add_library(mg-storage-v2 STATIC
         disk/unique_constraints.cpp
         storage_mode.cpp
         replication/replication_client.cpp
-        replication/replication_server.cpp
         replication/serialization.cpp
         replication/slk.cpp
         replication/rpc.cpp
         replication/replication_storage_state.cpp
-        replication/replication_handler.cpp
-        inmemory/replication/replication_server.cpp
         inmemory/replication/replication_client.cpp
 )
 target_link_libraries(mg-storage-v2 mg::replication Threads::Threads mg-utils gflags absl::flat_hash_map mg-rpc mg-slk mg-events)
-
-# Until we get LTO there is an advantage to do some unity builds
-set_target_properties(mg-storage-v2
-        PROPERTIES
-        UNITY_BUILD ON
-        UNITY_BUILD_MODE GROUP
-)
-set_source_files_properties(
-        vertex_info_cache.cpp vertex_accessor.cpp
-        PROPERTIES UNITY_GROUP "ensure inline of vertex_info_cache"
-)
diff --git a/src/storage/v2/config.hpp b/src/storage/v2/config.hpp
index 4206fb187..7ea7e95b7 100644
--- a/src/storage/v2/config.hpp
+++ b/src/storage/v2/config.hpp
@@ -17,6 +17,7 @@
 #include "storage/v2/isolation_level.hpp"
 #include "storage/v2/storage_mode.hpp"
 #include "utils/exceptions.hpp"
+#include "utils/logging.hpp"
 
 namespace memgraph::storage {
 
@@ -34,10 +35,12 @@ struct Config {
 
     Type type{Type::PERIODIC};
     std::chrono::milliseconds interval{std::chrono::milliseconds(1000)};
+    friend bool operator==(const Gc &lhs, const Gc &rhs) = default;
   } gc;
 
   struct Items {
     bool properties_on_edges{true};
+    friend bool operator==(const Items &lhs, const Items &rhs) = default;
   } items;
 
   struct Durability {
@@ -62,10 +65,12 @@ struct Config {
     uint64_t recovery_thread_count{8};
 
     bool allow_parallel_index_creation{false};
+    friend bool operator==(const Durability &lhs, const Durability &rhs) = default;
   } durability;
 
   struct Transaction {
     IsolationLevel isolation_level{IsolationLevel::SNAPSHOT_ISOLATION};
+    friend bool operator==(const Transaction &lhs, const Transaction &rhs) = default;
   } transaction;
 
   struct DiskConfig {
@@ -77,13 +82,26 @@ struct Config {
     std::filesystem::path id_name_mapper_directory{"storage/rocksdb_id_name_mapper"};
     std::filesystem::path durability_directory{"storage/rocksdb_durability"};
     std::filesystem::path wal_directory{"storage/rocksdb_wal"};
+    friend bool operator==(const DiskConfig &lhs, const DiskConfig &rhs) = default;
   } disk;
 
   std::string name;
   bool force_on_disk{false};
   StorageMode storage_mode{StorageMode::IN_MEMORY_TRANSACTIONAL};
+
+  friend bool operator==(const Config &lhs, const Config &rhs) = default;
 };
 
+inline auto ReplicationStateRootPath(memgraph::storage::Config const &config) -> std::optional<std::filesystem::path> {
+  if (!config.durability.restore_replication_state_on_startup) {
+    spdlog::warn(
+        "Replication configuration will NOT be stored. When the server restarts, replication state will be "
+        "forgotten.");
+    return std::nullopt;
+  }
+  return {config.durability.storage_directory};
+}
+
 static inline void UpdatePaths(Config &config, const std::filesystem::path &storage_dir) {
   auto contained = [](const auto &path, const auto &base) -> std::optional<std::filesystem::path> {
     auto rel = std::filesystem::relative(path, base);
diff --git a/src/storage/v2/disk/label_index.hpp b/src/storage/v2/disk/label_index.hpp
index ebc0fb282..8e3fa5371 100644
--- a/src/storage/v2/disk/label_index.hpp
+++ b/src/storage/v2/disk/label_index.hpp
@@ -19,6 +19,7 @@
 #include "storage/v2/indices/label_index.hpp"
 #include "storage/v2/vertex.hpp"
 #include "utils/rocksdb_serialization.hpp"
+#include "utils/synchronized.hpp"
 
 namespace memgraph::storage {
 class DiskLabelIndex : public storage::LabelIndex {
diff --git a/src/storage/v2/disk/label_property_index.hpp b/src/storage/v2/disk/label_property_index.hpp
index 6b104ef9b..26f972d79 100644
--- a/src/storage/v2/disk/label_property_index.hpp
+++ b/src/storage/v2/disk/label_property_index.hpp
@@ -13,6 +13,7 @@
 
 #include "storage/v2/disk/rocksdb_storage.hpp"
 #include "storage/v2/indices/label_property_index.hpp"
+#include "utils/synchronized.hpp"
 
 namespace memgraph::storage {
 
diff --git a/src/storage/v2/disk/storage.cpp b/src/storage/v2/disk/storage.cpp
index f0280fdc0..8dca66b29 100644
--- a/src/storage/v2/disk/storage.cpp
+++ b/src/storage/v2/disk/storage.cpp
@@ -1552,7 +1552,7 @@ DiskStorage::CheckExistingVerticesBeforeCreatingUniqueConstraint(LabelId label,
 
 // NOLINTNEXTLINE(google-default-arguments)
 utils::BasicResult<StorageManipulationError, void> DiskStorage::DiskAccessor::Commit(
-    const std::optional<uint64_t> desired_commit_timestamp) {
+    const std::optional<uint64_t> desired_commit_timestamp, bool /*is_main*/) {
   MG_ASSERT(is_transaction_active_, "The transaction is already terminated!");
   MG_ASSERT(!transaction_.must_abort, "The transaction can't be committed!");
 
@@ -1958,7 +1958,7 @@ UniqueConstraints::DeletionStatus DiskStorage::DiskAccessor::DropUniqueConstrain
   return UniqueConstraints::DeletionStatus::SUCCESS;
 }
 
-Transaction DiskStorage::CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode) {
+Transaction DiskStorage::CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode, bool /*is_main*/) {
   /// We acquire the transaction engine lock here because we access (and
   /// modify) the transaction engine variables (`transaction_id` and
   /// `timestamp`) below.
@@ -1983,7 +1983,8 @@ uint64_t DiskStorage::CommitTimestamp(const std::optional<uint64_t> desired_comm
   return *desired_commit_timestamp;
 }
 
-std::unique_ptr<Storage::Accessor> DiskStorage::Access(std::optional<IsolationLevel> override_isolation_level) {
+std::unique_ptr<Storage::Accessor> DiskStorage::Access(std::optional<IsolationLevel> override_isolation_level,
+                                                       bool /*is_main*/) {
   auto isolation_level = override_isolation_level.value_or(isolation_level_);
   if (isolation_level != IsolationLevel::SNAPSHOT_ISOLATION) {
     throw utils::NotYetImplemented("Disk storage supports only SNAPSHOT isolation level.");
@@ -1991,7 +1992,8 @@ std::unique_ptr<Storage::Accessor> DiskStorage::Access(std::optional<IsolationLe
   return std::unique_ptr<DiskAccessor>(
       new DiskAccessor{Storage::Accessor::shared_access, this, isolation_level, storage_mode_});
 }
-std::unique_ptr<Storage::Accessor> DiskStorage::UniqueAccess(std::optional<IsolationLevel> override_isolation_level) {
+std::unique_ptr<Storage::Accessor> DiskStorage::UniqueAccess(std::optional<IsolationLevel> override_isolation_level,
+                                                             bool /*is_main*/) {
   auto isolation_level = override_isolation_level.value_or(isolation_level_);
   if (isolation_level != IsolationLevel::SNAPSHOT_ISOLATION) {
     throw utils::NotYetImplemented("Disk storage supports only SNAPSHOT isolation level.");
diff --git a/src/storage/v2/disk/storage.hpp b/src/storage/v2/disk/storage.hpp
index 9bdf9f049..3575e685d 100644
--- a/src/storage/v2/disk/storage.hpp
+++ b/src/storage/v2/disk/storage.hpp
@@ -142,8 +142,8 @@ class DiskStorage final : public Storage {
     ConstraintsInfo ListAllConstraints() const override;
 
     // NOLINTNEXTLINE(google-default-arguments)
-    utils::BasicResult<StorageManipulationError, void> Commit(
-        std::optional<uint64_t> desired_commit_timestamp = {}) override;
+    utils::BasicResult<StorageManipulationError, void> Commit(std::optional<uint64_t> desired_commit_timestamp = {},
+                                                              bool is_main = true) override;
 
     void UpdateObjectsCountOnAbort();
 
@@ -172,9 +172,13 @@ class DiskStorage final : public Storage {
                                                            const std::set<PropertyId> &properties) override;
   };
 
-  std::unique_ptr<Storage::Accessor> Access(std::optional<IsolationLevel> override_isolation_level) override;
+  using Storage::Access;
+  std::unique_ptr<Storage::Accessor> Access(std::optional<IsolationLevel> override_isolation_level,
+                                            bool is_main) override;
 
-  std::unique_ptr<Storage::Accessor> UniqueAccess(std::optional<IsolationLevel> override_isolation_level) override;
+  using Storage::UniqueAccess;
+  std::unique_ptr<Storage::Accessor> UniqueAccess(std::optional<IsolationLevel> override_isolation_level,
+                                                  bool is_main) override;
 
   /// Flushing methods
   [[nodiscard]] utils::BasicResult<StorageManipulationError, void> FlushIndexCache(Transaction *transaction);
@@ -277,7 +281,8 @@ class DiskStorage final : public Storage {
 
   RocksDBStorage *GetRocksDBStorage() const { return kvstore_.get(); }
 
-  Transaction CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode) override;
+  using Storage::CreateTransaction;
+  Transaction CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode, bool is_main) override;
 
   void SetEdgeImportMode(EdgeImportMode edge_import_status);
 
@@ -304,22 +309,16 @@ class DiskStorage final : public Storage {
 
   void FreeMemory(std::unique_lock<utils::ResourceLock> /*lock*/) override {}
 
-  void PrepareForNewEpoch(std::string /*prev_epoch*/) override {
-    throw utils::BasicException("Disk storage mode does not support replication.");
-  }
+  void PrepareForNewEpoch() override { throw utils::BasicException("Disk storage mode does not support replication."); }
 
   uint64_t CommitTimestamp(std::optional<uint64_t> desired_commit_timestamp = {});
 
-  auto CreateReplicationClient(const memgraph::replication::ReplicationClientConfig & /*config*/)
+  auto CreateReplicationClient(const memgraph::replication::ReplicationClientConfig & /*config*/,
+                               const memgraph::replication::ReplicationEpoch * /*current_epoch*/)
       -> std::unique_ptr<ReplicationClient> override {
     throw utils::BasicException("Disk storage mode does not support replication.");
   }
 
-  auto CreateReplicationServer(const memgraph::replication::ReplicationServerConfig & /*config*/)
-      -> std::unique_ptr<ReplicationServer> override {
-    throw utils::BasicException("Disk storage mode does not support replication.");
-  }
-
   std::unique_ptr<RocksDBStorage> kvstore_;
   DurableMetadata durable_metadata_;
   EdgeImportMode edge_import_status_{EdgeImportMode::INACTIVE};
diff --git a/src/storage/v2/durability/durability.cpp b/src/storage/v2/durability/durability.cpp
index aa0730e6f..a3cf0e2bb 100644
--- a/src/storage/v2/durability/durability.cpp
+++ b/src/storage/v2/durability/durability.cpp
@@ -211,12 +211,10 @@ void RecoverIndicesAndConstraints(const RecoveredIndicesAndConstraints &indices_
 
 std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_directory,
                                         const std::filesystem::path &wal_directory, std::string *uuid,
-                                        memgraph::replication::ReplicationEpoch &epoch,
-                                        std::deque<std::pair<std::string, uint64_t>> *epoch_history,
-                                        utils::SkipList<Vertex> *vertices, utils::SkipList<Edge> *edges,
-                                        std::atomic<uint64_t> *edge_count, NameIdMapper *name_id_mapper,
-                                        Indices *indices, Constraints *constraints, const Config &config,
-                                        uint64_t *wal_seq_num) {
+                                        ReplicationStorageState &repl_storage_state, utils::SkipList<Vertex> *vertices,
+                                        utils::SkipList<Edge> *edges, std::atomic<uint64_t> *edge_count,
+                                        NameIdMapper *name_id_mapper, Indices *indices, Constraints *constraints,
+                                        const Config &config, uint64_t *wal_seq_num) {
   utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception;
   spdlog::info("Recovering persisted data using snapshot ({}) and WAL directory ({}).", snapshot_directory,
                wal_directory);
@@ -226,6 +224,7 @@ std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_di
     return std::nullopt;
   }
 
+  auto *const epoch_history = &repl_storage_state.history;
   utils::Timer timer;
 
   auto snapshot_files = GetSnapshotFiles(snapshot_directory);
@@ -264,7 +263,7 @@ std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_di
     recovery_info = recovered_snapshot->recovery_info;
     indices_constraints = std::move(recovered_snapshot->indices_constraints);
     snapshot_timestamp = recovered_snapshot->snapshot_info.start_timestamp;
-    epoch.SetEpoch(std::move(recovered_snapshot->snapshot_info.epoch_id));
+    repl_storage_state.epoch_.SetEpoch(std::move(recovered_snapshot->snapshot_info.epoch_id));
 
     if (!utils::DirExists(wal_directory)) {
       const auto par_exec_info = config.durability.allow_parallel_index_creation
@@ -309,7 +308,7 @@ std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_di
     // UUID used for durability is the UUID of the last WAL file.
     // Same for the epoch id.
     *uuid = std::move(wal_files.back().uuid);
-    epoch.SetEpoch(std::move(wal_files.back().epoch_id));
+    repl_storage_state.epoch_.SetEpoch(std::move(wal_files.back().epoch_id));
   }
 
   auto maybe_wal_files = GetWalFiles(wal_directory, *uuid);
@@ -365,7 +364,7 @@ std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_di
       }
       previous_seq_num = wal_file.seq_num;
 
-      if (wal_file.epoch_id != epoch.id()) {
+      if (wal_file.epoch_id != repl_storage_state.epoch_.id()) {
         // This way we skip WALs finalized only because of role change.
         // We can also set the last timestamp to 0 if last loaded timestamp
         // is nullopt as this can only happen if the WAL file with seq = 0
@@ -373,7 +372,7 @@ std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_di
         if (last_loaded_timestamp) {
           epoch_history->emplace_back(wal_file.epoch_id, *last_loaded_timestamp);
         }
-        epoch.SetEpoch(std::move(wal_file.epoch_id));
+        repl_storage_state.epoch_.SetEpoch(std::move(wal_file.epoch_id));
       }
       try {
         auto info = LoadWal(wal_file.path, &indices_constraints, last_loaded_timestamp, vertices, edges, name_id_mapper,
diff --git a/src/storage/v2/durability/durability.hpp b/src/storage/v2/durability/durability.hpp
index b68e18071..8b735f02a 100644
--- a/src/storage/v2/durability/durability.hpp
+++ b/src/storage/v2/durability/durability.hpp
@@ -19,6 +19,7 @@
 #include <variant>
 
 #include "replication/epoch.hpp"
+#include "replication/state.hpp"
 #include "storage/v2/config.hpp"
 #include "storage/v2/constraints/constraints.hpp"
 #include "storage/v2/durability/metadata.hpp"
@@ -26,6 +27,7 @@
 #include "storage/v2/edge.hpp"
 #include "storage/v2/indices/indices.hpp"
 #include "storage/v2/name_id_mapper.hpp"
+#include "storage/v2/replication/replication_storage_state.hpp"
 #include "storage/v2/vertex.hpp"
 #include "utils/skip_list.hpp"
 
@@ -110,11 +112,9 @@ void RecoverIndicesAndConstraints(
 /// @throw std::bad_alloc
 std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_directory,
                                         const std::filesystem::path &wal_directory, std::string *uuid,
-                                        memgraph::replication::ReplicationEpoch &epoch,
-                                        std::deque<std::pair<std::string, uint64_t>> *epoch_history,
-                                        utils::SkipList<Vertex> *vertices, utils::SkipList<Edge> *edges,
-                                        std::atomic<uint64_t> *edge_count, NameIdMapper *name_id_mapper,
-                                        Indices *indices, Constraints *constraints, const Config &config,
-                                        uint64_t *wal_seq_num);
+                                        ReplicationStorageState &repl_storage_state, utils::SkipList<Vertex> *vertices,
+                                        utils::SkipList<Edge> *edges, std::atomic<uint64_t> *edge_count,
+                                        NameIdMapper *name_id_mapper, Indices *indices, Constraints *constraints,
+                                        const Config &config, uint64_t *wal_seq_num);
 
 }  // namespace memgraph::storage::durability
diff --git a/src/storage/v2/durability/snapshot.cpp b/src/storage/v2/durability/snapshot.cpp
index eded85278..d4278dba9 100644
--- a/src/storage/v2/durability/snapshot.cpp
+++ b/src/storage/v2/durability/snapshot.cpp
@@ -135,10 +135,11 @@ SnapshotInfo ReadSnapshotInfo(const std::filesystem::path &path) {
   // Read offsets.
   {
     auto marker = snapshot.ReadMarker();
-    if (!marker || *marker != Marker::SECTION_OFFSETS) throw RecoveryFailure("Invalid snapshot data!");
+    if (!marker || *marker != Marker::SECTION_OFFSETS)
+      throw RecoveryFailure("Couldn't read marker for section offsets!");
 
     auto snapshot_size = snapshot.GetSize();
-    if (!snapshot_size) throw RecoveryFailure("Couldn't read data from snapshot!");
+    if (!snapshot_size) throw RecoveryFailure("Couldn't read snapshot size!");
 
     auto read_offset = [&snapshot, snapshot_size] {
       auto maybe_offset = snapshot.ReadUint();
@@ -166,29 +167,30 @@ SnapshotInfo ReadSnapshotInfo(const std::filesystem::path &path) {
 
   // Read metadata.
   {
-    if (!snapshot.SetPosition(info.offset_metadata)) throw RecoveryFailure("Couldn't read data from snapshot!");
+    if (!snapshot.SetPosition(info.offset_metadata)) throw RecoveryFailure("Couldn't read metadata offset!");
 
     auto marker = snapshot.ReadMarker();
-    if (!marker || *marker != Marker::SECTION_METADATA) throw RecoveryFailure("Invalid snapshot data!");
+    if (!marker || *marker != Marker::SECTION_METADATA)
+      throw RecoveryFailure("Couldn't read marker for section metadata!");
 
     auto maybe_uuid = snapshot.ReadString();
-    if (!maybe_uuid) throw RecoveryFailure("Invalid snapshot data!");
+    if (!maybe_uuid) throw RecoveryFailure("Couldn't read uuid!");
     info.uuid = std::move(*maybe_uuid);
 
     auto maybe_epoch_id = snapshot.ReadString();
-    if (!maybe_epoch_id) throw RecoveryFailure("Invalid snapshot data!");
+    if (!maybe_epoch_id) throw RecoveryFailure("Couldn't read epoch id!");
     info.epoch_id = std::move(*maybe_epoch_id);
 
     auto maybe_timestamp = snapshot.ReadUint();
-    if (!maybe_timestamp) throw RecoveryFailure("Invalid snapshot data!");
+    if (!maybe_timestamp) throw RecoveryFailure("Couldn't read start timestamp!");
     info.start_timestamp = *maybe_timestamp;
 
     auto maybe_edges = snapshot.ReadUint();
-    if (!maybe_edges) throw RecoveryFailure("Invalid snapshot data!");
+    if (!maybe_edges) throw RecoveryFailure("Couldn't read the number of edges!");
     info.edges_count = *maybe_edges;
 
     auto maybe_vertices = snapshot.ReadUint();
-    if (!maybe_vertices) throw RecoveryFailure("Invalid snapshot data!");
+    if (!maybe_vertices) throw RecoveryFailure("Couldn't read the number of vertices!");
     info.vertices_count = *maybe_vertices;
   }
 
@@ -199,19 +201,19 @@ std::vector<BatchInfo> ReadBatchInfos(Decoder &snapshot) {
   std::vector<BatchInfo> infos;
   const auto infos_size = snapshot.ReadUint();
   if (!infos_size.has_value()) {
-    throw RecoveryFailure("Invalid snapshot data!");
+    throw RecoveryFailure("Couldn't read number of batch infos!");
   }
   infos.reserve(*infos_size);
 
   for (auto i{0U}; i < *infos_size; ++i) {
     const auto offset = snapshot.ReadUint();
     if (!offset.has_value()) {
-      throw RecoveryFailure("Invalid snapshot data!");
+      throw RecoveryFailure("Couldn't read batch info offset!");
     }
 
     const auto count = snapshot.ReadUint();
     if (!count.has_value()) {
-      throw RecoveryFailure("Invalid snapshot data!");
+      throw RecoveryFailure("Couldn't read batch info count!");
     }
     infos.push_back(BatchInfo{*offset, *count});
   }
@@ -228,7 +230,7 @@ void LoadPartialEdges(const std::filesystem::path &path, utils::SkipList<Edge> &
   auto edge_acc = edges.access();
   uint64_t last_edge_gid = 0;
   spdlog::info("Recovering {} edges.", edges_count);
-  if (!snapshot.SetPosition(from_offset)) throw RecoveryFailure("Couldn't read data from snapshot!");
+  if (!snapshot.SetPosition(from_offset)) throw RecoveryFailure("Couldn't set offset position for reading edges!");
 
   std::vector<std::pair<PropertyId, PropertyValue>> read_properties;
   uint64_t five_percent_chunk = edges_count / 20;
@@ -253,12 +255,12 @@ void LoadPartialEdges(const std::filesystem::path &path, utils::SkipList<Edge> &
 
     {
       const auto marker = snapshot.ReadMarker();
-      if (!marker || *marker != Marker::SECTION_EDGE) throw RecoveryFailure("Invalid snapshot data!");
+      if (!marker || *marker != Marker::SECTION_EDGE) throw RecoveryFailure("Couldn't read section edge marker!");
     }
     // Read edge GID.
     auto gid = snapshot.ReadUint();
-    if (!gid) throw RecoveryFailure("Invalid snapshot data!");
-    if (i > 0 && *gid <= last_edge_gid) throw RecoveryFailure("Invalid snapshot data!");
+    if (!gid) throw RecoveryFailure("Failed to read edge gid!");
+    if (i > 0 && *gid <= last_edge_gid) throw RecoveryFailure("Invalid edge gid read!");
     last_edge_gid = *gid;
 
     if (items.properties_on_edges) {
@@ -268,15 +270,15 @@ void LoadPartialEdges(const std::filesystem::path &path, utils::SkipList<Edge> &
       // Recover properties.
       {
         auto props_size = snapshot.ReadUint();
-        if (!props_size) throw RecoveryFailure("Invalid snapshot data!");
+        if (!props_size) throw RecoveryFailure("Couldn't read the size of edge properties!");
         auto &props = it->properties;
         read_properties.clear();
         read_properties.reserve(*props_size);
         for (uint64_t j = 0; j < *props_size; ++j) {
           auto key = snapshot.ReadUint();
-          if (!key) throw RecoveryFailure("Invalid snapshot data!");
+          if (!key) throw RecoveryFailure("Couldn't read edge property id!");
           auto value = snapshot.ReadPropertyValue();
-          if (!value) throw RecoveryFailure("Invalid snapshot data!");
+          if (!value) throw RecoveryFailure("Couldn't read edge property value!");
           read_properties.emplace_back(get_property_from_id(*key), std::move(*value));
         }
         props.InitProperties(std::move(read_properties));
@@ -286,7 +288,7 @@ void LoadPartialEdges(const std::filesystem::path &path, utils::SkipList<Edge> &
       // Read properties.
       {
         auto props_size = snapshot.ReadUint();
-        if (!props_size) throw RecoveryFailure("Invalid snapshot data!");
+        if (!props_size) throw RecoveryFailure("Couldn't read size of edge properties!");
         if (*props_size != 0)
           throw RecoveryFailure(
               "The snapshot has properties on edges, but the storage is "
@@ -304,7 +306,8 @@ uint64_t LoadPartialVertices(const std::filesystem::path &path, utils::SkipList<
                              TLabelFromIdFunc get_label_from_id, TPropertyFromIdFunc get_property_from_id) {
   Decoder snapshot;
   snapshot.Initialize(path, kSnapshotMagic);
-  if (!snapshot.SetPosition(from_offset)) throw RecoveryFailure("Couldn't read data from snapshot!");
+  if (!snapshot.SetPosition(from_offset))
+    throw RecoveryFailure("Couldn't set offset for reading vertices from a snapshot!");
 
   auto vertex_acc = vertices.access();
   uint64_t last_vertex_gid = 0;
@@ -331,14 +334,14 @@ uint64_t LoadPartialVertices(const std::filesystem::path &path, utils::SkipList<
     }
     {
       auto marker = snapshot.ReadMarker();
-      if (!marker || *marker != Marker::SECTION_VERTEX) throw RecoveryFailure("Invalid snapshot data!");
+      if (!marker || *marker != Marker::SECTION_VERTEX) throw RecoveryFailure("Couldn't read section vertex marker!");
     }
 
     // Insert vertex.
     auto gid = snapshot.ReadUint();
-    if (!gid) throw RecoveryFailure("Invalid snapshot data!");
+    if (!gid) throw RecoveryFailure("Couldn't read vertex gid!");
     if (i > 0 && *gid <= last_vertex_gid) {
-      throw RecoveryFailure("Invalid snapshot data!");
+      throw RecoveryFailure("Read vertex gid is invalid!");
     }
     last_vertex_gid = *gid;
     auto [it, inserted] = vertex_acc.insert(Vertex{Gid::FromUint(*gid), nullptr});
@@ -347,12 +350,12 @@ uint64_t LoadPartialVertices(const std::filesystem::path &path, utils::SkipList<
     // Recover labels.
     {
       auto labels_size = snapshot.ReadUint();
-      if (!labels_size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!labels_size) throw RecoveryFailure("Couldn't read the size of vertex labels!");
       auto &labels = it->labels;
       labels.reserve(*labels_size);
       for (uint64_t j = 0; j < *labels_size; ++j) {
         auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        if (!label) throw RecoveryFailure("Couldn't read vertex label!");
         labels.emplace_back(get_label_from_id(*label));
       }
     }
@@ -360,15 +363,15 @@ uint64_t LoadPartialVertices(const std::filesystem::path &path, utils::SkipList<
     // Recover properties.
     {
       auto props_size = snapshot.ReadUint();
-      if (!props_size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!props_size) throw RecoveryFailure("Couldn't read size of vertex properties!");
       auto &props = it->properties;
       read_properties.clear();
       read_properties.reserve(*props_size);
       for (uint64_t j = 0; j < *props_size; ++j) {
         auto key = snapshot.ReadUint();
-        if (!key) throw RecoveryFailure("Invalid snapshot data!");
+        if (!key) throw RecoveryFailure("Couldn't read vertex property id!");
         auto value = snapshot.ReadPropertyValue();
-        if (!value) throw RecoveryFailure("Invalid snapshot data!");
+        if (!value) throw RecoveryFailure("Couldn't read vertex property value!");
         read_properties.emplace_back(get_property_from_id(*key), std::move(*value));
       }
       props.InitProperties(std::move(read_properties));
@@ -377,27 +380,27 @@ uint64_t LoadPartialVertices(const std::filesystem::path &path, utils::SkipList<
     // Skip in edges.
     {
       auto in_size = snapshot.ReadUint();
-      if (!in_size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!in_size) throw RecoveryFailure("Couldn't read the number of in edges!");
       for (uint64_t j = 0; j < *in_size; ++j) {
         auto edge_gid = snapshot.ReadUint();
-        if (!edge_gid) throw RecoveryFailure("Invalid snapshot data!");
+        if (!edge_gid) throw RecoveryFailure("Couldn't read edge gid!");
         auto from_gid = snapshot.ReadUint();
-        if (!from_gid) throw RecoveryFailure("Invalid snapshot data!");
+        if (!from_gid) throw RecoveryFailure("Couldn't read from vertex gid!");
         auto edge_type = snapshot.ReadUint();
-        if (!edge_type) throw RecoveryFailure("Invalid snapshot data!");
+        if (!edge_type) throw RecoveryFailure("Couldn't read in edge type!");
       }
     }
 
     // Skip out edges.
     auto out_size = snapshot.ReadUint();
-    if (!out_size) throw RecoveryFailure("Invalid snapshot data!");
+    if (!out_size) throw RecoveryFailure("Couldn't read the number of out edges!");
     for (uint64_t j = 0; j < *out_size; ++j) {
       auto edge_gid = snapshot.ReadUint();
-      if (!edge_gid) throw RecoveryFailure("Invalid snapshot data!");
+      if (!edge_gid) throw RecoveryFailure("Couldn't read edge gid!");
       auto to_gid = snapshot.ReadUint();
-      if (!to_gid) throw RecoveryFailure("Invalid snapshot data!");
+      if (!to_gid) throw RecoveryFailure("Couldn't read to vertex gid!");
       auto edge_type = snapshot.ReadUint();
-      if (!edge_type) throw RecoveryFailure("Invalid snapshot data!");
+      if (!edge_type) throw RecoveryFailure("Couldn't read out edge type!");
     }
   }
   spdlog::info("Process of recovering {} vertices is finished.", vertices_count);
@@ -421,7 +424,8 @@ LoadPartialConnectivityResult LoadPartialConnectivity(const std::filesystem::pat
                                                       TEdgeTypeFromIdFunc get_edge_type_from_id) {
   Decoder snapshot;
   snapshot.Initialize(path, kSnapshotMagic);
-  if (!snapshot.SetPosition(from_offset)) throw RecoveryFailure("Couldn't read data from snapshot!");
+  if (!snapshot.SetPosition(from_offset))
+    throw RecoveryFailure("Couldn't set snapshot offset position while loading partial connectivity!");
 
   auto vertex_acc = vertices.access();
   auto edge_acc = edges.access();
@@ -430,11 +434,11 @@ LoadPartialConnectivityResult LoadPartialConnectivity(const std::filesystem::pat
   const auto first_vertex_gid = std::invoke([&]() mutable {
     {
       auto marker = snapshot.ReadMarker();
-      if (!marker || *marker != Marker::SECTION_VERTEX) throw RecoveryFailure("Invalid snapshot data!");
+      if (!marker || *marker != Marker::SECTION_VERTEX) throw RecoveryFailure("Couldn't read section vertex marker!");
     }
 
     auto gid = snapshot.ReadUint();
-    if (!gid) throw RecoveryFailure("Invalid snapshot data!");
+    if (!gid) throw RecoveryFailure("Couldn't read vertex gid!");
     return Gid::FromUint(*gid);
   });
 
@@ -442,12 +446,12 @@ LoadPartialConnectivityResult LoadPartialConnectivity(const std::filesystem::pat
   uint64_t highest_edge_gid{0};
   auto vertex_it = vertex_acc.find(first_vertex_gid);
   if (vertex_it == vertex_acc.end()) {
-    throw RecoveryFailure("Invalid snapshot data!");
+    throw RecoveryFailure("Couldn't find vertex with first vertex gid!");
   }
 
   spdlog::info("Recovering connectivity for {} vertices.", vertices_count);
 
-  if (!snapshot.SetPosition(from_offset)) throw RecoveryFailure("Couldn't read data from snapshot!");
+  if (!snapshot.SetPosition(from_offset)) throw RecoveryFailure("Couldn't set from_offset position!");
 
   uint64_t five_percent_chunk = vertices_count / 20;
 
@@ -473,52 +477,52 @@ LoadPartialConnectivityResult LoadPartialConnectivity(const std::filesystem::pat
     auto &vertex = *vertex_it;
     {
       auto marker = snapshot.ReadMarker();
-      if (!marker || *marker != Marker::SECTION_VERTEX) throw RecoveryFailure("Invalid snapshot data!");
+      if (!marker || *marker != Marker::SECTION_VERTEX) throw RecoveryFailure("Couldn't read section vertex marker!");
     }
 
     auto gid = snapshot.ReadUint();
-    if (!gid) throw RecoveryFailure("Invalid snapshot data!");
-    if (gid != vertex.gid.AsUint()) throw RecoveryFailure("Invalid snapshot data!");
+    if (!gid) throw RecoveryFailure("Couldn't read vertex gid!");
+    if (gid != vertex.gid.AsUint()) throw RecoveryFailure("Read vertex gid is different from the existing one!");
 
     // Skip labels.
     {
       auto labels_size = snapshot.ReadUint();
-      if (!labels_size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!labels_size) throw RecoveryFailure("Couldn't read the number of labels!");
       for (uint64_t j = 0; j < *labels_size; ++j) {
         auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        if (!label) throw RecoveryFailure("Couldn't read label!");
       }
     }
 
     // Skip properties.
     {
       auto props_size = snapshot.ReadUint();
-      if (!props_size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!props_size) throw RecoveryFailure("Couldn't read the number of vertex properties!");
       for (uint64_t j = 0; j < *props_size; ++j) {
         auto key = snapshot.ReadUint();
-        if (!key) throw RecoveryFailure("Invalid snapshot data!");
+        if (!key) throw RecoveryFailure("Couldn't read vertex property id!");
         auto value = snapshot.SkipPropertyValue();
-        if (!value) throw RecoveryFailure("Invalid snapshot data!");
+        if (!value) throw RecoveryFailure("Couldn't read vertex property value!");
       }
     }
 
     // Recover in edges.
     {
       auto in_size = snapshot.ReadUint();
-      if (!in_size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!in_size) throw RecoveryFailure("Couldn't read the number of in edges!");
       vertex.in_edges.reserve(*in_size);
       for (uint64_t j = 0; j < *in_size; ++j) {
         auto edge_gid = snapshot.ReadUint();
-        if (!edge_gid) throw RecoveryFailure("Invalid snapshot data!");
+        if (!edge_gid) throw RecoveryFailure("Couldn't read the edge gid!");
         highest_edge_gid = std::max(highest_edge_gid, *edge_gid);
 
         auto from_gid = snapshot.ReadUint();
-        if (!from_gid) throw RecoveryFailure("Invalid snapshot data!");
+        if (!from_gid) throw RecoveryFailure("Couldn't read from vertex gid!");
         auto edge_type = snapshot.ReadUint();
-        if (!edge_type) throw RecoveryFailure("Invalid snapshot data!");
+        if (!edge_type) throw RecoveryFailure("Couldn't read edge type!");
 
         auto from_vertex = vertex_acc.find(Gid::FromUint(*from_gid));
-        if (from_vertex == vertex_acc.end()) throw RecoveryFailure("Invalid from vertex!");
+        if (from_vertex == vertex_acc.end()) throw RecoveryFailure("Couldn't find from vertex in loaded vertices!");
 
         EdgeRef edge_ref(Gid::FromUint(*edge_gid));
         if (items.properties_on_edges) {
@@ -541,19 +545,19 @@ LoadPartialConnectivityResult LoadPartialConnectivity(const std::filesystem::pat
     // Recover out edges.
     {
       auto out_size = snapshot.ReadUint();
-      if (!out_size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!out_size) throw RecoveryFailure("Couldn't read the number of out edges!");
       vertex.out_edges.reserve(*out_size);
       for (uint64_t j = 0; j < *out_size; ++j) {
         auto edge_gid = snapshot.ReadUint();
-        if (!edge_gid) throw RecoveryFailure("Invalid snapshot data!");
+        if (!edge_gid) throw RecoveryFailure("Couldn't read edge gid!");
 
         auto to_gid = snapshot.ReadUint();
-        if (!to_gid) throw RecoveryFailure("Invalid snapshot data!");
+        if (!to_gid) throw RecoveryFailure("Couldn't read to vertex gid!");
         auto edge_type = snapshot.ReadUint();
-        if (!edge_type) throw RecoveryFailure("Invalid snapshot data!");
+        if (!edge_type) throw RecoveryFailure("Couldn't read edge type!");
 
         auto to_vertex = vertex_acc.find(Gid::FromUint(*to_gid));
-        if (to_vertex == vertex_acc.end()) throw RecoveryFailure("Invalid to vertex!");
+        if (to_vertex == vertex_acc.end()) throw RecoveryFailure("Couldn't find to vertex in loaded vertices!");
 
         EdgeRef edge_ref(Gid::FromUint(*edge_gid));
         if (items.properties_on_edges) {
@@ -562,7 +566,7 @@ LoadPartialConnectivityResult LoadPartialConnectivity(const std::filesystem::pat
           // edges in the in/out edges list of vertices, therefore the edges has to be created here.
           if (snapshot_has_edges) {
             auto edge = edge_acc.find(Gid::FromUint(*edge_gid));
-            if (edge == edge_acc.end()) throw RecoveryFailure("Invalid edge!");
+            if (edge == edge_acc.end()) throw RecoveryFailure("Couldn't find edge in the loaded edges!");
             edge_ref = EdgeRef(&*edge);
           } else {
             auto [edge, inserted] = edge_acc.insert(Edge{Gid::FromUint(*edge_gid), nullptr});
@@ -649,34 +653,34 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
     if (!snapshot.SetPosition(info.offset_mapper)) throw RecoveryFailure("Couldn't read data from snapshot!");
 
     auto marker = snapshot.ReadMarker();
-    if (!marker || *marker != Marker::SECTION_MAPPER) throw RecoveryFailure("Invalid snapshot data!");
+    if (!marker || *marker != Marker::SECTION_MAPPER) throw RecoveryFailure("Failed to read section mapper!");
 
     auto size = snapshot.ReadUint();
-    if (!size) throw RecoveryFailure("Invalid snapshot data!");
+    if (!size) throw RecoveryFailure("Failed to read name-id mapper size!");
 
     for (uint64_t i = 0; i < *size; ++i) {
       auto id = snapshot.ReadUint();
-      if (!id) throw RecoveryFailure("Invalid snapshot data!");
+      if (!id) throw RecoveryFailure("Failed to read id for name-id mapper!");
       auto name = snapshot.ReadString();
-      if (!name) throw RecoveryFailure("Invalid snapshot data!");
+      if (!name) throw RecoveryFailure("Failed to read name for name-id mapper!");
       auto my_id = name_id_mapper->NameToId(*name);
       snapshot_id_map.emplace(*id, my_id);
       SPDLOG_TRACE("Mapping \"{}\"from snapshot id {} to actual id {}.", *name, *id, my_id);
     }
   }
-  auto get_label_from_id = [&snapshot_id_map](uint64_t snapshot_id) {
-    auto it = snapshot_id_map.find(snapshot_id);
-    if (it == snapshot_id_map.end()) throw RecoveryFailure("Invalid snapshot data!");
+  auto get_label_from_id = [&snapshot_id_map](uint64_t label_id) {
+    auto it = snapshot_id_map.find(label_id);
+    if (it == snapshot_id_map.end()) throw RecoveryFailure("Couldn't find label id in snapshot_id_map!");
     return LabelId::FromUint(it->second);
   };
-  auto get_property_from_id = [&snapshot_id_map](uint64_t snapshot_id) {
-    auto it = snapshot_id_map.find(snapshot_id);
-    if (it == snapshot_id_map.end()) throw RecoveryFailure("Invalid snapshot data!");
+  auto get_property_from_id = [&snapshot_id_map](uint64_t property_id) {
+    auto it = snapshot_id_map.find(property_id);
+    if (it == snapshot_id_map.end()) throw RecoveryFailure("Couldn't find property id in snapshot_id_map!");
     return PropertyId::FromUint(it->second);
   };
-  auto get_edge_type_from_id = [&snapshot_id_map](uint64_t snapshot_id) {
-    auto it = snapshot_id_map.find(snapshot_id);
-    if (it == snapshot_id_map.end()) throw RecoveryFailure("Invalid snapshot data!");
+  auto get_edge_type_from_id = [&snapshot_id_map](uint64_t edge_type_id) {
+    auto it = snapshot_id_map.find(edge_type_id);
+    if (it == snapshot_id_map.end()) throw RecoveryFailure("Couldn't find edge type id in snapshot_id_map!");
     return EdgeTypeId::FromUint(it->second);
   };
 
@@ -693,14 +697,14 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
       for (uint64_t i = 0; i < info.edges_count; ++i) {
         {
           const auto marker = snapshot.ReadMarker();
-          if (!marker || *marker != Marker::SECTION_EDGE) throw RecoveryFailure("Invalid snapshot data!");
+          if (!marker || *marker != Marker::SECTION_EDGE) throw RecoveryFailure("Couldn't read section edge marker!");
         }
 
         if (items.properties_on_edges) {
           // Insert edge.
           auto gid = snapshot.ReadUint();
-          if (!gid) throw RecoveryFailure("Invalid snapshot data!");
-          if (i > 0 && *gid <= last_edge_gid) throw RecoveryFailure("Invalid snapshot data!");
+          if (!gid) throw RecoveryFailure("Couldn't read edge gid!");
+          if (i > 0 && *gid <= last_edge_gid) throw RecoveryFailure("Invalid edge gid read!");
           last_edge_gid = *gid;
           spdlog::debug("Recovering edge {} with properties.", *gid);
           auto [it, inserted] = edge_acc.insert(Edge{Gid::FromUint(*gid), nullptr});
@@ -709,13 +713,13 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
           // Recover properties.
           {
             auto props_size = snapshot.ReadUint();
-            if (!props_size) throw RecoveryFailure("Invalid snapshot data!");
+            if (!props_size) throw RecoveryFailure("Couldn't read the size of properties!");
             auto &props = it->properties;
             for (uint64_t j = 0; j < *props_size; ++j) {
               auto key = snapshot.ReadUint();
-              if (!key) throw RecoveryFailure("Invalid snapshot data!");
+              if (!key) throw RecoveryFailure("Couldn't read edge property id!");
               auto value = snapshot.ReadPropertyValue();
-              if (!value) throw RecoveryFailure("Invalid snapshot data!");
+              if (!value) throw RecoveryFailure("Couldn't read edge property value!");
               SPDLOG_TRACE("Recovered property \"{}\" with value \"{}\" for edge {}.",
                            name_id_mapper->IdToName(snapshot_id_map.at(*key)), *value, *gid);
               props.SetProperty(get_property_from_id(*key), *value);
@@ -724,15 +728,15 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
         } else {
           // Read edge GID.
           auto gid = snapshot.ReadUint();
-          if (!gid) throw RecoveryFailure("Invalid snapshot data!");
-          if (i > 0 && *gid <= last_edge_gid) throw RecoveryFailure("Invalid snapshot data!");
+          if (!gid) throw RecoveryFailure("Couldn't read edge gid!");
+          if (i > 0 && *gid <= last_edge_gid) throw RecoveryFailure("Invalid edge gid read!");
           last_edge_gid = *gid;
 
           spdlog::debug("Ensuring edge {} doesn't have any properties.", *gid);
           // Read properties.
           {
             auto props_size = snapshot.ReadUint();
-            if (!props_size) throw RecoveryFailure("Invalid snapshot data!");
+            if (!props_size) throw RecoveryFailure("Couldn't read the size of properties!");
             if (*props_size != 0)
               throw RecoveryFailure(
                   "The snapshot has properties on edges, but the storage is "
@@ -751,14 +755,14 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
     for (uint64_t i = 0; i < info.vertices_count; ++i) {
       {
         auto marker = snapshot.ReadMarker();
-        if (!marker || *marker != Marker::SECTION_VERTEX) throw RecoveryFailure("Invalid snapshot data!");
+        if (!marker || *marker != Marker::SECTION_VERTEX) throw RecoveryFailure("Couldn't read section vertex marker!");
       }
 
       // Insert vertex.
       auto gid = snapshot.ReadUint();
-      if (!gid) throw RecoveryFailure("Invalid snapshot data!");
+      if (!gid) throw RecoveryFailure("Couldn't read vertex gid!");
       if (i > 0 && *gid <= last_vertex_gid) {
-        throw RecoveryFailure("Invalid snapshot data!");
+        throw RecoveryFailure("Invalid vertex gid read!");
       }
       last_vertex_gid = *gid;
       spdlog::debug("Recovering vertex {}.", *gid);
@@ -769,12 +773,12 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
       spdlog::trace("Recovering labels for vertex {}.", *gid);
       {
         auto labels_size = snapshot.ReadUint();
-        if (!labels_size) throw RecoveryFailure("Invalid snapshot data!");
+        if (!labels_size) throw RecoveryFailure("Couldn't read the size of labels!");
         auto &labels = it->labels;
         labels.reserve(*labels_size);
         for (uint64_t j = 0; j < *labels_size; ++j) {
           auto label = snapshot.ReadUint();
-          if (!label) throw RecoveryFailure("Invalid snapshot data!");
+          if (!label) throw RecoveryFailure("Couldn't read vertex label!");
           SPDLOG_TRACE("Recovered label \"{}\" for vertex {}.", name_id_mapper->IdToName(snapshot_id_map.at(*label)),
                        *gid);
           labels.emplace_back(get_label_from_id(*label));
@@ -785,13 +789,13 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
       spdlog::trace("Recovering properties for vertex {}.", *gid);
       {
         auto props_size = snapshot.ReadUint();
-        if (!props_size) throw RecoveryFailure("Invalid snapshot data!");
+        if (!props_size) throw RecoveryFailure("Couldn't read the size of properties!");
         auto &props = it->properties;
         for (uint64_t j = 0; j < *props_size; ++j) {
           auto key = snapshot.ReadUint();
-          if (!key) throw RecoveryFailure("Invalid snapshot data!");
+          if (!key) throw RecoveryFailure("Couldn't read the vertex property id!");
           auto value = snapshot.ReadPropertyValue();
-          if (!value) throw RecoveryFailure("Invalid snapshot data!");
+          if (!value) throw RecoveryFailure("Couldn't read the vertex property value!");
           SPDLOG_TRACE("Recovered property \"{}\" with value \"{}\" for vertex {}.",
                        name_id_mapper->IdToName(snapshot_id_map.at(*key)), *value, *gid);
           props.SetProperty(get_property_from_id(*key), *value);
@@ -801,27 +805,27 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
       // Skip in edges.
       {
         auto in_size = snapshot.ReadUint();
-        if (!in_size) throw RecoveryFailure("Invalid snapshot data!");
+        if (!in_size) throw RecoveryFailure("Couldn't read the size of in edges!");
         for (uint64_t j = 0; j < *in_size; ++j) {
           auto edge_gid = snapshot.ReadUint();
-          if (!edge_gid) throw RecoveryFailure("Invalid snapshot data!");
+          if (!edge_gid) throw RecoveryFailure("Couldn't read the edge gid!");
           auto from_gid = snapshot.ReadUint();
-          if (!from_gid) throw RecoveryFailure("Invalid snapshot data!");
+          if (!from_gid) throw RecoveryFailure("Couldn't read from vertex gid!");
           auto edge_type = snapshot.ReadUint();
-          if (!edge_type) throw RecoveryFailure("Invalid snapshot data!");
+          if (!edge_type) throw RecoveryFailure("Couldn't read edge type!");
         }
       }
 
       // Skip out edges.
       auto out_size = snapshot.ReadUint();
-      if (!out_size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!out_size) throw RecoveryFailure("Couldn't read the number of out edges!");
       for (uint64_t j = 0; j < *out_size; ++j) {
         auto edge_gid = snapshot.ReadUint();
-        if (!edge_gid) throw RecoveryFailure("Invalid snapshot data!");
+        if (!edge_gid) throw RecoveryFailure("Couldn't read the edge gid!");
         auto to_gid = snapshot.ReadUint();
-        if (!to_gid) throw RecoveryFailure("Invalid snapshot data!");
+        if (!to_gid) throw RecoveryFailure("Couldn't read to vertex gid!");
         auto edge_type = snapshot.ReadUint();
-        if (!edge_type) throw RecoveryFailure("Invalid snapshot data!");
+        if (!edge_type) throw RecoveryFailure("Couldn't read edge type!");
       }
     }
     spdlog::info("Vertices are recovered.");
@@ -832,34 +836,34 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
     for (auto &vertex : vertex_acc) {
       {
         auto marker = snapshot.ReadMarker();
-        if (!marker || *marker != Marker::SECTION_VERTEX) throw RecoveryFailure("Invalid snapshot data!");
+        if (!marker || *marker != Marker::SECTION_VERTEX) throw RecoveryFailure("Couldn't read section vertex marker!");
       }
 
       spdlog::trace("Recovering connectivity for vertex {}.", vertex.gid.AsUint());
       // Check vertex.
       auto gid = snapshot.ReadUint();
-      if (!gid) throw RecoveryFailure("Invalid snapshot data!");
-      if (gid != vertex.gid.AsUint()) throw RecoveryFailure("Invalid snapshot data!");
+      if (!gid) throw RecoveryFailure("Couldn't read vertex gid!");
+      if (gid != vertex.gid.AsUint()) throw RecoveryFailure("Invalid vertex read!");
 
       // Skip labels.
       {
         auto labels_size = snapshot.ReadUint();
-        if (!labels_size) throw RecoveryFailure("Invalid snapshot data!");
+        if (!labels_size) throw RecoveryFailure("Couldn't read the size of labels!");
         for (uint64_t j = 0; j < *labels_size; ++j) {
           auto label = snapshot.ReadUint();
-          if (!label) throw RecoveryFailure("Invalid snapshot data!");
+          if (!label) throw RecoveryFailure("Couldn't read label!");
         }
       }
 
       // Skip properties.
       {
         auto props_size = snapshot.ReadUint();
-        if (!props_size) throw RecoveryFailure("Invalid snapshot data!");
+        if (!props_size) throw RecoveryFailure("Couldn't read the size of properties!");
         for (uint64_t j = 0; j < *props_size; ++j) {
           auto key = snapshot.ReadUint();
-          if (!key) throw RecoveryFailure("Invalid snapshot data!");
+          if (!key) throw RecoveryFailure("Couldn't read property key while skipping properties!");
           auto value = snapshot.SkipPropertyValue();
-          if (!value) throw RecoveryFailure("Invalid snapshot data!");
+          if (!value) throw RecoveryFailure("Couldn't read property value while skipping properties!");
         }
       }
 
@@ -867,17 +871,17 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
       {
         spdlog::trace("Recovering inbound edges for vertex {}.", vertex.gid.AsUint());
         auto in_size = snapshot.ReadUint();
-        if (!in_size) throw RecoveryFailure("Invalid snapshot data!");
+        if (!in_size) throw RecoveryFailure("Couldn't read the size of in edges!");
         vertex.in_edges.reserve(*in_size);
         for (uint64_t j = 0; j < *in_size; ++j) {
           auto edge_gid = snapshot.ReadUint();
-          if (!edge_gid) throw RecoveryFailure("Invalid snapshot data!");
+          if (!edge_gid) throw RecoveryFailure("Couldn't read edge gid!");
           last_edge_gid = std::max(last_edge_gid, *edge_gid);
 
           auto from_gid = snapshot.ReadUint();
-          if (!from_gid) throw RecoveryFailure("Invalid snapshot data!");
+          if (!from_gid) throw RecoveryFailure("Couldn't read from vertex gid!");
           auto edge_type = snapshot.ReadUint();
-          if (!edge_type) throw RecoveryFailure("Invalid snapshot data!");
+          if (!edge_type) throw RecoveryFailure("Couldn't read edge type!");
 
           auto from_vertex = vertex_acc.find(Gid::FromUint(*from_gid));
           if (from_vertex == vertex_acc.end()) throw RecoveryFailure("Invalid from vertex!");
@@ -903,17 +907,17 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
       {
         spdlog::trace("Recovering outbound edges for vertex {}.", vertex.gid.AsUint());
         auto out_size = snapshot.ReadUint();
-        if (!out_size) throw RecoveryFailure("Invalid snapshot data!");
+        if (!out_size) throw RecoveryFailure("Couldn't read the number of out edges!");
         vertex.out_edges.reserve(*out_size);
         for (uint64_t j = 0; j < *out_size; ++j) {
           auto edge_gid = snapshot.ReadUint();
-          if (!edge_gid) throw RecoveryFailure("Invalid snapshot data!");
+          if (!edge_gid) throw RecoveryFailure("Couldn't read edge gid!");
           last_edge_gid = std::max(last_edge_gid, *edge_gid);
 
           auto to_gid = snapshot.ReadUint();
-          if (!to_gid) throw RecoveryFailure("Invalid snapshot data!");
+          if (!to_gid) throw RecoveryFailure("Couldn't read to vertex gid!");
           auto edge_type = snapshot.ReadUint();
-          if (!edge_type) throw RecoveryFailure("Invalid snapshot data!");
+          if (!edge_type) throw RecoveryFailure("Couldn't read edge type!");
 
           auto to_vertex = vertex_acc.find(Gid::FromUint(*to_gid));
           if (to_vertex == vertex_acc.end()) throw RecoveryFailure("Invalid to vertex!");
@@ -951,16 +955,16 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
     if (!snapshot.SetPosition(info.offset_indices)) throw RecoveryFailure("Couldn't read data from snapshot!");
 
     auto marker = snapshot.ReadMarker();
-    if (!marker || *marker != Marker::SECTION_INDICES) throw RecoveryFailure("Invalid snapshot data!");
+    if (!marker || *marker != Marker::SECTION_INDICES) throw RecoveryFailure("Couldn't read section indices!");
 
     // Recover label indices.
     {
       auto size = snapshot.ReadUint();
-      if (!size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!size) throw RecoveryFailure("Couldn't read the number of label indices!");
       spdlog::info("Recovering metadata of {} label indices.", *size);
       for (uint64_t i = 0; i < *size; ++i) {
         auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        if (!label) throw RecoveryFailure("Couldn't read label of label index!");
         AddRecoveredIndexConstraint(&indices_constraints.indices.label, get_label_from_id(*label),
                                     "The label index already exists!");
         SPDLOG_TRACE("Recovered metadata of label index for :{}", name_id_mapper->IdToName(snapshot_id_map.at(*label)));
@@ -971,13 +975,13 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
     // Recover label+property indices.
     {
       auto size = snapshot.ReadUint();
-      if (!size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!size) throw RecoveryFailure("Couldn't recover the number of label property indices!");
       spdlog::info("Recovering metadata of {} label+property indices.", *size);
       for (uint64_t i = 0; i < *size; ++i) {
         auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        if (!label) throw RecoveryFailure("Couldn't read label for label property index!");
         auto property = snapshot.ReadUint();
-        if (!property) throw RecoveryFailure("Invalid snapshot data!");
+        if (!property) throw RecoveryFailure("Couldn't read property for label property index!");
         AddRecoveredIndexConstraint(&indices_constraints.indices.label_property,
                                     {get_label_from_id(*label), get_property_from_id(*property)},
                                     "The label+property index already exists!");
@@ -987,6 +991,7 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
       }
       spdlog::info("Metadata of label+property indices are recovered.");
     }
+
     spdlog::info("Metadata of indices are recovered.");
   }
 
@@ -996,18 +1001,19 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
     if (!snapshot.SetPosition(info.offset_constraints)) throw RecoveryFailure("Couldn't read data from snapshot!");
 
     auto marker = snapshot.ReadMarker();
-    if (!marker || *marker != Marker::SECTION_CONSTRAINTS) throw RecoveryFailure("Invalid snapshot data!");
+    if (!marker || *marker != Marker::SECTION_CONSTRAINTS)
+      throw RecoveryFailure("Couldn't read section constraints marker!");
 
     // Recover existence constraints.
     {
       auto size = snapshot.ReadUint();
-      if (!size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!size) throw RecoveryFailure("Couldn't recover the number of existence constraints!");
       spdlog::info("Recovering metadata of {} existence constraints.", *size);
       for (uint64_t i = 0; i < *size; ++i) {
         auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        if (!label) throw RecoveryFailure("Couldn't read label of existence constraints!");
         auto property = snapshot.ReadUint();
-        if (!property) throw RecoveryFailure("Invalid snapshot data!");
+        if (!property) throw RecoveryFailure("Couldn't read property of existence constraints!");
         AddRecoveredIndexConstraint(&indices_constraints.constraints.existence,
                                     {get_label_from_id(*label), get_property_from_id(*property)},
                                     "The existence constraint already exists!");
@@ -1023,17 +1029,17 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
     // implemented in later versions of snapshot.
     if (*version >= kUniqueConstraintVersion) {
       auto size = snapshot.ReadUint();
-      if (!size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!size) throw RecoveryFailure("Couldn't read the number of unique constraints!");
       spdlog::info("Recovering metadata of {} unique constraints.", *size);
       for (uint64_t i = 0; i < *size; ++i) {
         auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        if (!label) throw RecoveryFailure("Couldn't read label of unique constraint!");
         auto properties_count = snapshot.ReadUint();
-        if (!properties_count) throw RecoveryFailure("Invalid snapshot data!");
+        if (!properties_count) throw RecoveryFailure("Couldn't read the number of properties of unique constraint!");
         std::set<PropertyId> properties;
         for (uint64_t j = 0; j < *properties_count; ++j) {
           auto property = snapshot.ReadUint();
-          if (!property) throw RecoveryFailure("Invalid snapshot data!");
+          if (!property) throw RecoveryFailure("Couldn't read property of unique constraint!");
           properties.insert(get_property_from_id(*property));
         }
         AddRecoveredIndexConstraint(&indices_constraints.constraints.unique, {get_label_from_id(*label), properties},
@@ -1052,21 +1058,22 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils
     if (!snapshot.SetPosition(info.offset_epoch_history)) throw RecoveryFailure("Couldn't read data from snapshot!");
 
     const auto marker = snapshot.ReadMarker();
-    if (!marker || *marker != Marker::SECTION_EPOCH_HISTORY) throw RecoveryFailure("Invalid snapshot data!");
+    if (!marker || *marker != Marker::SECTION_EPOCH_HISTORY)
+      throw RecoveryFailure("Couldn't recover section epoch history marker!");
 
     const auto history_size = snapshot.ReadUint();
     if (!history_size) {
-      throw RecoveryFailure("Invalid snapshot data!");
+      throw RecoveryFailure("Couldn't read history size!");
     }
 
     for (int i = 0; i < *history_size; ++i) {
       auto maybe_epoch_id = snapshot.ReadString();
       if (!maybe_epoch_id) {
-        throw RecoveryFailure("Invalid snapshot data!");
+        throw RecoveryFailure("Couldn't read epoch id!");
       }
       const auto maybe_last_commit_timestamp = snapshot.ReadUint();
       if (!maybe_last_commit_timestamp) {
-        throw RecoveryFailure("Invalid snapshot data!");
+        throw RecoveryFailure("Couldn't read last commit timestamp!");
       }
       epoch_history->emplace_back(std::move(*maybe_epoch_id), *maybe_last_commit_timestamp);
     }
@@ -1120,34 +1127,34 @@ RecoveredSnapshot LoadSnapshotVersion15(const std::filesystem::path &path, utils
     if (!snapshot.SetPosition(info.offset_mapper)) throw RecoveryFailure("Couldn't read data from snapshot!");
 
     auto marker = snapshot.ReadMarker();
-    if (!marker || *marker != Marker::SECTION_MAPPER) throw RecoveryFailure("Invalid snapshot data!");
+    if (!marker || *marker != Marker::SECTION_MAPPER) throw RecoveryFailure("Couldn't read section mapper marker!");
 
     auto size = snapshot.ReadUint();
-    if (!size) throw RecoveryFailure("Invalid snapshot data!");
+    if (!size) throw RecoveryFailure("Couldn't read name-id mapper size!");
 
     for (uint64_t i = 0; i < *size; ++i) {
       auto id = snapshot.ReadUint();
-      if (!id) throw RecoveryFailure("Invalid snapshot data!");
+      if (!id) throw RecoveryFailure("Failed to read id for name-id mapper!");
       auto name = snapshot.ReadString();
-      if (!name) throw RecoveryFailure("Invalid snapshot data!");
+      if (!name) throw RecoveryFailure("Failed to read name for name-id mapper!");
       auto my_id = name_id_mapper->NameToId(*name);
       snapshot_id_map.emplace(*id, my_id);
       SPDLOG_TRACE("Mapping \"{}\"from snapshot id {} to actual id {}.", *name, *id, my_id);
     }
   }
-  auto get_label_from_id = [&snapshot_id_map](uint64_t snapshot_id) {
-    auto it = snapshot_id_map.find(snapshot_id);
-    if (it == snapshot_id_map.end()) throw RecoveryFailure("Invalid snapshot data!");
+  auto get_label_from_id = [&snapshot_id_map](uint64_t label_id) {
+    auto it = snapshot_id_map.find(label_id);
+    if (it == snapshot_id_map.end()) throw RecoveryFailure("Couldn't find label id in snapshot_id_map!");
     return LabelId::FromUint(it->second);
   };
-  auto get_property_from_id = [&snapshot_id_map](uint64_t snapshot_id) {
-    auto it = snapshot_id_map.find(snapshot_id);
-    if (it == snapshot_id_map.end()) throw RecoveryFailure("Invalid snapshot data!");
+  auto get_property_from_id = [&snapshot_id_map](uint64_t property_id) {
+    auto it = snapshot_id_map.find(property_id);
+    if (it == snapshot_id_map.end()) throw RecoveryFailure("Couldn't find property id in snapshot_id_map!");
     return PropertyId::FromUint(it->second);
   };
-  auto get_edge_type_from_id = [&snapshot_id_map](uint64_t snapshot_id) {
-    auto it = snapshot_id_map.find(snapshot_id);
-    if (it == snapshot_id_map.end()) throw RecoveryFailure("Invalid snapshot data!");
+  auto get_edge_type_from_id = [&snapshot_id_map](uint64_t edge_type_id) {
+    auto it = snapshot_id_map.find(edge_type_id);
+    if (it == snapshot_id_map.end()) throw RecoveryFailure("Couldn't find edge type id in snapshot_id_map!");
     return EdgeTypeId::FromUint(it->second);
   };
 
@@ -1237,16 +1244,16 @@ RecoveredSnapshot LoadSnapshotVersion15(const std::filesystem::path &path, utils
     if (!snapshot.SetPosition(info.offset_indices)) throw RecoveryFailure("Couldn't read data from snapshot!");
 
     auto marker = snapshot.ReadMarker();
-    if (!marker || *marker != Marker::SECTION_INDICES) throw RecoveryFailure("Invalid snapshot data!");
+    if (!marker || *marker != Marker::SECTION_INDICES) throw RecoveryFailure("Couldn't read section indices!");
 
     // Recover label indices.
     {
       auto size = snapshot.ReadUint();
-      if (!size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!size) throw RecoveryFailure("Couldn't read the number of label indices");
       spdlog::info("Recovering metadata of {} label indices.", *size);
       for (uint64_t i = 0; i < *size; ++i) {
         auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        if (!label) throw RecoveryFailure("Couldn't read label of label index!");
         AddRecoveredIndexConstraint(&indices_constraints.indices.label, get_label_from_id(*label),
                                     "The label index already exists!");
         SPDLOG_TRACE("Recovered metadata of label index for :{}", name_id_mapper->IdToName(snapshot_id_map.at(*label)));
@@ -1257,13 +1264,13 @@ RecoveredSnapshot LoadSnapshotVersion15(const std::filesystem::path &path, utils
     // Recover label+property indices.
     {
       auto size = snapshot.ReadUint();
-      if (!size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!size) throw RecoveryFailure("Couldn't recover the number of label property indices!");
       spdlog::info("Recovering metadata of {} label+property indices.", *size);
       for (uint64_t i = 0; i < *size; ++i) {
         auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        if (!label) throw RecoveryFailure("Couldn't read label for label property index!");
         auto property = snapshot.ReadUint();
-        if (!property) throw RecoveryFailure("Invalid snapshot data!");
+        if (!property) throw RecoveryFailure("Couldn't read property for label property index");
         AddRecoveredIndexConstraint(&indices_constraints.indices.label_property,
                                     {get_label_from_id(*label), get_property_from_id(*property)},
                                     "The label+property index already exists!");
@@ -1279,21 +1286,23 @@ RecoveredSnapshot LoadSnapshotVersion15(const std::filesystem::path &path, utils
   // Recover constraints.
   {
     spdlog::info("Recovering metadata of constraints.");
-    if (!snapshot.SetPosition(info.offset_constraints)) throw RecoveryFailure("Couldn't read data from snapshot!");
+    if (!snapshot.SetPosition(info.offset_constraints))
+      throw RecoveryFailure("Couldn't read offset constraints marker!");
 
     auto marker = snapshot.ReadMarker();
-    if (!marker || *marker != Marker::SECTION_CONSTRAINTS) throw RecoveryFailure("Invalid snapshot data!");
+    if (!marker || *marker != Marker::SECTION_CONSTRAINTS)
+      throw RecoveryFailure("Couldn't read section constraints marker!");
 
     // Recover existence constraints.
     {
       auto size = snapshot.ReadUint();
-      if (!size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!size) throw RecoveryFailure("Couldn't recover the number of existence constraints!");
       spdlog::info("Recovering metadata of {} existence constraints.", *size);
       for (uint64_t i = 0; i < *size; ++i) {
         auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        if (!label) throw RecoveryFailure("Couldn't read label of existence constraints!");
         auto property = snapshot.ReadUint();
-        if (!property) throw RecoveryFailure("Invalid snapshot data!");
+        if (!property) throw RecoveryFailure("Couldn't read property of existence constraints!");
         AddRecoveredIndexConstraint(&indices_constraints.constraints.existence,
                                     {get_label_from_id(*label), get_property_from_id(*property)},
                                     "The existence constraint already exists!");
@@ -1309,17 +1318,17 @@ RecoveredSnapshot LoadSnapshotVersion15(const std::filesystem::path &path, utils
     // implemented in later versions of snapshot.
     if (*version >= kUniqueConstraintVersion) {
       auto size = snapshot.ReadUint();
-      if (!size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!size) throw RecoveryFailure("Couldn't read the number of unique constraints!");
       spdlog::info("Recovering metadata of {} unique constraints.", *size);
       for (uint64_t i = 0; i < *size; ++i) {
         auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        if (!label) throw RecoveryFailure("Couldn't read label of unique constraint!");
         auto properties_count = snapshot.ReadUint();
-        if (!properties_count) throw RecoveryFailure("Invalid snapshot data!");
+        if (!properties_count) throw RecoveryFailure("Couldn't read the number of properties of unique constraint");
         std::set<PropertyId> properties;
         for (uint64_t j = 0; j < *properties_count; ++j) {
           auto property = snapshot.ReadUint();
-          if (!property) throw RecoveryFailure("Invalid snapshot data!");
+          if (!property) throw RecoveryFailure("Couldn't read property of unique constraint!");
           properties.insert(get_property_from_id(*property));
         }
         AddRecoveredIndexConstraint(&indices_constraints.constraints.unique, {get_label_from_id(*label), properties},
@@ -1338,21 +1347,22 @@ RecoveredSnapshot LoadSnapshotVersion15(const std::filesystem::path &path, utils
     if (!snapshot.SetPosition(info.offset_epoch_history)) throw RecoveryFailure("Couldn't read data from snapshot!");
 
     const auto marker = snapshot.ReadMarker();
-    if (!marker || *marker != Marker::SECTION_EPOCH_HISTORY) throw RecoveryFailure("Invalid snapshot data!");
+    if (!marker || *marker != Marker::SECTION_EPOCH_HISTORY)
+      throw RecoveryFailure("Couldn't recover section epoch history marker!");
 
     const auto history_size = snapshot.ReadUint();
     if (!history_size) {
-      throw RecoveryFailure("Invalid snapshot data!");
+      throw RecoveryFailure("Couldn't read history size!");
     }
 
     for (int i = 0; i < *history_size; ++i) {
       auto maybe_epoch_id = snapshot.ReadString();
       if (!maybe_epoch_id) {
-        throw RecoveryFailure("Invalid snapshot data!");
+        throw RecoveryFailure("Couldn't read epoch id!");
       }
       const auto maybe_last_commit_timestamp = snapshot.ReadUint();
       if (!maybe_last_commit_timestamp) {
-        throw RecoveryFailure("Invalid snapshot data!");
+        throw RecoveryFailure("Couldn't read last commit timestamp!");
       }
       epoch_history->emplace_back(std::move(*maybe_epoch_id), *maybe_last_commit_timestamp);
     }
@@ -1410,34 +1420,34 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis
     if (!snapshot.SetPosition(info.offset_mapper)) throw RecoveryFailure("Couldn't read data from snapshot!");
 
     auto marker = snapshot.ReadMarker();
-    if (!marker || *marker != Marker::SECTION_MAPPER) throw RecoveryFailure("Invalid snapshot data!");
+    if (!marker || *marker != Marker::SECTION_MAPPER) throw RecoveryFailure("Failed to read section mapper!");
 
     auto size = snapshot.ReadUint();
-    if (!size) throw RecoveryFailure("Invalid snapshot data!");
+    if (!size) throw RecoveryFailure("Failed to read name-id mapper size!");
 
     for (uint64_t i = 0; i < *size; ++i) {
       auto id = snapshot.ReadUint();
-      if (!id) throw RecoveryFailure("Invalid snapshot data!");
+      if (!id) throw RecoveryFailure("Failed to read id for name-id mapper!");
       auto name = snapshot.ReadString();
-      if (!name) throw RecoveryFailure("Invalid snapshot data!");
+      if (!name) throw RecoveryFailure("Failed to read name for name-id mapper!");
       auto my_id = name_id_mapper->NameToId(*name);
       snapshot_id_map.emplace(*id, my_id);
       SPDLOG_TRACE("Mapping \"{}\"from snapshot id {} to actual id {}.", *name, *id, my_id);
     }
   }
-  auto get_label_from_id = [&snapshot_id_map](uint64_t snapshot_id) {
-    auto it = snapshot_id_map.find(snapshot_id);
-    if (it == snapshot_id_map.end()) throw RecoveryFailure("Invalid snapshot data!");
+  auto get_label_from_id = [&snapshot_id_map](uint64_t label_id) {
+    auto it = snapshot_id_map.find(label_id);
+    if (it == snapshot_id_map.end()) throw RecoveryFailure("Couldn't find label id in snapshot_id_map!");
     return LabelId::FromUint(it->second);
   };
-  auto get_property_from_id = [&snapshot_id_map](uint64_t snapshot_id) {
-    auto it = snapshot_id_map.find(snapshot_id);
-    if (it == snapshot_id_map.end()) throw RecoveryFailure("Invalid snapshot data!");
+  auto get_property_from_id = [&snapshot_id_map](uint64_t property_id) {
+    auto it = snapshot_id_map.find(property_id);
+    if (it == snapshot_id_map.end()) throw RecoveryFailure("Couldn't find property id in snapshot_id_map!");
     return PropertyId::FromUint(it->second);
   };
-  auto get_edge_type_from_id = [&snapshot_id_map](uint64_t snapshot_id) {
-    auto it = snapshot_id_map.find(snapshot_id);
-    if (it == snapshot_id_map.end()) throw RecoveryFailure("Invalid snapshot data!");
+  auto get_edge_type_from_id = [&snapshot_id_map](uint64_t edge_type_id) {
+    auto it = snapshot_id_map.find(edge_type_id);
+    if (it == snapshot_id_map.end()) throw RecoveryFailure("Couldn't find edge type id in snapshot_id_map!");
     return EdgeTypeId::FromUint(it->second);
   };
 
@@ -1527,16 +1537,16 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis
     if (!snapshot.SetPosition(info.offset_indices)) throw RecoveryFailure("Couldn't read data from snapshot!");
 
     auto marker = snapshot.ReadMarker();
-    if (!marker || *marker != Marker::SECTION_INDICES) throw RecoveryFailure("Invalid snapshot data!");
+    if (!marker || *marker != Marker::SECTION_INDICES) throw RecoveryFailure("Couldn't read section indices!");
 
     // Recover label indices.
     {
       auto size = snapshot.ReadUint();
-      if (!size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!size) throw RecoveryFailure("Couldn't read the number of label indices");
       spdlog::info("Recovering metadata of {} label indices.", *size);
       for (uint64_t i = 0; i < *size; ++i) {
-        const auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        auto label = snapshot.ReadUint();
+        if (!label) throw RecoveryFailure("Couldn't read label of label index!");
         AddRecoveredIndexConstraint(&indices_constraints.indices.label, get_label_from_id(*label),
                                     "The label index already exists!");
         SPDLOG_TRACE("Recovered metadata of label index for :{}", name_id_mapper->IdToName(snapshot_id_map.at(*label)));
@@ -1547,15 +1557,15 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis
     // Recover label indices statistics.
     {
       auto size = snapshot.ReadUint();
-      if (!size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!size) throw RecoveryFailure("Couldn't read the number of entries for label index statistics!");
       spdlog::info("Recovering metadata of {} label indices statistics.", *size);
       for (uint64_t i = 0; i < *size; ++i) {
         const auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        if (!label) throw RecoveryFailure("Couldn't read label while recovering label index statistics!");
         const auto count = snapshot.ReadUint();
-        if (!count) throw RecoveryFailure("Invalid snapshot data!");
+        if (!count) throw RecoveryFailure("Couldn't read count for label index statistics!");
         const auto avg_degree = snapshot.ReadDouble();
-        if (!avg_degree) throw RecoveryFailure("Invalid snapshot data!");
+        if (!avg_degree) throw RecoveryFailure("Couldn't read average degree for label index statistics");
         const auto label_id = get_label_from_id(*label);
         indices_constraints.indices.label_stats.emplace_back(label_id, LabelIndexStats{*count, *avg_degree});
         SPDLOG_TRACE("Recovered metadata of label index statistics for :{}",
@@ -1567,13 +1577,13 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis
     // Recover label+property indices.
     {
       auto size = snapshot.ReadUint();
-      if (!size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!size) throw RecoveryFailure("Couldn't recover the number of label property indices!");
       spdlog::info("Recovering metadata of {} label+property indices.", *size);
       for (uint64_t i = 0; i < *size; ++i) {
-        const auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
-        const auto property = snapshot.ReadUint();
-        if (!property) throw RecoveryFailure("Invalid snapshot data!");
+        auto label = snapshot.ReadUint();
+        if (!label) throw RecoveryFailure("Couldn't read label for label property index!");
+        auto property = snapshot.ReadUint();
+        if (!property) throw RecoveryFailure("Couldn't read property for label property index");
         AddRecoveredIndexConstraint(&indices_constraints.indices.label_property,
                                     {get_label_from_id(*label), get_property_from_id(*property)},
                                     "The label+property index already exists!");
@@ -1587,23 +1597,25 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis
     // Recover label+property indices statistics.
     {
       auto size = snapshot.ReadUint();
-      if (!size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!size) throw RecoveryFailure("Couldn't recover the number of entries for label property statistics!");
       spdlog::info("Recovering metadata of {} label+property indices statistics.", *size);
       for (uint64_t i = 0; i < *size; ++i) {
         const auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        if (!label) throw RecoveryFailure("Couldn't read label for label property index statistics!");
         const auto property = snapshot.ReadUint();
-        if (!property) throw RecoveryFailure("Invalid snapshot data!");
+        if (!property) throw RecoveryFailure("Couldn't read property for label property index statistics!");
         const auto count = snapshot.ReadUint();
-        if (!count) throw RecoveryFailure("Invalid snapshot data!");
+        if (!count) throw RecoveryFailure("Couldn't read count for label property index statistics!");
         const auto distinct_values_count = snapshot.ReadUint();
-        if (!distinct_values_count) throw RecoveryFailure("Invalid snapshot data!");
+        if (!distinct_values_count)
+          throw RecoveryFailure("Couldn't read distinct values count for label property index statistics!");
         const auto statistic = snapshot.ReadDouble();
-        if (!statistic) throw RecoveryFailure("Invalid snapshot data!");
+        if (!statistic) throw RecoveryFailure("Couldn't read statistics value for label-property index statistics!");
         const auto avg_group_size = snapshot.ReadDouble();
-        if (!avg_group_size) throw RecoveryFailure("Invalid snapshot data!");
+        if (!avg_group_size)
+          throw RecoveryFailure("Couldn't read average group size for label property index statistics!");
         const auto avg_degree = snapshot.ReadDouble();
-        if (!avg_degree) throw RecoveryFailure("Invalid snapshot data!");
+        if (!avg_degree) throw RecoveryFailure("Couldn't read average degree for label property index statistics!");
         const auto label_id = get_label_from_id(*label);
         const auto property_id = get_property_from_id(*property);
         indices_constraints.indices.label_property_stats.emplace_back(
@@ -1625,18 +1637,19 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis
     if (!snapshot.SetPosition(info.offset_constraints)) throw RecoveryFailure("Couldn't read data from snapshot!");
 
     auto marker = snapshot.ReadMarker();
-    if (!marker || *marker != Marker::SECTION_CONSTRAINTS) throw RecoveryFailure("Invalid snapshot data!");
+    if (!marker || *marker != Marker::SECTION_CONSTRAINTS)
+      throw RecoveryFailure("Couldn't read section constraints marker!");
 
     // Recover existence constraints.
     {
       auto size = snapshot.ReadUint();
-      if (!size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!size) throw RecoveryFailure("Couldn't read the number of existence constraints!");
       spdlog::info("Recovering metadata of {} existence constraints.", *size);
       for (uint64_t i = 0; i < *size; ++i) {
         auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        if (!label) throw RecoveryFailure("Couldn't read label of existence constraints!");
         auto property = snapshot.ReadUint();
-        if (!property) throw RecoveryFailure("Invalid snapshot data!");
+        if (!property) throw RecoveryFailure("Couldn't read property of existence constraints!");
         AddRecoveredIndexConstraint(&indices_constraints.constraints.existence,
                                     {get_label_from_id(*label), get_property_from_id(*property)},
                                     "The existence constraint already exists!");
@@ -1652,17 +1665,17 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis
     // implemented in later versions of snapshot.
     if (*version >= kUniqueConstraintVersion) {
       auto size = snapshot.ReadUint();
-      if (!size) throw RecoveryFailure("Invalid snapshot data!");
+      if (!size) throw RecoveryFailure("Couldn't read the number of unique constraints!");
       spdlog::info("Recovering metadata of {} unique constraints.", *size);
       for (uint64_t i = 0; i < *size; ++i) {
         auto label = snapshot.ReadUint();
-        if (!label) throw RecoveryFailure("Invalid snapshot data!");
+        if (!label) throw RecoveryFailure("Couldn't read label of unique constraints!");
         auto properties_count = snapshot.ReadUint();
-        if (!properties_count) throw RecoveryFailure("Invalid snapshot data!");
+        if (!properties_count) throw RecoveryFailure("Couldn't read the number of properties in unique constraint!");
         std::set<PropertyId> properties;
         for (uint64_t j = 0; j < *properties_count; ++j) {
           auto property = snapshot.ReadUint();
-          if (!property) throw RecoveryFailure("Invalid snapshot data!");
+          if (!property) throw RecoveryFailure("Couldn't read property of unique constraint!");
           properties.insert(get_property_from_id(*property));
         }
         AddRecoveredIndexConstraint(&indices_constraints.constraints.unique, {get_label_from_id(*label), properties},
@@ -1681,21 +1694,22 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis
     if (!snapshot.SetPosition(info.offset_epoch_history)) throw RecoveryFailure("Couldn't read data from snapshot!");
 
     const auto marker = snapshot.ReadMarker();
-    if (!marker || *marker != Marker::SECTION_EPOCH_HISTORY) throw RecoveryFailure("Invalid snapshot data!");
+    if (!marker || *marker != Marker::SECTION_EPOCH_HISTORY)
+      throw RecoveryFailure("Couldn't read section epoch history marker!");
 
     const auto history_size = snapshot.ReadUint();
     if (!history_size) {
-      throw RecoveryFailure("Invalid snapshot data!");
+      throw RecoveryFailure("Couldn't read history size!");
     }
 
     for (int i = 0; i < *history_size; ++i) {
       auto maybe_epoch_id = snapshot.ReadString();
       if (!maybe_epoch_id) {
-        throw RecoveryFailure("Invalid snapshot data!");
+        throw RecoveryFailure("Couldn't read epoch id!");
       }
       const auto maybe_last_commit_timestamp = snapshot.ReadUint();
       if (!maybe_last_commit_timestamp) {
-        throw RecoveryFailure("Invalid snapshot data!");
+        throw RecoveryFailure("Couldn't read last commit timestamp!");
       }
       epoch_history->emplace_back(std::move(*maybe_epoch_id), *maybe_last_commit_timestamp);
     }
@@ -1711,6 +1725,94 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis
   return {info, recovery_info, std::move(indices_constraints)};
 }
 
+using OldSnapshotFiles = std::vector<std::pair<uint64_t, std::filesystem::path>>;
+void EnsureNecessaryWalFilesExist(const std::filesystem::path &wal_directory, const std::string &uuid,
+                                  OldSnapshotFiles old_snapshot_files, Transaction *transaction,
+                                  utils::FileRetainer *file_retainer) {
+  std::vector<std::tuple<uint64_t, uint64_t, uint64_t, std::filesystem::path>> wal_files;
+  std::error_code error_code;
+  for (const auto &item : std::filesystem::directory_iterator(wal_directory, error_code)) {
+    if (!item.is_regular_file()) continue;
+    try {
+      auto info = ReadWalInfo(item.path());
+      if (info.uuid != uuid) continue;
+      wal_files.emplace_back(info.seq_num, info.from_timestamp, info.to_timestamp, item.path());
+    } catch (const RecoveryFailure &e) {
+      spdlog::warn("Found a corrupt WAL file {} because of: {}. WAL file will be deleted.", item.path(), e.what());
+      file_retainer->DeleteFile(item.path());
+    }
+  }
+
+  if (error_code) {
+    spdlog::error(
+        utils::MessageWithLink("Couldn't ensure that only the absolutely necessary WAL files exist "
+                               "because an error occurred: {}.",
+                               error_code.message(), "https://memgr.ph/snapshots"));
+  }
+  std::sort(wal_files.begin(), wal_files.end());
+  uint64_t snapshot_start_timestamp = transaction->start_timestamp;
+  if (!old_snapshot_files.empty()) {
+    snapshot_start_timestamp = old_snapshot_files.front().first;
+  }
+  std::optional<uint64_t> pos = 0;
+  for (uint64_t i = 0; i < wal_files.size(); ++i) {
+    const auto &[seq_num, from_timestamp, to_timestamp, wal_path] = wal_files[i];
+    if (from_timestamp <= snapshot_start_timestamp) {
+      pos = i;
+    } else {
+      break;
+    }
+  }
+  if (pos && *pos > 0) {
+    // We need to leave at least one WAL file that contains deltas that were
+    // created before the oldest snapshot. Because we always leave at least
+    // one WAL file that contains deltas before the snapshot, this correctly
+    // handles the edge case when that one file is the current WAL file that
+    // is being appended to.
+    for (uint64_t i = 0; i < *pos; ++i) {
+      const auto &[seq_num, from_timestamp, to_timestamp, wal_path] = wal_files[i];
+      file_retainer->DeleteFile(wal_path);
+    }
+  }
+}
+
+auto EnsureRetentionCountSnapshotsExist(const std::filesystem::path &snapshot_directory, const std::string &path,
+                                        const std::string &uuid, utils::FileRetainer *file_retainer, Storage *storage)
+    -> OldSnapshotFiles {
+  OldSnapshotFiles old_snapshot_files;
+  std::error_code error_code;
+  for (const auto &item : std::filesystem::directory_iterator(snapshot_directory, error_code)) {
+    if (!item.is_regular_file()) continue;
+    if (item.path() == path) continue;
+    try {
+      auto info = ReadSnapshotInfo(item.path());
+      if (info.uuid != uuid) continue;
+      old_snapshot_files.emplace_back(info.start_timestamp, item.path());
+    } catch (const RecoveryFailure &e) {
+      spdlog::warn("Found a corrupt snapshot file {} because of: {}. Corrupted snapshot file will be deleted.",
+                   item.path(), e.what());
+      file_retainer->DeleteFile(item.path());
+    }
+  }
+
+  if (error_code) {
+    spdlog::error(utils::MessageWithLink(
+        "Couldn't ensure that exactly {} snapshots exist because an error occurred: {}.",
+        storage->config_.durability.snapshot_retention_count, error_code.message(), "https://memgr.ph/snapshots"));
+  }
+
+  std::sort(old_snapshot_files.begin(), old_snapshot_files.end());
+  if (old_snapshot_files.size() <= storage->config_.durability.snapshot_retention_count - 1) return old_snapshot_files;
+
+  uint32_t num_to_erase = old_snapshot_files.size() - (storage->config_.durability.snapshot_retention_count - 1);
+  for (size_t i = 0; i < num_to_erase; ++i) {
+    const auto &[_, snapshot_path] = old_snapshot_files[i];
+    file_retainer->DeleteFile(snapshot_path);
+  }
+  old_snapshot_files.erase(old_snapshot_files.begin(), old_snapshot_files.begin() + num_to_erase);
+  return old_snapshot_files;
+}
+
 void CreateSnapshot(Storage *storage, Transaction *transaction, const std::filesystem::path &snapshot_directory,
                     const std::filesystem::path &wal_directory, utils::SkipList<Vertex> *vertices,
                     utils::SkipList<Edge> *edges, const std::string &uuid,
@@ -2088,86 +2190,12 @@ void CreateSnapshot(Storage *storage, Transaction *transaction, const std::files
   snapshot.Finalize();
   spdlog::info("Snapshot creation successful!");
 
-  // Ensure exactly `snapshot_retention_count` snapshots exist.
-  std::vector<std::pair<uint64_t, std::filesystem::path>> old_snapshot_files;
-  {
-    std::error_code error_code;
-    for (const auto &item : std::filesystem::directory_iterator(snapshot_directory, error_code)) {
-      if (!item.is_regular_file()) continue;
-      if (item.path() == path) continue;
-      try {
-        auto info = ReadSnapshotInfo(item.path());
-        if (info.uuid != uuid) continue;
-        old_snapshot_files.emplace_back(info.start_timestamp, item.path());
-      } catch (const RecoveryFailure &e) {
-        spdlog::warn("Found a corrupt snapshot file {} becuase of: {}", item.path(), e.what());
-        continue;
-      }
-    }
+  OldSnapshotFiles old_snapshot_files =
+      EnsureRetentionCountSnapshotsExist(snapshot_directory, path, uuid, file_retainer, storage);
 
-    if (error_code) {
-      spdlog::error(utils::MessageWithLink(
-          "Couldn't ensure that exactly {} snapshots exist because an error occurred: {}.",
-          storage->config_.durability.snapshot_retention_count, error_code.message(), "https://memgr.ph/snapshots"));
-    }
-    std::sort(old_snapshot_files.begin(), old_snapshot_files.end());
-    if (old_snapshot_files.size() > storage->config_.durability.snapshot_retention_count - 1) {
-      auto num_to_erase = old_snapshot_files.size() - (storage->config_.durability.snapshot_retention_count - 1);
-      for (size_t i = 0; i < num_to_erase; ++i) {
-        const auto &[start_timestamp, snapshot_path] = old_snapshot_files[i];
-        file_retainer->DeleteFile(snapshot_path);
-      }
-      old_snapshot_files.erase(old_snapshot_files.begin(), old_snapshot_files.begin() + num_to_erase);
-    }
-  }
-
-  // Ensure that only the absolutely necessary WAL files exist.
   if (old_snapshot_files.size() == storage->config_.durability.snapshot_retention_count - 1 &&
       utils::DirExists(wal_directory)) {
-    std::vector<std::tuple<uint64_t, uint64_t, uint64_t, std::filesystem::path>> wal_files;
-    std::error_code error_code;
-    for (const auto &item : std::filesystem::directory_iterator(wal_directory, error_code)) {
-      if (!item.is_regular_file()) continue;
-      try {
-        auto info = ReadWalInfo(item.path());
-        if (info.uuid != uuid) continue;
-        wal_files.emplace_back(info.seq_num, info.from_timestamp, info.to_timestamp, item.path());
-      } catch (const RecoveryFailure &e) {
-        continue;
-      }
-    }
-
-    if (error_code) {
-      spdlog::error(
-          utils::MessageWithLink("Couldn't ensure that only the absolutely necessary WAL files exist "
-                                 "because an error occurred: {}.",
-                                 error_code.message(), "https://memgr.ph/snapshots"));
-    }
-    std::sort(wal_files.begin(), wal_files.end());
-    uint64_t snapshot_start_timestamp = transaction->start_timestamp;
-    if (!old_snapshot_files.empty()) {
-      snapshot_start_timestamp = old_snapshot_files.front().first;
-    }
-    std::optional<uint64_t> pos = 0;
-    for (uint64_t i = 0; i < wal_files.size(); ++i) {
-      const auto &[seq_num, from_timestamp, to_timestamp, wal_path] = wal_files[i];
-      if (from_timestamp <= snapshot_start_timestamp) {
-        pos = i;
-      } else {
-        break;
-      }
-    }
-    if (pos && *pos > 0) {
-      // We need to leave at least one WAL file that contains deltas that were
-      // created before the oldest snapshot. Because we always leave at least
-      // one WAL file that contains deltas before the snapshot, this correctly
-      // handles the edge case when that one file is the current WAL file that
-      // is being appended to.
-      for (uint64_t i = 0; i < *pos; ++i) {
-        const auto &[seq_num, from_timestamp, to_timestamp, wal_path] = wal_files[i];
-        file_retainer->DeleteFile(wal_path);
-      }
-    }
+    EnsureNecessaryWalFilesExist(wal_directory, uuid, std::move(old_snapshot_files), transaction, file_retainer);
   }
 }
 
diff --git a/src/storage/v2/inmemory/label_index.hpp b/src/storage/v2/inmemory/label_index.hpp
index 63b2ebad4..7d606574b 100644
--- a/src/storage/v2/inmemory/label_index.hpp
+++ b/src/storage/v2/inmemory/label_index.hpp
@@ -16,6 +16,7 @@
 #include "storage/v2/indices/label_index_stats.hpp"
 #include "storage/v2/vertex.hpp"
 #include "utils/rw_lock.hpp"
+#include "utils/synchronized.hpp"
 
 namespace memgraph::storage {
 
diff --git a/src/storage/v2/inmemory/label_property_index.hpp b/src/storage/v2/inmemory/label_property_index.hpp
index db3eb7faa..7f8c54909 100644
--- a/src/storage/v2/inmemory/label_property_index.hpp
+++ b/src/storage/v2/inmemory/label_property_index.hpp
@@ -15,6 +15,7 @@
 #include "storage/v2/indices/label_property_index.hpp"
 #include "storage/v2/indices/label_property_index_stats.hpp"
 #include "utils/rw_lock.hpp"
+#include "utils/synchronized.hpp"
 
 namespace memgraph::storage {
 
diff --git a/src/storage/v2/inmemory/replication/replication_client.cpp b/src/storage/v2/inmemory/replication/replication_client.cpp
index 48aeafd7d..b8ecc1c72 100644
--- a/src/storage/v2/inmemory/replication/replication_client.cpp
+++ b/src/storage/v2/inmemory/replication/replication_client.cpp
@@ -44,7 +44,7 @@ class CurrentWalHandler {
 
 ////// CurrentWalHandler //////
 CurrentWalHandler::CurrentWalHandler(ReplicationClient *self)
-    : self_(self), stream_(self_->rpc_client_.Stream<replication::CurrentWalRpc>()) {}
+    : self_(self), stream_(self_->rpc_client_.Stream<replication::CurrentWalRpc>(self->GetStorageId())) {}
 
 void CurrentWalHandler::AppendFilename(const std::string &filename) {
   replication::Encoder encoder(stream_.GetBuilder());
@@ -70,9 +70,10 @@ replication::CurrentWalRes CurrentWalHandler::Finalize() { return stream_.AwaitR
 
 ////// ReplicationClient Helpers //////
 
-replication::WalFilesRes TransferWalFiles(rpc::Client &client, const std::vector<std::filesystem::path> &wal_files) {
+replication::WalFilesRes TransferWalFiles(std::string db_name, rpc::Client &client,
+                                          const std::vector<std::filesystem::path> &wal_files) {
   MG_ASSERT(!wal_files.empty(), "Wal files list is empty!");
-  auto stream = client.Stream<replication::WalFilesRpc>(wal_files.size());
+  auto stream = client.Stream<replication::WalFilesRpc>(std::move(db_name), wal_files.size());
   replication::Encoder encoder(stream.GetBuilder());
   for (const auto &wal : wal_files) {
     spdlog::debug("Sending wal file: {}", wal);
@@ -81,8 +82,8 @@ replication::WalFilesRes TransferWalFiles(rpc::Client &client, const std::vector
   return stream.AwaitResponse();
 }
 
-replication::SnapshotRes TransferSnapshot(rpc::Client &client, const std::filesystem::path &path) {
-  auto stream = client.Stream<replication::SnapshotRpc>();
+replication::SnapshotRes TransferSnapshot(std::string db_name, rpc::Client &client, const std::filesystem::path &path) {
+  auto stream = client.Stream<replication::SnapshotRpc>(std::move(db_name));
   replication::Encoder encoder(stream.GetBuilder());
   encoder.WriteFile(path);
   return stream.AwaitResponse();
@@ -115,19 +116,19 @@ void InMemoryReplicationClient::RecoverReplica(uint64_t replica_commit) {
 
     const auto steps = GetRecoverySteps(replica_commit, &file_locker);
     int i = 0;
-    for (const auto &recovery_step : steps) {
+    for (const InMemoryReplicationClient::RecoveryStep &recovery_step : steps) {
       spdlog::trace("Recovering in step: {}", i++);
       try {
         std::visit(
             [&, this]<typename T>(T &&arg) {
               using StepType = std::remove_cvref_t<T>;
-              if constexpr (std::is_same_v<StepType, RecoverySnapshot>) {
+              if constexpr (std::is_same_v<StepType, RecoverySnapshot>) {  // TODO: split into 3 overloads
                 spdlog::debug("Sending the latest snapshot file: {}", arg);
-                auto response = TransferSnapshot(rpc_client_, arg);
+                auto response = TransferSnapshot(storage->id(), rpc_client_, arg);
                 replica_commit = response.current_commit_timestamp;
               } else if constexpr (std::is_same_v<StepType, RecoveryWals>) {
                 spdlog::debug("Sending the latest wal files");
-                auto response = TransferWalFiles(rpc_client_, arg);
+                auto response = TransferWalFiles(storage->id(), rpc_client_, arg);
                 replica_commit = response.current_commit_timestamp;
                 spdlog::debug("Wal files successfully transferred.");
               } else if constexpr (std::is_same_v<StepType, RecoveryCurrentWal>) {
diff --git a/src/storage/v2/inmemory/replication/replication_server.hpp b/src/storage/v2/inmemory/replication/replication_server.hpp
deleted file mode 100644
index 697108a0a..000000000
--- a/src/storage/v2/inmemory/replication/replication_server.hpp
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2023 Memgraph Ltd.
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
-// License, and you may not use this file except in compliance with the Business Source License.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-#pragma once
-
-#include "replication/epoch.hpp"
-#include "storage/v2/replication/replication_server.hpp"
-#include "storage/v2/replication/serialization.hpp"
-
-namespace memgraph::storage {
-
-class InMemoryStorage;
-
-class InMemoryReplicationServer : public ReplicationServer {
- public:
-  explicit InMemoryReplicationServer(InMemoryStorage *storage,
-                                     const memgraph::replication::ReplicationServerConfig &config,
-                                     memgraph::replication::ReplicationEpoch *repl_epoch);
-
- private:
-  // RPC handlers
-  void HeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder);
-
-  void AppendDeltasHandler(slk::Reader *req_reader, slk::Builder *res_builder);
-
-  void SnapshotHandler(slk::Reader *req_reader, slk::Builder *res_builder);
-
-  void WalFilesHandler(slk::Reader *req_reader, slk::Builder *res_builder);
-
-  void CurrentWalHandler(slk::Reader *req_reader, slk::Builder *res_builder);
-
-  void TimestampHandler(slk::Reader *req_reader, slk::Builder *res_builder);
-
-  static void LoadWal(InMemoryStorage *storage, memgraph::replication::ReplicationEpoch &epoch,
-                      replication::Decoder *decoder);
-
-  static uint64_t ReadAndApplyDelta(InMemoryStorage *storage, durability::BaseDecoder *decoder, uint64_t version);
-
-  InMemoryStorage *storage_;
-
-  memgraph::replication::ReplicationEpoch *repl_epoch_;
-};
-
-}  // namespace memgraph::storage
diff --git a/src/storage/v2/inmemory/storage.cpp b/src/storage/v2/inmemory/storage.cpp
index 22d2f3c16..5ed493432 100644
--- a/src/storage/v2/inmemory/storage.cpp
+++ b/src/storage/v2/inmemory/storage.cpp
@@ -16,11 +16,11 @@
 #include "storage/v2/metadata_delta.hpp"
 
 /// REPLICATION ///
+#include "dbms/inmemory/replication_handlers.hpp"
 #include "storage/v2/inmemory/replication/replication_client.hpp"
-#include "storage/v2/inmemory/replication/replication_server.hpp"
 #include "storage/v2/inmemory/unique_constraints.hpp"
-#include "storage/v2/replication/replication_handler.hpp"
 #include "utils/resource_lock.hpp"
+#include "utils/stat.hpp"
 
 namespace memgraph::storage {
 
@@ -60,12 +60,10 @@ InMemoryStorage::InMemoryStorage(Config config, StorageMode storage_mode)
               "process!",
               config_.durability.storage_directory);
   }
-  auto &repl_state = repl_state_;
   if (config_.durability.recover_on_startup) {
-    auto &epoch = repl_state.GetEpoch();
-    auto info = durability::RecoverData(snapshot_directory_, wal_directory_, &uuid_, epoch,
-                                        &repl_storage_state_.history, &vertices_, &edges_, &edge_count_,
-                                        name_id_mapper_.get(), &indices_, &constraints_, config_, &wal_seq_num_);
+    auto info =
+        durability::RecoverData(snapshot_directory_, wal_directory_, &uuid_, repl_storage_state_, &vertices_, &edges_,
+                                &edge_count_, name_id_mapper_.get(), &indices_, &constraints_, config_, &wal_seq_num_);
     if (info) {
       vertex_id_ = info->next_vertex_id;
       edge_id_ = info->next_edge_id;
@@ -103,51 +101,14 @@ InMemoryStorage::InMemoryStorage(Config config, StorageMode storage_mode)
           "those files into a .backup directory inside the storage directory.");
     }
   }
-  if (config_.durability.snapshot_wal_mode != Config::Durability::SnapshotWalMode::DISABLED) {
-    snapshot_runner_.Run("Snapshot", config_.durability.snapshot_interval, [this] {
-      auto const &repl_state = repl_state_;
-      if (auto maybe_error = this->CreateSnapshot(repl_state, {true}); maybe_error.HasError()) {
-        switch (maybe_error.GetError()) {
-          case CreateSnapshotError::DisabledForReplica:
-            spdlog::warn(
-                utils::MessageWithLink("Snapshots are disabled for replicas.", "https://memgr.ph/replication"));
-            break;
-          case CreateSnapshotError::DisabledForAnalyticsPeriodicCommit:
-            spdlog::warn(utils::MessageWithLink("Periodic snapshots are disabled for analytical mode.",
-                                                "https://memgr.ph/durability"));
-            break;
-          case storage::InMemoryStorage::CreateSnapshotError::ReachedMaxNumTries:
-            spdlog::warn("Failed to create snapshot. Reached max number of tries. Please contact support");
-            break;
-        }
-      }
-    });
-  }
   if (config_.gc.type == Config::Gc::Type::PERIODIC) {
     gc_runner_.Run("Storage GC", config_.gc.interval, [this] { this->CollectGarbage<false>(); });
   }
-
   if (timestamp_ == kTimestampInitialId) {
     commit_log_.emplace();
   } else {
     commit_log_.emplace(timestamp_);
   }
-
-  if (config_.durability.restore_replication_state_on_startup) {
-    spdlog::info("Replication configuration will be stored and will be automatically restored in case of a crash.");
-    ReplicationHandler{repl_state, *this}.RestoreReplication();
-  } else {
-    spdlog::warn(
-        "Replication configuration will NOT be stored. When the server restarts, replication state will be "
-        "forgotten.");
-  }
-
-  if (config_.durability.snapshot_wal_mode == Config::Durability::SnapshotWalMode::DISABLED && repl_state.IsMain()) {
-    spdlog::warn(
-        "The instance has the MAIN replication role, but durability logs and snapshots are disabled. Please consider "
-        "enabling durability by using --storage-snapshot-interval-sec and --storage-wal-enabled flags because "
-        "without write-ahead logs this instance is not replicating any data.");
-  }
 }
 
 InMemoryStorage::InMemoryStorage(Config config) : InMemoryStorage(config, StorageMode::IN_MEMORY_TRANSACTIONAL) {}
@@ -167,29 +128,15 @@ InMemoryStorage::~InMemoryStorage() {
   if (config_.durability.snapshot_wal_mode != Config::Durability::SnapshotWalMode::DISABLED) {
     snapshot_runner_.Stop();
   }
-  if (config_.durability.snapshot_on_exit) {
-    auto const &repl_state = repl_state_;
-    if (auto maybe_error = this->CreateSnapshot(repl_state, {false}); maybe_error.HasError()) {
-      switch (maybe_error.GetError()) {
-        case CreateSnapshotError::DisabledForReplica:
-          spdlog::warn(utils::MessageWithLink("Snapshots are disabled for replicas.", "https://memgr.ph/replication"));
-          break;
-        case CreateSnapshotError::DisabledForAnalyticsPeriodicCommit:
-          spdlog::warn(utils::MessageWithLink("Periodic snapshots are disabled for analytical mode.",
-                                              "https://memgr.ph/replication"));
-          break;
-        case storage::InMemoryStorage::CreateSnapshotError::ReachedMaxNumTries:
-          spdlog::warn("Failed to create snapshot. Reached max number of tries. Please contact support");
-          break;
-      }
-    }
+  if (config_.durability.snapshot_on_exit && this->create_snapshot_handler) {
+    create_snapshot_handler();
   }
   committed_transactions_.WithLock([](auto &transactions) { transactions.clear(); });
 }
 
 InMemoryStorage::InMemoryAccessor::InMemoryAccessor(auto tag, InMemoryStorage *storage, IsolationLevel isolation_level,
-                                                    StorageMode storage_mode)
-    : Accessor(tag, storage, isolation_level, storage_mode), config_(storage->config_.items) {}
+                                                    StorageMode storage_mode, bool is_main)
+    : Accessor(tag, storage, isolation_level, storage_mode, is_main), config_(storage->config_.items) {}
 InMemoryStorage::InMemoryAccessor::InMemoryAccessor(InMemoryAccessor &&other) noexcept
     : Accessor(std::move(other)), config_(other.config_) {}
 
@@ -714,7 +661,7 @@ Result<EdgeAccessor> InMemoryStorage::InMemoryAccessor::EdgeChangeType(EdgeAcces
 
 // NOLINTNEXTLINE(google-default-arguments)
 utils::BasicResult<StorageManipulationError, void> InMemoryStorage::InMemoryAccessor::Commit(
-    const std::optional<uint64_t> desired_commit_timestamp) {
+    const std::optional<uint64_t> desired_commit_timestamp, bool is_main) {
   MG_ASSERT(is_transaction_active_, "The transaction is already terminated!");
   MG_ASSERT(!transaction_.must_abort, "The transaction can't be committed!");
 
@@ -722,7 +669,7 @@ utils::BasicResult<StorageManipulationError, void> InMemoryStorage::InMemoryAcce
 
   auto *mem_storage = static_cast<InMemoryStorage *>(storage_);
 
-  auto const &replState = mem_storage->repl_state_;
+  // TODO: duplicated transaction finalisation in md_deltas and deltas processing cases
   if (!transaction_.md_deltas.empty()) {
     // This is usually done by the MVCC, but it does not handle the metadata deltas
     transaction_.EnsureCommitTimestampExists();
@@ -742,14 +689,14 @@ utils::BasicResult<StorageManipulationError, void> InMemoryStorage::InMemoryAcce
     // modifications before they are written to disk.
     // Replica can log only the write transaction received from Main
     // so the Wal files are consistent
-    if (replState.IsMain() || desired_commit_timestamp.has_value()) {
+    if (is_main || desired_commit_timestamp.has_value()) {
       could_replicate_all_sync_replicas =
           mem_storage->AppendToWalDataDefinition(transaction_, *commit_timestamp_);  // protected by engine_guard
       // TODO: release lock, and update all deltas to have a local copy of the commit timestamp
       transaction_.commit_timestamp->store(*commit_timestamp_, std::memory_order_release);  // protected by engine_guard
       // Replica can only update the last commit timestamp with
       // the commits received from main.
-      if (replState.IsMain() || desired_commit_timestamp.has_value()) {
+      if (is_main || desired_commit_timestamp.has_value()) {
         // Update the last commit timestamp
         mem_storage->repl_storage_state_.last_commit_timestamp_.store(*commit_timestamp_);  // protected by engine_guard
       }
@@ -823,7 +770,7 @@ utils::BasicResult<StorageManipulationError, void> InMemoryStorage::InMemoryAcce
         // modifications before they are written to disk.
         // Replica can log only the write transaction received from Main
         // so the Wal files are consistent
-        if (replState.IsMain() || desired_commit_timestamp.has_value()) {
+        if (is_main || desired_commit_timestamp.has_value()) {
           could_replicate_all_sync_replicas =
               mem_storage->AppendToWalDataManipulation(transaction_, *commit_timestamp_);  // protected by engine_guard
         }
@@ -834,7 +781,7 @@ utils::BasicResult<StorageManipulationError, void> InMemoryStorage::InMemoryAcce
                                              std::memory_order_release);  // protected by engine_guard
         // Replica can only update the last commit timestamp with
         // the commits received from main.
-        if (replState.IsMain() || desired_commit_timestamp.has_value()) {
+        if (is_main || desired_commit_timestamp.has_value()) {
           // Update the last commit timestamp
           mem_storage->repl_storage_state_.last_commit_timestamp_.store(
               *commit_timestamp_);  // protected by engine_guard
@@ -1198,7 +1145,7 @@ VerticesIterable InMemoryStorage::InMemoryAccessor::Vertices(
       mem_label_property_index->Vertices(label, property, lower_bound, upper_bound, view, storage_, &transaction_));
 }
 
-Transaction InMemoryStorage::CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode) {
+Transaction InMemoryStorage::CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode, bool is_main) {
   // We acquire the transaction engine lock here because we access (and
   // modify) the transaction engine variables (`transaction_id` and
   // `timestamp`) below.
@@ -1213,16 +1160,34 @@ Transaction InMemoryStorage::CreateTransaction(IsolationLevel isolation_level, S
     // of any query on replica to the last commited transaction
     // which is timestamp_ as only commit of transaction with writes
     // can change the value of it.
-    auto const &replState = repl_state_;
-    if (replState.IsReplica()) {
-      start_timestamp = timestamp_;
-    } else {
+    if (is_main) {
       start_timestamp = timestamp_++;
+    } else {
+      start_timestamp = timestamp_;
     }
   }
   return {transaction_id, start_timestamp, isolation_level, storage_mode, false};
 }
 
+void InMemoryStorage::SetStorageMode(StorageMode new_storage_mode) {
+  std::unique_lock main_guard{main_lock_};
+  MG_ASSERT(
+      (storage_mode_ == StorageMode::IN_MEMORY_ANALYTICAL || storage_mode_ == StorageMode::IN_MEMORY_TRANSACTIONAL) &&
+      (new_storage_mode == StorageMode::IN_MEMORY_ANALYTICAL ||
+       new_storage_mode == StorageMode::IN_MEMORY_TRANSACTIONAL));
+  if (storage_mode_ != new_storage_mode) {
+    if (new_storage_mode == StorageMode::IN_MEMORY_ANALYTICAL) {
+      snapshot_runner_.Stop();
+    } else if (config_.durability.snapshot_wal_mode != Config::Durability::SnapshotWalMode::DISABLED) {
+      snapshot_runner_.Run("Snapshot", config_.durability.snapshot_interval,
+                           [this]() { this->create_snapshot_handler(); });
+    }
+
+    storage_mode_ = new_storage_mode;
+    FreeMemory(std::move(main_guard));
+  }
+}
+
 template <bool force>
 void InMemoryStorage::CollectGarbage(std::unique_lock<utils::ResourceLock> main_guard) {
   // NOTE: You do not need to consider cleanup of deleted object that occurred in
@@ -1632,8 +1597,7 @@ void InMemoryStorage::FinalizeWalFile() {
 }
 
 bool InMemoryStorage::AppendToWalDataManipulation(const Transaction &transaction, uint64_t final_commit_timestamp) {
-  auto &replState = repl_state_;
-  if (!InitializeWalFile(replState.GetEpoch())) {
+  if (!InitializeWalFile(repl_storage_state_.epoch_)) {
     return true;
   }
   // Traverse deltas and append them to the WAL file.
@@ -1803,8 +1767,7 @@ bool InMemoryStorage::AppendToWalDataManipulation(const Transaction &transaction
 }
 
 bool InMemoryStorage::AppendToWalDataDefinition(const Transaction &transaction, uint64_t final_commit_timestamp) {
-  auto &replState = repl_state_;
-  if (!InitializeWalFile(replState.GetEpoch())) {
+  if (!InitializeWalFile(repl_storage_state_.epoch_)) {
     return true;
   }
 
@@ -1913,13 +1876,8 @@ void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOpera
   return AppendToWalDataDefinition(operation, label, {}, {}, final_commit_timestamp);
 }
 
-utils::BasicResult<InMemoryStorage::CreateSnapshotError> InMemoryStorage::CreateSnapshot(
-    memgraph::replication::ReplicationState const &replicationState, std::optional<bool> is_periodic) {
-  if (replicationState.IsReplica()) {
-    return CreateSnapshotError::DisabledForReplica;
-  }
-
-  auto const &epoch = replicationState.GetEpoch();
+utils::BasicResult<InMemoryStorage::CreateSnapshotError> InMemoryStorage::CreateSnapshot() {
+  auto const &epoch = repl_storage_state_.epoch_;
   auto snapshot_creator = [this, &epoch]() {
     utils::Timer timer;
     auto transaction = CreateTransaction(IsolationLevel::SNAPSHOT_ISOLATION, storage_mode_);
@@ -1946,9 +1904,6 @@ utils::BasicResult<InMemoryStorage::CreateSnapshotError> InMemoryStorage::Create
     } else {
       std::unique_lock main_guard{main_lock_};
       if (storage_mode_ == memgraph::storage::StorageMode::IN_MEMORY_ANALYTICAL) {
-        if (is_periodic && *is_periodic) {
-          return CreateSnapshotError::DisabledForAnalyticsPeriodicCommit;
-        }
         snapshot_creator();
         return {};
       }
@@ -1979,13 +1934,13 @@ uint64_t InMemoryStorage::CommitTimestamp(const std::optional<uint64_t> desired_
   return *desired_commit_timestamp;
 }
 
-void InMemoryStorage::PrepareForNewEpoch(std::string prev_epoch) {
+void InMemoryStorage::PrepareForNewEpoch() {
   std::unique_lock engine_guard{engine_lock_};
   if (wal_file_) {
     wal_file_->FinalizeWal();
     wal_file_.reset();
   }
-  repl_storage_state_.AddEpochToHistory(std::move(prev_epoch));
+  repl_storage_state_.TrackLatestHistory();
 }
 
 utils::FileRetainer::FileLockerAccessor::ret_type InMemoryStorage::IsPathLocked() {
@@ -2013,26 +1968,45 @@ utils::FileRetainer::FileLockerAccessor::ret_type InMemoryStorage::UnlockPath()
   return true;
 }
 
-auto InMemoryStorage::CreateReplicationClient(const memgraph::replication::ReplicationClientConfig &config)
+auto InMemoryStorage::CreateReplicationClient(const memgraph::replication::ReplicationClientConfig &config,
+                                              const memgraph::replication::ReplicationEpoch *current_epoch)
     -> std::unique_ptr<ReplicationClient> {
-  auto &replState = this->repl_state_;
-  return std::make_unique<InMemoryReplicationClient>(this, config, &replState.GetEpoch());
+  return std::make_unique<InMemoryReplicationClient>(this, config, current_epoch);
 }
 
-std::unique_ptr<ReplicationServer> InMemoryStorage::CreateReplicationServer(
-    const memgraph::replication::ReplicationServerConfig &config) {
-  auto &replState = this->repl_state_;
-  return std::make_unique<InMemoryReplicationServer>(this, config, &replState.GetEpoch());
+std::unique_ptr<Storage::Accessor> InMemoryStorage::Access(std::optional<IsolationLevel> override_isolation_level,
+                                                           bool is_main) {
+  return std::unique_ptr<InMemoryAccessor>(new InMemoryAccessor{Storage::Accessor::shared_access, this,
+                                                                override_isolation_level.value_or(isolation_level_),
+                                                                storage_mode_, is_main});
+}
+std::unique_ptr<Storage::Accessor> InMemoryStorage::UniqueAccess(std::optional<IsolationLevel> override_isolation_level,
+                                                                 bool is_main) {
+  return std::unique_ptr<InMemoryAccessor>(new InMemoryAccessor{Storage::Accessor::unique_access, this,
+                                                                override_isolation_level.value_or(isolation_level_),
+                                                                storage_mode_, is_main});
 }
 
-std::unique_ptr<Storage::Accessor> InMemoryStorage::Access(std::optional<IsolationLevel> override_isolation_level) {
-  return std::unique_ptr<InMemoryAccessor>(new InMemoryAccessor{
-      Storage::Accessor::shared_access, this, override_isolation_level.value_or(isolation_level_), storage_mode_});
-}
-std::unique_ptr<Storage::Accessor> InMemoryStorage::UniqueAccess(
-    std::optional<IsolationLevel> override_isolation_level) {
-  return std::unique_ptr<InMemoryAccessor>(new InMemoryAccessor{
-      Storage::Accessor::unique_access, this, override_isolation_level.value_or(isolation_level_), storage_mode_});
+void InMemoryStorage::CreateSnapshotHandler(
+    std::function<utils::BasicResult<InMemoryStorage::CreateSnapshotError>()> cb) {
+  create_snapshot_handler = [cb]() {
+    if (auto maybe_error = cb(); maybe_error.HasError()) {
+      switch (maybe_error.GetError()) {
+        case CreateSnapshotError::DisabledForReplica:
+          spdlog::warn(utils::MessageWithLink("Snapshots are disabled for replicas.", "https://memgr.ph/replication"));
+          break;
+        case CreateSnapshotError::ReachedMaxNumTries:
+          spdlog::warn("Failed to create snapshot. Reached max number of tries. Please contact support");
+          break;
+      }
+    }
+  };
+
+  // Run the snapshot thread (if enabled)
+  if (config_.durability.snapshot_wal_mode != Config::Durability::SnapshotWalMode::DISABLED) {
+    snapshot_runner_.Run("Snapshot", config_.durability.snapshot_interval,
+                         [this]() { this->create_snapshot_handler(); });
+  }
 }
 IndicesInfo InMemoryStorage::InMemoryAccessor::ListAllIndices() const {
   auto *in_memory = static_cast<InMemoryStorage *>(storage_);
diff --git a/src/storage/v2/inmemory/storage.hpp b/src/storage/v2/inmemory/storage.hpp
index 73b3bfc5c..1d16eadf1 100644
--- a/src/storage/v2/inmemory/storage.hpp
+++ b/src/storage/v2/inmemory/storage.hpp
@@ -31,6 +31,10 @@
 #include "utils/resource_lock.hpp"
 #include "utils/synchronized.hpp"
 
+namespace memgraph::dbms {
+class InMemoryReplicationHandlers;
+}
+
 namespace memgraph::storage {
 
 // The storage is based on this paper:
@@ -39,15 +43,11 @@ namespace memgraph::storage {
 // only implement snapshot isolation for transactions.
 
 class InMemoryStorage final : public Storage {
-  friend class InMemoryReplicationServer;
+  friend class memgraph::dbms::InMemoryReplicationHandlers;
   friend class InMemoryReplicationClient;
 
  public:
-  enum class CreateSnapshotError : uint8_t {
-    DisabledForReplica,
-    DisabledForAnalyticsPeriodicCommit,
-    ReachedMaxNumTries
-  };
+  enum class CreateSnapshotError : uint8_t { DisabledForReplica, ReachedMaxNumTries };
 
   /// @throw std::system_error
   /// @throw std::bad_alloc
@@ -66,7 +66,7 @@ class InMemoryStorage final : public Storage {
     friend class InMemoryStorage;
 
     explicit InMemoryAccessor(auto tag, InMemoryStorage *storage, IsolationLevel isolation_level,
-                              StorageMode storage_mode);
+                              StorageMode storage_mode, bool is_main = true);
 
    public:
     InMemoryAccessor(const InMemoryAccessor &) = delete;
@@ -204,8 +204,8 @@ class InMemoryStorage final : public Storage {
     /// case the transaction is automatically aborted.
     /// @throw std::bad_alloc
     // NOLINTNEXTLINE(google-default-arguments)
-    utils::BasicResult<StorageManipulationError, void> Commit(
-        std::optional<uint64_t> desired_commit_timestamp = {}) override;
+    utils::BasicResult<StorageManipulationError, void> Commit(std::optional<uint64_t> desired_commit_timestamp = {},
+                                                              bool is_main = true) override;
 
     /// @throw std::bad_alloc
     void Abort() override;
@@ -311,9 +311,13 @@ class InMemoryStorage final : public Storage {
     Transaction &GetTransaction() { return transaction_; }
   };
 
-  std::unique_ptr<Storage::Accessor> Access(std::optional<IsolationLevel> override_isolation_level) override;
+  using Storage::Access;
+  std::unique_ptr<Storage::Accessor> Access(std::optional<IsolationLevel> override_isolation_level,
+                                            bool is_main) override;
 
-  std::unique_ptr<Storage::Accessor> UniqueAccess(std::optional<IsolationLevel> override_isolation_level) override;
+  using Storage::UniqueAccess;
+  std::unique_ptr<Storage::Accessor> UniqueAccess(std::optional<IsolationLevel> override_isolation_level,
+                                                  bool is_main) override;
 
   void FreeMemory(std::unique_lock<utils::ResourceLock> main_guard) override;
 
@@ -321,16 +325,18 @@ class InMemoryStorage final : public Storage {
   utils::FileRetainer::FileLockerAccessor::ret_type LockPath();
   utils::FileRetainer::FileLockerAccessor::ret_type UnlockPath();
 
-  utils::BasicResult<InMemoryStorage::CreateSnapshotError> CreateSnapshot(
-      memgraph::replication::ReplicationState const &replicationState, std::optional<bool> is_periodic);
+  utils::BasicResult<InMemoryStorage::CreateSnapshotError> CreateSnapshot();
 
-  Transaction CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode) override;
+  void CreateSnapshotHandler(std::function<utils::BasicResult<InMemoryStorage::CreateSnapshotError>()> cb);
 
-  auto CreateReplicationClient(const memgraph::replication::ReplicationClientConfig &config)
+  using Storage::CreateTransaction;
+  Transaction CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode, bool is_main) override;
+
+  auto CreateReplicationClient(const memgraph::replication::ReplicationClientConfig &config,
+                               const memgraph::replication::ReplicationEpoch *current_epoch)
       -> std::unique_ptr<ReplicationClient> override;
 
-  auto CreateReplicationServer(const memgraph::replication::ReplicationServerConfig &config)
-      -> std::unique_ptr<ReplicationServer> override;
+  void SetStorageMode(StorageMode storage_mode);
 
  private:
   /// The force parameter determines the behaviour of the garbage collector.
@@ -377,7 +383,7 @@ class InMemoryStorage final : public Storage {
 
   uint64_t CommitTimestamp(std::optional<uint64_t> desired_commit_timestamp = {});
 
-  void PrepareForNewEpoch(std::string prev_epoch) override;
+  void PrepareForNewEpoch() override;
 
   // Main object storage
   utils::SkipList<storage::Vertex> vertices_;
@@ -444,6 +450,9 @@ class InMemoryStorage final : public Storage {
   // Flags to inform CollectGarbage that it needs to do the more expensive full scans
   std::atomic<bool> gc_full_scan_vertices_delete_ = false;
   std::atomic<bool> gc_full_scan_edges_delete_ = false;
+
+  // Moved the create snapshot to a user defined handler so we can remove the global replication state from the storage
+  std::function<void()> create_snapshot_handler{};
 };
 
 }  // namespace memgraph::storage
diff --git a/src/storage/v2/replication/replication_client.cpp b/src/storage/v2/replication/replication_client.cpp
index 0d7949e27..33313b130 100644
--- a/src/storage/v2/replication/replication_client.cpp
+++ b/src/storage/v2/replication/replication_client.cpp
@@ -49,8 +49,8 @@ uint64_t ReplicationClient::LastCommitTimestamp() const {
 void ReplicationClient::InitializeClient() {
   uint64_t current_commit_timestamp{kTimestampInitialId};
 
-  auto stream{rpc_client_.Stream<replication::HeartbeatRpc>(storage_->repl_storage_state_.last_commit_timestamp_,
-                                                            std::string{repl_epoch_->id()})};
+  auto stream{rpc_client_.Stream<replication::HeartbeatRpc>(
+      storage_->id(), storage_->repl_storage_state_.last_commit_timestamp_, std::string{repl_epoch_->id()})};
 
   const auto replica = stream.AwaitResponse();
   std::optional<uint64_t> branching_point;
@@ -98,7 +98,7 @@ TimestampInfo ReplicationClient::GetTimestampInfo() {
   info.current_number_of_timestamp_behind_master = 0;
 
   try {
-    auto stream{rpc_client_.Stream<replication::TimestampRpc>()};
+    auto stream{rpc_client_.Stream<replication::TimestampRpc>(storage_->id())};
     const auto response = stream.AwaitResponse();
     const auto is_success = response.success;
     if (!is_success) {
@@ -135,8 +135,15 @@ void ReplicationClient::TryInitializeClientAsync() {
 void ReplicationClient::TryInitializeClientSync() {
   try {
     InitializeClient();
+  } catch (const rpc::VersionMismatchRpcFailedException &) {
+    std::unique_lock client_guard{client_lock_};
+    replica_state_.store(replication::ReplicaState::INVALID);
+    spdlog::error(
+        utils::MessageWithLink("Failed to connect to replica {} at the endpoint {}. Because the replica "
+                               "deployed is not a compatible version.",
+                               name_, rpc_client_.Endpoint(), "https://memgr.ph/replication"));
   } catch (const rpc::RpcFailedException &) {
-    std::unique_lock client_guarde{client_lock_};
+    std::unique_lock client_guard{client_lock_};
     replica_state_.store(replication::ReplicaState::INVALID);
     spdlog::error(utils::MessageWithLink("Failed to connect to replica {} at the endpoint {}.", name_,
                                          rpc_client_.Endpoint(), "https://memgr.ph/replication"));
@@ -222,7 +229,7 @@ bool ReplicationClient::FinalizeTransactionReplication() {
 void ReplicationClient::FrequentCheck() {
   const auto is_success = std::invoke([this]() {
     try {
-      auto stream{rpc_client_.Stream<replication::FrequentHeartbeatRpc>()};
+      auto stream{rpc_client_.Stream<memgraph::replication::FrequentHeartbeatRpc>()};
       const auto response = stream.AwaitResponse();
       return response.success;
     } catch (const rpc::RpcFailedException &) {
@@ -280,7 +287,8 @@ void ReplicationClient::IfStreamingTransaction(const std::function<void(ReplicaS
 ReplicaStream::ReplicaStream(ReplicationClient *self, const uint64_t previous_commit_timestamp,
                              const uint64_t current_seq_num)
     : self_(self),
-      stream_(self_->rpc_client_.Stream<replication::AppendDeltasRpc>(previous_commit_timestamp, current_seq_num)) {
+      stream_(self_->rpc_client_.Stream<replication::AppendDeltasRpc>(self->GetStorageId(), previous_commit_timestamp,
+                                                                      current_seq_num)) {
   replication::Encoder encoder{stream_.GetBuilder()};
 
   encoder.WriteString(self->repl_epoch_->id());
@@ -312,4 +320,5 @@ void ReplicaStream::AppendOperation(durability::StorageMetadataOperation operati
 
 replication::AppendDeltasRes ReplicaStream::Finalize() { return stream_.AwaitResponse(); }
 
+auto ReplicationClient::GetStorageId() const -> std::string { return storage_->id(); }
 }  // namespace memgraph::storage
diff --git a/src/storage/v2/replication/replication_client.hpp b/src/storage/v2/replication/replication_client.hpp
index e3c1b2634..817f47bcb 100644
--- a/src/storage/v2/replication/replication_client.hpp
+++ b/src/storage/v2/replication/replication_client.hpp
@@ -29,6 +29,7 @@
 #include <optional>
 #include <set>
 #include <string>
+#include <variant>
 
 namespace memgraph::storage {
 
@@ -86,6 +87,8 @@ class ReplicationClient {
   auto State() const -> replication::ReplicaState { return replica_state_.load(); }
   auto GetTimestampInfo() -> TimestampInfo;
 
+  auto GetStorageId() const -> std::string;
+
   void Start();
   void StartTransactionReplication(const uint64_t current_wal_seq_num);
   // Replication clients can be removed at any point
diff --git a/src/storage/v2/replication/replication_handler.cpp b/src/storage/v2/replication/replication_handler.cpp
deleted file mode 100644
index ff09faef9..000000000
--- a/src/storage/v2/replication/replication_handler.cpp
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2023 Memgraph Ltd.
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
-// License, and you may not use this file except in compliance with the Business Source License.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-#include "storage/v2/replication/replication_handler.hpp"
-
-#include "replication/state.hpp"
-#include "storage/v2/storage.hpp"
-
-namespace memgraph::storage {
-
-namespace {
-
-std::string RegisterReplicaErrorToString(RegisterReplicaError error) {
-  switch (error) {
-    using enum RegisterReplicaError;
-    case NAME_EXISTS:
-      return "NAME_EXISTS";
-    case END_POINT_EXISTS:
-      return "END_POINT_EXISTS";
-    case CONNECTION_FAILED:
-      return "CONNECTION_FAILED";
-    case COULD_NOT_BE_PERSISTED:
-      return "COULD_NOT_BE_PERSISTED";
-  }
-}
-}  // namespace
-
-bool ReplicationHandler::SetReplicationRoleMain() {
-  // We don't want to generate new epoch_id and do the
-  // cleanup if we're already a MAIN
-  // TODO: under lock
-  if (repl_state_.IsMain()) {
-    return false;
-  }
-
-  // STEP 1) bring down all REPLICA servers
-  auto current_epoch = std::string(repl_state_.GetEpoch().id());
-  {  // TODO: foreach storage
-    // ensure replica server brought down
-    storage_.repl_storage_state_.replication_server_.reset(nullptr);
-    // Remember old epoch + storage timestamp association
-    storage_.PrepareForNewEpoch(current_epoch);
-  }
-
-  // STEP 2) Change to MAIN
-  repl_state_.GetEpoch().NewEpoch();
-  if (!repl_state_.TryPersistRoleMain()) {
-    // TODO: On failure restore old epoch? restore replication servers?
-    return false;
-  }
-  repl_state_.SetRole(memgraph::replication::ReplicationRole::MAIN);
-  return true;
-}
-memgraph::utils::BasicResult<RegisterReplicaError> ReplicationHandler::RegisterReplica(
-    const RegistrationMode registration_mode, const memgraph::replication::ReplicationClientConfig &config) {
-  MG_ASSERT(repl_state_.IsMain(), "Only main instance can register a replica!");
-
-  auto name_check = [&config](auto &clients) {
-    auto name_matches = [&name = config.name](const auto &client) { return client->Name() == name; };
-    return std::any_of(clients.begin(), clients.end(), name_matches);
-  };
-
-  io::network::Endpoint desired_endpoint;
-  if (io::network::Endpoint::GetIpFamily(config.ip_address) == io::network::Endpoint::IpFamily::NONE) {
-    desired_endpoint = io::network::Endpoint{io::network::Endpoint::needs_resolving, config.ip_address, config.port};
-  } else {
-    desired_endpoint = io::network::Endpoint{config.ip_address, config.port};
-  }
-  auto endpoint_check = [&](auto &clients) {
-    auto endpoint_matches = [&](const auto &client) { return client->Endpoint() == desired_endpoint; };
-    return std::any_of(clients.begin(), clients.end(), endpoint_matches);
-  };
-
-  auto task = [&](auto &clients) -> utils::BasicResult<RegisterReplicaError> {
-    if (name_check(clients)) {
-      return RegisterReplicaError::NAME_EXISTS;
-    }
-
-    if (endpoint_check(clients)) {
-      return RegisterReplicaError::END_POINT_EXISTS;
-    }
-
-    using enum RegistrationMode;
-    if (registration_mode != RESTORE && !repl_state_.TryPersistRegisteredReplica(config)) {
-      return RegisterReplicaError::COULD_NOT_BE_PERSISTED;
-    }
-
-    auto client = storage_.CreateReplicationClient(config);
-    client->Start();
-
-    if (client->State() == replication::ReplicaState::INVALID) {
-      if (registration_mode != RESTORE) {
-        return RegisterReplicaError::CONNECTION_FAILED;
-      }
-
-      spdlog::warn("Connection failed when registering replica {}. Replica will still be registered.", client->Name());
-    }
-
-    clients.push_back(std::move(client));
-    return {};
-  };
-
-  return storage_.repl_storage_state_.replication_clients_.WithLock(task);
-}
-bool ReplicationHandler::SetReplicationRoleReplica(const memgraph::replication::ReplicationServerConfig &config) {
-  // We don't want to restart the server if we're already a REPLICA
-  if (repl_state_.IsReplica()) {
-    return false;
-  }
-
-  std::unique_ptr<ReplicationServer> replication_server = storage_.CreateReplicationServer(config);
-  bool res = replication_server->Start();
-  if (!res) {
-    spdlog::error("Unable to start the replication server.");
-    return false;
-  }
-  storage_.repl_storage_state_.replication_server_ = std::move(replication_server);
-
-  if (!repl_state_.TryPersistRoleReplica(config)) {
-    return false;
-  }
-
-  repl_state_.SetRole(memgraph::replication::ReplicationRole::REPLICA);
-  return true;
-}
-auto ReplicationHandler::UnregisterReplica(std::string_view name) -> UnregisterReplicaResult {
-  if (repl_state_.IsReplica()) {
-    return UnregisterReplicaResult::NOT_MAIN;
-  }
-
-  if (!repl_state_.TryPersistUnregisterReplica(name)) {
-    return UnregisterReplicaResult::COULD_NOT_BE_PERSISTED;
-  }
-
-  auto const n_unregistered = storage_.repl_storage_state_.replication_clients_.WithLock([&](auto &clients) {
-    return std::erase_if(clients, [&](const auto &client) { return client->Name() == name; });
-  });
-  return (n_unregistered != 0) ? UnregisterReplicaResult::SUCCESS : UnregisterReplicaResult::CAN_NOT_UNREGISTER;
-}
-void ReplicationHandler::RestoreReplication() {
-  if (!repl_state_.ShouldPersist()) {
-    return;
-  }
-
-  spdlog::info("Restoring replication role.");
-
-  using memgraph::replication::ReplicationState;
-
-  auto replicationData = repl_state_.FetchReplicationData();
-  if (replicationData.HasError()) {
-    switch (replicationData.GetError()) {
-      using enum ReplicationState::FetchReplicationError;
-      case NOTHING_FETCHED: {
-        spdlog::debug("Cannot find data needed for restore replication role in persisted metadata.");
-        return;
-      }
-      case PARSE_ERROR: {
-        LOG_FATAL("Cannot parse previously saved configuration of replication role.");
-        return;
-      }
-    }
-  }
-
-  /// MAIN
-  auto const recover_main = [this](ReplicationState::ReplicationDataMain const &configs) {
-    storage_.repl_storage_state_.replication_server_.reset();
-    repl_state_.SetRole(memgraph::replication::ReplicationRole::MAIN);
-    for (const auto &config : configs) {
-      spdlog::info("Replica {} restored for {}.", config.name, storage_.id());
-      auto ret = RegisterReplica(RegistrationMode::RESTORE, config);
-      if (ret.HasError()) {
-        MG_ASSERT(RegisterReplicaError::CONNECTION_FAILED != ret.GetError());
-        LOG_FATAL("Failure when restoring replica {}: {}.", config.name, RegisterReplicaErrorToString(ret.GetError()));
-      }
-      spdlog::info("Replica {} restored for {}.", config.name, storage_.id());
-    }
-    spdlog::info("Replication role restored to MAIN.");
-  };
-
-  /// REPLICA
-  auto const recover_replica = [this](ReplicationState::ReplicationDataReplica const &config) {
-    auto replication_server = storage_.CreateReplicationServer(config);
-    if (!replication_server->Start()) {
-      LOG_FATAL("Unable to start the replication server.");
-    }
-    storage_.repl_storage_state_.replication_server_ = std::move(replication_server);
-    repl_state_.SetRole(memgraph::replication::ReplicationRole::REPLICA);
-    spdlog::info("Replication role restored to REPLICA.");
-  };
-
-  std::visit(
-      utils::Overloaded{
-          recover_main,
-          recover_replica,
-      },
-      *replicationData);
-}
-auto ReplicationHandler::GetRole() const -> memgraph::replication::ReplicationRole { return repl_state_.GetRole(); }
-bool ReplicationHandler::IsMain() const { return repl_state_.IsMain(); }
-bool ReplicationHandler::IsReplica() const { return repl_state_.IsReplica(); }
-}  // namespace memgraph::storage
diff --git a/src/storage/v2/replication/replication_server.cpp b/src/storage/v2/replication/replication_server.cpp
deleted file mode 100644
index 0ed367d88..000000000
--- a/src/storage/v2/replication/replication_server.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2023 Memgraph Ltd.
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
-// License, and you may not use this file except in compliance with the Business Source License.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-#include "replication_server.hpp"
-#include "io/network/endpoint.hpp"
-#include "replication/config.hpp"
-#include "rpc.hpp"
-
-namespace memgraph::storage {
-namespace {
-
-auto CreateServerContext(const memgraph::replication::ReplicationServerConfig &config) -> communication::ServerContext {
-  return (config.ssl) ? communication::ServerContext{config.ssl->key_file, config.ssl->cert_file, config.ssl->ca_file,
-                                                     config.ssl->verify_peer}
-                      : communication::ServerContext{};
-}
-
-// NOTE: The replication server must have a single thread for processing
-// because there is no need for more processing threads - each replica can
-// have only a single main server. Also, the single-threaded guarantee
-// simplifies the rest of the implementation.
-constexpr auto kReplictionServerThreads = 1;
-}  // namespace
-
-ReplicationServer::ReplicationServer(const memgraph::replication::ReplicationServerConfig &config)
-    : rpc_server_context_{CreateServerContext(config)},
-      rpc_server_{io::network::Endpoint{config.ip_address, config.port}, &rpc_server_context_,
-                  kReplictionServerThreads} {
-  rpc_server_.Register<replication::FrequentHeartbeatRpc>([](auto *req_reader, auto *res_builder) {
-    spdlog::debug("Received FrequentHeartbeatRpc");
-    FrequentHeartbeatHandler(req_reader, res_builder);
-  });
-}
-
-ReplicationServer::~ReplicationServer() {
-  if (rpc_server_.IsRunning()) {
-    auto const &endpoint = rpc_server_.endpoint();
-    spdlog::trace("Closing replication server on {}:{}", endpoint.address, endpoint.port);
-    rpc_server_.Shutdown();
-  }
-  rpc_server_.AwaitShutdown();
-}
-
-bool ReplicationServer::Start() { return rpc_server_.Start(); }
-
-void ReplicationServer::FrequentHeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder) {
-  replication::FrequentHeartbeatReq req;
-  slk::Load(&req, req_reader);
-  replication::FrequentHeartbeatRes res{true};
-  slk::Save(res, res_builder);
-}
-
-}  // namespace memgraph::storage
diff --git a/src/storage/v2/replication/replication_storage_state.cpp b/src/storage/v2/replication/replication_storage_state.cpp
index 5779bd2ba..1cd0bec09 100644
--- a/src/storage/v2/replication/replication_storage_state.cpp
+++ b/src/storage/v2/replication/replication_storage_state.cpp
@@ -11,8 +11,8 @@
 
 #include "storage/v2/replication/replication_storage_state.hpp"
 
+#include "replication/replication_server.hpp"
 #include "storage/v2/replication/replication_client.hpp"
-#include "storage/v2/replication/replication_server.hpp"
 
 namespace memgraph::storage {
 
@@ -91,17 +91,16 @@ std::vector<ReplicaInfo> ReplicationStorageState::ReplicasInfo() const {
 }
 
 void ReplicationStorageState::Reset() {
-  replication_server_.reset();
   replication_clients_.WithLock([](auto &clients) { clients.clear(); });
 }
 
-void ReplicationStorageState::AddEpochToHistory(std::string prev_epoch) {
+void ReplicationStorageState::TrackLatestHistory() {
   constexpr uint16_t kEpochHistoryRetention = 1000;
   // Generate new epoch id and save the last one to the history.
   if (history.size() == kEpochHistoryRetention) {
     history.pop_front();
   }
-  history.emplace_back(std::move(prev_epoch), last_commit_timestamp_);
+  history.emplace_back(epoch_.id(), last_commit_timestamp_);
 }
 
 void ReplicationStorageState::AddEpochToHistoryForce(std::string prev_epoch) {
diff --git a/src/storage/v2/replication/replication_storage_state.hpp b/src/storage/v2/replication/replication_storage_state.hpp
index c3dc7c833..afedb3950 100644
--- a/src/storage/v2/replication/replication_storage_state.hpp
+++ b/src/storage/v2/replication/replication_storage_state.hpp
@@ -27,11 +27,12 @@
 #include "storage/v2/replication/global.hpp"
 #include "storage/v2/replication/rpc.hpp"
 #include "storage/v2/replication/serialization.hpp"
+#include "utils/synchronized.hpp"
 
 namespace memgraph::storage {
 
 class Storage;
-class ReplicationServer;
+
 class ReplicationClient;
 
 struct ReplicationStorageState {
@@ -49,7 +50,7 @@ struct ReplicationStorageState {
   auto ReplicasInfo() const -> std::vector<ReplicaInfo>;
 
   // History
-  void AddEpochToHistory(std::string prev_epoch);
+  void TrackLatestHistory();
   void AddEpochToHistoryForce(std::string prev_epoch);
 
   void Reset();
@@ -76,10 +77,9 @@ struct ReplicationStorageState {
   using ReplicationClientPtr = std::unique_ptr<ReplicationClient>;
   using ReplicationClientList = utils::Synchronized<std::vector<ReplicationClientPtr>, utils::RWSpinLock>;
 
-  // NOTE: Server is not in MAIN it is in REPLICA
-  std::unique_ptr<ReplicationServer> replication_server_{nullptr};
-
   ReplicationClientList replication_clients_;
+
+  memgraph::replication::ReplicationEpoch epoch_;
 };
 
 }  // namespace memgraph::storage
diff --git a/src/storage/v2/replication/rpc.cpp b/src/storage/v2/replication/rpc.cpp
index 783e3b4f5..1477aa108 100644
--- a/src/storage/v2/replication/rpc.cpp
+++ b/src/storage/v2/replication/rpc.cpp
@@ -34,18 +34,6 @@ void HeartbeatRes::Save(const HeartbeatRes &self, memgraph::slk::Builder *builde
   memgraph::slk::Save(self, builder);
 }
 void HeartbeatRes::Load(HeartbeatRes *self, memgraph::slk::Reader *reader) { memgraph::slk::Load(self, reader); }
-void FrequentHeartbeatReq::Save(const FrequentHeartbeatReq &self, memgraph::slk::Builder *builder) {
-  memgraph::slk::Save(self, builder);
-}
-void FrequentHeartbeatReq::Load(FrequentHeartbeatReq *self, memgraph::slk::Reader *reader) {
-  memgraph::slk::Load(self, reader);
-}
-void FrequentHeartbeatRes::Save(const FrequentHeartbeatRes &self, memgraph::slk::Builder *builder) {
-  memgraph::slk::Save(self, builder);
-}
-void FrequentHeartbeatRes::Load(FrequentHeartbeatRes *self, memgraph::slk::Reader *reader) {
-  memgraph::slk::Load(self, reader);
-}
 void SnapshotReq::Save(const SnapshotReq &self, memgraph::slk::Builder *builder) { memgraph::slk::Save(self, builder); }
 void SnapshotReq::Load(SnapshotReq *self, memgraph::slk::Reader *reader) { memgraph::slk::Load(self, reader); }
 void SnapshotRes::Save(const SnapshotRes &self, memgraph::slk::Builder *builder) { memgraph::slk::Save(self, builder); }
@@ -86,12 +74,6 @@ constexpr utils::TypeInfo storage::replication::HeartbeatReq::kType{utils::TypeI
 constexpr utils::TypeInfo storage::replication::HeartbeatRes::kType{utils::TypeId::REP_HEARTBEAT_RES, "HeartbeatRes",
                                                                     nullptr};
 
-constexpr utils::TypeInfo storage::replication::FrequentHeartbeatReq::kType{utils::TypeId::REP_FREQUENT_HEARTBEAT_REQ,
-                                                                            "FrequentHeartbeatReq", nullptr};
-
-constexpr utils::TypeInfo storage::replication::FrequentHeartbeatRes::kType{utils::TypeId::REP_FREQUENT_HEARTBEAT_RES,
-                                                                            "FrequentHeartbeatRes", nullptr};
-
 constexpr utils::TypeInfo storage::replication::SnapshotReq::kType{utils::TypeId::REP_SNAPSHOT_REQ, "SnapshotReq",
                                                                    nullptr};
 
@@ -121,47 +103,61 @@ namespace slk {
 // Serialize code for TimestampRes
 
 void Save(const memgraph::storage::replication::TimestampRes &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self.db_name, builder);
   memgraph::slk::Save(self.success, builder);
   memgraph::slk::Save(self.current_commit_timestamp, builder);
 }
 
 void Load(memgraph::storage::replication::TimestampRes *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(&self->db_name, reader);
   memgraph::slk::Load(&self->success, reader);
   memgraph::slk::Load(&self->current_commit_timestamp, reader);
 }
 
 // Serialize code for TimestampReq
 
-void Save(const memgraph::storage::replication::TimestampReq &self, memgraph::slk::Builder *builder) {}
+void Save(const memgraph::storage::replication::TimestampReq &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self.db_name, builder);
+}
 
-void Load(memgraph::storage::replication::TimestampReq *self, memgraph::slk::Reader *reader) {}
+void Load(memgraph::storage::replication::TimestampReq *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(&self->db_name, reader);
+}
 
 // Serialize code for CurrentWalRes
 
 void Save(const memgraph::storage::replication::CurrentWalRes &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self.db_name, builder);
   memgraph::slk::Save(self.success, builder);
   memgraph::slk::Save(self.current_commit_timestamp, builder);
 }
 
 void Load(memgraph::storage::replication::CurrentWalRes *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(&self->db_name, reader);
   memgraph::slk::Load(&self->success, reader);
   memgraph::slk::Load(&self->current_commit_timestamp, reader);
 }
 
 // Serialize code for CurrentWalReq
 
-void Save(const memgraph::storage::replication::CurrentWalReq &self, memgraph::slk::Builder *builder) {}
+void Save(const memgraph::storage::replication::CurrentWalReq &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self.db_name, builder);
+}
 
-void Load(memgraph::storage::replication::CurrentWalReq *self, memgraph::slk::Reader *reader) {}
+void Load(memgraph::storage::replication::CurrentWalReq *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(&self->db_name, reader);
+}
 
 // Serialize code for WalFilesRes
 
 void Save(const memgraph::storage::replication::WalFilesRes &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self.db_name, builder);
   memgraph::slk::Save(self.success, builder);
   memgraph::slk::Save(self.current_commit_timestamp, builder);
 }
 
 void Load(memgraph::storage::replication::WalFilesRes *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(&self->db_name, reader);
   memgraph::slk::Load(&self->success, reader);
   memgraph::slk::Load(&self->current_commit_timestamp, reader);
 }
@@ -169,56 +165,50 @@ void Load(memgraph::storage::replication::WalFilesRes *self, memgraph::slk::Read
 // Serialize code for WalFilesReq
 
 void Save(const memgraph::storage::replication::WalFilesReq &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self.db_name, builder);
   memgraph::slk::Save(self.file_number, builder);
 }
 
 void Load(memgraph::storage::replication::WalFilesReq *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(&self->db_name, reader);
   memgraph::slk::Load(&self->file_number, reader);
 }
 
 // Serialize code for SnapshotRes
 
 void Save(const memgraph::storage::replication::SnapshotRes &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self.db_name, builder);
   memgraph::slk::Save(self.success, builder);
   memgraph::slk::Save(self.current_commit_timestamp, builder);
 }
 
 void Load(memgraph::storage::replication::SnapshotRes *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(&self->db_name, reader);
   memgraph::slk::Load(&self->success, reader);
   memgraph::slk::Load(&self->current_commit_timestamp, reader);
 }
 
 // Serialize code for SnapshotReq
 
-void Save(const memgraph::storage::replication::SnapshotReq &self, memgraph::slk::Builder *builder) {}
-
-void Load(memgraph::storage::replication::SnapshotReq *self, memgraph::slk::Reader *reader) {}
-
-// Serialize code for FrequentHeartbeatRes
-
-void Save(const memgraph::storage::replication::FrequentHeartbeatRes &self, memgraph::slk::Builder *builder) {
-  memgraph::slk::Save(self.success, builder);
+void Save(const memgraph::storage::replication::SnapshotReq &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self.db_name, builder);
 }
 
-void Load(memgraph::storage::replication::FrequentHeartbeatRes *self, memgraph::slk::Reader *reader) {
-  memgraph::slk::Load(&self->success, reader);
+void Load(memgraph::storage::replication::SnapshotReq *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(&self->db_name, reader);
 }
 
-// Serialize code for FrequentHeartbeatReq
-
-void Save(const memgraph::storage::replication::FrequentHeartbeatReq &self, memgraph::slk::Builder *builder) {}
-
-void Load(memgraph::storage::replication::FrequentHeartbeatReq *self, memgraph::slk::Reader *reader) {}
-
 // Serialize code for HeartbeatRes
 
 void Save(const memgraph::storage::replication::HeartbeatRes &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self.db_name, builder);
   memgraph::slk::Save(self.success, builder);
   memgraph::slk::Save(self.current_commit_timestamp, builder);
   memgraph::slk::Save(self.epoch_id, builder);
 }
 
 void Load(memgraph::storage::replication::HeartbeatRes *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(&self->db_name, reader);
   memgraph::slk::Load(&self->success, reader);
   memgraph::slk::Load(&self->current_commit_timestamp, reader);
   memgraph::slk::Load(&self->epoch_id, reader);
@@ -227,11 +217,13 @@ void Load(memgraph::storage::replication::HeartbeatRes *self, memgraph::slk::Rea
 // Serialize code for HeartbeatReq
 
 void Save(const memgraph::storage::replication::HeartbeatReq &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self.db_name, builder);
   memgraph::slk::Save(self.main_commit_timestamp, builder);
   memgraph::slk::Save(self.epoch_id, builder);
 }
 
 void Load(memgraph::storage::replication::HeartbeatReq *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(&self->db_name, reader);
   memgraph::slk::Load(&self->main_commit_timestamp, reader);
   memgraph::slk::Load(&self->epoch_id, reader);
 }
@@ -239,11 +231,13 @@ void Load(memgraph::storage::replication::HeartbeatReq *self, memgraph::slk::Rea
 // Serialize code for AppendDeltasRes
 
 void Save(const memgraph::storage::replication::AppendDeltasRes &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self.db_name, builder);
   memgraph::slk::Save(self.success, builder);
   memgraph::slk::Save(self.current_commit_timestamp, builder);
 }
 
 void Load(memgraph::storage::replication::AppendDeltasRes *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(&self->db_name, reader);
   memgraph::slk::Load(&self->success, reader);
   memgraph::slk::Load(&self->current_commit_timestamp, reader);
 }
@@ -251,11 +245,13 @@ void Load(memgraph::storage::replication::AppendDeltasRes *self, memgraph::slk::
 // Serialize code for AppendDeltasReq
 
 void Save(const memgraph::storage::replication::AppendDeltasReq &self, memgraph::slk::Builder *builder) {
+  memgraph::slk::Save(self.db_name, builder);
   memgraph::slk::Save(self.previous_commit_timestamp, builder);
   memgraph::slk::Save(self.seq_num, builder);
 }
 
 void Load(memgraph::storage::replication::AppendDeltasReq *self, memgraph::slk::Reader *reader) {
+  memgraph::slk::Load(&self->db_name, reader);
   memgraph::slk::Load(&self->previous_commit_timestamp, reader);
   memgraph::slk::Load(&self->seq_num, reader);
 }
diff --git a/src/storage/v2/replication/rpc.hpp b/src/storage/v2/replication/rpc.hpp
index d2cf21fb4..1c0d425c8 100644
--- a/src/storage/v2/replication/rpc.hpp
+++ b/src/storage/v2/replication/rpc.hpp
@@ -32,9 +32,10 @@ struct AppendDeltasReq {
   static void Load(AppendDeltasReq *self, memgraph::slk::Reader *reader);
   static void Save(const AppendDeltasReq &self, memgraph::slk::Builder *builder);
   AppendDeltasReq() {}
-  AppendDeltasReq(uint64_t previous_commit_timestamp, uint64_t seq_num)
-      : previous_commit_timestamp(previous_commit_timestamp), seq_num(seq_num) {}
+  AppendDeltasReq(std::string name, uint64_t previous_commit_timestamp, uint64_t seq_num)
+      : db_name(std::move(name)), previous_commit_timestamp(previous_commit_timestamp), seq_num(seq_num) {}
 
+  std::string db_name;
   uint64_t previous_commit_timestamp;
   uint64_t seq_num;
 };
@@ -46,9 +47,10 @@ struct AppendDeltasRes {
   static void Load(AppendDeltasRes *self, memgraph::slk::Reader *reader);
   static void Save(const AppendDeltasRes &self, memgraph::slk::Builder *builder);
   AppendDeltasRes() {}
-  AppendDeltasRes(bool success, uint64_t current_commit_timestamp)
-      : success(success), current_commit_timestamp(current_commit_timestamp) {}
+  AppendDeltasRes(std::string name, bool success, uint64_t current_commit_timestamp)
+      : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {}
 
+  std::string db_name;
   bool success;
   uint64_t current_commit_timestamp;
 };
@@ -62,9 +64,10 @@ struct HeartbeatReq {
   static void Load(HeartbeatReq *self, memgraph::slk::Reader *reader);
   static void Save(const HeartbeatReq &self, memgraph::slk::Builder *builder);
   HeartbeatReq() {}
-  HeartbeatReq(uint64_t main_commit_timestamp, std::string epoch_id)
-      : main_commit_timestamp(main_commit_timestamp), epoch_id(std::move(epoch_id)) {}
+  HeartbeatReq(std::string name, uint64_t main_commit_timestamp, std::string epoch_id)
+      : db_name(std::move(name)), main_commit_timestamp(main_commit_timestamp), epoch_id(std::move(epoch_id)) {}
 
+  std::string db_name;
   uint64_t main_commit_timestamp;
   std::string epoch_id;
 };
@@ -76,9 +79,13 @@ struct HeartbeatRes {
   static void Load(HeartbeatRes *self, memgraph::slk::Reader *reader);
   static void Save(const HeartbeatRes &self, memgraph::slk::Builder *builder);
   HeartbeatRes() {}
-  HeartbeatRes(bool success, uint64_t current_commit_timestamp, std::string epoch_id)
-      : success(success), current_commit_timestamp(current_commit_timestamp), epoch_id(epoch_id) {}
+  HeartbeatRes(std::string name, bool success, uint64_t current_commit_timestamp, std::string epoch_id)
+      : db_name(std::move(name)),
+        success(success),
+        current_commit_timestamp(current_commit_timestamp),
+        epoch_id(epoch_id) {}
 
+  std::string db_name;
   bool success;
   uint64_t current_commit_timestamp;
   std::string epoch_id;
@@ -86,29 +93,6 @@ struct HeartbeatRes {
 
 using HeartbeatRpc = rpc::RequestResponse<HeartbeatReq, HeartbeatRes>;
 
-struct FrequentHeartbeatReq {
-  static const utils::TypeInfo kType;
-  static const utils::TypeInfo &GetTypeInfo() { return kType; }
-
-  static void Load(FrequentHeartbeatReq *self, memgraph::slk::Reader *reader);
-  static void Save(const FrequentHeartbeatReq &self, memgraph::slk::Builder *builder);
-  FrequentHeartbeatReq() {}
-};
-
-struct FrequentHeartbeatRes {
-  static const utils::TypeInfo kType;
-  static const utils::TypeInfo &GetTypeInfo() { return kType; }
-
-  static void Load(FrequentHeartbeatRes *self, memgraph::slk::Reader *reader);
-  static void Save(const FrequentHeartbeatRes &self, memgraph::slk::Builder *builder);
-  FrequentHeartbeatRes() {}
-  explicit FrequentHeartbeatRes(bool success) : success(success) {}
-
-  bool success;
-};
-
-using FrequentHeartbeatRpc = rpc::RequestResponse<FrequentHeartbeatReq, FrequentHeartbeatRes>;
-
 struct SnapshotReq {
   static const utils::TypeInfo kType;
   static const utils::TypeInfo &GetTypeInfo() { return kType; }
@@ -116,6 +100,9 @@ struct SnapshotReq {
   static void Load(SnapshotReq *self, memgraph::slk::Reader *reader);
   static void Save(const SnapshotReq &self, memgraph::slk::Builder *builder);
   SnapshotReq() {}
+  explicit SnapshotReq(std::string name) : db_name(std::move(name)) {}
+
+  std::string db_name;
 };
 
 struct SnapshotRes {
@@ -125,9 +112,10 @@ struct SnapshotRes {
   static void Load(SnapshotRes *self, memgraph::slk::Reader *reader);
   static void Save(const SnapshotRes &self, memgraph::slk::Builder *builder);
   SnapshotRes() {}
-  SnapshotRes(bool success, uint64_t current_commit_timestamp)
-      : success(success), current_commit_timestamp(current_commit_timestamp) {}
+  SnapshotRes(std::string name, bool success, uint64_t current_commit_timestamp)
+      : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {}
 
+  std::string db_name;
   bool success;
   uint64_t current_commit_timestamp;
 };
@@ -141,8 +129,9 @@ struct WalFilesReq {
   static void Load(WalFilesReq *self, memgraph::slk::Reader *reader);
   static void Save(const WalFilesReq &self, memgraph::slk::Builder *builder);
   WalFilesReq() {}
-  explicit WalFilesReq(uint64_t file_number) : file_number(file_number) {}
+  explicit WalFilesReq(std::string name, uint64_t file_number) : db_name(std::move(name)), file_number(file_number) {}
 
+  std::string db_name;
   uint64_t file_number;
 };
 
@@ -153,9 +142,10 @@ struct WalFilesRes {
   static void Load(WalFilesRes *self, memgraph::slk::Reader *reader);
   static void Save(const WalFilesRes &self, memgraph::slk::Builder *builder);
   WalFilesRes() {}
-  WalFilesRes(bool success, uint64_t current_commit_timestamp)
-      : success(success), current_commit_timestamp(current_commit_timestamp) {}
+  WalFilesRes(std::string name, bool success, uint64_t current_commit_timestamp)
+      : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {}
 
+  std::string db_name;
   bool success;
   uint64_t current_commit_timestamp;
 };
@@ -169,6 +159,9 @@ struct CurrentWalReq {
   static void Load(CurrentWalReq *self, memgraph::slk::Reader *reader);
   static void Save(const CurrentWalReq &self, memgraph::slk::Builder *builder);
   CurrentWalReq() {}
+  explicit CurrentWalReq(std::string name) : db_name(std::move(name)) {}
+
+  std::string db_name;
 };
 
 struct CurrentWalRes {
@@ -178,9 +171,10 @@ struct CurrentWalRes {
   static void Load(CurrentWalRes *self, memgraph::slk::Reader *reader);
   static void Save(const CurrentWalRes &self, memgraph::slk::Builder *builder);
   CurrentWalRes() {}
-  CurrentWalRes(bool success, uint64_t current_commit_timestamp)
-      : success(success), current_commit_timestamp(current_commit_timestamp) {}
+  CurrentWalRes(std::string name, bool success, uint64_t current_commit_timestamp)
+      : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {}
 
+  std::string db_name;
   bool success;
   uint64_t current_commit_timestamp;
 };
@@ -194,6 +188,9 @@ struct TimestampReq {
   static void Load(TimestampReq *self, memgraph::slk::Reader *reader);
   static void Save(const TimestampReq &self, memgraph::slk::Builder *builder);
   TimestampReq() {}
+  explicit TimestampReq(std::string name) : db_name(std::move(name)) {}
+
+  std::string db_name;
 };
 
 struct TimestampRes {
@@ -203,9 +200,10 @@ struct TimestampRes {
   static void Load(TimestampRes *self, memgraph::slk::Reader *reader);
   static void Save(const TimestampRes &self, memgraph::slk::Builder *builder);
   TimestampRes() {}
-  TimestampRes(bool success, uint64_t current_commit_timestamp)
-      : success(success), current_commit_timestamp(current_commit_timestamp) {}
+  TimestampRes(std::string name, bool success, uint64_t current_commit_timestamp)
+      : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {}
 
+  std::string db_name;
   bool success;
   uint64_t current_commit_timestamp;
 };
@@ -251,14 +249,6 @@ void Save(const memgraph::storage::replication::SnapshotReq &self, memgraph::slk
 
 void Load(memgraph::storage::replication::SnapshotReq *self, memgraph::slk::Reader *reader);
 
-void Save(const memgraph::storage::replication::FrequentHeartbeatRes &self, memgraph::slk::Builder *builder);
-
-void Load(memgraph::storage::replication::FrequentHeartbeatRes *self, memgraph::slk::Reader *reader);
-
-void Save(const memgraph::storage::replication::FrequentHeartbeatReq &self, memgraph::slk::Builder *builder);
-
-void Load(memgraph::storage::replication::FrequentHeartbeatReq *self, memgraph::slk::Reader *reader);
-
 void Save(const memgraph::storage::replication::HeartbeatRes &self, memgraph::slk::Builder *builder);
 
 void Load(memgraph::storage::replication::HeartbeatRes *self, memgraph::slk::Reader *reader);
diff --git a/src/storage/v2/storage.cpp b/src/storage/v2/storage.cpp
index 955b0868e..20d458e40 100644
--- a/src/storage/v2/storage.cpp
+++ b/src/storage/v2/storage.cpp
@@ -31,14 +31,8 @@ namespace memgraph::storage {
 
 class InMemoryStorage;
 
-auto ReplicationStateHelper(Config const &config) -> std::optional<std::filesystem::path> {
-  if (!config.durability.restore_replication_state_on_startup) return std::nullopt;
-  return {config.durability.storage_directory};
-}
-
 Storage::Storage(Config config, StorageMode storage_mode)
-    : repl_state_(ReplicationStateHelper(config)),
-      name_id_mapper_(std::invoke([config, storage_mode]() -> std::unique_ptr<NameIdMapper> {
+    : name_id_mapper_(std::invoke([config, storage_mode]() -> std::unique_ptr<NameIdMapper> {
         if (storage_mode == StorageMode::ON_DISK_TRANSACTIONAL) {
           return std::make_unique<DiskNameIdMapper>(config.disk.name_id_mapper_directory,
                                                     config.disk.id_name_mapper_directory);
@@ -55,26 +49,26 @@ Storage::Storage(Config config, StorageMode storage_mode)
 }
 
 Storage::Accessor::Accessor(SharedAccess /* tag */, Storage *storage, IsolationLevel isolation_level,
-                            StorageMode storage_mode)
+                            StorageMode storage_mode, bool is_main)
     : storage_(storage),
       // The lock must be acquired before creating the transaction object to
       // prevent freshly created transactions from dangling in an active state
       // during exclusive operations.
       storage_guard_(storage_->main_lock_),
       unique_guard_(storage_->main_lock_, std::defer_lock),
-      transaction_(storage->CreateTransaction(isolation_level, storage_mode)),
+      transaction_(storage->CreateTransaction(isolation_level, storage_mode, is_main)),
       is_transaction_active_(true),
       creation_storage_mode_(storage_mode) {}
 
 Storage::Accessor::Accessor(UniqueAccess /* tag */, Storage *storage, IsolationLevel isolation_level,
-                            StorageMode storage_mode)
+                            StorageMode storage_mode, bool is_main)
     : storage_(storage),
       // The lock must be acquired before creating the transaction object to
       // prevent freshly created transactions from dangling in an active state
       // during exclusive operations.
       storage_guard_(storage_->main_lock_, std::defer_lock),
       unique_guard_(storage_->main_lock_),
-      transaction_(storage->CreateTransaction(isolation_level, storage_mode)),
+      transaction_(storage->CreateTransaction(isolation_level, storage_mode, is_main)),
       is_transaction_active_(true),
       creation_storage_mode_(storage_mode) {}
 
@@ -91,18 +85,6 @@ Storage::Accessor::Accessor(Accessor &&other) noexcept
   other.commit_timestamp_.reset();
 }
 
-/// Main lock is taken by the caller.
-void Storage::SetStorageMode(StorageMode storage_mode) {
-  std::unique_lock main_guard{main_lock_};
-  MG_ASSERT(
-      (storage_mode_ == StorageMode::IN_MEMORY_ANALYTICAL || storage_mode_ == StorageMode::IN_MEMORY_TRANSACTIONAL) &&
-      (storage_mode == StorageMode::IN_MEMORY_ANALYTICAL || storage_mode == StorageMode::IN_MEMORY_TRANSACTIONAL));
-  if (storage_mode_ != storage_mode) {
-    storage_mode_ = storage_mode;
-    FreeMemory(std::move(main_guard));
-  }
-}
-
 StorageMode Storage::GetStorageMode() const { return storage_mode_; }
 
 IsolationLevel Storage::GetIsolationLevel() const noexcept { return isolation_level_; }
diff --git a/src/storage/v2/storage.hpp b/src/storage/v2/storage.hpp
index b45f12035..0499b4665 100644
--- a/src/storage/v2/storage.hpp
+++ b/src/storage/v2/storage.hpp
@@ -20,6 +20,7 @@
 #include "kvstore/kvstore.hpp"
 #include "query/exceptions.hpp"
 #include "replication/config.hpp"
+#include "replication/replication_server.hpp"
 #include "storage/v2/all_vertices_iterable.hpp"
 #include "storage/v2/commit_log.hpp"
 #include "storage/v2/config.hpp"
@@ -30,7 +31,6 @@
 #include "storage/v2/mvcc.hpp"
 #include "storage/v2/replication/enums.hpp"
 #include "storage/v2/replication/replication_client.hpp"
-#include "storage/v2/replication/replication_server.hpp"
 #include "storage/v2/replication/replication_storage_state.hpp"
 #include "storage/v2/storage_error.hpp"
 #include "storage/v2/storage_mode.hpp"
@@ -130,8 +130,10 @@ class Storage {
     static constexpr struct UniqueAccess {
     } unique_access;
 
-    Accessor(SharedAccess /* tag */, Storage *storage, IsolationLevel isolation_level, StorageMode storage_mode);
-    Accessor(UniqueAccess /* tag */, Storage *storage, IsolationLevel isolation_level, StorageMode storage_mode);
+    Accessor(SharedAccess /* tag */, Storage *storage, IsolationLevel isolation_level, StorageMode storage_mode,
+             bool is_main = true);
+    Accessor(UniqueAccess /* tag */, Storage *storage, IsolationLevel isolation_level, StorageMode storage_mode,
+             bool is_main = true);
     Accessor(const Accessor &) = delete;
     Accessor &operator=(const Accessor &) = delete;
     Accessor &operator=(Accessor &&other) = delete;
@@ -211,7 +213,7 @@ class Storage {
 
     // NOLINTNEXTLINE(google-default-arguments)
     virtual utils::BasicResult<StorageManipulationError, void> Commit(
-        std::optional<uint64_t> desired_commit_timestamp = {}) = 0;
+        std::optional<uint64_t> desired_commit_timestamp = {}, bool is_main = true) = 0;
 
     virtual void Abort() = 0;
 
@@ -303,19 +305,26 @@ class Storage {
     return EdgeTypeId::FromUint(name_id_mapper_->NameToId(name));
   }
 
-  void SetStorageMode(StorageMode storage_mode);
-
   StorageMode GetStorageMode() const;
 
   virtual void FreeMemory(std::unique_lock<utils::ResourceLock> main_guard) = 0;
 
   void FreeMemory() { FreeMemory({}); }
 
-  virtual std::unique_ptr<Accessor> Access(std::optional<IsolationLevel> override_isolation_level) = 0;
-  std::unique_ptr<Accessor> Access() { return Access(std::optional<IsolationLevel>{}); }
+  virtual std::unique_ptr<Accessor> Access(std::optional<IsolationLevel> override_isolation_level, bool is_main) = 0;
+  std::unique_ptr<Accessor> Access(bool is_main = true) { return Access(std::optional<IsolationLevel>{}, is_main); }
+  std::unique_ptr<Accessor> Access(std::optional<IsolationLevel> override_isolation_level) {
+    return Access(std::move(override_isolation_level), true);
+  }
 
-  virtual std::unique_ptr<Accessor> UniqueAccess(std::optional<IsolationLevel> override_isolation_level) = 0;
-  std::unique_ptr<Accessor> UniqueAccess() { return UniqueAccess(std::optional<IsolationLevel>{}); }
+  virtual std::unique_ptr<Accessor> UniqueAccess(std::optional<IsolationLevel> override_isolation_level,
+                                                 bool is_main) = 0;
+  std::unique_ptr<Accessor> UniqueAccess(bool is_main = true) {
+    return UniqueAccess(std::optional<IsolationLevel>{}, is_main);
+  }
+  std::unique_ptr<Accessor> UniqueAccess(std::optional<IsolationLevel> override_isolation_level) {
+    return UniqueAccess(std::move(override_isolation_level), true);
+  }
 
   enum class SetIsolationLevelError : uint8_t { DisabledForAnalyticalMode };
 
@@ -342,23 +351,24 @@ class Storage {
     return GetInfo(force_dir);
   }
 
-  virtual Transaction CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode) = 0;
+  Transaction CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode) {
+    return CreateTransaction(isolation_level, storage_mode, true);
+  }
 
-  virtual void PrepareForNewEpoch(std::string prev_epoch) = 0;
+  virtual Transaction CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode, bool is_main) = 0;
 
-  virtual auto CreateReplicationClient(const memgraph::replication::ReplicationClientConfig &config)
+  virtual void PrepareForNewEpoch() = 0;
+
+  virtual auto CreateReplicationClient(const memgraph::replication::ReplicationClientConfig &config,
+                                       const memgraph::replication::ReplicationEpoch *current_epoch)
       -> std::unique_ptr<ReplicationClient> = 0;
 
-  virtual auto CreateReplicationServer(const memgraph::replication::ReplicationServerConfig &config)
-      -> std::unique_ptr<ReplicationServer> = 0;
-
   auto ReplicasInfo() const { return repl_storage_state_.ReplicasInfo(); }
   auto GetReplicaState(std::string_view name) const -> std::optional<replication::ReplicaState> {
     return repl_storage_state_.GetReplicaState(name);
   }
 
   // TODO: make non-public
-  memgraph::replication::ReplicationState repl_state_;
   ReplicationStorageState repl_storage_state_;
 
   // Main storage lock.
diff --git a/src/telemetry/collectors.cpp b/src/telemetry/collectors.cpp
index db5517e3e..c36c6abf9 100644
--- a/src/telemetry/collectors.cpp
+++ b/src/telemetry/collectors.cpp
@@ -78,8 +78,12 @@ const nlohmann::json GetResourceUsage(std::filesystem::path root_directory) {
   }
   auto cpu_total = GetCpuUsage(pid);
   cpu["usage"] = cpu_total.second;
-
-  return {{"cpu", cpu}, {"memory", utils::GetMemoryRES()}, {"disk", utils::GetDirDiskUsage(root_directory)}};
+  const auto vm_max_map_count = utils::GetVmMaxMapCount();
+  return {{"cpu", cpu},
+          {"memory", utils::GetMemoryRES()},
+          {"disk", utils::GetDirDiskUsage(root_directory)},
+          {"vm_max_map_count",
+           vm_max_map_count.has_value() ? *vm_max_map_count : memgraph::utils::VM_MAX_MAP_COUNT_DEFAULT}};
 }
 
 }  // namespace memgraph::telemetry
diff --git a/src/telemetry/telemetry.cpp b/src/telemetry/telemetry.cpp
index c5f68cd76..714788841 100644
--- a/src/telemetry/telemetry.cpp
+++ b/src/telemetry/telemetry.cpp
@@ -15,6 +15,7 @@
 
 #include <fmt/format.h>
 
+#include "communication/bolt/metrics.hpp"
 #include "requests/requests.hpp"
 #include "telemetry/collectors.hpp"
 #include "utils/event_counter.hpp"
@@ -154,7 +155,6 @@ void Telemetry::AddDatabaseCollector(dbms::DbmsHandler &dbms_handler) {
 #else
 #endif
 
-#ifdef MG_ENTERPRISE
 void Telemetry::AddStorageCollector(
     dbms::DbmsHandler &dbms_handler,
     memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> &auth) {
@@ -164,33 +164,6 @@ void Telemetry::AddStorageCollector(
     return ToJson(stats);
   });
 }
-#else
-void Telemetry::AddStorageCollector(
-    memgraph::utils::Gatekeeper<memgraph::dbms::Database> &db_gatekeeper,
-    memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> &auth) {
-  AddCollector("storage", [&db_gatekeeper, &auth]() -> nlohmann::json {
-    memgraph::dbms::Statistics stats;
-    auto db_acc_opt = db_gatekeeper.access();
-    MG_ASSERT(db_acc_opt, "Failed to get access to the default database");
-    auto &db_acc = *db_acc_opt;
-    const auto &info = db_acc->GetInfo();
-    const auto &storage_info = info.storage_info;
-    stats.num_vertex = storage_info.vertex_count;
-    stats.num_edges = storage_info.edge_count;
-    stats.triggers = info.triggers;
-    stats.streams = info.streams;
-    stats.num_databases = 1;
-    stats.indices += storage_info.label_indices + storage_info.label_property_indices;
-    stats.constraints += storage_info.existence_constraints + storage_info.unique_constraints;
-    ++stats.storage_modes[(int)storage_info.storage_mode];
-    ++stats.isolation_levels[(int)storage_info.isolation_level];
-    stats.snapshot_enabled = storage_info.durability_snapshot_enabled;
-    stats.wal_enabled = storage_info.durability_wal_enabled;
-    stats.users = auth->AllUsers().size();
-    return ToJson(stats);
-  });
-}
-#endif
 
 void Telemetry::AddExceptionCollector() {
   AddCollector("exception", []() -> nlohmann::json { return memgraph::metrics::global_counters_map.ToJson(); });
diff --git a/src/telemetry/telemetry.hpp b/src/telemetry/telemetry.hpp
index ebe98d6e7..c9b82f9ef 100644
--- a/src/telemetry/telemetry.hpp
+++ b/src/telemetry/telemetry.hpp
@@ -43,15 +43,9 @@ class Telemetry final {
   void AddCollector(const std::string &name, const std::function<const nlohmann::json(void)> &func);
 
   // Specialized collectors
-#ifdef MG_ENTERPRISE
   void AddStorageCollector(
       dbms::DbmsHandler &dbms_handler,
       memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> &auth);
-#else
-  void AddStorageCollector(
-      memgraph::utils::Gatekeeper<memgraph::dbms::Database> &db_gatekeeper,
-      memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> &auth);
-#endif
 
 #ifdef MG_ENTERPRISE
   void AddDatabaseCollector(dbms::DbmsHandler &dbms_handler);
diff --git a/src/utils/file.cpp b/src/utils/file.cpp
index 7be907428..de6590620 100644
--- a/src/utils/file.cpp
+++ b/src/utils/file.cpp
@@ -519,7 +519,7 @@ void OutputFile::FlushBufferInternal() {
   auto *buffer = buffer_;
   auto buffer_position = buffer_position_.load();
   while (buffer_position > 0) {
-    auto written = write(fd_, buffer, buffer_position_);
+    auto written = write(fd_, buffer, buffer_position);
     if (written == -1 && errno == EINTR) {
       continue;
     }
diff --git a/src/utils/gatekeeper.hpp b/src/utils/gatekeeper.hpp
index d187d3528..21dad2543 100644
--- a/src/utils/gatekeeper.hpp
+++ b/src/utils/gatekeeper.hpp
@@ -81,7 +81,7 @@ EvalResult(run_t, Func &&, T &) -> EvalResult<std::invoke_result_t<Func, T &>>;
 template <typename T>
 struct Gatekeeper {
   template <typename... Args>
-  explicit Gatekeeper(Args &&...args) : value_{std::forward<Args>(args)...} {}
+  explicit Gatekeeper(Args &&...args) : value_{std::in_place, std::forward<Args>(args)...} {}
 
   Gatekeeper(Gatekeeper const &) = delete;
   Gatekeeper(Gatekeeper &&) noexcept = delete;
diff --git a/src/utils/logging.hpp b/src/utils/logging.hpp
index 0b8eaa639..9b9e7705b 100644
--- a/src/utils/logging.hpp
+++ b/src/utils/logging.hpp
@@ -47,8 +47,12 @@ inline void AssertFailed(const char *file_name, int line_num, const char *expr,
 #define GET_MESSAGE(...) \
   BOOST_PP_IF(BOOST_PP_EQUAL(BOOST_PP_VARIADIC_SIZE(__VA_ARGS__), 0), "", fmt::format(__VA_ARGS__))
 
-#define MG_ASSERT(expr, ...) \
-  [[likely]] !!(expr) ? (void)0 : ::memgraph::logging::AssertFailed(__FILE__, __LINE__, #expr, GET_MESSAGE(__VA_ARGS__))
+#define MG_ASSERT(expr, ...)                                                                \
+  if (expr) [[likely]] {                                                                    \
+    (void)0;                                                                                \
+  } else {                                                                                  \
+    ::memgraph::logging::AssertFailed(__FILE__, __LINE__, #expr, GET_MESSAGE(__VA_ARGS__)); \
+  }
 
 #ifndef NDEBUG
 #define DMG_ASSERT(expr, ...) MG_ASSERT(expr, __VA_ARGS__)
diff --git a/src/utils/stat.hpp b/src/utils/stat.hpp
index a14973fd7..7e4fab29b 100644
--- a/src/utils/stat.hpp
+++ b/src/utils/stat.hpp
@@ -11,7 +11,9 @@
 
 #pragma once
 
+#include <cstddef>
 #include <filesystem>
+#include <optional>
 
 #include <unistd.h>
 
@@ -20,6 +22,8 @@
 
 namespace memgraph::utils {
 
+static constexpr int64_t VM_MAX_MAP_COUNT_DEFAULT{-1};
+
 /// Returns the number of bytes a directory is using on disk. If the given path
 /// isn't a directory, zero will be returned.
 template <bool IgnoreSymlink = true>
@@ -54,4 +58,20 @@ inline uint64_t GetMemoryRES() {
   return memory;
 }
 
+/// Returns the value of vm.max_map_count, or std::nullopt if it cannot be read or parsed
+inline std::optional<int64_t> GetVmMaxMapCount() {
+  auto vm_max_map_count_data = utils::ReadLines("/proc/sys/vm/max_map_count");
+  if (vm_max_map_count_data.empty()) {
+    return std::nullopt;
+  }
+  if (vm_max_map_count_data.size() != 1) {
+    return std::nullopt;
+  }
+  const auto parts{utils::Split(vm_max_map_count_data[0])};
+  if (parts.size() != 1) {
+    return std::nullopt;
+  }
+  return std::stoi(parts[0]);
+}
+
 }  // namespace memgraph::utils
diff --git a/tests/benchmark/expansion.cpp b/tests/benchmark/expansion.cpp
index dd363a2c1..51f77f310 100644
--- a/tests/benchmark/expansion.cpp
+++ b/tests/benchmark/expansion.cpp
@@ -17,6 +17,7 @@
 #include "query/interpreter.hpp"
 #include "query/interpreter_context.hpp"
 #include "query/typed_value.hpp"
+#include "replication/status.hpp"
 #include "storage/v2/inmemory/storage.hpp"
 #include "storage/v2/isolation_level.hpp"
 #include "utils/logging.hpp"
@@ -28,16 +29,18 @@ class ExpansionBenchFixture : public benchmark::Fixture {
   std::optional<memgraph::query::InterpreterContext> interpreter_context;
   std::optional<memgraph::query::Interpreter> interpreter;
   std::optional<memgraph::utils::Gatekeeper<memgraph::dbms::Database>> db_gk;
+  std::optional<memgraph::replication::ReplicationState> repl_state;
 
   void SetUp(const benchmark::State &state) override {
+    repl_state.emplace(std::nullopt);  // No need for a storage directory, since we are not replicating or restoring
     memgraph::storage::Config config{};
     config.durability.storage_directory = data_directory;
     config.disk.main_storage_directory = data_directory / "disk";
-    db_gk.emplace(std::move(config));
+    db_gk.emplace(std::move(config), *repl_state);
     auto db_acc_opt = db_gk->access();
     MG_ASSERT(db_acc_opt, "Failed to access db");
     auto &db_acc = *db_acc_opt;
-    interpreter_context.emplace(memgraph::query::InterpreterConfig{}, nullptr);
+    interpreter_context.emplace(memgraph::query::InterpreterConfig{}, nullptr, &repl_state.value());
 
     auto label = db_acc->storage()->NameToLabel("Starting");
 
diff --git a/tests/e2e/configuration/storage_info.py b/tests/e2e/configuration/storage_info.py
index c73656ddb..042a57b08 100644
--- a/tests/e2e/configuration/storage_info.py
+++ b/tests/e2e/configuration/storage_info.py
@@ -53,10 +53,15 @@ def test_does_default_config_match():
     config = cursor.fetchall()
 
     # The default value of these is dependent on the given machine.
-    machine_dependent_configurations = ["memory_res", "disk_usage", "memory_tracked", "allocation_limit"]
-
+    machine_dependent_configurations = [
+        "memory_res",
+        "disk_usage",
+        "memory_tracked",
+        "allocation_limit",
+        "vm_max_map_count",
+    ]
     # Number of different data-points returned by SHOW STORAGE INFO
-    assert len(config) == 12
+    assert len(config) == 13
 
     for conf in config:
         conf_name = conf[0]
diff --git a/tests/e2e/replication/show_while_creating_invalid_state.py b/tests/e2e/replication/show_while_creating_invalid_state.py
index be58f71ce..f8fae4cd6 100644
--- a/tests/e2e/replication/show_while_creating_invalid_state.py
+++ b/tests/e2e/replication/show_while_creating_invalid_state.py
@@ -348,7 +348,7 @@ def test_basic_recovery(connection):
 def test_replication_role_recovery(connection):
     # Goal of this test is to check the recovery of main and replica role.
     # 0/ We start all replicas manually: we want to be able to kill them ourselves without relying on external tooling to kill processes.
-    # 1/ We try to add a replica with reserved name which results in an exception
+    # 1/ Previously we added a replica with a reserved name and expected an exception; the schema changed, so there are no reserved names anymore
     # 2/ We check that all replicas have the correct state: they should all be ready.
     # 3/ We kill main.
     # 4/ We re-start main. We check that main indeed has the role main and replicas still have the correct state.
@@ -411,9 +411,9 @@ def test_replication_role_recovery(connection):
             "data_directory": f"{data_directory.name}/main",
         },
     }
-    # 1/
-    with pytest.raises(mgclient.DatabaseError):
-        execute_and_fetch_all(cursor, "REGISTER REPLICA __replication_role SYNC TO '127.0.0.1:10002';")
+    # 1/ Obsolete after the schema change: replica names are no longer reserved
+    # with pytest.raises(mgclient.DatabaseError):
+    #     execute_and_fetch_all(cursor, "REGISTER REPLICA __replication_role SYNC TO '127.0.0.1:10002';")
 
     # 2/
     expected_data = {
diff --git a/tests/integration/mg_import_csv/runner.py b/tests/integration/mg_import_csv/runner.py
index 4bd54dce8..fa08008a6 100755
--- a/tests/integration/mg_import_csv/runner.py
+++ b/tests/integration/mg_import_csv/runner.py
@@ -181,7 +181,7 @@ def execute_test(name, test_path, test_config, memgraph_binary, mg_import_csv_bi
         # Verify the queries
         queries_expected.sort()
         queries_got.sort()
-        assert queries_got == queries_expected, "Expected\n{}\nto be equal to\n" "{}".format(
+        assert queries_got == queries_expected, "Got:\n{}\nExpected:\n" "{}".format(
             list_to_string(queries_got), list_to_string(queries_expected)
         )
     print("\033[1;32m~~ Test successful ~~\033[0m\n")
diff --git a/tests/integration/telemetry/client.cpp b/tests/integration/telemetry/client.cpp
index 05b73830f..558e0a6bc 100644
--- a/tests/integration/telemetry/client.cpp
+++ b/tests/integration/telemetry/client.cpp
@@ -39,14 +39,16 @@ int main(int argc, char **argv) {
 
   memgraph::storage::Config db_config;
   memgraph::storage::UpdatePaths(db_config, data_directory);
+  memgraph::replication::ReplicationState repl_state(ReplicationStateRootPath(db_config));
 
+  memgraph::dbms::DbmsHandler dbms_handler(db_config, repl_state
 #ifdef MG_ENTERPRISE
-  memgraph::dbms::DbmsHandler dbms_handler(db_config, &auth_, false, false);
-  memgraph::query::InterpreterContext interpreter_context_({}, &dbms_handler, &auth_handler, &auth_checker);
-#else
-  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gatekeeper{db_config};
-  memgraph::query::InterpreterContext interpreter_context_({}, nullptr, &auth_handler, &auth_checker);
+                                           ,
+                                           &auth_, false, false
 #endif
+  );
+  memgraph::query::InterpreterContext interpreter_context_({}, &dbms_handler, &repl_state, &auth_handler,
+                                                           &auth_checker);
 
   memgraph::requests::Init();
   memgraph::telemetry::Telemetry telemetry(FLAGS_endpoint, FLAGS_storage_directory, memgraph::utils::GenerateUUID(),
@@ -61,11 +63,10 @@ int main(int argc, char **argv) {
   });
 
   // Memgraph specific collectors
-#ifdef MG_ENTERPRISE
   telemetry.AddStorageCollector(dbms_handler, auth_);
+#ifdef MG_ENTERPRISE
   telemetry.AddDatabaseCollector(dbms_handler);
 #else
-  telemetry.AddStorageCollector(db_gatekeeper, auth_);
   telemetry.AddDatabaseCollector();
 #endif
   telemetry.AddClientCollector();
diff --git a/tests/integration/telemetry/server.py b/tests/integration/telemetry/server.py
index f3c8450b0..3336c3588 100755
--- a/tests/integration/telemetry/server.py
+++ b/tests/integration/telemetry/server.py
@@ -148,6 +148,7 @@ def verify_storage(storage, args):
             assert "cpu" in item["data"]["resources"]
             assert "memory" in item["data"]["resources"]
             assert "disk" in item["data"]["resources"]
+            assert "vm_max_map_count" in item["data"]["resources"]
             assert "uptime" in item["data"]
 
             uptime = item["data"]["uptime"]
diff --git a/tests/manual/single_query.cpp b/tests/manual/single_query.cpp
index f8063c14e..f58af9ae7 100644
--- a/tests/manual/single_query.cpp
+++ b/tests/manual/single_query.cpp
@@ -30,14 +30,16 @@ int main(int argc, char *argv[]) {
 
   auto data_directory = std::filesystem::temp_directory_path() / "single_query_test";
   memgraph::utils::OnScopeExit([&data_directory] { std::filesystem::remove_all(data_directory); });
+  memgraph::storage::Config db_config{.durability.storage_directory = data_directory,
+                                      .disk.main_storage_directory = data_directory / "disk"};
 
   memgraph::license::global_license_checker.EnableTesting();
-  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk(memgraph::storage::Config{
-      .durability.storage_directory = data_directory, .disk.main_storage_directory = data_directory / "disk"});
+  memgraph::replication::ReplicationState repl_state(memgraph::storage::ReplicationStateRootPath(db_config));
+  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk(db_config, repl_state);
   auto db_acc_opt = db_gk.access();
   MG_ASSERT(db_acc_opt, "Failed to access db");
   auto &db_acc = *db_acc_opt;
-  memgraph::query::InterpreterContext interpreter_context(memgraph::query::InterpreterConfig{}, nullptr);
+  memgraph::query::InterpreterContext interpreter_context(memgraph::query::InterpreterConfig{}, nullptr, &repl_state);
   memgraph::query::Interpreter interpreter{&interpreter_context, db_acc};
 
   ResultStreamFaker stream(db_acc->storage());
diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt
index b0783b191..1b7dece1c 100644
--- a/tests/unit/CMakeLists.txt
+++ b/tests/unit/CMakeLists.txt
@@ -313,7 +313,7 @@ add_unit_test(storage_v2_decoder_encoder.cpp)
 target_link_libraries(${test_prefix}storage_v2_decoder_encoder mg-storage-v2)
 
 add_unit_test(storage_v2_durability_inmemory.cpp)
-target_link_libraries(${test_prefix}storage_v2_durability_inmemory mg-storage-v2)
+target_link_libraries(${test_prefix}storage_v2_durability_inmemory mg-storage-v2 mg-dbms)
 
 add_unit_test(storage_rocks.cpp)
 target_link_libraries(${test_prefix}storage_rocks mg-storage-v2)
@@ -346,7 +346,7 @@ add_unit_test(storage_v2_wal_file.cpp)
 target_link_libraries(${test_prefix}storage_v2_wal_file mg-storage-v2 storage_test_utils fmt)
 
 add_unit_test(storage_v2_replication.cpp)
-target_link_libraries(${test_prefix}storage_v2_replication mg-storage-v2 fmt)
+target_link_libraries(${test_prefix}storage_v2_replication mg-storage-v2 mg-dbms fmt)
 
 add_unit_test(storage_v2_isolation_level.cpp)
 target_link_libraries(${test_prefix}storage_v2_isolation_level mg-storage-v2)
@@ -388,19 +388,15 @@ if(MG_ENTERPRISE)
   target_link_libraries(${test_prefix}slk_advanced mg-storage-v2)
 endif()
 
-if(MG_ENTERPRISE)
-  add_unit_test(slk_core.cpp)
-  target_link_libraries(${test_prefix}slk_core mg-slk gflags fmt)
+add_unit_test(slk_core.cpp)
+target_link_libraries(${test_prefix}slk_core mg-slk gflags fmt)
 
-  add_unit_test(slk_streams.cpp)
-  target_link_libraries(${test_prefix}slk_streams mg-slk gflags fmt)
-endif()
+add_unit_test(slk_streams.cpp)
+target_link_libraries(${test_prefix}slk_streams mg-slk gflags fmt)
 
 # Test mg-rpc
-if(MG_ENTERPRISE)
-  add_unit_test(rpc.cpp)
-  target_link_libraries(${test_prefix}rpc mg-rpc)
-endif()
+add_unit_test(rpc.cpp)
+target_link_libraries(${test_prefix}rpc mg-rpc)
 
 # Test websocket
 find_package(Boost REQUIRED)
@@ -415,6 +411,9 @@ if(MG_ENTERPRISE)
 
   add_unit_test_with_custom_main(dbms_handler.cpp)
   target_link_libraries(${test_prefix}dbms_handler mg-query mg-auth mg-glue mg-dbms)
+else()
+  add_unit_test_with_custom_main(dbms_handler_community.cpp)
+  target_link_libraries(${test_prefix}dbms_handler_community mg-query mg-auth mg-glue mg-dbms)
 endif()
 
 # Test distributed
diff --git a/tests/unit/database_get_info.cpp b/tests/unit/database_get_info.cpp
index 043042100..aac38159d 100644
--- a/tests/unit/database_get_info.cpp
+++ b/tests/unit/database_get_info.cpp
@@ -12,13 +12,17 @@
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
 #include <filesystem>
+#include <optional>
 
 #include "dbms/database.hpp"
 #include "disk_test_utils.hpp"
 #include "query/interpret/awesome_memgraph_functions.hpp"
 #include "query/interpreter_context.hpp"
+#include "replication/state.hpp"
+#include "storage/v2/config.hpp"
 #include "storage/v2/disk/storage.hpp"
 #include "storage/v2/inmemory/storage.hpp"
+#include "storage/v2/replication/enums.hpp"
 
 // NOLINTNEXTLINE(google-build-using-namespace)
 using namespace memgraph::storage;
@@ -30,6 +34,8 @@ template <typename StorageType>
 class InfoTest : public testing::Test {
  protected:
   void SetUp() {
+    repl_state.emplace(memgraph::storage::ReplicationStateRootPath(config));
+    db_gk.emplace(config, *repl_state);
     auto db_acc_opt = db_gk->access();
     MG_ASSERT(db_acc_opt, "Failed to access db");
     auto &db_acc = *db_acc_opt;
@@ -43,6 +49,7 @@ class InfoTest : public testing::Test {
   void TearDown() {
     db_acc_.reset();
     db_gk.reset();
+    repl_state.reset();
     if (std::is_same<StorageType, memgraph::storage::DiskStorage>::value) {
       disk_test_utils::RemoveRocksDbDirs(testSuite);
     }
@@ -52,8 +59,10 @@ class InfoTest : public testing::Test {
   StorageMode mode{std::is_same_v<StorageType, DiskStorage> ? StorageMode::ON_DISK_TRANSACTIONAL
                                                             : StorageMode::IN_MEMORY_TRANSACTIONAL};
 
+  std::optional<memgraph::replication::ReplicationState> repl_state;
   std::optional<memgraph::dbms::DatabaseAccess> db_acc_;
-  std::optional<memgraph::utils::Gatekeeper<memgraph::dbms::Database>> db_gk{
+  std::optional<memgraph::utils::Gatekeeper<memgraph::dbms::Database>> db_gk;
+  memgraph::storage::Config config{
       [&]() {
         memgraph::storage::Config config{};
         memgraph::storage::UpdatePaths(config, storage_directory);
diff --git a/tests/unit/dbms_database.cpp b/tests/unit/dbms_database.cpp
index e2a6bf449..20e1f55ac 100644
--- a/tests/unit/dbms_database.cpp
+++ b/tests/unit/dbms_database.cpp
@@ -12,16 +12,19 @@
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
 #include <filesystem>
+#include <optional>
 
 #include "dbms/database_handler.hpp"
 #include "dbms/global.hpp"
 
 #include "license/license.hpp"
 #include "query_plan_common.hpp"
+#include "replication/state.hpp"
 #include "storage/v2/view.hpp"
 
 std::filesystem::path storage_directory{std::filesystem::temp_directory_path() / "MG_test_unit_dbms_database"};
 
+memgraph::replication::ReplicationState generic_repl_state{std::nullopt};
 memgraph::storage::Config default_conf(std::string name = "") {
   return {.durability = {.storage_directory = storage_directory / name,
                          .snapshot_wal_mode =
@@ -53,19 +56,19 @@ TEST_F(DBMS_Database, New) {
                        .snapshot_wal_mode =
                            memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL},
         .disk = {.main_storage_directory = storage_directory / "disk"}};
-    auto db2 = db_handler.New("db2", db_config);
+    auto db2 = db_handler.New("db2", db_config, generic_repl_state);
     ASSERT_TRUE(db2.HasValue() && db2.GetValue());
     ASSERT_TRUE(std::filesystem::exists(storage_directory / "db2"));
   }
   {
     // With default config
-    auto db3 = db_handler.New("db3", default_conf("db3"));
+    auto db3 = db_handler.New("db3", default_conf("db3"), generic_repl_state);
     ASSERT_TRUE(db3.HasValue() && db3.GetValue());
     ASSERT_TRUE(std::filesystem::exists(storage_directory / "db3"));
-    auto db4 = db_handler.New("db4", default_conf("four"));
+    auto db4 = db_handler.New("db4", default_conf("four"), generic_repl_state);
     ASSERT_TRUE(db4.HasValue() && db4.GetValue());
     ASSERT_TRUE(std::filesystem::exists(storage_directory / "four"));
-    auto db5 = db_handler.New("db5", default_conf("db3"));
+    auto db5 = db_handler.New("db5", default_conf("db3"), generic_repl_state);
     ASSERT_TRUE(db5.HasError() && db5.GetError() == memgraph::dbms::NewError::EXISTS);
   }
 
@@ -80,9 +83,9 @@ TEST_F(DBMS_Database, New) {
 TEST_F(DBMS_Database, Get) {
   memgraph::dbms::DatabaseHandler db_handler;
 
-  auto db1 = db_handler.New("db1", default_conf("db1"));
-  auto db2 = db_handler.New("db2", default_conf("db2"));
-  auto db3 = db_handler.New("db3", default_conf("db3"));
+  auto db1 = db_handler.New("db1", default_conf("db1"), generic_repl_state);
+  auto db2 = db_handler.New("db2", default_conf("db2"), generic_repl_state);
+  auto db3 = db_handler.New("db3", default_conf("db3"), generic_repl_state);
 
   ASSERT_TRUE(db1.HasValue());
   ASSERT_TRUE(db2.HasValue());
@@ -104,9 +107,9 @@ TEST_F(DBMS_Database, Get) {
 TEST_F(DBMS_Database, Delete) {
   memgraph::dbms::DatabaseHandler db_handler;
 
-  auto db1 = db_handler.New("db1", default_conf("db1"));
-  auto db2 = db_handler.New("db2", default_conf("db2"));
-  auto db3 = db_handler.New("db3", default_conf("db3"));
+  auto db1 = db_handler.New("db1", default_conf("db1"), generic_repl_state);
+  auto db2 = db_handler.New("db2", default_conf("db2"), generic_repl_state);
+  auto db3 = db_handler.New("db3", default_conf("db3"), generic_repl_state);
 
   ASSERT_TRUE(db1.HasValue());
   ASSERT_TRUE(db2.HasValue());
@@ -141,8 +144,8 @@ TEST_F(DBMS_Database, DeleteAndRecover) {
   memgraph::dbms::DatabaseHandler db_handler;
 
   {
-    auto db1 = db_handler.New("db1", default_conf("db1"));
-    auto db2 = db_handler.New("db2", default_conf("db2"));
+    auto db1 = db_handler.New("db1", default_conf("db1"), generic_repl_state);
+    auto db2 = db_handler.New("db2", default_conf("db2"), generic_repl_state);
 
     memgraph::storage::Config conf_w_snap{
         .durability = {.storage_directory = storage_directory / "db3",
@@ -151,7 +154,7 @@ TEST_F(DBMS_Database, DeleteAndRecover) {
                        .snapshot_on_exit = true},
         .disk = {.main_storage_directory = storage_directory / "db3" / "disk"}};
 
-    auto db3 = db_handler.New("db3", conf_w_snap);
+    auto db3 = db_handler.New("db3", conf_w_snap, generic_repl_state);
 
     ASSERT_TRUE(db1.HasValue());
     ASSERT_TRUE(db2.HasValue());
@@ -187,8 +190,8 @@ TEST_F(DBMS_Database, DeleteAndRecover) {
 
   {
     // Recover graphs (only db3)
-    auto db1 = db_handler.New("db1", default_conf("db1"));
-    auto db2 = db_handler.New("db2", default_conf("db2"));
+    auto db1 = db_handler.New("db1", default_conf("db1"), generic_repl_state);
+    auto db2 = db_handler.New("db2", default_conf("db2"), generic_repl_state);
 
     memgraph::storage::Config conf_w_rec{
         .durability = {.storage_directory = storage_directory / "db3",
@@ -197,7 +200,7 @@ TEST_F(DBMS_Database, DeleteAndRecover) {
                            memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL},
         .disk = {.main_storage_directory = storage_directory / "db3" / "disk"}};
 
-    auto db3 = db_handler.New("db3", conf_w_rec);
+    auto db3 = db_handler.New("db3", conf_w_rec, generic_repl_state);
 
     // Check content
     {
diff --git a/tests/unit/dbms_handler.cpp b/tests/unit/dbms_handler.cpp
index 13fe7e69b..75efddefe 100644
--- a/tests/unit/dbms_handler.cpp
+++ b/tests/unit/dbms_handler.cpp
@@ -10,6 +10,7 @@
 // licenses/APL.txt.
 
 #include "query/auth_query_handler.hpp"
+#include "storage/v2/config.hpp"
 #ifdef MG_ENTERPRISE
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
@@ -51,15 +52,18 @@ class TestEnvironment : public ::testing::Environment {
     auth =
         std::make_unique<memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock>>(
             storage_directory / "auth");
-    ptr_ = std::make_unique<memgraph::dbms::DbmsHandler>(storage_conf, auth.get(), false, true);
+    repl_state_.emplace(memgraph::storage::ReplicationStateRootPath(storage_conf));
+    ptr_ = std::make_unique<memgraph::dbms::DbmsHandler>(storage_conf, *repl_state_, auth.get(), false, true);
   }
 
   void TearDown() override {
     ptr_.reset();
     auth.reset();
+    repl_state_.reset();
   }
 
   static std::unique_ptr<memgraph::dbms::DbmsHandler> ptr_;
+  std::optional<memgraph::replication::ReplicationState> repl_state_;
 };
 
 std::unique_ptr<memgraph::dbms::DbmsHandler> TestEnvironment::ptr_ = nullptr;
diff --git a/tests/unit/dbms_handler_community.cpp b/tests/unit/dbms_handler_community.cpp
new file mode 100644
index 000000000..efce2854d
--- /dev/null
+++ b/tests/unit/dbms_handler_community.cpp
@@ -0,0 +1,106 @@
+// Copyright 2023 Memgraph Ltd.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+// License, and you may not use this file except in compliance with the Business Source License.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+#include "query/auth_query_handler.hpp"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <filesystem>
+#include <system_error>
+
+#include "dbms/constants.hpp"
+#include "dbms/dbms_handler.hpp"
+#include "dbms/global.hpp"
+#include "glue/auth_checker.hpp"
+#include "glue/auth_handler.hpp"
+#include "query/config.hpp"
+#include "query/interpreter.hpp"
+#include "storage/v2/config.hpp"
+
+// Global
+std::filesystem::path storage_directory{std::filesystem::temp_directory_path() / "MG_test_unit_dbms_handler_community"};
+static memgraph::storage::Config storage_conf;
+std::unique_ptr<memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock>> auth;
+
+// Let this be global so we can test it in different states throughout
+
+class TestEnvironment : public ::testing::Environment {
+ public:
+  static memgraph::dbms::DbmsHandler *get() { return ptr_.get(); }
+
+  void SetUp() override {
+    // Setup config
+    memgraph::storage::UpdatePaths(storage_conf, storage_directory);
+    storage_conf.durability.snapshot_wal_mode =
+        memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL;
+    // Clean storage directory (running multiple parallel tests, run only if this is the first process)
+    if (std::filesystem::exists(storage_directory)) {
+      memgraph::utils::OutputFile lock_file_handle_;
+      lock_file_handle_.Open(storage_directory / ".lock", memgraph::utils::OutputFile::Mode::OVERWRITE_EXISTING);
+      if (lock_file_handle_.AcquireLock()) {
+        std::filesystem::remove_all(storage_directory);
+      }
+    }
+    auth =
+        std::make_unique<memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock>>(
+            storage_directory / "auth");
+    repl_state_.emplace(memgraph::storage::ReplicationStateRootPath(storage_conf));
+    ptr_ = std::make_unique<memgraph::dbms::DbmsHandler>(storage_conf, *repl_state_);
+  }
+
+  void TearDown() override {
+    ptr_.reset();
+    auth.reset();
+    repl_state_.reset();
+  }
+
+  static std::unique_ptr<memgraph::dbms::DbmsHandler> ptr_;
+  std::optional<memgraph::replication::ReplicationState> repl_state_;
+};
+
+std::unique_ptr<memgraph::dbms::DbmsHandler> TestEnvironment::ptr_ = nullptr;
+
+class DBMS_Handler : public testing::Test {};
+using DBMS_HandlerDeath = DBMS_Handler;
+
+TEST(DBMS_Handler, Init) {
+  // Check that the default db has been created successfully
+  std::vector<std::string> dirs = {"snapshots", "streams", "triggers", "wal"};
+  for (const auto &dir : dirs)
+    ASSERT_TRUE(std::filesystem::exists(storage_directory / dir)) << (storage_directory / dir);
+  auto &dbms = *TestEnvironment::get();
+  {
+    const auto all = dbms.All();
+    ASSERT_EQ(all.size(), 1);
+    ASSERT_EQ(all[0], memgraph::dbms::kDefaultDB);
+  }
+}
+
+TEST(DBMS_Handler, Get) {
+  auto &dbms = *TestEnvironment::get();
+  auto default_db = dbms.Get();
+  ASSERT_TRUE(default_db);
+  ASSERT_TRUE(default_db->storage() != nullptr);
+  ASSERT_TRUE(default_db->streams() != nullptr);
+  ASSERT_TRUE(default_db->trigger_store() != nullptr);
+  ASSERT_TRUE(default_db->thread_pool() != nullptr);
+  ASSERT_EQ(default_db->storage()->id(), memgraph::dbms::kDefaultDB);
+  auto conf = storage_conf;
+  conf.name = memgraph::dbms::kDefaultDB;
+  ASSERT_EQ(default_db->storage()->config_, conf);
+}
+
+int main(int argc, char *argv[]) {
+  ::testing::InitGoogleTest(&argc, argv);
+  // gtest takes ownership of the TestEnvironment ptr - we don't delete it.
+  ::testing::AddGlobalTestEnvironment(new TestEnvironment);
+  return RUN_ALL_TESTS();
+}
diff --git a/tests/unit/interpreter.cpp b/tests/unit/interpreter.cpp
index 0779adb41..679cff692 100644
--- a/tests/unit/interpreter.cpp
+++ b/tests/unit/interpreter.cpp
@@ -27,9 +27,11 @@
 #include "query/exceptions.hpp"
 #include "query/interpreter.hpp"
 #include "query/interpreter_context.hpp"
+#include "query/metadata.hpp"
 #include "query/stream.hpp"
 #include "query/typed_value.hpp"
 #include "query_common.hpp"
+#include "replication/state.hpp"
 #include "storage/v2/inmemory/storage.hpp"
 #include "storage/v2/isolation_level.hpp"
 #include "storage/v2/property_value.hpp"
@@ -60,9 +62,9 @@ class InterpreterTest : public ::testing::Test {
   const std::string testSuiteCsv = "interpreter_csv";
   std::filesystem::path data_directory = std::filesystem::temp_directory_path() / "MG_tests_unit_interpreter";
 
-  InterpreterTest() : interpreter_context({}, kNoHandler) {}
+  InterpreterTest() {}
 
-  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk{
+  memgraph::storage::Config config{
       [&]() {
         memgraph::storage::Config config{};
         config.durability.storage_directory = data_directory;
@@ -75,6 +77,8 @@ class InterpreterTest : public ::testing::Test {
       }()  // iile
   };
 
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk{config, repl_state};
   memgraph::dbms::DatabaseAccess db{
       [&]() {
         auto db_acc_opt = db_gk.access();
@@ -88,7 +92,7 @@ class InterpreterTest : public ::testing::Test {
       }()  // iile
   };
 
-  memgraph::query::InterpreterContext interpreter_context;
+  memgraph::query::InterpreterContext interpreter_context{{}, kNoHandler, &repl_state};
 
   void TearDown() override {
     if (std::is_same<StorageType, memgraph::storage::DiskStorage>::value) {
@@ -1131,7 +1135,8 @@ TYPED_TEST(InterpreterTest, AllowLoadCsvConfig) {
       config2.force_on_disk = true;
     }
 
-    memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk2(config2);
+    memgraph::replication::ReplicationState repl_state2{memgraph::storage::ReplicationStateRootPath(config2)};
+    memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk2(config2, repl_state2);
     auto db_acc_opt = db_gk2.access();
     ASSERT_TRUE(db_acc_opt) << "Failed to access db2";
     auto &db_acc = *db_acc_opt;
@@ -1140,7 +1145,9 @@ TYPED_TEST(InterpreterTest, AllowLoadCsvConfig) {
                                                  : memgraph::storage::StorageMode::IN_MEMORY_TRANSACTIONAL))
         << "Wrong storage mode!";
 
-    memgraph::query::InterpreterContext csv_interpreter_context{{.query = {.allow_load_csv = allow_load_csv}}, nullptr};
+    memgraph::replication::ReplicationState repl_state{std::nullopt};
+    memgraph::query::InterpreterContext csv_interpreter_context{
+        {.query = {.allow_load_csv = allow_load_csv}}, nullptr, &repl_state};
     InterpreterFaker interpreter_faker{&csv_interpreter_context, db_acc};
     for (const auto &query : queries) {
       if (allow_load_csv) {
@@ -1268,6 +1275,24 @@ TYPED_TEST(InterpreterTest, ExecutionStatsValues) {
   }
 }
 
+TYPED_TEST(InterpreterTest, ExecutionStatsValuesPropertiesSet) {
+  {
+    auto [stream, qid] = this->Prepare(
+        "CREATE (u:Employee {Uid: 'EMP_AAAAA', FirstName: 'Bong', LastName: 'Revilla'}) RETURN u.name AS name;");
+    this->Pull(&stream);
+  }
+  {
+    auto [stream, qid] = this->Prepare(
+        "MATCH (node:Employee) WHERE node.Uid='EMP_AAAAA' SET node={FirstName: 'James', LastName: 'Revilla', Uid: "
+        "'EMP_AAAAA', CreatedOn: 'null', CreatedBy: 'null', LastModifiedOn: '1698226931701', LastModifiedBy: 'null', "
+        "Description: 'null'};");
+    this->Pull(&stream);
+    auto stats = stream.GetSummary().at("stats").ValueMap();
+    auto key = memgraph::query::ExecutionStatsKeyToString(memgraph::query::ExecutionStats::Key::UPDATED_PROPERTIES);
+    ASSERT_EQ(stats[key].ValueInt(), 8);
+  }
+}
+
 TYPED_TEST(InterpreterTest, NotificationsValidStructure) {
   {
     auto [stream, qid] = this->Prepare("MATCH (n) DELETE n;");
diff --git a/tests/unit/query_dump.cpp b/tests/unit/query_dump.cpp
index 646e31f1e..f212d9189 100644
--- a/tests/unit/query_dump.cpp
+++ b/tests/unit/query_dump.cpp
@@ -25,6 +25,7 @@
 #include "query/interpreter_context.hpp"
 #include "query/stream/streams.hpp"
 #include "query/typed_value.hpp"
+#include "storage/v2/config.hpp"
 #include "storage/v2/disk/storage.hpp"
 #include "storage/v2/edge_accessor.hpp"
 #include "storage/v2/inmemory/storage.hpp"
@@ -282,7 +283,7 @@ class DumpTest : public ::testing::Test {
   const std::string testSuite = "query_dump";
   std::filesystem::path data_directory{std::filesystem::temp_directory_path() / "MG_tests_unit_query_dump_class"};
 
-  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk{
+  memgraph::storage::Config config{
       [&]() {
         memgraph::storage::Config config{};
         config.durability.storage_directory = data_directory;
@@ -295,6 +296,8 @@ class DumpTest : public ::testing::Test {
       }()  // iile
   };
 
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk{config, repl_state};
   memgraph::dbms::DatabaseAccess db{
       [&]() {
         auto db_acc_opt = db_gk.access();
@@ -308,7 +311,7 @@ class DumpTest : public ::testing::Test {
       }()  // iile
   };
 
-  memgraph::query::InterpreterContext context{memgraph::query::InterpreterConfig{}, nullptr};
+  memgraph::query::InterpreterContext context{memgraph::query::InterpreterConfig{}, nullptr, &repl_state};
 
   void TearDown() override {
     if (std::is_same<StorageType, memgraph::storage::DiskStorage>::value) {
@@ -697,8 +700,9 @@ TYPED_TEST(DumpTest, CheckStateVertexWithMultipleProperties) {
     config.disk = disk_test_utils::GenerateOnDiskConfig("query-dump-s1").disk;
     config.force_on_disk = true;
   }
+  memgraph::replication::ReplicationState repl_state(ReplicationStateRootPath(config));
 
-  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk(config);
+  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk(config, repl_state);
   auto db_acc_opt = db_gk.access();
   ASSERT_TRUE(db_acc_opt) << "Failed to access db";
   auto &db_acc = *db_acc_opt;
@@ -707,7 +711,7 @@ TYPED_TEST(DumpTest, CheckStateVertexWithMultipleProperties) {
                                                : memgraph::storage::StorageMode::IN_MEMORY_TRANSACTIONAL))
       << "Wrong storage mode!";
 
-  memgraph::query::InterpreterContext interpreter_context(memgraph::query::InterpreterConfig{}, nullptr);
+  memgraph::query::InterpreterContext interpreter_context(memgraph::query::InterpreterConfig{}, nullptr, &repl_state);
 
   {
     ResultStreamFaker stream(this->db->storage());
@@ -811,7 +815,8 @@ TYPED_TEST(DumpTest, CheckStateSimpleGraph) {
     config.force_on_disk = true;
   }
 
-  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk(config);
+  memgraph::replication::ReplicationState repl_state{ReplicationStateRootPath(config)};
+  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk{config, repl_state};
   auto db_acc_opt = db_gk.access();
   ASSERT_TRUE(db_acc_opt) << "Failed to access db";
   auto &db_acc = *db_acc_opt;
@@ -820,7 +825,7 @@ TYPED_TEST(DumpTest, CheckStateSimpleGraph) {
                                                : memgraph::storage::StorageMode::IN_MEMORY_TRANSACTIONAL))
       << "Wrong storage mode!";
 
-  memgraph::query::InterpreterContext interpreter_context(memgraph::query::InterpreterConfig{}, nullptr);
+  memgraph::query::InterpreterContext interpreter_context(memgraph::query::InterpreterConfig{}, nullptr, &repl_state);
   {
     ResultStreamFaker stream(this->db->storage());
     memgraph::query::AnyStream query_stream(&stream, memgraph::utils::NewDeleteResource());
diff --git a/tests/unit/query_plan_edge_cases.cpp b/tests/unit/query_plan_edge_cases.cpp
index f37848d91..52769fa55 100644
--- a/tests/unit/query_plan_edge_cases.cpp
+++ b/tests/unit/query_plan_edge_cases.cpp
@@ -40,20 +40,23 @@ class QueryExecution : public testing::Test {
 
   std::filesystem::path data_directory{std::filesystem::temp_directory_path() / "MG_tests_unit_query_plan_edge_cases"};
 
-  std::optional<memgraph::utils::Gatekeeper<memgraph::dbms::Database>> db_gk{
-      [&]() {
-        memgraph::storage::Config config{};
-        config.durability.storage_directory = data_directory;
-        config.disk.main_storage_directory = config.durability.storage_directory / "disk";
-        if constexpr (std::is_same_v<StorageType, memgraph::storage::DiskStorage>) {
-          config.disk = disk_test_utils::GenerateOnDiskConfig(testSuite).disk;
-          config.force_on_disk = true;
-        }
-        return config;
-      }()  // iile
-  };
+  std::optional<memgraph::replication::ReplicationState> repl_state;
+  std::optional<memgraph::utils::Gatekeeper<memgraph::dbms::Database>> db_gk;
 
   void SetUp() {
+    auto config = [&]() {
+      memgraph::storage::Config config{};
+      config.durability.storage_directory = data_directory;
+      config.disk.main_storage_directory = config.durability.storage_directory / "disk";
+      if constexpr (std::is_same_v<StorageType, memgraph::storage::DiskStorage>) {
+        config.disk = disk_test_utils::GenerateOnDiskConfig(testSuite).disk;
+        config.force_on_disk = true;
+      }
+      return config;
+    }();  // iile
+
+    repl_state.emplace(memgraph::storage::ReplicationStateRootPath(config));
+    db_gk.emplace(config, *repl_state);
     auto db_acc_opt = db_gk->access();
     MG_ASSERT(db_acc_opt, "Failed to access db");
     auto &db_acc = *db_acc_opt;
@@ -63,7 +66,7 @@ class QueryExecution : public testing::Test {
               "Wrong storage mode!");
     db_acc_ = std::move(db_acc);
 
-    interpreter_context_.emplace(memgraph::query::InterpreterConfig{}, nullptr);
+    interpreter_context_.emplace(memgraph::query::InterpreterConfig{}, nullptr, &repl_state.value());
     interpreter_.emplace(&*interpreter_context_, *db_acc_);
   }
 
@@ -72,6 +75,7 @@ class QueryExecution : public testing::Test {
     interpreter_context_ = std::nullopt;
     db_acc_.reset();
     db_gk.reset();
+    repl_state.reset();
     if (std::is_same<StorageType, memgraph::storage::DiskStorage>::value) {
       disk_test_utils::RemoveRocksDbDirs(testSuite);
     }
diff --git a/tests/unit/query_streams.cpp b/tests/unit/query_streams.cpp
index a64c2b090..5dfd0a8f1 100644
--- a/tests/unit/query_streams.cpp
+++ b/tests/unit/query_streams.cpp
@@ -24,6 +24,7 @@
 #include "query/interpreter.hpp"
 #include "query/interpreter_context.hpp"
 #include "query/stream/streams.hpp"
+#include "storage/v2/config.hpp"
 #include "storage/v2/disk/storage.hpp"
 #include "storage/v2/inmemory/storage.hpp"
 #include "test_utils.hpp"
@@ -76,7 +77,7 @@ class StreamsTestFixture : public ::testing::Test {
   // InterpreterContext::auth_checker_ is used in the Streams object, but only in the message processing part. Because
   // these tests don't send any messages, the auth_checker_ pointer can be left as nullptr.
 
-  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk{
+  memgraph::storage::Config config{
       [&]() {
         memgraph::storage::Config config{};
         config.durability.storage_directory = data_directory_;
@@ -88,6 +89,9 @@ class StreamsTestFixture : public ::testing::Test {
         return config;
       }()  // iile
   };
+
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk{config, repl_state};
   memgraph::dbms::DatabaseAccess db_{
       [&]() {
         auto db_acc_opt = db_gk.access();
@@ -100,7 +104,7 @@ class StreamsTestFixture : public ::testing::Test {
         return db_acc;
       }()  // iile
   };
-  memgraph::query::InterpreterContext interpreter_context_{memgraph::query::InterpreterConfig{}, nullptr};
+  memgraph::query::InterpreterContext interpreter_context_{memgraph::query::InterpreterConfig{}, nullptr, &repl_state};
   std::filesystem::path streams_data_directory_{data_directory_ / "separate-dir-for-test"};
   std::optional<StreamsTest> proxyStreams_;
 
diff --git a/tests/unit/replication_persistence_helper.cpp b/tests/unit/replication_persistence_helper.cpp
index b2fcae53e..ade9ef638 100644
--- a/tests/unit/replication_persistence_helper.cpp
+++ b/tests/unit/replication_persistence_helper.cpp
@@ -20,83 +20,95 @@
 #include <optional>
 #include <string>
 
-using namespace memgraph::storage::replication;
+using namespace memgraph::replication::durability;
 using namespace memgraph::replication;
 
-class ReplicationPersistanceHelperTest : public testing::Test {
- protected:
-  void SetUp() override {}
+static_assert(sizeof(ReplicationRoleEntry) == 168,
+              "Most likely you modified ReplicationRoleEntry without updating the tests. ");
 
-  void TearDown() override {}
+static_assert(sizeof(ReplicationReplicaEntry) == 160,
+              "Most likely you modified ReplicationReplicaEntry without updating the tests.");
 
-  ReplicationStatus CreateReplicationStatus(std::string name, std::string ip_address, uint16_t port,
-                                            ReplicationMode sync_mode, std::chrono::seconds replica_check_frequency,
-                                            std::optional<ReplicationClientConfig::SSL> ssl,
-                                            std::optional<ReplicationRole> role) const {
-    return ReplicationStatus{.name = name,
-                             .ip_address = ip_address,
-                             .port = port,
-                             .sync_mode = sync_mode,
-                             .replica_check_frequency = replica_check_frequency,
-                             .ssl = ssl,
-                             .role = role};
-  }
-
-  static_assert(
-      sizeof(ReplicationStatus) == 160,
-      "Most likely you modified ReplicationStatus without updating the tests. Please modify CreateReplicationStatus. ");
-};
-
-TEST_F(ReplicationPersistanceHelperTest, BasicTestAllAttributesInitialized) {
-  auto replicas_status = CreateReplicationStatus(
-      "name", "ip_address", 0, ReplicationMode::SYNC, std::chrono::seconds(1),
-      ReplicationClientConfig::SSL{.key_file = "key_file", .cert_file = "cert_file"}, ReplicationRole::REPLICA);
-
-  auto json_status = ReplicationStatusToJSON(ReplicationStatus(replicas_status));
-  auto replicas_status_converted = JSONToReplicationStatus(std::move(json_status));
-
-  ASSERT_EQ(replicas_status, *replicas_status_converted);
+TEST(ReplicationDurability, V1Main) {
+  auto const role_entry = ReplicationRoleEntry{.version = DurabilityVersion::V1,
+                                               .role = MainRole{
+                                                   .epoch = ReplicationEpoch{"TEST_STRING"},
+                                               }};
+  nlohmann::json j;
+  to_json(j, role_entry);
+  ReplicationRoleEntry deser;
+  from_json(j, deser);
+  ASSERT_EQ(role_entry, deser);
 }
 
-TEST_F(ReplicationPersistanceHelperTest, BasicTestOnlyMandatoryAttributesInitialized) {
-  auto replicas_status = CreateReplicationStatus("name", "ip_address", 0, ReplicationMode::SYNC,
-                                                 std::chrono::seconds(1), std::nullopt, std::nullopt);
-
-  auto json_status = ReplicationStatusToJSON(ReplicationStatus(replicas_status));
-  auto replicas_status_converted = JSONToReplicationStatus(std::move(json_status));
-
-  ASSERT_EQ(replicas_status, *replicas_status_converted);
+TEST(ReplicationDurability, V2Main) {
+  auto const role_entry = ReplicationRoleEntry{.version = DurabilityVersion::V2,
+                                               .role = MainRole{
+                                                   .epoch = ReplicationEpoch{"TEST_STRING"},
+                                               }};
+  nlohmann::json j;
+  to_json(j, role_entry);
+  ReplicationRoleEntry deser;
+  from_json(j, deser);
+  ASSERT_EQ(role_entry, deser);
 }
 
-TEST_F(ReplicationPersistanceHelperTest, BasicTestAllAttributesButSSLInitialized) {
-  auto replicas_status = CreateReplicationStatus("name", "ip_address", 0, ReplicationMode::SYNC,
-                                                 std::chrono::seconds(1), std::nullopt, ReplicationRole::MAIN);
-
-  auto json_status = ReplicationStatusToJSON(ReplicationStatus(replicas_status));
-  auto replicas_status_converted = JSONToReplicationStatus(std::move(json_status));
-
-  ASSERT_EQ(replicas_status, *replicas_status_converted);
+TEST(ReplicationDurability, V1Replica) {
+  auto const role_entry =
+      ReplicationRoleEntry{.version = DurabilityVersion::V1,
+                           .role = ReplicaRole{
+                               .config = ReplicationServerConfig{.ip_address = "000.123.456.789", .port = 2023},
+                           }};
+  nlohmann::json j;
+  to_json(j, role_entry);
+  ReplicationRoleEntry deser;
+  from_json(j, deser);
+  ASSERT_EQ(role_entry, deser);
 }
 
-TEST_F(ReplicationPersistanceHelperTest, BasicTestAllAttributesButTimeoutInitialized) {
-  auto replicas_status = CreateReplicationStatus(
-      "name", "ip_address", 0, ReplicationMode::SYNC, std::chrono::seconds(1),
-      ReplicationClientConfig::SSL{.key_file = "key_file", .cert_file = "cert_file"}, ReplicationRole::REPLICA);
-
-  auto json_status = ReplicationStatusToJSON(ReplicationStatus(replicas_status));
-  auto replicas_status_converted = JSONToReplicationStatus(std::move(json_status));
-
-  ASSERT_EQ(replicas_status, *replicas_status_converted);
+TEST(ReplicationDurability, V2Replica) {
+  auto const role_entry =
+      ReplicationRoleEntry{.version = DurabilityVersion::V2,
+                           .role = ReplicaRole{
+                               .config = ReplicationServerConfig{.ip_address = "000.123.456.789", .port = 2023},
+                           }};
+  nlohmann::json j;
+  to_json(j, role_entry);
+  ReplicationRoleEntry deser;
+  from_json(j, deser);
+  ASSERT_EQ(role_entry, deser);
 }
 
-TEST_F(ReplicationPersistanceHelperTest, BasicTestAllAttributesButReplicationRoleInitialized) {
-  // this one is importand for backwards compatibility
-  auto replicas_status = CreateReplicationStatus(
-      "name", "ip_address", 0, ReplicationMode::SYNC, std::chrono::seconds(1),
-      ReplicationClientConfig::SSL{.key_file = "key_file", .cert_file = "cert_file"}, std::nullopt);
-
-  auto json_status = ReplicationStatusToJSON(ReplicationStatus(replicas_status));
-  auto replicas_status_converted = JSONToReplicationStatus(std::move(json_status));
-
-  ASSERT_EQ(replicas_status, *replicas_status_converted);
+TEST(ReplicationDurability, ReplicaEntrySync) {
+  using namespace std::chrono_literals;
+  using namespace std::string_literals;
+  auto const replica_entry = ReplicationReplicaEntry{.config = ReplicationClientConfig{
+                                                         .name = "TEST_NAME"s,
+                                                         .mode = ReplicationMode::SYNC,
+                                                         .ip_address = "000.123.456.789"s,
+                                                         .port = 2023,
+                                                         .replica_check_frequency = 3s,
+                                                     }};
+  nlohmann::json j;
+  to_json(j, replica_entry);
+  ReplicationReplicaEntry deser;
+  from_json(j, deser);
+  ASSERT_EQ(replica_entry, deser);
+}
+
+TEST(ReplicationDurability, ReplicaEntryAsync) {
+  using namespace std::chrono_literals;
+  using namespace std::string_literals;
+  auto const replica_entry = ReplicationReplicaEntry{.config = ReplicationClientConfig{
+                                                         .name = "TEST_NAME"s,
+                                                         .mode = ReplicationMode::ASYNC,
+                                                         .ip_address = "000.123.456.789"s,
+                                                         .port = 2023,
+                                                         .replica_check_frequency = 3s,
+                                                     }};
+  nlohmann::json j;
+  to_json(j, replica_entry);
+  ReplicationReplicaEntry deser;
+  from_json(j, deser);
+  ASSERT_EQ(replica_entry, deser);
 }
diff --git a/tests/unit/storage_v2_constraints.cpp b/tests/unit/storage_v2_constraints.cpp
index 4a60c5330..dfee45a0e 100644
--- a/tests/unit/storage_v2_constraints.cpp
+++ b/tests/unit/storage_v2_constraints.cpp
@@ -41,7 +41,8 @@ class ConstraintsTest : public testing::Test {
     /// TODO: andi How to make this better? Because currentlly for every test changed you need to create a configuration
     config_ = disk_test_utils::GenerateOnDiskConfig(testSuite);
     config_.force_on_disk = std::is_same_v<StorageType, memgraph::storage::DiskStorage>;
-    db_gk_.emplace(config_);
+    repl_state_.emplace(memgraph::storage::ReplicationStateRootPath(config_));
+    db_gk_.emplace(config_, *repl_state_);
     auto db_acc_opt = db_gk_->access();
     MG_ASSERT(db_acc_opt, "Failed to access db");
     db_acc_ = *db_acc_opt;
@@ -56,6 +57,7 @@ class ConstraintsTest : public testing::Test {
     storage = nullptr;
     db_acc_.reset();
     db_gk_.reset();
+    repl_state_.reset();
 
     if (std::is_same<StorageType, memgraph::storage::DiskStorage>::value) {
       disk_test_utils::RemoveRocksDbDirs(testSuite);
@@ -64,6 +66,7 @@ class ConstraintsTest : public testing::Test {
 
   Storage *storage;
   memgraph::storage::Config config_;
+  std::optional<memgraph::replication::ReplicationState> repl_state_;
   std::optional<memgraph::dbms::DatabaseAccess> db_acc_;
   std::optional<memgraph::utils::Gatekeeper<memgraph::dbms::Database>> db_gk_;
   PropertyId prop1;
diff --git a/tests/unit/storage_v2_durability_inmemory.cpp b/tests/unit/storage_v2_durability_inmemory.cpp
index 2e30c7eeb..725db9283 100644
--- a/tests/unit/storage_v2_durability_inmemory.cpp
+++ b/tests/unit/storage_v2_durability_inmemory.cpp
@@ -10,6 +10,7 @@
 // licenses/APL.txt.
 
 #include <gmock/gmock.h>
+#include <gtest/gtest-death-test.h>
 #include <gtest/gtest.h>
 #include <sys/types.h>
 #include <sys/wait.h>
@@ -22,6 +23,9 @@
 #include <iostream>
 #include <thread>
 
+#include "dbms/database.hpp"
+#include "replication/state.hpp"
+#include "storage/v2/config.hpp"
 #include "storage/v2/durability/marker.hpp"
 #include "storage/v2/durability/paths.hpp"
 #include "storage/v2/durability/snapshot.hpp"
@@ -333,7 +337,6 @@ class DurabilityTest : public ::testing::TestWithParam<bool> {
         case DatasetType::ONLY_BASE_WITH_EXTENDED_INDICES_AND_CONSTRAINTS:
         case DatasetType::ONLY_EXTENDED_WITH_BASE_INDICES_AND_CONSTRAINTS:
         case DatasetType::BASE_WITH_EXTENDED: {
-          const auto &i = acc->ListAllIndices();
           const auto l_stats = acc->GetIndexStats(base_label_unindexed);
           ASSERT_TRUE(l_stats);
           ASSERT_EQ(l_stats->count, 1);
@@ -790,13 +793,14 @@ INSTANTIATE_TEST_CASE_P(EdgesWithoutProperties, DurabilityTest, ::testing::Value
 TEST_P(DurabilityTest, SnapshotOnExit) {
   // Create snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}));
-    CreateBaseDataset(store.get(), GetParam());
-    VerifyDataset(store.get(), DatasetType::ONLY_BASE, GetParam());
-    CreateExtendedDataset(store.get());
-    VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, GetParam());
+    memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                     .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), GetParam());
+    VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam());
+    CreateExtendedDataset(db.storage());
+    VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam());
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 1);
@@ -805,16 +809,17 @@ TEST_P(DurabilityTest, SnapshotOnExit) {
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover snapshot.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-  VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, GetParam());
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
+  VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam());
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -824,12 +829,14 @@ TEST_P(DurabilityTest, SnapshotOnExit) {
 TEST_P(DurabilityTest, SnapshotPeriodic) {
   // Create snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory,
-                        .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT,
-                        .snapshot_interval = std::chrono::milliseconds(2000)}}));
-    CreateBaseDataset(store.get(), GetParam());
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {.storage_directory = storage_directory,
+                       .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT,
+                       .snapshot_interval = std::chrono::milliseconds(2000)}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), GetParam());
     std::this_thread::sleep_for(std::chrono::milliseconds(2500));
   }
 
@@ -839,16 +846,17 @@ TEST_P(DurabilityTest, SnapshotPeriodic) {
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover snapshot.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-  VerifyDataset(store.get(), DatasetType::ONLY_BASE, GetParam());
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
+  VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam());
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -863,14 +871,16 @@ TEST_P(DurabilityTest, SnapshotFallback) {
     auto const expected_write_time = std::chrono::milliseconds(750);
     auto const snapshot_interval = std::chrono::milliseconds(3000);
 
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT,
-             .snapshot_interval = snapshot_interval,
-             .snapshot_retention_count = 10,  // We don't anticipate that we make this many
-         }}));
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT,
+            .snapshot_interval = snapshot_interval,
+            .snapshot_retention_count = 10,  // We don't anticipate that we make this many
+        }};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
 
     auto const ensure_snapshot_is_written = [&](auto &&func) {
       auto const pre_count = GetSnapshotsList().size();
@@ -885,9 +895,9 @@ TEST_P(DurabilityTest, SnapshotFallback) {
       // its oldest active should be newer than the transaction used when running `func`
     };
 
-    ensure_snapshot_is_written([&]() { CreateBaseDataset(store.get(), GetParam()); });
+    ensure_snapshot_is_written([&]() { CreateBaseDataset(db.storage(), GetParam()); });
     number_to_save = GetSnapshotsList().size();
-    ensure_snapshot_is_written([&]() { CreateExtendedDataset(store.get()); });
+    ensure_snapshot_is_written([&]() { CreateExtendedDataset(db.storage()); });
   }
 
   ASSERT_EQ(GetBackupSnapshotsList().size(), 0);
@@ -906,16 +916,17 @@ TEST_P(DurabilityTest, SnapshotFallback) {
   }
 
   // Recover snapshot.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-  VerifyDataset(store.get(), DatasetType::ONLY_BASE, GetParam());
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
+  VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam());
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -925,10 +936,12 @@ TEST_P(DurabilityTest, SnapshotFallback) {
 TEST_P(DurabilityTest, SnapshotEverythingCorrupt) {
   // Create unrelated snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}));
-    auto acc = store->Access();
+    memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                     .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+
+    auto acc = db.storage()->Access();
     for (uint64_t i = 0; i < 1000; ++i) {
       acc->CreateVertex();
     }
@@ -951,14 +964,17 @@ TEST_P(DurabilityTest, SnapshotEverythingCorrupt) {
 
   // Create snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory,
-                        .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT,
-                        .snapshot_interval = std::chrono::milliseconds(2000)}}));
-    CreateBaseDataset(store.get(), GetParam());
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {.storage_directory = storage_directory,
+                       .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT,
+                       .snapshot_interval = std::chrono::milliseconds(2000)}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+
+    CreateBaseDataset(db.storage(), GetParam());
     std::this_thread::sleep_for(std::chrono::milliseconds(2500));
-    CreateExtendedDataset(store.get());
+    CreateExtendedDataset(db.storage());
     std::this_thread::sleep_for(std::chrono::milliseconds(2500));
   }
 
@@ -991,11 +1007,14 @@ TEST_P(DurabilityTest, SnapshotEverythingCorrupt) {
 
   // Recover snapshot.
   ASSERT_DEATH(
-      {
-        std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-            {.items = {.properties_on_edges = GetParam()},
-             .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-      },
+      ([&]() {
+        memgraph::storage::Config config{
+            .items = {.properties_on_edges = GetParam()},
+            .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+        memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+        memgraph::dbms::Database db{config, repl_state};
+      }())  // iile
+      ,
       "");
 }
 
@@ -1003,10 +1022,11 @@ TEST_P(DurabilityTest, SnapshotEverythingCorrupt) {
 TEST_P(DurabilityTest, SnapshotRetention) {
   // Create unrelated snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}));
-    auto acc = store->Access();
+    memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                     .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    auto acc = db.storage()->Access();
     for (uint64_t i = 0; i < 1000; ++i) {
       acc->CreateVertex();
     }
@@ -1020,15 +1040,17 @@ TEST_P(DurabilityTest, SnapshotRetention) {
 
   // Create snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory,
-                        .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT,
-                        .snapshot_interval = std::chrono::milliseconds(2000),
-                        .snapshot_retention_count = 3}}));
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {.storage_directory = storage_directory,
+                       .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT,
+                       .snapshot_interval = std::chrono::milliseconds(2000),
+                       .snapshot_retention_count = 3}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
     // Restore unrelated snapshots after the database has been started.
     RestoreBackups();
-    CreateBaseDataset(store.get(), GetParam());
+    CreateBaseDataset(db.storage(), GetParam());
     // Allow approximately 5 snapshots to be created.
     std::this_thread::sleep_for(std::chrono::milliseconds(10000));
   }
@@ -1057,16 +1079,17 @@ TEST_P(DurabilityTest, SnapshotRetention) {
   }
 
   // Recover snapshot.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-  VerifyDataset(store.get(), DatasetType::ONLY_BASE, GetParam());
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
+  VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam());
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -1076,13 +1099,14 @@ TEST_P(DurabilityTest, SnapshotRetention) {
 TEST_P(DurabilityTest, SnapshotMixedUUID) {
   // Create snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}));
-    CreateBaseDataset(store.get(), GetParam());
-    VerifyDataset(store.get(), DatasetType::ONLY_BASE, GetParam());
-    CreateExtendedDataset(store.get());
-    VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, GetParam());
+    memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                     .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), GetParam());
+    VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam());
+    CreateExtendedDataset(db.storage());
+    VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam());
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 1);
@@ -1092,19 +1116,22 @@ TEST_P(DurabilityTest, SnapshotMixedUUID) {
 
   // Recover snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-    VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, GetParam());
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam());
   }
 
   // Create another snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}));
-    CreateBaseDataset(store.get(), GetParam());
-    VerifyDataset(store.get(), DatasetType::ONLY_BASE, GetParam());
+    memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                     .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), GetParam());
+    VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam());
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 1);
@@ -1121,16 +1148,17 @@ TEST_P(DurabilityTest, SnapshotMixedUUID) {
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover snapshot.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-  VerifyDataset(store.get(), DatasetType::ONLY_BASE, GetParam());
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
+  VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam());
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -1140,10 +1168,11 @@ TEST_P(DurabilityTest, SnapshotMixedUUID) {
 TEST_P(DurabilityTest, SnapshotBackup) {
   // Create snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}));
-    auto acc = store->Access();
+    memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                     .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    auto acc = db.storage()->Access();
     for (uint64_t i = 0; i < 1000; ++i) {
       acc->CreateVertex();
     }
@@ -1157,11 +1186,13 @@ TEST_P(DurabilityTest, SnapshotBackup) {
 
   // Start storage without recovery.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory,
-                        .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT,
-                        .snapshot_interval = std::chrono::minutes(20)}}));
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {.storage_directory = storage_directory,
+                       .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT,
+                       .snapshot_interval = std::chrono::minutes(20)}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 0);
@@ -1174,13 +1205,14 @@ TEST_P(DurabilityTest, SnapshotBackup) {
 TEST_F(DurabilityTest, SnapshotWithoutPropertiesOnEdgesRecoveryWithPropertiesOnEdges) {
   // Create snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = false},
-         .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}));
-    CreateBaseDataset(store.get(), false);
-    VerifyDataset(store.get(), DatasetType::ONLY_BASE, false);
-    CreateExtendedDataset(store.get());
-    VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, false);
+    memgraph::storage::Config config{.items = {.properties_on_edges = false},
+                                     .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), false);
+    VerifyDataset(db.storage(), DatasetType::ONLY_BASE, false);
+    CreateExtendedDataset(db.storage());
+    VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, false);
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 1);
@@ -1189,16 +1221,17 @@ TEST_F(DurabilityTest, SnapshotWithoutPropertiesOnEdgesRecoveryWithPropertiesOnE
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover snapshot.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = true},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-  VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, false);
+  memgraph::storage::Config config{.items = {.properties_on_edges = true},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
+  VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, false);
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -1208,13 +1241,14 @@ TEST_F(DurabilityTest, SnapshotWithoutPropertiesOnEdgesRecoveryWithPropertiesOnE
 TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesRecoveryWithoutPropertiesOnEdges) {
   // Create snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = true},
-         .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}));
-    CreateBaseDataset(store.get(), true);
-    VerifyDataset(store.get(), DatasetType::ONLY_BASE, true);
-    CreateExtendedDataset(store.get());
-    VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, true);
+    memgraph::storage::Config config{.items = {.properties_on_edges = true},
+                                     .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), true);
+    VerifyDataset(db.storage(), DatasetType::ONLY_BASE, true);
+    CreateExtendedDataset(db.storage());
+    VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, true);
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 1);
@@ -1224,11 +1258,14 @@ TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesRecoveryWithoutPropertiesOnE
 
   // Recover snapshot.
   ASSERT_DEATH(
-      {
-        std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-            {.items = {.properties_on_edges = false},
-             .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-      },
+      ([&]() {
+        memgraph::storage::Config config{
+            .items = {.properties_on_edges = false},
+            .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+        memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+        memgraph::dbms::Database db{config, repl_state};
+      }())  // iile
+      ,
       "");
 }
 
@@ -1236,16 +1273,17 @@ TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesRecoveryWithoutPropertiesOnE
 TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesButUnusedRecoveryWithoutPropertiesOnEdges) {
   // Create snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = true},
-         .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}));
-    CreateBaseDataset(store.get(), true);
-    VerifyDataset(store.get(), DatasetType::ONLY_BASE, true);
-    CreateExtendedDataset(store.get());
-    VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, true);
+    memgraph::storage::Config config{.items = {.properties_on_edges = true},
+                                     .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), true);
+    VerifyDataset(db.storage(), DatasetType::ONLY_BASE, true);
+    CreateExtendedDataset(db.storage());
+    VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, true);
     // Remove properties from edges.
     {
-      auto acc = store->Access();
+      auto acc = db.storage()->Access();
       for (auto vertex : acc->Vertices(memgraph::storage::View::OLD)) {
         auto in_edges = vertex.InEdges(memgraph::storage::View::OLD);
         ASSERT_TRUE(in_edges.HasValue());
@@ -1278,16 +1316,17 @@ TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesButUnusedRecoveryWithoutProp
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover snapshot.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = false},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-  VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, false);
+  memgraph::storage::Config config{.items = {.properties_on_edges = false},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
+  VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, false);
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -1297,15 +1336,17 @@ TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesButUnusedRecoveryWithoutProp
 TEST_P(DurabilityTest, WalBasic) {
   // Create WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    CreateBaseDataset(store.get(), GetParam());
-    CreateExtendedDataset(store.get());
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), GetParam());
+    CreateExtendedDataset(db.storage());
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 0);
@@ -1314,16 +1355,17 @@ TEST_P(DurabilityTest, WalBasic) {
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover WALs.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-  VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, GetParam());
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
+  VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam());
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -1333,15 +1375,17 @@ TEST_P(DurabilityTest, WalBasic) {
 TEST_P(DurabilityTest, WalBackup) {
   // Create WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_size_kibibytes = 1,
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    auto acc = store->Access();
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_size_kibibytes = 1,
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    auto acc = db.storage()->Access();
     for (uint64_t i = 0; i < 1000; ++i) {
       acc->CreateVertex();
     }
@@ -1356,12 +1400,14 @@ TEST_P(DurabilityTest, WalBackup) {
 
   // Start storage without recovery.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20)}}));
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20)}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 0);
@@ -1374,14 +1420,16 @@ TEST_P(DurabilityTest, WalBackup) {
 TEST_P(DurabilityTest, WalAppendToExisting) {
   // Create WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    CreateBaseDataset(store.get(), GetParam());
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), GetParam());
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 0);
@@ -1391,23 +1439,27 @@ TEST_P(DurabilityTest, WalAppendToExisting) {
 
   // Recover WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-    VerifyDataset(store.get(), DatasetType::ONLY_BASE, GetParam());
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam());
   }
 
   // Recover WALs and create more WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .recover_on_startup = true,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    CreateExtendedDataset(store.get());
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .recover_on_startup = true,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateExtendedDataset(db.storage());
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 0);
@@ -1416,16 +1468,17 @@ TEST_P(DurabilityTest, WalAppendToExisting) {
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover WALs.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-  VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, GetParam());
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
+  VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam());
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -1438,34 +1491,37 @@ TEST_P(DurabilityTest, WalCreateInSingleTransaction) {
 
   // Create WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    auto acc = store->Access();
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    auto acc = db.storage()->Access();
     auto v1 = acc->CreateVertex();
     gid_v1 = v1.Gid();
     auto v2 = acc->CreateVertex();
     gid_v2 = v2.Gid();
-    auto e1Res = acc->CreateEdge(&v1, &v2, store->NameToEdgeType("e1"));
+    auto e1Res = acc->CreateEdge(&v1, &v2, db.storage()->NameToEdgeType("e1"));
     ASSERT_TRUE(e1Res.HasValue());
     auto e1 = std::move(e1Res.GetValue());
     gid_e1 = e1.Gid();
-    ASSERT_TRUE(v1.AddLabel(store->NameToLabel("l11")).HasValue());
-    ASSERT_TRUE(v1.AddLabel(store->NameToLabel("l12")).HasValue());
-    ASSERT_TRUE(v1.AddLabel(store->NameToLabel("l13")).HasValue());
+    ASSERT_TRUE(v1.AddLabel(db.storage()->NameToLabel("l11")).HasValue());
+    ASSERT_TRUE(v1.AddLabel(db.storage()->NameToLabel("l12")).HasValue());
+    ASSERT_TRUE(v1.AddLabel(db.storage()->NameToLabel("l13")).HasValue());
     if (GetParam()) {
       ASSERT_TRUE(
-          e1.SetProperty(store->NameToProperty("test"), memgraph::storage::PropertyValue("nandare")).HasValue());
+          e1.SetProperty(db.storage()->NameToProperty("test"), memgraph::storage::PropertyValue("nandare")).HasValue());
     }
-    ASSERT_TRUE(v2.AddLabel(store->NameToLabel("l21")).HasValue());
-    ASSERT_TRUE(v2.SetProperty(store->NameToProperty("hello"), memgraph::storage::PropertyValue("world")).HasValue());
+    ASSERT_TRUE(v2.AddLabel(db.storage()->NameToLabel("l21")).HasValue());
+    ASSERT_TRUE(
+        v2.SetProperty(db.storage()->NameToProperty("hello"), memgraph::storage::PropertyValue("world")).HasValue());
     auto v3 = acc->CreateVertex();
     gid_v3 = v3.Gid();
-    ASSERT_TRUE(v3.SetProperty(store->NameToProperty("v3"), memgraph::storage::PropertyValue(42)).HasValue());
+    ASSERT_TRUE(v3.SetProperty(db.storage()->NameToProperty("v3"), memgraph::storage::PropertyValue(42)).HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
 
@@ -1475,11 +1531,12 @@ TEST_P(DurabilityTest, WalCreateInSingleTransaction) {
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover WALs.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
 
     auto indices = acc->ListAllIndices();
     ASSERT_EQ(indices.label.size(), 0);
@@ -1492,8 +1549,8 @@ TEST_P(DurabilityTest, WalCreateInSingleTransaction) {
       ASSERT_TRUE(v1);
       auto labels = v1->Labels(memgraph::storage::View::OLD);
       ASSERT_TRUE(labels.HasValue());
-      ASSERT_THAT(*labels, UnorderedElementsAre(store->NameToLabel("l11"), store->NameToLabel("l12"),
-                                                store->NameToLabel("l13")));
+      ASSERT_THAT(*labels, UnorderedElementsAre(db.storage()->NameToLabel("l11"), db.storage()->NameToLabel("l12"),
+                                                db.storage()->NameToLabel("l13")));
       auto props = v1->Properties(memgraph::storage::View::OLD);
       ASSERT_TRUE(props.HasValue());
       ASSERT_EQ(props->size(), 0);
@@ -1508,7 +1565,7 @@ TEST_P(DurabilityTest, WalCreateInSingleTransaction) {
       auto edge_props = edge.Properties(memgraph::storage::View::OLD);
       ASSERT_TRUE(edge_props.HasValue());
       if (GetParam()) {
-        ASSERT_THAT(*edge_props, UnorderedElementsAre(std::make_pair(store->NameToProperty("test"),
+        ASSERT_THAT(*edge_props, UnorderedElementsAre(std::make_pair(db.storage()->NameToProperty("test"),
                                                                      memgraph::storage::PropertyValue("nandare"))));
       } else {
         ASSERT_EQ(edge_props->size(), 0);
@@ -1519,10 +1576,10 @@ TEST_P(DurabilityTest, WalCreateInSingleTransaction) {
       ASSERT_TRUE(v2);
       auto labels = v2->Labels(memgraph::storage::View::OLD);
       ASSERT_TRUE(labels.HasValue());
-      ASSERT_THAT(*labels, UnorderedElementsAre(store->NameToLabel("l21")));
+      ASSERT_THAT(*labels, UnorderedElementsAre(db.storage()->NameToLabel("l21")));
       auto props = v2->Properties(memgraph::storage::View::OLD);
       ASSERT_TRUE(props.HasValue());
-      ASSERT_THAT(*props, UnorderedElementsAre(std::make_pair(store->NameToProperty("hello"),
+      ASSERT_THAT(*props, UnorderedElementsAre(std::make_pair(db.storage()->NameToProperty("hello"),
                                                               memgraph::storage::PropertyValue("world"))));
       auto in_edges = v2->InEdges(memgraph::storage::View::OLD);
       ASSERT_TRUE(in_edges.HasValue());
@@ -1532,7 +1589,7 @@ TEST_P(DurabilityTest, WalCreateInSingleTransaction) {
       auto edge_props = edge.Properties(memgraph::storage::View::OLD);
       ASSERT_TRUE(edge_props.HasValue());
       if (GetParam()) {
-        ASSERT_THAT(*edge_props, UnorderedElementsAre(std::make_pair(store->NameToProperty("test"),
+        ASSERT_THAT(*edge_props, UnorderedElementsAre(std::make_pair(db.storage()->NameToProperty("test"),
                                                                      memgraph::storage::PropertyValue("nandare"))));
       } else {
         ASSERT_EQ(edge_props->size(), 0);
@@ -1549,8 +1606,8 @@ TEST_P(DurabilityTest, WalCreateInSingleTransaction) {
       ASSERT_EQ(labels->size(), 0);
       auto props = v3->Properties(memgraph::storage::View::OLD);
       ASSERT_TRUE(props.HasValue());
-      ASSERT_THAT(*props, UnorderedElementsAre(
-                              std::make_pair(store->NameToProperty("v3"), memgraph::storage::PropertyValue(42))));
+      ASSERT_THAT(*props, UnorderedElementsAre(std::make_pair(db.storage()->NameToProperty("v3"),
+                                                              memgraph::storage::PropertyValue(42))));
       auto in_edges = v3->InEdges(memgraph::storage::View::OLD);
       ASSERT_TRUE(in_edges.HasValue());
       ASSERT_EQ(in_edges->edges.size(), 0);
@@ -1562,9 +1619,9 @@ TEST_P(DurabilityTest, WalCreateInSingleTransaction) {
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -1574,49 +1631,51 @@ TEST_P(DurabilityTest, WalCreateInSingleTransaction) {
 TEST_P(DurabilityTest, WalCreateAndRemoveEverything) {
   // Create WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    CreateBaseDataset(store.get(), GetParam());
-    CreateExtendedDataset(store.get());
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), GetParam());
+    CreateExtendedDataset(db.storage());
     auto indices = [&] {
-      auto acc = store->Access();
+      auto acc = db.storage()->Access();
       auto res = acc->ListAllIndices();
       acc->Commit();
       return res;
     }();  // iile
     for (const auto &index : indices.label) {
-      auto unique_acc = store->UniqueAccess();
+      auto unique_acc = db.storage()->UniqueAccess();
       ASSERT_FALSE(unique_acc->DropIndex(index).HasError());
       ASSERT_FALSE(unique_acc->Commit().HasError());
     }
     for (const auto &index : indices.label_property) {
-      auto unique_acc = store->UniqueAccess();
+      auto unique_acc = db.storage()->UniqueAccess();
       ASSERT_FALSE(unique_acc->DropIndex(index.first, index.second).HasError());
       ASSERT_FALSE(unique_acc->Commit().HasError());
     }
     auto constraints = [&] {
-      auto acc = store->Access();
+      auto acc = db.storage()->Access();
       auto res = acc->ListAllConstraints();
       acc->Commit();
       return res;
     }();  // iile
     for (const auto &constraint : constraints.existence) {
-      auto unique_acc = store->UniqueAccess();
+      auto unique_acc = db.storage()->UniqueAccess();
       ASSERT_FALSE(unique_acc->DropExistenceConstraint(constraint.first, constraint.second).HasError());
       ASSERT_FALSE(unique_acc->Commit().HasError());
     }
     for (const auto &constraint : constraints.unique) {
-      auto unique_acc = store->UniqueAccess();
+      auto unique_acc = db.storage()->UniqueAccess();
       ASSERT_EQ(unique_acc->DropUniqueConstraint(constraint.first, constraint.second),
                 memgraph::storage::UniqueConstraints::DeletionStatus::SUCCESS);
       ASSERT_FALSE(unique_acc->Commit().HasError());
     }
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     for (auto vertex : acc->Vertices(memgraph::storage::View::OLD)) {
       ASSERT_TRUE(acc->DetachDeleteVertex(&vertex).HasValue());
     }
@@ -1629,11 +1688,12 @@ TEST_P(DurabilityTest, WalCreateAndRemoveEverything) {
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover WALs.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto indices = acc->ListAllIndices();
     ASSERT_EQ(indices.label.size(), 0);
     ASSERT_EQ(indices.label_property.size(), 0);
@@ -1650,9 +1710,9 @@ TEST_P(DurabilityTest, WalCreateAndRemoveEverything) {
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -1665,39 +1725,44 @@ TEST_P(DurabilityTest, WalTransactionOrdering) {
 
   // Create WAL.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_size_kibibytes = 100000,
-             .wal_file_flush_every_n_tx = kFlushWalEvery,
-         }}));
-    auto acc1 = store->Access();
-    auto acc2 = store->Access();
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_size_kibibytes = 100000,
+            .wal_file_flush_every_n_tx = kFlushWalEvery,
+        }};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    auto acc1 = db.storage()->Access();
+    auto acc2 = db.storage()->Access();
 
     // Create vertex in transaction 2.
     {
       auto vertex2 = acc2->CreateVertex();
       gid2 = vertex2.Gid();
-      ASSERT_TRUE(vertex2.SetProperty(store->NameToProperty("id"), memgraph::storage::PropertyValue(2)).HasValue());
+      ASSERT_TRUE(
+          vertex2.SetProperty(db.storage()->NameToProperty("id"), memgraph::storage::PropertyValue(2)).HasValue());
     }
 
-    auto acc3 = store->Access();
+    auto acc3 = db.storage()->Access();
 
     // Create vertex in transaction 3.
     {
       auto vertex3 = acc3->CreateVertex();
       gid3 = vertex3.Gid();
-      ASSERT_TRUE(vertex3.SetProperty(store->NameToProperty("id"), memgraph::storage::PropertyValue(3)).HasValue());
+      ASSERT_TRUE(
+          vertex3.SetProperty(db.storage()->NameToProperty("id"), memgraph::storage::PropertyValue(3)).HasValue());
     }
 
     // Create vertex in transaction 1.
     {
       auto vertex1 = acc1->CreateVertex();
       gid1 = vertex1.Gid();
-      ASSERT_TRUE(vertex1.SetProperty(store->NameToProperty("id"), memgraph::storage::PropertyValue(1)).HasValue());
+      ASSERT_TRUE(
+          vertex1.SetProperty(db.storage()->NameToProperty("id"), memgraph::storage::PropertyValue(1)).HasValue());
     }
 
     // Commit transaction 3, then 1, then 2.
@@ -1760,11 +1825,12 @@ TEST_P(DurabilityTest, WalTransactionOrdering) {
   }
 
   // Recover WALs.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     for (auto [gid, id] : std::vector<std::pair<memgraph::storage::Gid, int64_t>>{{gid1, 1}, {gid2, 2}, {gid3, 3}}) {
       auto vertex = acc->FindVertex(gid, memgraph::storage::View::OLD);
       ASSERT_TRUE(vertex);
@@ -1774,15 +1840,15 @@ TEST_P(DurabilityTest, WalTransactionOrdering) {
       auto props = vertex->Properties(memgraph::storage::View::OLD);
       ASSERT_TRUE(props.HasValue());
       ASSERT_EQ(props->size(), 1);
-      ASSERT_EQ(props->at(store->NameToProperty("id")), memgraph::storage::PropertyValue(id));
+      ASSERT_EQ(props->at(db.storage()->NameToProperty("id")), memgraph::storage::PropertyValue(id));
     }
   }
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -1792,18 +1858,20 @@ TEST_P(DurabilityTest, WalTransactionOrdering) {
 TEST_P(DurabilityTest, WalCreateAndRemoveOnlyBaseDataset) {
   // Create WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    CreateBaseDataset(store.get(), GetParam());
-    CreateExtendedDataset(store.get());
-    auto label_indexed = store->NameToLabel("base_indexed");
-    auto label_unindexed = store->NameToLabel("base_unindexed");
-    auto acc = store->Access();
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), GetParam());
+    CreateExtendedDataset(db.storage());
+    auto label_indexed = db.storage()->NameToLabel("base_indexed");
+    auto label_unindexed = db.storage()->NameToLabel("base_unindexed");
+    auto acc = db.storage()->Access();
     for (auto vertex : acc->Vertices(memgraph::storage::View::OLD)) {
       auto has_indexed = vertex.HasLabel(label_indexed, memgraph::storage::View::OLD);
       ASSERT_TRUE(has_indexed.HasValue());
@@ -1820,16 +1888,17 @@ TEST_P(DurabilityTest, WalCreateAndRemoveOnlyBaseDataset) {
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover WALs.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-  VerifyDataset(store.get(), DatasetType::ONLY_EXTENDED_WITH_BASE_INDICES_AND_CONSTRAINTS, GetParam());
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
+  VerifyDataset(db.storage(), DatasetType::ONLY_EXTENDED_WITH_BASE_INDICES_AND_CONSTRAINTS, GetParam());
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -1841,16 +1910,18 @@ TEST_P(DurabilityTest, WalDeathResilience) {
   if (pid == 0) {
     // Create WALs.
     {
-      std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-          {.items = {.properties_on_edges = GetParam()},
-           .durability = {
-               .storage_directory = storage_directory,
-               .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-               .snapshot_interval = std::chrono::minutes(20),
-               .wal_file_flush_every_n_tx = kFlushWalEvery}}));
+      memgraph::storage::Config config{
+          .items = {.properties_on_edges = GetParam()},
+          .durability = {
+              .storage_directory = storage_directory,
+              .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+              .snapshot_interval = std::chrono::minutes(20),
+              .wal_file_flush_every_n_tx = kFlushWalEvery}};
+      memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+      memgraph::dbms::Database db{config, repl_state};
       // Create one million vertices.
       for (uint64_t i = 0; i < 1000000; ++i) {
-        auto acc = store->Access();
+        auto acc = db.storage()->Access();
         acc->CreateVertex();
         MG_ASSERT(!acc->Commit().HasError(), "Couldn't commit transaction!");
       }
@@ -1876,17 +1947,19 @@ TEST_P(DurabilityTest, WalDeathResilience) {
   const uint64_t kExtraItems = 1000;
   uint64_t count = 0;
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .recover_on_startup = true,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_flush_every_n_tx = kFlushWalEvery,
-         }}));
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .recover_on_startup = true,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_flush_every_n_tx = kFlushWalEvery,
+        }};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
     {
-      auto acc = store->Access();
+      auto acc = db.storage()->Access();
       auto iterable = acc->Vertices(memgraph::storage::View::OLD);
       for (auto it = iterable.begin(); it != iterable.end(); ++it) {
         ++count;
@@ -1895,7 +1968,7 @@ TEST_P(DurabilityTest, WalDeathResilience) {
     }
 
     {
-      auto acc = store->Access();
+      auto acc = db.storage()->Access();
       for (uint64_t i = 0; i < kExtraItems; ++i) {
         acc->CreateVertex();
       }
@@ -1909,12 +1982,13 @@ TEST_P(DurabilityTest, WalDeathResilience) {
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover WALs.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
   {
     uint64_t current = 0;
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto iterable = acc->Vertices(memgraph::storage::View::OLD);
     for (auto it = iterable.begin(); it != iterable.end(); ++it) {
       ++current;
@@ -1924,9 +1998,9 @@ TEST_P(DurabilityTest, WalDeathResilience) {
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -1936,15 +2010,17 @@ TEST_P(DurabilityTest, WalDeathResilience) {
 TEST_P(DurabilityTest, WalMissingSecond) {
   // Create unrelated WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_size_kibibytes = 1,
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    auto acc = store->Access();
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_size_kibibytes = 1,
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    auto acc = db.storage()->Access();
     for (uint64_t i = 0; i < 1000; ++i) {
       acc->CreateVertex();
     }
@@ -1960,29 +2036,32 @@ TEST_P(DurabilityTest, WalMissingSecond) {
 
   // Create WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_size_kibibytes = 1,
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_size_kibibytes = 1,
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
     const uint64_t kNumVertices = 1000;
     std::vector<memgraph::storage::Gid> gids;
     gids.reserve(kNumVertices);
     for (uint64_t i = 0; i < kNumVertices; ++i) {
-      auto acc = store->Access();
+      auto acc = db.storage()->Access();
       auto vertex = acc->CreateVertex();
       gids.push_back(vertex.Gid());
       ASSERT_FALSE(acc->Commit().HasError());
     }
     for (uint64_t i = 0; i < kNumVertices; ++i) {
-      auto acc = store->Access();
+      auto acc = db.storage()->Access();
       auto vertex = acc->FindVertex(gids[i], memgraph::storage::View::OLD);
       ASSERT_TRUE(vertex);
-      ASSERT_TRUE(vertex->SetProperty(store->NameToProperty("nandare"), memgraph::storage::PropertyValue("haihaihai!"))
-                      .HasValue());
+      ASSERT_TRUE(
+          vertex->SetProperty(db.storage()->NameToProperty("nandare"), memgraph::storage::PropertyValue("haihaihai!"))
+              .HasValue());
       ASSERT_FALSE(acc->Commit().HasError());
     }
   }
@@ -2011,11 +2090,14 @@ TEST_P(DurabilityTest, WalMissingSecond) {
 
   // Recover WALs.
   ASSERT_DEATH(
-      {
-        std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-            {.items = {.properties_on_edges = GetParam()},
-             .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-      },
+      ([&]() {
+        memgraph::storage::Config config{
+            .items = {.properties_on_edges = GetParam()},
+            .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+        memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+        memgraph::dbms::Database db{config, repl_state};
+      }())  // iife
+      ,
       "");
 }
 
@@ -2023,15 +2105,17 @@ TEST_P(DurabilityTest, WalMissingSecond) {
 TEST_P(DurabilityTest, WalCorruptSecond) {
   // Create unrelated WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_size_kibibytes = 1,
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    auto acc = store->Access();
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_size_kibibytes = 1,
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    auto acc = db.storage()->Access();
     for (uint64_t i = 0; i < 1000; ++i) {
       acc->CreateVertex();
     }
@@ -2047,29 +2131,32 @@ TEST_P(DurabilityTest, WalCorruptSecond) {
 
   // Create WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_size_kibibytes = 1,
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_size_kibibytes = 1,
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
     const uint64_t kNumVertices = 1000;
     std::vector<memgraph::storage::Gid> gids;
     gids.reserve(kNumVertices);
     for (uint64_t i = 0; i < kNumVertices; ++i) {
-      auto acc = store->Access();
+      auto acc = db.storage()->Access();
       auto vertex = acc->CreateVertex();
       gids.push_back(vertex.Gid());
       ASSERT_FALSE(acc->Commit().HasError());
     }
     for (uint64_t i = 0; i < kNumVertices; ++i) {
-      auto acc = store->Access();
+      auto acc = db.storage()->Access();
       auto vertex = acc->FindVertex(gids[i], memgraph::storage::View::OLD);
       ASSERT_TRUE(vertex);
-      ASSERT_TRUE(vertex->SetProperty(store->NameToProperty("nandare"), memgraph::storage::PropertyValue("haihaihai!"))
-                      .HasValue());
+      ASSERT_TRUE(
+          vertex->SetProperty(db.storage()->NameToProperty("nandare"), memgraph::storage::PropertyValue("haihaihai!"))
+              .HasValue());
       ASSERT_FALSE(acc->Commit().HasError());
     }
   }
@@ -2097,11 +2184,14 @@ TEST_P(DurabilityTest, WalCorruptSecond) {
 
   // Recover WALs.
   ASSERT_DEATH(
-      {
-        std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-            {.items = {.properties_on_edges = GetParam()},
-             .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-      },
+      ([&]() {
+        memgraph::storage::Config config{
+            .items = {.properties_on_edges = GetParam()},
+            .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+        memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+        memgraph::dbms::Database db{config, repl_state};
+      }())  // iife
+      ,
       "");
 }
 
@@ -2109,16 +2199,18 @@ TEST_P(DurabilityTest, WalCorruptSecond) {
 TEST_P(DurabilityTest, WalCorruptLastTransaction) {
   // Create WALs
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_size_kibibytes = 1,
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    CreateBaseDataset(store.get(), GetParam());
-    CreateExtendedDataset(store.get(), /* single_transaction = */ true);
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_size_kibibytes = 1,
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), GetParam());
+    CreateExtendedDataset(db.storage(), /* single_transaction = */ true);
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 0);
@@ -2135,18 +2227,19 @@ TEST_P(DurabilityTest, WalCorruptLastTransaction) {
   }
 
   // Recover WALs.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
   // The extended dataset shouldn't be recovered because its WAL transaction was
   // corrupt.
-  VerifyDataset(store.get(), DatasetType::ONLY_BASE_WITH_EXTENDED_INDICES_AND_CONSTRAINTS, GetParam());
+  VerifyDataset(db.storage(), DatasetType::ONLY_BASE_WITH_EXTENDED_INDICES_AND_CONSTRAINTS, GetParam());
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -2156,15 +2249,17 @@ TEST_P(DurabilityTest, WalCorruptLastTransaction) {
 TEST_P(DurabilityTest, WalAllOperationsInSingleTransaction) {
   // Create WALs
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_size_kibibytes = 1,
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    auto acc = store->Access();
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_size_kibibytes = 1,
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    auto acc = db.storage()->Access();
     auto vertex1 = acc->CreateVertex();
     auto vertex2 = acc->CreateVertex();
     ASSERT_TRUE(vertex1.AddLabel(acc->NameToLabel("nandare")).HasValue());
@@ -2205,11 +2300,12 @@ TEST_P(DurabilityTest, WalAllOperationsInSingleTransaction) {
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover WALs.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     uint64_t count = 0;
     auto iterable = acc->Vertices(memgraph::storage::View::OLD);
     for (auto it = iterable.begin(); it != iterable.end(); ++it) {
@@ -2220,9 +2316,9 @@ TEST_P(DurabilityTest, WalAllOperationsInSingleTransaction) {
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -2232,16 +2328,18 @@ TEST_P(DurabilityTest, WalAllOperationsInSingleTransaction) {
 TEST_P(DurabilityTest, WalAndSnapshot) {
   // Create snapshot and WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::milliseconds(2000),
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    CreateBaseDataset(store.get(), GetParam());
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::milliseconds(2000),
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), GetParam());
     std::this_thread::sleep_for(std::chrono::milliseconds(2500));
-    CreateExtendedDataset(store.get());
+    CreateExtendedDataset(db.storage());
   }
 
   ASSERT_GE(GetSnapshotsList().size(), 1);
@@ -2250,16 +2348,17 @@ TEST_P(DurabilityTest, WalAndSnapshot) {
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover snapshot and WALs.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-  VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, GetParam());
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
+  VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam());
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -2269,10 +2368,11 @@ TEST_P(DurabilityTest, WalAndSnapshot) {
 TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshot) {
   // Create snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}));
-    CreateBaseDataset(store.get(), GetParam());
+    memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                     .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), GetParam());
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 1);
@@ -2282,23 +2382,27 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshot) {
 
   // Recover snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-    VerifyDataset(store.get(), DatasetType::ONLY_BASE, GetParam());
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam());
   }
 
   // Recover snapshot and create WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .recover_on_startup = true,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    CreateExtendedDataset(store.get());
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .recover_on_startup = true,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateExtendedDataset(db.storage());
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 1);
@@ -2307,16 +2411,17 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshot) {
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover snapshot and WALs.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-  VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, GetParam());
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
+  VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam());
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -2326,10 +2431,11 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshot) {
 TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) {
   // Create snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}));
-    CreateBaseDataset(store.get(), GetParam());
+    memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                     .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), GetParam());
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 1);
@@ -2339,23 +2445,27 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) {
 
   // Recover snapshot.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-    VerifyDataset(store.get(), DatasetType::ONLY_BASE, GetParam());
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam());
   }
 
   // Recover snapshot and create WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .recover_on_startup = true,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    CreateExtendedDataset(store.get());
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .recover_on_startup = true,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateExtendedDataset(db.storage());
   }
 
   ASSERT_EQ(GetSnapshotsList().size(), 1);
@@ -2366,21 +2476,23 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) {
   // Recover snapshot and WALs and create more WALs.
   memgraph::storage::Gid vertex_gid;
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .recover_on_startup = true,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, GetParam());
-    auto acc = store->Access();
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .recover_on_startup = true,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam());
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
     vertex_gid = vertex.Gid();
     if (GetParam()) {
       ASSERT_TRUE(
-          vertex.SetProperty(store->NameToProperty("meaning"), memgraph::storage::PropertyValue(42)).HasValue());
+          vertex.SetProperty(db.storage()->NameToProperty("meaning"), memgraph::storage::PropertyValue(42)).HasValue());
     }
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -2391,13 +2503,14 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) {
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover snapshot and WALs.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-  VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, GetParam(),
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
+  VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam(),
                 /* verify_info = */ false);
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->FindVertex(vertex_gid, memgraph::storage::View::OLD);
     ASSERT_TRUE(vertex);
     auto labels = vertex->Labels(memgraph::storage::View::OLD);
@@ -2406,8 +2519,8 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) {
     auto props = vertex->Properties(memgraph::storage::View::OLD);
     ASSERT_TRUE(props.HasValue());
     if (GetParam()) {
-      ASSERT_THAT(*props, UnorderedElementsAre(
-                              std::make_pair(store->NameToProperty("meaning"), memgraph::storage::PropertyValue(42))));
+      ASSERT_THAT(*props, UnorderedElementsAre(std::make_pair(db.storage()->NameToProperty("meaning"),
+                                                              memgraph::storage::PropertyValue(42))));
     } else {
       ASSERT_EQ(props->size(), 0);
     }
@@ -2415,9 +2528,9 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) {
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -2427,15 +2540,17 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) {
 TEST_P(DurabilityTest, WalAndSnapshotWalRetention) {
   // Create unrelated WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::minutes(20),
-             .wal_file_size_kibibytes = 1,
-             .wal_file_flush_every_n_tx = kFlushWalEvery}}));
-    auto acc = store->Access();
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::minutes(20),
+            .wal_file_size_kibibytes = 1,
+            .wal_file_flush_every_n_tx = kFlushWalEvery}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    auto acc = db.storage()->Access();
     for (uint64_t i = 0; i < 1000; ++i) {
       acc->CreateVertex();
     }
@@ -2453,20 +2568,22 @@ TEST_P(DurabilityTest, WalAndSnapshotWalRetention) {
 
   // Create snapshot and WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::seconds(2),
-             .wal_file_size_kibibytes = 1,
-             .wal_file_flush_every_n_tx = 1}}));
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::seconds(2),
+            .wal_file_size_kibibytes = 1,
+            .wal_file_flush_every_n_tx = 1}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
     // Restore unrelated snapshots after the database has been started.
     RestoreBackups();
     memgraph::utils::Timer timer;
     // Allow at least 6 snapshots to be created.
     while (timer.Elapsed().count() < 13.0) {
-      auto acc = store->Access();
+      auto acc = db.storage()->Access();
       acc->CreateVertex();
       ASSERT_FALSE(acc->Commit().HasError());
       ++items_created;
@@ -2486,10 +2603,12 @@ TEST_P(DurabilityTest, WalAndSnapshotWalRetention) {
 
     // Recover and verify data.
     {
-      std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-          {.items = {.properties_on_edges = GetParam()},
-           .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-      auto acc = store->Access();
+      memgraph::storage::Config config{
+          .items = {.properties_on_edges = GetParam()},
+          .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+      memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+      memgraph::dbms::Database db{config, repl_state};
+      auto acc = db.storage()->Access();
       for (uint64_t j = 0; j < items_created; ++j) {
         auto vertex = acc->FindVertex(memgraph::storage::Gid::FromUint(j), memgraph::storage::View::OLD);
         ASSERT_TRUE(vertex);
@@ -2503,11 +2622,14 @@ TEST_P(DurabilityTest, WalAndSnapshotWalRetention) {
   // Recover data after all of the snapshots have been destroyed. The recovery
   // shouldn't be possible because the initial WALs are already deleted.
   ASSERT_DEATH(
-      {
-        std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-            {.items = {.properties_on_edges = GetParam()},
-             .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-      },
+      ([&]() {
+        memgraph::storage::Config config{
+            .items = {.properties_on_edges = GetParam()},
+            .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+        memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+        memgraph::dbms::Database db{config, repl_state};
+      }())  // IIFE: immediately invoked lambda so the Database is constructed (and crashes) inside ASSERT_DEATH
+      ,
       "");
 }
 
@@ -2515,13 +2637,15 @@ TEST_P(DurabilityTest, WalAndSnapshotWalRetention) {
 TEST_P(DurabilityTest, SnapshotAndWalMixedUUID) {
   // Create unrelated snapshot and WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::seconds(2)}}));
-    auto acc = store->Access();
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::seconds(2)}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    auto acc = db.storage()->Access();
     for (uint64_t i = 0; i < 1000; ++i) {
       acc->CreateVertex();
     }
@@ -2536,15 +2660,17 @@ TEST_P(DurabilityTest, SnapshotAndWalMixedUUID) {
 
   // Create snapshot and WALs.
   {
-    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-        {.items = {.properties_on_edges = GetParam()},
-         .durability = {
-             .storage_directory = storage_directory,
-             .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-             .snapshot_interval = std::chrono::seconds(2)}}));
-    CreateBaseDataset(store.get(), GetParam());
+    memgraph::storage::Config config{
+        .items = {.properties_on_edges = GetParam()},
+        .durability = {
+            .storage_directory = storage_directory,
+            .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_interval = std::chrono::seconds(2)}};
+    memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+    memgraph::dbms::Database db{config, repl_state};
+    CreateBaseDataset(db.storage(), GetParam());
     std::this_thread::sleep_for(std::chrono::milliseconds(2500));
-    CreateExtendedDataset(store.get());
+    CreateExtendedDataset(db.storage());
     std::this_thread::sleep_for(std::chrono::milliseconds(2500));
   }
 
@@ -2562,16 +2688,17 @@ TEST_P(DurabilityTest, SnapshotAndWalMixedUUID) {
   ASSERT_EQ(GetBackupWalsList().size(), 0);
 
   // Recover snapshot and WALs.
-  std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::InMemoryStorage(
-      {.items = {.properties_on_edges = GetParam()},
-       .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}));
-  VerifyDataset(store.get(), DatasetType::BASE_WITH_EXTENDED, GetParam());
+  memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()},
+                                   .durability = {.storage_directory = storage_directory, .recover_on_startup = true}};
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::dbms::Database db{config, repl_state};
+  VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam());
 
   // Try to use the storage.
   {
-    auto acc = store->Access();
+    auto acc = db.storage()->Access();
     auto vertex = acc->CreateVertex();
-    auto edge = acc->CreateEdge(&vertex, &vertex, store->NameToEdgeType("et"));
+    auto edge = acc->CreateEdge(&vertex, &vertex, db.storage()->NameToEdgeType("et"));
     ASSERT_TRUE(edge.HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
diff --git a/tests/unit/storage_v2_replication.cpp b/tests/unit/storage_v2_replication.cpp
index 85b7f801b..261b2ccf0 100644
--- a/tests/unit/storage_v2_replication.cpp
+++ b/tests/unit/storage_v2_replication.cpp
@@ -11,6 +11,7 @@
 
 #include <chrono>
 #include <memory>
+#include <optional>
 #include <thread>
 
 #include <fmt/format.h>
@@ -21,14 +22,22 @@
 #include <storage/v2/inmemory/storage.hpp>
 #include <storage/v2/property_value.hpp>
 #include <storage/v2/replication/enums.hpp>
+#include "dbms/database.hpp"
+#include "dbms/dbms_handler.hpp"
+#include "dbms/replication_handler.hpp"
+#include "query/interpreter_context.hpp"
 #include "replication/config.hpp"
+#include "replication/state.hpp"
 #include "storage/v2/indices/label_index_stats.hpp"
-#include "storage/v2/replication/replication_handler.hpp"
 #include "storage/v2/storage.hpp"
 #include "storage/v2/view.hpp"
+#include "utils/synchronized.hpp"
 
 using testing::UnorderedElementsAre;
 
+using memgraph::dbms::RegisterReplicaError;
+using memgraph::dbms::ReplicationHandler;
+using memgraph::dbms::UnregisterReplicaResult;
 using memgraph::replication::ReplicationClientConfig;
 using memgraph::replication::ReplicationMode;
 using memgraph::replication::ReplicationRole;
@@ -38,11 +47,7 @@ using memgraph::storage::EdgeAccessor;
 using memgraph::storage::Gid;
 using memgraph::storage::InMemoryStorage;
 using memgraph::storage::PropertyValue;
-using memgraph::storage::RegisterReplicaError;
-using memgraph::storage::RegistrationMode;
-using memgraph::storage::ReplicationHandler;
 using memgraph::storage::Storage;
-using memgraph::storage::UnregisterReplicaResult;
 using memgraph::storage::View;
 using memgraph::storage::replication::ReplicaState;
 
@@ -50,15 +55,38 @@ class ReplicationTest : public ::testing::Test {
  protected:
   std::filesystem::path storage_directory{std::filesystem::temp_directory_path() /
                                           "MG_test_unit_storage_v2_replication"};
+  std::filesystem::path repl_storage_directory{std::filesystem::temp_directory_path() /
+                                               "MG_test_unit_storage_v2_replication_repl"};
+  std::filesystem::path repl2_storage_directory{std::filesystem::temp_directory_path() /
+                                                "MG_test_unit_storage_v2_replication_repl2"};
   void SetUp() override { Clear(); }
 
   void TearDown() override { Clear(); }
 
-  Config configuration{.items = {.properties_on_edges = true},
-                       .durability = {
-                           .storage_directory = storage_directory,
-                           .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-                       }};
+  Config main_conf = [&] {
+    Config config{.items = {.properties_on_edges = true},
+                  .durability = {
+                      .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+                  }};
+    UpdatePaths(config, storage_directory);
+    return config;
+  }();
+  Config repl_conf = [&] {
+    Config config{.items = {.properties_on_edges = true},
+                  .durability = {
+                      .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+                  }};
+    UpdatePaths(config, repl_storage_directory);
+    return config;
+  }();
+  Config repl2_conf = [&] {
+    Config config{.items = {.properties_on_edges = true},
+                  .durability = {
+                      .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+                  }};
+    UpdatePaths(config, repl2_storage_directory);
+    return config;
+  }();
 
   const std::string local_host = ("127.0.0.1");
   const std::array<uint16_t, 2> ports{10000, 20000};
@@ -66,30 +94,49 @@ class ReplicationTest : public ::testing::Test {
 
  private:
   void Clear() {
-    if (!std::filesystem::exists(storage_directory)) return;
-    std::filesystem::remove_all(storage_directory);
+    if (std::filesystem::exists(storage_directory)) std::filesystem::remove_all(storage_directory);
+    if (std::filesystem::exists(repl_storage_directory)) std::filesystem::remove_all(repl_storage_directory);
+    if (std::filesystem::exists(repl2_storage_directory)) std::filesystem::remove_all(repl2_storage_directory);
   }
 };
 
-TEST_F(ReplicationTest, BasicSynchronousReplicationTest) {
-  std::unique_ptr<Storage> main_store = std::make_unique<InMemoryStorage>(configuration);
-  std::unique_ptr<Storage> replica_store = std::make_unique<InMemoryStorage>(configuration);
+struct MinMemgraph {
+  MinMemgraph(const memgraph::storage::Config &conf)
+      : repl_state{ReplicationStateRootPath(conf)},
+        dbms{conf, repl_state
+#ifdef MG_ENTERPRISE
+             ,
+             reinterpret_cast<
+                 memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> *>(0),
+             true, false
+#endif
+        },
+        db{*dbms.Get().get()},
+        repl_handler(repl_state, dbms) {
+  }
+  memgraph::replication::ReplicationState repl_state;
+  memgraph::dbms::DbmsHandler dbms;
+  memgraph::dbms::Database &db;
+  ReplicationHandler repl_handler;
+};
 
-  auto replica_store_handler = ReplicationHandler{replica_store->repl_state_, *replica_store};
+TEST_F(ReplicationTest, BasicSynchronousReplicationTest) {
+  MinMemgraph main(main_conf);
+  MinMemgraph replica(repl_conf);
+
+  auto replica_store_handler = replica.repl_handler;
   replica_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = ports[0],
   });
 
-  auto main_store_handler = ReplicationHandler{main_store->repl_state_, *main_store};
-  ASSERT_FALSE(main_store_handler
-                   .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                    ReplicationClientConfig{
-                                        .name = "REPLICA",
-                                        .mode = ReplicationMode::SYNC,
-                                        .ip_address = local_host,
-                                        .port = ports[0],
-                                    })
+  ASSERT_FALSE(main.repl_handler
+                   .RegisterReplica(ReplicationClientConfig{
+                       .name = "REPLICA",
+                       .mode = ReplicationMode::SYNC,
+                       .ip_address = local_host,
+                       .port = ports[0],
+                   })
                    .HasError());
 
   // vertex create
@@ -100,27 +147,27 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) {
   const auto *vertex_property_value = "vertex_property_value";
   std::optional<Gid> vertex_gid;
   {
-    auto acc = main_store->Access();
+    auto acc = main.db.Access();
     auto v = acc->CreateVertex();
     vertex_gid.emplace(v.Gid());
-    ASSERT_TRUE(v.AddLabel(main_store->NameToLabel(vertex_label)).HasValue());
-    ASSERT_TRUE(
-        v.SetProperty(main_store->NameToProperty(vertex_property), PropertyValue(vertex_property_value)).HasValue());
+    ASSERT_TRUE(v.AddLabel(main.db.storage()->NameToLabel(vertex_label)).HasValue());
+    ASSERT_TRUE(v.SetProperty(main.db.storage()->NameToProperty(vertex_property), PropertyValue(vertex_property_value))
+                    .HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
 
   {
-    auto acc = replica_store->Access();
+    auto acc = replica.db.Access();
     const auto v = acc->FindVertex(*vertex_gid, View::OLD);
     ASSERT_TRUE(v);
     const auto labels = v->Labels(View::OLD);
     ASSERT_TRUE(labels.HasValue());
     ASSERT_EQ(labels->size(), 1);
-    ASSERT_THAT(*labels, UnorderedElementsAre(replica_store->NameToLabel(vertex_label)));
+    ASSERT_THAT(*labels, UnorderedElementsAre(replica.db.storage()->NameToLabel(vertex_label)));
     const auto properties = v->Properties(View::OLD);
     ASSERT_TRUE(properties.HasValue());
     ASSERT_EQ(properties->size(), 1);
-    ASSERT_THAT(*properties, UnorderedElementsAre(std::make_pair(replica_store->NameToProperty(vertex_property),
+    ASSERT_THAT(*properties, UnorderedElementsAre(std::make_pair(replica.db.storage()->NameToProperty(vertex_property),
                                                                  PropertyValue(vertex_property_value))));
 
     ASSERT_FALSE(acc->Commit().HasError());
@@ -128,15 +175,15 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) {
 
   // vertex remove label
   {
-    auto acc = main_store->Access();
+    auto acc = main.db.Access();
     auto v = acc->FindVertex(*vertex_gid, View::OLD);
     ASSERT_TRUE(v);
-    ASSERT_TRUE(v->RemoveLabel(main_store->NameToLabel(vertex_label)).HasValue());
+    ASSERT_TRUE(v->RemoveLabel(main.db.storage()->NameToLabel(vertex_label)).HasValue());
     ASSERT_FALSE(acc->Commit().HasError());
   }
 
   {
-    auto acc = replica_store->Access();
+    auto acc = replica.db.Access();
     const auto v = acc->FindVertex(*vertex_gid, View::OLD);
     ASSERT_TRUE(v);
     const auto labels = v->Labels(View::OLD);
@@ -147,7 +194,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) {
 
   // vertex delete
   {
-    auto acc = main_store->Access();
+    auto acc = main.db.Access();
     auto v = acc->FindVertex(*vertex_gid, View::OLD);
     ASSERT_TRUE(v);
     ASSERT_TRUE(acc->DeleteVertex(&*v).HasValue());
@@ -155,7 +202,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) {
   }
 
   {
-    auto acc = replica_store->Access();
+    auto acc = replica.db.Access();
     const auto v = acc->FindVertex(*vertex_gid, View::OLD);
     ASSERT_FALSE(v);
     vertex_gid.reset();
@@ -169,14 +216,14 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) {
   const auto *edge_property_value = "edge_property_value";
   std::optional<Gid> edge_gid;
   {
-    auto acc = main_store->Access();
+    auto acc = main.db.Access();
     auto v = acc->CreateVertex();
     vertex_gid.emplace(v.Gid());
-    auto edgeRes = acc->CreateEdge(&v, &v, main_store->NameToEdgeType(edge_type));
+    auto edgeRes = acc->CreateEdge(&v, &v, main.db.storage()->NameToEdgeType(edge_type));
     ASSERT_TRUE(edgeRes.HasValue());
     auto edge = edgeRes.GetValue();
-    ASSERT_TRUE(
-        edge.SetProperty(main_store->NameToProperty(edge_property), PropertyValue(edge_property_value)).HasValue());
+    ASSERT_TRUE(edge.SetProperty(main.db.storage()->NameToProperty(edge_property), PropertyValue(edge_property_value))
+                    .HasValue());
     edge_gid.emplace(edge.Gid());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -191,24 +238,24 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) {
   };
 
   {
-    auto acc = replica_store->Access();
+    auto acc = replica.db.Access();
     const auto v = acc->FindVertex(*vertex_gid, View::OLD);
     ASSERT_TRUE(v);
     const auto out_edges = v->OutEdges(View::OLD);
     ASSERT_TRUE(out_edges.HasValue());
     const auto edge = find_edge(out_edges->edges, *edge_gid);
-    ASSERT_EQ(edge->EdgeType(), replica_store->NameToEdgeType(edge_type));
+    ASSERT_EQ(edge->EdgeType(), replica.db.storage()->NameToEdgeType(edge_type));
     const auto properties = edge->Properties(View::OLD);
     ASSERT_TRUE(properties.HasValue());
     ASSERT_EQ(properties->size(), 1);
-    ASSERT_THAT(*properties, UnorderedElementsAre(std::make_pair(replica_store->NameToProperty(edge_property),
+    ASSERT_THAT(*properties, UnorderedElementsAre(std::make_pair(replica.db.storage()->NameToProperty(edge_property),
                                                                  PropertyValue(edge_property_value))));
     ASSERT_FALSE(acc->Commit().HasError());
   }
 
   // delete edge
   {
-    auto acc = main_store->Access();
+    auto acc = main.db.Access();
     auto v = acc->FindVertex(*vertex_gid, View::OLD);
     ASSERT_TRUE(v);
     auto out_edges = v->OutEdges(View::OLD);
@@ -219,7 +266,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) {
   }
 
   {
-    auto acc = replica_store->Access();
+    auto acc = replica.db.Access();
     const auto v = acc->FindVertex(*vertex_gid, View::OLD);
     ASSERT_TRUE(v);
     const auto out_edges = v->OutEdges(View::OLD);
@@ -239,67 +286,72 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) {
   const memgraph::storage::LabelPropertyIndexStats lp_stats{98, 76, 5.4, 3.2, 1.0};
 
   {
-    auto unique_acc = main_store->UniqueAccess();
-    ASSERT_FALSE(unique_acc->CreateIndex(main_store->NameToLabel(label)).HasError());
+    auto unique_acc = main.db.UniqueAccess();
+    ASSERT_FALSE(unique_acc->CreateIndex(main.db.storage()->NameToLabel(label)).HasError());
     ASSERT_FALSE(unique_acc->Commit().HasError());
   }
   {
-    auto unique_acc = main_store->UniqueAccess();
-    unique_acc->SetIndexStats(main_store->NameToLabel(label), l_stats);
+    auto unique_acc = main.db.UniqueAccess();
+    unique_acc->SetIndexStats(main.db.storage()->NameToLabel(label), l_stats);
     ASSERT_FALSE(unique_acc->Commit().HasError());
   }
   {
-    auto unique_acc = main_store->UniqueAccess();
+    auto unique_acc = main.db.UniqueAccess();
     ASSERT_FALSE(
-        unique_acc->CreateIndex(main_store->NameToLabel(label), main_store->NameToProperty(property)).HasError());
-    ASSERT_FALSE(unique_acc->Commit().HasError());
-  }
-  {
-    auto unique_acc = main_store->UniqueAccess();
-    unique_acc->SetIndexStats(main_store->NameToLabel(label), main_store->NameToProperty(property), lp_stats);
-    ASSERT_FALSE(unique_acc->Commit().HasError());
-  }
-  {
-    auto unique_acc = main_store->UniqueAccess();
-    ASSERT_FALSE(
-        unique_acc->CreateExistenceConstraint(main_store->NameToLabel(label), main_store->NameToProperty(property))
+        unique_acc->CreateIndex(main.db.storage()->NameToLabel(label), main.db.storage()->NameToProperty(property))
             .HasError());
     ASSERT_FALSE(unique_acc->Commit().HasError());
   }
   {
-    auto unique_acc = main_store->UniqueAccess();
-    ASSERT_FALSE(
-        unique_acc
-            ->CreateUniqueConstraint(main_store->NameToLabel(label),
-                                     {main_store->NameToProperty(property), main_store->NameToProperty(property_extra)})
-            .HasError());
+    auto unique_acc = main.db.UniqueAccess();
+    unique_acc->SetIndexStats(main.db.storage()->NameToLabel(label), main.db.storage()->NameToProperty(property),
+                              lp_stats);
+    ASSERT_FALSE(unique_acc->Commit().HasError());
+  }
+  {
+    auto unique_acc = main.db.UniqueAccess();
+    ASSERT_FALSE(unique_acc
+                     ->CreateExistenceConstraint(main.db.storage()->NameToLabel(label),
+                                                 main.db.storage()->NameToProperty(property))
+                     .HasError());
+    ASSERT_FALSE(unique_acc->Commit().HasError());
+  }
+  {
+    auto unique_acc = main.db.UniqueAccess();
+    ASSERT_FALSE(unique_acc
+                     ->CreateUniqueConstraint(main.db.storage()->NameToLabel(label),
+                                              {main.db.storage()->NameToProperty(property),
+                                               main.db.storage()->NameToProperty(property_extra)})
+                     .HasError());
     ASSERT_FALSE(unique_acc->Commit().HasError());
   }
 
   {
-    const auto indices = replica_store->Access()->ListAllIndices();
-    ASSERT_THAT(indices.label, UnorderedElementsAre(replica_store->NameToLabel(label)));
-    ASSERT_THAT(indices.label_property, UnorderedElementsAre(std::make_pair(replica_store->NameToLabel(label),
-                                                                            replica_store->NameToProperty(property))));
-    const auto &l_stats_rep = replica_store->Access()->GetIndexStats(replica_store->NameToLabel(label));
+    const auto indices = replica.db.Access()->ListAllIndices();
+    ASSERT_THAT(indices.label, UnorderedElementsAre(replica.db.storage()->NameToLabel(label)));
+    ASSERT_THAT(indices.label_property,
+                UnorderedElementsAre(std::make_pair(replica.db.storage()->NameToLabel(label),
+                                                    replica.db.storage()->NameToProperty(property))));
+    const auto &l_stats_rep = replica.db.Access()->GetIndexStats(replica.db.storage()->NameToLabel(label));
     ASSERT_TRUE(l_stats_rep);
     ASSERT_EQ(l_stats_rep->count, l_stats.count);
     ASSERT_EQ(l_stats_rep->avg_degree, l_stats.avg_degree);
-    const auto &lp_stats_rep = replica_store->Access()->GetIndexStats(replica_store->NameToLabel(label),
-                                                                      replica_store->NameToProperty(property));
+    const auto &lp_stats_rep = replica.db.Access()->GetIndexStats(replica.db.storage()->NameToLabel(label),
+                                                                  replica.db.storage()->NameToProperty(property));
     ASSERT_TRUE(lp_stats_rep);
     ASSERT_EQ(lp_stats_rep->count, lp_stats.count);
     ASSERT_EQ(lp_stats_rep->distinct_values_count, lp_stats.distinct_values_count);
     ASSERT_EQ(lp_stats_rep->statistic, lp_stats.statistic);
     ASSERT_EQ(lp_stats_rep->avg_group_size, lp_stats.avg_group_size);
     ASSERT_EQ(lp_stats_rep->avg_degree, lp_stats.avg_degree);
-    const auto constraints = replica_store->Access()->ListAllConstraints();
-    ASSERT_THAT(constraints.existence, UnorderedElementsAre(std::make_pair(replica_store->NameToLabel(label),
-                                                                           replica_store->NameToProperty(property))));
+    const auto constraints = replica.db.Access()->ListAllConstraints();
+    ASSERT_THAT(constraints.existence,
+                UnorderedElementsAre(std::make_pair(replica.db.storage()->NameToLabel(label),
+                                                    replica.db.storage()->NameToProperty(property))));
     ASSERT_THAT(constraints.unique,
-                UnorderedElementsAre(std::make_pair(
-                    replica_store->NameToLabel(label),
-                    std::set{replica_store->NameToProperty(property), replica_store->NameToProperty(property_extra)})));
+                UnorderedElementsAre(std::make_pair(replica.db.storage()->NameToLabel(label),
+                                                    std::set{replica.db.storage()->NameToProperty(property),
+                                                             replica.db.storage()->NameToProperty(property_extra)})));
   }
 
   // label index drop
@@ -307,107 +359,90 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) {
   // existence constraint drop
   // unique constriant drop
   {
-    auto unique_acc = main_store->UniqueAccess();
-    unique_acc->DeleteLabelIndexStats(main_store->NameToLabel(label));
+    auto unique_acc = main.db.UniqueAccess();
+    unique_acc->DeleteLabelIndexStats(main.db.storage()->NameToLabel(label));
     ASSERT_FALSE(unique_acc->Commit().HasError());
   }
   {
-    auto unique_acc = main_store->UniqueAccess();
-    ASSERT_FALSE(unique_acc->DropIndex(main_store->NameToLabel(label)).HasError());
+    auto unique_acc = main.db.UniqueAccess();
+    ASSERT_FALSE(unique_acc->DropIndex(main.db.storage()->NameToLabel(label)).HasError());
     ASSERT_FALSE(unique_acc->Commit().HasError());
   }
   {
-    auto unique_acc = main_store->UniqueAccess();
-    unique_acc->DeleteLabelPropertyIndexStats(main_store->NameToLabel(label));
+    auto unique_acc = main.db.UniqueAccess();
+    unique_acc->DeleteLabelPropertyIndexStats(main.db.storage()->NameToLabel(label));
     ASSERT_FALSE(unique_acc->Commit().HasError());
   }
   {
-    auto unique_acc = main_store->UniqueAccess();
+    auto unique_acc = main.db.UniqueAccess();
     ASSERT_FALSE(
-        unique_acc->DropIndex(main_store->NameToLabel(label), main_store->NameToProperty(property)).HasError());
-    ASSERT_FALSE(unique_acc->Commit().HasError());
-  }
-  {
-    auto unique_acc = main_store->UniqueAccess();
-    ASSERT_FALSE(
-        unique_acc->DropExistenceConstraint(main_store->NameToLabel(label), main_store->NameToProperty(property))
+        unique_acc->DropIndex(main.db.storage()->NameToLabel(label), main.db.storage()->NameToProperty(property))
             .HasError());
     ASSERT_FALSE(unique_acc->Commit().HasError());
   }
   {
-    auto unique_acc = main_store->UniqueAccess();
-    ASSERT_EQ(
-        unique_acc->DropUniqueConstraint(main_store->NameToLabel(label), {main_store->NameToProperty(property),
-                                                                          main_store->NameToProperty(property_extra)}),
-        memgraph::storage::UniqueConstraints::DeletionStatus::SUCCESS);
+    auto unique_acc = main.db.UniqueAccess();
+    ASSERT_FALSE(unique_acc
+                     ->DropExistenceConstraint(main.db.storage()->NameToLabel(label),
+                                               main.db.storage()->NameToProperty(property))
+                     .HasError());
+    ASSERT_FALSE(unique_acc->Commit().HasError());
+  }
+  {
+    auto unique_acc = main.db.UniqueAccess();
+    ASSERT_EQ(unique_acc->DropUniqueConstraint(
+                  main.db.storage()->NameToLabel(label),
+                  {main.db.storage()->NameToProperty(property), main.db.storage()->NameToProperty(property_extra)}),
+              memgraph::storage::UniqueConstraints::DeletionStatus::SUCCESS);
     ASSERT_FALSE(unique_acc->Commit().HasError());
   }
 
   {
-    const auto indices = replica_store->Access()->ListAllIndices();
+    const auto indices = replica.db.Access()->ListAllIndices();
     ASSERT_EQ(indices.label.size(), 0);
     ASSERT_EQ(indices.label_property.size(), 0);
 
-    const auto &l_stats_rep = replica_store->Access()->GetIndexStats(replica_store->NameToLabel(label));
+    const auto &l_stats_rep = replica.db.Access()->GetIndexStats(replica.db.storage()->NameToLabel(label));
     ASSERT_FALSE(l_stats_rep);
-    const auto &lp_stats_rep = replica_store->Access()->GetIndexStats(replica_store->NameToLabel(label),
-                                                                      replica_store->NameToProperty(property));
+    const auto &lp_stats_rep = replica.db.Access()->GetIndexStats(replica.db.storage()->NameToLabel(label),
+                                                                  replica.db.storage()->NameToProperty(property));
     ASSERT_FALSE(lp_stats_rep);
 
-    const auto constraints = replica_store->Access()->ListAllConstraints();
+    const auto constraints = replica.db.Access()->ListAllConstraints();
     ASSERT_EQ(constraints.existence.size(), 0);
     ASSERT_EQ(constraints.unique.size(), 0);
   }
 }
 
 TEST_F(ReplicationTest, MultipleSynchronousReplicationTest) {
-  std::unique_ptr<Storage> main_store{
-      new InMemoryStorage({.durability = {
-                               .storage_directory = storage_directory,
-                               .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-                           }})};
+  MinMemgraph main(main_conf);
+  MinMemgraph replica1(repl_conf);
+  MinMemgraph replica2(repl2_conf);
 
-  std::unique_ptr<Storage> replica_store1{
-      new InMemoryStorage({.durability = {
-                               .storage_directory = storage_directory,
-                               .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-                           }})};
-
-  auto replica1_store_handler = ReplicationHandler{replica_store1->repl_state_, *replica_store1};
-  replica1_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  replica1.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = ports[0],
   });
-
-  std::unique_ptr<Storage> replica_store2{
-      new InMemoryStorage({.durability = {
-                               .storage_directory = storage_directory,
-                               .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-                           }})};
-  auto replica2_store_handler = ReplicationHandler{replica_store2->repl_state_, *replica_store2};
-  replica2_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  replica2.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = ports[1],
   });
 
-  auto main_store_handler = ReplicationHandler{main_store->repl_state_, *main_store};
-  ASSERT_FALSE(main_store_handler
-                   .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                    ReplicationClientConfig{
-                                        .name = replicas[0],
-                                        .mode = ReplicationMode::SYNC,
-                                        .ip_address = local_host,
-                                        .port = ports[0],
-                                    })
+  ASSERT_FALSE(main.repl_handler
+                   .RegisterReplica(ReplicationClientConfig{
+                       .name = replicas[0],
+                       .mode = ReplicationMode::SYNC,
+                       .ip_address = local_host,
+                       .port = ports[0],
+                   })
                    .HasError());
-  ASSERT_FALSE(main_store_handler
-                   .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                    ReplicationClientConfig{
-                                        .name = replicas[1],
-                                        .mode = ReplicationMode::SYNC,
-                                        .ip_address = local_host,
-                                        .port = ports[1],
-                                    })
+  ASSERT_FALSE(main.repl_handler
+                   .RegisterReplica(ReplicationClientConfig{
+                       .name = replicas[1],
+                       .mode = ReplicationMode::SYNC,
+                       .ip_address = local_host,
+                       .port = ports[1],
+                   })
                    .HasError());
 
   const auto *vertex_label = "label";
@@ -415,11 +450,11 @@ TEST_F(ReplicationTest, MultipleSynchronousReplicationTest) {
   const auto *vertex_property_value = "property_value";
   std::optional<Gid> vertex_gid;
   {
-    auto acc = main_store->Access();
+    auto acc = main.db.Access();
     auto v = acc->CreateVertex();
-    ASSERT_TRUE(v.AddLabel(main_store->NameToLabel(vertex_label)).HasValue());
-    ASSERT_TRUE(
-        v.SetProperty(main_store->NameToProperty(vertex_property), PropertyValue(vertex_property_value)).HasValue());
+    ASSERT_TRUE(v.AddLabel(main.db.storage()->NameToLabel(vertex_label)).HasValue());
+    ASSERT_TRUE(v.SetProperty(main.db.storage()->NameToProperty(vertex_property), PropertyValue(vertex_property_value))
+                    .HasValue());
     vertex_gid.emplace(v.Gid());
     ASSERT_FALSE(acc->Commit().HasError());
   }
@@ -434,13 +469,13 @@ TEST_F(ReplicationTest, MultipleSynchronousReplicationTest) {
     ASSERT_FALSE(acc->Commit().HasError());
   };
 
-  check_replica(replica_store1.get());
-  check_replica(replica_store2.get());
+  check_replica(replica1.db.storage());
+  check_replica(replica2.db.storage());
 
-  auto handler = ReplicationHandler{main_store->repl_state_, *main_store};
+  auto handler = main.repl_handler;
   handler.UnregisterReplica(replicas[1]);
   {
-    auto acc = main_store->Access();
+    auto acc = main.db.Access();
     auto v = acc->CreateVertex();
     vertex_gid.emplace(v.Gid());
     ASSERT_FALSE(acc->Commit().HasError());
@@ -448,7 +483,7 @@ TEST_F(ReplicationTest, MultipleSynchronousReplicationTest) {
 
   // REPLICA1 should contain the new vertex
   {
-    auto acc = replica_store1->Access();
+    auto acc = replica1.db.storage()->Access();
     const auto v = acc->FindVertex(*vertex_gid, View::OLD);
     ASSERT_TRUE(v);
     ASSERT_FALSE(acc->Commit().HasError());
@@ -456,7 +491,7 @@ TEST_F(ReplicationTest, MultipleSynchronousReplicationTest) {
 
   // REPLICA2 should not contain the new vertex
   {
-    auto acc = replica_store2->Access();
+    auto acc = replica2.db.storage()->Access();
     const auto v = acc->FindVertex(*vertex_gid, View::OLD);
     ASSERT_FALSE(v);
     ASSERT_FALSE(acc->Commit().HasError());
@@ -467,15 +502,17 @@ TEST_F(ReplicationTest, RecoveryProcess) {
   std::vector<Gid> vertex_gids;
   // Force the creation of snapshot
   {
-    std::unique_ptr<Storage> main_store{
-        new InMemoryStorage({.durability = {
-                                 .storage_directory = storage_directory,
-                                 .recover_on_startup = true,
-                                 .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-                                 .snapshot_on_exit = true,
-                             }})};
+    memgraph::storage::Config conf{
+        .durability = {
+            .recover_on_startup = true,
+            .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            .snapshot_on_exit = true,
+        }};
+    UpdatePaths(conf, storage_directory);
+    MinMemgraph main(conf);
+
     {
-      auto acc = main_store->Access();
+      auto acc = main.db.Access();
       // Create the vertex before registering a replica
       auto v = acc->CreateVertex();
       vertex_gids.emplace_back(v.Gid());
@@ -485,120 +522,117 @@ TEST_F(ReplicationTest, RecoveryProcess) {
 
   {
     // Create second WAL
-    std::unique_ptr<Storage> main_store{new InMemoryStorage(
-        {.durability = {.storage_directory = storage_directory,
-                        .recover_on_startup = true,
-                        .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL}})};
+    memgraph::storage::Config conf{
+        .durability = {.recover_on_startup = true,
+                       .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL}};
+    UpdatePaths(conf, storage_directory);
+    MinMemgraph main(conf);
     // Create vertices in 2 different transactions
     {
-      auto acc = main_store->Access();
+      auto acc = main.db.Access();
       auto v = acc->CreateVertex();
       vertex_gids.emplace_back(v.Gid());
       ASSERT_FALSE(acc->Commit().HasError());
     }
     {
-      auto acc = main_store->Access();
+      auto acc = main.db.Access();
       auto v = acc->CreateVertex();
       vertex_gids.emplace_back(v.Gid());
       ASSERT_FALSE(acc->Commit().HasError());
     }
   }
 
-  std::unique_ptr<Storage> main_store{
-      new InMemoryStorage({.durability = {
-                               .storage_directory = storage_directory,
-                               .recover_on_startup = true,
-                               .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
-                           }})};
+  memgraph::storage::Config conf{
+      .durability = {
+          .recover_on_startup = true,
+          .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+      }};
+  UpdatePaths(conf, storage_directory);
+  MinMemgraph main(conf);
 
   static constexpr const auto *property_name = "property_name";
   static constexpr const auto property_value = 1;
   {
     // Force the creation of current WAL file
-    auto acc = main_store->Access();
+    auto acc = main.db.Access();
     for (const auto &vertex_gid : vertex_gids) {
       auto v = acc->FindVertex(vertex_gid, View::OLD);
       ASSERT_TRUE(v);
-      ASSERT_TRUE(v->SetProperty(main_store->NameToProperty(property_name), PropertyValue(property_value)).HasValue());
+      ASSERT_TRUE(
+          v->SetProperty(main.db.storage()->NameToProperty(property_name), PropertyValue(property_value)).HasValue());
     }
     ASSERT_FALSE(acc->Commit().HasError());
   }
 
-  std::filesystem::path replica_storage_directory{std::filesystem::temp_directory_path() /
-                                                  "MG_test_unit_storage_v2_replication_replica"};
-  memgraph::utils::OnScopeExit replica_directory_cleaner(
-      [&]() { std::filesystem::remove_all(replica_storage_directory); });
-
   static constexpr const auto *vertex_label = "vertex_label";
   {
-    std::unique_ptr<Storage> replica_store{new InMemoryStorage(
-        {.durability = {.storage_directory = replica_storage_directory,
-                        .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL}})};
+    MinMemgraph replica(repl_conf);
+    auto replica_store_handler = replica.repl_handler;
 
-    auto replica_store_handler = ReplicationHandler{replica_store->repl_state_, *replica_store};
     replica_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
         .ip_address = local_host,
         .port = ports[0],
     });
-    auto main_store_handler = ReplicationHandler{main_store->repl_state_, *main_store};
-    ASSERT_FALSE(main_store_handler
-                     .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                      ReplicationClientConfig{
-                                          .name = replicas[0],
-                                          .mode = ReplicationMode::SYNC,
-                                          .ip_address = local_host,
-                                          .port = ports[0],
-                                      })
+    ASSERT_FALSE(main.repl_handler
+                     .RegisterReplica(ReplicationClientConfig{
+                         .name = replicas[0],
+                         .mode = ReplicationMode::SYNC,
+                         .ip_address = local_host,
+                         .port = ports[0],
+                     })
                      .HasError());
 
-    ASSERT_EQ(main_store->GetReplicaState(replicas[0]), ReplicaState::RECOVERY);
+    ASSERT_EQ(main.db.storage()->GetReplicaState(replicas[0]), ReplicaState::RECOVERY);
 
-    while (main_store->GetReplicaState(replicas[0]) != ReplicaState::READY) {
+    while (main.db.storage()->GetReplicaState(replicas[0]) != ReplicaState::READY) {
       std::this_thread::sleep_for(std::chrono::milliseconds(10));
     }
 
     {
-      auto acc = main_store->Access();
+      auto acc = main.db.Access();
       for (const auto &vertex_gid : vertex_gids) {
         auto v = acc->FindVertex(vertex_gid, View::OLD);
         ASSERT_TRUE(v);
-        ASSERT_TRUE(v->AddLabel(main_store->NameToLabel(vertex_label)).HasValue());
+        ASSERT_TRUE(v->AddLabel(main.db.storage()->NameToLabel(vertex_label)).HasValue());
       }
       ASSERT_FALSE(acc->Commit().HasError());
     }
     {
-      auto acc = replica_store->Access();
+      auto acc = replica.db.Access();
       for (const auto &vertex_gid : vertex_gids) {
         auto v = acc->FindVertex(vertex_gid, View::OLD);
         ASSERT_TRUE(v);
         const auto labels = v->Labels(View::OLD);
         ASSERT_TRUE(labels.HasValue());
-        ASSERT_THAT(*labels, UnorderedElementsAre(replica_store->NameToLabel(vertex_label)));
+        ASSERT_THAT(*labels, UnorderedElementsAre(replica.db.storage()->NameToLabel(vertex_label)));
         const auto properties = v->Properties(View::OLD);
         ASSERT_TRUE(properties.HasValue());
-        ASSERT_THAT(*properties, UnorderedElementsAre(std::make_pair(replica_store->NameToProperty(property_name),
-                                                                     PropertyValue(property_value))));
+        ASSERT_THAT(*properties,
+                    UnorderedElementsAre(std::make_pair(replica.db.storage()->NameToProperty(property_name),
+                                                        PropertyValue(property_value))));
       }
       ASSERT_FALSE(acc->Commit().HasError());
     }
   }
   {
-    std::unique_ptr<Storage> replica_store{new InMemoryStorage(
-        {.durability = {.storage_directory = replica_storage_directory,
-                        .recover_on_startup = true,
-                        .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL}})};
+    memgraph::storage::Config repl_conf{
+        .durability = {.recover_on_startup = true,
+                       .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL}};
+    UpdatePaths(repl_conf, repl_storage_directory);
+    MinMemgraph replica(repl_conf);
     {
-      auto acc = replica_store->Access();
+      auto acc = replica.db.Access();
       for (const auto &vertex_gid : vertex_gids) {
         auto v = acc->FindVertex(vertex_gid, View::OLD);
         ASSERT_TRUE(v);
         const auto labels = v->Labels(View::OLD);
         ASSERT_TRUE(labels.HasValue());
-        ASSERT_THAT(*labels, UnorderedElementsAre(replica_store->NameToLabel(vertex_label)));
+        ASSERT_THAT(*labels, UnorderedElementsAre(replica.db.storage()->NameToLabel(vertex_label)));
         const auto properties = v->Properties(View::OLD);
         ASSERT_TRUE(properties.HasValue());
-        ASSERT_THAT(*properties, UnorderedElementsAre(std::make_pair(replica_store->NameToProperty(property_name),
-                                                                     PropertyValue(property_value))));
+        ASSERT_THAT(*properties,
+                    UnorderedElementsAre(std::make_pair(replica.db.storage()->NameToProperty(property_name),
+                                                        PropertyValue(property_value))));
       }
       ASSERT_FALSE(acc->Commit().HasError());
     }
@@ -606,48 +640,45 @@ TEST_F(ReplicationTest, RecoveryProcess) {
 }
 
 TEST_F(ReplicationTest, BasicAsynchronousReplicationTest) {
-  std::unique_ptr<Storage> main_store{new InMemoryStorage(configuration)};
+  MinMemgraph main(main_conf);
+  MinMemgraph replica_async(repl_conf);
 
-  std::unique_ptr<Storage> replica_store_async{new InMemoryStorage(configuration)};
-
-  auto replica_store_handler = ReplicationHandler{replica_store_async->repl_state_, *replica_store_async};
+  auto replica_store_handler = replica_async.repl_handler;
   replica_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = ports[1],
   });
 
-  auto main_store_handler = ReplicationHandler{main_store->repl_state_, *main_store};
-  ASSERT_FALSE(main_store_handler
-                   .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                    ReplicationClientConfig{
-                                        .name = "REPLICA_ASYNC",
-                                        .mode = ReplicationMode::ASYNC,
-                                        .ip_address = local_host,
-                                        .port = ports[1],
-                                    })
+  ASSERT_FALSE(main.repl_handler
+                   .RegisterReplica(ReplicationClientConfig{
+                       .name = "REPLICA_ASYNC",
+                       .mode = ReplicationMode::ASYNC,
+                       .ip_address = local_host,
+                       .port = ports[1],
+                   })
                    .HasError());
 
   static constexpr size_t vertices_create_num = 10;
   std::vector<Gid> created_vertices;
   for (size_t i = 0; i < vertices_create_num; ++i) {
-    auto acc = main_store->Access();
+    auto acc = main.db.Access();
     auto v = acc->CreateVertex();
     created_vertices.push_back(v.Gid());
     ASSERT_FALSE(acc->Commit().HasError());
 
     if (i == 0) {
-      ASSERT_EQ(main_store->GetReplicaState("REPLICA_ASYNC"), ReplicaState::REPLICATING);
+      ASSERT_EQ(main.db.storage()->GetReplicaState("REPLICA_ASYNC"), ReplicaState::REPLICATING);
     } else {
-      ASSERT_EQ(main_store->GetReplicaState("REPLICA_ASYNC"), ReplicaState::RECOVERY);
+      ASSERT_EQ(main.db.storage()->GetReplicaState("REPLICA_ASYNC"), ReplicaState::RECOVERY);
     }
   }
 
-  while (main_store->GetReplicaState("REPLICA_ASYNC") != ReplicaState::READY) {
+  while (main.db.storage()->GetReplicaState("REPLICA_ASYNC") != ReplicaState::READY) {
     std::this_thread::sleep_for(std::chrono::milliseconds(10));
   }
 
   ASSERT_TRUE(std::all_of(created_vertices.begin(), created_vertices.end(), [&](const auto vertex_gid) {
-    auto acc = replica_store_async->Access();
+    auto acc = replica_async.db.storage()->Access();
     auto v = acc->FindVertex(vertex_gid, View::OLD);
     const bool exists = v.has_value();
     EXPECT_FALSE(acc->Commit().HasError());
@@ -656,116 +687,108 @@ TEST_F(ReplicationTest, BasicAsynchronousReplicationTest) {
 }
 
 TEST_F(ReplicationTest, EpochTest) {
-  std::unique_ptr<Storage> main_store{new InMemoryStorage(configuration)};
-  std::unique_ptr<Storage> replica_store1{new InMemoryStorage(configuration)};
+  MinMemgraph main(main_conf);
+  MinMemgraph replica1(repl_conf);
 
-  auto replica1_store_handler = ReplicationHandler{replica_store1->repl_state_, *replica_store1};
-  replica1_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  replica1.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = ports[0],
   });
 
-  std::unique_ptr<Storage> replica_store2{new InMemoryStorage(configuration)};
-
-  auto replica2_store_handler = ReplicationHandler{replica_store2->repl_state_, *replica_store2};
-  replica2_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  MinMemgraph replica2(repl2_conf);
+  replica2.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = 10001,
   });
 
-  auto main_store_handler = ReplicationHandler{main_store->repl_state_, *main_store};
-  ASSERT_FALSE(main_store_handler
-                   .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                    ReplicationClientConfig{
-                                        .name = replicas[0],
-                                        .mode = ReplicationMode::SYNC,
-                                        .ip_address = local_host,
-                                        .port = ports[0],
-                                    })
+  ASSERT_FALSE(main.repl_handler
+                   .RegisterReplica(ReplicationClientConfig{
+                       .name = replicas[0],
+                       .mode = ReplicationMode::SYNC,
+                       .ip_address = local_host,
+                       .port = ports[0],
+                   })
                    .HasError());
 
-  ASSERT_FALSE(main_store_handler
-                   .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                    ReplicationClientConfig{
-                                        .name = replicas[1],
-                                        .mode = ReplicationMode::SYNC,
-                                        .ip_address = local_host,
-                                        .port = 10001,
-                                    })
+  ASSERT_FALSE(main.repl_handler
+                   .RegisterReplica(ReplicationClientConfig{
+                       .name = replicas[1],
+                       .mode = ReplicationMode::SYNC,
+                       .ip_address = local_host,
+                       .port = 10001,
+                   })
                    .HasError());
 
   std::optional<Gid> vertex_gid;
   {
-    auto acc = main_store->Access();
+    auto acc = main.db.Access();
     const auto v = acc->CreateVertex();
     vertex_gid.emplace(v.Gid());
     ASSERT_FALSE(acc->Commit().HasError());
   }
   {
-    auto acc = replica_store1->Access();
+    auto acc = replica1.db.storage()->Access();
     const auto v = acc->FindVertex(*vertex_gid, View::OLD);
     ASSERT_TRUE(v);
     ASSERT_FALSE(acc->Commit().HasError());
   }
   {
-    auto acc = replica_store2->Access();
+    auto acc = replica2.db.storage()->Access();
     const auto v = acc->FindVertex(*vertex_gid, View::OLD);
     ASSERT_TRUE(v);
     ASSERT_FALSE(acc->Commit().HasError());
   }
 
-  main_store_handler.UnregisterReplica(replicas[0]);
-  main_store_handler.UnregisterReplica(replicas[1]);
+  main.repl_handler.UnregisterReplica(replicas[0]);
+  main.repl_handler.UnregisterReplica(replicas[1]);
 
-  ASSERT_TRUE(replica1_store_handler.SetReplicationRoleMain());
+  ASSERT_TRUE(replica1.repl_handler.SetReplicationRoleMain());
 
-  ASSERT_FALSE(replica1_store_handler
-                   .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                    ReplicationClientConfig{
-                                        .name = replicas[1],
-                                        .mode = ReplicationMode::SYNC,
-                                        .ip_address = local_host,
-                                        .port = 10001,
-                                    })
+  ASSERT_FALSE(replica1.repl_handler
+                   .RegisterReplica(ReplicationClientConfig{
+                       .name = replicas[1],
+                       .mode = ReplicationMode::SYNC,
+                       .ip_address = local_host,
+                       .port = 10001,
+                   })
 
                    .HasError());
 
   {
-    auto acc = main_store->Access();
+    auto acc = main.db.Access();
     acc->CreateVertex();
     ASSERT_FALSE(acc->Commit().HasError());
   }
   {
-    auto acc = replica_store1->Access();
+    auto acc = replica1.db.storage()->Access();
     auto v = acc->CreateVertex();
     vertex_gid.emplace(v.Gid());
     ASSERT_FALSE(acc->Commit().HasError());
   }
   // Replica1 should forward it's vertex to Replica2
   {
-    auto acc = replica_store2->Access();
+    auto acc = replica2.db.storage()->Access();
     const auto v = acc->FindVertex(*vertex_gid, View::OLD);
     ASSERT_TRUE(v);
     ASSERT_FALSE(acc->Commit().HasError());
   }
 
-  replica1_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  replica1.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = ports[0],
   });
-  ASSERT_TRUE(main_store_handler
-                  .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                   ReplicationClientConfig{
-                                       .name = replicas[0],
-                                       .mode = ReplicationMode::SYNC,
-                                       .ip_address = local_host,
-                                       .port = ports[0],
-                                   })
+  ASSERT_TRUE(main.repl_handler
+                  .RegisterReplica(ReplicationClientConfig{
+                      .name = replicas[0],
+                      .mode = ReplicationMode::SYNC,
+                      .ip_address = local_host,
+                      .port = ports[0],
+                  })
 
                   .HasError());
 
   {
-    auto acc = main_store->Access();
+    auto acc = main.db.Access();
     const auto v = acc->CreateVertex();
     vertex_gid.emplace(v.Gid());
     ASSERT_FALSE(acc->Commit().HasError());
@@ -773,7 +796,7 @@ TEST_F(ReplicationTest, EpochTest) {
   // Replica1 is not compatible with the main so it shouldn't contain
   // it's newest vertex
   {
-    auto acc = replica_store1->Access();
+    auto acc = replica1.db.storage()->Access();
     const auto v = acc->FindVertex(*vertex_gid, View::OLD);
     ASSERT_FALSE(v);
     ASSERT_FALSE(acc->Commit().HasError());
@@ -781,52 +804,47 @@ TEST_F(ReplicationTest, EpochTest) {
 }
 
 TEST_F(ReplicationTest, ReplicationInformation) {
-  std::unique_ptr<Storage> main_store{new InMemoryStorage(configuration)};
-  std::unique_ptr<Storage> replica_store1{new InMemoryStorage(configuration)};
+  MinMemgraph main(main_conf);
+  MinMemgraph replica1(repl_conf);
 
   uint16_t replica1_port = 10001;
-  auto replica1_store_handler = ReplicationHandler{replica_store1->repl_state_, *replica_store1};
-  replica1_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  replica1.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = replica1_port,
   });
 
   uint16_t replica2_port = 10002;
-  std::unique_ptr<Storage> replica_store2{new InMemoryStorage(configuration)};
-  auto replica2_store_handler = ReplicationHandler{replica_store2->repl_state_, *replica_store2};
-  replica2_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  MinMemgraph replica2(repl2_conf);
+  replica2.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = replica2_port,
   });
 
-  auto main_store_handler = ReplicationHandler{main_store->repl_state_, *main_store};
-  ASSERT_FALSE(main_store_handler
-                   .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                    ReplicationClientConfig{
-                                        .name = replicas[0],
-                                        .mode = ReplicationMode::SYNC,
-                                        .ip_address = local_host,
-                                        .port = replica1_port,
-                                    })
+  ASSERT_FALSE(main.repl_handler
+                   .RegisterReplica(ReplicationClientConfig{
+                       .name = replicas[0],
+                       .mode = ReplicationMode::SYNC,
+                       .ip_address = local_host,
+                       .port = replica1_port,
+                   })
 
                    .HasError());
 
-  ASSERT_FALSE(main_store_handler
-                   .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                    ReplicationClientConfig{
-                                        .name = replicas[1],
-                                        .mode = ReplicationMode::ASYNC,
-                                        .ip_address = local_host,
-                                        .port = replica2_port,
-                                    })
+  ASSERT_FALSE(main.repl_handler
+                   .RegisterReplica(ReplicationClientConfig{
+                       .name = replicas[1],
+                       .mode = ReplicationMode::ASYNC,
+                       .ip_address = local_host,
+                       .port = replica2_port,
+                   })
 
                    .HasError());
 
-  ASSERT_TRUE(main_store->repl_state_.IsMain());
-  ASSERT_TRUE(replica_store1->repl_state_.IsReplica());
-  ASSERT_TRUE(replica_store2->repl_state_.IsReplica());
+  ASSERT_TRUE(main.repl_state.IsMain());
+  ASSERT_TRUE(replica1.repl_state.IsReplica());
+  ASSERT_TRUE(replica2.repl_state.IsReplica());
 
-  const auto replicas_info = main_store->ReplicasInfo();
+  const auto replicas_info = main.db.storage()->ReplicasInfo();
   ASSERT_EQ(replicas_info.size(), 2);
 
   const auto &first_info = replicas_info[0];
@@ -843,122 +861,121 @@ TEST_F(ReplicationTest, ReplicationInformation) {
 }
 
 TEST_F(ReplicationTest, ReplicationReplicaWithExistingName) {
-  std::unique_ptr<Storage> main_store{new InMemoryStorage(configuration)};
-  std::unique_ptr<Storage> replica_store1{new InMemoryStorage(configuration)};
+  MinMemgraph main(main_conf);
+  MinMemgraph replica1(repl_conf);
 
   uint16_t replica1_port = 10001;
-  auto replica1_store_handler = ReplicationHandler{replica_store1->repl_state_, *replica_store1};
-  replica1_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  replica1.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = replica1_port,
   });
 
   uint16_t replica2_port = 10002;
-  std::unique_ptr<Storage> replica_store2{new InMemoryStorage(configuration)};
-  auto replica2_store_handler = ReplicationHandler{replica_store2->repl_state_, *replica_store2};
-  replica2_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  MinMemgraph replica2(repl2_conf);
+  replica2.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = replica2_port,
   });
-  auto main_store_handler = ReplicationHandler{main_store->repl_state_, *main_store};
-  ASSERT_FALSE(main_store_handler
-                   .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                    ReplicationClientConfig{
-                                        .name = replicas[0],
-                                        .mode = ReplicationMode::SYNC,
-                                        .ip_address = local_host,
-                                        .port = replica1_port,
-                                    })
+  ASSERT_FALSE(main.repl_handler
+                   .RegisterReplica(ReplicationClientConfig{
+                       .name = replicas[0],
+                       .mode = ReplicationMode::SYNC,
+                       .ip_address = local_host,
+                       .port = replica1_port,
+                   })
                    .HasError());
 
-  ASSERT_TRUE(main_store_handler
-                  .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                   ReplicationClientConfig{
-                                       .name = replicas[0],
-                                       .mode = ReplicationMode::ASYNC,
-                                       .ip_address = local_host,
-                                       .port = replica2_port,
-                                   })
+  ASSERT_TRUE(main.repl_handler
+                  .RegisterReplica(ReplicationClientConfig{
+                      .name = replicas[0],
+                      .mode = ReplicationMode::ASYNC,
+                      .ip_address = local_host,
+                      .port = replica2_port,
+                  })
                   .GetError() == RegisterReplicaError::NAME_EXISTS);
 }
 
 TEST_F(ReplicationTest, ReplicationReplicaWithExistingEndPoint) {
   uint16_t common_port = 10001;
 
-  std::unique_ptr<Storage> main_store{new InMemoryStorage(configuration)};
-  std::unique_ptr<Storage> replica_store1{new InMemoryStorage(configuration)};
-  auto replica1_store_handler = ReplicationHandler{replica_store1->repl_state_, *replica_store1};
-  replica1_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  MinMemgraph main(main_conf);
+  MinMemgraph replica1(repl_conf);
+  replica1.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = common_port,
   });
 
-  std::unique_ptr<Storage> replica_store2{new InMemoryStorage(configuration)};
-  auto replica2_store_handler = ReplicationHandler{replica_store2->repl_state_, *replica_store2};
-  replica2_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  MinMemgraph replica2(repl2_conf);
+  replica2.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = common_port,
   });
 
-  auto main_store_handler = ReplicationHandler{main_store->repl_state_, *main_store};
-  ASSERT_FALSE(main_store_handler
-                   .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                    ReplicationClientConfig{
-                                        .name = replicas[0],
-                                        .mode = ReplicationMode::SYNC,
-                                        .ip_address = local_host,
-                                        .port = common_port,
-                                    })
+  ASSERT_FALSE(main.repl_handler
+                   .RegisterReplica(ReplicationClientConfig{
+                       .name = replicas[0],
+                       .mode = ReplicationMode::SYNC,
+                       .ip_address = local_host,
+                       .port = common_port,
+                   })
                    .HasError());
 
-  ASSERT_TRUE(main_store_handler
-                  .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                   ReplicationClientConfig{
-                                       .name = replicas[1],
-                                       .mode = ReplicationMode::ASYNC,
-                                       .ip_address = local_host,
-                                       .port = common_port,
-                                   })
+  ASSERT_TRUE(main.repl_handler
+                  .RegisterReplica(ReplicationClientConfig{
+                      .name = replicas[1],
+                      .mode = ReplicationMode::ASYNC,
+                      .ip_address = local_host,
+                      .port = common_port,
+                  })
                   .GetError() == RegisterReplicaError::END_POINT_EXISTS);
 }
 
 TEST_F(ReplicationTest, RestoringReplicationAtStartupAfterDroppingReplica) {
-  auto main_config = configuration;
+  auto main_config = main_conf;
+  auto replica1_config = main_conf;
+  auto replica2_config = main_conf;
   main_config.durability.restore_replication_state_on_startup = true;
-  std::unique_ptr<Storage> main_store{new InMemoryStorage(main_config)};
-  std::unique_ptr<Storage> replica_store1{new InMemoryStorage(configuration)};
 
-  auto replica1_store_handler = ReplicationHandler{replica_store1->repl_state_, *replica_store1};
-  replica1_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  std::filesystem::path replica1_storage_directory{std::filesystem::temp_directory_path() / "replica1"};
+  std::filesystem::path replica2_storage_directory{std::filesystem::temp_directory_path() / "replica2"};
+  memgraph::utils::OnScopeExit replica1_directory_cleaner(
+      [&]() { std::filesystem::remove_all(replica1_storage_directory); });
+  memgraph::utils::OnScopeExit replica2_directory_cleaner(
+      [&]() { std::filesystem::remove_all(replica2_storage_directory); });
+
+  UpdatePaths(replica1_config, replica1_storage_directory);
+  UpdatePaths(replica2_config, replica2_storage_directory);
+
+  std::optional<MinMemgraph> main(main_config);
+  MinMemgraph replica1(replica1_config);
+
+  replica1.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = ports[0],
   });
 
-  std::unique_ptr<Storage> replica_store2{new InMemoryStorage(configuration)};
-  auto replica2_store_handler = ReplicationHandler{replica_store2->repl_state_, *replica_store2};
-  replica2_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  MinMemgraph replica2(replica2_config);
+  replica2.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = ports[1],
   });
 
-  auto main_store_handler = ReplicationHandler{main_store->repl_state_, *main_store};
-  auto res =
-      main_store_handler.RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID, ReplicationClientConfig{
-                                                                                        .name = replicas[0],
-                                                                                        .mode = ReplicationMode::SYNC,
-                                                                                        .ip_address = local_host,
-                                                                                        .port = ports[0],
-                                                                                    });
+  auto res = main->repl_handler.RegisterReplica(ReplicationClientConfig{
+      .name = replicas[0],
+      .mode = ReplicationMode::SYNC,
+      .ip_address = local_host,
+      .port = ports[0],
+  });
   ASSERT_FALSE(res.HasError());
-  res = main_store_handler.RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID, ReplicationClientConfig{
-                                                                                          .name = replicas[1],
-                                                                                          .mode = ReplicationMode::SYNC,
-                                                                                          .ip_address = local_host,
-                                                                                          .port = ports[1],
-                                                                                      });
+  res = main->repl_handler.RegisterReplica(ReplicationClientConfig{
+      .name = replicas[1],
+      .mode = ReplicationMode::SYNC,
+      .ip_address = local_host,
+      .port = ports[1],
+  });
   ASSERT_FALSE(res.HasError());
 
-  auto replica_infos = main_store->ReplicasInfo();
+  auto replica_infos = main->db.storage()->ReplicasInfo();
 
   ASSERT_EQ(replica_infos.size(), 2);
   ASSERT_EQ(replica_infos[0].name, replicas[0]);
@@ -968,11 +985,11 @@ TEST_F(ReplicationTest, RestoringReplicationAtStartupAfterDroppingReplica) {
   ASSERT_EQ(replica_infos[1].endpoint.address, local_host);
   ASSERT_EQ(replica_infos[1].endpoint.port, ports[1]);
 
-  main_store.reset();
+  main.reset();
 
-  std::unique_ptr<Storage> other_main_store{new InMemoryStorage(main_config)};
+  MinMemgraph other_main(main_config);
 
-  replica_infos = other_main_store->ReplicasInfo();
+  replica_infos = other_main.db.storage()->ReplicasInfo();
   ASSERT_EQ(replica_infos.size(), 2);
   ASSERT_EQ(replica_infos[0].name, replicas[0]);
   ASSERT_EQ(replica_infos[0].endpoint.address, local_host);
@@ -983,43 +1000,39 @@ TEST_F(ReplicationTest, RestoringReplicationAtStartupAfterDroppingReplica) {
 }
 
 TEST_F(ReplicationTest, RestoringReplicationAtStartup) {
-  auto main_config = configuration;
+  auto main_config = main_conf;
   main_config.durability.restore_replication_state_on_startup = true;
 
-  std::unique_ptr<Storage> main_store{new InMemoryStorage(main_config)};
-  std::unique_ptr<Storage> replica_store1{new InMemoryStorage(configuration)};
+  std::optional<MinMemgraph> main(main_config);
+  MinMemgraph replica1(repl_conf);
 
-  auto replica1_store_handler = ReplicationHandler{replica_store1->repl_state_, *replica_store1};
-  replica1_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  replica1.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = ports[0],
   });
 
-  std::unique_ptr<Storage> replica_store2{new InMemoryStorage(configuration)};
+  MinMemgraph replica2(repl2_conf);
 
-  auto replica2_store_handler = ReplicationHandler{replica_store2->repl_state_, *replica_store2};
-  replica2_store_handler.SetReplicationRoleReplica(ReplicationServerConfig{
+  replica2.repl_handler.SetReplicationRoleReplica(ReplicationServerConfig{
       .ip_address = local_host,
       .port = ports[1],
   });
-  auto main_store_handler = ReplicationHandler{main_store->repl_state_, *main_store};
-  auto res =
-      main_store_handler.RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID, ReplicationClientConfig{
-                                                                                        .name = replicas[0],
-                                                                                        .mode = ReplicationMode::SYNC,
-                                                                                        .ip_address = local_host,
-                                                                                        .port = ports[0],
-                                                                                    });
+  auto res = main->repl_handler.RegisterReplica(ReplicationClientConfig{
+      .name = replicas[0],
+      .mode = ReplicationMode::SYNC,
+      .ip_address = local_host,
+      .port = ports[0],
+  });
   ASSERT_FALSE(res.HasError());
-  res = main_store_handler.RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID, ReplicationClientConfig{
-                                                                                          .name = replicas[1],
-                                                                                          .mode = ReplicationMode::SYNC,
-                                                                                          .ip_address = local_host,
-                                                                                          .port = ports[1],
-                                                                                      });
+  res = main->repl_handler.RegisterReplica(ReplicationClientConfig{
+      .name = replicas[1],
+      .mode = ReplicationMode::SYNC,
+      .ip_address = local_host,
+      .port = ports[1],
+  });
   ASSERT_FALSE(res.HasError());
 
-  auto replica_infos = main_store->ReplicasInfo();
+  auto replica_infos = main->db.storage()->ReplicasInfo();
 
   ASSERT_EQ(replica_infos.size(), 2);
   ASSERT_EQ(replica_infos[0].name, replicas[0]);
@@ -1029,20 +1042,21 @@ TEST_F(ReplicationTest, RestoringReplicationAtStartup) {
   ASSERT_EQ(replica_infos[1].endpoint.address, local_host);
   ASSERT_EQ(replica_infos[1].endpoint.port, ports[1]);
 
-  auto handler = ReplicationHandler{main_store->repl_state_, *main_store};
+  auto handler = main->repl_handler;
   const auto unregister_res = handler.UnregisterReplica(replicas[0]);
   ASSERT_EQ(unregister_res, UnregisterReplicaResult::SUCCESS);
 
-  replica_infos = main_store->ReplicasInfo();
+  replica_infos = main->db.storage()->ReplicasInfo();
   ASSERT_EQ(replica_infos.size(), 1);
   ASSERT_EQ(replica_infos[0].name, replicas[1]);
   ASSERT_EQ(replica_infos[0].endpoint.address, local_host);
   ASSERT_EQ(replica_infos[0].endpoint.port, ports[1]);
 
-  main_store.reset();
+  main.reset();
 
-  std::unique_ptr<Storage> other_main_store{new InMemoryStorage(main_config)};
-  replica_infos = other_main_store->ReplicasInfo();
+  MinMemgraph other_main(main_config);
+
+  replica_infos = other_main.db.storage()->ReplicasInfo();
   ASSERT_EQ(replica_infos.size(), 1);
   ASSERT_EQ(replica_infos[0].name, replicas[1]);
   ASSERT_EQ(replica_infos[0].endpoint.address, local_host);
@@ -1050,16 +1064,14 @@ TEST_F(ReplicationTest, RestoringReplicationAtStartup) {
 }
 
 TEST_F(ReplicationTest, AddingInvalidReplica) {
-  std::unique_ptr<Storage> main_store{new InMemoryStorage(configuration)};
+  MinMemgraph main(main_conf);
 
-  auto main_store_handler = ReplicationHandler{main_store->repl_state_, *main_store};
-  ASSERT_TRUE(main_store_handler
-                  .RegisterReplica(RegistrationMode::MUST_BE_INSTANTLY_VALID,
-                                   ReplicationClientConfig{
-                                       .name = "REPLICA",
-                                       .mode = ReplicationMode::SYNC,
-                                       .ip_address = local_host,
-                                       .port = ports[0],
-                                   })
+  ASSERT_TRUE(main.repl_handler
+                  .RegisterReplica(ReplicationClientConfig{
+                      .name = "REPLICA",
+                      .mode = ReplicationMode::SYNC,
+                      .ip_address = local_host,
+                      .port = ports[0],
+                  })
                   .GetError() == RegisterReplicaError::CONNECTION_FAILED);
 }
diff --git a/tests/unit/storage_v2_storage_mode.cpp b/tests/unit/storage_v2_storage_mode.cpp
index 6656acf02..3daea2e69 100644
--- a/tests/unit/storage_v2_storage_mode.cpp
+++ b/tests/unit/storage_v2_storage_mode.cpp
@@ -43,7 +43,7 @@ TEST_P(StorageModeTest, Mode) {
       std::make_unique<memgraph::storage::InMemoryStorage>(memgraph::storage::Config{
           .transaction{.isolation_level = memgraph::storage::IsolationLevel::SNAPSHOT_ISOLATION}});
 
-  storage->SetStorageMode(storage_mode);
+  static_cast<memgraph::storage::InMemoryStorage *>(storage.get())->SetStorageMode(storage_mode);
   auto creator = storage->Access();
   auto other_analytics_mode_reader = storage->Access();
 
@@ -75,9 +75,11 @@ class StorageModeMultiTxTest : public ::testing::Test {
     return tmp;
   }();  // iile
 
-  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk{memgraph::storage::Config{
-      .durability.storage_directory = data_directory, .disk.main_storage_directory = data_directory / "disk"}};
+  memgraph::storage::Config config{.durability.storage_directory = data_directory,
+                                   .disk.main_storage_directory = data_directory / "disk"};
 
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk{config, repl_state};
   memgraph::dbms::DatabaseAccess db{
       [&]() {
         auto db_acc_opt = db_gk.access();
@@ -86,8 +88,7 @@ class StorageModeMultiTxTest : public ::testing::Test {
         return db_acc;
       }()  // iile
   };
-
-  memgraph::query::InterpreterContext interpreter_context{{}, nullptr};
+  memgraph::query::InterpreterContext interpreter_context{{}, nullptr, &repl_state};
   InterpreterFaker running_interpreter{&interpreter_context, db}, main_interpreter{&interpreter_context, db};
 };
 
diff --git a/tests/unit/transaction_queue.cpp b/tests/unit/transaction_queue.cpp
index 45aad1588..d031b76b0 100644
--- a/tests/unit/transaction_queue.cpp
+++ b/tests/unit/transaction_queue.cpp
@@ -31,7 +31,8 @@ class TransactionQueueSimpleTest : public ::testing::Test {
  protected:
   const std::string testSuite = "transactin_queue";
   std::filesystem::path data_directory{std::filesystem::temp_directory_path() / "MG_tests_unit_transaction_queue_intr"};
-  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk{
+
+  memgraph::storage::Config config{
       [&]() {
         memgraph::storage::Config config{};
         config.durability.storage_directory = data_directory;
@@ -44,6 +45,8 @@ class TransactionQueueSimpleTest : public ::testing::Test {
       }()  // iile
   };
 
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk{config, repl_state};
   memgraph::dbms::DatabaseAccess db{
       [&]() {
         auto db_acc_opt = db_gk.access();
@@ -56,7 +59,7 @@ class TransactionQueueSimpleTest : public ::testing::Test {
         return db_acc;
       }()  // iile
   };
-  memgraph::query::InterpreterContext interpreter_context{{}, nullptr};
+  memgraph::query::InterpreterContext interpreter_context{{}, nullptr, &repl_state};
   InterpreterFaker running_interpreter{&interpreter_context, db}, main_interpreter{&interpreter_context, db};
 
   void TearDown() override {
diff --git a/tests/unit/transaction_queue_multiple.cpp b/tests/unit/transaction_queue_multiple.cpp
index f5f9941d1..0b6cdf635 100644
--- a/tests/unit/transaction_queue_multiple.cpp
+++ b/tests/unit/transaction_queue_multiple.cpp
@@ -39,7 +39,8 @@ class TransactionQueueMultipleTest : public ::testing::Test {
   const std::string testSuite = "transactin_queue_multiple";
   std::filesystem::path data_directory{std::filesystem::temp_directory_path() /
                                        "MG_tests_unit_transaction_queue_multiple_intr"};
-  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk{
+
+  memgraph::storage::Config config{
       [&]() {
         memgraph::storage::Config config{};
         config.durability.storage_directory = data_directory;
@@ -52,6 +53,8 @@ class TransactionQueueMultipleTest : public ::testing::Test {
       }()  // iile
   };
 
+  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
+  memgraph::utils::Gatekeeper<memgraph::dbms::Database> db_gk{config, repl_state};
   memgraph::dbms::DatabaseAccess db{
       [&]() {
         auto db_acc_opt = db_gk.access();
@@ -65,7 +68,7 @@ class TransactionQueueMultipleTest : public ::testing::Test {
       }()  // iile
   };
 
-  memgraph::query::InterpreterContext interpreter_context{{}, nullptr};
+  memgraph::query::InterpreterContext interpreter_context{{}, nullptr, &repl_state};
   InterpreterFaker main_interpreter{&interpreter_context, db};
   std::vector<InterpreterFaker *> running_interpreters;