diff --git a/.github/workflows/diff.yaml b/.github/workflows/diff.yaml
index 49b7d4273..8b8ee607f 100644
--- a/.github/workflows/diff.yaml
+++ b/.github/workflows/diff.yaml
@@ -257,6 +257,17 @@ jobs:
           --organization-name $MEMGRAPH_ORGANIZATION_NAME \
           test-memgraph drivers
 
+      - name: Run HA driver tests
+        run: |
+          ./release/package/mgbuild.sh \
+          --toolchain $TOOLCHAIN \
+          --os $OS \
+          --arch $ARCH \
+          --threads $THREADS \
+          --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+          --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+          test-memgraph drivers-high-availability
+
       - name: Run integration tests
         run: |
           ./release/package/mgbuild.sh \
@@ -278,7 +289,7 @@ jobs:
           --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
           --organization-name $MEMGRAPH_ORGANIZATION_NAME \
           test-memgraph cppcheck-and-clang-format
-      
+
       - name: Save cppcheck and clang-format errors
         uses: actions/upload-artifact@v4
         with:
diff --git a/include/_mgp.hpp b/include/_mgp.hpp
index 8b67bc36a..b1d9e26d5 100644
--- a/include/_mgp.hpp
+++ b/include/_mgp.hpp
@@ -326,6 +326,21 @@ inline mgp_vertex *graph_get_vertex_by_id(mgp_graph *g, mgp_vertex_id id, mgp_me
   return MgInvoke<mgp_vertex *>(mgp_graph_get_vertex_by_id, g, id, memory);
 }
 
+inline bool graph_has_text_index(mgp_graph *graph, const char *index_name) {
+  return MgInvoke<int>(mgp_graph_has_text_index, graph, index_name);
+}
+
+inline mgp_map *graph_search_text_index(mgp_graph *graph, const char *index_name, const char *search_query,
+                                        text_search_mode search_mode, mgp_memory *memory) {
+  return MgInvoke<mgp_map *>(mgp_graph_search_text_index, graph, index_name, search_query, search_mode, memory);
+}
+
+inline mgp_map *graph_aggregate_over_text_index(mgp_graph *graph, const char *index_name, const char *search_query,
+                                                const char *aggregation_query, mgp_memory *memory) {
+  return MgInvoke<mgp_map *>(mgp_graph_aggregate_over_text_index, graph, index_name, search_query, aggregation_query,
+                             memory);
+}
+
 inline mgp_vertices_iterator *graph_iter_vertices(mgp_graph *g, mgp_memory *memory) {
   return MgInvoke<mgp_vertices_iterator *>(mgp_graph_iter_vertices, g, memory);
 }
diff --git a/include/mg_procedure.h b/include/mg_procedure.h
index 93ef241d8..117dc66ab 100644
--- a/include/mg_procedure.h
+++ b/include/mg_procedure.h
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -891,6 +891,36 @@ enum mgp_error mgp_edge_iter_properties(struct mgp_edge *e, struct mgp_memory *m
 enum mgp_error mgp_graph_get_vertex_by_id(struct mgp_graph *g, struct mgp_vertex_id id, struct mgp_memory *memory,
                                           struct mgp_vertex **result);
 
+/// Result is non-zero if the index with the given name exists.
+/// The current implementation always returns without errors.
+enum mgp_error mgp_graph_has_text_index(struct mgp_graph *graph, const char *index_name, int *result);
+
+/// Available modes of searching text indices.
+MGP_ENUM_CLASS text_search_mode{
+    SPECIFIED_PROPERTIES,
+    REGEX,
+    ALL_PROPERTIES,
+};
+
+/// Search the named text index for the given query. The result is a map with the "search_results" and "error_msg" keys.
+/// The "search_results" key contains the vertices whose text-indexed properties match the given query.
+/// In case of a Tantivy error, the "search_results" key is absent, and "error_msg" contains the error message.
+/// Return mgp_error::MGP_ERROR_UNABLE_TO_ALLOCATE if there’s an allocation error while constructing the results map.
+/// Return mgp_error::MGP_ERROR_KEY_ALREADY_EXISTS if the same key is being created in the results map more than once.
+enum mgp_error mgp_graph_search_text_index(struct mgp_graph *graph, const char *index_name, const char *search_query,
+                                           enum text_search_mode search_mode, struct mgp_memory *memory,
+                                           struct mgp_map **result);
+
+/// Aggregate over the results of a search over the named text index. The result is a map with the "aggregation_results"
+/// and "error_msg" keys.
+/// The "aggregation_results" key contains the stringified result of aggregating over the search matches.
+/// In case of a Tantivy error, the "aggregation_results" key is absent, and "error_msg" contains the error message.
+/// Return mgp_error::MGP_ERROR_UNABLE_TO_ALLOCATE if there’s an allocation error while constructing the results map.
+/// Return mgp_error::MGP_ERROR_KEY_ALREADY_EXISTS if the same key is being created in the results map more than once.
+enum mgp_error mgp_graph_aggregate_over_text_index(struct mgp_graph *graph, const char *index_name,
+                                                   const char *search_query, const char *aggregation_query,
+                                                   struct mgp_memory *memory, struct mgp_map **result);
+
 /// Creates label index for given label.
 /// mgp_error::MGP_ERROR_NO_ERROR is always returned.
 /// if label index already exists, result will be 0, otherwise 1.
diff --git a/include/mgp.hpp b/include/mgp.hpp
index 3f7ed591e..f35231062 100644
--- a/include/mgp.hpp
+++ b/include/mgp.hpp
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -32,6 +32,15 @@
 
 namespace mgp {
 
+class TextSearchException : public std::exception {
+ public:
+  explicit TextSearchException(std::string message) : message_(std::move(message)) {}
+  const char *what() const noexcept override { return message_.c_str(); }
+
+ private:
+  std::string message_;
+};
+
 class IndexException : public std::exception {
  public:
   explicit IndexException(std::string message) : message_(std::move(message)) {}
@@ -4306,12 +4315,12 @@ inline void AddParamsReturnsToProc(mgp_proc *proc, std::vector<Parameter> &param
 }
 }  // namespace detail
 
-inline bool CreateLabelIndex(mgp_graph *memgaph_graph, const std::string_view label) {
-  return create_label_index(memgaph_graph, label.data());
+inline bool CreateLabelIndex(mgp_graph *memgraph_graph, const std::string_view label) {
+  return create_label_index(memgraph_graph, label.data());
 }
 
-inline bool DropLabelIndex(mgp_graph *memgaph_graph, const std::string_view label) {
-  return drop_label_index(memgaph_graph, label.data());
+inline bool DropLabelIndex(mgp_graph *memgraph_graph, const std::string_view label) {
+  return drop_label_index(memgraph_graph, label.data());
 }
 
 inline List ListAllLabelIndices(mgp_graph *memgraph_graph) {
@@ -4322,14 +4331,14 @@ inline List ListAllLabelIndices(mgp_graph *memgraph_graph) {
   return List(label_indices);
 }
 
-inline bool CreateLabelPropertyIndex(mgp_graph *memgaph_graph, const std::string_view label,
+inline bool CreateLabelPropertyIndex(mgp_graph *memgraph_graph, const std::string_view label,
                                      const std::string_view property) {
-  return create_label_property_index(memgaph_graph, label.data(), property.data());
+  return create_label_property_index(memgraph_graph, label.data(), property.data());
 }
 
-inline bool DropLabelPropertyIndex(mgp_graph *memgaph_graph, const std::string_view label,
+inline bool DropLabelPropertyIndex(mgp_graph *memgraph_graph, const std::string_view label,
                                    const std::string_view property) {
-  return drop_label_property_index(memgaph_graph, label.data(), property.data());
+  return drop_label_property_index(memgraph_graph, label.data(), property.data());
 }
 
 inline List ListAllLabelPropertyIndices(mgp_graph *memgraph_graph) {
@@ -4340,6 +4349,58 @@ inline List ListAllLabelPropertyIndices(mgp_graph *memgraph_graph) {
   return List(label_property_indices);
 }
 
+// Header-scope constants shared by the inline text-search helpers below.
+// `inline constexpr` (not an anonymous namespace): unnamed namespaces in a
+// header give internal linkage, an ODR trap for the inline functions using them.
+inline constexpr std::string_view kErrorMsgKey = "error_msg";
+inline constexpr std::string_view kSearchResultsKey = "search_results";
+inline constexpr std::string_view kAggregationResultsKey = "aggregation_results";
+
+inline List SearchTextIndex(mgp_graph *memgraph_graph, std::string_view index_name, std::string_view search_query,
+                            text_search_mode search_mode) {
+  auto results_or_error = Map(mgp::MemHandlerCallback(graph_search_text_index, memgraph_graph, index_name.data(),
+                                                      search_query.data(), search_mode));
+  if (results_or_error.KeyExists(kErrorMsgKey)) {
+    if (!results_or_error.At(kErrorMsgKey).IsString()) {
+      throw TextSearchException{"The error message is not a string!"};
+    }
+    throw TextSearchException(results_or_error.At(kErrorMsgKey).ValueString().data());
+  }
+
+  if (!results_or_error.KeyExists(kSearchResultsKey)) {
+    throw TextSearchException{"Incomplete text index search results!"};
+  }
+
+  if (!results_or_error.At(kSearchResultsKey).IsList()) {
+    throw TextSearchException{"Text index search results have wrong type!"};
+  }
+
+  return results_or_error.At(kSearchResultsKey).ValueList();
+}
+
+/// Run an aggregation over the named text index and return its result (a string).
+/// Throws TextSearchException on a reported error, a missing result key, or a
+/// result of the wrong type. Returns std::string by value: the underlying data
+/// lives in the local results map, so returning a view would dangle.
+inline std::string AggregateOverTextIndex(mgp_graph *memgraph_graph, std::string_view index_name,
+                                          std::string_view search_query, std::string_view aggregation_query) {
+  auto results_or_error =
+      Map(mgp::MemHandlerCallback(graph_aggregate_over_text_index, memgraph_graph, index_name.data(),
+                                  search_query.data(), aggregation_query.data()));
+
+  if (results_or_error.KeyExists(kErrorMsgKey)) {
+    if (!results_or_error.At(kErrorMsgKey).IsString()) {
+      throw TextSearchException{"The error message is not a string!"};
+    }
+    throw TextSearchException(results_or_error.At(kErrorMsgKey).ValueString().data());
+  }
+
+  if (!results_or_error.KeyExists(kAggregationResultsKey)) {
+    throw TextSearchException{"Incomplete text index aggregation results!"};
+  }
+
+  if (!results_or_error.At(kAggregationResultsKey).IsString()) {
+    throw TextSearchException{"Text index aggregation results have wrong type!"};
+  }
+  return std::string(results_or_error.At(kAggregationResultsKey).ValueString());
+}
+
 inline bool CreateExistenceConstraint(mgp_graph *memgraph_graph, const std::string_view label,
                                       const std::string_view property) {
   return create_existence_constraint(memgraph_graph, label.data(), property.data());
diff --git a/libs/CMakeLists.txt b/libs/CMakeLists.txt
index 7d568d548..ab6a313f1 100644
--- a/libs/CMakeLists.txt
+++ b/libs/CMakeLists.txt
@@ -295,6 +295,32 @@ set_path_external_library(jemalloc STATIC
 
 import_header_library(rangev3 ${CMAKE_CURRENT_SOURCE_DIR}/rangev3/include)
 
+ExternalProject_Add(mgcxx-proj
+  PREFIX         mgcxx-proj
+  GIT_REPOSITORY https://github.com/memgraph/mgcxx
+  GIT_TAG        "v0.0.4"
+  CMAKE_ARGS
+    "-DCMAKE_INSTALL_PREFIX=<INSTALL_DIR>"
+    "-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}"
+    "-DENABLE_TESTS=OFF"
+  INSTALL_DIR    "${PROJECT_BINARY_DIR}/mgcxx"
+)
+ExternalProject_Get_Property(mgcxx-proj install_dir)
+set(MGCXX_ROOT ${install_dir})
+
+add_library(tantivy_text_search STATIC IMPORTED GLOBAL)
+add_dependencies(tantivy_text_search mgcxx-proj)
+set_property(TARGET tantivy_text_search PROPERTY IMPORTED_LOCATION ${MGCXX_ROOT}/lib/libtantivy_text_search.a)
+
+add_library(mgcxx_text_search STATIC IMPORTED GLOBAL)
+add_dependencies(mgcxx_text_search mgcxx-proj)
+set_property(TARGET mgcxx_text_search PROPERTY IMPORTED_LOCATION ${MGCXX_ROOT}/lib/libmgcxx_text_search.a)
+# We need to create the include directory first in order to be able to add it
+# as an include directory. The header files in the include directory will be
+# generated later during the build process.
+file(MAKE_DIRECTORY ${MGCXX_ROOT}/include)
+set_property(TARGET mgcxx_text_search PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${MGCXX_ROOT}/include)
+
 # Setup NuRaft
 import_external_library(nuraft STATIC
   ${CMAKE_CURRENT_SOURCE_DIR}/nuraft/lib/libnuraft.a
diff --git a/query_modules/CMakeLists.txt b/query_modules/CMakeLists.txt
index 41dbb495c..1336f3eb0 100644
--- a/query_modules/CMakeLists.txt
+++ b/query_modules/CMakeLists.txt
@@ -6,6 +6,8 @@ project(memgraph_query_modules)
 
 disallow_in_source_build()
 
+find_package(fmt REQUIRED)
+
 # Everything that is installed here, should be under the "query_modules" component.
 set(CMAKE_INSTALL_DEFAULT_COMPONENT_NAME "query_modules")
 string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type)
@@ -58,6 +60,22 @@ install(PROGRAMS $<TARGET_FILE:schema>
 # Also install the source of the example, so user can read it.
 install(FILES schema.cpp DESTINATION lib/memgraph/query_modules/src)
 
+add_library(text SHARED text_search_module.cpp)
+target_include_directories(text PRIVATE ${CMAKE_SOURCE_DIR}/include)
+target_compile_options(text PRIVATE -Wall)
+target_link_libraries(text PRIVATE -static-libgcc -static-libstdc++ fmt::fmt)
+# Strip C++ example in release build.
+if (lower_build_type STREQUAL "release")
+  add_custom_command(TARGET text POST_BUILD
+                     COMMAND strip -s $<TARGET_FILE:text>
+                     COMMENT "Stripping symbols and sections from the C++ text_search module")
+endif()
+install(PROGRAMS $<TARGET_FILE:text>
+        DESTINATION lib/memgraph/query_modules
+        RENAME text.so)
+# Also install the source of the example, so user can read it.
+install(FILES text_search_module.cpp DESTINATION lib/memgraph/query_modules/src)
+
 # Install the Python example and modules
 install(FILES example.py DESTINATION lib/memgraph/query_modules RENAME py_example.py)
 install(FILES graph_analyzer.py DESTINATION lib/memgraph/query_modules)
diff --git a/query_modules/text_search_module.cpp b/query_modules/text_search_module.cpp
new file mode 100644
index 000000000..8e4405058
--- /dev/null
+++ b/query_modules/text_search_module.cpp
@@ -0,0 +1,149 @@
+// Copyright 2024 Memgraph Ltd.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+// License, and you may not use this file except in compliance with the Business Source License.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+#include <string>
+#include <string_view>
+
+#include <fmt/format.h>
+
+#include <mgp.hpp>
+
+namespace TextSearch {
+constexpr std::string_view kProcedureSearch = "search";
+constexpr std::string_view kProcedureRegexSearch = "regex_search";
+constexpr std::string_view kProcedureSearchAllProperties = "search_all";
+constexpr std::string_view kProcedureAggregate = "aggregate";
+constexpr std::string_view kParameterIndexName = "index_name";
+constexpr std::string_view kParameterSearchQuery = "search_query";
+constexpr std::string_view kParameterAggregationQuery = "aggregation_query";
+constexpr std::string_view kReturnNode = "node";
+constexpr std::string_view kReturnAggregation = "aggregation";
+const std::string kSearchAllPrefix = "all";
+
+void Search(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory);
+void RegexSearch(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory);
+void SearchAllProperties(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory);
+void Aggregate(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory);
+}  // namespace TextSearch
+
+void TextSearch::Search(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory) {
+  mgp::MemoryDispatcherGuard guard{memory};
+  const auto record_factory = mgp::RecordFactory(result);
+  auto arguments = mgp::List(args);
+
+  try {
+    const auto *index_name = arguments[0].ValueString().data();
+    const auto *search_query = arguments[1].ValueString().data();
+    for (const auto &node :
+         mgp::SearchTextIndex(memgraph_graph, index_name, search_query, text_search_mode::SPECIFIED_PROPERTIES)) {
+      auto record = record_factory.NewRecord();
+      record.Insert(TextSearch::kReturnNode.data(), node.ValueNode());
+    }
+  } catch (const std::exception &e) {
+    record_factory.SetErrorMessage(e.what());
+  }
+}
+
+void TextSearch::RegexSearch(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory) {
+  mgp::MemoryDispatcherGuard guard{memory};
+  const auto record_factory = mgp::RecordFactory(result);
+  auto arguments = mgp::List(args);
+
+  try {
+    const auto *index_name = arguments[0].ValueString().data();
+    const auto *search_query = arguments[1].ValueString().data();
+    for (const auto &node : mgp::SearchTextIndex(memgraph_graph, index_name, search_query, text_search_mode::REGEX)) {
+      auto record = record_factory.NewRecord();
+      record.Insert(TextSearch::kReturnNode.data(), node.ValueNode());
+    }
+  } catch (const std::exception &e) {
+    record_factory.SetErrorMessage(e.what());
+  }
+}
+
+// Search all indexed properties: prefixes the query with "all:" and runs the
+// index search in ALL_PROPERTIES mode, emitting one record per matching node.
+void TextSearch::SearchAllProperties(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result,
+                                     mgp_memory *memory) {
+  mgp::MemoryDispatcherGuard guard{memory};
+  const auto record_factory = mgp::RecordFactory(result);
+  auto arguments = mgp::List(args);
+
+  try {
+    const auto *index_name = arguments[0].ValueString().data();
+    // Named local keeps the formatted string alive; .data() on the fmt::format temporary would dangle.
+    const auto search_query = fmt::format("{}:{}", kSearchAllPrefix, arguments[1].ValueString());
+    for (const auto &node :
+         mgp::SearchTextIndex(memgraph_graph, index_name, search_query, text_search_mode::ALL_PROPERTIES)) {
+      auto record = record_factory.NewRecord();
+      record.Insert(TextSearch::kReturnNode.data(), node.ValueNode());
+    }
+  } catch (const std::exception &e) {
+    record_factory.SetErrorMessage(e.what());
+  }
+}
+
+void TextSearch::Aggregate(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory) {
+  mgp::MemoryDispatcherGuard guard{memory};
+  const auto record_factory = mgp::RecordFactory(result);
+  auto arguments = mgp::List(args);
+
+  try {
+    const auto *index_name = arguments[0].ValueString().data();
+    const auto *search_query = arguments[1].ValueString().data();
+    const auto *aggregation_query = arguments[2].ValueString().data();
+    const auto aggregation_result =
+        mgp::AggregateOverTextIndex(memgraph_graph, index_name, search_query, aggregation_query);
+    auto record = record_factory.NewRecord();
+    record.Insert(TextSearch::kReturnAggregation.data(), aggregation_result.data());
+  } catch (const std::exception &e) {
+    record_factory.SetErrorMessage(e.what());
+  }
+}
+
+extern "C" int mgp_init_module(struct mgp_module *module, struct mgp_memory *memory) {
+  try {
+    mgp::MemoryDispatcherGuard guard{memory};
+
+    AddProcedure(TextSearch::Search, TextSearch::kProcedureSearch, mgp::ProcedureType::Read,
+                 {
+                     mgp::Parameter(TextSearch::kParameterIndexName, mgp::Type::String),
+                     mgp::Parameter(TextSearch::kParameterSearchQuery, mgp::Type::String),
+                 },
+                 {mgp::Return(TextSearch::kReturnNode, mgp::Type::Node)}, module, memory);
+
+    AddProcedure(TextSearch::RegexSearch, TextSearch::kProcedureRegexSearch, mgp::ProcedureType::Read,
+                 {
+                     mgp::Parameter(TextSearch::kParameterIndexName, mgp::Type::String),
+                     mgp::Parameter(TextSearch::kParameterSearchQuery, mgp::Type::String),
+                 },
+                 {mgp::Return(TextSearch::kReturnNode, mgp::Type::Node)}, module, memory);
+
+    AddProcedure(TextSearch::SearchAllProperties, TextSearch::kProcedureSearchAllProperties, mgp::ProcedureType::Read,
+                 {
+                     mgp::Parameter(TextSearch::kParameterIndexName, mgp::Type::String),
+                     mgp::Parameter(TextSearch::kParameterSearchQuery, mgp::Type::String),
+                 },
+                 {mgp::Return(TextSearch::kReturnNode, mgp::Type::Node)}, module, memory);
+
+    AddProcedure(TextSearch::Aggregate, TextSearch::kProcedureAggregate, mgp::ProcedureType::Read,
+                 {
+                     mgp::Parameter(TextSearch::kParameterIndexName, mgp::Type::String),
+                     mgp::Parameter(TextSearch::kParameterSearchQuery, mgp::Type::String),
+                     mgp::Parameter(TextSearch::kParameterAggregationQuery, mgp::Type::String),
+                 },
+                 {mgp::Return(TextSearch::kReturnAggregation, mgp::Type::String)}, module, memory);
+  } catch (const std::exception &e) {
+    std::cerr << "Error while initializing query module: " << e.what() << std::endl;
+    return 1;
+  }
+
+  return 0;
+}
+
+extern "C" int mgp_shutdown_module() { return 0; }
diff --git a/release/package/mgbuild.sh b/release/package/mgbuild.sh
index e24776f60..934e962b7 100755
--- a/release/package/mgbuild.sh
+++ b/release/package/mgbuild.sh
@@ -48,9 +48,9 @@ SUPPORTED_ARCHS=(
 )
 SUPPORTED_TESTS=(
     clang-tidy cppcheck-and-clang-format code-analysis
-    code-coverage drivers durability e2e gql-behave
+    code-coverage drivers drivers-high-availability durability e2e gql-behave
     integration leftover-CTest macro-benchmark
-    mgbench stress-plain stress-ssl 
+    mgbench stress-plain stress-ssl
     unit unit-coverage upload-to-bench-graph
 
 )
@@ -116,7 +116,7 @@ print_help () {
 
   echo -e "\nToolchain v5 supported OSs:"
   echo -e "  \"${SUPPORTED_OS_V5[*]}\""
-  
+
   echo -e "\nExample usage:"
   echo -e "  $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd run"
   echo -e "  $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd --build-type RelWithDebInfo build-memgraph --community"
@@ -296,7 +296,7 @@ build_memgraph () {
     docker cp "$PROJECT_ROOT/." "$build_container:$MGBUILD_ROOT_DIR/"
   fi
   # Change ownership of copied files so the mg user inside container can access them
-  docker exec -u root $build_container bash -c "chown -R mg:mg $MGBUILD_ROOT_DIR" 
+  docker exec -u root $build_container bash -c "chown -R mg:mg $MGBUILD_ROOT_DIR"
 
   echo "Installing dependencies using '/memgraph/environment/os/$os.sh' script..."
   docker exec -u root "$build_container" bash -c "$MGBUILD_ROOT_DIR/environment/os/$os.sh check TOOLCHAIN_RUN_DEPS || /environment/os/$os.sh install TOOLCHAIN_RUN_DEPS"
@@ -318,10 +318,9 @@ build_memgraph () {
   # Define cmake command
   local cmake_cmd="cmake $build_type_flag $arm_flag $community_flag $telemetry_id_override_flag $coverage_flag $asan_flag $ubsan_flag .."
   docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && $ACTIVATE_CARGO && $cmake_cmd"
-  
   # ' is used instead of " because we need to run make within the allowed
   # container resources.
-  # Default value for $threads is 0 instead of $(nproc) because macos 
+  # Default value for $threads is 0 instead of $(nproc) because macos
   # doesn't support the nproc command.
   # 0 is set for default value and checked here because mgbuild containers
   # support nproc
@@ -363,7 +362,7 @@ copy_memgraph() {
       local container_output_path="$MGBUILD_ROOT_DIR/build/memgraph"
       local host_output_path="$PROJECT_ROOT/build/memgraph"
       mkdir -p "$PROJECT_ROOT/build"
-      docker cp -L $build_container:$container_output_path $host_output_path 
+      docker cp -L $build_container:$container_output_path $host_output_path
       echo "Binary saved to $host_output_path"
     ;;
     --build-logs)
@@ -371,7 +370,7 @@ copy_memgraph() {
       local container_output_path="$MGBUILD_ROOT_DIR/build/logs"
       local host_output_path="$PROJECT_ROOT/build/logs"
       mkdir -p "$PROJECT_ROOT/build"
-      docker cp -L $build_container:$container_output_path $host_output_path 
+      docker cp -L $build_container:$container_output_path $host_output_path
       echo "Build logs saved to $host_output_path"
     ;;
     --package)
@@ -418,6 +417,9 @@ test_memgraph() {
     drivers)
       docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR "'&& ./tests/drivers/run.sh'
     ;;
+    drivers-high-availability)
+      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR "'&& ./tests/drivers/run_cluster.sh'
+    ;;
     integration)
       docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR "'&& tests/integration/run.sh'
     ;;
@@ -664,4 +666,4 @@ case $command in
         echo "Error: Unknown command '$command'"
         exit 1
     ;;
-esac    
+esac
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 4d5d523c6..af88e624a 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -45,7 +45,7 @@ set(mg_single_node_v2_sources
 add_executable(memgraph ${mg_single_node_v2_sources})
 target_include_directories(memgraph PUBLIC ${CMAKE_SOURCE_DIR}/include)
 target_link_libraries(memgraph stdc++fs Threads::Threads
-        mg-telemetry mg-communication mg-communication-metrics mg-memory mg-utils mg-license mg-settings mg-glue mg-flags mg::system mg::replication_handler)
+        mg-telemetry mgcxx_text_search tantivy_text_search mg-communication mg-communication-metrics mg-memory mg-utils mg-license mg-settings mg-glue mg-flags mg::system mg::replication_handler)
 
 # NOTE: `include/mg_procedure.syms` describes a pattern match for symbols which
 # should be dynamically exported, so that `dlopen` can correctly link th
diff --git a/src/communication/bolt/v1/session.hpp b/src/communication/bolt/v1/session.hpp
index 2261a3234..55d8a7a54 100644
--- a/src/communication/bolt/v1/session.hpp
+++ b/src/communication/bolt/v1/session.hpp
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -88,6 +88,12 @@ class Session {
 
   virtual void Configure(const std::map<std::string, memgraph::communication::bolt::Value> &run_time_info) = 0;
 
+#ifdef MG_ENTERPRISE
+  virtual auto Route(std::map<std::string, Value> const &routing,
+                     std::vector<memgraph::communication::bolt::Value> const &bookmarks,
+                     std::map<std::string, Value> const &extra) -> std::map<std::string, Value> = 0;
+#endif
+
   /**
    * Put results of the processed query in the `encoder`.
    *
diff --git a/src/communication/bolt/v1/states/executing.hpp b/src/communication/bolt/v1/states/executing.hpp
index b58b3c39b..2ab2cacc2 100644
--- a/src/communication/bolt/v1/states/executing.hpp
+++ b/src/communication/bolt/v1/states/executing.hpp
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -79,9 +79,9 @@ State RunHandlerV4(Signature signature, TSession &session, State state, Marker m
     }
     case Signature::Route: {
       if constexpr (bolt_minor >= 3) {
-        if (signature == Signature::Route) return HandleRoute<TSession>(session, marker);
+        return HandleRoute<TSession>(session, marker);
       } else {
-        spdlog::trace("Supported only in bolt v4.3");
+        spdlog::trace("Supported only in bolt versions >= 4.3");
         return State::Close;
       }
     }
diff --git a/src/communication/bolt/v1/states/handlers.hpp b/src/communication/bolt/v1/states/handlers.hpp
index 3ffcb6f55..afdc37ad9 100644
--- a/src/communication/bolt/v1/states/handlers.hpp
+++ b/src/communication/bolt/v1/states/handlers.hpp
@@ -478,9 +478,6 @@ State HandleGoodbye() {
 
 template <typename TSession>
 State HandleRoute(TSession &session, const Marker marker) {
-  // Route message is not implemented since it is Neo4j specific, therefore we will receive it and inform user that
-  // there is no implementation. Before that, we have to read out the fields from the buffer to leave it in a clean
-  // state.
   if (marker != Marker::TinyStruct3) {
     spdlog::trace("Expected TinyStruct3 marker, but received 0x{:02x}!", utils::UnderlyingCast(marker));
     return State::Close;
@@ -496,11 +493,27 @@ State HandleRoute(TSession &session, const Marker marker) {
     spdlog::trace("Couldn't read bookmarks field!");
     return State::Close;
   }
+
+  // TODO: (andi) Fix Bolt versions
   Value db;
   if (!session.decoder_.ReadValue(&db)) {
     spdlog::trace("Couldn't read db field!");
     return State::Close;
   }
+
+#ifdef MG_ENTERPRISE
+  try {
+    auto res = session.Route(routing.ValueMap(), bookmarks.ValueList(), {});
+    if (!session.encoder_.MessageSuccess(std::move(res))) {
+      spdlog::trace("Couldn't send result of routing!");
+      return State::Close;
+    }
+    return State::Idle;
+  } catch (const std::exception &e) {
+    return HandleFailure(session, e);
+  }
+
+#else
   session.encoder_buffer_.Clear();
   bool fail_sent =
       session.encoder_.MessageFailure({{"code", "66"}, {"message", "Route message is not supported in Memgraph!"}});
@@ -509,6 +522,7 @@ State HandleRoute(TSession &session, const Marker marker) {
     return State::Close;
   }
   return State::Error;
+#endif
 }
 
 template <typename TSession>
diff --git a/src/coordination/CMakeLists.txt b/src/coordination/CMakeLists.txt
index ef9376a70..d39d3e738 100644
--- a/src/coordination/CMakeLists.txt
+++ b/src/coordination/CMakeLists.txt
@@ -6,7 +6,7 @@ target_sources(mg-coordination
         include/coordination/coordinator_state.hpp
         include/coordination/coordinator_rpc.hpp
         include/coordination/coordinator_server.hpp
-        include/coordination/coordinator_config.hpp
+        include/coordination/coordinator_communication_config.hpp
         include/coordination/coordinator_exceptions.hpp
         include/coordination/coordinator_slk.hpp
         include/coordination/coordinator_instance.hpp
@@ -23,7 +23,7 @@ target_sources(mg-coordination
         include/nuraft/coordinator_state_manager.hpp
 
         PRIVATE
-        coordinator_config.cpp
+        coordinator_communication_config.cpp
         coordinator_client.cpp
         coordinator_state.cpp
         coordinator_rpc.cpp
diff --git a/src/coordination/coordinator_client.cpp b/src/coordination/coordinator_client.cpp
index 8530faff3..44817ccfe 100644
--- a/src/coordination/coordinator_client.cpp
+++ b/src/coordination/coordinator_client.cpp
@@ -14,7 +14,7 @@
 
 #include "coordination/coordinator_client.hpp"
 
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "coordination/coordinator_rpc.hpp"
 #include "replication_coordination_glue/common.hpp"
 #include "replication_coordination_glue/messages.hpp"
@@ -23,18 +23,17 @@
 namespace memgraph::coordination {
 
 namespace {
-auto CreateClientContext(memgraph::coordination::CoordinatorClientConfig const &config)
+auto CreateClientContext(memgraph::coordination::CoordinatorToReplicaConfig const &config)
     -> communication::ClientContext {
   return (config.ssl) ? communication::ClientContext{config.ssl->key_file, config.ssl->cert_file}
                       : communication::ClientContext{};
 }
 }  // namespace
 
-CoordinatorClient::CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorClientConfig config,
+CoordinatorClient::CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorToReplicaConfig config,
                                      HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb)
     : rpc_context_{CreateClientContext(config)},
-      rpc_client_{io::network::Endpoint(io::network::Endpoint::needs_resolving, config.ip_address, config.port),
-                  &rpc_context_},
+      rpc_client_{config.mgt_server, &rpc_context_},
       config_{std::move(config)},
       coord_instance_{coord_instance},
       succ_cb_{std::move(succ_cb)},
@@ -86,7 +85,9 @@ void CoordinatorClient::StopFrequentCheck() { instance_checker_.Stop(); }
 void CoordinatorClient::PauseFrequentCheck() { instance_checker_.Pause(); }
 void CoordinatorClient::ResumeFrequentCheck() { instance_checker_.Resume(); }
 
-auto CoordinatorClient::ReplicationClientInfo() const -> ReplClientInfo { return config_.replication_client_info; }
+auto CoordinatorClient::ReplicationClientInfo() const -> coordination::ReplicationClientInfo {
+  return config_.replication_client_info;
+}
 
 auto CoordinatorClient::SendPromoteReplicaToMainRpc(const utils::UUID &uuid,
                                                     ReplicationClientsInfo replication_clients_info) const -> bool {
diff --git a/src/coordination/coordinator_cluster_state.cpp b/src/coordination/coordinator_cluster_state.cpp
index cf6e1a574..2ee95ae6d 100644
--- a/src/coordination/coordinator_cluster_state.cpp
+++ b/src/coordination/coordinator_cluster_state.cpp
@@ -18,101 +18,178 @@
 
 namespace memgraph::coordination {
 
-void to_json(nlohmann::json &j, InstanceState const &instance_state) {
-  j = nlohmann::json{{"config", instance_state.config}, {"status", instance_state.status}};
+void to_json(nlohmann::json &j, ReplicationInstanceState const &instance_state) {
+  j = nlohmann::json{
+      {"config", instance_state.config}, {"status", instance_state.status}, {"uuid", instance_state.instance_uuid}};
 }
 
-void from_json(nlohmann::json const &j, InstanceState &instance_state) {
+void from_json(nlohmann::json const &j, ReplicationInstanceState &instance_state) {
   j.at("config").get_to(instance_state.config);
   j.at("status").get_to(instance_state.status);
+  j.at("uuid").get_to(instance_state.instance_uuid);
 }
 
-CoordinatorClusterState::CoordinatorClusterState(std::map<std::string, InstanceState, std::less<>> instances)
-    : instances_{std::move(instances)} {}
+CoordinatorClusterState::CoordinatorClusterState(std::map<std::string, ReplicationInstanceState, std::less<>> instances,
+                                                 utils::UUID const &current_main_uuid, bool is_lock_opened)
+    : repl_instances_{std::move(instances)}, current_main_uuid_(current_main_uuid), is_lock_opened_(is_lock_opened) {}
 
-CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState const &other) : instances_{other.instances_} {}
+CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState const &other)
+    : repl_instances_{other.repl_instances_},
+      current_main_uuid_(other.current_main_uuid_),
+      is_lock_opened_(other.is_lock_opened_) {}
 
 CoordinatorClusterState &CoordinatorClusterState::operator=(CoordinatorClusterState const &other) {
   if (this == &other) {
     return *this;
   }
-  instances_ = other.instances_;
+  repl_instances_ = other.repl_instances_;
+  current_main_uuid_ = other.current_main_uuid_;
+  is_lock_opened_ = other.is_lock_opened_;
   return *this;
 }
 
 CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState &&other) noexcept
-    : instances_{std::move(other.instances_)} {}
+    : repl_instances_{std::move(other.repl_instances_)},
+      current_main_uuid_(other.current_main_uuid_),
+      is_lock_opened_(other.is_lock_opened_) {}
 
 CoordinatorClusterState &CoordinatorClusterState::operator=(CoordinatorClusterState &&other) noexcept {
   if (this == &other) {
     return *this;
   }
-  instances_ = std::move(other.instances_);
+  repl_instances_ = std::move(other.repl_instances_);
+  current_main_uuid_ = other.current_main_uuid_;
+  is_lock_opened_ = other.is_lock_opened_;
   return *this;
 }
 
 auto CoordinatorClusterState::MainExists() const -> bool {
   auto lock = std::shared_lock{log_lock_};
-  return std::ranges::any_of(instances_,
+  return std::ranges::any_of(repl_instances_,
                              [](auto const &entry) { return entry.second.status == ReplicationRole::MAIN; });
 }
 
-auto CoordinatorClusterState::IsMain(std::string_view instance_name) const -> bool {
+auto CoordinatorClusterState::HasMainState(std::string_view instance_name) const -> bool {
   auto lock = std::shared_lock{log_lock_};
-  auto const it = instances_.find(instance_name);
-  return it != instances_.end() && it->second.status == ReplicationRole::MAIN;
+  auto const it = repl_instances_.find(instance_name);
+  return it != repl_instances_.end() && it->second.status == ReplicationRole::MAIN;
 }
 
-auto CoordinatorClusterState::IsReplica(std::string_view instance_name) const -> bool {
+auto CoordinatorClusterState::HasReplicaState(std::string_view instance_name) const -> bool {
   auto lock = std::shared_lock{log_lock_};
-  auto const it = instances_.find(instance_name);
-  return it != instances_.end() && it->second.status == ReplicationRole::REPLICA;
+  auto const it = repl_instances_.find(instance_name);
+  return it != repl_instances_.end() && it->second.status == ReplicationRole::REPLICA;
 }
 
-auto CoordinatorClusterState::InsertInstance(std::string instance_name, InstanceState instance_state) -> void {
-  auto lock = std::lock_guard{log_lock_};
-  instances_.insert_or_assign(std::move(instance_name), std::move(instance_state));
+auto CoordinatorClusterState::IsCurrentMain(std::string_view instance_name) const -> bool {
+  auto lock = std::shared_lock{log_lock_};
+  auto const it = repl_instances_.find(instance_name);
+  return it != repl_instances_.end() && it->second.status == ReplicationRole::MAIN &&
+         it->second.instance_uuid == current_main_uuid_;
 }
 
 auto CoordinatorClusterState::DoAction(TRaftLog log_entry, RaftLogAction log_action) -> void {
   auto lock = std::lock_guard{log_lock_};
   switch (log_action) {
+      // end of OPEN_LOCK_REGISTER_REPLICATION_INSTANCE
     case RaftLogAction::REGISTER_REPLICATION_INSTANCE: {
-      auto const &config = std::get<CoordinatorClientConfig>(log_entry);
-      instances_[config.instance_name] = InstanceState{config, ReplicationRole::REPLICA};
+      auto const &config = std::get<CoordinatorToReplicaConfig>(log_entry);
+      spdlog::trace("DoAction: register replication instance {}", config.instance_name);
+      // Setting instance uuid to random, if registration fails, we are still in random state
+      repl_instances_.emplace(config.instance_name,
+                              ReplicationInstanceState{config, ReplicationRole::REPLICA, utils::UUID{}});
+      is_lock_opened_ = false;
       break;
     }
+      // end of OPEN_LOCK_UNREGISTER_REPLICATION_INSTANCE
     case RaftLogAction::UNREGISTER_REPLICATION_INSTANCE: {
       auto const instance_name = std::get<std::string>(log_entry);
-      instances_.erase(instance_name);
+      spdlog::trace("DoAction: unregister replication instance {}", instance_name);
+      repl_instances_.erase(instance_name);
+      is_lock_opened_ = false;
       break;
     }
+      // end of OPEN_LOCK_SET_INSTANCE_AS_MAIN and OPEN_LOCK_FAILOVER
     case RaftLogAction::SET_INSTANCE_AS_MAIN: {
-      auto const instance_name = std::get<std::string>(log_entry);
-      auto it = instances_.find(instance_name);
-      MG_ASSERT(it != instances_.end(), "Instance does not exist as part of raft state!");
+      auto const instance_uuid_change = std::get<InstanceUUIDUpdate>(log_entry);
+      auto it = repl_instances_.find(instance_uuid_change.instance_name);
+      MG_ASSERT(it != repl_instances_.end(), "Instance does not exist as part of raft state!");
       it->second.status = ReplicationRole::MAIN;
+      it->second.instance_uuid = instance_uuid_change.uuid;
+      is_lock_opened_ = false;
+      spdlog::trace("DoAction: set replication instance {} as main with uuid {}", instance_uuid_change.instance_name,
+                    std::string{instance_uuid_change.uuid});
       break;
     }
+      // end of OPEN_LOCK_SET_INSTANCE_AS_REPLICA
     case RaftLogAction::SET_INSTANCE_AS_REPLICA: {
       auto const instance_name = std::get<std::string>(log_entry);
-      auto it = instances_.find(instance_name);
-      MG_ASSERT(it != instances_.end(), "Instance does not exist as part of raft state!");
+      auto it = repl_instances_.find(instance_name);
+      MG_ASSERT(it != repl_instances_.end(), "Instance does not exist as part of raft state!");
       it->second.status = ReplicationRole::REPLICA;
+      is_lock_opened_ = false;
+      spdlog::trace("DoAction: set replication instance {} as replica", instance_name);
       break;
     }
-    case RaftLogAction::UPDATE_UUID: {
-      uuid_ = std::get<utils::UUID>(log_entry);
+    case RaftLogAction::UPDATE_UUID_OF_NEW_MAIN: {
+      current_main_uuid_ = std::get<utils::UUID>(log_entry);
+      spdlog::trace("DoAction: update uuid of new main {}", std::string{current_main_uuid_});
       break;
     }
+    case RaftLogAction::UPDATE_UUID_FOR_INSTANCE: {
+      auto const instance_uuid_change = std::get<InstanceUUIDUpdate>(log_entry);
+      auto it = repl_instances_.find(instance_uuid_change.instance_name);
+      MG_ASSERT(it != repl_instances_.end(), "Instance doesn't exist as part of RAFT state");
+      it->second.instance_uuid = instance_uuid_change.uuid;
+      spdlog::trace("DoAction: update uuid for instance {} to {}", instance_uuid_change.instance_name,
+                    std::string{instance_uuid_change.uuid});
+      break;
+    }
+    case RaftLogAction::ADD_COORDINATOR_INSTANCE: {
+      auto const &config = std::get<CoordinatorToCoordinatorConfig>(log_entry);
+      coordinators_.emplace_back(CoordinatorInstanceState{config});
+      spdlog::trace("DoAction: add coordinator instance {}", config.coordinator_server_id);
+      break;
+    }
+    case RaftLogAction::OPEN_LOCK_REGISTER_REPLICATION_INSTANCE: {
+      is_lock_opened_ = true;
+      spdlog::trace("DoAction: open lock register");
+      break;
+      // TODO(antoniofilipovic) save what we are doing to be able to undo....
+    }
+    case RaftLogAction::OPEN_LOCK_UNREGISTER_REPLICATION_INSTANCE: {
+      is_lock_opened_ = true;
+      spdlog::trace("DoAction: open lock unregister");
+      break;
+      // TODO(antoniofilipovic) save what we are doing
+    }
+    case RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_MAIN: {
+      is_lock_opened_ = true;
+      spdlog::trace("DoAction: open lock set instance as main");
+      break;
+      // TODO(antoniofilipovic) save what we are doing
+    }
+    case RaftLogAction::OPEN_LOCK_FAILOVER: {
+      is_lock_opened_ = true;
+      spdlog::trace("DoAction: open lock failover");
+      break;
+      // TODO(antoniofilipovic) save what we are doing
+    }
+    case RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_REPLICA: {
+      is_lock_opened_ = true;
+      spdlog::trace("DoAction: open lock set instance as replica");
+      break;
+      // TODO(antoniofilipovic) save what we need to undo
+    }
   }
 }
 
 auto CoordinatorClusterState::Serialize(ptr<buffer> &data) -> void {
   auto lock = std::shared_lock{log_lock_};
-
-  auto const log = nlohmann::json(instances_).dump();
-
+  nlohmann::json j = {{"repl_instances", repl_instances_},
+                      {"is_lock_opened", is_lock_opened_},
+                      {"current_main_uuid", current_main_uuid_}};
+  auto const log = j.dump();
   data = buffer::alloc(sizeof(uint32_t) + log.size());
   buffer_serializer bs(data);
   bs.put_str(log);
@@ -121,26 +198,34 @@ auto CoordinatorClusterState::Serialize(ptr<buffer> &data) -> void {
 auto CoordinatorClusterState::Deserialize(buffer &data) -> CoordinatorClusterState {
   buffer_serializer bs(data);
   auto const j = nlohmann::json::parse(bs.get_str());
-  auto instances = j.get<std::map<std::string, InstanceState, std::less<>>>();
-
-  return CoordinatorClusterState{std::move(instances)};
+  auto instances = j.at("repl_instances").get<std::map<std::string, ReplicationInstanceState, std::less<>>>();
+  auto current_main_uuid = j.at("current_main_uuid").get<utils::UUID>();
+  bool is_lock_opened = j.at("is_lock_opened").get<bool>();
+  return CoordinatorClusterState{std::move(instances), current_main_uuid, is_lock_opened};
 }
 
-auto CoordinatorClusterState::GetInstances() const -> std::vector<InstanceState> {
+auto CoordinatorClusterState::GetReplicationInstances() const -> std::vector<ReplicationInstanceState> {
   auto lock = std::shared_lock{log_lock_};
-  return instances_ | ranges::views::values | ranges::to<std::vector<InstanceState>>;
+  return repl_instances_ | ranges::views::values | ranges::to<std::vector<ReplicationInstanceState>>;
 }
 
-auto CoordinatorClusterState::GetUUID() const -> utils::UUID { return uuid_; }
+auto CoordinatorClusterState::GetCurrentMainUUID() const -> utils::UUID { return current_main_uuid_; }
 
-auto CoordinatorClusterState::FindCurrentMainInstanceName() const -> std::optional<std::string> {
+auto CoordinatorClusterState::GetInstanceUUID(std::string_view instance_name) const -> utils::UUID {
   auto lock = std::shared_lock{log_lock_};
-  auto const it =
-      std::ranges::find_if(instances_, [](auto const &entry) { return entry.second.status == ReplicationRole::MAIN; });
-  if (it == instances_.end()) {
-    return {};
-  }
-  return it->first;
+  auto const it = repl_instances_.find(instance_name);
+  MG_ASSERT(it != repl_instances_.end(), "Instance with that name doesn't exist.");
+  return it->second.instance_uuid;
+}
+
+auto CoordinatorClusterState::GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState> {
+  auto lock = std::shared_lock{log_lock_};
+  return coordinators_;
+}
+
+auto CoordinatorClusterState::IsLockOpened() const -> bool {
+  auto lock = std::shared_lock{log_lock_};
+  return is_lock_opened_;
 }
 
 }  // namespace memgraph::coordination
diff --git a/src/coordination/coordinator_config.cpp b/src/coordination/coordinator_communication_config.cpp
similarity index 50%
rename from src/coordination/coordinator_config.cpp
rename to src/coordination/coordinator_communication_config.cpp
index a1147d3b6..43e7fbc37 100644
--- a/src/coordination/coordinator_config.cpp
+++ b/src/coordination/coordinator_communication_config.cpp
@@ -11,43 +11,62 @@
 
 #ifdef MG_ENTERPRISE
 
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 
 namespace memgraph::coordination {
 
-void to_json(nlohmann::json &j, ReplClientInfo const &config) {
+void to_json(nlohmann::json &j, CoordinatorToCoordinatorConfig const &config) {
+  j = nlohmann::json{{"coordinator_server_id", config.coordinator_server_id},
+                     {"coordinator_server", config.coordinator_server},
+                     {"bolt_server", config.bolt_server}};
+}
+
+void from_json(nlohmann::json const &j, CoordinatorToCoordinatorConfig &config) {
+  config.coordinator_server_id = j.at("coordinator_server_id").get<uint32_t>();
+  config.coordinator_server = j.at("coordinator_server").get<io::network::Endpoint>();
+  config.bolt_server = j.at("bolt_server").get<io::network::Endpoint>();
+}
+
+void to_json(nlohmann::json &j, ReplicationClientInfo const &config) {
   j = nlohmann::json{{"instance_name", config.instance_name},
                      {"replication_mode", config.replication_mode},
-                     {"replication_ip_address", config.replication_ip_address},
-                     {"replication_port", config.replication_port}};
+                     {"replication_server", config.replication_server}};
 }
 
-void from_json(nlohmann::json const &j, ReplClientInfo &config) {
+void from_json(nlohmann::json const &j, ReplicationClientInfo &config) {
   config.instance_name = j.at("instance_name").get<std::string>();
   config.replication_mode = j.at("replication_mode").get<replication_coordination_glue::ReplicationMode>();
-  config.replication_ip_address = j.at("replication_ip_address").get<std::string>();
-  config.replication_port = j.at("replication_port").get<uint16_t>();
+  config.replication_server = j.at("replication_server").get<io::network::Endpoint>();
 }
 
-void to_json(nlohmann::json &j, CoordinatorClientConfig const &config) {
+void to_json(nlohmann::json &j, CoordinatorToReplicaConfig const &config) {
   j = nlohmann::json{{"instance_name", config.instance_name},
-                     {"ip_address", config.ip_address},
-                     {"port", config.port},
+                     {"mgt_server", config.mgt_server},
+                     {"bolt_server", config.bolt_server},
                      {"instance_health_check_frequency_sec", config.instance_health_check_frequency_sec.count()},
                      {"instance_down_timeout_sec", config.instance_down_timeout_sec.count()},
                      {"instance_get_uuid_frequency_sec", config.instance_get_uuid_frequency_sec.count()},
                      {"replication_client_info", config.replication_client_info}};
 }
 
-void from_json(nlohmann::json const &j, CoordinatorClientConfig &config) {
+void from_json(nlohmann::json const &j, CoordinatorToReplicaConfig &config) {
   config.instance_name = j.at("instance_name").get<std::string>();
-  config.ip_address = j.at("ip_address").get<std::string>();
-  config.port = j.at("port").get<uint16_t>();
+  config.mgt_server = j.at("mgt_server").get<io::network::Endpoint>();
+  config.bolt_server = j.at("bolt_server").get<io::network::Endpoint>();
   config.instance_health_check_frequency_sec =
       std::chrono::seconds{j.at("instance_health_check_frequency_sec").get<int>()};
   config.instance_down_timeout_sec = std::chrono::seconds{j.at("instance_down_timeout_sec").get<int>()};
   config.instance_get_uuid_frequency_sec = std::chrono::seconds{j.at("instance_get_uuid_frequency_sec").get<int>()};
-  config.replication_client_info = j.at("replication_client_info").get<ReplClientInfo>();
+  config.replication_client_info = j.at("replication_client_info").get<ReplicationClientInfo>();
+}
+
+void from_json(nlohmann::json const &j, InstanceUUIDUpdate &instance_uuid_change) {
+  instance_uuid_change.uuid = j.at("uuid").get<utils::UUID>();
+  instance_uuid_change.instance_name = j.at("instance_name").get<std::string>();
+}
+
+void to_json(nlohmann::json &j, InstanceUUIDUpdate const &instance_uuid_change) {
+  j = nlohmann::json{{"instance_name", instance_uuid_change.instance_name}, {"uuid", instance_uuid_change.uuid}};
 }
 
 }  // namespace memgraph::coordination
diff --git a/src/coordination/coordinator_handlers.cpp b/src/coordination/coordinator_handlers.cpp
index 637360267..e5b7a663f 100644
--- a/src/coordination/coordinator_handlers.cpp
+++ b/src/coordination/coordinator_handlers.cpp
@@ -95,8 +95,8 @@ void CoordinatorHandlers::DemoteMainToReplicaHandler(replication::ReplicationHan
   slk::Load(&req, req_reader);
 
   const replication::ReplicationServerConfig clients_config{
-      .ip_address = req.replication_client_info.replication_ip_address,
-      .port = req.replication_client_info.replication_port};
+      .ip_address = req.replication_client_info.replication_server.address,
+      .port = req.replication_client_info.replication_server.port};
 
   if (!replication_handler.SetReplicationRoleReplica(clients_config, std::nullopt)) {
     spdlog::error("Demoting main to replica failed!");
@@ -136,8 +136,8 @@ void CoordinatorHandlers::PromoteReplicaToMainHandler(replication::ReplicationHa
     return replication::ReplicationClientConfig{
         .name = repl_info_config.instance_name,
         .mode = repl_info_config.replication_mode,
-        .ip_address = repl_info_config.replication_ip_address,
-        .port = repl_info_config.replication_port,
+        .ip_address = repl_info_config.replication_server.address,
+        .port = repl_info_config.replication_server.port,
     };
   };
 
diff --git a/src/coordination/coordinator_instance.cpp b/src/coordination/coordinator_instance.cpp
index 791ffbc59..6dc4a2eaf 100644
--- a/src/coordination/coordinator_instance.cpp
+++ b/src/coordination/coordinator_instance.cpp
@@ -14,7 +14,6 @@
 #include "coordination/coordinator_instance.hpp"
 
 #include "coordination/coordinator_exceptions.hpp"
-#include "coordination/fmt.hpp"
 #include "dbms/constants.hpp"
 #include "nuraft/coordinator_state_machine.hpp"
 #include "nuraft/coordinator_state_manager.hpp"
@@ -31,10 +30,11 @@ using nuraft::ptr;
 using nuraft::srv_config;
 
 CoordinatorInstance::CoordinatorInstance()
-    : raft_state_(RaftState::MakeRaftState(
+    : thread_pool_{1},
+      raft_state_(RaftState::MakeRaftState(
           [this]() {
             spdlog::info("Leader changed, starting all replication instances!");
-            auto const instances = raft_state_.GetInstances();
+            auto const instances = raft_state_.GetReplicationInstances();
             auto replicas = instances | ranges::views::filter([](auto const &instance) {
                               return instance.status == ReplicationRole::REPLICA;
                             });
@@ -56,23 +56,34 @@ CoordinatorInstance::CoordinatorInstance()
                                            &CoordinatorInstance::MainFailCallback);
             });
 
-            std::ranges::for_each(repl_instances_, [this](auto &instance) {
-              instance.SetNewMainUUID(raft_state_.GetUUID());
-              instance.StartFrequentCheck();
-            });
+            std::ranges::for_each(repl_instances_, [](auto &instance) { instance.StartFrequentCheck(); });
           },
           [this]() {
-            spdlog::info("Leader changed, stopping all replication instances!");
-            repl_instances_.clear();
+            thread_pool_.AddTask([this]() {
+              spdlog::info("Leader changed, trying to stop all replication instances frequent checks!");
+              // We need to stop checks before taking a lock because deadlock can happen if instances waits
+              // to take a lock in frequent check, and this thread already has a lock and waits for instance to
+              // be done with frequent check
+              for (auto &repl_instance : repl_instances_) {
+                repl_instance.StopFrequentCheck();
+              }
+              auto lock = std::unique_lock{coord_instance_lock_};
+              repl_instances_.clear();
+              spdlog::info("Stopped all replication instance frequent checks.");
+            });
           })) {
   client_succ_cb_ = [](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
-    auto lock = std::lock_guard{self->coord_instance_lock_};
+    auto lock = std::unique_lock{self->coord_instance_lock_};
+    // when coordinator is becoming follower it will want to stop all threads doing frequent checks
+    // Thread can get stuck here waiting for lock so we need to frequently check if we are in shutdown state
+
     auto &repl_instance = self->FindReplicationInstance(repl_instance_name);
     std::invoke(repl_instance.GetSuccessCallback(), self, repl_instance_name);
   };
 
   client_fail_cb_ = [](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
-    auto lock = std::lock_guard{self->coord_instance_lock_};
+    auto lock = std::unique_lock{self->coord_instance_lock_};
+
     auto &repl_instance = self->FindReplicationInstance(repl_instance_name);
     std::invoke(repl_instance.GetFailCallback(), self, repl_instance_name);
   };
@@ -101,7 +112,7 @@ auto CoordinatorInstance::ShowInstances() const -> std::vector<InstanceStatus> {
   if (raft_state_.IsLeader()) {
     auto const stringify_repl_role = [this](ReplicationInstance const &instance) -> std::string {
       if (!instance.IsAlive()) return "unknown";
-      if (raft_state_.IsMain(instance.InstanceName())) return "main";
+      if (raft_state_.IsCurrentMain(instance.InstanceName())) return "main";
       return "replica";
     };
 
@@ -122,26 +133,36 @@ auto CoordinatorInstance::ShowInstances() const -> std::vector<InstanceStatus> {
       std::ranges::transform(repl_instances_, std::back_inserter(instances_status), process_repl_instance_as_leader);
     }
   } else {
-    auto const stringify_inst_status = [](ReplicationRole status) -> std::string {
-      return status == ReplicationRole::MAIN ? "main" : "replica";
+    auto const stringify_inst_status = [raft_state_ptr = &raft_state_](
+                                           utils::UUID const &main_uuid,
+                                           ReplicationInstanceState const &instance) -> std::string {
+      if (raft_state_ptr->IsCurrentMain(instance.config.instance_name)) {
+        return "main";
+      }
+      if (raft_state_ptr->HasMainState(instance.config.instance_name)) {
+        return "unknown";
+      }
+      return "replica";
     };
 
     // TODO: (andi) Add capability that followers can also return socket addresses
-    auto process_repl_instance_as_follower = [&stringify_inst_status](auto const &instance) -> InstanceStatus {
+    auto process_repl_instance_as_follower =
+        [this, &stringify_inst_status](ReplicationInstanceState const &instance) -> InstanceStatus {
       return {.instance_name = instance.config.instance_name,
-              .cluster_role = stringify_inst_status(instance.status),
+              .cluster_role = stringify_inst_status(raft_state_.GetCurrentMainUUID(), instance),
               .health = "unknown"};
     };
 
-    std::ranges::transform(raft_state_.GetInstances(), std::back_inserter(instances_status),
+    std::ranges::transform(raft_state_.GetReplicationInstances(), std::back_inserter(instances_status),
                            process_repl_instance_as_follower);
   }
-
   return instances_status;
 }
 
 auto CoordinatorInstance::TryFailover() -> void {
-  auto const is_replica = [this](ReplicationInstance const &instance) { return IsReplica(instance.InstanceName()); };
+  auto const is_replica = [this](ReplicationInstance const &instance) {
+    return HasReplicaState(instance.InstanceName());
+  };
 
   auto alive_replicas =
       repl_instances_ | ranges::views::filter(is_replica) | ranges::views::filter(&ReplicationInstance::IsAlive);
@@ -151,11 +172,6 @@ auto CoordinatorInstance::TryFailover() -> void {
     return;
   }
 
-  if (!raft_state_.RequestLeadership()) {
-    spdlog::error("Failover failed since the instance is not the leader!");
-    return;
-  }
-
   auto const get_ts = [](ReplicationInstance &replica) { return replica.GetClient().SendGetInstanceTimestampsRpc(); };
 
   auto maybe_instance_db_histories = alive_replicas | ranges::views::transform(get_ts) | ranges::to<std::vector>();
@@ -183,6 +199,10 @@ auto CoordinatorInstance::TryFailover() -> void {
 
   auto *new_main = &FindReplicationInstance(most_up_to_date_instance);
 
+  if (!raft_state_.AppendOpenLockFailover(most_up_to_date_instance)) {
+    spdlog::error("Aborting failover as instance is not anymore leader.");
+    return;
+  }
   new_main->PauseFrequentCheck();
   utils::OnScopeExit scope_exit{[&new_main] { new_main->ResumeFrequentCheck(); }};
 
@@ -192,16 +212,18 @@ auto CoordinatorInstance::TryFailover() -> void {
 
   auto const new_main_uuid = utils::UUID{};
 
-  auto const failed_to_swap = [&new_main_uuid](ReplicationInstance &instance) {
-    return !instance.SendSwapAndUpdateUUID(new_main_uuid);
+  auto const failed_to_swap = [this, &new_main_uuid](ReplicationInstance &instance) {
+    return !instance.SendSwapAndUpdateUUID(new_main_uuid) ||
+           !raft_state_.AppendUpdateUUIDForInstanceLog(instance.InstanceName(), new_main_uuid);
   };
 
   // If for some replicas swap fails, for others on successful ping we will revert back on next change
   // or we will do failover first again and then it will be consistent again
   if (std::ranges::any_of(alive_replicas | ranges::views::filter(is_not_new_main), failed_to_swap)) {
-    spdlog::error("Failed to swap uuid for all instances");
+    spdlog::error("Aborting failover. Failed to swap uuid for all alive instances.");
     return;
   }
+
   auto repl_clients_info = repl_instances_ | ranges::views::filter(is_not_new_main) |
                            ranges::views::transform(&ReplicationInstance::ReplicationClientInfo) |
                            ranges::to<ReplicationClientsInfo>();
@@ -212,27 +234,36 @@ auto CoordinatorInstance::TryFailover() -> void {
     return;
   }
 
-  if (!raft_state_.AppendUpdateUUIDLog(new_main_uuid)) {
+  if (!raft_state_.AppendUpdateUUIDForNewMainLog(new_main_uuid)) {
     return;
   }
 
   auto const new_main_instance_name = new_main->InstanceName();
 
-  if (!raft_state_.AppendSetInstanceAsMainLog(new_main_instance_name)) {
+  if (!raft_state_.AppendSetInstanceAsMainLog(new_main_instance_name, new_main_uuid)) {
     return;
   }
 
+  if (!new_main->EnableWritingOnMain()) {
+    spdlog::error("Failover successful but couldn't enable writing on instance.");
+  }
+
   spdlog::info("Failover successful! Instance {} promoted to main.", new_main->InstanceName());
 }
 
 auto CoordinatorInstance::SetReplicationInstanceToMain(std::string_view instance_name)
     -> SetInstanceToMainCoordinatorStatus {
   auto lock = std::lock_guard{coord_instance_lock_};
+  if (raft_state_.IsLockOpened()) {
+    return SetInstanceToMainCoordinatorStatus::LOCK_OPENED;
+  }
 
   if (raft_state_.MainExists()) {
     return SetInstanceToMainCoordinatorStatus::MAIN_ALREADY_EXISTS;
   }
 
+  // TODO(antoniofilipovic) Check if request leadership can cause problems due to changing of leadership while other
+  // doing failover
   if (!raft_state_.RequestLeadership()) {
     return SetInstanceToMainCoordinatorStatus::NOT_LEADER;
   }
@@ -249,6 +280,10 @@ auto CoordinatorInstance::SetReplicationInstanceToMain(std::string_view instance
     return SetInstanceToMainCoordinatorStatus::NO_INSTANCE_WITH_NAME;
   }
 
+  if (!raft_state_.AppendOpenLockSetInstanceToMain(instance_name)) {
+    return SetInstanceToMainCoordinatorStatus::OPEN_LOCK;
+  }
+
   new_main->PauseFrequentCheck();
   utils::OnScopeExit scope_exit{[&new_main] { new_main->ResumeFrequentCheck(); }};
 
@@ -258,12 +293,13 @@ auto CoordinatorInstance::SetReplicationInstanceToMain(std::string_view instance
 
   auto const new_main_uuid = utils::UUID{};
 
-  auto const failed_to_swap = [&new_main_uuid](ReplicationInstance &instance) {
-    return !instance.SendSwapAndUpdateUUID(new_main_uuid);
+  auto const failed_to_swap = [this, &new_main_uuid](ReplicationInstance &instance) {
+    return !instance.SendSwapAndUpdateUUID(new_main_uuid) ||
+           !raft_state_.AppendUpdateUUIDForInstanceLog(instance.InstanceName(), new_main_uuid);
   };
 
   if (std::ranges::any_of(repl_instances_ | ranges::views::filter(is_not_new_main), failed_to_swap)) {
-    spdlog::error("Failed to swap uuid for all instances");
+    spdlog::error("Failed to swap uuid for all currently alive instances.");
     return SetInstanceToMainCoordinatorStatus::SWAP_UUID_FAILED;
   }
 
@@ -275,22 +311,28 @@ auto CoordinatorInstance::SetReplicationInstanceToMain(std::string_view instance
                                &CoordinatorInstance::MainFailCallback)) {
     return SetInstanceToMainCoordinatorStatus::COULD_NOT_PROMOTE_TO_MAIN;
   }
-
-  if (!raft_state_.AppendUpdateUUIDLog(new_main_uuid)) {
+  if (!raft_state_.AppendUpdateUUIDForNewMainLog(new_main_uuid)) {
     return SetInstanceToMainCoordinatorStatus::RAFT_LOG_ERROR;
   }
 
-  if (!raft_state_.AppendSetInstanceAsMainLog(instance_name)) {
+  if (!raft_state_.AppendSetInstanceAsMainLog(instance_name, new_main_uuid)) {
     return SetInstanceToMainCoordinatorStatus::RAFT_LOG_ERROR;
   }
 
   spdlog::info("Instance {} promoted to main on leader", instance_name);
+
+  if (!new_main->EnableWritingOnMain()) {
+    return SetInstanceToMainCoordinatorStatus::ENABLE_WRITING_FAILED;
+  }
   return SetInstanceToMainCoordinatorStatus::SUCCESS;
 }
 
-auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorClientConfig const &config)
+auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorToReplicaConfig const &config)
     -> RegisterInstanceCoordinatorStatus {
   auto lock = std::lock_guard{coord_instance_lock_};
+  if (raft_state_.IsLockOpened()) {
+    return RegisterInstanceCoordinatorStatus::LOCK_OPENED;
+  }
 
   if (std::ranges::any_of(repl_instances_, [instance_name = config.instance_name](ReplicationInstance const &instance) {
         return instance.InstanceName() == instance_name;
@@ -310,11 +352,14 @@ auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorClientConfig co
     return RegisterInstanceCoordinatorStatus::REPL_ENDPOINT_EXISTS;
   }
 
+  // TODO(antoniofilipovic) Check if this is an issue
   if (!raft_state_.RequestLeadership()) {
     return RegisterInstanceCoordinatorStatus::NOT_LEADER;
   }
 
-  auto const undo_action_ = [this]() { repl_instances_.pop_back(); };
+  if (!raft_state_.AppendOpenLockRegister(config)) {
+    return RegisterInstanceCoordinatorStatus::OPEN_LOCK;
+  }
 
   auto *new_instance = &repl_instances_.emplace_back(this, config, client_succ_cb_, client_fail_cb_,
                                                      &CoordinatorInstance::ReplicaSuccessCallback,
@@ -322,15 +367,12 @@ auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorClientConfig co
 
   if (!new_instance->SendDemoteToReplicaRpc()) {
     spdlog::error("Failed to send demote to replica rpc for instance {}", config.instance_name);
-    undo_action_();
     return RegisterInstanceCoordinatorStatus::RPC_FAILED;
   }
 
   if (!raft_state_.AppendRegisterReplicationInstanceLog(config)) {
-    undo_action_();
     return RegisterInstanceCoordinatorStatus::RAFT_LOG_ERROR;
   }
-
   new_instance->StartFrequentCheck();
 
   spdlog::info("Instance {} registered", config.instance_name);
@@ -341,6 +383,11 @@ auto CoordinatorInstance::UnregisterReplicationInstance(std::string_view instanc
     -> UnregisterInstanceCoordinatorStatus {
   auto lock = std::lock_guard{coord_instance_lock_};
 
+  if (raft_state_.IsLockOpened()) {
+    return UnregisterInstanceCoordinatorStatus::LOCK_OPENED;
+  }
+
+  // TODO(antoniofilipovic) Check if this is an issue
   if (!raft_state_.RequestLeadership()) {
     return UnregisterInstanceCoordinatorStatus::NOT_LEADER;
   }
@@ -354,19 +401,23 @@ auto CoordinatorInstance::UnregisterReplicationInstance(std::string_view instanc
     return UnregisterInstanceCoordinatorStatus::NO_INSTANCE_WITH_NAME;
   }
 
-  auto const is_main = [this](ReplicationInstance const &instance) {
-    return IsMain(instance.InstanceName()) && instance.GetMainUUID() == raft_state_.GetUUID() && instance.IsAlive();
+  auto const is_current_main = [this](ReplicationInstance const &instance) {
+    return raft_state_.IsCurrentMain(instance.InstanceName()) && instance.IsAlive();
   };
 
-  if (is_main(*inst_to_remove)) {
+  if (is_current_main(*inst_to_remove)) {
     return UnregisterInstanceCoordinatorStatus::IS_MAIN;
   }
 
+  if (!raft_state_.AppendOpenLockUnregister(instance_name)) {
+    return UnregisterInstanceCoordinatorStatus::OPEN_LOCK;
+  }
+
   inst_to_remove->StopFrequentCheck();
 
-  auto curr_main = std::ranges::find_if(repl_instances_, is_main);
+  auto curr_main = std::ranges::find_if(repl_instances_, is_current_main);
 
-  if (curr_main != repl_instances_.end() && curr_main->IsAlive()) {
+  if (curr_main != repl_instances_.end()) {
     if (!curr_main->SendUnregisterReplicaRpc(instance_name)) {
       inst_to_remove->StartFrequentCheck();
       return UnregisterInstanceCoordinatorStatus::RPC_FAILED;
@@ -382,20 +433,25 @@ auto CoordinatorInstance::UnregisterReplicationInstance(std::string_view instanc
   return UnregisterInstanceCoordinatorStatus::SUCCESS;
 }
 
-auto CoordinatorInstance::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port,
-                                                 std::string_view raft_address) -> void {
-  raft_state_.AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
+auto CoordinatorInstance::AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void {
+  raft_state_.AddCoordinatorInstance(config);
+  // NOTE: We ignore the error here: the coordinator instance was added to the network layer but not to the Raft log.
+  if (!raft_state_.AppendAddCoordinatorInstanceLog(config)) {
+    spdlog::error("Failed to append add coordinator instance log");
+  }
 }
 
 void CoordinatorInstance::MainFailCallback(std::string_view repl_instance_name) {
   spdlog::trace("Instance {} performing main fail callback", repl_instance_name);
+  if (raft_state_.IsLockOpened()) {
+    spdlog::error("Returning from main fail callback as the last action didn't successfully finish");
+  }
+
   auto &repl_instance = FindReplicationInstance(repl_instance_name);
   repl_instance.OnFailPing();
-  const auto &repl_instance_uuid = repl_instance.GetMainUUID();
-  MG_ASSERT(repl_instance_uuid.has_value(), "Replication instance must have uuid set");
 
   // NOLINTNEXTLINE
-  if (!repl_instance.IsAlive() && raft_state_.GetUUID() == repl_instance_uuid.value()) {
+  if (!repl_instance.IsAlive() && raft_state_.IsCurrentMain(repl_instance_name)) {
     spdlog::info("Cluster without main instance, trying automatic failover");
     TryFailover();
   }
@@ -403,6 +459,12 @@ void CoordinatorInstance::MainFailCallback(std::string_view repl_instance_name)
 
 void CoordinatorInstance::MainSuccessCallback(std::string_view repl_instance_name) {
   spdlog::trace("Instance {} performing main successful callback", repl_instance_name);
+
+  if (raft_state_.IsLockOpened()) {
+    spdlog::error("Stopping main successful callback as the last action didn't successfully finish");
+    return;
+  }
+
   auto &repl_instance = FindReplicationInstance(repl_instance_name);
 
   if (repl_instance.IsAlive()) {
@@ -410,11 +472,8 @@ void CoordinatorInstance::MainSuccessCallback(std::string_view repl_instance_nam
     return;
   }
 
-  const auto &repl_instance_uuid = repl_instance.GetMainUUID();
-  MG_ASSERT(repl_instance_uuid.has_value(), "Instance must have uuid set.");
-
   // NOLINTNEXTLINE
-  if (raft_state_.GetUUID() == repl_instance_uuid.value()) {
+  if (raft_state_.IsCurrentMain(repl_instance.InstanceName())) {
     if (!repl_instance.EnableWritingOnMain()) {
       spdlog::error("Failed to enable writing on main instance {}", repl_instance_name);
       return;
@@ -424,9 +483,8 @@ void CoordinatorInstance::MainSuccessCallback(std::string_view repl_instance_nam
     return;
   }
 
-  if (!raft_state_.RequestLeadership()) {
-    spdlog::error("Demoting main instance {} to replica failed since the instance is not the leader!",
-                  repl_instance_name);
+  if (!raft_state_.AppendOpenLockSetInstanceToReplica(repl_instance.InstanceName())) {
+    spdlog::error("Failed to open lock for demoting OLD MAIN {} to REPLICA", repl_instance_name);
     return;
   }
 
@@ -439,29 +497,38 @@ void CoordinatorInstance::MainSuccessCallback(std::string_view repl_instance_nam
     return;
   }
 
-  if (!repl_instance.SendSwapAndUpdateUUID(raft_state_.GetUUID())) {
+  if (!repl_instance.SendSwapAndUpdateUUID(raft_state_.GetCurrentMainUUID())) {
     spdlog::error("Failed to swap uuid for demoted main instance {}", repl_instance_name);
     return;
   }
 
+  if (!raft_state_.AppendUpdateUUIDForInstanceLog(repl_instance_name, raft_state_.GetCurrentMainUUID())) {
+    spdlog::error("Failed to update log of changing instance uuid {} to {}", repl_instance_name,
+                  std::string{raft_state_.GetCurrentMainUUID()});
+    return;
+  }
+
   if (!raft_state_.AppendSetInstanceAsReplicaLog(repl_instance_name)) {
+    spdlog::error("Failed to append log that OLD MAIN was demoted to REPLICA {}", repl_instance_name);
     return;
   }
 }
 
 void CoordinatorInstance::ReplicaSuccessCallback(std::string_view repl_instance_name) {
   spdlog::trace("Instance {} performing replica successful callback", repl_instance_name);
-  auto &repl_instance = FindReplicationInstance(repl_instance_name);
 
-  if (!IsReplica(repl_instance_name)) {
-    spdlog::error("Aborting replica callback since instance {} is not replica anymore", repl_instance_name);
+  if (raft_state_.IsLockOpened()) {
+    spdlog::error("Stopping replica successful callback as the last action didn't successfully finish");
     return;
   }
+
+  auto &repl_instance = FindReplicationInstance(repl_instance_name);
+
   // We need to get replicas UUID from time to time to ensure replica is listening to correct main
   // and that it didn't go down for less time than we could notice
   // We need to get id of main replica is listening to
   // and swap if necessary
-  if (!repl_instance.EnsureReplicaHasCorrectMainUUID(raft_state_.GetUUID())) {
+  if (!repl_instance.EnsureReplicaHasCorrectMainUUID(raft_state_.GetCurrentMainUUID())) {
     spdlog::error("Failed to swap uuid for replica instance {} which is alive", repl_instance.InstanceName());
     return;
   }
@@ -471,13 +538,14 @@ void CoordinatorInstance::ReplicaSuccessCallback(std::string_view repl_instance_
 
 void CoordinatorInstance::ReplicaFailCallback(std::string_view repl_instance_name) {
   spdlog::trace("Instance {} performing replica failure callback", repl_instance_name);
-  auto &repl_instance = FindReplicationInstance(repl_instance_name);
 
-  if (!IsReplica(repl_instance_name)) {
-    spdlog::error("Aborting replica fail callback since instance {} is not replica anymore", repl_instance_name);
+  if (raft_state_.IsLockOpened()) {
+    spdlog::error("Stopping replica failure callback as the last action didn't successfully finish.");
     return;
   }
 
+  auto &repl_instance = FindReplicationInstance(repl_instance_name);
+
   repl_instance.OnFailPing();
 }
 
@@ -549,12 +617,63 @@ auto CoordinatorInstance::ChooseMostUpToDateInstance(std::span<InstanceNameDbHis
   return std::move(*new_main_res);
 }
 
-auto CoordinatorInstance::IsMain(std::string_view instance_name) const -> bool {
-  return raft_state_.IsMain(instance_name);
+auto CoordinatorInstance::HasMainState(std::string_view instance_name) const -> bool {
+  return raft_state_.HasMainState(instance_name);
 }
 
-auto CoordinatorInstance::IsReplica(std::string_view instance_name) const -> bool {
-  return raft_state_.IsReplica(instance_name);
+auto CoordinatorInstance::HasReplicaState(std::string_view instance_name) const -> bool {
+  return raft_state_.HasReplicaState(instance_name);
+}
+
+auto CoordinatorInstance::GetRoutingTable(std::map<std::string, std::string> const &routing) -> RoutingTable {
+  auto res = RoutingTable{};
+
+  auto const repl_instance_to_bolt = [](ReplicationInstanceState const &instance) {
+    return instance.config.BoltSocketAddress();
+  };
+
+  // TODO: (andi) This is wrong check, Fico will correct in #1819.
+  auto const is_instance_main = [&](ReplicationInstanceState const &instance) {
+    return instance.status == ReplicationRole::MAIN;
+  };
+
+  auto const is_instance_replica = [&](ReplicationInstanceState const &instance) {
+    return instance.status == ReplicationRole::REPLICA;
+  };
+
+  auto const &raft_log_repl_instances = raft_state_.GetReplicationInstances();
+
+  auto bolt_mains = raft_log_repl_instances | ranges::views::filter(is_instance_main) |
+                    ranges::views::transform(repl_instance_to_bolt) | ranges::to<std::vector>();
+  MG_ASSERT(bolt_mains.size() <= 1, "There can be at most one main instance active!");
+
+  if (!std::ranges::empty(bolt_mains)) {
+    res.emplace_back(std::move(bolt_mains), "WRITE");
+  }
+
+  auto bolt_replicas = raft_log_repl_instances | ranges::views::filter(is_instance_replica) |
+                       ranges::views::transform(repl_instance_to_bolt) | ranges::to<std::vector>();
+  if (!std::ranges::empty(bolt_replicas)) {
+    res.emplace_back(std::move(bolt_replicas), "READ");
+  }
+
+  auto const coord_instance_to_bolt = [](CoordinatorInstanceState const &instance) {
+    return instance.config.bolt_server.SocketAddress();
+  };
+
+  auto const &raft_log_coord_instances = raft_state_.GetCoordinatorInstances();
+  auto bolt_coords =
+      raft_log_coord_instances | ranges::views::transform(coord_instance_to_bolt) | ranges::to<std::vector>();
+
+  auto const &local_bolt_coord = routing.find("address");
+  if (local_bolt_coord == routing.end()) {
+    throw InvalidRoutingTableException("No bolt address found in routing table for the current coordinator!");
+  }
+
+  bolt_coords.push_back(local_bolt_coord->second);
+  res.emplace_back(std::move(bolt_coords), "ROUTE");
+
+  return res;
 }
 
 }  // namespace memgraph::coordination
diff --git a/src/coordination/coordinator_server.cpp b/src/coordination/coordinator_server.cpp
index 60dc5e348..327097830 100644
--- a/src/coordination/coordinator_server.cpp
+++ b/src/coordination/coordinator_server.cpp
@@ -18,8 +18,7 @@ namespace memgraph::coordination {
 
 namespace {
 
-auto CreateServerContext(const memgraph::coordination::CoordinatorServerConfig &config)
-    -> communication::ServerContext {
+auto CreateServerContext(const memgraph::coordination::ManagementServerConfig &config) -> communication::ServerContext {
   return (config.ssl) ? communication::ServerContext{config.ssl->key_file, config.ssl->cert_file, config.ssl->ca_file,
                                                      config.ssl->verify_peer}
                       : communication::ServerContext{};
@@ -32,7 +31,7 @@ constexpr auto kCoordinatorServerThreads = 1;
 
 }  // namespace
 
-CoordinatorServer::CoordinatorServer(const CoordinatorServerConfig &config)
+CoordinatorServer::CoordinatorServer(const ManagementServerConfig &config)
     : rpc_server_context_{CreateServerContext(config)},
       rpc_server_{io::network::Endpoint{config.ip_address, config.port}, &rpc_server_context_,
                   kCoordinatorServerThreads} {
diff --git a/src/coordination/coordinator_state.cpp b/src/coordination/coordinator_state.cpp
index f429cd5a7..0d6ce17c4 100644
--- a/src/coordination/coordinator_state.cpp
+++ b/src/coordination/coordinator_state.cpp
@@ -13,7 +13,7 @@
 
 #include "coordination/coordinator_state.hpp"
 
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "coordination/register_main_replica_coordinator_status.hpp"
 #include "flags/replication.hpp"
 #include "spdlog/spdlog.h"
@@ -25,15 +25,15 @@
 namespace memgraph::coordination {
 
 CoordinatorState::CoordinatorState() {
-  MG_ASSERT(!(FLAGS_raft_server_id && FLAGS_coordinator_server_port),
+  MG_ASSERT(!(FLAGS_coordinator_id && FLAGS_management_port),
             "Instance cannot be a coordinator and have registered coordinator server.");
 
   spdlog::info("Executing coordinator constructor");
-  if (FLAGS_coordinator_server_port) {
+  if (FLAGS_management_port) {
     spdlog::info("Coordinator server port set");
-    auto const config = CoordinatorServerConfig{
+    auto const config = ManagementServerConfig{
         .ip_address = kDefaultReplicationServerIp,
-        .port = static_cast<uint16_t>(FLAGS_coordinator_server_port),
+        .port = static_cast<uint16_t>(FLAGS_management_port),
     };
     spdlog::info("Executing coordinator constructor main replica");
 
@@ -41,7 +41,7 @@ CoordinatorState::CoordinatorState() {
   }
 }
 
-auto CoordinatorState::RegisterReplicationInstance(CoordinatorClientConfig const &config)
+auto CoordinatorState::RegisterReplicationInstance(CoordinatorToReplicaConfig const &config)
     -> RegisterInstanceCoordinatorStatus {
   MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
             "Coordinator cannot register replica since variant holds wrong alternative");
@@ -98,11 +98,16 @@ auto CoordinatorState::GetCoordinatorServer() const -> CoordinatorServer & {
   return *std::get<CoordinatorMainReplicaData>(data_).coordinator_server_;
 }
 
-auto CoordinatorState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port,
-                                              std::string_view raft_address) -> void {
+auto CoordinatorState::AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void {
   MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
             "Coordinator cannot register replica since variant holds wrong alternative");
-  return std::get<CoordinatorInstance>(data_).AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
+  return std::get<CoordinatorInstance>(data_).AddCoordinatorInstance(config);
+}
+
+auto CoordinatorState::GetRoutingTable(std::map<std::string, std::string> const &routing) -> RoutingTable {
+  MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
+            "Coordinator cannot get routing table since variant holds wrong alternative");
+  return std::get<CoordinatorInstance>(data_).GetRoutingTable(routing);
 }
 
 }  // namespace memgraph::coordination
diff --git a/src/coordination/coordinator_state_machine.cpp b/src/coordination/coordinator_state_machine.cpp
index 631c3c4d2..28c8b0768 100644
--- a/src/coordination/coordinator_state_machine.cpp
+++ b/src/coordination/coordinator_state_machine.cpp
@@ -20,18 +20,14 @@ constexpr int MAX_SNAPSHOTS = 3;
 
 namespace memgraph::coordination {
 
-auto CoordinatorStateMachine::FindCurrentMainInstanceName() const -> std::optional<std::string> {
-  return cluster_state_.FindCurrentMainInstanceName();
-}
-
 auto CoordinatorStateMachine::MainExists() const -> bool { return cluster_state_.MainExists(); }
 
-auto CoordinatorStateMachine::IsMain(std::string_view instance_name) const -> bool {
-  return cluster_state_.IsMain(instance_name);
+auto CoordinatorStateMachine::HasMainState(std::string_view instance_name) const -> bool {
+  return cluster_state_.HasMainState(instance_name);
 }
 
-auto CoordinatorStateMachine::IsReplica(std::string_view instance_name) const -> bool {
-  return cluster_state_.IsReplica(instance_name);
+auto CoordinatorStateMachine::HasReplicaState(std::string_view instance_name) const -> bool {
+  return cluster_state_.HasReplicaState(instance_name);
 }
 
 auto CoordinatorStateMachine::CreateLog(nlohmann::json &&log) -> ptr<buffer> {
@@ -42,7 +38,24 @@ auto CoordinatorStateMachine::CreateLog(nlohmann::json &&log) -> ptr<buffer> {
   return log_buf;
 }
 
-auto CoordinatorStateMachine::SerializeRegisterInstance(CoordinatorClientConfig const &config) -> ptr<buffer> {
+auto CoordinatorStateMachine::SerializeOpenLockRegister(CoordinatorToReplicaConfig const &config) -> ptr<buffer> {
+  return CreateLog({{"action", RaftLogAction::OPEN_LOCK_REGISTER_REPLICATION_INSTANCE}, {"info", config}});
+}
+
+auto CoordinatorStateMachine::SerializeOpenLockUnregister(std::string_view instance_name) -> ptr<buffer> {
+  return CreateLog(
+      {{"action", RaftLogAction::OPEN_LOCK_UNREGISTER_REPLICATION_INSTANCE}, {"info", std::string{instance_name}}});
+}
+
+auto CoordinatorStateMachine::SerializeOpenLockFailover(std::string_view instance_name) -> ptr<buffer> {
+  return CreateLog({{"action", RaftLogAction::OPEN_LOCK_FAILOVER}, {"info", std::string(instance_name)}});
+}
+
+auto CoordinatorStateMachine::SerializeOpenLockSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer> {
+  return CreateLog({{"action", RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_MAIN}, {"info", std::string(instance_name)}});
+}
+
+auto CoordinatorStateMachine::SerializeRegisterInstance(CoordinatorToReplicaConfig const &config) -> ptr<buffer> {
   return CreateLog({{"action", RaftLogAction::REGISTER_REPLICATION_INSTANCE}, {"info", config}});
 }
 
@@ -50,35 +63,65 @@ auto CoordinatorStateMachine::SerializeUnregisterInstance(std::string_view insta
   return CreateLog({{"action", RaftLogAction::UNREGISTER_REPLICATION_INSTANCE}, {"info", instance_name}});
 }
 
-auto CoordinatorStateMachine::SerializeSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer> {
-  return CreateLog({{"action", RaftLogAction::SET_INSTANCE_AS_MAIN}, {"info", instance_name}});
+auto CoordinatorStateMachine::SerializeSetInstanceAsMain(InstanceUUIDUpdate const &instance_uuid_change)
+    -> ptr<buffer> {
+  return CreateLog({{"action", RaftLogAction::SET_INSTANCE_AS_MAIN}, {"info", instance_uuid_change}});
 }
 
 auto CoordinatorStateMachine::SerializeSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer> {
   return CreateLog({{"action", RaftLogAction::SET_INSTANCE_AS_REPLICA}, {"info", instance_name}});
 }
 
-auto CoordinatorStateMachine::SerializeUpdateUUID(utils::UUID const &uuid) -> ptr<buffer> {
-  return CreateLog({{"action", RaftLogAction::UPDATE_UUID}, {"info", uuid}});
+auto CoordinatorStateMachine::SerializeUpdateUUIDForNewMain(utils::UUID const &uuid) -> ptr<buffer> {
+  return CreateLog({{"action", RaftLogAction::UPDATE_UUID_OF_NEW_MAIN}, {"info", uuid}});
+}
+
+auto CoordinatorStateMachine::SerializeUpdateUUIDForInstance(InstanceUUIDUpdate const &instance_uuid_change)
+    -> ptr<buffer> {
+  return CreateLog({{"action", RaftLogAction::UPDATE_UUID_FOR_INSTANCE}, {"info", instance_uuid_change}});
+}
+
+auto CoordinatorStateMachine::SerializeAddCoordinatorInstance(CoordinatorToCoordinatorConfig const &config)
+    -> ptr<buffer> {
+  return CreateLog({{"action", RaftLogAction::ADD_COORDINATOR_INSTANCE}, {"info", config}});
+}
+
+auto CoordinatorStateMachine::SerializeOpenLockSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer> {
+  return CreateLog({{"action", RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_REPLICA}, {"info", instance_name}});
 }
 
 auto CoordinatorStateMachine::DecodeLog(buffer &data) -> std::pair<TRaftLog, RaftLogAction> {
   buffer_serializer bs(data);
   auto const json = nlohmann::json::parse(bs.get_str());
-
   auto const action = json["action"].get<RaftLogAction>();
   auto const &info = json["info"];
 
   switch (action) {
+    case RaftLogAction::OPEN_LOCK_REGISTER_REPLICATION_INSTANCE: {
+      return {info.get<CoordinatorToReplicaConfig>(), action};
+    }
+    case RaftLogAction::OPEN_LOCK_UNREGISTER_REPLICATION_INSTANCE:
+      [[fallthrough]];
+    case RaftLogAction::OPEN_LOCK_FAILOVER:
+      [[fallthrough]];
+    case RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_MAIN:
+      [[fallthrough]];
+    case RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_REPLICA: {
+      return {info.get<std::string>(), action};
+    }
     case RaftLogAction::REGISTER_REPLICATION_INSTANCE:
-      return {info.get<CoordinatorClientConfig>(), action};
-    case RaftLogAction::UPDATE_UUID:
+      return {info.get<CoordinatorToReplicaConfig>(), action};
+    case RaftLogAction::UPDATE_UUID_OF_NEW_MAIN:
       return {info.get<utils::UUID>(), action};
-    case RaftLogAction::UNREGISTER_REPLICATION_INSTANCE:
+    case RaftLogAction::UPDATE_UUID_FOR_INSTANCE:
     case RaftLogAction::SET_INSTANCE_AS_MAIN:
+      return {info.get<InstanceUUIDUpdate>(), action};
+    case RaftLogAction::UNREGISTER_REPLICATION_INSTANCE:
       [[fallthrough]];
     case RaftLogAction::SET_INSTANCE_AS_REPLICA:
       return {info.get<std::string>(), action};
+    case RaftLogAction::ADD_COORDINATOR_INSTANCE:
+      return {info.get<CoordinatorToCoordinatorConfig>(), action};
   }
   throw std::runtime_error("Unknown action");
 }
@@ -133,6 +176,7 @@ auto CoordinatorStateMachine::read_logical_snp_obj(snapshot &snapshot, void *& /
   } else {
     // Object ID > 0: second object, put actual value.
     ctx->cluster_state_.Serialize(data_out);
+    is_last_obj = true;
   }
 
   return 0;
@@ -155,6 +199,7 @@ auto CoordinatorStateMachine::save_logical_snp_obj(snapshot &snapshot, ulong &ob
     DMG_ASSERT(entry != snapshots_.end());
     entry->second->cluster_state_ = cluster_state;
   }
+  obj_id++;
 }
 
 auto CoordinatorStateMachine::apply_snapshot(snapshot &s) -> bool {
@@ -205,11 +250,24 @@ auto CoordinatorStateMachine::create_snapshot_internal(ptr<snapshot> snapshot) -
   }
 }
 
-auto CoordinatorStateMachine::GetInstances() const -> std::vector<InstanceState> {
-  return cluster_state_.GetInstances();
+auto CoordinatorStateMachine::GetReplicationInstances() const -> std::vector<ReplicationInstanceState> {
+  return cluster_state_.GetReplicationInstances();
 }
 
-auto CoordinatorStateMachine::GetUUID() const -> utils::UUID { return cluster_state_.GetUUID(); }
+auto CoordinatorStateMachine::GetCurrentMainUUID() const -> utils::UUID { return cluster_state_.GetCurrentMainUUID(); }
+
+auto CoordinatorStateMachine::IsCurrentMain(std::string_view instance_name) const -> bool {
+  return cluster_state_.IsCurrentMain(instance_name);
+}
+auto CoordinatorStateMachine::GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState> {
+  return cluster_state_.GetCoordinatorInstances();
+}
+
+auto CoordinatorStateMachine::GetInstanceUUID(std::string_view instance_name) const -> utils::UUID {
+  return cluster_state_.GetInstanceUUID(instance_name);
+}
+
+auto CoordinatorStateMachine::IsLockOpened() const -> bool { return cluster_state_.IsLockOpened(); }
 
 }  // namespace memgraph::coordination
 #endif
diff --git a/src/coordination/coordinator_state_manager.cpp b/src/coordination/coordinator_state_manager.cpp
index b2fb81ea1..db49b1f21 100644
--- a/src/coordination/coordinator_state_manager.cpp
+++ b/src/coordination/coordinator_state_manager.cpp
@@ -33,6 +33,7 @@ CoordinatorStateManager::CoordinatorStateManager(int srv_id, std::string const &
 auto CoordinatorStateManager::load_config() -> ptr<cluster_config> {
   // Just return in-memory data in this example.
   // May require reading from disk here, if it has been written to disk.
+  spdlog::trace("Loading cluster config");
   return cluster_config_;
 }
 
@@ -41,6 +42,11 @@ auto CoordinatorStateManager::save_config(cluster_config const &config) -> void
   // Need to write to disk here, if want to make it durable.
   ptr<buffer> buf = config.serialize();
   cluster_config_ = cluster_config::deserialize(*buf);
+  spdlog::info("Saving cluster config.");
+  auto servers = cluster_config_->get_servers();
+  for (auto const &server : servers) {
+    spdlog::trace("Server id: {}, endpoint: {}", server->get_id(), server->get_endpoint());
+  }
 }
 
 auto CoordinatorStateManager::save_state(srv_state const &state) -> void {
diff --git a/src/coordination/include/coordination/coordinator_client.hpp b/src/coordination/include/coordination/coordinator_client.hpp
index 5d4795f81..875efaa45 100644
--- a/src/coordination/include/coordination/coordinator_client.hpp
+++ b/src/coordination/include/coordination/coordinator_client.hpp
@@ -13,7 +13,7 @@
 
 #ifdef MG_ENTERPRISE
 
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "replication_coordination_glue/common.hpp"
 #include "rpc/client.hpp"
 #include "rpc_errors.hpp"
@@ -25,11 +25,11 @@ namespace memgraph::coordination {
 
 class CoordinatorInstance;
 using HealthCheckClientCallback = std::function<void(CoordinatorInstance *, std::string_view)>;
-using ReplicationClientsInfo = std::vector<ReplClientInfo>;
+using ReplicationClientsInfo = std::vector<ReplicationClientInfo>;
 
 class CoordinatorClient {
  public:
-  explicit CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorClientConfig config,
+  explicit CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorToReplicaConfig config,
                              HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb);
 
   ~CoordinatorClient() = default;
@@ -62,7 +62,7 @@ class CoordinatorClient {
 
   auto SendGetInstanceUUIDRpc() const -> memgraph::utils::BasicResult<GetInstanceUUIDError, std::optional<utils::UUID>>;
 
-  auto ReplicationClientInfo() const -> ReplClientInfo;
+  auto ReplicationClientInfo() const -> ReplicationClientInfo;
 
   auto SendGetInstanceTimestampsRpc() const
       -> utils::BasicResult<GetInstanceUUIDError, replication_coordination_glue::DatabaseHistories>;
@@ -83,7 +83,7 @@ class CoordinatorClient {
   communication::ClientContext rpc_context_;
   mutable rpc::Client rpc_client_;
 
-  CoordinatorClientConfig config_;
+  CoordinatorToReplicaConfig config_;
   CoordinatorInstance *coord_instance_;
   HealthCheckClientCallback succ_cb_;
   HealthCheckClientCallback fail_cb_;
diff --git a/src/coordination/include/coordination/coordinator_communication_config.hpp b/src/coordination/include/coordination/coordinator_communication_config.hpp
new file mode 100644
index 000000000..56453d3ea
--- /dev/null
+++ b/src/coordination/include/coordination/coordinator_communication_config.hpp
@@ -0,0 +1,110 @@
+// Copyright 2024 Memgraph Ltd.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+// License, and you may not use this file except in compliance with the Business Source License.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+#pragma once
+
+#ifdef MG_ENTERPRISE
+
+#include "io/network/endpoint.hpp"
+#include "replication_coordination_glue/mode.hpp"
+#include "utils/string.hpp"
+
+#include <chrono>
+#include <cstdint>
+#include <optional>
+#include <string>
+
+#include <fmt/format.h>
+#include "json/json.hpp"
+#include "utils/uuid.hpp"
+
+namespace memgraph::coordination {
+
+inline constexpr auto *kDefaultReplicationServerIp = "0.0.0.0";
+
+struct ReplicationClientInfo {
+  std::string instance_name{};
+  replication_coordination_glue::ReplicationMode replication_mode{};
+  io::network::Endpoint replication_server;
+
+  friend bool operator==(ReplicationClientInfo const &, ReplicationClientInfo const &) = default;
+};
+
+struct CoordinatorToReplicaConfig {
+  auto BoltSocketAddress() const -> std::string { return bolt_server.SocketAddress(); }
+  auto CoordinatorSocketAddress() const -> std::string { return mgt_server.SocketAddress(); }
+  auto ReplicationSocketAddress() const -> std::string {
+    return replication_client_info.replication_server.SocketAddress();
+  }
+
+  std::string instance_name{};
+  io::network::Endpoint mgt_server;
+  io::network::Endpoint bolt_server;
+  ReplicationClientInfo replication_client_info;
+
+  std::chrono::seconds instance_health_check_frequency_sec{1};
+  std::chrono::seconds instance_down_timeout_sec{5};
+  std::chrono::seconds instance_get_uuid_frequency_sec{10};
+
+  struct SSL {
+    std::string key_file;
+    std::string cert_file;
+    friend bool operator==(const SSL &, const SSL &) = default;
+  };
+
+  std::optional<SSL> ssl;
+
+  friend bool operator==(CoordinatorToReplicaConfig const &, CoordinatorToReplicaConfig const &) = default;
+};
+
+struct CoordinatorToCoordinatorConfig {
+  uint32_t coordinator_server_id{0};
+  io::network::Endpoint bolt_server;
+  io::network::Endpoint coordinator_server;
+
+  friend bool operator==(CoordinatorToCoordinatorConfig const &, CoordinatorToCoordinatorConfig const &) = default;
+};
+
+struct ManagementServerConfig {
+  std::string ip_address;
+  uint16_t port{};
+  struct SSL {
+    std::string key_file;
+    std::string cert_file;
+    std::string ca_file;
+    bool verify_peer{};
+    friend bool operator==(SSL const &, SSL const &) = default;
+  };
+
+  std::optional<SSL> ssl;
+
+  friend bool operator==(ManagementServerConfig const &, ManagementServerConfig const &) = default;
+};
+
+struct InstanceUUIDUpdate {
+  std::string instance_name;
+  memgraph::utils::UUID uuid;
+};
+
+void to_json(nlohmann::json &j, CoordinatorToReplicaConfig const &config);
+void from_json(nlohmann::json const &j, CoordinatorToReplicaConfig &config);
+
+void to_json(nlohmann::json &j, CoordinatorToCoordinatorConfig const &config);
+void from_json(nlohmann::json const &j, CoordinatorToCoordinatorConfig &config);
+
+void to_json(nlohmann::json &j, ReplicationClientInfo const &config);
+void from_json(nlohmann::json const &j, ReplicationClientInfo &config);
+
+void to_json(nlohmann::json &j, InstanceUUIDUpdate const &config);
+void from_json(nlohmann::json const &j, InstanceUUIDUpdate &config);
+
+}  // namespace memgraph::coordination
+#endif
diff --git a/src/coordination/include/coordination/coordinator_config.hpp b/src/coordination/include/coordination/coordinator_config.hpp
deleted file mode 100644
index 127a365eb..000000000
--- a/src/coordination/include/coordination/coordinator_config.hpp
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2024 Memgraph Ltd.
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
-// License, and you may not use this file except in compliance with the Business Source License.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-#pragma once
-
-#ifdef MG_ENTERPRISE
-
-#include "replication_coordination_glue/mode.hpp"
-#include "utils/string.hpp"
-
-#include <chrono>
-#include <cstdint>
-#include <optional>
-#include <string>
-
-#include <fmt/format.h>
-#include "json/json.hpp"
-
-namespace memgraph::coordination {
-
-inline constexpr auto *kDefaultReplicationServerIp = "0.0.0.0";
-
-struct CoordinatorClientConfig {
-  std::string instance_name;
-  std::string ip_address;
-  uint16_t port{};
-  std::chrono::seconds instance_health_check_frequency_sec{1};
-  std::chrono::seconds instance_down_timeout_sec{5};
-  std::chrono::seconds instance_get_uuid_frequency_sec{10};
-
-  auto CoordinatorSocketAddress() const -> std::string { return fmt::format("{}:{}", ip_address, port); }
-  auto ReplicationSocketAddress() const -> std::string {
-    return fmt::format("{}:{}", replication_client_info.replication_ip_address,
-                       replication_client_info.replication_port);
-  }
-
-  struct ReplicationClientInfo {
-    std::string instance_name;
-    replication_coordination_glue::ReplicationMode replication_mode{};
-    std::string replication_ip_address;
-    uint16_t replication_port{};
-
-    friend bool operator==(ReplicationClientInfo const &, ReplicationClientInfo const &) = default;
-  };
-
-  ReplicationClientInfo replication_client_info;
-
-  struct SSL {
-    std::string key_file;
-    std::string cert_file;
-
-    friend bool operator==(const SSL &, const SSL &) = default;
-  };
-
-  std::optional<SSL> ssl;
-
-  friend bool operator==(CoordinatorClientConfig const &, CoordinatorClientConfig const &) = default;
-};
-
-using ReplClientInfo = CoordinatorClientConfig::ReplicationClientInfo;
-
-struct CoordinatorServerConfig {
-  std::string ip_address;
-  uint16_t port{};
-  struct SSL {
-    std::string key_file;
-    std::string cert_file;
-    std::string ca_file;
-    bool verify_peer{};
-    friend bool operator==(SSL const &, SSL const &) = default;
-  };
-
-  std::optional<SSL> ssl;
-
-  friend bool operator==(CoordinatorServerConfig const &, CoordinatorServerConfig const &) = default;
-};
-
-void to_json(nlohmann::json &j, CoordinatorClientConfig const &config);
-void from_json(nlohmann::json const &j, CoordinatorClientConfig &config);
-
-void to_json(nlohmann::json &j, ReplClientInfo const &config);
-void from_json(nlohmann::json const &j, ReplClientInfo &config);
-
-}  // namespace memgraph::coordination
-#endif
diff --git a/src/coordination/include/coordination/coordinator_exceptions.hpp b/src/coordination/include/coordination/coordinator_exceptions.hpp
index 7a967f80b..6cff2e8c1 100644
--- a/src/coordination/include/coordination/coordinator_exceptions.hpp
+++ b/src/coordination/include/coordination/coordinator_exceptions.hpp
@@ -94,5 +94,16 @@ class InvalidRaftLogActionException final : public utils::BasicException {
   SPECIALIZE_GET_EXCEPTION_NAME(InvalidRaftLogActionException)
 };
 
+class InvalidRoutingTableException final : public utils::BasicException {
+ public:
+  explicit InvalidRoutingTableException(std::string_view what) noexcept : BasicException(what) {}
+
+  template <class... Args>
+  explicit InvalidRoutingTableException(fmt::format_string<Args...> fmt, Args &&...args) noexcept
+      : InvalidRoutingTableException(fmt::format(fmt, std::forward<Args>(args)...)) {}
+
+  SPECIALIZE_GET_EXCEPTION_NAME(InvalidRoutingTableException)
+};
+
 }  // namespace memgraph::coordination
 #endif
diff --git a/src/coordination/include/coordination/coordinator_instance.hpp b/src/coordination/include/coordination/coordinator_instance.hpp
index 10549f468..5f74d1410 100644
--- a/src/coordination/include/coordination/coordinator_instance.hpp
+++ b/src/coordination/include/coordination/coordinator_instance.hpp
@@ -26,6 +26,8 @@
 
 namespace memgraph::coordination {
 
+using RoutingTable = std::vector<std::pair<std::vector<std::string>, std::string>>;
+
 struct NewMainRes {
   std::string most_up_to_date_instance;
   std::string latest_epoch;
@@ -36,8 +38,14 @@ using InstanceNameDbHistories = std::pair<std::string, replication_coordination_
 class CoordinatorInstance {
  public:
   CoordinatorInstance();
+  CoordinatorInstance(CoordinatorInstance const &) = delete;
+  CoordinatorInstance &operator=(CoordinatorInstance const &) = delete;
+  CoordinatorInstance(CoordinatorInstance &&) noexcept = delete;
+  CoordinatorInstance &operator=(CoordinatorInstance &&) noexcept = delete;
 
-  [[nodiscard]] auto RegisterReplicationInstance(CoordinatorClientConfig const &config)
+  ~CoordinatorInstance() = default;
+
+  [[nodiscard]] auto RegisterReplicationInstance(CoordinatorToReplicaConfig const &config)
       -> RegisterInstanceCoordinatorStatus;
   [[nodiscard]] auto UnregisterReplicationInstance(std::string_view instance_name)
       -> UnregisterInstanceCoordinatorStatus;
@@ -48,15 +56,17 @@ class CoordinatorInstance {
 
   auto TryFailover() -> void;
 
-  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;
+  auto AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void;
+
+  auto GetRoutingTable(std::map<std::string, std::string> const &routing) -> RoutingTable;
 
   static auto ChooseMostUpToDateInstance(std::span<InstanceNameDbHistories> histories) -> NewMainRes;
 
+  auto HasMainState(std::string_view instance_name) const -> bool;
+
+  auto HasReplicaState(std::string_view instance_name) const -> bool;
+
  private:
-  HealthCheckClientCallback client_succ_cb_, client_fail_cb_;
-
-  auto OnRaftCommitCallback(TRaftLog const &log_entry, RaftLogAction log_action) -> void;
-
   auto FindReplicationInstance(std::string_view replication_instance_name) -> ReplicationInstance &;
 
   void MainFailCallback(std::string_view);
@@ -67,14 +77,14 @@ class CoordinatorInstance {
 
   void ReplicaFailCallback(std::string_view);
 
-  auto IsMain(std::string_view instance_name) const -> bool;
-  auto IsReplica(std::string_view instance_name) const -> bool;
-
+  HealthCheckClientCallback client_succ_cb_, client_fail_cb_;
   // NOTE: Must be std::list because we rely on pointer stability.
-  // Leader and followers should both have same view on repl_instances_
   std::list<ReplicationInstance> repl_instances_;
   mutable utils::ResourceLock coord_instance_lock_{};
 
+  // Thread pool needs to be constructed before raft state as raft state can call thread pool
+  utils::ThreadPool thread_pool_;
+
   RaftState raft_state_;
 };
 
diff --git a/src/coordination/include/coordination/coordinator_rpc.hpp b/src/coordination/include/coordination/coordinator_rpc.hpp
index d799b2955..b0b466859 100644
--- a/src/coordination/include/coordination/coordinator_rpc.hpp
+++ b/src/coordination/include/coordination/coordinator_rpc.hpp
@@ -14,7 +14,7 @@
 #include "utils/uuid.hpp"
 #ifdef MG_ENTERPRISE
 
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "replication_coordination_glue/common.hpp"
 #include "rpc/messages.hpp"
 #include "slk/serialization.hpp"
@@ -28,14 +28,13 @@ struct PromoteReplicaToMainReq {
   static void Load(PromoteReplicaToMainReq *self, memgraph::slk::Reader *reader);
   static void Save(const PromoteReplicaToMainReq &self, memgraph::slk::Builder *builder);
 
-  explicit PromoteReplicaToMainReq(const utils::UUID &uuid,
-                                   std::vector<CoordinatorClientConfig::ReplicationClientInfo> replication_clients_info)
+  explicit PromoteReplicaToMainReq(const utils::UUID &uuid, std::vector<ReplicationClientInfo> replication_clients_info)
       : main_uuid_(uuid), replication_clients_info(std::move(replication_clients_info)) {}
   PromoteReplicaToMainReq() = default;
 
   // get uuid here
   utils::UUID main_uuid_;
-  std::vector<CoordinatorClientConfig::ReplicationClientInfo> replication_clients_info;
+  std::vector<ReplicationClientInfo> replication_clients_info;
 };
 
 struct PromoteReplicaToMainRes {
@@ -60,12 +59,12 @@ struct DemoteMainToReplicaReq {
   static void Load(DemoteMainToReplicaReq *self, memgraph::slk::Reader *reader);
   static void Save(const DemoteMainToReplicaReq &self, memgraph::slk::Builder *builder);
 
-  explicit DemoteMainToReplicaReq(CoordinatorClientConfig::ReplicationClientInfo replication_client_info)
+  explicit DemoteMainToReplicaReq(ReplicationClientInfo replication_client_info)
       : replication_client_info(std::move(replication_client_info)) {}
 
   DemoteMainToReplicaReq() = default;
 
-  CoordinatorClientConfig::ReplicationClientInfo replication_client_info;
+  ReplicationClientInfo replication_client_info;
 };
 
 struct DemoteMainToReplicaRes {
diff --git a/src/coordination/include/coordination/coordinator_server.hpp b/src/coordination/include/coordination/coordinator_server.hpp
index 2a261bc32..52a0befc5 100644
--- a/src/coordination/include/coordination/coordinator_server.hpp
+++ b/src/coordination/include/coordination/coordinator_server.hpp
@@ -13,14 +13,14 @@
 
 #ifdef MG_ENTERPRISE
 
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "rpc/server.hpp"
 
 namespace memgraph::coordination {
 
 class CoordinatorServer {
  public:
-  explicit CoordinatorServer(const CoordinatorServerConfig &config);
+  explicit CoordinatorServer(const ManagementServerConfig &config);
   CoordinatorServer(const CoordinatorServer &) = delete;
   CoordinatorServer(CoordinatorServer &&) = delete;
   CoordinatorServer &operator=(const CoordinatorServer &) = delete;
diff --git a/src/coordination/include/coordination/coordinator_slk.hpp b/src/coordination/include/coordination/coordinator_slk.hpp
index ee393b7b6..3d809da26 100644
--- a/src/coordination/include/coordination/coordinator_slk.hpp
+++ b/src/coordination/include/coordination/coordinator_slk.hpp
@@ -13,27 +13,37 @@
 
 #ifdef MG_ENTERPRISE
 
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "replication_coordination_glue/common.hpp"
 #include "slk/serialization.hpp"
 #include "slk/streams.hpp"
 
 namespace memgraph::slk {
 
-using ReplicationClientInfo = coordination::CoordinatorClientConfig::ReplicationClientInfo;
+using ReplicationClientInfo = coordination::ReplicationClientInfo;
 
-inline void Save(const ReplicationClientInfo &obj, Builder *builder) {
+inline void Save(io::network::Endpoint const &obj, Builder *builder) {
+  Save(obj.address, builder);
+  Save(obj.port, builder);
+  Save(obj.family, builder);
+}
+
+inline void Load(io::network::Endpoint *obj, Reader *reader) {
+  Load(&obj->address, reader);
+  Load(&obj->port, reader);
+  Load(&obj->family, reader);
+}
+
+inline void Save(ReplicationClientInfo const &obj, Builder *builder) {
   Save(obj.instance_name, builder);
   Save(obj.replication_mode, builder);
-  Save(obj.replication_ip_address, builder);
-  Save(obj.replication_port, builder);
+  Save(obj.replication_server, builder);
 }
 
 inline void Load(ReplicationClientInfo *obj, Reader *reader) {
   Load(&obj->instance_name, reader);
   Load(&obj->replication_mode, reader);
-  Load(&obj->replication_ip_address, reader);
-  Load(&obj->replication_port, reader);
+  Load(&obj->replication_server, reader);
 }
 
 inline void Save(const replication_coordination_glue::DatabaseHistory &obj, Builder *builder) {
diff --git a/src/coordination/include/coordination/coordinator_state.hpp b/src/coordination/include/coordination/coordinator_state.hpp
index 400c36940..f2a88e9b8 100644
--- a/src/coordination/include/coordination/coordinator_state.hpp
+++ b/src/coordination/include/coordination/coordinator_state.hpp
@@ -33,7 +33,7 @@ class CoordinatorState {
   CoordinatorState(CoordinatorState &&) noexcept = delete;
   CoordinatorState &operator=(CoordinatorState &&) noexcept = delete;
 
-  [[nodiscard]] auto RegisterReplicationInstance(CoordinatorClientConfig const &config)
+  [[nodiscard]] auto RegisterReplicationInstance(CoordinatorToReplicaConfig const &config)
       -> RegisterInstanceCoordinatorStatus;
   [[nodiscard]] auto UnregisterReplicationInstance(std::string_view instance_name)
       -> UnregisterInstanceCoordinatorStatus;
@@ -42,11 +42,13 @@ class CoordinatorState {
 
   auto ShowInstances() const -> std::vector<InstanceStatus>;
 
-  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;
+  auto AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void;
 
   // NOTE: The client code must check that the server exists before calling this method.
   auto GetCoordinatorServer() const -> CoordinatorServer &;
 
+  auto GetRoutingTable(std::map<std::string, std::string> const &routing) -> RoutingTable;
+
  private:
   struct CoordinatorMainReplicaData {
     std::unique_ptr<CoordinatorServer> coordinator_server_;
diff --git a/src/coordination/include/coordination/raft_state.hpp b/src/coordination/include/coordination/raft_state.hpp
index 34da3e2a6..03e00df06 100644
--- a/src/coordination/include/coordination/raft_state.hpp
+++ b/src/coordination/include/coordination/raft_state.hpp
@@ -23,7 +23,7 @@
 namespace memgraph::coordination {
 
 class CoordinatorInstance;
-struct CoordinatorClientConfig;
+struct CoordinatorToReplicaConfig;
 
 using BecomeLeaderCb = std::function<void()>;
 using BecomeFollowerCb = std::function<void()>;
@@ -40,7 +40,7 @@ using raft_result = nuraft::cmd_result<ptr<buffer>>;
 
 class RaftState {
  private:
-  explicit RaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb, uint32_t raft_server_id,
+  explicit RaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb, uint32_t coordinator_id,
                      uint32_t raft_port, std::string raft_address);
 
   auto InitRaftServer() -> void;
@@ -58,30 +58,43 @@ class RaftState {
   auto InstanceName() const -> std::string;
   auto RaftSocketAddress() const -> std::string;
 
-  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;
+  auto AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void;
   auto GetAllCoordinators() const -> std::vector<ptr<srv_config>>;
 
   auto RequestLeadership() -> bool;
   auto IsLeader() const -> bool;
 
-  auto FindCurrentMainInstanceName() const -> std::optional<std::string>;
-  auto MainExists() const -> bool;
-  auto IsMain(std::string_view instance_name) const -> bool;
-  auto IsReplica(std::string_view instance_name) const -> bool;
-
-  auto AppendRegisterReplicationInstanceLog(CoordinatorClientConfig const &config) -> bool;
+  auto AppendRegisterReplicationInstanceLog(CoordinatorToReplicaConfig const &config) -> bool;
   auto AppendUnregisterReplicationInstanceLog(std::string_view instance_name) -> bool;
-  auto AppendSetInstanceAsMainLog(std::string_view instance_name) -> bool;
+  auto AppendSetInstanceAsMainLog(std::string_view instance_name, utils::UUID const &uuid) -> bool;
   auto AppendSetInstanceAsReplicaLog(std::string_view instance_name) -> bool;
-  auto AppendUpdateUUIDLog(utils::UUID const &uuid) -> bool;
+  auto AppendUpdateUUIDForNewMainLog(utils::UUID const &uuid) -> bool;
+  auto AppendUpdateUUIDForInstanceLog(std::string_view instance_name, utils::UUID const &uuid) -> bool;
+  auto AppendOpenLockRegister(CoordinatorToReplicaConfig const &) -> bool;
+  auto AppendOpenLockUnregister(std::string_view) -> bool;
+  auto AppendOpenLockFailover(std::string_view instance_name) -> bool;
+  auto AppendOpenLockSetInstanceToMain(std::string_view instance_name) -> bool;
+  auto AppendOpenLockSetInstanceToReplica(std::string_view instance_name) -> bool;
+  auto AppendAddCoordinatorInstanceLog(CoordinatorToCoordinatorConfig const &config) -> bool;
 
-  auto GetInstances() const -> std::vector<InstanceState>;
-  auto GetUUID() const -> utils::UUID;
+  auto GetReplicationInstances() const -> std::vector<ReplicationInstanceState>;
+  // TODO: (andi) Do we need then GetAllCoordinators?
+  auto GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState>;
+
+  auto MainExists() const -> bool;
+  auto HasMainState(std::string_view instance_name) const -> bool;
+  auto HasReplicaState(std::string_view instance_name) const -> bool;
+  auto IsCurrentMain(std::string_view instance_name) const -> bool;
+
+  auto GetCurrentMainUUID() const -> utils::UUID;
+  auto GetInstanceUUID(std::string_view) const -> utils::UUID;
+
+  auto IsLockOpened() const -> bool;
 
  private:
   // TODO: (andi) I think variables below can be abstracted/clean them.
   io::network::Endpoint raft_endpoint_;
-  uint32_t raft_server_id_;
+  uint32_t coordinator_id_;
 
   ptr<CoordinatorStateMachine> state_machine_;
   ptr<CoordinatorStateManager> state_manager_;
diff --git a/src/coordination/include/coordination/register_main_replica_coordinator_status.hpp b/src/coordination/include/coordination/register_main_replica_coordinator_status.hpp
index 13b58ff9f..4366d20a5 100644
--- a/src/coordination/include/coordination/register_main_replica_coordinator_status.hpp
+++ b/src/coordination/include/coordination/register_main_replica_coordinator_status.hpp
@@ -25,7 +25,9 @@ enum class RegisterInstanceCoordinatorStatus : uint8_t {
   NOT_LEADER,
   RPC_FAILED,
   RAFT_LOG_ERROR,
-  SUCCESS
+  SUCCESS,
+  LOCK_OPENED,
+  OPEN_LOCK
 };
 
 enum class UnregisterInstanceCoordinatorStatus : uint8_t {
@@ -36,6 +38,8 @@ enum class UnregisterInstanceCoordinatorStatus : uint8_t {
   NOT_LEADER,
   RAFT_LOG_ERROR,
   SUCCESS,
+  LOCK_OPENED,
+  OPEN_LOCK
 };
 
 enum class SetInstanceToMainCoordinatorStatus : uint8_t {
@@ -47,6 +51,9 @@ enum class SetInstanceToMainCoordinatorStatus : uint8_t {
   COULD_NOT_PROMOTE_TO_MAIN,
   SWAP_UUID_FAILED,
   SUCCESS,
+  LOCK_OPENED,
+  OPEN_LOCK,
+  ENABLE_WRITING_FAILED
 };
 
 }  // namespace memgraph::coordination
diff --git a/src/coordination/include/coordination/replication_instance.hpp b/src/coordination/include/coordination/replication_instance.hpp
index 7b5d73b81..19127e7eb 100644
--- a/src/coordination/include/coordination/replication_instance.hpp
+++ b/src/coordination/include/coordination/replication_instance.hpp
@@ -32,7 +32,7 @@ using HealthCheckInstanceCallback = void (CoordinatorInstance::*)(std::string_vi
 
 class ReplicationInstance {
  public:
-  ReplicationInstance(CoordinatorInstance *peer, CoordinatorClientConfig config, HealthCheckClientCallback succ_cb,
+  ReplicationInstance(CoordinatorInstance *peer, CoordinatorToReplicaConfig config, HealthCheckClientCallback succ_cb,
                       HealthCheckClientCallback fail_cb, HealthCheckInstanceCallback succ_instance_cb,
                       HealthCheckInstanceCallback fail_instance_cb);
 
@@ -67,7 +67,7 @@ class ReplicationInstance {
   auto PauseFrequentCheck() -> void;
   auto ResumeFrequentCheck() -> void;
 
-  auto ReplicationClientInfo() const -> ReplClientInfo;
+  auto ReplicationClientInfo() const -> ReplicationClientInfo;
 
   auto EnsureReplicaHasCorrectMainUUID(utils::UUID const &curr_main_uuid) -> bool;
 
@@ -79,10 +79,6 @@ class ReplicationInstance {
 
   auto EnableWritingOnMain() -> bool;
 
-  auto SetNewMainUUID(utils::UUID const &main_uuid) -> void;
-  auto ResetMainUUID() -> void;
-  auto GetMainUUID() const -> std::optional<utils::UUID> const &;
-
   auto GetSuccessCallback() -> HealthCheckInstanceCallback &;
   auto GetFailCallback() -> HealthCheckInstanceCallback &;
 
@@ -92,19 +88,12 @@ class ReplicationInstance {
   bool is_alive_{false};
   std::chrono::system_clock::time_point last_check_of_uuid_{};
 
-  // for replica this is main uuid of current main
-  // for "main" main this same as in CoordinatorData
-  // it is set to nullopt when replica is down
-  // TLDR; when replica is down and comes back up we reset uuid of main replica is listening to
-  // so we need to send swap uuid again
-  std::optional<utils::UUID> main_uuid_;
-
   HealthCheckInstanceCallback succ_cb_;
   HealthCheckInstanceCallback fail_cb_;
 
   friend bool operator==(ReplicationInstance const &first, ReplicationInstance const &second) {
     return first.client_ == second.client_ && first.last_response_time_ == second.last_response_time_ &&
-           first.is_alive_ == second.is_alive_ && first.main_uuid_ == second.main_uuid_;
+           first.is_alive_ == second.is_alive_;
   }
 };
 
diff --git a/src/coordination/include/nuraft/coordinator_cluster_state.hpp b/src/coordination/include/nuraft/coordinator_cluster_state.hpp
index 11d539a14..5bd733c43 100644
--- a/src/coordination/include/nuraft/coordinator_cluster_state.hpp
+++ b/src/coordination/include/nuraft/coordinator_cluster_state.hpp
@@ -13,7 +13,7 @@
 
 #ifdef MG_ENTERPRISE
 
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "nuraft/raft_log_action.hpp"
 #include "replication_coordination_glue/role.hpp"
 #include "utils/resource_lock.hpp"
@@ -32,19 +32,37 @@ namespace memgraph::coordination {
 
 using replication_coordination_glue::ReplicationRole;
 
-struct InstanceState {
-  CoordinatorClientConfig config;
+struct ReplicationInstanceState {
+  CoordinatorToReplicaConfig config;
   ReplicationRole status;
 
-  friend auto operator==(InstanceState const &lhs, InstanceState const &rhs) -> bool {
-    return lhs.config == rhs.config && lhs.status == rhs.status;
+  // for replica this is main uuid of current main
+  // for "main" main this same as current_main_id_
+  // when replica is down and comes back up we reset uuid of main replica is listening to
+  // so we need to send swap uuid again
+  // For MAIN we don't enable writing until cluster is in healthy state
+  utils::UUID instance_uuid;
+
+  friend auto operator==(ReplicationInstanceState const &lhs, ReplicationInstanceState const &rhs) -> bool {
+    return lhs.config == rhs.config && lhs.status == rhs.status && lhs.instance_uuid == rhs.instance_uuid;
   }
 };
 
-void to_json(nlohmann::json &j, InstanceState const &instance_state);
-void from_json(nlohmann::json const &j, InstanceState &instance_state);
+// NOTE: Currently instance of coordinator doesn't change from the registration. Hence, just wrap
+// CoordinatorToCoordinatorConfig.
+struct CoordinatorInstanceState {
+  CoordinatorToCoordinatorConfig config;
 
-using TRaftLog = std::variant<CoordinatorClientConfig, std::string, utils::UUID>;
+  friend auto operator==(CoordinatorInstanceState const &lhs, CoordinatorInstanceState const &rhs) -> bool {
+    return lhs.config == rhs.config;
+  }
+};
+
+void to_json(nlohmann::json &j, ReplicationInstanceState const &instance_state);
+void from_json(nlohmann::json const &j, ReplicationInstanceState &instance_state);
+
+using TRaftLog = std::variant<CoordinatorToReplicaConfig, std::string, utils::UUID, CoordinatorToCoordinatorConfig,
+                              InstanceUUIDUpdate>;
 
 using nuraft::buffer;
 using nuraft::buffer_serializer;
@@ -53,7 +71,8 @@ using nuraft::ptr;
 class CoordinatorClusterState {
  public:
   CoordinatorClusterState() = default;
-  explicit CoordinatorClusterState(std::map<std::string, InstanceState, std::less<>> instances);
+  explicit CoordinatorClusterState(std::map<std::string, ReplicationInstanceState, std::less<>> instances,
+                                   utils::UUID const &current_main_uuid, bool is_lock_opened);
 
   CoordinatorClusterState(CoordinatorClusterState const &);
   CoordinatorClusterState &operator=(CoordinatorClusterState const &);
@@ -62,15 +81,13 @@ class CoordinatorClusterState {
   CoordinatorClusterState &operator=(CoordinatorClusterState &&other) noexcept;
   ~CoordinatorClusterState() = default;
 
-  auto FindCurrentMainInstanceName() const -> std::optional<std::string>;
-
   auto MainExists() const -> bool;
 
-  auto IsMain(std::string_view instance_name) const -> bool;
+  auto HasMainState(std::string_view instance_name) const -> bool;
 
-  auto IsReplica(std::string_view instance_name) const -> bool;
+  auto HasReplicaState(std::string_view instance_name) const -> bool;
 
-  auto InsertInstance(std::string instance_name, InstanceState instance_state) -> void;
+  auto IsCurrentMain(std::string_view instance_name) const -> bool;
 
   auto DoAction(TRaftLog log_entry, RaftLogAction log_action) -> void;
 
@@ -78,14 +95,22 @@ class CoordinatorClusterState {
 
   static auto Deserialize(buffer &data) -> CoordinatorClusterState;
 
-  auto GetInstances() const -> std::vector<InstanceState>;
+  auto GetReplicationInstances() const -> std::vector<ReplicationInstanceState>;
 
-  auto GetUUID() const -> utils::UUID;
+  auto GetCurrentMainUUID() const -> utils::UUID;
+
+  auto GetInstanceUUID(std::string_view) const -> utils::UUID;
+
+  auto IsLockOpened() const -> bool;
+
+  auto GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState>;
 
  private:
-  std::map<std::string, InstanceState, std::less<>> instances_{};
-  utils::UUID uuid_{};
+  std::vector<CoordinatorInstanceState> coordinators_{};
+  std::map<std::string, ReplicationInstanceState, std::less<>> repl_instances_{};
+  utils::UUID current_main_uuid_{};
   mutable utils::ResourceLock log_lock_{};
+  bool is_lock_opened_{false};
 };
 
 }  // namespace memgraph::coordination
diff --git a/src/coordination/include/nuraft/coordinator_state_machine.hpp b/src/coordination/include/nuraft/coordinator_state_machine.hpp
index 836ac17a6..754cb45af 100644
--- a/src/coordination/include/nuraft/coordinator_state_machine.hpp
+++ b/src/coordination/include/nuraft/coordinator_state_machine.hpp
@@ -13,7 +13,7 @@
 
 #ifdef MG_ENTERPRISE
 
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "nuraft/coordinator_cluster_state.hpp"
 #include "nuraft/raft_log_action.hpp"
 
@@ -40,19 +40,21 @@ class CoordinatorStateMachine : public state_machine {
   CoordinatorStateMachine &operator=(CoordinatorStateMachine const &) = delete;
   CoordinatorStateMachine(CoordinatorStateMachine &&) = delete;
   CoordinatorStateMachine &operator=(CoordinatorStateMachine &&) = delete;
-  ~CoordinatorStateMachine() override {}
-
-  auto FindCurrentMainInstanceName() const -> std::optional<std::string>;
-  auto MainExists() const -> bool;
-  auto IsMain(std::string_view instance_name) const -> bool;
-  auto IsReplica(std::string_view instance_name) const -> bool;
+  ~CoordinatorStateMachine() override = default;
 
   static auto CreateLog(nlohmann::json &&log) -> ptr<buffer>;
-  static auto SerializeRegisterInstance(CoordinatorClientConfig const &config) -> ptr<buffer>;
+  static auto SerializeOpenLockRegister(CoordinatorToReplicaConfig const &config) -> ptr<buffer>;
+  static auto SerializeOpenLockUnregister(std::string_view instance_name) -> ptr<buffer>;
+  static auto SerializeOpenLockSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer>;
+  static auto SerializeOpenLockFailover(std::string_view instance_name) -> ptr<buffer>;
+  static auto SerializeRegisterInstance(CoordinatorToReplicaConfig const &config) -> ptr<buffer>;
   static auto SerializeUnregisterInstance(std::string_view instance_name) -> ptr<buffer>;
-  static auto SerializeSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer>;
+  static auto SerializeSetInstanceAsMain(InstanceUUIDUpdate const &instance_uuid_change) -> ptr<buffer>;
   static auto SerializeSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer>;
-  static auto SerializeUpdateUUID(utils::UUID const &uuid) -> ptr<buffer>;
+  static auto SerializeUpdateUUIDForNewMain(utils::UUID const &uuid) -> ptr<buffer>;
+  static auto SerializeUpdateUUIDForInstance(InstanceUUIDUpdate const &instance_uuid_change) -> ptr<buffer>;
+  static auto SerializeAddCoordinatorInstance(CoordinatorToCoordinatorConfig const &config) -> ptr<buffer>;
+  static auto SerializeOpenLockSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer>;
 
   static auto DecodeLog(buffer &data) -> std::pair<TRaftLog, RaftLogAction>;
 
@@ -80,8 +82,19 @@ class CoordinatorStateMachine : public state_machine {
 
   auto create_snapshot(snapshot &s, async_result<bool>::handler_type &when_done) -> void override;
 
-  auto GetInstances() const -> std::vector<InstanceState>;
-  auto GetUUID() const -> utils::UUID;
+  auto GetReplicationInstances() const -> std::vector<ReplicationInstanceState>;
+
+  auto GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState>;
+
+  // Getters
+  auto MainExists() const -> bool;
+  auto HasMainState(std::string_view instance_name) const -> bool;
+  auto HasReplicaState(std::string_view instance_name) const -> bool;
+  auto IsCurrentMain(std::string_view instance_name) const -> bool;
+
+  auto GetCurrentMainUUID() const -> utils::UUID;
+  auto GetInstanceUUID(std::string_view instance_name) const -> utils::UUID;
+  auto IsLockOpened() const -> bool;
 
  private:
   struct SnapshotCtx {
diff --git a/src/coordination/include/nuraft/raft_log_action.hpp b/src/coordination/include/nuraft/raft_log_action.hpp
index 3f1b26dfa..ea1a4b9d7 100644
--- a/src/coordination/include/nuraft/raft_log_action.hpp
+++ b/src/coordination/include/nuraft/raft_log_action.hpp
@@ -23,20 +23,34 @@
 namespace memgraph::coordination {
 
 enum class RaftLogAction : uint8_t {
+  OPEN_LOCK_REGISTER_REPLICATION_INSTANCE,
+  OPEN_LOCK_UNREGISTER_REPLICATION_INSTANCE,
+  OPEN_LOCK_FAILOVER,
+  OPEN_LOCK_SET_INSTANCE_AS_MAIN,
+  OPEN_LOCK_SET_INSTANCE_AS_REPLICA,
   REGISTER_REPLICATION_INSTANCE,
   UNREGISTER_REPLICATION_INSTANCE,
   SET_INSTANCE_AS_MAIN,
   SET_INSTANCE_AS_REPLICA,
-  UPDATE_UUID
+  UPDATE_UUID_OF_NEW_MAIN,
+  ADD_COORDINATOR_INSTANCE,
+  UPDATE_UUID_FOR_INSTANCE,
 };
 
-NLOHMANN_JSON_SERIALIZE_ENUM(RaftLogAction, {
-                                                {RaftLogAction::REGISTER_REPLICATION_INSTANCE, "register"},
-                                                {RaftLogAction::UNREGISTER_REPLICATION_INSTANCE, "unregister"},
-                                                {RaftLogAction::SET_INSTANCE_AS_MAIN, "promote"},
-                                                {RaftLogAction::SET_INSTANCE_AS_REPLICA, "demote"},
-                                                {RaftLogAction::UPDATE_UUID, "update_uuid"},
-                                            })
+NLOHMANN_JSON_SERIALIZE_ENUM(RaftLogAction,
+                             {{RaftLogAction::REGISTER_REPLICATION_INSTANCE, "register"},
+                              {RaftLogAction::UNREGISTER_REPLICATION_INSTANCE, "unregister"},
+                              {RaftLogAction::SET_INSTANCE_AS_MAIN, "promote"},
+                              {RaftLogAction::SET_INSTANCE_AS_REPLICA, "demote"},
+                              {RaftLogAction::UPDATE_UUID_OF_NEW_MAIN, "update_uuid_of_new_main"},
+                              {RaftLogAction::ADD_COORDINATOR_INSTANCE, "add_coordinator_instance"},
+                              {RaftLogAction::UPDATE_UUID_FOR_INSTANCE, "update_uuid_for_instance"},
+                              {RaftLogAction::OPEN_LOCK_REGISTER_REPLICATION_INSTANCE, "open_lock_register_instance"},
+                              {RaftLogAction::OPEN_LOCK_UNREGISTER_REPLICATION_INSTANCE,
+                               "open_lock_unregister_instance"},
+                              {RaftLogAction::OPEN_LOCK_FAILOVER, "open_lock_failover"},
+                              {RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_MAIN, "open_lock_set_instance_as_main"},
+                              {RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_REPLICA, "open_lock_set_instance_as_replica"}})
 
 }  // namespace memgraph::coordination
 #endif
diff --git a/src/coordination/raft_state.cpp b/src/coordination/raft_state.cpp
index fd93160b6..db88169c8 100644
--- a/src/coordination/raft_state.cpp
+++ b/src/coordination/raft_state.cpp
@@ -12,8 +12,7 @@
 #ifdef MG_ENTERPRISE
 #include <chrono>
 
-#include <spdlog/spdlog.h>
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "coordination/coordinator_exceptions.hpp"
 #include "coordination/raft_state.hpp"
 #include "utils/counter.hpp"
@@ -31,12 +30,12 @@ using nuraft::raft_server;
 using nuraft::srv_config;
 using raft_result = cmd_result<ptr<buffer>>;
 
-RaftState::RaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb, uint32_t raft_server_id,
+RaftState::RaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb, uint32_t coordinator_id,
                      uint32_t raft_port, std::string raft_address)
     : raft_endpoint_(raft_address, raft_port),
-      raft_server_id_(raft_server_id),
+      coordinator_id_(coordinator_id),
       state_machine_(cs_new<CoordinatorStateMachine>()),
-      state_manager_(cs_new<CoordinatorStateManager>(raft_server_id_, raft_endpoint_.SocketAddress())),
+      state_manager_(cs_new<CoordinatorStateManager>(coordinator_id_, raft_endpoint_.SocketAddress())),
       logger_(nullptr),
       become_leader_cb_(std::move(become_leader_cb)),
       become_follower_cb_(std::move(become_follower_cb)) {}
@@ -63,13 +62,18 @@ auto RaftState::InitRaftServer() -> void {
   params.leadership_expiry_ = 200;
 
   raft_server::init_options init_opts;
+
   init_opts.raft_callback_ = [this](cb_func::Type event_type, cb_func::Param *param) -> nuraft::CbReturnCode {
     if (event_type == cb_func::BecomeLeader) {
       spdlog::info("Node {} became leader", param->leaderId);
       become_leader_cb_();
     } else if (event_type == cb_func::BecomeFollower) {
-      spdlog::info("Node {} became follower", param->myId);
+      // TODO(antoniofilipovic) Check what happens when becoming follower while doing failover
+      // There is no way to stop becoming a follower:
+      // https://github.com/eBay/NuRaft/blob/188947bcc73ce38ab1c3cf9d01015ca8a29decd9/src/raft_server.cxx#L1334-L1335
+      spdlog::trace("Got request to become follower");
       become_follower_cb_();
+      spdlog::trace("Node {} became follower", param->myId);
     }
     return CbReturnCode::Ok;
   };
@@ -82,7 +86,6 @@ auto RaftState::InitRaftServer() -> void {
   if (!raft_server_) {
     throw RaftServerStartException("Failed to launch raft server on {}", raft_endpoint_.SocketAddress());
   }
-
   auto maybe_stop = utils::ResettableCounter<20>();
   do {
     if (raft_server_->is_initialized()) {
@@ -95,11 +98,11 @@ auto RaftState::InitRaftServer() -> void {
 }
 
 auto RaftState::MakeRaftState(BecomeLeaderCb &&become_leader_cb, BecomeFollowerCb &&become_follower_cb) -> RaftState {
-  uint32_t raft_server_id = FLAGS_raft_server_id;
-  uint32_t raft_port = FLAGS_raft_server_port;
+  uint32_t coordinator_id = FLAGS_coordinator_id;
+  uint32_t raft_port = FLAGS_coordinator_port;
 
   auto raft_state =
-      RaftState(std::move(become_leader_cb), std::move(become_follower_cb), raft_server_id, raft_port, "127.0.0.1");
+      RaftState(std::move(become_leader_cb), std::move(become_follower_cb), coordinator_id, raft_port, "127.0.0.1");
 
   raft_state.InitRaftServer();
   return raft_state;
@@ -108,15 +111,14 @@ auto RaftState::MakeRaftState(BecomeLeaderCb &&become_leader_cb, BecomeFollowerC
 RaftState::~RaftState() { launcher_.shutdown(); }
 
 auto RaftState::InstanceName() const -> std::string {
-  return fmt::format("coordinator_{}", std::to_string(raft_server_id_));
+  return fmt::format("coordinator_{}", std::to_string(coordinator_id_));
 }
 
 auto RaftState::RaftSocketAddress() const -> std::string { return raft_endpoint_.SocketAddress(); }
 
-auto RaftState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address)
-    -> void {
-  auto const endpoint = fmt::format("{}:{}", raft_address, raft_port);
-  srv_config const srv_config_to_add(static_cast<int>(raft_server_id), endpoint);
+auto RaftState::AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void {
+  auto const endpoint = config.coordinator_server.SocketAddress();
+  srv_config const srv_config_to_add(static_cast<int>(config.coordinator_server_id), endpoint);
 
   auto cmd_result = raft_server_->add_srv(srv_config_to_add);
 
@@ -134,9 +136,9 @@ auto RaftState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_po
   bool added{false};
   while (!maybe_stop()) {
     std::this_thread::sleep_for(std::chrono::milliseconds(waiting_period));
-    const auto server_config = raft_server_->get_srv_config(static_cast<nuraft::int32>(raft_server_id));
+    const auto server_config = raft_server_->get_srv_config(static_cast<nuraft::int32>(config.coordinator_server_id));
     if (server_config) {
-      spdlog::trace("Server with id {} added to cluster", raft_server_id);
+      spdlog::trace("Server with id {} added to cluster", config.coordinator_server_id);
       added = true;
       break;
     }
@@ -158,7 +160,79 @@ auto RaftState::IsLeader() const -> bool { return raft_server_->is_leader(); }
 
 auto RaftState::RequestLeadership() -> bool { return raft_server_->is_leader() || raft_server_->request_leadership(); }
 
-auto RaftState::AppendRegisterReplicationInstanceLog(CoordinatorClientConfig const &config) -> bool {
+auto RaftState::AppendOpenLockRegister(CoordinatorToReplicaConfig const &config) -> bool {
+  auto new_log = CoordinatorStateMachine::SerializeOpenLockRegister(config);
+  auto const res = raft_server_->append_entries({new_log});
+
+  if (!res->get_accepted()) {
+    spdlog::error("Failed to accept request to open lock to register instance {}", config.instance_name);
+    return false;
+  }
+
+  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
+    spdlog::error("Failed to open lock for registering instance {} with error code {}", config.instance_name,
+                  int(res->get_result_code()));
+    return false;
+  }
+
+  return true;
+}
+
+auto RaftState::AppendOpenLockUnregister(std::string_view instance_name) -> bool {
+  auto new_log = CoordinatorStateMachine::SerializeOpenLockUnregister(instance_name);
+  auto const res = raft_server_->append_entries({new_log});
+
+  if (!res->get_accepted()) {
+    spdlog::error("Failed to accept request to open lock to unregister instance {}.", instance_name);
+    return false;
+  }
+
+  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
+    spdlog::error("Failed to open lock for unregistering instance {} with error code {}", instance_name,
+                  int(res->get_result_code()));
+    return false;
+  }
+
+  return true;
+}
+
+auto RaftState::AppendOpenLockFailover(std::string_view instance_name) -> bool {
+  auto new_log = CoordinatorStateMachine::SerializeOpenLockFailover(instance_name);
+  auto const res = raft_server_->append_entries({new_log});
+
+  if (!res->get_accepted()) {
+    spdlog::error("Failed to accept request to open lock for failover {}", instance_name);
+    return false;
+  }
+
+  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
+    spdlog::error("Failed to open lock for failover to instance {} with error code {}", instance_name,
+                  int(res->get_result_code()));
+    return false;
+  }
+
+  return true;
+}
+
+auto RaftState::AppendOpenLockSetInstanceToMain(std::string_view instance_name) -> bool {
+  auto new_log = CoordinatorStateMachine::SerializeOpenLockSetInstanceAsMain(instance_name);
+  auto const res = raft_server_->append_entries({new_log});
+
+  if (!res->get_accepted()) {
+    spdlog::error("Failed to accept request to open lock and set instance {} to MAIN", instance_name);
+    return false;
+  }
+
+  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
+    spdlog::error("Failed to open lock to set instance {} to MAIN with error code {}", instance_name,
+                  int(res->get_result_code()));
+    return false;
+  }
+
+  return true;
+}
+
+auto RaftState::AppendRegisterReplicationInstanceLog(CoordinatorToReplicaConfig const &config) -> bool {
   auto new_log = CoordinatorStateMachine::SerializeRegisterInstance(config);
   auto const res = raft_server_->append_entries({new_log});
 
@@ -202,8 +276,9 @@ auto RaftState::AppendUnregisterReplicationInstanceLog(std::string_view instance
   return true;
 }
 
-auto RaftState::AppendSetInstanceAsMainLog(std::string_view instance_name) -> bool {
-  auto new_log = CoordinatorStateMachine::SerializeSetInstanceAsMain(instance_name);
+auto RaftState::AppendSetInstanceAsMainLog(std::string_view instance_name, utils::UUID const &uuid) -> bool {
+  auto new_log = CoordinatorStateMachine::SerializeSetInstanceAsMain(
+      InstanceUUIDUpdate{.instance_name = std::string{instance_name}, .uuid = uuid});
   auto const res = raft_server_->append_entries({new_log});
   if (!res->get_accepted()) {
     spdlog::error(
@@ -242,8 +317,28 @@ auto RaftState::AppendSetInstanceAsReplicaLog(std::string_view instance_name) ->
   return true;
 }
 
-auto RaftState::AppendUpdateUUIDLog(utils::UUID const &uuid) -> bool {
-  auto new_log = CoordinatorStateMachine::SerializeUpdateUUID(uuid);
+auto RaftState::AppendOpenLockSetInstanceToReplica(std::string_view instance_name) -> bool {
+  auto new_log = CoordinatorStateMachine::SerializeOpenLockSetInstanceAsReplica(instance_name);
+  auto const res = raft_server_->append_entries({new_log});
+  if (!res->get_accepted()) {
+    spdlog::error(
+        "Failed to accept request for demoting instance {}. Most likely the reason is that the instance is not "
+        "the leader.",
+        instance_name);
+    return false;
+  }
+  spdlog::info("Request for demoting instance {} accepted", instance_name);
+
+  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
+    spdlog::error("Failed to demote instance {} with error code {}", instance_name, int(res->get_result_code()));
+    return false;
+  }
+
+  return true;
+}
+
+auto RaftState::AppendUpdateUUIDForNewMainLog(utils::UUID const &uuid) -> bool {
+  auto new_log = CoordinatorStateMachine::SerializeUpdateUUIDForNewMain(uuid);
   auto const res = raft_server_->append_entries({new_log});
   if (!res->get_accepted()) {
     spdlog::error(
@@ -251,7 +346,7 @@ auto RaftState::AppendUpdateUUIDLog(utils::UUID const &uuid) -> bool {
         "the leader.");
     return false;
   }
-  spdlog::info("Request for updating UUID accepted");
+  spdlog::trace("Request for updating UUID accepted");
 
   if (res->get_result_code() != nuraft::cmd_result_code::OK) {
     spdlog::error("Failed to update UUID with error code {}", int(res->get_result_code()));
@@ -261,21 +356,75 @@ auto RaftState::AppendUpdateUUIDLog(utils::UUID const &uuid) -> bool {
   return true;
 }
 
-auto RaftState::FindCurrentMainInstanceName() const -> std::optional<std::string> {
-  return state_machine_->FindCurrentMainInstanceName();
+auto RaftState::AppendAddCoordinatorInstanceLog(CoordinatorToCoordinatorConfig const &config) -> bool {
+  auto new_log = CoordinatorStateMachine::SerializeAddCoordinatorInstance(config);
+  auto const res = raft_server_->append_entries({new_log});
+  if (!res->get_accepted()) {
+    spdlog::error(
+        "Failed to accept request for adding coordinator instance {}. Most likely the reason is that the instance is "
+        "not the leader.",
+        config.coordinator_server_id);
+    return false;
+  }
+
+  spdlog::info("Request for adding coordinator instance {} accepted", config.coordinator_server_id);
+
+  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
+    spdlog::error("Failed to add coordinator instance {} with error code {}", config.coordinator_server_id,
+                  static_cast<int>(res->get_result_code()));
+    return false;
+  }
+
+  return true;
+}
+
+auto RaftState::AppendUpdateUUIDForInstanceLog(std::string_view instance_name, const utils::UUID &uuid) -> bool {
+  auto new_log = CoordinatorStateMachine::SerializeUpdateUUIDForInstance(
+      {.instance_name = std::string{instance_name}, .uuid = uuid});
+  auto const res = raft_server_->append_entries({new_log});
+  if (!res->get_accepted()) {
+    spdlog::error("Failed to accept request for updating UUID of instance.");
+    return false;
+  }
+  spdlog::trace("Request for updating UUID of instance accepted");
+
+  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
+    spdlog::error("Failed to update UUID of instance with error code {}", int(res->get_result_code()));
+    return false;
+  }
+
+  return true;
 }
 
 auto RaftState::MainExists() const -> bool { return state_machine_->MainExists(); }
 
-auto RaftState::IsMain(std::string_view instance_name) const -> bool { return state_machine_->IsMain(instance_name); }
-
-auto RaftState::IsReplica(std::string_view instance_name) const -> bool {
-  return state_machine_->IsReplica(instance_name);
+auto RaftState::HasMainState(std::string_view instance_name) const -> bool {
+  return state_machine_->HasMainState(instance_name);
 }
 
-auto RaftState::GetInstances() const -> std::vector<InstanceState> { return state_machine_->GetInstances(); }
+auto RaftState::HasReplicaState(std::string_view instance_name) const -> bool {
+  return state_machine_->HasReplicaState(instance_name);
+}
 
-auto RaftState::GetUUID() const -> utils::UUID { return state_machine_->GetUUID(); }
+auto RaftState::GetReplicationInstances() const -> std::vector<ReplicationInstanceState> {
+  return state_machine_->GetReplicationInstances();
+}
+
+auto RaftState::GetCurrentMainUUID() const -> utils::UUID { return state_machine_->GetCurrentMainUUID(); }
+
+auto RaftState::IsCurrentMain(std::string_view instance_name) const -> bool {
+  return state_machine_->IsCurrentMain(instance_name);
+}
+
+auto RaftState::IsLockOpened() const -> bool { return state_machine_->IsLockOpened(); }
+
+auto RaftState::GetInstanceUUID(std::string_view instance_name) const -> utils::UUID {
+  return state_machine_->GetInstanceUUID(instance_name);
+}
+
+auto RaftState::GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState> {
+  return state_machine_->GetCoordinatorInstances();
+}
 
 }  // namespace memgraph::coordination
 #endif
diff --git a/src/coordination/replication_instance.cpp b/src/coordination/replication_instance.cpp
index ca7572ea7..00e4a98e0 100644
--- a/src/coordination/replication_instance.cpp
+++ b/src/coordination/replication_instance.cpp
@@ -20,7 +20,7 @@
 
 namespace memgraph::coordination {
 
-ReplicationInstance::ReplicationInstance(CoordinatorInstance *peer, CoordinatorClientConfig config,
+ReplicationInstance::ReplicationInstance(CoordinatorInstance *peer, CoordinatorToReplicaConfig config,
                                          HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb,
                                          HealthCheckInstanceCallback succ_instance_cb,
                                          HealthCheckInstanceCallback fail_instance_cb)
@@ -56,7 +56,6 @@ auto ReplicationInstance::PromoteToMain(utils::UUID const &new_uuid, Replication
     return false;
   }
 
-  main_uuid_ = new_uuid;
   succ_cb_ = main_succ_cb;
   fail_cb_ = main_fail_cb;
 
@@ -82,7 +81,7 @@ auto ReplicationInstance::StopFrequentCheck() -> void { client_.StopFrequentChec
 auto ReplicationInstance::PauseFrequentCheck() -> void { client_.PauseFrequentCheck(); }
 auto ReplicationInstance::ResumeFrequentCheck() -> void { client_.ResumeFrequentCheck(); }
 
-auto ReplicationInstance::ReplicationClientInfo() const -> CoordinatorClientConfig::ReplicationClientInfo {
+auto ReplicationInstance::ReplicationClientInfo() const -> coordination::ReplicationClientInfo {
   return client_.ReplicationClientInfo();
 }
 
@@ -91,9 +90,6 @@ auto ReplicationInstance::GetFailCallback() -> HealthCheckInstanceCallback & { r
 
 auto ReplicationInstance::GetClient() -> CoordinatorClient & { return client_; }
 
-auto ReplicationInstance::SetNewMainUUID(utils::UUID const &main_uuid) -> void { main_uuid_ = main_uuid; }
-auto ReplicationInstance::GetMainUUID() const -> std::optional<utils::UUID> const & { return main_uuid_; }
-
 auto ReplicationInstance::EnsureReplicaHasCorrectMainUUID(utils::UUID const &curr_main_uuid) -> bool {
   if (!IsReadyForUUIDPing()) {
     return true;
@@ -116,7 +112,6 @@ auto ReplicationInstance::SendSwapAndUpdateUUID(utils::UUID const &new_main_uuid
   if (!replication_coordination_glue::SendSwapMainUUIDRpc(client_.RpcClient(), new_main_uuid)) {
     return false;
   }
-  SetNewMainUUID(new_main_uuid);
   return true;
 }
 
diff --git a/src/dbms/coordinator_handler.cpp b/src/dbms/coordinator_handler.cpp
index 292d50d3d..1f64892bc 100644
--- a/src/dbms/coordinator_handler.cpp
+++ b/src/dbms/coordinator_handler.cpp
@@ -20,7 +20,7 @@ namespace memgraph::dbms {
 CoordinatorHandler::CoordinatorHandler(coordination::CoordinatorState &coordinator_state)
     : coordinator_state_(coordinator_state) {}
 
-auto CoordinatorHandler::RegisterReplicationInstance(coordination::CoordinatorClientConfig const &config)
+auto CoordinatorHandler::RegisterReplicationInstance(coordination::CoordinatorToReplicaConfig const &config)
     -> coordination::RegisterInstanceCoordinatorStatus {
   return coordinator_state_.RegisterReplicationInstance(config);
 }
@@ -39,9 +39,8 @@ auto CoordinatorHandler::ShowInstances() const -> std::vector<coordination::Inst
   return coordinator_state_.ShowInstances();
 }
 
-auto CoordinatorHandler::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port,
-                                                std::string_view raft_address) -> void {
-  coordinator_state_.AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
+auto CoordinatorHandler::AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void {
+  coordinator_state_.AddCoordinatorInstance(config);
 }
 
 }  // namespace memgraph::dbms
diff --git a/src/dbms/coordinator_handler.hpp b/src/dbms/coordinator_handler.hpp
index 1c456134d..f3640736a 100644
--- a/src/dbms/coordinator_handler.hpp
+++ b/src/dbms/coordinator_handler.hpp
@@ -13,7 +13,7 @@
 
 #ifdef MG_ENTERPRISE
 
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "coordination/coordinator_state.hpp"
 #include "coordination/instance_status.hpp"
 #include "coordination/register_main_replica_coordinator_status.hpp"
@@ -30,7 +30,7 @@ class CoordinatorHandler {
 
   // TODO: (andi) When moving coordinator state on same instances, rename from RegisterReplicationInstance to
   // RegisterInstance
-  auto RegisterReplicationInstance(coordination::CoordinatorClientConfig const &config)
+  auto RegisterReplicationInstance(coordination::CoordinatorToReplicaConfig const &config)
       -> coordination::RegisterInstanceCoordinatorStatus;
 
   auto UnregisterReplicationInstance(std::string_view instance_name)
@@ -40,7 +40,7 @@ class CoordinatorHandler {
 
   auto ShowInstances() const -> std::vector<coordination::InstanceStatus>;
 
-  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;
+  auto AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void;
 
  private:
   coordination::CoordinatorState &coordinator_state_;
diff --git a/src/dbms/dbms_handler.hpp b/src/dbms/dbms_handler.hpp
index b0bbd5758..482423ebf 100644
--- a/src/dbms/dbms_handler.hpp
+++ b/src/dbms/dbms_handler.hpp
@@ -311,7 +311,7 @@ class DbmsHandler {
         stats.triggers += info.triggers;
         stats.streams += info.streams;
         ++stats.num_databases;
-        stats.indices += storage_info.label_indices + storage_info.label_property_indices;
+        stats.indices += storage_info.label_indices + storage_info.label_property_indices + storage_info.text_indices;
         stats.constraints += storage_info.existence_constraints + storage_info.unique_constraints;
         ++stats.storage_modes[(int)storage_info.storage_mode];
         ++stats.isolation_levels[(int)storage_info.isolation_level];
diff --git a/src/dbms/inmemory/replication_handlers.cpp b/src/dbms/inmemory/replication_handlers.cpp
index 69f04914c..f9ce7a9d8 100644
--- a/src/dbms/inmemory/replication_handlers.cpp
+++ b/src/dbms/inmemory/replication_handlers.cpp
@@ -615,6 +615,7 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
         auto vertex = transaction->FindVertex(delta.vertex_add_remove_label.gid, View::NEW);
         if (!vertex)
           throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
+        // NOTE: Text search doesn’t have replication in scope yet (Phases 1 and 2)
         auto ret = vertex->AddLabel(transaction->NameToLabel(delta.vertex_add_remove_label.label));
         if (ret.HasError() || !ret.GetValue())
           throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
@@ -627,6 +628,7 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
         auto vertex = transaction->FindVertex(delta.vertex_add_remove_label.gid, View::NEW);
         if (!vertex)
           throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
+        // NOTE: Text search doesn’t have replication in scope yet (Phases 1 and 2)
         auto ret = vertex->RemoveLabel(transaction->NameToLabel(delta.vertex_add_remove_label.label));
         if (ret.HasError() || !ret.GetValue())
           throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
@@ -640,6 +642,7 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
         auto vertex = transaction->FindVertex(delta.vertex_edge_set_property.gid, View::NEW);
         if (!vertex)
           throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
+        // NOTE: Phase 1 of the text search feature doesn't have replication in scope
         auto ret = vertex->SetProperty(transaction->NameToProperty(delta.vertex_edge_set_property.property),
                                        delta.vertex_edge_set_property.value);
         if (ret.HasError())
@@ -853,6 +856,14 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
           throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
         break;
       }
+      case WalDeltaData::Type::TEXT_INDEX_CREATE: {
+        // NOTE: Text search doesn’t have replication in scope yet (Phases 1 and 2)
+        break;
+      }
+      case WalDeltaData::Type::TEXT_INDEX_DROP: {
+        // NOTE: Text search doesn’t have replication in scope yet (Phases 1 and 2)
+        break;
+      }
       case WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE: {
         spdlog::trace("       Create existence constraint on :{} ({})", delta.operation_label_property.label,
                       delta.operation_label_property.property);
diff --git a/src/flags/experimental.cpp b/src/flags/experimental.cpp
index 123903c96..8c29142a1 100644
--- a/src/flags/experimental.cpp
+++ b/src/flags/experimental.cpp
@@ -18,14 +18,15 @@
 
 // Bolt server flags.
 // NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
-DEFINE_string(experimental_enabled, "",
-              "Experimental features to be used, comma seperated. Options [system-replication, high-availability]");
-
+DEFINE_string(
+    experimental_enabled, "",
+    "Experimental features to be used, comma-separated. Options [system-replication, text-search, high-availability]");
 using namespace std::string_view_literals;
 
 namespace memgraph::flags {
 
 auto const mapping = std::map{std::pair{"system-replication"sv, Experiments::SYSTEM_REPLICATION},
+                              std::pair{"text-search"sv, Experiments::TEXT_SEARCH},
                               std::pair{"high-availability"sv, Experiments::HIGH_AVAILABILITY}};
 
 auto ExperimentsInstance() -> Experiments & {
@@ -45,7 +46,7 @@ bool AreExperimentsEnabled(Experiments experiments) {
 void InitializeExperimental() {
   namespace rv = ranges::views;
 
-  auto const connonicalize_string = [](auto &&rng) {
+  auto const canonicalize_string = [](auto &&rng) {
     auto const is_space = [](auto c) { return c == ' '; };
     auto const to_lower = [](unsigned char c) { return std::tolower(c); };
 
@@ -56,7 +57,7 @@ void InitializeExperimental() {
   auto const mapping_end = mapping.cend();
   using underlying_type = std::underlying_type_t<Experiments>;
   auto to_set = underlying_type{};
-  for (auto &&experiment : FLAGS_experimental_enabled | rv::split(',') | rv::transform(connonicalize_string)) {
+  for (auto &&experiment : FLAGS_experimental_enabled | rv::split(',') | rv::transform(canonicalize_string)) {
     if (auto it = mapping.find(experiment); it != mapping_end) {
       to_set |= static_cast<underlying_type>(it->second);
     }
diff --git a/src/flags/experimental.hpp b/src/flags/experimental.hpp
index 5a19889fe..0b209a4e8 100644
--- a/src/flags/experimental.hpp
+++ b/src/flags/experimental.hpp
@@ -23,7 +23,8 @@ namespace memgraph::flags {
 // old experiments can be reused once code cleanup has happened
 enum class Experiments : uint8_t {
   SYSTEM_REPLICATION = 1 << 0,
-  HIGH_AVAILABILITY = 1 << 1,
+  TEXT_SEARCH = 1 << 1,
+  HIGH_AVAILABILITY = 1 << 2,
 };
 
 bool AreExperimentsEnabled(Experiments experiments);
diff --git a/src/flags/replication.cpp b/src/flags/replication.cpp
index e6b71b942..3f8fd2400 100644
--- a/src/flags/replication.cpp
+++ b/src/flags/replication.cpp
@@ -13,11 +13,11 @@
 
 #ifdef MG_ENTERPRISE
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
-DEFINE_uint32(coordinator_server_port, 0, "Port on which coordinator servers will be started.");
+DEFINE_uint32(management_port, 0, "Port on which coordinator servers will be started.");
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
-DEFINE_uint32(raft_server_port, 0, "Port on which raft servers will be started.");
+DEFINE_uint32(coordinator_port, 0, "Port on which raft servers will be started.");
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
-DEFINE_uint32(raft_server_id, 0, "Unique ID of the raft server.");
+DEFINE_uint32(coordinator_id, 0, "Unique ID of the raft server.");
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 DEFINE_uint32(instance_down_timeout_sec, 5, "Time duration after which an instance is considered down.");
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
diff --git a/src/flags/replication.hpp b/src/flags/replication.hpp
index 0a4982f12..e0d1aff8c 100644
--- a/src/flags/replication.hpp
+++ b/src/flags/replication.hpp
@@ -15,11 +15,11 @@
 
 #ifdef MG_ENTERPRISE
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
-DECLARE_uint32(coordinator_server_port);
+DECLARE_uint32(management_port);
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
-DECLARE_uint32(raft_server_port);
+DECLARE_uint32(coordinator_port);
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
-DECLARE_uint32(raft_server_id);
+DECLARE_uint32(coordinator_id);
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 DECLARE_uint32(instance_down_timeout_sec);
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
diff --git a/src/flags/run_time_configurable.cpp b/src/flags/run_time_configurable.cpp
index a42ebd3d0..6c0fc54ac 100644
--- a/src/flags/run_time_configurable.cpp
+++ b/src/flags/run_time_configurable.cpp
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -73,11 +73,11 @@ constexpr auto kLogToStderrGFlagsKey = "also_log_to_stderr";
 constexpr auto kCartesianProductEnabledSettingKey = "cartesian-product-enabled";
 constexpr auto kCartesianProductEnabledGFlagsKey = "cartesian-product-enabled";
 
-// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
-std::atomic<double> execution_timeout_sec_;  // Local cache-like thing
-
-// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
-std::atomic<bool> cartesian_product_enabled_{true};  // Local cache-like thing
+// NOLINTBEGIN(cppcoreguidelines-avoid-non-const-global-variables)
+// Local cache-like thing
+std::atomic<double> execution_timeout_sec_;
+std::atomic<bool> cartesian_product_enabled_{true};
+// NOLINTEND(cppcoreguidelines-avoid-non-const-global-variables)
 
 auto ToLLEnum(std::string_view val) {
   const auto ll_enum = memgraph::flags::LogLevelToEnum(val);
diff --git a/src/flags/run_time_configurable.hpp b/src/flags/run_time_configurable.hpp
index 944a0539f..b215d6540 100644
--- a/src/flags/run_time_configurable.hpp
+++ b/src/flags/run_time_configurable.hpp
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
diff --git a/src/glue/SessionHL.cpp b/src/glue/SessionHL.cpp
index 6a48f15ca..51a444a30 100644
--- a/src/glue/SessionHL.cpp
+++ b/src/glue/SessionHL.cpp
@@ -249,6 +249,40 @@ std::pair<std::vector<std::string>, std::optional<int>> SessionHL::Interpret(
   }
 }
 
+using memgraph::communication::bolt::Value;
+
+#ifdef MG_ENTERPRISE
+auto SessionHL::Route(std::map<std::string, Value> const &routing,
+                      std::vector<memgraph::communication::bolt::Value> const & /*bookmarks*/,
+                      std::map<std::string, Value> const & /*extra*/) -> std::map<std::string, Value> {
+  auto routing_map = ranges::views::transform(
+                         routing, [](auto const &pair) { return std::pair(pair.first, pair.second.ValueString()); }) |
+                     ranges::to<std::map<std::string, std::string>>();
+
+  auto routing_table_res = interpreter_.Route(routing_map);
+
+  auto create_server = [](auto const &server_info) -> Value {
+    auto const &[addresses, role] = server_info;
+    std::map<std::string, Value> server_map;
+    auto bolt_addresses = ranges::views::transform(addresses, [](auto const &addr) { return Value{addr}; }) |
+                          ranges::to<std::vector<Value>>();
+
+    server_map["addresses"] = std::move(bolt_addresses);
+    server_map["role"] = memgraph::communication::bolt::Value{role};
+    return Value{std::move(server_map)};
+  };
+
+  std::map<std::string, Value> communication_res;
+  communication_res["ttl"] = Value{routing_table_res.ttl};
+  communication_res["db"] = Value{};
+
+  auto servers = ranges::views::transform(routing_table_res.servers, create_server) | ranges::to<std::vector<Value>>();
+  communication_res["servers"] = memgraph::communication::bolt::Value{std::move(servers)};
+
+  return {{"rt", memgraph::communication::bolt::Value{std::move(communication_res)}}};
+}
+#endif
+
 void SessionHL::RollbackTransaction() {
   try {
     interpreter_.RollbackTransaction();
diff --git a/src/glue/SessionHL.hpp b/src/glue/SessionHL.hpp
index cf0280fcc..9360f96b2 100644
--- a/src/glue/SessionHL.hpp
+++ b/src/glue/SessionHL.hpp
@@ -55,6 +55,13 @@ class SessionHL final : public memgraph::communication::bolt::Session<memgraph::
       const std::string &query, const std::map<std::string, memgraph::communication::bolt::Value> &params,
       const std::map<std::string, memgraph::communication::bolt::Value> &extra) override;
 
+#ifdef MG_ENTERPRISE
+  auto Route(std::map<std::string, memgraph::communication::bolt::Value> const &routing,
+             std::vector<memgraph::communication::bolt::Value> const &bookmarks,
+             std::map<std::string, memgraph::communication::bolt::Value> const &extra)
+      -> std::map<std::string, memgraph::communication::bolt::Value> override;
+#endif
+
   std::map<std::string, memgraph::communication::bolt::Value> Pull(TEncoder *encoder, std::optional<int> n,
                                                                    std::optional<int> qid) override;
 
diff --git a/src/io/network/endpoint.cpp b/src/io/network/endpoint.cpp
index 6ed4a6753..c996055ff 100644
--- a/src/io/network/endpoint.cpp
+++ b/src/io/network/endpoint.cpp
@@ -82,8 +82,7 @@ bool Endpoint::IsResolvableAddress(std::string_view address, uint16_t port) {
   return status == 0;
 }
 
-std::optional<ParsedAddress> Endpoint::ParseSocketOrAddress(std::string_view address,
-                                                            std::optional<uint16_t> default_port) {
+std::optional<Endpoint> Endpoint::ParseSocketOrAddress(std::string_view address, std::optional<uint16_t> default_port) {
   auto const parts = utils::SplitView(address, delimiter);
 
   if (parts.size() > 2) {
@@ -109,13 +108,13 @@ std::optional<ParsedAddress> Endpoint::ParseSocketOrAddress(std::string_view add
   }();
 
   if (GetIpFamily(addr) == IpFamily::NONE) {
-    if (IsResolvableAddress(addr, *port)) {  // NOLINT
-      return std::pair{addr, *port};         // NOLINT
+    if (IsResolvableAddress(addr, *port)) {       // NOLINT
+      return Endpoint{std::string(addr), *port};  // NOLINT
     }
     return std::nullopt;
   }
 
-  return std::pair{addr, *port};  // NOLINT
+  return Endpoint{std::string(addr), *port};  // NOLINT
 }
 
 auto Endpoint::ValidatePort(std::optional<uint16_t> port) -> bool {
@@ -138,4 +137,14 @@ auto Endpoint::ValidatePort(std::optional<uint16_t> port) -> bool {
   return true;
 }
 
+void to_json(nlohmann::json &j, Endpoint const &config) {
+  j = nlohmann::json{{"address", config.address}, {"port", config.port}, {"family", config.family}};
+}
+
+void from_json(nlohmann::json const &j, Endpoint &config) {
+  config.address = j.at("address").get<std::string>();
+  config.port = j.at("port").get<uint16_t>();
+  config.family = j.at("family").get<Endpoint::IpFamily>();
+}
+
 }  // namespace memgraph::io::network
diff --git a/src/io/network/endpoint.hpp b/src/io/network/endpoint.hpp
index f46d28ace..c47c736ee 100644
--- a/src/io/network/endpoint.hpp
+++ b/src/io/network/endpoint.hpp
@@ -17,9 +17,9 @@
 #include <optional>
 #include <string>
 
-namespace memgraph::io::network {
+#include "json/json.hpp"
 
-using ParsedAddress = std::pair<std::string_view, uint16_t>;
+namespace memgraph::io::network {
 
 struct Endpoint {
   static const struct needs_resolving_t {
@@ -39,8 +39,8 @@ struct Endpoint {
 
   enum class IpFamily : std::uint8_t { NONE, IP4, IP6 };
 
-  static std::optional<ParsedAddress> ParseSocketOrAddress(std::string_view address,
-                                                           std::optional<uint16_t> default_port = {});
+  static std::optional<Endpoint> ParseSocketOrAddress(std::string_view address,
+                                                      std::optional<uint16_t> default_port = {});
 
   std::string SocketAddress() const;
 
@@ -59,4 +59,7 @@ struct Endpoint {
   static auto ValidatePort(std::optional<uint16_t> port) -> bool;
 };
 
+void to_json(nlohmann::json &j, Endpoint const &config);
+void from_json(nlohmann::json const &j, Endpoint &config);
+
 }  // namespace memgraph::io::network
diff --git a/src/memgraph.cpp b/src/memgraph.cpp
index 617b4cacc..47f8b4d26 100644
--- a/src/memgraph.cpp
+++ b/src/memgraph.cpp
@@ -439,7 +439,7 @@ int main(int argc, char **argv) {
 
 #ifdef MG_ENTERPRISE
   // MAIN or REPLICA instance
-  if (FLAGS_coordinator_server_port) {
+  if (FLAGS_management_port) {
     memgraph::dbms::CoordinatorHandlers::Register(coordinator_state.GetCoordinatorServer(), replication_handler);
     MG_ASSERT(coordinator_state.GetCoordinatorServer().Start(), "Failed to start coordinator server!");
   }
diff --git a/src/query/db_accessor.hpp b/src/query/db_accessor.hpp
index 915ea9936..ee4988e4a 100644
--- a/src/query/db_accessor.hpp
+++ b/src/query/db_accessor.hpp
@@ -634,6 +634,24 @@ class DbAccessor final {
 
   bool EdgeTypeIndexExists(storage::EdgeTypeId edge_type) const { return accessor_->EdgeTypeIndexExists(edge_type); }
 
+  bool TextIndexExists(const std::string &index_name) const { return accessor_->TextIndexExists(index_name); }
+
+  void TextIndexAddVertex(const VertexAccessor &vertex) { accessor_->TextIndexAddVertex(vertex.impl_); }
+
+  void TextIndexUpdateVertex(const VertexAccessor &vertex, const std::vector<storage::LabelId> &removed_labels = {}) {
+    accessor_->TextIndexUpdateVertex(vertex.impl_, removed_labels);
+  }
+
+  std::vector<storage::Gid> TextIndexSearch(const std::string &index_name, const std::string &search_query,
+                                            text_search_mode search_mode) const {
+    return accessor_->TextIndexSearch(index_name, search_query, search_mode);
+  }
+
+  std::string TextIndexAggregate(const std::string &index_name, const std::string &search_query,
+                                 const std::string &aggregation_query) const {
+    return accessor_->TextIndexAggregate(index_name, search_query, aggregation_query);
+  }
+
   std::optional<storage::LabelIndexStats> GetIndexStats(const storage::LabelId &label) const {
     return accessor_->GetIndexStats(label);
   }
@@ -717,6 +735,12 @@ class DbAccessor final {
     return accessor_->DropIndex(edge_type);
   }
 
+  void CreateTextIndex(const std::string &index_name, storage::LabelId label) {
+    accessor_->CreateTextIndex(index_name, label, this);
+  }
+
+  void DropTextIndex(const std::string &index_name) { accessor_->DropTextIndex(index_name); }
+
   utils::BasicResult<storage::StorageExistenceConstraintDefinitionError, void> CreateExistenceConstraint(
       storage::LabelId label, storage::PropertyId property) {
     return accessor_->CreateExistenceConstraint(label, property);
diff --git a/src/query/dump.cpp b/src/query/dump.cpp
index f1dd08c8d..abc147ee8 100644
--- a/src/query/dump.cpp
+++ b/src/query/dump.cpp
@@ -252,6 +252,10 @@ void DumpLabelPropertyIndex(std::ostream *os, query::DbAccessor *dba, storage::L
       << ");";
 }
 
+void DumpTextIndex(std::ostream *os, query::DbAccessor *dba, const std::string &index_name, storage::LabelId label) {
+  *os << "CREATE TEXT INDEX " << EscapeName(index_name) << " ON :" << EscapeName(dba->LabelToName(label)) << ";";
+}
+
 void DumpExistenceConstraint(std::ostream *os, query::DbAccessor *dba, storage::LabelId label,
                              storage::PropertyId property) {
   *os << "CREATE CONSTRAINT ON (u:" << EscapeName(dba->LabelToName(label)) << ") ASSERT EXISTS (u."
@@ -286,6 +290,8 @@ PullPlanDump::PullPlanDump(DbAccessor *dba, dbms::DatabaseAccess db_acc)
                    CreateLabelIndicesPullChunk(),
                    // Dump all label property indices
                    CreateLabelPropertyIndicesPullChunk(),
+                   // Dump all text indices
+                   CreateTextIndicesPullChunk(),
                    // Dump all existence constraints
                    CreateExistenceConstraintsPullChunk(),
                    // Dump all unique constraints
@@ -412,6 +418,34 @@ PullPlanDump::PullChunk PullPlanDump::CreateLabelPropertyIndicesPullChunk() {
   };
 }
 
+PullPlanDump::PullChunk PullPlanDump::CreateTextIndicesPullChunk() {
+  // Dump all text indices
+  return [this, global_index = 0U](AnyStream *stream, std::optional<int> n) mutable -> std::optional<size_t> {
+    // Delay the construction of indices vectors
+    if (!indices_info_) {
+      indices_info_.emplace(dba_->ListAllIndices());
+    }
+    const auto &text = indices_info_->text_indices;
+
+    size_t local_counter = 0;
+    while (global_index < text.size() && (!n || local_counter < *n)) {
+      std::ostringstream os;
+      const auto &text_index = text[global_index];
+      DumpTextIndex(&os, dba_, text_index.first, text_index.second);
+      stream->Result({TypedValue(os.str())});
+
+      ++global_index;
+      ++local_counter;
+    }
+
+    if (global_index == text.size()) {
+      return local_counter;
+    }
+
+    return std::nullopt;
+  };
+}
+
 PullPlanDump::PullChunk PullPlanDump::CreateExistenceConstraintsPullChunk() {
   return [this, global_index = 0U](AnyStream *stream, std::optional<int> n) mutable -> std::optional<size_t> {
     // Delay the construction of constraint vectors
diff --git a/src/query/dump.hpp b/src/query/dump.hpp
index 05bd42967..0cf4a82a6 100644
--- a/src/query/dump.hpp
+++ b/src/query/dump.hpp
@@ -55,6 +55,7 @@ struct PullPlanDump {
 
   PullChunk CreateLabelIndicesPullChunk();
   PullChunk CreateLabelPropertyIndicesPullChunk();
+  PullChunk CreateTextIndicesPullChunk();
   PullChunk CreateExistenceConstraintsPullChunk();
   PullChunk CreateUniqueConstraintsPullChunk();
   PullChunk CreateInternalIndexPullChunk();
diff --git a/src/query/exceptions.hpp b/src/query/exceptions.hpp
index 147dc8710..a4c25fbae 100644
--- a/src/query/exceptions.hpp
+++ b/src/query/exceptions.hpp
@@ -433,4 +433,17 @@ class MultiDatabaseQueryInMulticommandTxException : public QueryException {
   SPECIALIZE_GET_EXCEPTION_NAME(MultiDatabaseQueryInMulticommandTxException)
 };
 
+class TextSearchException : public QueryException {
+  using QueryException::QueryException;
+  SPECIALIZE_GET_EXCEPTION_NAME(TextSearchException)
+};
+
+class TextSearchDisabledException : public TextSearchException {
+ public:
+  TextSearchDisabledException()
+      : TextSearchException(
+            "To use text indices and text search, start Memgraph with the experimental text search feature enabled.") {}
+  SPECIALIZE_GET_EXCEPTION_NAME(TextSearchDisabledException)
+};
+
 }  // namespace memgraph::query
diff --git a/src/query/frontend/ast/ast.cpp b/src/query/frontend/ast/ast.cpp
index 7da5c09a0..f0d09d453 100644
--- a/src/query/frontend/ast/ast.cpp
+++ b/src/query/frontend/ast/ast.cpp
@@ -189,6 +189,9 @@ constexpr utils::TypeInfo query::IndexQuery::kType{utils::TypeId::AST_INDEX_QUER
 constexpr utils::TypeInfo query::EdgeIndexQuery::kType{utils::TypeId::AST_EDGE_INDEX_QUERY, "EdgeIndexQuery",
                                                        &query::Query::kType};
 
+constexpr utils::TypeInfo query::TextIndexQuery::kType{utils::TypeId::AST_TEXT_INDEX_QUERY, "TextIndexQuery",
+                                                       &query::Query::kType};
+
 constexpr utils::TypeInfo query::Create::kType{utils::TypeId::AST_CREATE, "Create", &query::Clause::kType};
 
 constexpr utils::TypeInfo query::CallProcedure::kType{utils::TypeId::AST_CALL_PROCEDURE, "CallProcedure",
diff --git a/src/query/frontend/ast/ast.hpp b/src/query/frontend/ast/ast.hpp
index 29f7be3cf..e3d7bc0b2 100644
--- a/src/query/frontend/ast/ast.hpp
+++ b/src/query/frontend/ast/ast.hpp
@@ -2273,6 +2273,37 @@ class EdgeIndexQuery : public memgraph::query::Query {
   friend class AstStorage;
 };
 
+class TextIndexQuery : public memgraph::query::Query {
+ public:
+  static const utils::TypeInfo kType;
+  const utils::TypeInfo &GetTypeInfo() const override { return kType; }
+
+  enum class Action { CREATE, DROP };
+
+  TextIndexQuery() = default;
+
+  DEFVISITABLE(QueryVisitor<void>);
+
+  memgraph::query::TextIndexQuery::Action action_;
+  memgraph::query::LabelIx label_;
+  std::string index_name_;
+
+  TextIndexQuery *Clone(AstStorage *storage) const override {
+    TextIndexQuery *object = storage->Create<TextIndexQuery>();
+    object->action_ = action_;
+    object->label_ = storage->GetLabelIx(label_.name);
+    object->index_name_ = index_name_;
+    return object;
+  }
+
+ protected:
+  TextIndexQuery(Action action, LabelIx label, std::string index_name)
+      : action_(action), label_(std::move(label)), index_name_(index_name) {}
+
+ private:
+  friend class AstStorage;
+};
+
 class Create : public memgraph::query::Clause {
  public:
   static const utils::TypeInfo kType;
diff --git a/src/query/frontend/ast/ast_visitor.hpp b/src/query/frontend/ast/ast_visitor.hpp
index bf11878da..cc6aed138 100644
--- a/src/query/frontend/ast/ast_visitor.hpp
+++ b/src/query/frontend/ast/ast_visitor.hpp
@@ -83,6 +83,7 @@ class ExplainQuery;
 class ProfileQuery;
 class IndexQuery;
 class EdgeIndexQuery;
+class TextIndexQuery;
 class DatabaseInfoQuery;
 class SystemInfoQuery;
 class ConstraintQuery;
@@ -144,11 +145,11 @@ class ExpressionVisitor
 
 template <class TResult>
 class QueryVisitor
-    : public utils::Visitor<TResult, CypherQuery, ExplainQuery, ProfileQuery, IndexQuery, EdgeIndexQuery, AuthQuery,
-                            DatabaseInfoQuery, SystemInfoQuery, ConstraintQuery, DumpQuery, ReplicationQuery,
-                            LockPathQuery, FreeMemoryQuery, TriggerQuery, IsolationLevelQuery, CreateSnapshotQuery,
-                            StreamQuery, SettingQuery, VersionQuery, ShowConfigQuery, TransactionQueueQuery,
-                            StorageModeQuery, AnalyzeGraphQuery, MultiDatabaseQuery, ShowDatabasesQuery,
-                            EdgeImportModeQuery, CoordinatorQuery> {};
+    : public utils::Visitor<TResult, CypherQuery, ExplainQuery, ProfileQuery, IndexQuery, EdgeIndexQuery,
+                            TextIndexQuery, AuthQuery, DatabaseInfoQuery, SystemInfoQuery, ConstraintQuery, DumpQuery,
+                            ReplicationQuery, LockPathQuery, FreeMemoryQuery, TriggerQuery, IsolationLevelQuery,
+                            CreateSnapshotQuery, StreamQuery, SettingQuery, VersionQuery, ShowConfigQuery,
+                            TransactionQueueQuery, StorageModeQuery, AnalyzeGraphQuery, MultiDatabaseQuery,
+                            ShowDatabasesQuery, EdgeImportModeQuery, CoordinatorQuery> {};
 
 }  // namespace memgraph::query
diff --git a/src/query/frontend/ast/cypher_main_visitor.cpp b/src/query/frontend/ast/cypher_main_visitor.cpp
index 6da48c97c..35ccb3670 100644
--- a/src/query/frontend/ast/cypher_main_visitor.cpp
+++ b/src/query/frontend/ast/cypher_main_visitor.cpp
@@ -243,6 +243,13 @@ antlrcpp::Any CypherMainVisitor::visitIndexQuery(MemgraphCypher::IndexQueryConte
   return index_query;
 }
 
+antlrcpp::Any CypherMainVisitor::visitTextIndexQuery(MemgraphCypher::TextIndexQueryContext *ctx) {
+  MG_ASSERT(ctx->children.size() == 1, "TextIndexQuery should have exactly one child!");
+  auto *text_index_query = std::any_cast<TextIndexQuery *>(ctx->children[0]->accept(this));
+  query_ = text_index_query;
+  return text_index_query;
+}
+
 antlrcpp::Any CypherMainVisitor::visitCreateIndex(MemgraphCypher::CreateIndexContext *ctx) {
   auto *index_query = storage_->Create<IndexQuery>();
   index_query->action_ = IndexQuery::Action::CREATE;
@@ -286,6 +293,21 @@ antlrcpp::Any CypherMainVisitor::visitDropEdgeIndex(MemgraphCypher::DropEdgeInde
   return index_query;
 }
 
+antlrcpp::Any CypherMainVisitor::visitCreateTextIndex(MemgraphCypher::CreateTextIndexContext *ctx) {
+  auto *index_query = storage_->Create<TextIndexQuery>();
+  index_query->index_name_ = std::any_cast<std::string>(ctx->indexName()->accept(this));
+  index_query->action_ = TextIndexQuery::Action::CREATE;
+  index_query->label_ = AddLabel(std::any_cast<std::string>(ctx->labelName()->accept(this)));
+  return index_query;
+}
+
+antlrcpp::Any CypherMainVisitor::visitDropTextIndex(MemgraphCypher::DropTextIndexContext *ctx) {
+  auto *index_query = storage_->Create<TextIndexQuery>();
+  index_query->index_name_ = std::any_cast<std::string>(ctx->indexName()->accept(this));
+  index_query->action_ = TextIndexQuery::Action::DROP;
+  return index_query;
+}
+
 antlrcpp::Any CypherMainVisitor::visitAuthQuery(MemgraphCypher::AuthQueryContext *ctx) {
   MG_ASSERT(ctx->children.size() == 1, "AuthQuery should have exactly one child!");
   auto *auth_query = std::any_cast<AuthQuery *>(ctx->children[0]->accept(this));
diff --git a/src/query/frontend/ast/cypher_main_visitor.hpp b/src/query/frontend/ast/cypher_main_visitor.hpp
index 8c65345c8..53738af61 100644
--- a/src/query/frontend/ast/cypher_main_visitor.hpp
+++ b/src/query/frontend/ast/cypher_main_visitor.hpp
@@ -153,6 +153,11 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
    */
   antlrcpp::Any visitEdgeIndexQuery(MemgraphCypher::EdgeIndexQueryContext *ctx) override;
 
+  /**
+   * @return TextIndexQuery*
+   */
+  antlrcpp::Any visitTextIndexQuery(MemgraphCypher::TextIndexQueryContext *ctx) override;
+
   /**
    * @return ExplainQuery*
    */
@@ -500,7 +505,7 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
   antlrcpp::Any visitCreateIndex(MemgraphCypher::CreateIndexContext *ctx) override;
 
   /**
-   * @return DropIndex*
+   * @return IndexQuery*
    */
   antlrcpp::Any visitDropIndex(MemgraphCypher::DropIndexContext *ctx) override;
 
@@ -514,6 +519,16 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
    */
   antlrcpp::Any visitDropEdgeIndex(MemgraphCypher::DropEdgeIndexContext *ctx) override;
 
+  /**
+   * @return TextIndexQuery*
+   */
+  antlrcpp::Any visitCreateTextIndex(MemgraphCypher::CreateTextIndexContext *ctx) override;
+
+  /**
+   * @return TextIndexQuery*
+   */
+  antlrcpp::Any visitDropTextIndex(MemgraphCypher::DropTextIndexContext *ctx) override;
+
   /**
    * @return AuthQuery*
    */
diff --git a/src/query/frontend/opencypher/grammar/Cypher.g4 b/src/query/frontend/opencypher/grammar/Cypher.g4
index 7fa218598..911615314 100644
--- a/src/query/frontend/opencypher/grammar/Cypher.g4
+++ b/src/query/frontend/opencypher/grammar/Cypher.g4
@@ -25,6 +25,7 @@ statement : query ;
 
 query : cypherQuery
       | indexQuery
+      | textIndexQuery
       | explainQuery
       | profileQuery
       | databaseInfoQuery
@@ -65,6 +66,8 @@ cypherQuery : singleQuery ( cypherUnion )* ( queryMemoryLimit )? ;
 
 indexQuery : createIndex | dropIndex;
 
+textIndexQuery : createTextIndex | dropTextIndex;
+
 singleQuery : clause ( clause )* ;
 
 cypherUnion : ( UNION ALL singleQuery )
@@ -342,6 +345,12 @@ createIndex : CREATE INDEX ON ':' labelName ( '(' propertyKeyName ')' )? ;
 
 dropIndex : DROP INDEX ON ':' labelName ( '(' propertyKeyName ')' )? ;
 
+indexName : symbolicName ;
+
+createTextIndex : CREATE TEXT INDEX indexName ON ':' labelName ;
+
+dropTextIndex : DROP TEXT INDEX indexName ;
+
 doubleLiteral : FloatingLiteral ;
 
 cypherKeyword : ALL
diff --git a/src/query/frontend/opencypher/grammar/CypherLexer.g4 b/src/query/frontend/opencypher/grammar/CypherLexer.g4
index 3e3c640d6..fb8a30b0f 100644
--- a/src/query/frontend/opencypher/grammar/CypherLexer.g4
+++ b/src/query/frontend/opencypher/grammar/CypherLexer.g4
@@ -131,6 +131,7 @@ SHOW           : S H O W ;
 SINGLE         : S I N G L E ;
 STARTS         : S T A R T S ;
 STORAGE        : S T O R A G E ;
+TEXT           : T E X T ;
 THEN           : T H E N ;
 TRUE           : T R U E ;
 UNION          : U N I O N ;
diff --git a/src/query/frontend/opencypher/grammar/MemgraphCypher.g4 b/src/query/frontend/opencypher/grammar/MemgraphCypher.g4
index 378310c22..ad15d6213 100644
--- a/src/query/frontend/opencypher/grammar/MemgraphCypher.g4
+++ b/src/query/frontend/opencypher/grammar/MemgraphCypher.g4
@@ -134,6 +134,7 @@ symbolicName : UnescapedSymbolicName
 query : cypherQuery
       | indexQuery
       | edgeIndexQuery
+      | textIndexQuery
       | explainQuery
       | profileQuery
       | databaseInfoQuery
diff --git a/src/query/frontend/semantic/required_privileges.cpp b/src/query/frontend/semantic/required_privileges.cpp
index 15726e3e2..d87fcb10e 100644
--- a/src/query/frontend/semantic/required_privileges.cpp
+++ b/src/query/frontend/semantic/required_privileges.cpp
@@ -29,6 +29,8 @@ class PrivilegeExtractor : public QueryVisitor<void>, public HierarchicalTreeVis
 
   void Visit(EdgeIndexQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }
 
+  void Visit(TextIndexQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }
+
   void Visit(AnalyzeGraphQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }
 
   void Visit(AuthQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::AUTH); }
diff --git a/src/query/frontend/semantic/symbol.hpp b/src/query/frontend/semantic/symbol.hpp
index 0cfb86608..1a5aa2756 100644
--- a/src/query/frontend/semantic/symbol.hpp
+++ b/src/query/frontend/semantic/symbol.hpp
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
diff --git a/src/query/interpreter.cpp b/src/query/interpreter.cpp
index 1322a7b99..1d02f8435 100644
--- a/src/query/interpreter.cpp
+++ b/src/query/interpreter.cpp
@@ -39,6 +39,7 @@
 #include "dbms/dbms_handler.hpp"
 #include "dbms/global.hpp"
 #include "dbms/inmemory/storage_helper.hpp"
+#include "flags/experimental.hpp"
 #include "flags/replication.hpp"
 #include "flags/run_time_configurable.hpp"
 #include "glue/communication.hpp"
@@ -327,15 +328,14 @@ class ReplQueryHandler {
 
     const auto repl_mode = convertToReplicationMode(sync_mode);
 
-    const auto maybe_ip_and_port =
+    auto maybe_endpoint =
         io::network::Endpoint::ParseSocketOrAddress(socket_address, memgraph::replication::kDefaultReplicationPort);
-    if (maybe_ip_and_port) {
-      const auto [ip, port] = *maybe_ip_and_port;
+    if (maybe_endpoint) {
       const auto replication_config =
           replication::ReplicationClientConfig{.name = name,
                                                .mode = repl_mode,
-                                               .ip_address = std::string(ip),
-                                               .port = port,
+                                               .ip_address = std::move(maybe_endpoint->address),
+                                               .port = maybe_endpoint->port,
                                                .replica_check_frequency = replica_check_frequency,
                                                .ssl = std::nullopt};
 
@@ -407,44 +407,51 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
       case RPC_FAILED:
         throw QueryRuntimeException(
             "Couldn't unregister replica instance because current main instance couldn't unregister replica!");
+      case LOCK_OPENED:
+        throw QueryRuntimeException("Couldn't unregister replica because the last action didn't finish successfully!");
+      case OPEN_LOCK:
+        throw QueryRuntimeException(
+            "Couldn't unregister replica instance as cluster didn't accept entering unregistration state!");
       case SUCCESS:
         break;
     }
   }
 
-  void RegisterReplicationInstance(std::string_view coordinator_socket_address,
-                                   std::string_view replication_socket_address,
+  void RegisterReplicationInstance(std::string_view bolt_server, std::string_view management_server,
+                                   std::string_view replication_server,
                                    std::chrono::seconds const &instance_check_frequency,
                                    std::chrono::seconds const &instance_down_timeout,
                                    std::chrono::seconds const &instance_get_uuid_frequency,
                                    std::string_view instance_name, CoordinatorQuery::SyncMode sync_mode) override {
-    const auto maybe_replication_ip_port = io::network::Endpoint::ParseSocketOrAddress(replication_socket_address);
-    if (!maybe_replication_ip_port) {
+    auto const maybe_bolt_server = io::network::Endpoint::ParseSocketOrAddress(bolt_server);
+    if (!maybe_bolt_server) {
+      throw QueryRuntimeException("Invalid bolt socket address!");
+    }
+
+    auto const maybe_management_server = io::network::Endpoint::ParseSocketOrAddress(management_server);
+    if (!maybe_management_server) {
+      throw QueryRuntimeException("Invalid management socket address!");
+    }
+
+    auto const maybe_replication_server = io::network::Endpoint::ParseSocketOrAddress(replication_server);
+    if (!maybe_replication_server) {
       throw QueryRuntimeException("Invalid replication socket address!");
     }
 
-    const auto maybe_coordinator_ip_port = io::network::Endpoint::ParseSocketOrAddress(coordinator_socket_address);
-    if (!maybe_replication_ip_port) {
-      throw QueryRuntimeException("Invalid replication socket address!");
-    }
-
-    const auto [replication_ip, replication_port] = *maybe_replication_ip_port;
-    const auto [coordinator_server_ip, coordinator_server_port] = *maybe_coordinator_ip_port;
-    const auto repl_config = coordination::CoordinatorClientConfig::ReplicationClientInfo{
-        .instance_name = std::string(instance_name),
-        .replication_mode = convertFromCoordinatorToReplicationMode(sync_mode),
-        .replication_ip_address = std::string(replication_ip),
-        .replication_port = replication_port};
+    auto const repl_config =
+        coordination::ReplicationClientInfo{.instance_name = std::string(instance_name),
+                                            .replication_mode = convertFromCoordinatorToReplicationMode(sync_mode),
+                                            .replication_server = *maybe_replication_server};
 
     auto coordinator_client_config =
-        coordination::CoordinatorClientConfig{.instance_name = std::string(instance_name),
-                                              .ip_address = std::string(coordinator_server_ip),
-                                              .port = coordinator_server_port,
-                                              .instance_health_check_frequency_sec = instance_check_frequency,
-                                              .instance_down_timeout_sec = instance_down_timeout,
-                                              .instance_get_uuid_frequency_sec = instance_get_uuid_frequency,
-                                              .replication_client_info = repl_config,
-                                              .ssl = std::nullopt};
+        coordination::CoordinatorToReplicaConfig{.instance_name = std::string(instance_name),
+                                                 .mgt_server = *maybe_management_server,
+                                                 .bolt_server = *maybe_bolt_server,
+                                                 .replication_client_info = repl_config,
+                                                 .instance_health_check_frequency_sec = instance_check_frequency,
+                                                 .instance_down_timeout_sec = instance_down_timeout,
+                                                 .instance_get_uuid_frequency_sec = instance_get_uuid_frequency,
+                                                 .ssl = std::nullopt};
 
     auto status = coordinator_handler_.RegisterReplicationInstance(coordinator_client_config);
     switch (status) {
@@ -467,20 +474,36 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
         throw QueryRuntimeException(
             "Couldn't register replica instance because setting instance to replica failed! Check logs on replica to "
             "find out more info!");
+      case LOCK_OPENED:
+        throw QueryRuntimeException(
+            "Couldn't register replica instance because the last action didn't finish successfully!");
+      case OPEN_LOCK:
+        throw QueryRuntimeException(
+            "Couldn't register replica instance because cluster didn't accept registration query!");
       case SUCCESS:
         break;
     }
   }
 
-  auto AddCoordinatorInstance(uint32_t raft_server_id, std::string_view raft_socket_address) -> void override {
-    auto const maybe_ip_and_port = io::network::Endpoint::ParseSocketOrAddress(raft_socket_address);
-    if (maybe_ip_and_port) {
-      auto const [ip, port] = *maybe_ip_and_port;
-      spdlog::info("Adding instance {} with raft socket address {}:{}.", raft_server_id, ip, port);
-      coordinator_handler_.AddCoordinatorInstance(raft_server_id, port, ip);
-    } else {
-      spdlog::error("Invalid raft socket address {}.", raft_socket_address);
+  auto AddCoordinatorInstance(uint32_t coordinator_id, std::string_view bolt_server,
+                              std::string_view coordinator_server) -> void override {
+    auto const maybe_coordinator_server = io::network::Endpoint::ParseSocketOrAddress(coordinator_server);
+    if (!maybe_coordinator_server) {
+      throw QueryRuntimeException("Invalid coordinator socket address!");
     }
+
+    auto const maybe_bolt_server = io::network::Endpoint::ParseSocketOrAddress(bolt_server);
+    if (!maybe_bolt_server) {
+      throw QueryRuntimeException("Invalid bolt socket address!");
+    }
+
+    auto const coord_coord_config =
+        coordination::CoordinatorToCoordinatorConfig{.coordinator_server_id = coordinator_id,
+                                                     .bolt_server = *maybe_bolt_server,
+                                                     .coordinator_server = *maybe_coordinator_server};
+
+    coordinator_handler_.AddCoordinatorInstance(coord_coord_config);
+    spdlog::info("Added instance on coordinator server {}", maybe_coordinator_server->SocketAddress());
   }
 
   void SetReplicationInstanceToMain(std::string_view instance_name) override {
@@ -502,6 +525,14 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
             "Couldn't set replica instance to main! Check coordinator and replica for more logs");
       case SWAP_UUID_FAILED:
         throw QueryRuntimeException("Couldn't set replica instance to main. Replicas didn't swap uuid of new main.");
+      case OPEN_LOCK:
+        throw QueryRuntimeException(
+            "Couldn't set replica instance to main as cluster didn't accept setting instance state.");
+      case LOCK_OPENED:
+        throw QueryRuntimeException(
+            "Couldn't set replica instance to main because the last action didn't finish successfully!");
+      case ENABLE_WRITING_FAILED:
+        throw QueryRuntimeException("Instance promoted to MAIN, but couldn't enable writing to instance.");
       case SUCCESS:
         break;
     }
@@ -517,7 +548,7 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
 #endif
 
 /// returns false if the replication role can't be set
-/// @throw QueryRuntimeException if an error ocurred.
+/// @throw QueryRuntimeException if an error occurred.
 
 Callback HandleAuthQuery(AuthQuery *auth_query, InterpreterContext *interpreter_context, const Parameters &parameters,
                          Interpreter &interpreter) {
@@ -930,10 +961,10 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
   switch (repl_query->action_) {
     case ReplicationQuery::Action::SET_REPLICATION_ROLE: {
 #ifdef MG_ENTERPRISE
-      if (FLAGS_raft_server_id) {
+      if (FLAGS_coordinator_id) {
         throw QueryRuntimeException("Coordinator can't set roles!");
       }
-      if (FLAGS_coordinator_server_port) {
+      if (FLAGS_management_port) {
         throw QueryRuntimeException("Can't set role manually on instance with coordinator server port.");
       }
 #endif
@@ -960,7 +991,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
     }
     case ReplicationQuery::Action::SHOW_REPLICATION_ROLE: {
 #ifdef MG_ENTERPRISE
-      if (FLAGS_raft_server_id) {
+      if (FLAGS_coordinator_id) {
         throw QueryRuntimeException("Coordinator doesn't have a replication role!");
       }
 #endif
@@ -981,7 +1012,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
     }
     case ReplicationQuery::Action::REGISTER_REPLICA: {
 #ifdef MG_ENTERPRISE
-      if (FLAGS_coordinator_server_port) {
+      if (FLAGS_management_port) {
         throw QueryRuntimeException("Can't register replica manually on instance with coordinator server port.");
       }
 #endif
@@ -1002,7 +1033,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
 
     case ReplicationQuery::Action::DROP_REPLICA: {
 #ifdef MG_ENTERPRISE
-      if (FLAGS_coordinator_server_port) {
+      if (FLAGS_management_port) {
         throw QueryRuntimeException("Can't drop replica manually on instance with coordinator server port.");
       }
 #endif
@@ -1017,7 +1048,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
     }
     case ReplicationQuery::Action::SHOW_REPLICAS: {
 #ifdef MG_ENTERPRISE
-      if (FLAGS_raft_server_id) {
+      if (FLAGS_coordinator_id) {
         throw QueryRuntimeException("Coordinator cannot call SHOW REPLICAS! Use SHOW INSTANCES instead.");
       }
 #endif
@@ -1164,7 +1195,7 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
   Callback callback;
   switch (coordinator_query->action_) {
     case CoordinatorQuery::Action::ADD_COORDINATOR_INSTANCE: {
-      if (!FLAGS_raft_server_id) {
+      if (!FLAGS_coordinator_id) {
         throw QueryRuntimeException("Only coordinator can add coordinator instance!");
       }
 
@@ -1196,8 +1227,9 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
       auto coord_server_id = coordinator_query->coordinator_server_id_->Accept(evaluator).ValueInt();
 
       callback.fn = [handler = CoordQueryHandler{*coordinator_state}, coord_server_id,
+                     bolt_server = bolt_server_it->second,
                      coordinator_server = coordinator_server_it->second]() mutable {
-        handler.AddCoordinatorInstance(coord_server_id, coordinator_server);
+        handler.AddCoordinatorInstance(coord_server_id, bolt_server, coordinator_server);
         return std::vector<std::vector<TypedValue>>();
       };
 
@@ -1207,7 +1239,7 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
       return callback;
     }
     case CoordinatorQuery::Action::REGISTER_INSTANCE: {
-      if (!FLAGS_raft_server_id) {
+      if (!FLAGS_coordinator_id) {
         throw QueryRuntimeException("Only coordinator can register coordinator server!");
       }
       // TODO: MemoryResource for EvaluationContext, it should probably be passed as
@@ -1242,15 +1274,15 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
 
       callback.fn = [handler = CoordQueryHandler{*coordinator_state},
                      instance_health_check_frequency_sec = config.instance_health_check_frequency_sec,
-                     management_server = management_server_it->second,
-                     replication_server = replication_server_it->second, bolt_server = bolt_server_it->second,
+                     bolt_server = bolt_server_it->second, management_server = management_server_it->second,
+                     replication_server = replication_server_it->second,
                      instance_name = coordinator_query->instance_name_,
                      instance_down_timeout_sec = config.instance_down_timeout_sec,
                      instance_get_uuid_frequency_sec = config.instance_get_uuid_frequency_sec,
                      sync_mode = coordinator_query->sync_mode_]() mutable {
-        handler.RegisterReplicationInstance(management_server, replication_server, instance_health_check_frequency_sec,
-                                            instance_down_timeout_sec, instance_get_uuid_frequency_sec, instance_name,
-                                            sync_mode);
+        handler.RegisterReplicationInstance(bolt_server, management_server, replication_server,
+                                            instance_health_check_frequency_sec, instance_down_timeout_sec,
+                                            instance_get_uuid_frequency_sec, instance_name, sync_mode);
         return std::vector<std::vector<TypedValue>>();
       };
 
@@ -1260,7 +1292,7 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
       return callback;
     }
     case CoordinatorQuery::Action::UNREGISTER_INSTANCE:
-      if (!FLAGS_raft_server_id) {
+      if (!FLAGS_coordinator_id) {
         throw QueryRuntimeException("Only coordinator can register coordinator server!");
       }
       callback.fn = [handler = CoordQueryHandler{*coordinator_state},
@@ -1275,7 +1307,7 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
       return callback;
 
     case CoordinatorQuery::Action::SET_INSTANCE_TO_MAIN: {
-      if (!FLAGS_raft_server_id) {
+      if (!FLAGS_coordinator_id) {
         throw QueryRuntimeException("Only coordinator can register coordinator server!");
       }
       // TODO: MemoryResource for EvaluationContext, it should probably be passed as
@@ -1292,7 +1324,7 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
       return callback;
     }
     case CoordinatorQuery::Action::SHOW_INSTANCES: {
-      if (!FLAGS_raft_server_id) {
+      if (!FLAGS_coordinator_id) {
         throw QueryRuntimeException("Only coordinator can run SHOW INSTANCES.");
       }
 
@@ -2709,6 +2741,75 @@ PreparedQuery PrepareEdgeIndexQuery(ParsedQuery parsed_query, bool in_explicit_t
       RWType::W};
 }
 
+PreparedQuery PrepareTextIndexQuery(ParsedQuery parsed_query, bool in_explicit_transaction,
+                                    std::vector<Notification> *notifications, CurrentDB &current_db) {
+  if (in_explicit_transaction) {
+    throw IndexInMulticommandTxException();
+  }
+
+  auto *text_index_query = utils::Downcast<TextIndexQuery>(parsed_query.query);
+  std::function<void(Notification &)> handler;
+
+  // TODO: we will need transaction for replication
+  MG_ASSERT(current_db.db_acc_, "Text index query expects a current DB");
+  auto &db_acc = *current_db.db_acc_;
+
+  MG_ASSERT(current_db.db_transactional_accessor_, "Text index query expects a current DB transaction");
+  auto *dba = &*current_db.execution_db_accessor_;
+
+  // Creating an index influences computed plan costs.
+  auto invalidate_plan_cache = [plan_cache = db_acc->plan_cache()] {
+    plan_cache->WithLock([&](auto &cache) { cache.reset(); });
+  };
+
+  auto *storage = db_acc->storage();
+  auto label = storage->NameToLabel(text_index_query->label_.name);
+  auto &index_name = text_index_query->index_name_;
+
+  Notification index_notification(SeverityLevel::INFO);
+  switch (text_index_query->action_) {
+    case TextIndexQuery::Action::CREATE: {
+      index_notification.code = NotificationCode::CREATE_INDEX;
+      index_notification.title = fmt::format("Created text index on label {}.", text_index_query->label_.name);
+      // TODO: not just storage + invalidate_plan_cache. Need a DB transaction (for replication)
+      handler = [dba, label, index_name,
+                 invalidate_plan_cache = std::move(invalidate_plan_cache)](Notification &index_notification) {
+        if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+          throw TextSearchDisabledException();
+        }
+        dba->CreateTextIndex(index_name, label);
+        utils::OnScopeExit invalidator(invalidate_plan_cache);
+      };
+      break;
+    }
+    case TextIndexQuery::Action::DROP: {
+      index_notification.code = NotificationCode::DROP_INDEX;
+      index_notification.title = fmt::format("Dropped text index on label {}.", text_index_query->label_.name);
+      // TODO: not just storage + invalidate_plan_cache. Need a DB transaction (for replication)
+      handler = [dba, index_name,
+                 invalidate_plan_cache = std::move(invalidate_plan_cache)](Notification &index_notification) {
+        if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+          throw TextSearchDisabledException();
+        }
+        dba->DropTextIndex(index_name);
+        utils::OnScopeExit invalidator(invalidate_plan_cache);
+      };
+      break;
+    }
+  }
+
+  return PreparedQuery{
+      {},
+      std::move(parsed_query.required_privileges),
+      [handler = std::move(handler), notifications, index_notification = std::move(index_notification)](
+          AnyStream * /*stream*/, std::optional<int> /*unused*/) mutable {
+        handler(index_notification);
+        notifications->push_back(index_notification);
+        return QueryHandlerResult::COMMIT;  // TODO: Revisit the commit semantics once replication is supported for text indices
+      },
+      RWType::W};
+}
+
 PreparedQuery PrepareAuthQuery(ParsedQuery parsed_query, bool in_explicit_transaction,
                                InterpreterContext *interpreter_context, Interpreter &interpreter) {
   if (in_explicit_transaction) {
@@ -3499,7 +3600,7 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici
   }
 
   MG_ASSERT(current_db.db_acc_, "Database info query expects a current DB");
-  MG_ASSERT(current_db.db_transactional_accessor_, "Database ifo query expects a current DB transaction");
+  MG_ASSERT(current_db.db_transactional_accessor_, "Database info query expects a current DB transaction");
   auto *dba = &*current_db.execution_db_accessor_;
 
   auto *info_query = utils::Downcast<DatabaseInfoQuery>(parsed_query.query);
@@ -3514,10 +3615,11 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici
         const std::string_view label_index_mark{"label"};
         const std::string_view label_property_index_mark{"label+property"};
         const std::string_view edge_type_index_mark{"edge-type"};
+        const std::string_view text_index_mark{"text"};
         auto info = dba->ListAllIndices();
         auto storage_acc = database->Access();
         std::vector<std::vector<TypedValue>> results;
-        results.reserve(info.label.size() + info.label_property.size());
+        results.reserve(info.label.size() + info.label_property.size() + info.text_indices.size());
         for (const auto &item : info.label) {
           results.push_back({TypedValue(label_index_mark), TypedValue(storage->LabelToName(item)), TypedValue(),
                              TypedValue(static_cast<int>(storage_acc->ApproximateVertexCount(item)))});
@@ -3532,6 +3634,10 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici
           results.push_back({TypedValue(edge_type_index_mark), TypedValue(storage->EdgeTypeToName(item)), TypedValue(),
                              TypedValue(static_cast<int>(storage_acc->ApproximateEdgeCount(item)))});
         }
+        for (const auto &[index_name, label] : info.text_indices) {
+          results.push_back({TypedValue(fmt::format("{} (name: {})", text_index_mark, index_name)),
+                             TypedValue(storage->LabelToName(label)), TypedValue(), TypedValue()});
+        }
         std::sort(results.begin(), results.end(), [&label_index_mark](const auto &record_1, const auto &record_2) {
           const auto type_1 = record_1[0].ValueString();
           const auto type_2 = record_2[0].ValueString();
@@ -4191,6 +4297,28 @@ void Interpreter::RollbackTransaction() {
   ResetInterpreter();
 }
 
+#ifdef MG_ENTERPRISE
+auto Interpreter::Route(std::map<std::string, std::string> const &routing) -> RouteResult {
+  // TODO: (andi) Test
+  if (!FLAGS_coordinator_id) {
+    auto const &address = routing.find("address");
+    if (address == routing.end()) {
+      throw QueryException("Routing table must contain address field.");
+    }
+
+    auto result = RouteResult{};
+    if (interpreter_context_->repl_state->IsMain()) {
+      result.servers.emplace_back(std::vector<std::string>{address->second}, "WRITE");
+    } else {
+      result.servers.emplace_back(std::vector<std::string>{address->second}, "READ");
+    }
+    return result;
+  }
+
+  return RouteResult{.servers = interpreter_context_->coordinator_state_->GetRoutingTable(routing)};
+}
+#endif
+
 #if MG_ENTERPRISE
 // Before Prepare or during Prepare, but single-threaded.
 // TODO: Is there any cleanup?
@@ -4293,20 +4421,22 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
         utils::Downcast<ProfileQuery>(parsed_query.query) || utils::Downcast<DumpQuery>(parsed_query.query) ||
         utils::Downcast<TriggerQuery>(parsed_query.query) || utils::Downcast<AnalyzeGraphQuery>(parsed_query.query) ||
         utils::Downcast<IndexQuery>(parsed_query.query) || utils::Downcast<EdgeIndexQuery>(parsed_query.query) ||
-        utils::Downcast<DatabaseInfoQuery>(parsed_query.query) || utils::Downcast<ConstraintQuery>(parsed_query.query);
+        utils::Downcast<TextIndexQuery>(parsed_query.query) || utils::Downcast<DatabaseInfoQuery>(parsed_query.query) ||
+        utils::Downcast<ConstraintQuery>(parsed_query.query);
 
     if (!in_explicit_transaction_ && requires_db_transaction) {
       // TODO: ATM only a single database, will change when we have multiple database transactions
       bool could_commit = utils::Downcast<CypherQuery>(parsed_query.query) != nullptr;
       bool unique = utils::Downcast<IndexQuery>(parsed_query.query) != nullptr ||
                     utils::Downcast<EdgeIndexQuery>(parsed_query.query) != nullptr ||
+                    utils::Downcast<TextIndexQuery>(parsed_query.query) != nullptr ||
                     utils::Downcast<ConstraintQuery>(parsed_query.query) != nullptr ||
                     upper_case_query.find(kSchemaAssert) != std::string::npos;
       SetupDatabaseTransaction(could_commit, unique);
     }
 
 #ifdef MG_ENTERPRISE
-    if (FLAGS_raft_server_id && !utils::Downcast<CoordinatorQuery>(parsed_query.query) &&
+    if (FLAGS_coordinator_id && !utils::Downcast<CoordinatorQuery>(parsed_query.query) &&
         !utils::Downcast<SettingQuery>(parsed_query.query)) {
       throw QueryRuntimeException("Coordinator can run only coordinator queries!");
     }
@@ -4337,6 +4467,9 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
     } else if (utils::Downcast<EdgeIndexQuery>(parsed_query.query)) {
       prepared_query = PrepareEdgeIndexQuery(std::move(parsed_query), in_explicit_transaction_,
                                              &query_execution->notifications, current_db_);
+    } else if (utils::Downcast<TextIndexQuery>(parsed_query.query)) {
+      prepared_query = PrepareTextIndexQuery(std::move(parsed_query), in_explicit_transaction_,
+                                             &query_execution->notifications, current_db_);
     } else if (utils::Downcast<AnalyzeGraphQuery>(parsed_query.query)) {
       prepared_query = PrepareAnalyzeGraphQuery(std::move(parsed_query), in_explicit_transaction_, current_db_);
     } else if (utils::Downcast<AuthQuery>(parsed_query.query)) {
@@ -4434,7 +4567,7 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
         throw QueryException("Write query forbidden on the replica!");
       }
 #ifdef MG_ENTERPRISE
-      if (FLAGS_coordinator_server_port && !interpreter_context_->repl_state->IsMainWriteable()) {
+      if (FLAGS_management_port && !interpreter_context_->repl_state->IsMainWriteable()) {
         query_execution = nullptr;
         throw QueryException(
             "Write query forbidden on the main! Coordinator needs to enable writing on main by sending RPC message.");
diff --git a/src/query/interpreter.hpp b/src/query/interpreter.hpp
index 5366b4472..b6cb869a4 100644
--- a/src/query/interpreter.hpp
+++ b/src/query/interpreter.hpp
@@ -143,8 +143,8 @@ class CoordinatorQueryHandler {
   };
 
   /// @throw QueryRuntimeException if an error ocurred.
-  virtual void RegisterReplicationInstance(std::string_view coordinator_socket_address,
-                                           std::string_view replication_socket_address,
+  virtual void RegisterReplicationInstance(std::string_view bolt_server, std::string_view management_server,
+                                           std::string_view replication_server,
                                            std::chrono::seconds const &instance_health_check_frequency,
                                            std::chrono::seconds const &instance_down_timeout,
                                            std::chrono::seconds const &instance_get_uuid_frequency,
@@ -160,7 +160,8 @@ class CoordinatorQueryHandler {
   virtual std::vector<coordination::InstanceStatus> ShowInstances() const = 0;
 
   /// @throw QueryRuntimeException if an error ocurred.
-  virtual auto AddCoordinatorInstance(uint32_t raft_server_id, std::string_view coordinator_socket_address) -> void = 0;
+  virtual auto AddCoordinatorInstance(uint32_t coordinator_id, std::string_view bolt_server,
+                                      std::string_view coordinator_server) -> void = 0;
 };
 #endif
 
@@ -247,6 +248,14 @@ class Interpreter final {
     std::optional<std::string> db;
   };
 
+#ifdef MG_ENTERPRISE
+  struct RouteResult {
+    int ttl{300};
+    std::string db{};  // Currently not used since we don't have any specific replication groups etc.
+    coordination::RoutingTable servers{};
+  };
+#endif
+
   std::shared_ptr<QueryUserOrRole> user_or_role_{};
   bool in_explicit_transaction_{false};
   CurrentDB current_db_;
@@ -272,6 +281,10 @@ class Interpreter final {
                                      const std::map<std::string, storage::PropertyValue> &params,
                                      QueryExtras const &extras);
 
+#ifdef MG_ENTERPRISE
+  auto Route(std::map<std::string, std::string> const &routing) -> RouteResult;
+#endif
+
   /**
    * Execute the last prepared query and stream *all* of the results into the
    * given stream.
diff --git a/src/query/plan/operator.cpp b/src/query/plan/operator.cpp
index 2b970cf49..ff6c1dc9a 100644
--- a/src/query/plan/operator.cpp
+++ b/src/query/plan/operator.cpp
@@ -32,6 +32,7 @@
 #include "spdlog/spdlog.h"
 
 #include "csv/parsing.hpp"
+#include "flags/experimental.hpp"
 #include "license/license.hpp"
 #include "query/auth_checker.hpp"
 #include "query/context.hpp"
@@ -266,6 +267,10 @@ VertexAccessor &CreateLocalVertex(const NodeCreationInfo &node_info, Frame *fram
   }
   MultiPropsInitChecked(&new_node, properties);
 
+  if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    context.db_accessor->TextIndexAddVertex(new_node);
+  }
+
   (*frame)[node_info.symbol] = new_node;
   return (*frame)[node_info.symbol].ValueVertex();
 }
@@ -2991,6 +2996,9 @@ bool SetProperty::SetPropertyCursor::Pull(Frame &frame, ExecutionContext &contex
         context.trigger_context_collector->RegisterSetObjectProperty(lhs.ValueVertex(), self_.property_,
                                                                      TypedValue{std::move(old_value)}, TypedValue{rhs});
       }
+      if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+        context.db_accessor->TextIndexUpdateVertex(lhs.ValueVertex());
+      }
       break;
     }
     case TypedValue::Type::Edge: {
@@ -3147,6 +3155,9 @@ void SetPropertiesOnRecord(TRecordAccessor *record, const TypedValue &rhs, SetPr
     case TypedValue::Type::Vertex: {
       PropertiesMap new_properties = get_props(rhs.ValueVertex());
       update_props(new_properties);
+      if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+        context->db_accessor->TextIndexUpdateVertex(rhs.ValueVertex());
+      }
       break;
     }
     case TypedValue::Type::Map: {
@@ -3204,6 +3215,9 @@ bool SetProperties::SetPropertiesCursor::Pull(Frame &frame, ExecutionContext &co
       }
 #endif
       SetPropertiesOnRecord(&lhs.ValueVertex(), rhs, self_.op_, &context, cached_name_id_);
+      if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+        context.db_accessor->TextIndexUpdateVertex(lhs.ValueVertex());
+      }
       break;
     case TypedValue::Type::Edge:
 #ifdef MG_ENTERPRISE
@@ -3295,6 +3309,10 @@ bool SetLabels::SetLabelsCursor::Pull(Frame &frame, ExecutionContext &context) {
     }
   }
 
+  if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    context.db_accessor->TextIndexUpdateVertex(vertex);
+  }
+
   return true;
 }
 
@@ -3366,6 +3384,9 @@ bool RemoveProperty::RemovePropertyCursor::Pull(Frame &frame, ExecutionContext &
       }
 #endif
       remove_prop(&lhs.ValueVertex());
+      if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+        context.db_accessor->TextIndexUpdateVertex(lhs.ValueVertex());
+      }
       break;
     case TypedValue::Type::Edge:
 #ifdef MG_ENTERPRISE
@@ -3458,6 +3479,10 @@ bool RemoveLabels::RemoveLabelsCursor::Pull(Frame &frame, ExecutionContext &cont
     }
   }
 
+  if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    context.db_accessor->TextIndexUpdateVertex(vertex, EvaluateLabels(self_.labels_, evaluator, context.db_accessor));
+  }
+
   return true;
 }
 
diff --git a/src/query/plan/vertex_count_cache.hpp b/src/query/plan/vertex_count_cache.hpp
index 802f4e09f..69e002c0a 100644
--- a/src/query/plan/vertex_count_cache.hpp
+++ b/src/query/plan/vertex_count_cache.hpp
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
diff --git a/src/query/procedure/mg_procedure_impl.cpp b/src/query/procedure/mg_procedure_impl.cpp
index d6ce3c7b7..a2bc23aa3 100644
--- a/src/query/procedure/mg_procedure_impl.cpp
+++ b/src/query/procedure/mg_procedure_impl.cpp
@@ -23,6 +23,8 @@
 #include <utility>
 #include <variant>
 
+#include "flags/experimental.hpp"
+#include "flags/run_time_configurable.hpp"
 #include "license/license.hpp"
 #include "mg_procedure.h"
 #include "module.hpp"
@@ -32,6 +34,7 @@
 #include "query/procedure/fmt.hpp"
 #include "query/procedure/mg_procedure_helpers.hpp"
 #include "query/stream/common.hpp"
+#include "storage/v2/indices/text_index.hpp"
 #include "storage/v2/property_value.hpp"
 #include "storage/v2/storage_mode.hpp"
 #include "storage/v2/view.hpp"
@@ -1843,6 +1846,11 @@ mgp_error mgp_vertex_set_property(struct mgp_vertex *v, const char *property_nam
     const auto result = std::visit(
         [prop_key, property_value](auto &impl) { return impl.SetProperty(prop_key, ToPropertyValue(*property_value)); },
         v->impl);
+    if (memgraph::flags::AreExperimentsEnabled(memgraph::flags::Experiments::TEXT_SEARCH) && !result.HasError()) {
+      auto v_impl = v->getImpl();
+      v->graph->getImpl()->TextIndexUpdateVertex(v_impl);
+    }
+
     if (result.HasError()) {
       switch (result.GetError()) {
         case memgraph::storage::Error::DELETED_OBJECT:
@@ -1899,6 +1907,11 @@ mgp_error mgp_vertex_set_properties(struct mgp_vertex *v, struct mgp_map *proper
     }
 
     const auto result = v->getImpl().UpdateProperties(props);
+    if (memgraph::flags::AreExperimentsEnabled(memgraph::flags::Experiments::TEXT_SEARCH) && !result.HasError()) {
+      auto v_impl = v->getImpl();
+      v->graph->getImpl()->TextIndexUpdateVertex(v_impl);
+    }
+
     if (result.HasError()) {
       switch (result.GetError()) {
         case memgraph::storage::Error::DELETED_OBJECT:
@@ -1956,6 +1969,10 @@ mgp_error mgp_vertex_add_label(struct mgp_vertex *v, mgp_label label) {
     }
 
     const auto result = std::visit([label_id](auto &impl) { return impl.AddLabel(label_id); }, v->impl);
+    if (memgraph::flags::AreExperimentsEnabled(memgraph::flags::Experiments::TEXT_SEARCH) && !result.HasError()) {
+      auto v_impl = v->getImpl();
+      v->graph->getImpl()->TextIndexUpdateVertex(v_impl);
+    }
 
     if (result.HasError()) {
       switch (result.GetError()) {
@@ -1998,6 +2015,10 @@ mgp_error mgp_vertex_remove_label(struct mgp_vertex *v, mgp_label label) {
       throw ImmutableObjectException{"Cannot remove a label from an immutable vertex!"};
     }
     const auto result = std::visit([label_id](auto &impl) { return impl.RemoveLabel(label_id); }, v->impl);
+    if (memgraph::flags::AreExperimentsEnabled(memgraph::flags::Experiments::TEXT_SEARCH) && !result.HasError()) {
+      auto v_impl = v->getImpl();
+      v->graph->getImpl()->TextIndexUpdateVertex(v_impl, {label_id});
+    }
 
     if (result.HasError()) {
       switch (result.GetError()) {
@@ -2590,7 +2611,7 @@ mgp_error mgp_edge_iter_properties(mgp_edge *e, mgp_memory *memory, mgp_properti
 mgp_error mgp_graph_get_vertex_by_id(mgp_graph *graph, mgp_vertex_id id, mgp_memory *memory, mgp_vertex **result) {
   return WrapExceptions(
       [graph, id, memory]() -> mgp_vertex * {
-        std::optional<memgraph::query::VertexAccessor> maybe_vertex = std::visit(
+        auto maybe_vertex = std::visit(
             [graph, id](auto *impl) {
               return impl->FindVertex(memgraph::storage::Gid::FromInt(id.as_int), graph->view);
             },
@@ -2967,6 +2988,10 @@ mgp_error mgp_graph_create_vertex(struct mgp_graph *graph, mgp_memory *memory, m
         }
         auto *vertex = std::visit(
             [=](auto *impl) { return NewRawMgpObject<mgp_vertex>(memory, impl->InsertVertex(), graph); }, graph->impl);
+        if (memgraph::flags::AreExperimentsEnabled(memgraph::flags::Experiments::TEXT_SEARCH)) {
+          auto v_impl = vertex->getImpl();
+          vertex->graph->getImpl()->TextIndexAddVertex(v_impl);
+        }
 
         auto &ctx = graph->ctx;
         ctx->execution_stats[memgraph::query::ExecutionStats::Key::CREATED_NODES] += 1;
@@ -3324,6 +3349,140 @@ mgp_error mgp_graph_delete_edge(struct mgp_graph *graph, mgp_edge *edge) {
   });
 }
 
+mgp_error mgp_graph_has_text_index(mgp_graph *graph, const char *index_name, int *result) {
+  return WrapExceptions([graph, index_name, result]() {
+    std::visit(memgraph::utils::Overloaded{
+                   [&](memgraph::query::DbAccessor *impl) { *result = impl->TextIndexExists(index_name); },
+                   [&](memgraph::query::SubgraphDbAccessor *impl) {
+                     *result = impl->GetAccessor()->TextIndexExists(index_name);
+                   }},
+               graph->impl);
+  });
+}
+
+mgp_vertex *GetVertexByGid(mgp_graph *graph, memgraph::storage::Gid id, mgp_memory *memory) {
+  auto get_vertex_by_gid = memgraph::utils::Overloaded{
+      [graph, id, memory](memgraph::query::DbAccessor *impl) -> mgp_vertex * {
+        auto maybe_vertex = impl->FindVertex(id, graph->view);
+        if (!maybe_vertex) return nullptr;
+        return NewRawMgpObject<mgp_vertex>(memory, *maybe_vertex, graph);
+      },
+      [graph, id, memory](memgraph::query::SubgraphDbAccessor *impl) -> mgp_vertex * {
+        auto maybe_vertex = impl->FindVertex(id, graph->view);
+        if (!maybe_vertex) return nullptr;
+        return NewRawMgpObject<mgp_vertex>(
+            memory, memgraph::query::SubgraphVertexAccessor(*maybe_vertex, impl->getGraph()), graph);
+      }};
+  return std::visit(get_vertex_by_gid, graph->impl);
+}
+
+void WrapTextSearch(mgp_graph *graph, mgp_memory *memory, mgp_map **result,
+                    const std::vector<memgraph::storage::Gid> &vertex_ids = {},
+                    const std::optional<std::string> &error_msg = std::nullopt) {
+  if (const auto err = mgp_map_make_empty(memory, result); err != mgp_error::MGP_ERROR_NO_ERROR) {
+    throw std::logic_error("Retrieving text search results failed during creation of a mgp_map");
+  }
+
+  mgp_value *error_value;
+  if (error_msg.has_value()) {
+    if (const auto err = mgp_value_make_string(error_msg.value().data(), memory, &error_value);
+        err != mgp_error::MGP_ERROR_NO_ERROR) {
+      throw std::logic_error("Retrieving text search results failed during creation of a string mgp_value");
+    }
+  }
+
+  mgp_list *search_results{};
+  if (const auto err = mgp_list_make_empty(vertex_ids.size(), memory, &search_results);
+      err != mgp_error::MGP_ERROR_NO_ERROR) {
+    throw std::logic_error("Retrieving text search results failed during creation of a mgp_list");
+  }
+
+  for (const auto &vertex_id : vertex_ids) {
+    mgp_value *vertex;
+    if (const auto err = mgp_value_make_vertex(GetVertexByGid(graph, vertex_id, memory), &vertex);
+        err != mgp_error::MGP_ERROR_NO_ERROR) {
+      throw std::logic_error("Retrieving text search results failed during creation of a vertex mgp_value");
+    }
+    if (const auto err = mgp_list_append(search_results, vertex); err != mgp_error::MGP_ERROR_NO_ERROR) {
+      throw std::logic_error(
+          "Retrieving text search results failed during insertion of the mgp_value into the result list");
+    }
+  }
+
+  mgp_value *search_results_value;
+  if (const auto err = mgp_value_make_list(search_results, &search_results_value);
+      err != mgp_error::MGP_ERROR_NO_ERROR) {
+    throw std::logic_error("Retrieving text search results failed during creation of a list mgp_value");
+  }
+
+  if (error_msg.has_value()) {
+    if (const auto err = mgp_map_insert(*result, "error_msg", error_value); err != mgp_error::MGP_ERROR_NO_ERROR) {
+      throw std::logic_error("Retrieving text index search error failed during insertion into mgp_map");
+    }
+    return;
+  }
+
+  if (const auto err = mgp_map_insert(*result, "search_results", search_results_value);
+      err != mgp_error::MGP_ERROR_NO_ERROR) {
+    throw std::logic_error("Retrieving text index search results failed during insertion into mgp_map");
+  }
+}
+
+void WrapTextIndexAggregation(mgp_memory *memory, mgp_map **result, const std::string &aggregation_result,
+                              const std::optional<std::string> &error_msg = std::nullopt) {
+  if (const auto err = mgp_map_make_empty(memory, result); err != mgp_error::MGP_ERROR_NO_ERROR) {
+    throw std::logic_error("Retrieving text search results failed during creation of a mgp_map");
+  }
+
+  mgp_value *aggregation_result_or_error_value;
+  if (const auto err = mgp_value_make_string(error_msg.value_or(aggregation_result).data(), memory,
+                                             &aggregation_result_or_error_value);
+      err != mgp_error::MGP_ERROR_NO_ERROR) {
+    throw std::logic_error("Retrieving text search results failed during creation of a string mgp_value");
+  }
+
+  if (error_msg.has_value()) {
+    if (const auto err = mgp_map_insert(*result, "error_msg", aggregation_result_or_error_value);
+        err != mgp_error::MGP_ERROR_NO_ERROR) {
+      throw std::logic_error("Retrieving text index aggregation error failed during insertion into mgp_map");
+    }
+    return;
+  }
+
+  if (const auto err = mgp_map_insert(*result, "aggregation_results", aggregation_result_or_error_value);
+      err != mgp_error::MGP_ERROR_NO_ERROR) {
+    throw std::logic_error("Retrieving text index aggregation results failed during insertion into mgp_map");
+  }
+}
+
+mgp_error mgp_graph_search_text_index(mgp_graph *graph, const char *index_name, const char *search_query,
+                                      text_search_mode search_mode, mgp_memory *memory, mgp_map **result) {
+  return WrapExceptions([graph, memory, index_name, search_query, search_mode, result]() {
+    std::vector<memgraph::storage::Gid> found_vertices_ids;
+    std::optional<std::string> error_msg = std::nullopt;
+    try {
+      found_vertices_ids = graph->getImpl()->TextIndexSearch(index_name, search_query, search_mode);
+    } catch (memgraph::query::QueryException &e) {
+      error_msg = e.what();
+    }
+    WrapTextSearch(graph, memory, result, found_vertices_ids, error_msg);
+  });
+}
+
+mgp_error mgp_graph_aggregate_over_text_index(mgp_graph *graph, const char *index_name, const char *search_query,
+                                              const char *aggregation_query, mgp_memory *memory, mgp_map **result) {
+  return WrapExceptions([graph, memory, index_name, search_query, aggregation_query, result]() {
+    std::string search_results;
+    std::optional<std::string> error_msg = std::nullopt;
+    try {
+      search_results = graph->getImpl()->TextIndexAggregate(index_name, search_query, aggregation_query);
+    } catch (memgraph::query::QueryException &e) {
+      error_msg = e.what();
+    }
+    WrapTextIndexAggregation(memory, result, search_results, error_msg);
+  });
+}
+
 #ifdef MG_ENTERPRISE
 namespace {
 void NextPermitted(mgp_vertices_iterator &it) {
diff --git a/src/query/procedure/mg_procedure_impl.hpp b/src/query/procedure/mg_procedure_impl.hpp
index 17cac4eca..a91b4386c 100644
--- a/src/query/procedure/mg_procedure_impl.hpp
+++ b/src/query/procedure/mg_procedure_impl.hpp
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -562,6 +562,13 @@ struct mgp_graph {
   memgraph::query::ExecutionContext *ctx;
   memgraph::storage::StorageMode storage_mode;
 
+  memgraph::query::DbAccessor *getImpl() const {
+    return std::visit(
+        memgraph::utils::Overloaded{[](memgraph::query::DbAccessor *impl) { return impl; },
+                                    [](memgraph::query::SubgraphDbAccessor *impl) { return impl->GetAccessor(); }},
+        this->impl);
+  }
+
   static mgp_graph WritableGraph(memgraph::query::DbAccessor &acc, memgraph::storage::View view,
                                  memgraph::query::ExecutionContext &ctx) {
     return mgp_graph{&acc, view, &ctx, acc.GetStorageMode()};
diff --git a/src/replication/state.cpp b/src/replication/state.cpp
index 1155fdb51..f04d00761 100644
--- a/src/replication/state.cpp
+++ b/src/replication/state.cpp
@@ -56,7 +56,7 @@ ReplicationState::ReplicationState(std::optional<std::filesystem::path> durabili
   }
   auto replication_data = std::move(fetched_replication_data).GetValue();
 #ifdef MG_ENTERPRISE
-  if (FLAGS_coordinator_server_port && std::holds_alternative<RoleReplicaData>(replication_data)) {
+  if (FLAGS_management_port && std::holds_alternative<RoleReplicaData>(replication_data)) {
     spdlog::trace("Restarted replication uuid for replica");
     std::get<RoleReplicaData>(replication_data).uuid_.reset();
   }
@@ -254,7 +254,8 @@ bool ReplicationState::SetReplicationRoleMain(const utils::UUID &main_uuid) {
     return false;
   }
 
-  replication_data_ = RoleMainData{ReplicationEpoch{new_epoch}, true, main_uuid};
+  // By default, writing on MAIN is disabled until the cluster is in a healthy state
+  replication_data_ = RoleMainData{ReplicationEpoch{new_epoch}, /*is_writing_enabled=*/false, main_uuid};
 
   return true;
 }
diff --git a/src/replication_handler/include/replication_handler/replication_handler.hpp b/src/replication_handler/include/replication_handler/replication_handler.hpp
index e1da19bfa..452ccce19 100644
--- a/src/replication_handler/include/replication_handler/replication_handler.hpp
+++ b/src/replication_handler/include/replication_handler/replication_handler.hpp
@@ -213,7 +213,7 @@ struct ReplicationHandler : public memgraph::query::ReplicationQueryHandler {
               // We force sync replicas in other situation
               if (state == storage::replication::ReplicaState::DIVERGED_FROM_MAIN) {
 #ifdef MG_ENTERPRISE
-                return FLAGS_coordinator_server_port != 0;
+                return FLAGS_management_port != 0;
 #else
                 return false;
 #endif
diff --git a/src/storage/v2/CMakeLists.txt b/src/storage/v2/CMakeLists.txt
index ec5108d63..49601eb54 100644
--- a/src/storage/v2/CMakeLists.txt
+++ b/src/storage/v2/CMakeLists.txt
@@ -20,6 +20,7 @@ add_library(mg-storage-v2 STATIC
         vertex_info_cache.cpp
         storage.cpp
         indices/indices.cpp
+        indices/text_index.cpp
         all_vertices_iterable.cpp
         edges_iterable.cpp
         vertices_iterable.cpp
@@ -45,4 +46,5 @@ add_library(mg-storage-v2 STATIC
         inmemory/replication/recovery.cpp
 )
 
-target_link_libraries(mg-storage-v2 mg::replication Threads::Threads mg-utils gflags absl::flat_hash_map mg-rpc mg-slk mg-events mg-memory)
+target_include_directories(mg-storage-v2 PUBLIC ${CMAKE_SOURCE_DIR}/include)
+target_link_libraries(mg-storage-v2 mg::replication Threads::Threads mg-utils mg-flags gflags absl::flat_hash_map mg-rpc mg-slk mg-events mg-memory mgcxx_text_search tantivy_text_search)
diff --git a/src/storage/v2/config.hpp b/src/storage/v2/config.hpp
index bac77945d..d066b58f2 100644
--- a/src/storage/v2/config.hpp
+++ b/src/storage/v2/config.hpp
@@ -140,7 +140,7 @@ struct Config {
 inline auto ReplicationStateRootPath(memgraph::storage::Config const &config) -> std::optional<std::filesystem::path> {
   if (!config.durability.restore_replication_state_on_startup
 #ifdef MG_ENTERPRISE
-      && !FLAGS_coordinator_server_port
+      && !FLAGS_management_port
 #endif
   ) {
     spdlog::warn(
diff --git a/src/storage/v2/disk/durable_metadata.cpp b/src/storage/v2/disk/durable_metadata.cpp
index fe2c558ae..c1f44a587 100644
--- a/src/storage/v2/disk/durable_metadata.cpp
+++ b/src/storage/v2/disk/durable_metadata.cpp
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -26,6 +26,7 @@ constexpr const char *kVertexCountDescr = "vertex_count";
 constexpr const char *kEdgeDountDescr = "edge_count";
 constexpr const char *kLabelIndexStr = "label_index";
 constexpr const char *kLabelPropertyIndexStr = "label_property_index";
+constexpr const char *kTextIndexStr = "text_index";
 constexpr const char *kExistenceConstraintsStr = "existence_constraints";
 constexpr const char *kUniqueConstraintsStr = "unique_constraints";
 }  // namespace
@@ -41,7 +42,7 @@ DurableMetadata::DurableMetadata(const Config &config)
 DurableMetadata::DurableMetadata(DurableMetadata &&other) noexcept
     : durability_kvstore_(std::move(other.durability_kvstore_)), config_(std::move(other.config_)) {}
 
-void DurableMetadata::SaveBeforeClosingDB(uint64_t timestamp, uint64_t vertex_count, uint64_t edge_count) {
+void DurableMetadata::UpdateMetaData(uint64_t timestamp, uint64_t vertex_count, uint64_t edge_count) {
   durability_kvstore_.Put(kLastTransactionStartTimeStamp, std::to_string(timestamp));
   durability_kvstore_.Put(kVertexCountDescr, std::to_string(vertex_count));
   durability_kvstore_.Put(kEdgeDountDescr, std::to_string(edge_count));
@@ -144,6 +145,31 @@ bool DurableMetadata::PersistLabelPropertyIndexAndExistenceConstraintDeletion(La
   return true;
 }
 
+bool DurableMetadata::PersistTextIndexCreation(const std::string &index_name, LabelId label) {
+  const std::string index_name_label_pair = index_name + "," + label.ToString();
+  if (auto text_index_store = durability_kvstore_.Get(kTextIndexStr); text_index_store.has_value()) {
+    std::string &value = text_index_store.value();
+    value += "|";
+    value += index_name_label_pair;
+    return durability_kvstore_.Put(kTextIndexStr, value);
+  }
+  return durability_kvstore_.Put(kTextIndexStr, index_name_label_pair);
+}
+
+bool DurableMetadata::PersistTextIndexDeletion(const std::string &index_name, LabelId label) {
+  const std::string index_name_label_pair = index_name + "," + label.ToString();
+  if (auto text_index_store = durability_kvstore_.Get(kTextIndexStr); text_index_store.has_value()) {
+    const std::string &value = text_index_store.value();
+    std::vector<std::string> text_indices = utils::Split(value, "|");
+    std::erase(text_indices, index_name_label_pair);
+    if (text_indices.empty()) {
+      return durability_kvstore_.Delete(kTextIndexStr);
+    }
+    return durability_kvstore_.Put(kTextIndexStr, utils::Join(text_indices, "|"));
+  }
+  return true;
+}
+
 bool DurableMetadata::PersistUniqueConstraintCreation(LabelId label, const std::set<PropertyId> &properties) {
   const std::string entry = utils::GetKeyForUniqueConstraintsDurability(label, properties);
 
diff --git a/src/storage/v2/disk/durable_metadata.hpp b/src/storage/v2/disk/durable_metadata.hpp
index 168cce469..06a26ac15 100644
--- a/src/storage/v2/disk/durable_metadata.hpp
+++ b/src/storage/v2/disk/durable_metadata.hpp
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -41,7 +41,7 @@ class DurableMetadata {
   std::optional<std::vector<std::string>> LoadExistenceConstraintInfoIfExists() const;
   std::optional<std::vector<std::string>> LoadUniqueConstraintInfoIfExists() const;
 
-  void SaveBeforeClosingDB(uint64_t timestamp, uint64_t vertex_count, uint64_t edge_count);
+  void UpdateMetaData(uint64_t timestamp, uint64_t vertex_count, uint64_t edge_count);
 
   bool PersistLabelIndexCreation(LabelId label);
 
@@ -53,6 +53,10 @@ class DurableMetadata {
   bool PersistLabelPropertyIndexAndExistenceConstraintDeletion(LabelId label, PropertyId property,
                                                                const std::string &key);
 
+  bool PersistTextIndexCreation(const std::string &index_name, LabelId label);
+
+  bool PersistTextIndexDeletion(const std::string &index_name, LabelId label);
+
   bool PersistUniqueConstraintCreation(LabelId label, const std::set<PropertyId> &properties);
 
   bool PersistUniqueConstraintDeletion(LabelId label, const std::set<PropertyId> &properties);
diff --git a/src/storage/v2/disk/storage.cpp b/src/storage/v2/disk/storage.cpp
index 8f8ef5e06..a0c15485c 100644
--- a/src/storage/v2/disk/storage.cpp
+++ b/src/storage/v2/disk/storage.cpp
@@ -29,6 +29,8 @@
 #include <rocksdb/utilities/transaction.h>
 #include <rocksdb/utilities/transaction_db.h>
 
+#include "flags/experimental.hpp"
+#include "flags/run_time_configurable.hpp"
 #include "kvstore/kvstore.hpp"
 #include "spdlog/spdlog.h"
 #include "storage/v2/constraints/unique_constraints.hpp"
@@ -272,8 +274,8 @@ DiskStorage::DiskStorage(Config config)
 }
 
 DiskStorage::~DiskStorage() {
-  durable_metadata_.SaveBeforeClosingDB(timestamp_, vertex_count_.load(std::memory_order_acquire),
-                                        edge_count_.load(std::memory_order_acquire));
+  durable_metadata_.UpdateMetaData(timestamp_, vertex_count_.load(std::memory_order_acquire),
+                                   edge_count_.load(std::memory_order_acquire));
   logging::AssertRocksDBStatus(kvstore_->db_->DestroyColumnFamilyHandle(kvstore_->vertex_chandle));
   logging::AssertRocksDBStatus(kvstore_->db_->DestroyColumnFamilyHandle(kvstore_->edge_chandle));
   logging::AssertRocksDBStatus(kvstore_->db_->DestroyColumnFamilyHandle(kvstore_->out_edges_chandle));
@@ -856,6 +858,7 @@ StorageInfo DiskStorage::GetInfo(memgraph::replication_coordination_glue::Replic
     const auto &lbl = access->ListAllIndices();
     info.label_indices = lbl.label.size();
     info.label_property_indices = lbl.label_property.size();
+    info.text_indices = lbl.text_indices.size();
     const auto &con = access->ListAllConstraints();
     info.existence_constraints = con.existence.size();
     info.unique_constraints = con.unique.size();
@@ -1670,6 +1673,18 @@ utils::BasicResult<StorageManipulationError, void> DiskStorage::DiskAccessor::Co
         case MetadataDelta::Action::LABEL_PROPERTY_INDEX_STATS_CLEAR: {
           throw utils::NotYetImplemented("ClearIndexStats(stats) is not implemented for DiskStorage.");
         } break;
+        case MetadataDelta::Action::TEXT_INDEX_CREATE: {
+          const auto &info = md_delta.text_index;
+          if (!disk_storage->durable_metadata_.PersistTextIndexCreation(info.index_name, info.label)) {
+            return StorageManipulationError{PersistenceError{}};
+          }
+        } break;
+        case MetadataDelta::Action::TEXT_INDEX_DROP: {
+          const auto &info = md_delta.text_index;
+          if (!disk_storage->durable_metadata_.PersistTextIndexDeletion(info.index_name, info.label)) {
+            return StorageManipulationError{PersistenceError{}};
+          }
+        } break;
         case MetadataDelta::Action::EXISTENCE_CONSTRAINT_CREATE: {
           const auto &info = md_delta.label_property;
           if (!disk_storage->durable_metadata_.PersistLabelPropertyIndexAndExistenceConstraintCreation(
@@ -1768,7 +1783,11 @@ utils::BasicResult<StorageManipulationError, void> DiskStorage::DiskAccessor::Co
     return StorageManipulationError{SerializationError{}};
   }
   spdlog::trace("rocksdb: Commit successful");
-
+  if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    disk_storage->indices_.text_index_.Commit();
+  }
+  disk_storage->durable_metadata_.UpdateMetaData(disk_storage->timestamp_, disk_storage->vertex_count_,
+                                                 disk_storage->edge_count_);
   is_transaction_active_ = false;
 
   return {};
@@ -1886,6 +1905,9 @@ void DiskStorage::DiskAccessor::Abort() {
   // query_plan_accumulate_aggregate.cpp
   transaction_.disk_transaction_->Rollback();
   transaction_.disk_transaction_->ClearSnapshot();
+  if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    storage_->indices_.text_index_.Rollback();
+  }
   delete transaction_.disk_transaction_;
   transaction_.disk_transaction_ = nullptr;
   is_transaction_active_ = false;
@@ -2096,7 +2118,11 @@ IndicesInfo DiskStorage::DiskAccessor::ListAllIndices() const {
   auto *disk_label_index = static_cast<DiskLabelIndex *>(on_disk->indices_.label_index_.get());
   auto *disk_label_property_index =
       static_cast<DiskLabelPropertyIndex *>(on_disk->indices_.label_property_index_.get());
-  return {disk_label_index->ListIndices(), disk_label_property_index->ListIndices()};
+  auto &text_index = storage_->indices_.text_index_;
+  return {disk_label_index->ListIndices(),
+          disk_label_property_index->ListIndices(),
+          {/* edge type indices */},
+          text_index.ListIndices()};
 }
 ConstraintsInfo DiskStorage::DiskAccessor::ListAllConstraints() const {
   auto *disk_storage = static_cast<DiskStorage *>(storage_);
diff --git a/src/storage/v2/disk/storage.hpp b/src/storage/v2/disk/storage.hpp
index 9b74c0af0..e5c164d0c 100644
--- a/src/storage/v2/disk/storage.hpp
+++ b/src/storage/v2/disk/storage.hpp
@@ -303,6 +303,8 @@ class DiskStorage final : public Storage {
 
   EdgeImportMode GetEdgeImportMode() const;
 
+  DurableMetadata *GetDurableMetadata() { return &durable_metadata_; }
+
  private:
   void LoadPersistingMetadataInfo();
 
diff --git a/src/storage/v2/durability/durability.cpp b/src/storage/v2/durability/durability.cpp
index fbbedbee5..db8bcd93b 100644
--- a/src/storage/v2/durability/durability.cpp
+++ b/src/storage/v2/durability/durability.cpp
@@ -151,7 +151,8 @@ void RecoverConstraints(const RecoveredIndicesAndConstraints::ConstraintsMetadat
 
 void RecoverIndicesAndStats(const RecoveredIndicesAndConstraints::IndicesMetadata &indices_metadata, Indices *indices,
                             utils::SkipList<Vertex> *vertices, NameIdMapper *name_id_mapper,
-                            const std::optional<ParallelizedSchemaCreationInfo> &parallel_exec_info) {
+                            const std::optional<ParallelizedSchemaCreationInfo> &parallel_exec_info,
+                            const std::optional<std::filesystem::path> &storage_dir) {
   spdlog::info("Recreating indices from metadata.");
 
   // Recover label indices.
@@ -211,6 +212,26 @@ void RecoverIndicesAndStats(const RecoveredIndicesAndConstraints::IndicesMetadat
   }
   spdlog::info("Edge-type indices are recreated.");
 
+  if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    // Recover text indices.
+    spdlog::info("Recreating {} text indices from metadata.", indices_metadata.text_indices.size());
+    auto &mem_text_index = indices->text_index_;
+    for (const auto &[index_name, label] : indices_metadata.text_indices) {
+      try {
+        if (!storage_dir.has_value()) {
+          throw RecoveryFailure("There must exist a storage directory in order to recover text indices!");
+        }
+
+        mem_text_index.RecoverIndex(storage_dir.value(), index_name, label, vertices->access(), name_id_mapper);
+      } catch (...) {
+        throw RecoveryFailure("The text index must be created here!");
+      }
+      spdlog::info("Text index {} on :{} is recreated from metadata", index_name,
+                   name_id_mapper->IdToName(label.AsUint()));
+    }
+    spdlog::info("Text indices are recreated.");
+  }
+
   spdlog::info("Indices are recreated.");
 }
 
@@ -331,8 +352,13 @@ std::optional<RecoveryInfo> Recovery::RecoverData(std::string *uuid, Replication
     repl_storage_state.epoch_.SetEpoch(std::move(recovered_snapshot->snapshot_info.epoch_id));
 
     if (!utils::DirExists(wal_directory_)) {
+      std::optional<std::filesystem::path> storage_dir = std::nullopt;
+      if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+        storage_dir = config.durability.storage_directory;
+      }
+
       RecoverIndicesAndStats(indices_constraints.indices, indices, vertices, name_id_mapper,
-                             GetParallelExecInfoIndices(recovery_info, config));
+                             GetParallelExecInfoIndices(recovery_info, config), storage_dir);
       RecoverConstraints(indices_constraints.constraints, constraints, vertices, name_id_mapper,
                          GetParallelExecInfo(recovery_info, config));
       return recovered_snapshot->recovery_info;
@@ -467,8 +493,13 @@ std::optional<RecoveryInfo> Recovery::RecoverData(std::string *uuid, Replication
     spdlog::info("All necessary WAL files are loaded successfully.");
   }
 
+  std::optional<std::filesystem::path> storage_dir = std::nullopt;
+  if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    storage_dir = config.durability.storage_directory;
+  }
+
   RecoverIndicesAndStats(indices_constraints.indices, indices, vertices, name_id_mapper,
-                         GetParallelExecInfoIndices(recovery_info, config));
+                         GetParallelExecInfoIndices(recovery_info, config), storage_dir);
   RecoverConstraints(indices_constraints.constraints, constraints, vertices, name_id_mapper,
                      GetParallelExecInfo(recovery_info, config));
 
diff --git a/src/storage/v2/durability/durability.hpp b/src/storage/v2/durability/durability.hpp
index 97e2c7efc..5170b3b04 100644
--- a/src/storage/v2/durability/durability.hpp
+++ b/src/storage/v2/durability/durability.hpp
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -102,7 +102,8 @@ std::optional<std::vector<WalDurabilityInfo>> GetWalFiles(const std::filesystem:
 /// @throw RecoveryFailure
 void RecoverIndicesAndStats(const RecoveredIndicesAndConstraints::IndicesMetadata &indices_metadata, Indices *indices,
                             utils::SkipList<Vertex> *vertices, NameIdMapper *name_id_mapper,
-                            const std::optional<ParallelizedSchemaCreationInfo> &parallel_exec_info = std::nullopt);
+                            const std::optional<ParallelizedSchemaCreationInfo> &parallel_exec_info = std::nullopt,
+                            const std::optional<std::filesystem::path> &storage_dir = std::nullopt);
 
 // Helper function used to recover all discovered constraints. The
 // constraints must be recovered after the data recovery is done
diff --git a/src/storage/v2/durability/marker.hpp b/src/storage/v2/durability/marker.hpp
index ac0cc074d..18d693e51 100644
--- a/src/storage/v2/durability/marker.hpp
+++ b/src/storage/v2/durability/marker.hpp
@@ -64,6 +64,8 @@ enum class Marker : uint8_t {
   DELTA_LABEL_PROPERTY_INDEX_STATS_CLEAR = 0x64,
   DELTA_EDGE_TYPE_INDEX_CREATE = 0x65,
   DELTA_EDGE_TYPE_INDEX_DROP = 0x66,
+  DELTA_TEXT_INDEX_CREATE = 0x67,
+  DELTA_TEXT_INDEX_DROP = 0x68,
 
   VALUE_FALSE = 0x00,
   VALUE_TRUE = 0xff,
@@ -110,6 +112,8 @@ static const Marker kMarkersAll[] = {
     Marker::DELTA_LABEL_PROPERTY_INDEX_DROP,
     Marker::DELTA_EDGE_TYPE_INDEX_CREATE,
     Marker::DELTA_EDGE_TYPE_INDEX_DROP,
+    Marker::DELTA_TEXT_INDEX_CREATE,
+    Marker::DELTA_TEXT_INDEX_DROP,
     Marker::DELTA_EXISTENCE_CONSTRAINT_CREATE,
     Marker::DELTA_EXISTENCE_CONSTRAINT_DROP,
     Marker::DELTA_UNIQUE_CONSTRAINT_CREATE,
diff --git a/src/storage/v2/durability/metadata.hpp b/src/storage/v2/durability/metadata.hpp
index c8ee27b2f..f36fc068d 100644
--- a/src/storage/v2/durability/metadata.hpp
+++ b/src/storage/v2/durability/metadata.hpp
@@ -44,6 +44,7 @@ struct RecoveredIndicesAndConstraints {
     std::vector<std::pair<LabelId, LabelIndexStats>> label_stats;
     std::vector<std::pair<LabelId, std::pair<PropertyId, LabelPropertyIndexStats>>> label_property_stats;
     std::vector<EdgeTypeId> edge;
+    std::vector<std::pair<std::string, LabelId>> text_indices;
   } indices;
 
   struct ConstraintsMetadata {
diff --git a/src/storage/v2/durability/serialization.cpp b/src/storage/v2/durability/serialization.cpp
index 28ba64943..becfa7f34 100644
--- a/src/storage/v2/durability/serialization.cpp
+++ b/src/storage/v2/durability/serialization.cpp
@@ -353,6 +353,8 @@ std::optional<PropertyValue> Decoder::ReadPropertyValue() {
     case Marker::DELTA_LABEL_PROPERTY_INDEX_DROP:
     case Marker::DELTA_EDGE_TYPE_INDEX_CREATE:
     case Marker::DELTA_EDGE_TYPE_INDEX_DROP:
+    case Marker::DELTA_TEXT_INDEX_CREATE:
+    case Marker::DELTA_TEXT_INDEX_DROP:
     case Marker::DELTA_EXISTENCE_CONSTRAINT_CREATE:
     case Marker::DELTA_EXISTENCE_CONSTRAINT_DROP:
     case Marker::DELTA_UNIQUE_CONSTRAINT_CREATE:
@@ -459,6 +461,8 @@ bool Decoder::SkipPropertyValue() {
     case Marker::DELTA_LABEL_PROPERTY_INDEX_DROP:
     case Marker::DELTA_EDGE_TYPE_INDEX_CREATE:
     case Marker::DELTA_EDGE_TYPE_INDEX_DROP:
+    case Marker::DELTA_TEXT_INDEX_CREATE:
+    case Marker::DELTA_TEXT_INDEX_DROP:
     case Marker::DELTA_EXISTENCE_CONSTRAINT_CREATE:
     case Marker::DELTA_EXISTENCE_CONSTRAINT_DROP:
     case Marker::DELTA_UNIQUE_CONSTRAINT_CREATE:
diff --git a/src/storage/v2/durability/snapshot.cpp b/src/storage/v2/durability/snapshot.cpp
index 5fea3dfa5..0e3bb96e3 100644
--- a/src/storage/v2/durability/snapshot.cpp
+++ b/src/storage/v2/durability/snapshot.cpp
@@ -13,6 +13,8 @@
 
 #include <thread>
 
+#include "flags/experimental.hpp"
+#include "flags/run_time_configurable.hpp"
 #include "spdlog/spdlog.h"
 #include "storage/v2/durability/exceptions.hpp"
 #include "storage/v2/durability/paths.hpp"
@@ -2004,6 +2006,24 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis
       spdlog::info("Metadata of edge-type indices are recovered.");
     }
 
+    // Recover text indices.
+    if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+      auto size = snapshot.ReadUint();
+      if (!size) throw RecoveryFailure("Couldn't recover the number of text indices!");
+      spdlog::info("Recovering metadata of {} text indices.", *size);
+      for (uint64_t i = 0; i < *size; ++i) {
+        auto index_name = snapshot.ReadString();
+        if (!index_name.has_value()) throw RecoveryFailure("Couldn't read text index name!");
+        auto label = snapshot.ReadUint();
+        if (!label) throw RecoveryFailure("Couldn't read text index label!");
+        AddRecoveredIndexConstraint(&indices_constraints.indices.text_indices,
+                                    {index_name.value(), get_label_from_id(*label)}, "The text index already exists!");
+        SPDLOG_TRACE("Recovered metadata of text index {} for :{}", index_name.value(),
+                     name_id_mapper->IdToName(snapshot_id_map.at(*label)));
+      }
+      spdlog::info("Metadata of text indices are recovered.");
+    }
+
     spdlog::info("Metadata of indices are recovered.");
   }
 
@@ -2493,6 +2513,16 @@ void CreateSnapshot(Storage *storage, Transaction *transaction, const std::files
         write_mapping(item);
       }
     }
+
+    // Write text indices.
+    if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+      auto text_indices = storage->indices_.text_index_.ListIndices();
+      snapshot.WriteUint(text_indices.size());
+      for (const auto &[index_name, label] : text_indices) {
+        snapshot.WriteString(index_name);
+        write_mapping(label);
+      }
+    }
   }
 
   // Write constraints.
diff --git a/src/storage/v2/durability/storage_global_operation.hpp b/src/storage/v2/durability/storage_global_operation.hpp
index 7dd635e9d..d9c77b6c6 100644
--- a/src/storage/v2/durability/storage_global_operation.hpp
+++ b/src/storage/v2/durability/storage_global_operation.hpp
@@ -25,6 +25,8 @@ enum class StorageMetadataOperation {
   LABEL_PROPERTY_INDEX_STATS_CLEAR,
   EDGE_TYPE_INDEX_CREATE,
   EDGE_TYPE_INDEX_DROP,
+  TEXT_INDEX_CREATE,
+  TEXT_INDEX_DROP,
   EXISTENCE_CONSTRAINT_CREATE,
   EXISTENCE_CONSTRAINT_DROP,
   UNIQUE_CONSTRAINT_CREATE,
diff --git a/src/storage/v2/durability/wal.cpp b/src/storage/v2/durability/wal.cpp
index 5c40ab1c5..c684d818c 100644
--- a/src/storage/v2/durability/wal.cpp
+++ b/src/storage/v2/durability/wal.cpp
@@ -99,6 +99,10 @@ Marker OperationToMarker(StorageMetadataOperation operation) {
       return Marker::DELTA_EDGE_TYPE_INDEX_CREATE;
     case StorageMetadataOperation::EDGE_TYPE_INDEX_DROP:
       return Marker::DELTA_EDGE_TYPE_INDEX_DROP;
+    case StorageMetadataOperation::TEXT_INDEX_CREATE:
+      return Marker::DELTA_TEXT_INDEX_CREATE;
+    case StorageMetadataOperation::TEXT_INDEX_DROP:
+      return Marker::DELTA_TEXT_INDEX_DROP;
     case StorageMetadataOperation::EXISTENCE_CONSTRAINT_CREATE:
       return Marker::DELTA_EXISTENCE_CONSTRAINT_CREATE;
     case StorageMetadataOperation::EXISTENCE_CONSTRAINT_DROP:
@@ -172,6 +176,10 @@ WalDeltaData::Type MarkerToWalDeltaDataType(Marker marker) {
       return WalDeltaData::Type::LABEL_PROPERTY_INDEX_CREATE;
     case Marker::DELTA_LABEL_PROPERTY_INDEX_DROP:
       return WalDeltaData::Type::LABEL_PROPERTY_INDEX_DROP;
+    case Marker::DELTA_TEXT_INDEX_CREATE:
+      return WalDeltaData::Type::TEXT_INDEX_CREATE;
+    case Marker::DELTA_TEXT_INDEX_DROP:
+      return WalDeltaData::Type::TEXT_INDEX_DROP;
     case Marker::DELTA_LABEL_PROPERTY_INDEX_STATS_SET:
       return WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_SET;
     case Marker::DELTA_LABEL_PROPERTY_INDEX_STATS_CLEAR:
@@ -382,6 +390,21 @@ WalDeltaData ReadSkipWalDeltaData(BaseDecoder *decoder) {
           if (!decoder->SkipString()) throw RecoveryFailure("Invalid WAL data!");
         }
       }
+      break;
+    }
+    case WalDeltaData::Type::TEXT_INDEX_CREATE:
+    case WalDeltaData::Type::TEXT_INDEX_DROP: {
+      if constexpr (read_data) {
+        auto index_name = decoder->ReadString();
+        if (!index_name) throw RecoveryFailure("Invalid WAL data!");
+        delta.operation_text.index_name = std::move(*index_name);
+        auto label = decoder->ReadString();
+        if (!label) throw RecoveryFailure("Invalid WAL data!");
+        delta.operation_text.label = std::move(*label);
+      } else {
+        if (!decoder->SkipString() || !decoder->SkipString()) throw RecoveryFailure("Invalid WAL data!");
+      }
+      break;
     }
   }
 
@@ -529,6 +552,12 @@ bool operator==(const WalDeltaData &a, const WalDeltaData &b) {
 
     case WalDeltaData::Type::LABEL_PROPERTY_INDEX_CREATE:
     case WalDeltaData::Type::LABEL_PROPERTY_INDEX_DROP:
+    case WalDeltaData::Type::TEXT_INDEX_CREATE:
+      return a.operation_text.index_name == b.operation_text.index_name &&
+             a.operation_text.label == b.operation_text.label;
+    case WalDeltaData::Type::TEXT_INDEX_DROP:
+      return a.operation_text.index_name == b.operation_text.index_name &&
+             a.operation_text.label == b.operation_text.label;
     case WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE:
     case WalDeltaData::Type::EXISTENCE_CONSTRAINT_DROP:
       return a.operation_label_property.label == b.operation_label_property.label &&
@@ -675,7 +704,8 @@ void EncodeTransactionEnd(BaseEncoder *encoder, uint64_t timestamp) {
 }
 
 void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, StorageMetadataOperation operation,
-                     LabelId label, const std::set<PropertyId> &properties, const LabelIndexStats &stats,
+                     const std::optional<std::string> text_index_name, LabelId label,
+                     const std::set<PropertyId> &properties, const LabelIndexStats &stats,
                      const LabelPropertyIndexStats &property_stats, uint64_t timestamp) {
   encoder->WriteMarker(Marker::SECTION_DELTA);
   encoder->WriteUint(timestamp);
@@ -731,6 +761,14 @@ void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, Storage
     case StorageMetadataOperation::EDGE_TYPE_INDEX_DROP: {
       MG_ASSERT(false, "Invalid function  call!");
     }
+    case StorageMetadataOperation::TEXT_INDEX_CREATE:
+    case StorageMetadataOperation::TEXT_INDEX_DROP: {
+      MG_ASSERT(text_index_name.has_value(), "Text indices must be named!");
+      encoder->WriteMarker(OperationToMarker(operation));
+      encoder->WriteString(text_index_name.value());
+      encoder->WriteString(name_id_mapper->IdToName(label.AsUint()));
+      break;
+    }
   }
 }
 
@@ -752,6 +790,8 @@ void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, Storage
     case StorageMetadataOperation::LABEL_INDEX_STATS_SET:
     case StorageMetadataOperation::LABEL_PROPERTY_INDEX_CREATE:
     case StorageMetadataOperation::LABEL_PROPERTY_INDEX_DROP:
+    case StorageMetadataOperation::TEXT_INDEX_CREATE:
+    case StorageMetadataOperation::TEXT_INDEX_DROP:
     case StorageMetadataOperation::EXISTENCE_CONSTRAINT_CREATE:
     case StorageMetadataOperation::EXISTENCE_CONSTRAINT_DROP:
     case StorageMetadataOperation::LABEL_PROPERTY_INDEX_STATS_SET:
@@ -1000,6 +1040,20 @@ RecoveryInfo LoadWal(const std::filesystem::path &path, RecoveredIndicesAndConst
                                     "The label index stats doesn't exist!");
           break;
         }
+        case WalDeltaData::Type::TEXT_INDEX_CREATE: {
+          auto index_name = delta.operation_text.index_name;
+          auto label = LabelId::FromUint(name_id_mapper->NameToId(delta.operation_text.label));
+          AddRecoveredIndexConstraint(&indices_constraints->indices.text_indices, {index_name, label},
+                                      "The text index already exists!");
+          break;
+        }
+        case WalDeltaData::Type::TEXT_INDEX_DROP: {
+          auto index_name = delta.operation_text.index_name;
+          auto label = LabelId::FromUint(name_id_mapper->NameToId(delta.operation_text.label));
+          RemoveRecoveredIndexConstraint(&indices_constraints->indices.text_indices, {index_name, label},
+                                         "The text index doesn't exist!");
+          break;
+        }
         case WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE: {
           auto label_id = LabelId::FromUint(name_id_mapper->NameToId(delta.operation_label_property.label));
           auto property_id = PropertyId::FromUint(name_id_mapper->NameToId(delta.operation_label_property.property));
@@ -1148,10 +1202,11 @@ void WalFile::AppendTransactionEnd(uint64_t timestamp) {
   UpdateStats(timestamp);
 }
 
-void WalFile::AppendOperation(StorageMetadataOperation operation, LabelId label, const std::set<PropertyId> &properties,
-                              const LabelIndexStats &stats, const LabelPropertyIndexStats &property_stats,
-                              uint64_t timestamp) {
-  EncodeOperation(&wal_, name_id_mapper_, operation, label, properties, stats, property_stats, timestamp);
+void WalFile::AppendOperation(StorageMetadataOperation operation, const std::optional<std::string> text_index_name,
+                              LabelId label, const std::set<PropertyId> &properties, const LabelIndexStats &stats,
+                              const LabelPropertyIndexStats &property_stats, uint64_t timestamp) {
+  EncodeOperation(&wal_, name_id_mapper_, operation, text_index_name, label, properties, stats, property_stats,
+                  timestamp);
   UpdateStats(timestamp);
 }
 
diff --git a/src/storage/v2/durability/wal.hpp b/src/storage/v2/durability/wal.hpp
index 516487e0d..4990e6979 100644
--- a/src/storage/v2/durability/wal.hpp
+++ b/src/storage/v2/durability/wal.hpp
@@ -69,6 +69,8 @@ struct WalDeltaData {
     LABEL_PROPERTY_INDEX_STATS_CLEAR,
     EDGE_INDEX_CREATE,
     EDGE_INDEX_DROP,
+    TEXT_INDEX_CREATE,
+    TEXT_INDEX_DROP,
     EXISTENCE_CONSTRAINT_CREATE,
     EXISTENCE_CONSTRAINT_DROP,
     UNIQUE_CONSTRAINT_CREATE,
@@ -127,6 +129,11 @@ struct WalDeltaData {
     std::string property;
     std::string stats;
   } operation_label_property_stats;
+
+  struct {
+    std::string index_name;
+    std::string label;
+  } operation_text;
 };
 
 bool operator==(const WalDeltaData &a, const WalDeltaData &b);
@@ -163,6 +170,8 @@ constexpr bool IsWalDeltaDataTypeTransactionEndVersion15(const WalDeltaData::Typ
     case WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_CLEAR:
     case WalDeltaData::Type::EDGE_INDEX_CREATE:
     case WalDeltaData::Type::EDGE_INDEX_DROP:
+    case WalDeltaData::Type::TEXT_INDEX_CREATE:
+    case WalDeltaData::Type::TEXT_INDEX_DROP:
     case WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE:
     case WalDeltaData::Type::EXISTENCE_CONSTRAINT_DROP:
     case WalDeltaData::Type::UNIQUE_CONSTRAINT_CREATE:
@@ -213,7 +222,8 @@ void EncodeTransactionEnd(BaseEncoder *encoder, uint64_t timestamp);
 
 /// Function used to encode non-transactional operation.
 void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, StorageMetadataOperation operation,
-                     LabelId label, const std::set<PropertyId> &properties, const LabelIndexStats &stats,
+                     const std::optional<std::string> text_index_name, LabelId label,
+                     const std::set<PropertyId> &properties, const LabelIndexStats &stats,
                      const LabelPropertyIndexStats &property_stats, uint64_t timestamp);
 
 void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, StorageMetadataOperation operation,
@@ -248,8 +258,9 @@ class WalFile {
 
   void AppendTransactionEnd(uint64_t timestamp);
 
-  void AppendOperation(StorageMetadataOperation operation, LabelId label, const std::set<PropertyId> &properties,
-                       const LabelIndexStats &stats, const LabelPropertyIndexStats &property_stats, uint64_t timestamp);
+  void AppendOperation(StorageMetadataOperation operation, const std::optional<std::string> text_index_name,
+                       LabelId label, const std::set<PropertyId> &properties, const LabelIndexStats &stats,
+                       const LabelPropertyIndexStats &property_stats, uint64_t timestamp);
 
   void AppendOperation(StorageMetadataOperation operation, EdgeTypeId edge_type, uint64_t timestamp);
 
diff --git a/src/storage/v2/indices/indices.cpp b/src/storage/v2/indices/indices.cpp
index 6068f888f..1cbde2362 100644
--- a/src/storage/v2/indices/indices.cpp
+++ b/src/storage/v2/indices/indices.cpp
@@ -16,6 +16,7 @@
 #include "storage/v2/inmemory/edge_type_index.hpp"
 #include "storage/v2/inmemory/label_index.hpp"
 #include "storage/v2/inmemory/label_property_index.hpp"
+#include "storage/v2/storage.hpp"
 
 namespace memgraph::storage {
 
diff --git a/src/storage/v2/indices/indices.hpp b/src/storage/v2/indices/indices.hpp
index 40cff577f..6f1bc44db 100644
--- a/src/storage/v2/indices/indices.hpp
+++ b/src/storage/v2/indices/indices.hpp
@@ -18,6 +18,7 @@
 #include "storage/v2/indices/edge_type_index.hpp"
 #include "storage/v2/indices/label_index.hpp"
 #include "storage/v2/indices/label_property_index.hpp"
+#include "storage/v2/indices/text_index.hpp"
 #include "storage/v2/storage_mode.hpp"
 
 namespace memgraph::storage {
@@ -31,12 +32,12 @@ struct Indices {
   Indices &operator=(Indices &&) = delete;
   ~Indices() = default;
 
-  /// This function should be called from garbage collection to clean-up the
+  /// This function should be called from garbage collection to clean up the
   /// index.
   /// TODO: unused in disk indices
   void RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp, std::stop_token token) const;
 
-  /// Surgical removal of entries that was inserted this transaction
+  /// Surgical removal of entries that were inserted in this transaction
   /// TODO: unused in disk indices
   void AbortEntries(LabelId labelId, std::span<Vertex *const> vertices, uint64_t exact_start_timestamp) const;
   void AbortEntries(PropertyId property, std::span<std::pair<PropertyValue, Vertex *> const> vertices,
@@ -71,6 +72,7 @@ struct Indices {
   std::unique_ptr<LabelIndex> label_index_;
   std::unique_ptr<LabelPropertyIndex> label_property_index_;
   std::unique_ptr<EdgeTypeIndex> edge_type_index_;
+  mutable TextIndex text_index_;
 };
 
 }  // namespace memgraph::storage
diff --git a/src/storage/v2/indices/text_index.cpp b/src/storage/v2/indices/text_index.cpp
new file mode 100644
index 000000000..1c9488097
--- /dev/null
+++ b/src/storage/v2/indices/text_index.cpp
@@ -0,0 +1,430 @@
+// Copyright 2024 Memgraph Ltd.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+// License, and you may not use this file except in compliance with the Business Source License.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+#include "storage/v2/indices/text_index.hpp"
+#include "flags/experimental.hpp"
+#include "flags/run_time_configurable.hpp"
+#include "query/db_accessor.hpp"
+#include "storage/v2/view.hpp"
+#include "text_search.hpp"
+
+namespace memgraph::storage {
+
+std::string GetPropertyName(PropertyId prop_id, memgraph::query::DbAccessor *db) { return db->PropertyToName(prop_id); }
+
+std::string GetPropertyName(PropertyId prop_id, NameIdMapper *name_id_mapper) {
+  return name_id_mapper->IdToName(prop_id.AsUint());
+}
+
+inline std::string TextIndex::MakeIndexPath(const std::filesystem::path &storage_dir, const std::string &index_name) {
+  return (storage_dir / kTextIndicesDirectory / index_name).string();
+}
+
+void TextIndex::CreateEmptyIndex(const std::filesystem::path &storage_dir, const std::string &index_name,
+                                 LabelId label) {
+  if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    throw query::TextSearchDisabledException();
+  }
+
+  if (index_.contains(index_name)) {
+    throw query::TextSearchException("Text index \"{}\" already exists.", index_name);
+  }
+
+  try {
+    nlohmann::json mappings = {};
+    mappings["properties"] = {};
+    mappings["properties"]["metadata"] = {{"type", "json"}, {"fast", true}, {"stored", true}, {"text", true}};
+    mappings["properties"]["data"] = {{"type", "json"}, {"fast", true}, {"stored", true}, {"text", true}};
+    mappings["properties"]["all"] = {{"type", "text"}, {"fast", true}, {"stored", true}, {"text", true}};
+
+    index_.emplace(index_name, TextIndexData{.context_ = mgcxx::text_search::create_index(
+                                                 MakeIndexPath(storage_dir, index_name),
+                                                 mgcxx::text_search::IndexConfig{.mappings = mappings.dump()}),
+                                             .scope_ = label});
+  } catch (const std::exception &e) {
+    throw query::TextSearchException("Tantivy error: {}", e.what());
+  }
+  label_to_index_.emplace(label, index_name);
+}
+
+template <typename T>
+nlohmann::json TextIndex::SerializeProperties(const std::map<PropertyId, PropertyValue> &properties, T *name_resolver) {
+  nlohmann::json serialized_properties = nlohmann::json::value_t::object;
+  for (const auto &[prop_id, prop_value] : properties) {
+    switch (prop_value.type()) {
+      case PropertyValue::Type::Bool:
+        serialized_properties[GetPropertyName(prop_id, name_resolver)] = prop_value.ValueBool();
+        break;
+      case PropertyValue::Type::Int:
+        serialized_properties[GetPropertyName(prop_id, name_resolver)] = prop_value.ValueInt();
+        break;
+      case PropertyValue::Type::Double:
+        serialized_properties[GetPropertyName(prop_id, name_resolver)] = prop_value.ValueDouble();
+        break;
+      case PropertyValue::Type::String:
+        serialized_properties[GetPropertyName(prop_id, name_resolver)] = prop_value.ValueString();
+        break;
+      case PropertyValue::Type::Null:
+      case PropertyValue::Type::List:
+      case PropertyValue::Type::Map:
+      case PropertyValue::Type::TemporalData:
+      default:
+        continue;
+    }
+  }
+
+  return serialized_properties;
+}
+
+std::string TextIndex::StringifyProperties(const std::map<PropertyId, PropertyValue> &properties) {
+  std::vector<std::string> indexable_properties_as_string;
+  for (const auto &[_, prop_value] : properties) {
+    switch (prop_value.type()) {
+      case PropertyValue::Type::Bool:
+        indexable_properties_as_string.push_back(prop_value.ValueBool() ? "true" : "false");
+        break;
+      case PropertyValue::Type::Int:
+        indexable_properties_as_string.push_back(std::to_string(prop_value.ValueInt()));
+        break;
+      case PropertyValue::Type::Double:
+        indexable_properties_as_string.push_back(std::to_string(prop_value.ValueDouble()));
+        break;
+      case PropertyValue::Type::String:
+        indexable_properties_as_string.push_back(prop_value.ValueString());
+        break;
+      // NOTE: As the following types aren't indexed in Tantivy, they don't appear in the property value string either.
+      case PropertyValue::Type::Null:
+      case PropertyValue::Type::List:
+      case PropertyValue::Type::Map:
+      case PropertyValue::Type::TemporalData:
+      default:
+        continue;
+    }
+  }
+  return utils::Join(indexable_properties_as_string, " ");
+}
+
+std::vector<mgcxx::text_search::Context *> TextIndex::GetApplicableTextIndices(const std::vector<LabelId> &labels) {
+  if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    throw query::TextSearchDisabledException();
+  }
+
+  std::vector<mgcxx::text_search::Context *> applicable_text_indices;
+  for (const auto &label : labels) {
+    if (label_to_index_.contains(label)) {
+      applicable_text_indices.push_back(&index_.at(label_to_index_.at(label)).context_);
+    }
+  }
+  return applicable_text_indices;
+}
+
+void TextIndex::LoadNodeToTextIndices(const std::int64_t gid, const nlohmann::json &properties,
+                                      const std::string &property_values_as_str,
+                                      const std::vector<mgcxx::text_search::Context *> &applicable_text_indices) {
+  if (applicable_text_indices.empty()) {
+    return;
+  }
+
+  // NOTE: Text indexes are presently all-property indices. If we allow text indexes restricted to specific properties,
+  // an indexable document should be created for each applicable index.
+  nlohmann::json document = {};
+  document["data"] = properties;
+  document["all"] = property_values_as_str;
+  document["metadata"] = {};
+  document["metadata"]["gid"] = gid;
+  document["metadata"]["deleted"] = false;
+  document["metadata"]["is_node"] = true;
+
+  for (auto *index_context : applicable_text_indices) {
+    try {
+      mgcxx::text_search::add_document(
+          *index_context,
+          mgcxx::text_search::DocumentInput{
+              .data = document.dump(-1, ' ', false, nlohmann::json::error_handler_t::replace)},
+          kDoSkipCommit);
+    } catch (const std::exception &e) {
+      throw query::TextSearchException("Tantivy error: {}", e.what());
+    }
+  }
+}
+
+void TextIndex::CommitLoadedNodes(mgcxx::text_search::Context &index_context) {
+  // As CREATE TEXT INDEX (...) queries don’t accumulate deltas, db_transactional_accessor_->Commit() does not reach
+  // the code area where changes to indices are committed. To get around that without needing to commit text indices
+  // after every such query, we commit here.
+  try {
+    mgcxx::text_search::commit(index_context);
+  } catch (const std::exception &e) {
+    throw query::TextSearchException("Tantivy error: {}", e.what());
+  }
+}
+
+void TextIndex::AddNode(
+    Vertex *vertex_after_update, NameIdMapper *name_id_mapper,
+    const std::optional<std::vector<mgcxx::text_search::Context *>> &maybe_applicable_text_indices) {
+  if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    throw query::TextSearchDisabledException();
+  }
+
+  auto applicable_text_indices =
+      maybe_applicable_text_indices.value_or(GetApplicableTextIndices(vertex_after_update->labels));
+  if (applicable_text_indices.empty()) {
+    return;
+  }
+
+  auto vertex_properties = vertex_after_update->properties.Properties();
+  LoadNodeToTextIndices(vertex_after_update->gid.AsInt(), SerializeProperties(vertex_properties, name_id_mapper),
+                        StringifyProperties(vertex_properties), applicable_text_indices);
+}
+
+void TextIndex::UpdateNode(Vertex *vertex_after_update, NameIdMapper *name_id_mapper,
+                           const std::vector<LabelId> &removed_labels) {
+  if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    throw query::TextSearchDisabledException();
+  }
+
+  if (!removed_labels.empty()) {
+    auto indexes_to_remove_node_from = GetApplicableTextIndices(removed_labels);
+    RemoveNode(vertex_after_update, indexes_to_remove_node_from);
+  }
+
+  auto applicable_text_indices = GetApplicableTextIndices(vertex_after_update->labels);
+  if (applicable_text_indices.empty()) return;
+  RemoveNode(vertex_after_update, applicable_text_indices);
+  AddNode(vertex_after_update, name_id_mapper, applicable_text_indices);
+}
+
+void TextIndex::RemoveNode(
+    Vertex *vertex_after_update,
+    const std::optional<std::vector<mgcxx::text_search::Context *>> &maybe_applicable_text_indices) {
+  if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    throw query::TextSearchDisabledException();
+  }
+
+  auto search_node_to_be_deleted =
+      mgcxx::text_search::SearchInput{.search_query = fmt::format("metadata.gid:{}", vertex_after_update->gid.AsInt())};
+
+  for (auto *index_context :
+       maybe_applicable_text_indices.value_or(GetApplicableTextIndices(vertex_after_update->labels))) {
+    try {
+      mgcxx::text_search::delete_document(*index_context, search_node_to_be_deleted, kDoSkipCommit);
+    } catch (const std::exception &e) {
+      throw query::TextSearchException("Tantivy error: {}", e.what());
+    }
+  }
+}
+
+void TextIndex::CreateIndex(const std::filesystem::path &storage_dir, const std::string &index_name, LabelId label,
+                            memgraph::query::DbAccessor *db) {
+  if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    throw query::TextSearchDisabledException();
+  }
+
+  CreateEmptyIndex(storage_dir, index_name, label);
+
+  for (const auto &v : db->Vertices(View::NEW)) {
+    if (!v.HasLabel(View::NEW, label).GetValue()) {
+      continue;
+    }
+
+    auto vertex_properties = v.Properties(View::NEW).GetValue();
+    LoadNodeToTextIndices(v.Gid().AsInt(), SerializeProperties(vertex_properties, db),
+                          StringifyProperties(vertex_properties), {&index_.at(index_name).context_});
+  }
+
+  CommitLoadedNodes(index_.at(index_name).context_);
+}
+
+void TextIndex::RecoverIndex(const std::filesystem::path &storage_dir, const std::string &index_name, LabelId label,
+                             memgraph::utils::SkipList<Vertex>::Accessor vertices, NameIdMapper *name_id_mapper) {
+  if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    throw query::TextSearchDisabledException();
+  }
+
+  // Clear Tantivy-internal files if they exist from previous sessions
+  std::filesystem::remove_all(storage_dir / kTextIndicesDirectory / index_name);
+
+  CreateEmptyIndex(storage_dir, index_name, label);
+
+  for (const auto &v : vertices) {
+    if (std::find(v.labels.begin(), v.labels.end(), label) == v.labels.end()) {
+      continue;
+    }
+
+    auto vertex_properties = v.properties.Properties();
+    LoadNodeToTextIndices(v.gid.AsInt(), SerializeProperties(vertex_properties, name_id_mapper),
+                          StringifyProperties(vertex_properties), {&index_.at(index_name).context_});
+  }
+
+  CommitLoadedNodes(index_.at(index_name).context_);
+}
+
+LabelId TextIndex::DropIndex(const std::filesystem::path &storage_dir, const std::string &index_name) {
+  if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    throw query::TextSearchDisabledException();
+  }
+
+  if (!index_.contains(index_name)) {
+    throw query::TextSearchException("Text index \"{}\" doesn’t exist.", index_name);
+  }
+
+  try {
+    mgcxx::text_search::drop_index(MakeIndexPath(storage_dir, index_name));
+  } catch (const std::exception &e) {
+    throw query::TextSearchException("Tantivy error: {}", e.what());
+  }
+  auto deleted_index_label = index_.at(index_name).scope_;
+
+  index_.erase(index_name);
+  std::erase_if(label_to_index_, [index_name](const auto &item) { return item.second == index_name; });
+
+  return deleted_index_label;
+}
+
+bool TextIndex::IndexExists(const std::string &index_name) const { return index_.contains(index_name); }
+
+mgcxx::text_search::SearchOutput TextIndex::SearchGivenProperties(const std::string &index_name,
+                                                                  const std::string &search_query) {
+  try {
+    return mgcxx::text_search::search(
+        index_.at(index_name).context_,
+        mgcxx::text_search::SearchInput{.search_query = search_query, .return_fields = {"metadata"}});
+  } catch (const std::exception &e) {
+    throw query::TextSearchException("Tantivy error: {}", e.what());
+  }
+
+  return mgcxx::text_search::SearchOutput{};
+}
+
+mgcxx::text_search::SearchOutput TextIndex::RegexSearch(const std::string &index_name,
+                                                        const std::string &search_query) {
+  try {
+    return mgcxx::text_search::regex_search(
+        index_.at(index_name).context_,
+        mgcxx::text_search::SearchInput{
+            .search_fields = {"all"}, .search_query = search_query, .return_fields = {"metadata"}});
+  } catch (const std::exception &e) {
+    throw query::TextSearchException("Tantivy error: {}", e.what());
+  }
+
+  return mgcxx::text_search::SearchOutput{};
+}
+
+mgcxx::text_search::SearchOutput TextIndex::SearchAllProperties(const std::string &index_name,
+                                                                const std::string &search_query) {
+  try {
+    return mgcxx::text_search::search(
+        index_.at(index_name).context_,
+        mgcxx::text_search::SearchInput{
+            .search_fields = {"all"}, .search_query = search_query, .return_fields = {"metadata"}});
+  } catch (const std::exception &e) {
+    throw query::TextSearchException("Tantivy error: {}", e.what());
+  }
+
+  return mgcxx::text_search::SearchOutput{};
+}
+
+std::vector<Gid> TextIndex::Search(const std::string &index_name, const std::string &search_query,
+                                   text_search_mode search_mode) {
+  if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    throw query::TextSearchDisabledException();
+  }
+
+  if (!index_.contains(index_name)) {
+    throw query::TextSearchException("Text index \"{}\" doesn’t exist.", index_name);
+  }
+
+  mgcxx::text_search::SearchOutput search_results;
+  switch (search_mode) {
+    case text_search_mode::SPECIFIED_PROPERTIES:
+      search_results = SearchGivenProperties(index_name, search_query);
+      break;
+    case text_search_mode::REGEX:
+      search_results = RegexSearch(index_name, search_query);
+      break;
+    case text_search_mode::ALL_PROPERTIES:
+      search_results = SearchAllProperties(index_name, search_query);
+      break;
+    default:
+      throw query::TextSearchException(
+          "Unsupported search mode: please use one of text_search.search, text_search.search_all, or "
+          "text_search.regex_search.");
+  }
+
+  std::vector<Gid> found_nodes;
+  for (const auto &doc : search_results.docs) {
+    // The CXX .data() method (https://cxx.rs/binding/string.html) may overestimate string length, causing JSON parsing
+    // errors downstream. We prevent this by resizing the converted string with the correctly-working .length() method.
+    std::string doc_string = doc.data.data();
+    doc_string.resize(doc.data.length());
+    auto doc_json = nlohmann::json::parse(doc_string);
+    found_nodes.push_back(storage::Gid::FromString(doc_json["metadata"]["gid"].dump()));
+  }
+  return found_nodes;
+}
+
+std::string TextIndex::Aggregate(const std::string &index_name, const std::string &search_query,
+                                 const std::string &aggregation_query) {
+  if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    throw query::TextSearchDisabledException();
+  }
+
+  if (!index_.contains(index_name)) {
+    throw query::TextSearchException("Text index \"{}\" doesn’t exist.", index_name);
+  }
+
+  mgcxx::text_search::DocumentOutput aggregation_result;
+  try {
+    aggregation_result = mgcxx::text_search::aggregate(
+        index_.at(index_name).context_,
+        mgcxx::text_search::SearchInput{
+            .search_fields = {"all"}, .search_query = search_query, .aggregation_query = aggregation_query});
+
+  } catch (const std::exception &e) {
+    throw query::TextSearchException("Tantivy error: {}", e.what());
+  }
+  // The CXX .data() method (https://cxx.rs/binding/string.html) may overestimate string length, causing JSON parsing
+  // errors downstream. We prevent this by resizing the converted string with the correctly-working .length() method.
+  std::string result_string = aggregation_result.data.data();
+  result_string.resize(aggregation_result.data.length());
+  return result_string;
+}
+
+void TextIndex::Commit() {
+  if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    throw query::TextSearchDisabledException();
+  }
+
+  for (auto &[_, index_data] : index_) {
+    mgcxx::text_search::commit(index_data.context_);
+  }
+}
+
+void TextIndex::Rollback() {
+  if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    throw query::TextSearchDisabledException();
+  }
+
+  for (auto &[_, index_data] : index_) {
+    mgcxx::text_search::rollback(index_data.context_);
+  }
+}
+
+std::vector<std::pair<std::string, LabelId>> TextIndex::ListIndices() const {
+  std::vector<std::pair<std::string, LabelId>> ret;
+  ret.reserve(index_.size());
+  for (const auto &[index_name, index_data] : index_) {
+    ret.emplace_back(index_name, index_data.scope_);
+  }
+  return ret;
+}
+
+}  // namespace memgraph::storage
diff --git a/src/storage/v2/indices/text_index.hpp b/src/storage/v2/indices/text_index.hpp
new file mode 100644
index 000000000..af4748c6e
--- /dev/null
+++ b/src/storage/v2/indices/text_index.hpp
@@ -0,0 +1,105 @@
+// Copyright 2024 Memgraph Ltd.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+// License, and you may not use this file except in compliance with the Business Source License.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+#pragma once
+
+#include <json/json.hpp>
+#include "mg_procedure.h"
+#include "storage/v2/id_types.hpp"
+#include "storage/v2/name_id_mapper.hpp"
+#include "storage/v2/vertex.hpp"
+#include "text_search.hpp"
+
+namespace memgraph::query {
+class DbAccessor;
+}
+
+namespace memgraph::storage {
+struct TextIndexData {
+  mgcxx::text_search::Context context_;
+  LabelId scope_;
+};
+
+class TextIndex {
+ private:
+  static constexpr bool kDoSkipCommit = true;
+  static constexpr std::string_view kTextIndicesDirectory = "text_indices";
+
+  inline std::string MakeIndexPath(const std::filesystem::path &storage_dir, const std::string &index_name);
+
+  void CreateEmptyIndex(const std::filesystem::path &storage_dir, const std::string &index_name, LabelId label);
+
+  template <typename T>
+  nlohmann::json SerializeProperties(const std::map<PropertyId, PropertyValue> &properties, T *name_resolver);
+
+  std::string StringifyProperties(const std::map<PropertyId, PropertyValue> &properties);
+
+  std::vector<mgcxx::text_search::Context *> GetApplicableTextIndices(const std::vector<LabelId> &labels);
+
+  void LoadNodeToTextIndices(const std::int64_t gid, const nlohmann::json &properties,
+                             const std::string &property_values_as_str,
+                             const std::vector<mgcxx::text_search::Context *> &applicable_text_indices);
+
+  void CommitLoadedNodes(mgcxx::text_search::Context &index_context);
+
+  mgcxx::text_search::SearchOutput SearchGivenProperties(const std::string &index_name,
+                                                         const std::string &search_query);
+
+  mgcxx::text_search::SearchOutput RegexSearch(const std::string &index_name, const std::string &search_query);
+
+  mgcxx::text_search::SearchOutput SearchAllProperties(const std::string &index_name, const std::string &search_query);
+
+ public:
+  TextIndex() = default;
+
+  TextIndex(const TextIndex &) = delete;
+  TextIndex(TextIndex &&) = delete;
+  TextIndex &operator=(const TextIndex &) = delete;
+  TextIndex &operator=(TextIndex &&) = delete;
+
+  ~TextIndex() = default;
+
+  std::map<std::string, TextIndexData> index_;
+  std::map<LabelId, std::string> label_to_index_;
+
+  void AddNode(
+      Vertex *vertex, NameIdMapper *name_id_mapper,
+      const std::optional<std::vector<mgcxx::text_search::Context *>> &maybe_applicable_text_indices = std::nullopt);
+
+  void UpdateNode(Vertex *vertex, NameIdMapper *name_id_mapper, const std::vector<LabelId> &removed_labels = {});
+
+  void RemoveNode(
+      Vertex *vertex,
+      const std::optional<std::vector<mgcxx::text_search::Context *>> &maybe_applicable_text_indices = std::nullopt);
+
+  void CreateIndex(const std::filesystem::path &storage_dir, const std::string &index_name, LabelId label,
+                   memgraph::query::DbAccessor *db);
+
+  void RecoverIndex(const std::filesystem::path &storage_dir, const std::string &index_name, LabelId label,
+                    memgraph::utils::SkipList<Vertex>::Accessor vertices, NameIdMapper *name_id_mapper);
+
+  LabelId DropIndex(const std::filesystem::path &storage_dir, const std::string &index_name);
+
+  bool IndexExists(const std::string &index_name) const;
+
+  std::vector<Gid> Search(const std::string &index_name, const std::string &search_query, text_search_mode search_mode);
+
+  std::string Aggregate(const std::string &index_name, const std::string &search_query,
+                        const std::string &aggregation_query);
+
+  void Commit();
+
+  void Rollback();
+
+  std::vector<std::pair<std::string, LabelId>> ListIndices() const;
+};
+
+}  // namespace memgraph::storage
diff --git a/src/storage/v2/inmemory/storage.cpp b/src/storage/v2/inmemory/storage.cpp
index b8d74f392..4d732e190 100644
--- a/src/storage/v2/inmemory/storage.cpp
+++ b/src/storage/v2/inmemory/storage.cpp
@@ -15,6 +15,8 @@
 #include <functional>
 #include <optional>
 #include "dbms/constants.hpp"
+#include "flags/experimental.hpp"
+#include "flags/run_time_configurable.hpp"
 #include "memory/global_memory_control.hpp"
 #include "storage/v2/durability/durability.hpp"
 #include "storage/v2/durability/snapshot.hpp"
@@ -916,6 +918,10 @@ utils::BasicResult<StorageManipulationError, void> InMemoryStorage::InMemoryAcce
       commit_timestamp_.reset();  // We have aborted, hence we have not committed
       return StorageManipulationError{*unique_constraint_violation};
     }
+
+    if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+      mem_storage->indices_.text_index_.Commit();
+    }
   }
 
   is_transaction_active_ = false;
@@ -1239,6 +1245,9 @@ void InMemoryStorage::InMemoryAccessor::Abort() {
       for (auto const &[property, prop_vertices] : property_cleanup) {
         storage_->indices_.AbortEntries(property, prop_vertices, transaction_.start_timestamp);
       }
+      if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+        storage_->indices_.text_index_.Rollback();
+      }
 
       // VERTICES
       {
@@ -1877,6 +1886,7 @@ StorageInfo InMemoryStorage::GetInfo(memgraph::replication_coordination_glue::Re
     const auto &lbl = access->ListAllIndices();
     info.label_indices = lbl.label.size();
     info.label_property_indices = lbl.label_property.size();
+    info.text_indices = lbl.text_indices.size();
     const auto &con = access->ListAllConstraints();
     info.existence_constraints = con.existence.size();
     info.unique_constraints = con.unique.size();
@@ -2138,6 +2148,16 @@ bool InMemoryStorage::AppendToWal(const Transaction &transaction, uint64_t final
         AppendToWalDataDefinition(durability::StorageMetadataOperation::LABEL_PROPERTY_INDEX_STATS_CLEAR, info.label,
                                   final_commit_timestamp);
       } break;
+      case MetadataDelta::Action::TEXT_INDEX_CREATE: {
+        const auto &info = md_delta.text_index;
+        AppendToWalDataDefinition(durability::StorageMetadataOperation::TEXT_INDEX_CREATE, info.index_name, info.label,
+                                  final_commit_timestamp);
+      } break;
+      case MetadataDelta::Action::TEXT_INDEX_DROP: {
+        const auto &info = md_delta.text_index;
+        AppendToWalDataDefinition(durability::StorageMetadataOperation::TEXT_INDEX_DROP, info.index_name, info.label,
+                                  final_commit_timestamp);
+      } break;
       case MetadataDelta::Action::EXISTENCE_CONSTRAINT_CREATE: {
         const auto &info = md_delta.label_property;
         AppendToWalDataDefinition(durability::StorageMetadataOperation::EXISTENCE_CONSTRAINT_CREATE, info.label,
@@ -2168,11 +2188,13 @@ bool InMemoryStorage::AppendToWal(const Transaction &transaction, uint64_t final
   return repl_storage_state_.FinalizeTransaction(final_commit_timestamp, this, std::move(db_acc));
 }
 
-void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label,
+void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOperation operation,
+                                                const std::optional<std::string> text_index_name, LabelId label,
                                                 const std::set<PropertyId> &properties, LabelIndexStats stats,
                                                 LabelPropertyIndexStats property_stats,
                                                 uint64_t final_commit_timestamp) {
-  wal_file_->AppendOperation(operation, label, properties, stats, property_stats, final_commit_timestamp);
+  wal_file_->AppendOperation(operation, text_index_name, label, properties, stats, property_stats,
+                             final_commit_timestamp);
   repl_storage_state_.AppendOperation(operation, label, properties, stats, property_stats, final_commit_timestamp);
 }
 
@@ -2186,12 +2208,13 @@ void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOpera
                                                 const std::set<PropertyId> &properties,
                                                 LabelPropertyIndexStats property_stats,
                                                 uint64_t final_commit_timestamp) {
-  return AppendToWalDataDefinition(operation, label, properties, {}, property_stats, final_commit_timestamp);
+  return AppendToWalDataDefinition(operation, std::nullopt, label, properties, {}, property_stats,
+                                   final_commit_timestamp);
 }
 
 void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label,
                                                 LabelIndexStats stats, uint64_t final_commit_timestamp) {
-  return AppendToWalDataDefinition(operation, label, {}, stats, {}, final_commit_timestamp);
+  return AppendToWalDataDefinition(operation, std::nullopt, label, {}, stats, {}, final_commit_timestamp);
 }
 
 void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label,
@@ -2205,6 +2228,12 @@ void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOpera
   return AppendToWalDataDefinition(operation, label, {}, {}, final_commit_timestamp);
 }
 
+void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOperation operation,
+                                                const std::optional<std::string> text_index_name, LabelId label,
+                                                uint64_t final_commit_timestamp) {
+  return AppendToWalDataDefinition(operation, text_index_name, label, {}, {}, {}, final_commit_timestamp);
+}
+
 utils::BasicResult<InMemoryStorage::CreateSnapshotError> InMemoryStorage::CreateSnapshot(
     memgraph::replication_coordination_glue::ReplicationRole replication_role) {
   using memgraph::replication_coordination_glue::ReplicationRole;
@@ -2332,7 +2361,9 @@ IndicesInfo InMemoryStorage::InMemoryAccessor::ListAllIndices() const {
   auto *mem_label_property_index =
       static_cast<InMemoryLabelPropertyIndex *>(in_memory->indices_.label_property_index_.get());
   auto *mem_edge_type_index = static_cast<InMemoryEdgeTypeIndex *>(in_memory->indices_.edge_type_index_.get());
-  return {mem_label_index->ListIndices(), mem_label_property_index->ListIndices(), mem_edge_type_index->ListIndices()};
+  auto &text_index = storage_->indices_.text_index_;
+  return {mem_label_index->ListIndices(), mem_label_property_index->ListIndices(), mem_edge_type_index->ListIndices(),
+          text_index.ListIndices()};
 }
 ConstraintsInfo InMemoryStorage::InMemoryAccessor::ListAllConstraints() const {
   const auto *mem_storage = static_cast<InMemoryStorage *>(storage_);
diff --git a/src/storage/v2/inmemory/storage.hpp b/src/storage/v2/inmemory/storage.hpp
index 47e969f0e..31596353b 100644
--- a/src/storage/v2/inmemory/storage.hpp
+++ b/src/storage/v2/inmemory/storage.hpp
@@ -400,7 +400,7 @@ class InMemoryStorage final : public Storage {
   StorageInfo GetBaseInfo() override;
   StorageInfo GetInfo(memgraph::replication_coordination_glue::ReplicationRole replication_role) override;
 
-  /// Return true in all cases excepted if any sync replicas have not sent confirmation.
+  /// Return true in all cases except if any sync replicas have not sent confirmation.
   [[nodiscard]] bool AppendToWal(const Transaction &transaction, uint64_t final_commit_timestamp,
                                  DatabaseAccessProtector db_acc);
   void AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label,
@@ -414,9 +414,13 @@ class InMemoryStorage final : public Storage {
   void AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label,
                                  const std::set<PropertyId> &properties, LabelPropertyIndexStats property_stats,
                                  uint64_t final_commit_timestamp);
-  void AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label,
+  void AppendToWalDataDefinition(durability::StorageMetadataOperation operation,
+                                 const std::optional<std::string> text_index_name, LabelId label,
                                  const std::set<PropertyId> &properties, LabelIndexStats stats,
                                  LabelPropertyIndexStats property_stats, uint64_t final_commit_timestamp);
+  void AppendToWalDataDefinition(durability::StorageMetadataOperation operation,
+                                 const std::optional<std::string> text_index_name, LabelId label,
+                                 uint64_t final_commit_timestamp);
 
   uint64_t CommitTimestamp(std::optional<uint64_t> desired_commit_timestamp = {});
 
diff --git a/src/storage/v2/metadata_delta.hpp b/src/storage/v2/metadata_delta.hpp
index b34966a62..e4616161d 100644
--- a/src/storage/v2/metadata_delta.hpp
+++ b/src/storage/v2/metadata_delta.hpp
@@ -37,6 +37,8 @@ struct MetadataDelta {
     LABEL_PROPERTY_INDEX_STATS_CLEAR,
     EDGE_INDEX_CREATE,
     EDGE_INDEX_DROP,
+    TEXT_INDEX_CREATE,
+    TEXT_INDEX_DROP,
     EXISTENCE_CONSTRAINT_CREATE,
     EXISTENCE_CONSTRAINT_DROP,
     UNIQUE_CONSTRAINT_CREATE,
@@ -63,6 +65,10 @@ struct MetadataDelta {
   } edge_index_create;
   static constexpr struct EdgeIndexDrop {
   } edge_index_drop;
+  static constexpr struct TextIndexCreate {
+  } text_index_create;
+  static constexpr struct TextIndexDrop {
+  } text_index_drop;
   static constexpr struct ExistenceConstraintCreate {
   } existence_constraint_create;
   static constexpr struct ExistenceConstraintDrop {
@@ -98,6 +104,12 @@ struct MetadataDelta {
 
   MetadataDelta(EdgeIndexDrop /*tag*/, EdgeTypeId edge_type) : action(Action::EDGE_INDEX_DROP), edge_type(edge_type) {}
 
+  MetadataDelta(TextIndexCreate /*tag*/, std::string index_name, LabelId label)
+      : action(Action::TEXT_INDEX_CREATE), text_index{index_name, label} {}
+
+  MetadataDelta(TextIndexDrop /*tag*/, std::string index_name, LabelId label)
+      : action(Action::TEXT_INDEX_DROP), text_index{index_name, label} {}
+
   MetadataDelta(ExistenceConstraintCreate /*tag*/, LabelId label, PropertyId property)
       : action(Action::EXISTENCE_CONSTRAINT_CREATE), label_property{label, property} {}
 
@@ -127,6 +139,8 @@ struct MetadataDelta {
       case Action::LABEL_PROPERTY_INDEX_STATS_CLEAR:
       case Action::EDGE_INDEX_CREATE:
       case Action::EDGE_INDEX_DROP:
+      case Action::TEXT_INDEX_CREATE:
+      case Action::TEXT_INDEX_DROP:
       case Action::EXISTENCE_CONSTRAINT_CREATE:
       case Action::EXISTENCE_CONSTRAINT_DROP:
         break;
@@ -164,6 +178,11 @@ struct MetadataDelta {
       PropertyId property;
       LabelPropertyIndexStats stats;
     } label_property_stats;
+
+    struct {
+      std::string index_name;
+      LabelId label;
+    } text_index;
   };
 };
 
diff --git a/src/storage/v2/property_store.cpp b/src/storage/v2/property_store.cpp
index adf3440a2..0cfee0f98 100644
--- a/src/storage/v2/property_store.cpp
+++ b/src/storage/v2/property_store.cpp
@@ -118,7 +118,7 @@ enum class Type : uint8_t {
   STRING = 0x50,
   LIST = 0x60,
   MAP = 0x70,
-  TEMPORAL_DATA = 0x80
+  TEMPORAL_DATA = 0x80,
 };
 
 const uint8_t kMaskType = 0xf0;
diff --git a/src/storage/v2/replication/replication_client.cpp b/src/storage/v2/replication/replication_client.cpp
index a02c1eff0..008d4b619 100644
--- a/src/storage/v2/replication/replication_client.cpp
+++ b/src/storage/v2/replication/replication_client.cpp
@@ -92,7 +92,7 @@ void ReplicationStorageClient::UpdateReplicaState(Storage *storage, DatabaseAcce
           client_name, client_name, client_name);
     };
 #ifdef MG_ENTERPRISE
-    if (!FLAGS_coordinator_server_port) {
+    if (!FLAGS_management_port) {
       log_error();
       return;
     }
@@ -406,8 +406,9 @@ void ReplicaStream::AppendOperation(durability::StorageMetadataOperation operati
                                     const std::set<PropertyId> &properties, const LabelIndexStats &stats,
                                     const LabelPropertyIndexStats &property_stats, uint64_t timestamp) {
   replication::Encoder encoder(stream_.GetBuilder());
-  EncodeOperation(&encoder, storage_->name_id_mapper_.get(), operation, label, properties, stats, property_stats,
-                  timestamp);
+  // NOTE: Text search doesn't have replication in scope yet (Phases 1 and 2) -> text index name not sent here
+  EncodeOperation(&encoder, storage_->name_id_mapper_.get(), operation, std::nullopt, label, properties, stats,
+                  property_stats, timestamp);
 }
 
 void ReplicaStream::AppendOperation(durability::StorageMetadataOperation operation, EdgeTypeId edge_type,
diff --git a/src/storage/v2/storage.cpp b/src/storage/v2/storage.cpp
index 536a504a0..db4bec8be 100644
--- a/src/storage/v2/storage.cpp
+++ b/src/storage/v2/storage.cpp
@@ -13,6 +13,8 @@
 #include "absl/container/flat_hash_set.h"
 #include "spdlog/spdlog.h"
 
+#include "flags/experimental.hpp"
+#include "flags/run_time_configurable.hpp"
 #include "storage/v2/disk/name_id_mapper.hpp"
 #include "storage/v2/storage.hpp"
 #include "storage/v2/transaction.hpp"
@@ -273,6 +275,12 @@ Storage::Accessor::DetachDelete(std::vector<VertexAccessor *> nodes, std::vector
     return maybe_deleted_vertices.GetError();
   }
 
+  if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
+    for (auto *node : nodes_to_delete) {
+      storage_->indices_.text_index_.RemoveNode(node);
+    }
+  }
+
   auto deleted_vertices = maybe_deleted_vertices.GetValue();
 
   return std::make_optional<ReturnType>(std::move(deleted_vertices), std::move(deleted_edges));
@@ -543,4 +551,19 @@ void Storage::Accessor::MarkEdgeAsDeleted(Edge *edge) {
   }
 }
 
+void Storage::Accessor::CreateTextIndex(const std::string &index_name, LabelId label, query::DbAccessor *db) {
+  MG_ASSERT(unique_guard_.owns_lock(), "Creating a text index requires unique access to storage!");
+  storage_->indices_.text_index_.CreateIndex(storage_->config_.durability.storage_directory, index_name, label, db);
+  transaction_.md_deltas.emplace_back(MetadataDelta::text_index_create, index_name, label);
+  memgraph::metrics::IncrementCounter(memgraph::metrics::ActiveTextIndices);
+}
+
+void Storage::Accessor::DropTextIndex(const std::string &index_name) {
+  MG_ASSERT(unique_guard_.owns_lock(), "Dropping a text index requires unique access to storage!");
+  auto deleted_index_label =
+      storage_->indices_.text_index_.DropIndex(storage_->config_.durability.storage_directory, index_name);
+  transaction_.md_deltas.emplace_back(MetadataDelta::text_index_drop, index_name, deleted_index_label);
+  memgraph::metrics::DecrementCounter(memgraph::metrics::ActiveTextIndices);
+}
+
 }  // namespace memgraph::storage
diff --git a/src/storage/v2/storage.hpp b/src/storage/v2/storage.hpp
index 7c984b963..ed00fc21e 100644
--- a/src/storage/v2/storage.hpp
+++ b/src/storage/v2/storage.hpp
@@ -20,6 +20,7 @@
 
 #include "io/network/endpoint.hpp"
 #include "kvstore/kvstore.hpp"
+#include "mg_procedure.h"
 #include "query/exceptions.hpp"
 #include "replication/config.hpp"
 #include "replication/replication_server.hpp"
@@ -53,6 +54,7 @@ extern const Event SnapshotCreationLatency_us;
 
 extern const Event ActiveLabelIndices;
 extern const Event ActiveLabelPropertyIndices;
+extern const Event ActiveTextIndices;
 }  // namespace memgraph::metrics
 
 namespace memgraph::storage {
@@ -63,6 +65,7 @@ struct IndicesInfo {
   std::vector<LabelId> label;
   std::vector<std::pair<LabelId, PropertyId>> label_property;
   std::vector<EdgeTypeId> edge_type;
+  std::vector<std::pair<std::string, LabelId>> text_indices;
 };
 
 struct ConstraintsInfo {
@@ -78,6 +81,7 @@ struct StorageInfo {
   uint64_t disk_usage;
   uint64_t label_indices;
   uint64_t label_property_indices;
+  uint64_t text_indices;
   uint64_t existence_constraints;
   uint64_t unique_constraints;
   StorageMode storage_mode;
@@ -95,6 +99,7 @@ static inline nlohmann::json ToJson(const StorageInfo &info) {
   res["disk"] = info.disk_usage;
   res["label_indices"] = info.label_indices;
   res["label_prop_indices"] = info.label_property_indices;
+  res["text_indices"] = info.text_indices;
   res["existence_constraints"] = info.existence_constraints;
   res["unique_constraints"] = info.unique_constraints;
   res["storage_mode"] = storage::StorageModeToString(info.storage_mode);
@@ -232,6 +237,28 @@ class Storage {
 
     virtual bool EdgeTypeIndexExists(EdgeTypeId edge_type) const = 0;
 
+    bool TextIndexExists(const std::string &index_name) const {
+      return storage_->indices_.text_index_.IndexExists(index_name);
+    }
+
+    void TextIndexAddVertex(const VertexAccessor &vertex) {
+      storage_->indices_.text_index_.AddNode(vertex.vertex_, storage_->name_id_mapper_.get());
+    }
+
+    void TextIndexUpdateVertex(const VertexAccessor &vertex, const std::vector<LabelId> &removed_labels = {}) {
+      storage_->indices_.text_index_.UpdateNode(vertex.vertex_, storage_->name_id_mapper_.get(), removed_labels);
+    }
+
+    std::vector<Gid> TextIndexSearch(const std::string &index_name, const std::string &search_query,
+                                     text_search_mode search_mode) const {
+      return storage_->indices_.text_index_.Search(index_name, search_query, search_mode);
+    }
+
+    std::string TextIndexAggregate(const std::string &index_name, const std::string &search_query,
+                                   const std::string &aggregation_query) const {
+      return storage_->indices_.text_index_.Aggregate(index_name, search_query, aggregation_query);
+    }
+
     virtual IndicesInfo ListAllIndices() const = 0;
 
     virtual ConstraintsInfo ListAllConstraints() const = 0;
@@ -286,6 +313,10 @@ class Storage {
 
     virtual utils::BasicResult<StorageIndexDefinitionError, void> DropIndex(EdgeTypeId edge_type) = 0;
 
+    void CreateTextIndex(const std::string &index_name, LabelId label, query::DbAccessor *db);
+
+    void DropTextIndex(const std::string &index_name);
+
     virtual utils::BasicResult<StorageExistenceConstraintDefinitionError, void> CreateExistenceConstraint(
         LabelId label, PropertyId property) = 0;
 
diff --git a/src/utils/event_counter.cpp b/src/utils/event_counter.cpp
index 54ff4ed5c..7b1579a93 100644
--- a/src/utils/event_counter.cpp
+++ b/src/utils/event_counter.cpp
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -60,6 +60,7 @@
                                                                                                                      \
   M(ActiveLabelIndices, Index, "Number of active label indices in the system.")                                      \
   M(ActiveLabelPropertyIndices, Index, "Number of active label property indices in the system.")                     \
+  M(ActiveTextIndices, Index, "Number of active text indices in the system.")                                        \
                                                                                                                      \
   M(StreamsCreated, Stream, "Number of Streams created.")                                                            \
   M(MessagesConsumed, Stream, "Number of consumed streamed messages.")                                               \
diff --git a/src/utils/thread_pool.cpp b/src/utils/thread_pool.cpp
index 8d16e6c0b..215cca35c 100644
--- a/src/utils/thread_pool.cpp
+++ b/src/utils/thread_pool.cpp
@@ -1,4 +1,4 @@
-// Copyright 2022 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -10,7 +10,6 @@
 // licenses/APL.txt.
 
 #include "utils/thread_pool.hpp"
-
 namespace memgraph::utils {
 
 ThreadPool::ThreadPool(const size_t pool_size) {
diff --git a/src/utils/typeinfo.hpp b/src/utils/typeinfo.hpp
index 77910f731..aeb62d2c1 100644
--- a/src/utils/typeinfo.hpp
+++ b/src/utils/typeinfo.hpp
@@ -187,6 +187,7 @@ enum class TypeId : uint64_t {
   AST_PROFILE_QUERY,
   AST_INDEX_QUERY,
   AST_EDGE_INDEX_QUERY,
+  AST_TEXT_INDEX_QUERY,
   AST_CREATE,
   AST_CALL_PROCEDURE,
   AST_MATCH,
diff --git a/tests/drivers/go/v5/docs_quick_start.go b/tests/drivers/go/v5/docs_quick_start.go
index 69805acc1..5788ed703 100644
--- a/tests/drivers/go/v5/docs_quick_start.go
+++ b/tests/drivers/go/v5/docs_quick_start.go
@@ -13,12 +13,13 @@ func handle_if_error(err error) {
   }
 
 func main() {
-    dbUri := "bolt://localhost:7687"
-    driver, err := neo4j.NewDriver(dbUri, neo4j.BasicAuth("", "", ""))
-    if err != nil {
-        log.Fatal("An error occurred opening conn: %s", err)
-    }
-    defer driver.Close()
+  fmt.Println("Started running docs_quick_start.go test")
+  dbUri := "bolt://localhost:7687"
+  driver, err := neo4j.NewDriver(dbUri, neo4j.BasicAuth("", "", ""))
+  if err != nil {
+    log.Fatal("An error occurred opening conn: %s", err)
+  }
+  defer driver.Close()
 
 	session := driver.NewSession(neo4j.SessionConfig{})
 	defer session.Close()
@@ -33,7 +34,7 @@ func main() {
 
 	_,err = session.WriteTransaction(testAll)
 	handle_if_error(err)
-	fmt.Println("All ok!")
+	fmt.Println("doc_quick_start.go test finished successfully.")
 }
 
 func clearDatabase(tx neo4j.Transaction) (interface{}, error) {
@@ -75,15 +76,14 @@ func testAll(tx neo4j.Transaction) (interface{}, error) {
 	handle_if_error(err)
 	age, err := neo4j.GetProperty[int64](node_value, "age")
 	handle_if_error(err)
-  
+
 	if label != "Person" && name != "Alice" && age != 22 {
 	  return nil, fmt.Errorf("Data doesn't match.")
 	}
-  
+
 	fmt.Println("Label", label)
 	fmt.Println("name", name)
 	fmt.Println("age", age)
 
     return result.Consume()
 }
-
diff --git a/tests/drivers/go/v5/go.mod b/tests/drivers/go/v5/go.mod
index a44baf405..f05f98dc6 100644
--- a/tests/drivers/go/v5/go.mod
+++ b/tests/drivers/go/v5/go.mod
@@ -3,6 +3,6 @@ module bolt-test
 go 1.18
 
 require (
-	github.com/neo4j/neo4j-go-driver/v5 v5.13.0 // indirect
+	github.com/neo4j/neo4j-go-driver/v5 v5.18.0 // indirect
 	golang.org/dl v0.0.0-20230502172222-5216546bad51 // indirect
 )
diff --git a/tests/drivers/go/v5/go.sum b/tests/drivers/go/v5/go.sum
index dc85aef95..1c956d94a 100644
--- a/tests/drivers/go/v5/go.sum
+++ b/tests/drivers/go/v5/go.sum
@@ -8,5 +8,7 @@ github.com/neo4j/neo4j-go-driver/v5 v5.9.0 h1:TYxT0RSiwnvVFia90V7TLnRXv8HkdQQ6rT
 github.com/neo4j/neo4j-go-driver/v5 v5.9.0/go.mod h1:Vff8OwT7QpLm7L2yYr85XNWe9Rbqlbeb9asNXJTHO4k=
 github.com/neo4j/neo4j-go-driver/v5 v5.13.0 h1:NmyUxh4LYTdcJdI6EnazHyUKu1f0/BPiHCYUZUZIGQw=
 github.com/neo4j/neo4j-go-driver/v5 v5.13.0/go.mod h1:Vff8OwT7QpLm7L2yYr85XNWe9Rbqlbeb9asNXJTHO4k=
+github.com/neo4j/neo4j-go-driver/v5 v5.18.0 h1:3dmYsCYt/Fc/bPeSyGRGGfn/T6h06/OmHm72OFQKa3c=
+github.com/neo4j/neo4j-go-driver/v5 v5.18.0/go.mod h1:Vff8OwT7QpLm7L2yYr85XNWe9Rbqlbeb9asNXJTHO4k=
 golang.org/dl v0.0.0-20230502172222-5216546bad51 h1:Bmo/kmR2hzyhGt3jjtl1ghkCqa5LINbB9D3QTkiLJIY=
 golang.org/dl v0.0.0-20230502172222-5216546bad51/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
diff --git a/tests/drivers/go/v5/read_routing.go b/tests/drivers/go/v5/read_routing.go
new file mode 100644
index 000000000..e8c2ffba2
--- /dev/null
+++ b/tests/drivers/go/v5/read_routing.go
@@ -0,0 +1,51 @@
+package main
+
+import (
+	"fmt"
+	"github.com/neo4j/neo4j-go-driver/v5/neo4j"
+)
+
+func read_messages(uri string) {
+	username := ""
+	password := ""
+
+	// Connect to Memgraph
+	driver, err := neo4j.NewDriver(uri, neo4j.BasicAuth(username, password, ""))
+	if err != nil {
+		panic(err)
+	}
+	defer driver.Close()
+
+	// Use AccessModeRead for read transactions
+	session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
+	defer session.Close()
+
+	greeting, err := session.ReadTransaction(func(transaction neo4j.Transaction) (interface{}, error) {
+		result, err := transaction.Run("MATCH (n:Greeting) RETURN n.message AS message LIMIT 1", nil)
+		if err != nil {
+			return nil, err
+		}
+
+		if result.Next() {
+			return result.Record().Values[0], nil
+		}
+
+		return nil, result.Err()
+	})
+
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(greeting)
+
+}
+
+// Test checks that you can use bolt+routing for connecting to main and coordinators for reading.
+func main() {
+	fmt.Println("Started running read_routing.go test")
+	read_messages("neo4j://localhost:7690") // coordinator_1
+	read_messages("neo4j://localhost:7691") // coordinator_2
+	read_messages("neo4j://localhost:7692") // coordinator_3
+	fmt.Println("Successfully finished running read_routing.go test")
+}
diff --git a/tests/drivers/go/v5/run.sh b/tests/drivers/go/v5/run.sh
index cbe31bd26..344495f15 100755
--- a/tests/drivers/go/v5/run.sh
+++ b/tests/drivers/go/v5/run.sh
@@ -18,4 +18,3 @@ done
 
 go get github.com/neo4j/neo4j-go-driver/v5
 go run docs_quick_start.go
-# go run parallel_edge_import.go
diff --git a/tests/drivers/go/v5/run_cluster_tests.sh b/tests/drivers/go/v5/run_cluster_tests.sh
new file mode 100755
index 000000000..9ccd7b0c0
--- /dev/null
+++ b/tests/drivers/go/v5/run_cluster_tests.sh
@@ -0,0 +1,21 @@
+#!/bin/bash -e
+
+GO_VERSION="1.18.9"
+GO_VERSION_DIR="/opt/go$GO_VERSION"
+if [ -f "$GO_VERSION_DIR/go/bin/go" ]; then
+    export GOROOT="$GO_VERSION_DIR/go"
+    export GOPATH="$HOME/go$GO_VERSION"
+    export PATH="$GO_VERSION_DIR/go/bin:$PATH"
+fi
+
+# check if go is installed
+for i in go; do
+  if ! which $i >/dev/null; then
+    echo "Please install $i!"
+    exit 1
+  fi
+done
+
+go get github.com/neo4j/neo4j-go-driver/v5
+go run write_routing.go
+go run read_routing.go
diff --git a/tests/drivers/go/v5/write_routing.go b/tests/drivers/go/v5/write_routing.go
new file mode 100644
index 000000000..f77dd29ca
--- /dev/null
+++ b/tests/drivers/go/v5/write_routing.go
@@ -0,0 +1,51 @@
+package main
+
+import (
+	"fmt"
+	"github.com/neo4j/neo4j-go-driver/v5/neo4j"
+)
+
+func create_message(uri string) {
+	username := ""
+	password := ""
+
+	// Connect to Memgraph
+	driver, err := neo4j.NewDriver(uri, neo4j.BasicAuth(username, password, ""))
+	if err != nil {
+		panic(err)
+	}
+	defer driver.Close()
+
+	session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
+	defer session.Close()
+
+	greeting, err := session.WriteTransaction(func(transaction neo4j.Transaction) (interface{}, error) {
+		result, err := transaction.Run("CREATE (n:Greeting) SET n.message = $message RETURN n.message", map[string]interface{}{
+			"message": "Hello, World!",
+		})
+		if err != nil {
+			return nil, err
+		}
+
+		if result.Next() {
+			return result.Record().Values[0], nil
+		}
+
+		return nil, result.Err()
+	})
+
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(greeting)
+}
+
+// Test checks that you can use bolt+routing for connecting to main and coordinators for writing.
+func main() {
+	fmt.Println("Started running write_routing.go test")
+	create_message("neo4j://localhost:7690") // coordinator_1
+	create_message("neo4j://localhost:7691") // coordinator_2
+	create_message("neo4j://localhost:7692") // coordinator_3
+	fmt.Println("Successfully finished running write_routing.go test")
+}
diff --git a/tests/drivers/java/v5_8/pom.xml b/tests/drivers/java/v5_8/pom.xml
index 6db6a6ded..6db821683 100644
--- a/tests/drivers/java/v5_8/pom.xml
+++ b/tests/drivers/java/v5_8/pom.xml
@@ -104,6 +104,45 @@
                             <goal>single</goal>
                         </goals>
                     </execution>
+                    <execution>
+                        <id>build-e</id>
+                        <configuration>
+                            <archive>
+                                <manifest>
+                                    <mainClass>memgraph.WriteRouting</mainClass>
+                                </manifest>
+                            </archive>
+                            <descriptorRefs>
+                                <descriptorRef>jar-with-dependencies</descriptorRef>
+                            </descriptorRefs>
+                            <appendAssemblyId>false</appendAssemblyId>
+                            <finalName>WriteRouting</finalName>
+                        </configuration>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>single</goal>
+                        </goals>
+                      </execution>
+                      <execution>
+                        <id>build-f</id>
+                        <configuration>
+                            <archive>
+                                <manifest>
+                                    <mainClass>memgraph.ReadRouting</mainClass>
+                                </manifest>
+                            </archive>
+                            <descriptorRefs>
+                                <descriptorRef>jar-with-dependencies</descriptorRef>
+                            </descriptorRefs>
+                            <appendAssemblyId>false</appendAssemblyId>
+                            <finalName>ReadRouting</finalName>
+                        </configuration>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>single</goal>
+                        </goals>
+                    </execution>
+
                 </executions>
             </plugin>
         </plugins>
diff --git a/tests/drivers/java/v5_8/run.sh b/tests/drivers/java/v5_8/run.sh
index 03400e385..cb3ebb2ca 100755
--- a/tests/drivers/java/v5_8/run.sh
+++ b/tests/drivers/java/v5_8/run.sh
@@ -36,4 +36,3 @@ mvn clean package
 java -jar target/DocsHowToQuery.jar
 java -jar target/MaxQueryLength.jar
 java -jar target/Transactions.jar
-# java -jar target/ParallelEdgeImport.jar
diff --git a/tests/drivers/java/v5_8/run_cluster_tests.sh b/tests/drivers/java/v5_8/run_cluster_tests.sh
new file mode 100755
index 000000000..0b01d5de4
--- /dev/null
+++ b/tests/drivers/java/v5_8/run_cluster_tests.sh
@@ -0,0 +1,37 @@
+#!/bin/bash -e
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd "$DIR"
+
+if [ -d "/usr/lib/jvm/java-17-oracle" ]; then
+  export JAVA_HOME="/usr/lib/jvm/java-17-oracle"
+fi
+if [ -d "/usr/lib/jvm/java-17-openjdk-amd64" ]; then
+  export JAVA_HOME="/usr/lib/jvm/java-17-openjdk-amd64"
+fi
+if [ -d "/opt/apache-maven-3.9.3" ]; then
+  export M2_HOME="/opt/apache-maven-3.9.3"
+fi
+export PATH="$JAVA_HOME/bin:$M2_HOME/bin:$PATH"
+
+for i in java mvn; do
+    if ! which $i >/dev/null; then
+        echo "Please install $i!"
+        exit 1
+    fi
+done
+
+JAVA_VER=$(java -version 2>&1 >/dev/null | grep 'version' | cut -d "\"" -f2 | cut -d "." -f1)
+if [ $JAVA_VER -ne 17 ]
+then
+    echo "neo4j-java-driver v5.8 requires Java 17. Please install it!"
+    exit 1
+fi
+
+# CentOS 7 doesn't have Java version that supports var keyword
+source ../../../../environment/util.sh
+
+mvn clean package
+
+java -jar target/WriteRouting.jar
+java -jar target/ReadRouting.jar
diff --git a/tests/drivers/java/v5_8/src/main/java/memgraph/ReadRouting.java b/tests/drivers/java/v5_8/src/main/java/memgraph/ReadRouting.java
new file mode 100644
index 000000000..b8654a890
--- /dev/null
+++ b/tests/drivers/java/v5_8/src/main/java/memgraph/ReadRouting.java
@@ -0,0 +1,35 @@
+package memgraph;
+
+import static org.neo4j.driver.Values.parameters;
+
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+import org.neo4j.driver.AuthTokens;
+import org.neo4j.driver.Driver;
+import org.neo4j.driver.GraphDatabase;
+import org.neo4j.driver.Session;
+import org.neo4j.driver.Transaction;
+
+public class ReadRouting {
+    private Driver driver;
+
+    private void readMessage(String uri) {
+        driver = GraphDatabase.driver(uri, AuthTokens.basic("", ""));
+        try (Session session = driver.session()) {
+            String greeting = session.readTransaction(tx -> {
+                var result = tx.run("MATCH (n:Greeting) RETURN n.message AS message");
+                System.out.println("Read txn passed!");
+                return "OK";
+            });
+        }
+    }
+
+    public static void main(String... args) {
+        System.out.println("Started running ReadRoutingTest...");
+        ReadRouting greeter = new ReadRouting();
+        greeter.readMessage("neo4j://localhost:7690"); // coordinator_1
+        greeter.readMessage("neo4j://localhost:7691"); // coordinator_2
+        greeter.readMessage("neo4j://localhost:7692"); // coordinator_3
+        System.out.println("All good!");
+    }
+}
diff --git a/tests/drivers/java/v5_8/src/main/java/memgraph/WriteRouting.java b/tests/drivers/java/v5_8/src/main/java/memgraph/WriteRouting.java
new file mode 100644
index 000000000..df3948558
--- /dev/null
+++ b/tests/drivers/java/v5_8/src/main/java/memgraph/WriteRouting.java
@@ -0,0 +1,44 @@
+package memgraph;
+
+import static org.neo4j.driver.Values.parameters;
+
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+import org.neo4j.driver.AuthTokens;
+import org.neo4j.driver.Config;
+import org.neo4j.driver.Driver;
+import org.neo4j.driver.GraphDatabase;
+import org.neo4j.driver.Result;
+import org.neo4j.driver.Session;
+import org.neo4j.driver.Transaction;
+import org.neo4j.driver.TransactionWork;
+import org.neo4j.driver.exceptions.ClientException;
+import org.neo4j.driver.exceptions.TransientException;
+
+public class WriteRouting {
+    private Driver driver;
+
+    private void createMessage(String uri) {
+        driver = GraphDatabase.driver(uri, AuthTokens.basic("", ""));
+        try (Session session = driver.session()) {
+            String greeting = session.writeTransaction(tx -> {
+                var result = tx.run("CREATE (n:Greeting) SET n.message = $message RETURN n.message",
+                                    parameters("message", "Hello, World!"));
+                if (result.hasNext()) {
+                    return result.single().get(0).asString();
+                }
+                throw new RuntimeException("No result found.");
+            });
+            System.out.println(greeting);
+        }
+    }
+
+    public static void main(String... args) {
+        System.out.println("Started running WriteRoutingTest...");
+        WriteRouting greeter = new WriteRouting();
+        greeter.createMessage("neo4j://localhost:7690"); // coordinator_1
+        greeter.createMessage("neo4j://localhost:7691"); // coordinator_2
+        greeter.createMessage("neo4j://localhost:7692"); // coordinator_3
+        System.out.println("All good!");
+    }
+}
diff --git a/tests/drivers/node/v5_8/read_routing.js b/tests/drivers/node/v5_8/read_routing.js
new file mode 100644
index 000000000..905b184d3
--- /dev/null
+++ b/tests/drivers/node/v5_8/read_routing.js
@@ -0,0 +1,59 @@
+const neo4j = require('neo4j-driver');
+
+function die() {
+  session.close();
+  driver.close();
+  process.exit(1);
+}
+
+function Neo4jService(uri) {
+  const driver = neo4j.driver(uri, neo4j.auth.basic("", ""));
+
+  async function readGreeting() {
+    const session = driver.session({ defaultAccessMode: neo4j.session.READ });
+    try {
+      const result = await session.readTransaction(tx =>
+        tx.run('MATCH (n:Greeting) RETURN n.message AS message')
+      );
+      console.log("Read txn finished");
+    } finally {
+      await session.close();
+    }
+  }
+
+  async function close() {
+    await driver.close();
+  }
+
+  return {
+    readGreeting,
+    close
+  };
+}
+
+async function readGreetingsFromUri(uri) {
+  const service = Neo4jService(uri);
+  await service.readGreeting();
+  await service.close();
+}
+
+async function main() {
+  console.log("Started reading route");
+  const uris = [
+    'neo4j://localhost:7690',
+    'neo4j://localhost:7691',
+    'neo4j://localhost:7692'
+  ];
+
+  try {
+    for (const uri of uris) {
+      await readGreetingsFromUri(uri);
+    }
+  } catch (error) {
+    console.error('An error occurred:', error);
+    die();
+  }
+  console.log("Finished reading route");
+}
+
+main().catch(error => console.error(error));
diff --git a/tests/drivers/node/v5_8/run.sh b/tests/drivers/node/v5_8/run.sh
index 276fdbb2b..a24c5110c 100755
--- a/tests/drivers/node/v5_8/run.sh
+++ b/tests/drivers/node/v5_8/run.sh
@@ -15,4 +15,3 @@ fi
 
 node docs_how_to_query.js
 node max_query_length.js
-# node parallel_edge_import.js
diff --git a/tests/drivers/node/v5_8/run_cluster_tests.sh b/tests/drivers/node/v5_8/run_cluster_tests.sh
new file mode 100755
index 000000000..3f4fee5ff
--- /dev/null
+++ b/tests/drivers/node/v5_8/run_cluster_tests.sh
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd "$DIR"
+
+if ! which node >/dev/null; then
+    echo "Please install nodejs!"
+    exit 1
+fi
+
+if [ ! -d node_modules ]; then
+    # Driver generated with: `npm install neo4j-driver`
+    npm install --no-package-lock --no-save neo4j-driver@5.8.0
+fi
+
+node write_routing.js
+node read_routing.js
diff --git a/tests/drivers/node/v5_8/write_routing.js b/tests/drivers/node/v5_8/write_routing.js
new file mode 100644
index 000000000..fdb4b74d9
--- /dev/null
+++ b/tests/drivers/node/v5_8/write_routing.js
@@ -0,0 +1,59 @@
+const neo4j = require('neo4j-driver');
+
+function die() {
+  // Exit non-zero so the test harness registers the failure. The previous
+  // body referenced undefined `session`/`driver`, throwing a ReferenceError
+  // before exit(1) could run (and main's .catch only logs, exiting 0).
+  process.exit(1);
+}
+
+function Neo4jService(uri) {
+  const driver = neo4j.driver(uri, neo4j.auth.basic("", ""));
+
+  async function createGreeting() {
+    const session = driver.session({ defaultAccessMode: neo4j.session.WRITE });
+    try {
+      const result = await session.writeTransaction(tx =>
+        tx.run('CREATE (n:Greeting {message: "Hello NodeJs"}) RETURN n.message AS message')
+      );
+      console.log("Write txn finished");
+    } finally {
+      await session.close();
+    }
+  }
+
+  async function close() {
+    await driver.close();
+  }
+
+  return {
+    createGreeting,
+    close
+  };
+}
+
+async function createGreetingsFromUri(uri) {
+  const service = Neo4jService(uri);
+  await service.createGreeting();
+  await service.close();
+}
+
+async function main() {
+  console.log("Started writing route");
+  const uris = [
+    'neo4j://localhost:7690',
+    'neo4j://localhost:7691',
+    'neo4j://localhost:7692'
+  ];
+
+  try {
+    for (const uri of uris) {
+      await createGreetingsFromUri(uri);
+    }
+  } catch (error) {
+    console.error('An error occurred:', error);
+    die();
+  }
+  console.log("Finished writing route");
+}
+
+main().catch(error => console.error(error));
diff --git a/tests/drivers/python/v5_8/read_routing.py b/tests/drivers/python/v5_8/read_routing.py
new file mode 100644
index 000000000..b08982aa3
--- /dev/null
+++ b/tests/drivers/python/v5_8/read_routing.py
@@ -0,0 +1,41 @@
+from neo4j import GraphDatabase
+
+
+class Neo4jService:
+    def __init__(self, uri, user="", password=""):
+        self.driver = GraphDatabase.driver(uri, auth=(user, password))
+
+    def close(self):
+        self.driver.close()
+
+    def read_greeting(self):
+        with self.driver.session() as session:
+            session.execute_read(self._read_greeting)
+            print("Read txn passed!")
+
+    @staticmethod
+    def _read_greeting(tx):
+        tx.run("MATCH (n:Greeting) RETURN n.message AS message")
+
+
+def read_greetings_from_uri(uri):
+    service = Neo4jService(uri)
+    service.read_greeting()
+    service.close()
+
+
+def main():
+    print("Started reading route")
+    uris = ["neo4j://localhost:7690", "neo4j://localhost:7691", "neo4j://localhost:7692"]
+
+    try:
+        for uri in uris:
+            read_greetings_from_uri(uri)
+    except Exception as error:
+        print(f"An error occurred: {error}")
+        exit(-1)
+    print("Finished reading route")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tests/drivers/python/v5_8/run_cluster_tests.sh b/tests/drivers/python/v5_8/run_cluster_tests.sh
new file mode 100755
index 000000000..f22c1a8da
--- /dev/null
+++ b/tests/drivers/python/v5_8/run_cluster_tests.sh
@@ -0,0 +1,25 @@
+#!/bin/bash -e
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd "$DIR"
+
+# system check
+if ! which virtualenv >/dev/null; then
+    echo "Please install virtualenv!"
+    exit 1
+fi
+
+# setup virtual environment
+if [ ! -d "ve3" ]; then
+    virtualenv -p python3 ve3 || exit 1
+    source ve3/bin/activate
+    python3 -m pip install neo4j==5.8.0 || exit 1
+    deactivate
+fi
+
+# activate virtualenv
+source ve3/bin/activate
+
+# execute test
+python3 write_routing.py || exit 1
+python3 read_routing.py || exit 1
diff --git a/tests/drivers/python/v5_8/write_routing.py b/tests/drivers/python/v5_8/write_routing.py
new file mode 100644
index 000000000..427d6e6f2
--- /dev/null
+++ b/tests/drivers/python/v5_8/write_routing.py
@@ -0,0 +1,41 @@
+from neo4j import GraphDatabase
+
+
+class Neo4jService:
+    def __init__(self, uri, user="", password=""):
+        self.driver = GraphDatabase.driver(uri, auth=(user, password))
+
+    def close(self):
+        self.driver.close()
+
+    def create_greeting(self):
+        with self.driver.session() as session:
+            session.execute_write(self._create_and_return_greeting)
+            print("Write txn passed!")
+
+    @staticmethod
+    def _create_and_return_greeting(tx):
+        tx.run("CREATE (n:Greeting {message: 'Hello from Python'}) RETURN n.message AS message")
+
+
+def create_greetings_from_uri(uri):
+    service = Neo4jService(uri)
+    service.create_greeting()
+    service.close()
+
+
+def main():
+    print("Started writing route")
+    uris = ["neo4j://localhost:7690", "neo4j://localhost:7691", "neo4j://localhost:7692"]
+
+    try:
+        for uri in uris:
+            create_greetings_from_uri(uri)
+    except Exception as error:
+        print(f"An error occurred: {error}")
+        exit(-1)
+    print("Finished writing route")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tests/drivers/run_cluster.sh b/tests/drivers/run_cluster.sh
new file mode 100755
index 000000000..6931c082b
--- /dev/null
+++ b/tests/drivers/run_cluster.sh
@@ -0,0 +1,203 @@
+#!/bin/bash
+
+pushd () { command pushd "$@" > /dev/null; }
+popd () { command popd "$@" > /dev/null; }
+
+function wait_for_server {
+    port=$1
+    while ! nc -z -w 1 127.0.0.1 $port; do
+        sleep 0.1
+    done
+    sleep 1
+}
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd "$DIR"
+
+# create a temporary directory.
+tmpdir=/tmp/memgraph_drivers
+if [ -d $tmpdir ]; then
+    rm -rf $tmpdir
+fi
+
+mkdir -p $tmpdir/logs
+
+# find memgraph binaries.
+binary_dir="$DIR/../../build"
+
+# Start instance_1
+$binary_dir/memgraph \
+    --bolt-port=7687 \
+    --data-directory=$tmpdir/instance_1/ \
+    --query-execution-timeout-sec=5 \
+    --bolt-session-inactivity-timeout=10 \
+    --bolt-server-name-for-init="Neo4j/1.1" \
+    --bolt-cert-file="" \
+    --log-file=$tmpdir/logs/instance1.log \
+    --also-log-to-stderr \
+    --management-port=10011 \
+    --experimental-enabled=high-availability \
+    --log-level ERROR &
+pid_instance_1=$!
+wait_for_server 7687
+
+# Start instance_2
+$binary_dir/memgraph \
+    --bolt-port=7688 \
+    --data-directory=$tmpdir/instance_2 \
+    --query-execution-timeout-sec=5 \
+    --bolt-session-inactivity-timeout=10 \
+    --bolt-server-name-for-init="Neo4j/1.1" \
+    --bolt-cert-file="" \
+    --log-file=$tmpdir/logs/instance2.log \
+    --also-log-to-stderr \
+    --management-port=10012 \
+    --experimental-enabled=high-availability \
+    --log-level ERROR &
+pid_instance_2=$!
+wait_for_server 7688
+
+# Start instance_3
+$binary_dir/memgraph \
+    --bolt-port=7689 \
+    --data-directory=$tmpdir/instance_3 \
+    --query-execution-timeout-sec=5 \
+    --bolt-session-inactivity-timeout=10 \
+    --bolt-server-name-for-init="Neo4j/1.1" \
+    --bolt-cert-file="" \
+    --log-file=$tmpdir/logs/instance3.log \
+    --also-log-to-stderr \
+    --management-port=10013 \
+    --experimental-enabled=high-availability \
+    --log-level ERROR &
+pid_instance_3=$!
+wait_for_server 7689
+
+
+# Start coordinator_1
+$binary_dir/memgraph \
+    --bolt-port=7690 \
+    --data-directory=$tmpdir/coordinator_1 \
+    --query-execution-timeout-sec=5 \
+    --bolt-session-inactivity-timeout=10 \
+    --bolt-server-name-for-init="Neo4j/1.1" \
+    --bolt-cert-file="" \
+    --log-file=$tmpdir/logs/coordinator1.log \
+    --also-log-to-stderr \
+    --coordinator-id=1 \
+    --coordinator-port=10111 \
+    --experimental-enabled=high-availability \
+    --log-level ERROR &
+pid_coordinator_1=$!
+wait_for_server 7690
+
+# Start coordinator_2
+$binary_dir/memgraph \
+    --bolt-port=7691 \
+    --data-directory=$tmpdir/coordinator_2 \
+    --query-execution-timeout-sec=5 \
+    --bolt-session-inactivity-timeout=10 \
+    --bolt-server-name-for-init="Neo4j/1.1" \
+    --bolt-cert-file="" \
+    --log-file=$tmpdir/logs/coordinator2.log \
+    --also-log-to-stderr \
+    --coordinator-id=2 \
+    --coordinator-port=10112 \
+    --experimental-enabled=high-availability \
+    --log-level ERROR &
+pid_coordinator_2=$!
+wait_for_server 7691
+
+# Start coordinator_3
+$binary_dir/memgraph \
+    --bolt-port=7692 \
+    --data-directory=$tmpdir/coordinator_3 \
+    --query-execution-timeout-sec=5 \
+    --bolt-session-inactivity-timeout=10 \
+    --bolt-server-name-for-init="Neo4j/1.1" \
+    --bolt-cert-file="" \
+    --log-file=$tmpdir/logs/coordinator3.log \
+    --also-log-to-stderr \
+    --coordinator-id=3 \
+    --coordinator-port=10113 \
+    --experimental-enabled=high-availability \
+    --log-level ERROR &
+pid_coordinator_3=$!
+wait_for_server 7692
+
+sleep 5
+
+echo 'ADD COORDINATOR 2 WITH CONFIG {"bolt_server": "127.0.0.1:7691", "coordinator_server":  "127.0.0.1:10112"};' | $binary_dir/bin/mgconsole --port 7690
+echo 'ADD COORDINATOR 3 WITH CONFIG {"bolt_server": "127.0.0.1:7692", "coordinator_server":  "127.0.0.1:10113"};' | $binary_dir/bin/mgconsole --port 7690
+echo 'REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": "127.0.0.1:7687", "management_server": "127.0.0.1:10011", "replication_server": "127.0.0.1:10001"};'  | $binary_dir/bin/mgconsole --port 7690
+echo 'REGISTER INSTANCE instance_2 WITH CONFIG {"bolt_server": "127.0.0.1:7688", "management_server": "127.0.0.1:10012", "replication_server": "127.0.0.1:10002"};'  | $binary_dir/bin/mgconsole --port 7690
+echo 'REGISTER INSTANCE instance_3 WITH CONFIG {"bolt_server": "127.0.0.1:7689", "management_server": "127.0.0.1:10013", "replication_server": "127.0.0.1:10003"};'  | $binary_dir/bin/mgconsole --port 7690
+echo 'SET INSTANCE instance_1 TO MAIN;' | $binary_dir/bin/mgconsole --port 7690
+
+
+code_test=0
+for lang in *; do
+    if [ ! -d $lang ]; then continue; fi
+    pushd $lang
+    echo "Running tests for language: $lang"
+    for version in *; do
+        if [ ! -d $version ]; then continue; fi
+        pushd $version
+        if [ -f "run_cluster_tests.sh" ]; then
+            echo "Running version: $version"
+            ./run_cluster_tests.sh
+            code_test=$?
+            if [ $code_test -ne 0 ]; then
+                echo "FAILED: $lang-$version"
+                popd; popd; break 2
+            fi
+        fi
+        popd
+    done;
+    popd
+done
+
+
+# Function to stop a process by PID and check its exit code
+stop_process() {
+    local pid=$1 # Capture the PID from the first argument
+
+    # Stop the process
+    kill $pid
+    wait $pid
+    local exit_code=$? # Capture the exit code
+
+    # Check the process's exit code
+    if [ $exit_code -ne 0 ]; then
+        echo "The process with PID $pid didn't terminate properly!"
+        exit $exit_code
+    else
+        echo "Process with PID $pid terminated successfully."
+    fi
+}
+
+echo "Stopping coordinator1"
+stop_process $pid_coordinator_1
+echo "Stopping coordinator2"
+stop_process $pid_coordinator_2
+echo "Stopping coordinator3"
+stop_process $pid_coordinator_3
+
+echo "Stopping instance1"
+stop_process $pid_instance_1
+echo "Stopping instance2"
+stop_process $pid_instance_2
+echo "Stopping instance3"
+stop_process $pid_instance_3
+
+
+# Check test exit code.
+if [ $code_test -ne 0 ]; then
+    echo "One of the tests failed!"
+    exit $code_test
+fi
+
+# Temporary directory cleanup.
+if [ -d $tmpdir ]; then
+    rm -rf $tmpdir
+fi
diff --git a/tests/e2e/configuration/default_config.py b/tests/e2e/configuration/default_config.py
index 8213e73bb..8e84f080a 100644
--- a/tests/e2e/configuration/default_config.py
+++ b/tests/e2e/configuration/default_config.py
@@ -59,9 +59,9 @@ startup_config_dict = {
         "Time in seconds after which inactive Bolt sessions will be closed.",
     ),
     "cartesian_product_enabled": ("true", "true", "Enable cartesian product expansion."),
-    "coordinator_server_port": ("0", "0", "Port on which coordinator servers will be started."),
-    "raft_server_port": ("0", "0", "Port on which raft servers will be started."),
-    "raft_server_id": ("0", "0", "Unique ID of the raft server."),
+    "management_port": ("0", "0", "Port on which coordinator servers will be started."),
+    "coordinator_port": ("0", "0", "Port on which raft servers will be started."),
+    "coordinator_id": ("0", "0", "Unique ID of the raft server."),
     "instance_down_timeout_sec": ("5", "5", "Time duration after which an instance is considered down."),
     "instance_health_check_frequency_sec": ("1", "1", "The time duration between two health checks/pings."),
     "instance_get_uuid_frequency_sec": ("10", "10", "The time duration between two instance uuid checks."),
@@ -236,6 +236,6 @@ startup_config_dict = {
     "experimental_enabled": (
         "",
         "",
-        "Experimental features to be used, comma seperated. Options [system-replication, high-availability]",
+        "Experimental features to be used, comma-separated. Options [system-replication, text-search, high-availability]",
     ),
 }
diff --git a/tests/e2e/high_availability/common.py b/tests/e2e/high_availability/common.py
index 2157b29ca..adfabd87a 100644
--- a/tests/e2e/high_availability/common.py
+++ b/tests/e2e/high_availability/common.py
@@ -30,14 +30,3 @@ def safe_execute(function, *args):
         function(*args)
     except:
         pass
-
-
-# NOTE: Repeated execution because it can fail if Raft server is not up
-def add_coordinator(cursor, query):
-    for _ in range(10):
-        try:
-            execute_and_fetch_all(cursor, query)
-            return True
-        except Exception:
-            pass
-    return False
diff --git a/tests/e2e/high_availability/coord_cluster_registration.py b/tests/e2e/high_availability/coord_cluster_registration.py
index 13aaf27fe..ffb51e0e1 100644
--- a/tests/e2e/high_availability/coord_cluster_registration.py
+++ b/tests/e2e/high_availability/coord_cluster_registration.py
@@ -16,7 +16,7 @@ import tempfile
 
 import interactive_mg_runner
 import pytest
-from common import add_coordinator, connect, execute_and_fetch_all, safe_execute
+from common import connect, execute_and_fetch_all, safe_execute
 from mg_utils import mg_sleep_and_assert
 
 interactive_mg_runner.SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
@@ -36,7 +36,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "7687",
             "--log-level",
             "TRACE",
-            "--coordinator-server-port",
+            "--management-port",
             "10011",
         ],
         "log_file": "instance_1.log",
@@ -50,7 +50,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "7688",
             "--log-level",
             "TRACE",
-            "--coordinator-server-port",
+            "--management-port",
             "10012",
         ],
         "log_file": "instance_2.log",
@@ -64,7 +64,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "7689",
             "--log-level",
             "TRACE",
-            "--coordinator-server-port",
+            "--management-port",
             "10013",
         ],
         "log_file": "instance_3.log",
@@ -77,8 +77,8 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "--bolt-port",
             "7690",
             "--log-level=TRACE",
-            "--raft-server-id=1",
-            "--raft-server-port=10111",
+            "--coordinator-id=1",
+            "--coordinator-port=10111",
         ],
         "log_file": "coordinator1.log",
         "setup_queries": [],
@@ -89,8 +89,8 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "--bolt-port",
             "7691",
             "--log-level=TRACE",
-            "--raft-server-id=2",
-            "--raft-server-port=10112",
+            "--coordinator-id=2",
+            "--coordinator-port=10112",
         ],
         "log_file": "coordinator2.log",
         "setup_queries": [],
@@ -101,8 +101,8 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "--bolt-port",
             "7692",
             "--log-level=TRACE",
-            "--raft-server-id=3",
-            "--raft-server-port=10113",
+            "--coordinator-id=3",
+            "--coordinator-port=10113",
         ],
         "log_file": "coordinator3.log",
         "setup_queries": [],
@@ -110,147 +110,150 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
 }
 
 
-def test_register_repl_instances_then_coordinators():
-    safe_execute(shutil.rmtree, TEMP_DIR)
-    interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION)
-
-    coordinator3_cursor = connect(host="localhost", port=7692).cursor()
-
-    execute_and_fetch_all(
-        coordinator3_cursor,
-        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
-    )
-    execute_and_fetch_all(
-        coordinator3_cursor,
-        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
-    )
-    execute_and_fetch_all(
-        coordinator3_cursor,
-        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
-    )
-    execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
-    assert add_coordinator(
-        coordinator3_cursor,
-        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
-    )
-    assert add_coordinator(
-        coordinator3_cursor,
-        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
-    )
-
-    def check_coordinator3():
-        return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES")))
-
-    expected_cluster_coord3 = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_1", "", "127.0.0.1:10011", "up", "replica"),
-        ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
-        ("instance_3", "", "127.0.0.1:10013", "up", "main"),
-    ]
-    mg_sleep_and_assert(expected_cluster_coord3, check_coordinator3)
-
-    coordinator1_cursor = connect(host="localhost", port=7690).cursor()
-
-    def check_coordinator1():
-        return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES")))
-
-    expected_cluster_shared = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_1", "", "", "unknown", "replica"),
-        ("instance_2", "", "", "unknown", "replica"),
-        ("instance_3", "", "", "unknown", "main"),
-    ]
-
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
-
-    coordinator2_cursor = connect(host="localhost", port=7691).cursor()
-
-    def check_coordinator2():
-        return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES")))
-
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
-
-
-def test_register_coordinator_then_repl_instances():
-    safe_execute(shutil.rmtree, TEMP_DIR)
-    interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION)
-
-    coordinator3_cursor = connect(host="localhost", port=7692).cursor()
-
-    assert add_coordinator(
-        coordinator3_cursor,
-        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
-    )
-    assert add_coordinator(
-        coordinator3_cursor,
-        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
-    )
-    execute_and_fetch_all(
-        coordinator3_cursor,
-        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
-    )
-    execute_and_fetch_all(
-        coordinator3_cursor,
-        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
-    )
-    execute_and_fetch_all(
-        coordinator3_cursor,
-        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
-    )
-    execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
-
-    def check_coordinator3():
-        return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES")))
-
-    expected_cluster_coord3 = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_1", "", "127.0.0.1:10011", "up", "replica"),
-        ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
-        ("instance_3", "", "127.0.0.1:10013", "up", "main"),
-    ]
-    mg_sleep_and_assert(expected_cluster_coord3, check_coordinator3)
-
-    coordinator1_cursor = connect(host="localhost", port=7690).cursor()
-
-    def check_coordinator1():
-        return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES")))
-
-    expected_cluster_shared = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_1", "", "", "unknown", "replica"),
-        ("instance_2", "", "", "unknown", "replica"),
-        ("instance_3", "", "", "unknown", "main"),
-    ]
-
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
-
-    coordinator2_cursor = connect(host="localhost", port=7691).cursor()
-
-    def check_coordinator2():
-        return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES")))
-
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
+# def test_register_repl_instances_then_coordinators():
+#     safe_execute(shutil.rmtree, TEMP_DIR)
+#     interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION)
+#
+#     coordinator3_cursor = connect(host="localhost", port=7692).cursor()
+#
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
+#     )
+#     execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
+#     )
+#
+#     def check_coordinator3():
+#         return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES")))
+#
+#     expected_cluster_coord3 = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_1", "", "127.0.0.1:10011", "up", "replica"),
+#         ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
+#         ("instance_3", "", "127.0.0.1:10013", "up", "main"),
+#     ]
+#     mg_sleep_and_assert(expected_cluster_coord3, check_coordinator3)
+#
+#     coordinator1_cursor = connect(host="localhost", port=7690).cursor()
+#
+#     def check_coordinator1():
+#         return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES")))
+#
+#     expected_cluster_shared = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_1", "", "", "unknown", "replica"),
+#         ("instance_2", "", "", "unknown", "replica"),
+#         ("instance_3", "", "", "unknown", "main"),
+#     ]
+#
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
+#
+#     coordinator2_cursor = connect(host="localhost", port=7691).cursor()
+#
+#     def check_coordinator2():
+#         return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES")))
+#
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
+#
+#
+# def test_register_coordinator_then_repl_instances():
+#     safe_execute(shutil.rmtree, TEMP_DIR)
+#     interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION)
+#
+#     coordinator3_cursor = connect(host="localhost", port=7692).cursor()
+#
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
+#     )
+#     execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
+#
+#     def check_coordinator3():
+#         return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES")))
+#
+#     expected_cluster_coord3 = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_1", "", "127.0.0.1:10011", "up", "replica"),
+#         ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
+#         ("instance_3", "", "127.0.0.1:10013", "up", "main"),
+#     ]
+#     mg_sleep_and_assert(expected_cluster_coord3, check_coordinator3)
+#
+#     coordinator1_cursor = connect(host="localhost", port=7690).cursor()
+#
+#     def check_coordinator1():
+#         return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES")))
+#
+#     expected_cluster_shared = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_1", "", "", "unknown", "replica"),
+#         ("instance_2", "", "", "unknown", "replica"),
+#         ("instance_3", "", "", "unknown", "main"),
+#     ]
+#
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
+#
+#     coordinator2_cursor = connect(host="localhost", port=7691).cursor()
+#
+#     def check_coordinator2():
+#         return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES")))
+#
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
 
 
 def test_coordinators_communication_with_restarts():
+    # 1. Remove any leftover data directory from a previous run.
     safe_execute(shutil.rmtree, TEMP_DIR)
+
+    # 2. Start all data instances and coordinators.
     interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION)
 
     coordinator3_cursor = connect(host="localhost", port=7692).cursor()
 
-    assert add_coordinator(
+    execute_and_fetch_all(
         coordinator3_cursor,
         "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
     )
-    assert add_coordinator(
+    execute_and_fetch_all(
         coordinator3_cursor,
         "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
     )
@@ -310,284 +313,284 @@ def test_coordinators_communication_with_restarts():
 
 
 # # TODO: (andi) Test when dealing with distributed coordinators that you can register on one coordinator and unregister from any other coordinator
-@pytest.mark.parametrize(
-    "kill_instance",
-    [True, False],
-)
-def test_unregister_replicas(kill_instance):
-    safe_execute(shutil.rmtree, TEMP_DIR)
-    interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION)
-
-    coordinator1_cursor = connect(host="localhost", port=7690).cursor()
-    coordinator2_cursor = connect(host="localhost", port=7691).cursor()
-    coordinator3_cursor = connect(host="localhost", port=7692).cursor()
-
-    assert add_coordinator(
-        coordinator3_cursor,
-        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
-    )
-    assert add_coordinator(
-        coordinator3_cursor,
-        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
-    )
-    execute_and_fetch_all(
-        coordinator3_cursor,
-        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
-    )
-    execute_and_fetch_all(
-        coordinator3_cursor,
-        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
-    )
-    execute_and_fetch_all(
-        coordinator3_cursor,
-        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
-    )
-    execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
-
-    def check_coordinator1():
-        return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES")))
-
-    def check_coordinator2():
-        return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES")))
-
-    def check_coordinator3():
-        return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES")))
-
-    main_cursor = connect(host="localhost", port=7689).cursor()
-
-    def check_main():
-        return sorted(list(execute_and_fetch_all(main_cursor, "SHOW REPLICAS")))
-
-    expected_cluster = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_1", "", "127.0.0.1:10011", "up", "replica"),
-        ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
-        ("instance_3", "", "127.0.0.1:10013", "up", "main"),
-    ]
-
-    expected_cluster_shared = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_1", "", "", "unknown", "replica"),
-        ("instance_2", "", "", "unknown", "replica"),
-        ("instance_3", "", "", "unknown", "main"),
-    ]
-
-    expected_replicas = [
-        (
-            "instance_1",
-            "127.0.0.1:10001",
-            "sync",
-            {"ts": 0, "behind": None, "status": "ready"},
-            {"memgraph": {"ts": 0, "behind": 0, "status": "ready"}},
-        ),
-        (
-            "instance_2",
-            "127.0.0.1:10002",
-            "sync",
-            {"ts": 0, "behind": None, "status": "ready"},
-            {"memgraph": {"ts": 0, "behind": 0, "status": "ready"}},
-        ),
-    ]
-
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
-    mg_sleep_and_assert(expected_cluster, check_coordinator3)
-    mg_sleep_and_assert(expected_replicas, check_main)
-
-    if kill_instance:
-        interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_1")
-    execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_1")
-
-    expected_cluster = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
-        ("instance_3", "", "127.0.0.1:10013", "up", "main"),
-    ]
-
-    expected_cluster_shared = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_2", "", "", "unknown", "replica"),
-        ("instance_3", "", "", "unknown", "main"),
-    ]
-
-    expected_replicas = [
-        (
-            "instance_2",
-            "127.0.0.1:10002",
-            "sync",
-            {"ts": 0, "behind": None, "status": "ready"},
-            {"memgraph": {"ts": 0, "behind": 0, "status": "ready"}},
-        ),
-    ]
-
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
-    mg_sleep_and_assert(expected_cluster, check_coordinator3)
-    mg_sleep_and_assert(expected_replicas, check_main)
-
-    if kill_instance:
-        interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_2")
-    execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_2")
-
-    expected_cluster = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_3", "", "127.0.0.1:10013", "up", "main"),
-    ]
-
-    expected_cluster_shared = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_3", "", "", "unknown", "main"),
-    ]
-    expected_replicas = []
-
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
-    mg_sleep_and_assert(expected_cluster, check_coordinator3)
-    mg_sleep_and_assert(expected_replicas, check_main)
-
-
-def test_unregister_main():
-    safe_execute(shutil.rmtree, TEMP_DIR)
-    interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION)
-
-    coordinator1_cursor = connect(host="localhost", port=7690).cursor()
-    coordinator2_cursor = connect(host="localhost", port=7691).cursor()
-    coordinator3_cursor = connect(host="localhost", port=7692).cursor()
-
-    assert add_coordinator(
-        coordinator3_cursor,
-        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
-    )
-    assert add_coordinator(
-        coordinator3_cursor,
-        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
-    )
-    execute_and_fetch_all(
-        coordinator3_cursor,
-        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
-    )
-    execute_and_fetch_all(
-        coordinator3_cursor,
-        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
-    )
-    execute_and_fetch_all(
-        coordinator3_cursor,
-        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
-    )
-    execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
-
-    def check_coordinator1():
-        return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES")))
-
-    def check_coordinator2():
-        return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES")))
-
-    def check_coordinator3():
-        return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES")))
-
-    expected_cluster = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_1", "", "127.0.0.1:10011", "up", "replica"),
-        ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
-        ("instance_3", "", "127.0.0.1:10013", "up", "main"),
-    ]
-
-    expected_cluster_shared = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_1", "", "", "unknown", "replica"),
-        ("instance_2", "", "", "unknown", "replica"),
-        ("instance_3", "", "", "unknown", "main"),
-    ]
-
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
-    mg_sleep_and_assert(expected_cluster, check_coordinator3)
-
-    try:
-        execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_3")
-    except Exception as e:
-        assert (
-            str(e)
-            == "Alive main instance can't be unregistered! Shut it down to trigger failover and then unregister it!"
-        )
-
-    interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_3")
-
-    expected_cluster = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_1", "", "127.0.0.1:10011", "up", "main"),
-        ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
-        ("instance_3", "", "127.0.0.1:10013", "down", "unknown"),
-    ]
-
-    expected_cluster_shared = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_1", "", "", "unknown", "main"),
-        ("instance_2", "", "", "unknown", "replica"),
-        ("instance_3", "", "", "unknown", "main"),
-    ]
-
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
-    mg_sleep_and_assert(expected_cluster, check_coordinator3)
-
-    execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_3")
-
-    expected_cluster = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_1", "", "127.0.0.1:10011", "up", "main"),
-        ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
-    ]
-
-    expected_cluster_shared = [
-        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
-        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
-        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
-        ("instance_1", "", "", "unknown", "main"),
-        ("instance_2", "", "", "unknown", "replica"),
-    ]
-
-    expected_replicas = [
-        (
-            "instance_2",
-            "127.0.0.1:10002",
-            "sync",
-            {"ts": 0, "behind": None, "status": "ready"},
-            {"memgraph": {"ts": 0, "behind": 0, "status": "ready"}},
-        ),
-    ]
-
-    main_cursor = connect(host="localhost", port=7687).cursor()
-
-    def check_main():
-        return sorted(list(execute_and_fetch_all(main_cursor, "SHOW REPLICAS")))
-
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
-    mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
-    mg_sleep_and_assert(expected_cluster, check_coordinator3)
-    mg_sleep_and_assert(expected_replicas, check_main)
+# @pytest.mark.parametrize(
+#     "kill_instance",
+#     [True, False],
+# )
+# def test_unregister_replicas(kill_instance):
+#     safe_execute(shutil.rmtree, TEMP_DIR)
+#     interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION)
+#
+#     coordinator1_cursor = connect(host="localhost", port=7690).cursor()
+#     coordinator2_cursor = connect(host="localhost", port=7691).cursor()
+#     coordinator3_cursor = connect(host="localhost", port=7692).cursor()
+#
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
+#     )
+#     execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
+#
+#     def check_coordinator1():
+#         return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES")))
+#
+#     def check_coordinator2():
+#         return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES")))
+#
+#     def check_coordinator3():
+#         return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES")))
+#
+#     main_cursor = connect(host="localhost", port=7689).cursor()
+#
+#     def check_main():
+#         return sorted(list(execute_and_fetch_all(main_cursor, "SHOW REPLICAS")))
+#
+#     expected_cluster = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_1", "", "127.0.0.1:10011", "up", "replica"),
+#         ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
+#         ("instance_3", "", "127.0.0.1:10013", "up", "main"),
+#     ]
+#
+#     expected_cluster_shared = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_1", "", "", "unknown", "replica"),
+#         ("instance_2", "", "", "unknown", "replica"),
+#         ("instance_3", "", "", "unknown", "main"),
+#     ]
+#
+#     expected_replicas = [
+#         (
+#             "instance_1",
+#             "127.0.0.1:10001",
+#             "sync",
+#             {"ts": 0, "behind": None, "status": "ready"},
+#             {"memgraph": {"ts": 0, "behind": 0, "status": "ready"}},
+#         ),
+#         (
+#             "instance_2",
+#             "127.0.0.1:10002",
+#             "sync",
+#             {"ts": 0, "behind": None, "status": "ready"},
+#             {"memgraph": {"ts": 0, "behind": 0, "status": "ready"}},
+#         ),
+#     ]
+#
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
+#     mg_sleep_and_assert(expected_cluster, check_coordinator3)
+#     mg_sleep_and_assert(expected_replicas, check_main)
+#
+#     if kill_instance:
+#         interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_1")
+#     execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_1")
+#
+#     expected_cluster = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
+#         ("instance_3", "", "127.0.0.1:10013", "up", "main"),
+#     ]
+#
+#     expected_cluster_shared = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_2", "", "", "unknown", "replica"),
+#         ("instance_3", "", "", "unknown", "main"),
+#     ]
+#
+#     expected_replicas = [
+#         (
+#             "instance_2",
+#             "127.0.0.1:10002",
+#             "sync",
+#             {"ts": 0, "behind": None, "status": "ready"},
+#             {"memgraph": {"ts": 0, "behind": 0, "status": "ready"}},
+#         ),
+#     ]
+#
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
+#     mg_sleep_and_assert(expected_cluster, check_coordinator3)
+#     mg_sleep_and_assert(expected_replicas, check_main)
+#
+#     if kill_instance:
+#         interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_2")
+#     execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_2")
+#
+#     expected_cluster = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_3", "", "127.0.0.1:10013", "up", "main"),
+#     ]
+#
+#     expected_cluster_shared = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_3", "", "", "unknown", "main"),
+#     ]
+#     expected_replicas = []
+#
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
+#     mg_sleep_and_assert(expected_cluster, check_coordinator3)
+#     mg_sleep_and_assert(expected_replicas, check_main)
+#
+#
+# def test_unregister_main():
+#     safe_execute(shutil.rmtree, TEMP_DIR)
+#     interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION)
+#
+#     coordinator1_cursor = connect(host="localhost", port=7690).cursor()
+#     coordinator2_cursor = connect(host="localhost", port=7691).cursor()
+#     coordinator3_cursor = connect(host="localhost", port=7692).cursor()
+#
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
+#     )
+#     execute_and_fetch_all(
+#         coordinator3_cursor,
+#         "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
+#     )
+#     execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
+#
+#     def check_coordinator1():
+#         return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES")))
+#
+#     def check_coordinator2():
+#         return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES")))
+#
+#     def check_coordinator3():
+#         return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES")))
+#
+#     expected_cluster = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_1", "", "127.0.0.1:10011", "up", "replica"),
+#         ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
+#         ("instance_3", "", "127.0.0.1:10013", "up", "main"),
+#     ]
+#
+#     expected_cluster_shared = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_1", "", "", "unknown", "replica"),
+#         ("instance_2", "", "", "unknown", "replica"),
+#         ("instance_3", "", "", "unknown", "main"),
+#     ]
+#
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
+#     mg_sleep_and_assert(expected_cluster, check_coordinator3)
+#
+#     try:
+#         execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_3")
+#     except Exception as e:
+#         assert (
+#             str(e)
+#             == "Alive main instance can't be unregistered! Shut it down to trigger failover and then unregister it!"
+#         )
+#
+#     interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_3")
+#
+#     expected_cluster = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_1", "", "127.0.0.1:10011", "up", "main"),
+#         ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
+#         ("instance_3", "", "127.0.0.1:10013", "down", "unknown"),
+#     ]
+#
+#     expected_cluster_shared = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_1", "", "", "unknown", "main"),
+#         ("instance_2", "", "", "unknown", "replica"),
+#         ("instance_3", "", "", "unknown", "main"),
+#     ]
+#
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
+#     mg_sleep_and_assert(expected_cluster, check_coordinator3)
+#
+#     execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_3")
+#
+#     expected_cluster = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_1", "", "127.0.0.1:10011", "up", "main"),
+#         ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
+#     ]
+#
+#     expected_cluster_shared = [
+#         ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+#         ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+#         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+#         ("instance_1", "", "", "unknown", "main"),
+#         ("instance_2", "", "", "unknown", "replica"),
+#     ]
+#
+#     expected_replicas = [
+#         (
+#             "instance_2",
+#             "127.0.0.1:10002",
+#             "sync",
+#             {"ts": 0, "behind": None, "status": "ready"},
+#             {"memgraph": {"ts": 0, "behind": 0, "status": "ready"}},
+#         ),
+#     ]
+#
+#     main_cursor = connect(host="localhost", port=7687).cursor()
+#
+#     def check_main():
+#         return sorted(list(execute_and_fetch_all(main_cursor, "SHOW REPLICAS")))
+#
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
+#     mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
+#     mg_sleep_and_assert(expected_cluster, check_coordinator3)
+#     mg_sleep_and_assert(expected_replicas, check_main)
 
 
 if __name__ == "__main__":
diff --git a/tests/e2e/high_availability/disable_writing_on_main_after_restart.py b/tests/e2e/high_availability/disable_writing_on_main_after_restart.py
index 517bf346f..66264fe0d 100644
--- a/tests/e2e/high_availability/disable_writing_on_main_after_restart.py
+++ b/tests/e2e/high_availability/disable_writing_on_main_after_restart.py
@@ -16,7 +16,7 @@ import tempfile
 
 import interactive_mg_runner
 import pytest
-from common import add_coordinator, connect, execute_and_fetch_all, safe_execute
+from common import connect, execute_and_fetch_all, safe_execute
 from mg_utils import mg_sleep_and_assert
 
 interactive_mg_runner.SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
@@ -36,7 +36,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "7687",
             "--log-level",
             "TRACE",
-            "--coordinator-server-port",
+            "--management-port",
             "10011",
             "--also-log-to-stderr",
             "--instance-health-check-frequency-sec",
@@ -55,7 +55,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "7688",
             "--log-level",
             "TRACE",
-            "--coordinator-server-port",
+            "--management-port",
             "10012",
             "--also-log-to-stderr",
             "--instance-health-check-frequency-sec",
@@ -74,7 +74,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "7689",
             "--log-level",
             "TRACE",
-            "--coordinator-server-port",
+            "--management-port",
             "10013",
             "--also-log-to-stderr",
             "--instance-health-check-frequency-sec",
@@ -92,8 +92,8 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "--bolt-port",
             "7690",
             "--log-level=TRACE",
-            "--raft-server-id=1",
-            "--raft-server-port=10111",
+            "--coordinator-id=1",
+            "--coordinator-port=10111",
         ],
         "log_file": "coordinator1.log",
         "setup_queries": [],
@@ -104,8 +104,8 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "--bolt-port",
             "7691",
             "--log-level=TRACE",
-            "--raft-server-id=2",
-            "--raft-server-port=10112",
+            "--coordinator-id=2",
+            "--coordinator-port=10112",
         ],
         "log_file": "coordinator2.log",
         "setup_queries": [],
@@ -116,8 +116,8 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "--bolt-port",
             "7692",
             "--log-level=TRACE",
-            "--raft-server-id=3",
-            "--raft-server-port=10113",
+            "--coordinator-id=3",
+            "--coordinator-port=10113",
             "--also-log-to-stderr",
         ],
         "log_file": "coordinator3.log",
@@ -137,11 +137,11 @@ def test_writing_disabled_on_main_restart():
         "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
     )
     execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
-    assert add_coordinator(
+    execute_and_fetch_all(
         coordinator3_cursor,
         "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
     )
-    assert add_coordinator(
+    execute_and_fetch_all(
         coordinator3_cursor,
         "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
     )
diff --git a/tests/e2e/high_availability/distributed_coords.py b/tests/e2e/high_availability/distributed_coords.py
index 59e083545..dff668ec1 100644
--- a/tests/e2e/high_availability/distributed_coords.py
+++ b/tests/e2e/high_availability/distributed_coords.py
@@ -13,6 +13,7 @@ import os
 import shutil
 import sys
 import tempfile
+import time
 
 import interactive_mg_runner
 import pytest
@@ -40,7 +41,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "7687",
             "--log-level",
             "TRACE",
-            "--coordinator-server-port",
+            "--management-port",
             "10011",
         ],
         "log_file": "instance_1.log",
@@ -54,7 +55,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "7688",
             "--log-level",
             "TRACE",
-            "--coordinator-server-port",
+            "--management-port",
             "10012",
         ],
         "log_file": "instance_2.log",
@@ -68,7 +69,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "7689",
             "--log-level",
             "TRACE",
-            "--coordinator-server-port",
+            "--management-port",
             "10013",
         ],
         "log_file": "instance_3.log",
@@ -81,8 +82,8 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "--bolt-port",
             "7690",
             "--log-level=TRACE",
-            "--raft-server-id=1",
-            "--raft-server-port=10111",
+            "--coordinator-id=1",
+            "--coordinator-port=10111",
         ],
         "log_file": "coordinator1.log",
         "setup_queries": [],
@@ -93,8 +94,8 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "--bolt-port",
             "7691",
             "--log-level=TRACE",
-            "--raft-server-id=2",
-            "--raft-server-port=10112",
+            "--coordinator-id=2",
+            "--coordinator-port=10112",
         ],
         "log_file": "coordinator2.log",
         "setup_queries": [],
@@ -105,8 +106,8 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "--bolt-port",
             "7692",
             "--log-level=TRACE",
-            "--raft-server-id=3",
-            "--raft-server-port=10113",
+            "--coordinator-id=3",
+            "--coordinator-port=10113",
         ],
         "log_file": "coordinator3.log",
         "setup_queries": [
@@ -130,7 +131,7 @@ def get_instances_description_no_setup():
                 "7687",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10011",
             ],
             "log_file": "instance_1.log",
@@ -144,7 +145,7 @@ def get_instances_description_no_setup():
                 "7688",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10012",
             ],
             "log_file": "instance_2.log",
@@ -158,7 +159,7 @@ def get_instances_description_no_setup():
                 "7689",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10013",
             ],
             "log_file": "instance_3.log",
@@ -171,8 +172,8 @@ def get_instances_description_no_setup():
                 "--bolt-port",
                 "7690",
                 "--log-level=TRACE",
-                "--raft-server-id=1",
-                "--raft-server-port=10111",
+                "--coordinator-id=1",
+                "--coordinator-port=10111",
             ],
             "log_file": "coordinator1.log",
             "data_directory": f"{TEMP_DIR}/coordinator_1",
@@ -184,8 +185,8 @@ def get_instances_description_no_setup():
                 "--bolt-port",
                 "7691",
                 "--log-level=TRACE",
-                "--raft-server-id=2",
-                "--raft-server-port=10112",
+                "--coordinator-id=2",
+                "--coordinator-port=10112",
             ],
             "log_file": "coordinator2.log",
             "data_directory": f"{TEMP_DIR}/coordinator_2",
@@ -197,8 +198,8 @@ def get_instances_description_no_setup():
                 "--bolt-port",
                 "7692",
                 "--log-level=TRACE",
-                "--raft-server-id=3",
-                "--raft-server-port=10113",
+                "--coordinator-id=3",
+                "--coordinator-port=10113",
             ],
             "log_file": "coordinator3.log",
             "data_directory": f"{TEMP_DIR}/coordinator_3",
@@ -261,7 +262,7 @@ def test_old_main_comes_back_on_new_leader_as_replica():
         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
         ("instance_1", "", "", "unknown", "main"),
         ("instance_2", "", "", "unknown", "replica"),
-        ("instance_3", "", "", "unknown", "main"),  # TODO: (andi) Will become unknown.
+        ("instance_3", "", "", "unknown", "unknown"),
     ]
     mg_sleep_and_assert_any_function(leader_data, [show_instances_coord1, show_instances_coord2])
     mg_sleep_and_assert_any_function(follower_data, [show_instances_coord1, show_instances_coord2])
@@ -456,7 +457,7 @@ def test_distributed_automatic_failover_with_leadership_change():
         ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
         ("instance_1", "", "", "unknown", "main"),
         ("instance_2", "", "", "unknown", "replica"),
-        ("instance_3", "", "", "unknown", "main"),  # TODO: (andi) Will become unknown.
+        ("instance_3", "", "", "unknown", "unknown"),
     ]
     mg_sleep_and_assert_any_function(leader_data, [show_instances_coord1, show_instances_coord2])
     mg_sleep_and_assert_any_function(follower_data, [show_instances_coord1, show_instances_coord2])
@@ -640,7 +641,7 @@ def test_registering_4_coords():
                 "7687",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10011",
             ],
             "log_file": "instance_1.log",
@@ -654,7 +655,7 @@ def test_registering_4_coords():
                 "7688",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10012",
             ],
             "log_file": "instance_2.log",
@@ -668,7 +669,7 @@ def test_registering_4_coords():
                 "7689",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10013",
             ],
             "log_file": "instance_3.log",
@@ -681,8 +682,8 @@ def test_registering_4_coords():
                 "--bolt-port",
                 "7690",
                 "--log-level=TRACE",
-                "--raft-server-id=1",
-                "--raft-server-port=10111",
+                "--coordinator-id=1",
+                "--coordinator-port=10111",
             ],
             "log_file": "coordinator1.log",
             "setup_queries": [],
@@ -693,8 +694,8 @@ def test_registering_4_coords():
                 "--bolt-port",
                 "7691",
                 "--log-level=TRACE",
-                "--raft-server-id=2",
-                "--raft-server-port=10112",
+                "--coordinator-id=2",
+                "--coordinator-port=10112",
             ],
             "log_file": "coordinator2.log",
             "setup_queries": [],
@@ -705,8 +706,8 @@ def test_registering_4_coords():
                 "--bolt-port",
                 "7692",
                 "--log-level=TRACE",
-                "--raft-server-id=3",
-                "--raft-server-port=10113",
+                "--coordinator-id=3",
+                "--coordinator-port=10113",
             ],
             "log_file": "coordinator3.log",
             "setup_queries": [],
@@ -717,8 +718,8 @@ def test_registering_4_coords():
                 "--bolt-port",
                 "7693",
                 "--log-level=TRACE",
-                "--raft-server-id=4",
-                "--raft-server-port=10114",
+                "--coordinator-id=4",
+                "--coordinator-port=10114",
             ],
             "log_file": "coordinator4.log",
             "setup_queries": [
@@ -775,7 +776,7 @@ def test_registering_coord_log_store():
                 "7687",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10011",
             ],
             "log_file": "instance_1.log",
@@ -789,7 +790,7 @@ def test_registering_coord_log_store():
                 "7688",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10012",
             ],
             "log_file": "instance_2.log",
@@ -803,7 +804,7 @@ def test_registering_coord_log_store():
                 "7689",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10013",
             ],
             "log_file": "instance_3.log",
@@ -816,8 +817,8 @@ def test_registering_coord_log_store():
                 "--bolt-port",
                 "7690",
                 "--log-level=TRACE",
-                "--raft-server-id=1",
-                "--raft-server-port=10111",
+                "--coordinator-id=1",
+                "--coordinator-port=10111",
             ],
             "log_file": "coordinator1.log",
             "setup_queries": [],
@@ -828,8 +829,8 @@ def test_registering_coord_log_store():
                 "--bolt-port",
                 "7691",
                 "--log-level=TRACE",
-                "--raft-server-id=2",
-                "--raft-server-port=10112",
+                "--coordinator-id=2",
+                "--coordinator-port=10112",
             ],
             "log_file": "coordinator2.log",
             "setup_queries": [],
@@ -840,8 +841,8 @@ def test_registering_coord_log_store():
                 "--bolt-port",
                 "7692",
                 "--log-level=TRACE",
-                "--raft-server-id=3",
-                "--raft-server-port=10113",
+                "--coordinator-id=3",
+                "--coordinator-port=10113",
             ],
             "log_file": "coordinator3.log",
             "setup_queries": [],
@@ -852,8 +853,8 @@ def test_registering_coord_log_store():
                 "--bolt-port",
                 "7693",
                 "--log-level=TRACE",
-                "--raft-server-id=4",
-                "--raft-server-port=10114",
+                "--coordinator-id=4",
+                "--coordinator-port=10114",
             ],
             "log_file": "coordinator4.log",
             "setup_queries": [
@@ -911,7 +912,7 @@ def test_registering_coord_log_store():
 
         bolt_port = f"--bolt-port={bolt_port_id}"
 
-        manag_server_port = f"--coordinator-server-port={manag_port_id}"
+        manag_server_port = f"--management-port={manag_port_id}"
 
         args_desc.append(bolt_port)
         args_desc.append(manag_server_port)
@@ -1092,8 +1093,8 @@ def test_multiple_failovers_in_row_no_leadership_change():
                 "",
                 "",
                 "unknown",
-                "main",
-            ),  # TODO(antoniofilipovic) change to unknown after PR with transitions
+                "unknown",
+            ),
         ]
     )
 
@@ -1119,9 +1120,9 @@ def test_multiple_failovers_in_row_no_leadership_change():
     follower_data.extend(coordinator_data)
     follower_data.extend(
         [
-            ("instance_1", "", "", "unknown", "main"),
-            ("instance_2", "", "", "unknown", "main"),  # TODO(antoniofilipovic) change to unknown
-            ("instance_3", "", "", "unknown", "main"),  # TODO(antoniofilipovic) change to unknown
+            ("instance_1", "", "", "unknown", "unknown"),
+            ("instance_2", "", "", "unknown", "main"),
+            ("instance_3", "", "", "unknown", "unknown"),
         ]
     )
 
@@ -1149,7 +1150,7 @@ def test_multiple_failovers_in_row_no_leadership_change():
     follower_data.extend(coordinator_data)
     follower_data.extend(
         [
-            ("instance_1", "", "", "unknown", "main"),  # TODO(antoniofilipovic) change to unknown
+            ("instance_1", "", "", "unknown", "unknown"),
             ("instance_2", "", "", "unknown", "main"),
             ("instance_3", "", "", "unknown", "replica"),
         ]
@@ -1177,8 +1178,8 @@ def test_multiple_failovers_in_row_no_leadership_change():
     follower_data.extend(coordinator_data)
     follower_data.extend(
         [
-            ("instance_1", "", "", "unknown", "main"),  # TODO(antoniofilipovic) change to unknown
-            ("instance_2", "", "", "unknown", "main"),  # TODO(antoniofilipovic) change to unknown
+            ("instance_1", "", "", "unknown", "unknown"),
+            ("instance_2", "", "", "unknown", "unknown"),
             ("instance_3", "", "", "unknown", "main"),
         ]
     )
@@ -1258,5 +1259,166 @@ def test_multiple_failovers_in_row_no_leadership_change():
     mg_sleep_and_assert(1, get_vertex_count_func(connect(port=7688, host="localhost").cursor()))
 
 
+def test_multiple_old_mains_single_failover():
+    # Goal of this test is to check that, when leadership changes
+    # while the old MAIN is down, we don't start a failover
+    # 1. Start all instances.
+    # 2. Kill the main instance
+    # 3. Do failover
+    # 4. Kill other main
+    # 5. Kill leader
+    # 6. Leave first main down, and start second main
+    # 7. Second main should write data to new instance all the time
+
+    # 1
+    safe_execute(shutil.rmtree, TEMP_DIR)
+    inner_instances_description = get_instances_description_no_setup()
+
+    interactive_mg_runner.start_all(inner_instances_description)
+
+    setup_queries = [
+        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
+        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
+        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
+        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
+        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
+        "SET INSTANCE instance_3 TO MAIN",
+    ]
+    coord_cursor_3 = connect(host="localhost", port=7692).cursor()
+    for query in setup_queries:
+        execute_and_fetch_all(coord_cursor_3, query)
+
+    def retrieve_data_show_repl_cluster():
+        return sorted(list(execute_and_fetch_all(coord_cursor_3, "SHOW INSTANCES;")))
+
+    coordinators = [
+        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+    ]
+
+    basic_instances = [
+        ("instance_1", "", "127.0.0.1:10011", "up", "replica"),
+        ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
+        ("instance_3", "", "127.0.0.1:10013", "up", "main"),
+    ]
+
+    expected_data_on_coord = []
+    expected_data_on_coord.extend(coordinators)
+    expected_data_on_coord.extend(basic_instances)
+
+    mg_sleep_and_assert(expected_data_on_coord, retrieve_data_show_repl_cluster)
+
+    # 2
+
+    interactive_mg_runner.kill(inner_instances_description, "instance_3")
+
+    # 3
+
+    basic_instances = [
+        ("instance_1", "", "127.0.0.1:10011", "up", "main"),
+        ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
+        ("instance_3", "", "127.0.0.1:10013", "down", "unknown"),
+    ]
+
+    expected_data_on_coord = []
+    expected_data_on_coord.extend(coordinators)
+    expected_data_on_coord.extend(basic_instances)
+
+    mg_sleep_and_assert(expected_data_on_coord, retrieve_data_show_repl_cluster)
+
+    # 4
+
+    interactive_mg_runner.kill(inner_instances_description, "instance_1")
+
+    # 5
+    interactive_mg_runner.kill(inner_instances_description, "coordinator_3")
+
+    # 6
+
+    interactive_mg_runner.start(inner_instances_description, "instance_1")
+
+    # 7
+
+    coord_cursor_1 = connect(host="localhost", port=7690).cursor()
+
+    def show_instances_coord1():
+        return sorted(list(execute_and_fetch_all(coord_cursor_1, "SHOW INSTANCES;")))
+
+    coord_cursor_2 = connect(host="localhost", port=7691).cursor()
+
+    def show_instances_coord2():
+        return sorted(list(execute_and_fetch_all(coord_cursor_2, "SHOW INSTANCES;")))
+
+    leader_data = [
+        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+        ("instance_1", "", "127.0.0.1:10011", "up", "main"),
+        ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
+        ("instance_3", "", "127.0.0.1:10013", "down", "unknown"),
+    ]
+    mg_sleep_and_assert_any_function(leader_data, [show_instances_coord1, show_instances_coord2])
+
+    follower_data = [
+        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
+        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
+        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
+        ("instance_1", "", "", "unknown", "main"),
+        ("instance_2", "", "", "unknown", "replica"),
+        ("instance_3", "", "", "unknown", "unknown"),
+    ]
+    mg_sleep_and_assert_any_function(leader_data, [show_instances_coord1, show_instances_coord2])
+    mg_sleep_and_assert_any_function(follower_data, [show_instances_coord1, show_instances_coord2])
+
+    instance_1_cursor = connect(host="localhost", port=7687).cursor()
+
+    def show_replicas():
+        return sorted(list(execute_and_fetch_all(instance_1_cursor, "SHOW REPLICAS;")))
+
+    replicas = [
+        (
+            "instance_2",
+            "127.0.0.1:10002",
+            "sync",
+            {"behind": None, "status": "ready", "ts": 0},
+            {"memgraph": {"behind": 0, "status": "ready", "ts": 0}},
+        ),
+        (
+            "instance_3",
+            "127.0.0.1:10003",
+            "sync",
+            {"behind": None, "status": "invalid", "ts": 0},
+            {"memgraph": {"behind": 0, "status": "invalid", "ts": 0}},
+        ),
+    ]
+    mg_sleep_and_assert_collection(replicas, show_replicas)
+
+    def get_vertex_count_func(cursor):
+        def get_vertex_count():
+            return execute_and_fetch_all(cursor, "MATCH (n) RETURN count(n)")[0][0]
+
+        return get_vertex_count
+
+    vertex_count = 0
+    instance_1_cursor = connect(port=7687, host="localhost").cursor()
+    instance_2_cursor = connect(port=7688, host="localhost").cursor()
+
+    mg_sleep_and_assert(vertex_count, get_vertex_count_func(instance_1_cursor))
+    mg_sleep_and_assert(vertex_count, get_vertex_count_func(instance_2_cursor))
+
+    time_slept = 0
+    failover_time = 5
+    while time_slept < failover_time:
+        with pytest.raises(Exception) as e:
+            execute_and_fetch_all(instance_1_cursor, "CREATE ();")
+        vertex_count += 1
+
+        assert vertex_count == execute_and_fetch_all(instance_1_cursor, "MATCH (n) RETURN count(n);")[0][0]
+        assert vertex_count == execute_and_fetch_all(instance_2_cursor, "MATCH (n) RETURN count(n);")[0][0]
+        time.sleep(0.1)
+        time_slept += 0.1
+
+
 if __name__ == "__main__":
     sys.exit(pytest.main([__file__, "-rA"]))
diff --git a/tests/e2e/high_availability/manual_setting_replicas.py b/tests/e2e/high_availability/manual_setting_replicas.py
index b0b0965bc..02d0ea4e9 100644
--- a/tests/e2e/high_availability/manual_setting_replicas.py
+++ b/tests/e2e/high_availability/manual_setting_replicas.py
@@ -31,7 +31,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "7687",
             "--log-level",
             "TRACE",
-            "--coordinator-server-port",
+            "--management-port",
             "10013",
         ],
         "log_file": "main.log",
diff --git a/tests/e2e/high_availability/not_replicate_from_old_main.py b/tests/e2e/high_availability/not_replicate_from_old_main.py
index d9729f650..3e328a544 100644
--- a/tests/e2e/high_availability/not_replicate_from_old_main.py
+++ b/tests/e2e/high_availability/not_replicate_from_old_main.py
@@ -153,7 +153,7 @@ def test_not_replicate_old_main_register_new_cluster():
                 "7688",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10011",
             ],
             "log_file": "instance_1.log",
@@ -167,7 +167,7 @@ def test_not_replicate_old_main_register_new_cluster():
                 "7689",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10012",
             ],
             "log_file": "instance_2.log",
@@ -180,8 +180,8 @@ def test_not_replicate_old_main_register_new_cluster():
                 "--bolt-port",
                 "7690",
                 "--log-level=TRACE",
-                "--raft-server-id=1",
-                "--raft-server-port=10111",
+                "--coordinator-id=1",
+                "--coordinator-port=10111",
             ],
             "log_file": "coordinator.log",
             "setup_queries": [
@@ -220,7 +220,7 @@ def test_not_replicate_old_main_register_new_cluster():
                 "7687",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10013",
             ],
             "log_file": "instance_3.log",
@@ -233,8 +233,8 @@ def test_not_replicate_old_main_register_new_cluster():
                 "--bolt-port",
                 "7691",
                 "--log-level=TRACE",
-                "--raft-server-id=1",
-                "--raft-server-port=10112",
+                "--coordinator-id=1",
+                "--coordinator-port=10112",
             ],
             "log_file": "coordinator.log",
             "setup_queries": [],
diff --git a/tests/e2e/high_availability/single_coordinator.py b/tests/e2e/high_availability/single_coordinator.py
index 1d839b4fc..6582ddfec 100644
--- a/tests/e2e/high_availability/single_coordinator.py
+++ b/tests/e2e/high_availability/single_coordinator.py
@@ -35,7 +35,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "7688",
             "--log-level",
             "TRACE",
-            "--coordinator-server-port",
+            "--management-port",
             "10011",
             "--replication-restore-state-on-startup=true",
             "--storage-recover-on-startup=false",
@@ -52,7 +52,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "7689",
             "--log-level",
             "TRACE",
-            "--coordinator-server-port",
+            "--management-port",
             "10012",
             "--replication-restore-state-on-startup=true",
             "--storage-recover-on-startup=false",
@@ -69,7 +69,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "7687",
             "--log-level",
             "TRACE",
-            "--coordinator-server-port",
+            "--management-port",
             "10013",
             "--replication-restore-state-on-startup=true",
             "--storage-recover-on-startup=false",
@@ -85,8 +85,8 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
             "--bolt-port",
             "7690",
             "--log-level=TRACE",
-            "--raft-server-id=1",
-            "--raft-server-port=10111",
+            "--coordinator-id=1",
+            "--coordinator-port=10111",
         ],
         "log_file": "coordinator.log",
         "setup_queries": [
@@ -126,7 +126,7 @@ def test_replication_works_on_failover_replica_1_epoch_2_commits_away(data_recov
                 "7688",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10011",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -144,7 +144,7 @@ def test_replication_works_on_failover_replica_1_epoch_2_commits_away(data_recov
                 "7689",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10012",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -162,7 +162,7 @@ def test_replication_works_on_failover_replica_1_epoch_2_commits_away(data_recov
                 "7687",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10013",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -180,8 +180,8 @@ def test_replication_works_on_failover_replica_1_epoch_2_commits_away(data_recov
                 "--bolt-port",
                 "7690",
                 "--log-level=TRACE",
-                "--raft-server-id=1",
-                "--raft-server-port=10111",
+                "--coordinator-id=1",
+                "--coordinator-port=10111",
             ],
             "log_file": "coordinator.log",
             "setup_queries": [
@@ -337,7 +337,7 @@ def test_replication_works_on_failover_replica_2_epochs_more_commits_away(data_r
                 "7688",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10011",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -355,7 +355,7 @@ def test_replication_works_on_failover_replica_2_epochs_more_commits_away(data_r
                 "7689",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10012",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -373,7 +373,7 @@ def test_replication_works_on_failover_replica_2_epochs_more_commits_away(data_r
                 "7687",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10013",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -392,7 +392,7 @@ def test_replication_works_on_failover_replica_2_epochs_more_commits_away(data_r
                 "7691",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10014",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -410,8 +410,8 @@ def test_replication_works_on_failover_replica_2_epochs_more_commits_away(data_r
                 "--bolt-port",
                 "7690",
                 "--log-level=TRACE",
-                "--raft-server-id=1",
-                "--raft-server-port=10111",
+                "--coordinator-id=1",
+                "--coordinator-port=10111",
             ],
             "log_file": "coordinator.log",
             "setup_queries": [
@@ -624,7 +624,7 @@ def test_replication_forcefully_works_on_failover_replica_misses_epoch(data_reco
                 "7688",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10011",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -642,7 +642,7 @@ def test_replication_forcefully_works_on_failover_replica_misses_epoch(data_reco
                 "7689",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10012",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -660,7 +660,7 @@ def test_replication_forcefully_works_on_failover_replica_misses_epoch(data_reco
                 "7687",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10013",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -679,7 +679,7 @@ def test_replication_forcefully_works_on_failover_replica_misses_epoch(data_reco
                 "7691",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10014",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -697,8 +697,8 @@ def test_replication_forcefully_works_on_failover_replica_misses_epoch(data_reco
                 "--bolt-port",
                 "7690",
                 "--log-level=TRACE",
-                "--raft-server-id=1",
-                "--raft-server-port=10111",
+                "--coordinator-id=1",
+                "--coordinator-port=10111",
             ],
             "log_file": "coordinator.log",
             "setup_queries": [
@@ -911,7 +911,7 @@ def test_replication_correct_replica_chosen_up_to_date_data(data_recovery):
                 "7688",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10011",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -929,7 +929,7 @@ def test_replication_correct_replica_chosen_up_to_date_data(data_recovery):
                 "7689",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10012",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -947,7 +947,7 @@ def test_replication_correct_replica_chosen_up_to_date_data(data_recovery):
                 "7687",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10013",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -966,7 +966,7 @@ def test_replication_correct_replica_chosen_up_to_date_data(data_recovery):
                 "7691",
                 "--log-level",
                 "TRACE",
-                "--coordinator-server-port",
+                "--management-port",
                 "10014",
                 "--replication-restore-state-on-startup",
                 "true",
@@ -984,8 +984,8 @@ def test_replication_correct_replica_chosen_up_to_date_data(data_recovery):
                 "--bolt-port",
                 "7690",
                 "--log-level=TRACE",
-                "--raft-server-id=1",
-                "--raft-server-port=10111",
+                "--coordinator-id=1",
+                "--coordinator-port=10111",
             ],
             "log_file": "coordinator.log",
             "setup_queries": [
diff --git a/tests/e2e/high_availability/workloads.yaml b/tests/e2e/high_availability/workloads.yaml
index aaf76fc6b..9d3bd3126 100644
--- a/tests/e2e/high_availability/workloads.yaml
+++ b/tests/e2e/high_availability/workloads.yaml
@@ -1,19 +1,19 @@
 ha_cluster: &ha_cluster
   cluster:
     replica_1:
-      args: ["--experimental-enabled=high-availability", "--bolt-port", "7688", "--log-level=TRACE", "--coordinator-server-port=10011"]
+      args: ["--experimental-enabled=high-availability", "--bolt-port", "7688", "--log-level=TRACE", "--management-port=10011"]
       log_file: "replication-e2e-replica1.log"
       setup_queries: []
     replica_2:
-      args: ["--experimental-enabled=high-availability", "--bolt-port", "7689", "--log-level=TRACE", "--coordinator-server-port=10012"]
+      args: ["--experimental-enabled=high-availability", "--bolt-port", "7689", "--log-level=TRACE", "--management-port=10012"]
       log_file: "replication-e2e-replica2.log"
       setup_queries: []
     main:
-      args: ["--experimental-enabled=high-availability", "--bolt-port", "7687", "--log-level=TRACE", "--coordinator-server-port=10013"]
+      args: ["--experimental-enabled=high-availability", "--bolt-port", "7687", "--log-level=TRACE", "--management-port=10013"]
       log_file: "replication-e2e-main.log"
       setup_queries: []
     coordinator:
-      args: ["--experimental-enabled=high-availability", "--bolt-port", "7690", "--log-level=TRACE", "--raft-server-id=1", "--raft-server-port=10111"]
+      args: ["--experimental-enabled=high-availability", "--bolt-port", "7690", "--log-level=TRACE", "--coordinator-id=1", "--coordinator-port=10111"]
       log_file: "replication-e2e-coordinator.log"
       setup_queries: [
         "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
diff --git a/tests/e2e/replication/common.hpp b/tests/e2e/replication/common.hpp
index 1938eb0f3..e2ec43978 100644
--- a/tests/e2e/replication/common.hpp
+++ b/tests/e2e/replication/common.hpp
@@ -37,10 +37,9 @@ auto ParseDatabaseEndpoints(const std::string &database_endpoints_str) {
   const auto db_endpoints_strs = memgraph::utils::SplitView(database_endpoints_str, ",");
   std::vector<memgraph::io::network::Endpoint> database_endpoints;
   for (const auto &db_endpoint_str : db_endpoints_strs) {
-    const auto maybe_host_port = memgraph::io::network::Endpoint::ParseSocketOrAddress(db_endpoint_str, 7687);
-    MG_ASSERT(maybe_host_port);
-    auto const [ip, port] = *maybe_host_port;
-    database_endpoints.emplace_back(std::string(ip), port);
+    auto maybe_endpoint = memgraph::io::network::Endpoint::ParseSocketOrAddress(db_endpoint_str, 7687);
+    MG_ASSERT(maybe_endpoint);
+    database_endpoints.emplace_back(std::move(*maybe_endpoint));
   }
   return database_endpoints;
 }
diff --git a/tests/e2e/text_search/CMakeLists.txt b/tests/e2e/text_search/CMakeLists.txt
new file mode 100644
index 000000000..db2af7a11
--- /dev/null
+++ b/tests/e2e/text_search/CMakeLists.txt
@@ -0,0 +1,6 @@
+function(copy_text_search_e2e_python_files FILE_NAME)
+    copy_e2e_python_files(text_search ${FILE_NAME})
+endfunction()
+
+copy_text_search_e2e_python_files(common.py)
+copy_text_search_e2e_python_files(test_text_search.py)
diff --git a/tests/e2e/text_search/common.py b/tests/e2e/text_search/common.py
new file mode 100644
index 000000000..0f28351d3
--- /dev/null
+++ b/tests/e2e/text_search/common.py
@@ -0,0 +1,87 @@
+# Copyright 2024 Memgraph Ltd.
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+# License, and you may not use this file except in compliance with the Business Source License.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+import typing
+
+import mgclient
+import pytest
+from gqlalchemy import Memgraph
+
+
+def execute_and_fetch_all(cursor: mgclient.Cursor, query: str, params: dict = {}) -> typing.List[tuple]:
+    cursor.execute(query, params)
+    return cursor.fetchall()
+
+
+@pytest.fixture
+def connect(**kwargs) -> mgclient.Connection:
+    connection = mgclient.connect(host="localhost", port=7687, **kwargs)
+    connection.autocommit = True
+    cursor = connection.cursor()
+    execute_and_fetch_all(cursor, """USE DATABASE memgraph""")
+    try:
+        execute_and_fetch_all(cursor, """DROP DATABASE clean""")
+    except:
+        pass
+    execute_and_fetch_all(cursor, """MATCH (n) DETACH DELETE n""")
+    yield connection
+
+
+@pytest.fixture
+def memgraph(**kwargs) -> Memgraph:
+    memgraph = Memgraph()
+
+    yield memgraph
+
+    memgraph.drop_database()
+    memgraph.drop_indexes()
+
+
+@pytest.fixture
+def memgraph_with_text_indexed_data(**kwargs) -> Memgraph:
+    memgraph = Memgraph()
+
+    memgraph.execute(
+        """CREATE (:Document {title: "Rules2024", version: 1, fulltext: "random works", date: date("2023-11-14")});"""
+    )
+    memgraph.execute(
+        """CREATE (:Document {title: "Rules2023", version: 9, fulltext: "text Rules2024", date: date("2023-11-14")});"""
+    )
+    memgraph.execute(
+        """CREATE (:Document:Revision {title: "Rules2024", version: 2, fulltext: "random words", date: date("2023-12-15")});"""
+    )
+    memgraph.execute("""CREATE (:Revision {title: "OperationSchema", version: 3, date: date("2023-10-01")});""")
+    memgraph.execute("""CREATE TEXT INDEX complianceDocuments ON :Document;""")
+
+    yield memgraph
+
+    memgraph.execute("""DROP TEXT INDEX complianceDocuments;""")
+    memgraph.drop_database()
+    memgraph.drop_indexes()
+
+
+@pytest.fixture
+def memgraph_with_mixed_data(**kwargs) -> Memgraph:
+    memgraph = Memgraph()
+
+    memgraph.execute(
+        """CREATE (:Document:Revision {title: "Rules2024", version: 1, date: date("2023-11-14"), contents: "Lorem ipsum dolor sit amet"});"""
+    )
+    memgraph.execute(
+        """CREATE (:Revision {title: "Rules2024", version: 2, date: date("2023-12-15"), contents: "consectetur adipiscing elit"});"""
+    )
+    memgraph.execute("""CREATE TEXT INDEX complianceDocuments ON :Document;""")
+
+    yield memgraph
+
+    memgraph.execute("""DROP TEXT INDEX complianceDocuments;""")
+    memgraph.drop_database()
+    memgraph.drop_indexes()
diff --git a/tests/e2e/text_search/test_text_search.py b/tests/e2e/text_search/test_text_search.py
new file mode 100644
index 000000000..8d538d464
--- /dev/null
+++ b/tests/e2e/text_search/test_text_search.py
@@ -0,0 +1,206 @@
+# Copyright 2024 Memgraph Ltd.
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+# License, and you may not use this file except in compliance with the Business Source License.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+import json
+import re
+import sys
+
+import gqlalchemy
+import mgclient
+import pytest
+from common import memgraph, memgraph_with_mixed_data, memgraph_with_text_indexed_data
+
+GET_RULES_2024_DOCUMENT = """CALL libtext.search("complianceDocuments", "data.title:Rules2024") YIELD node
+             RETURN node.title AS title, node.version AS version
+             ORDER BY version ASC, title ASC;"""
+
+
+def test_create_index(memgraph):
+    memgraph.execute("""CREATE TEXT INDEX exampleIndex ON :Document;""")
+
+    index_info = memgraph.execute_and_fetch("""SHOW INDEX INFO""")
+
+    assert list(index_info) == [
+        {"index type": "text (name: exampleIndex)", "label": "Document", "property": None, "count": None}
+    ]
+
+
+def test_drop_index(memgraph):
+    memgraph.execute("""DROP TEXT INDEX exampleIndex;""")
+
+    index_info = memgraph.execute_and_fetch("""SHOW INDEX INFO""")
+
+    assert list(index_info) == []
+
+
+def test_create_existing_index(memgraph):
+    memgraph.execute("""CREATE TEXT INDEX duplicatedIndex ON :Document;""")
+    with pytest.raises(
+        gqlalchemy.exceptions.GQLAlchemyDatabaseError, match='Text index "duplicatedIndex" already exists.'
+    ) as _:
+        memgraph.execute("""CREATE TEXT INDEX duplicatedIndex ON :Document;""")
+    memgraph.execute("""DROP TEXT INDEX duplicatedIndex;""")  # cleanup
+
+
+def test_drop_nonexistent_index(memgraph):
+    with pytest.raises(
+        gqlalchemy.exceptions.GQLAlchemyDatabaseError, match='Text index "noSuchIndex" doesn’t exist.'
+    ) as _:
+        memgraph.execute("""DROP TEXT INDEX noSuchIndex;""")
+
+
+def test_text_search_given_property(memgraph_with_text_indexed_data):
+    result = list(memgraph_with_text_indexed_data.execute_and_fetch(GET_RULES_2024_DOCUMENT))
+
+    assert len(result) == 2 and result == [{"title": "Rules2024", "version": 1}, {"title": "Rules2024", "version": 2}]
+
+
+def test_text_search_all_properties(memgraph_with_text_indexed_data):
+    SEARCH_QUERY = "Rules2024"
+
+    ALL_PROPERTIES_QUERY = f"""CALL libtext.search_all("complianceDocuments", "{SEARCH_QUERY}") YIELD node
+             RETURN node
+             ORDER BY node.version ASC, node.title ASC;"""
+
+    result = list(memgraph_with_text_indexed_data.execute_and_fetch(ALL_PROPERTIES_QUERY))
+    result_nodes = [record["node"] for record in result]
+
+    assert len(result) == 3 and (
+        result_nodes[0].title == SEARCH_QUERY
+        and result_nodes[1].title == SEARCH_QUERY
+        and SEARCH_QUERY in result_nodes[2].fulltext
+    )
+
+
+def test_regex_text_search(memgraph_with_text_indexed_data):
+    REGEX_QUERY = """CALL libtext.regex_search("complianceDocuments", "wor.*s") YIELD node
+             RETURN node
+             ORDER BY node.version ASC, node.title ASC;"""
+
+    result = list(memgraph_with_text_indexed_data.execute_and_fetch(REGEX_QUERY))
+
+    assert (
+        len(result) == 2
+        and re.search("wor.*s", result[0]["node"].fulltext)
+        and re.search("wor.*s", result[1]["node"].fulltext)
+        # In this test, all values matching the regex string are found in the node's fulltext property only ^
+    )
+
+
+def test_text_search_aggregate(memgraph_with_text_indexed_data):
+    input_aggregation = json.dumps({"count": {"value_count": {"field": "metadata.gid"}}}, separators=(",", ":"))
+    expected_aggregation = json.dumps({"count": {"value": 2.0}}, separators=(",", ":"))
+
+    AGGREGATION_QUERY = f"""CALL libtext.aggregate("complianceDocuments", "data.title:Rules2024", '{input_aggregation}')
+                YIELD aggregation
+                RETURN aggregation;"""
+
+    result = list(memgraph_with_text_indexed_data.execute_and_fetch(AGGREGATION_QUERY))
+
+    assert len(result) == 1 and result[0]["aggregation"] == expected_aggregation
+
+
+def test_text_search_query_boolean(memgraph_with_text_indexed_data):
+    BOOLEAN_QUERY = """CALL libtext.search("complianceDocuments", "(data.title:Rules2023 OR data.title:Rules2024) AND data.fulltext:words") YIELD node
+                RETURN node.title AS title, node.version AS version
+                ORDER BY version ASC, title ASC;"""
+
+    result = list(memgraph_with_text_indexed_data.execute_and_fetch(BOOLEAN_QUERY))
+
+    assert len(result) == 1 and result == [{"title": "Rules2024", "version": 2}]
+
+
+def test_create_indexed_node(memgraph_with_text_indexed_data):
+    memgraph_with_text_indexed_data.execute("""CREATE (:Document {title: "Rules2024", version: 3});""")
+
+    result = list(memgraph_with_text_indexed_data.execute_and_fetch(GET_RULES_2024_DOCUMENT))
+
+    assert len(result) == 3 and result == [
+        {"title": "Rules2024", "version": 1},
+        {"title": "Rules2024", "version": 2},
+        {"title": "Rules2024", "version": 3},
+    ]
+
+
+def test_delete_indexed_node(memgraph_with_text_indexed_data):
+    memgraph_with_text_indexed_data.execute("""MATCH (n:Document {title: "Rules2024", version: 2}) DETACH DELETE n;""")
+
+    result = list(memgraph_with_text_indexed_data.execute_and_fetch(GET_RULES_2024_DOCUMENT))
+
+    assert len(result) == 1 and result == [{"title": "Rules2024", "version": 1}]
+
+
+def test_add_indexed_label(memgraph_with_mixed_data):
+    memgraph_with_mixed_data.execute("""MATCH (n:Revision {version:2}) SET n:Document;""")
+
+    result = list(memgraph_with_mixed_data.execute_and_fetch(GET_RULES_2024_DOCUMENT))
+
+    assert len(result) == 2 and result == [{"title": "Rules2024", "version": 1}, {"title": "Rules2024", "version": 2}]
+
+
+def test_remove_indexed_label(memgraph_with_mixed_data):
+    memgraph_with_mixed_data.execute("""MATCH (n:Document {version: 1}) REMOVE n:Document;""")
+
+    result = list(memgraph_with_mixed_data.execute_and_fetch(GET_RULES_2024_DOCUMENT))
+
+    assert len(result) == 0
+
+
+def test_update_text_property_of_indexed_node(memgraph_with_text_indexed_data):
+    memgraph_with_text_indexed_data.execute("""MATCH (n:Document {version:1}) SET n.title = "Rules2030";""")
+
+    result = list(
+        memgraph_with_text_indexed_data.execute_and_fetch(
+            """CALL libtext.search("complianceDocuments", "data.title:Rules2030") YIELD node
+             RETURN node.title AS title, node.version AS version
+             ORDER BY version ASC, title ASC;"""
+        )
+    )
+
+    assert len(result) == 1 and result == [{"title": "Rules2030", "version": 1}]
+
+
+def test_add_unindexable_property_to_indexed_node(memgraph_with_text_indexed_data):
+    try:
+        memgraph_with_text_indexed_data.execute("""MATCH (n:Document {version:1}) SET n.randomList = [2, 3, 4, 5];""")
+    except Exception:
+        assert False
+
+
+def test_remove_indexable_property_from_indexed_node(memgraph_with_text_indexed_data):
+    try:
+        memgraph_with_text_indexed_data.execute(
+            """MATCH (n:Document {version:1}) REMOVE n.title, n.version, n.fulltext, n.date;"""
+        )
+    except Exception:
+        assert False
+
+
+def test_remove_unindexable_property_from_indexed_node(memgraph_with_text_indexed_data):
+    try:
+        memgraph_with_text_indexed_data.execute_and_fetch(
+            """MATCH (n:Document {date: date("2023-12-15")}) REMOVE n.date;"""
+        )
+    except Exception:
+        assert False
+
+
+def test_text_search_nonexistent_index(memgraph_with_text_indexed_data):
+    NONEXISTENT_INDEX_QUERY = """CALL libtext.search("noSuchIndex", "data.fulltext:words") YIELD node
+                RETURN node.title AS title, node.version AS version
+                ORDER BY version ASC, title ASC;"""
+
+    with pytest.raises(mgclient.DatabaseError, match='Text index "noSuchIndex" doesn’t exist.') as _:
+        list(memgraph_with_text_indexed_data.execute_and_fetch(NONEXISTENT_INDEX_QUERY))
+
+
+if __name__ == "__main__":
+    sys.exit(pytest.main([__file__, "-rA"]))
diff --git a/tests/e2e/text_search/test_text_search_disabled.py b/tests/e2e/text_search/test_text_search_disabled.py
new file mode 100644
index 000000000..064f7b409
--- /dev/null
+++ b/tests/e2e/text_search/test_text_search_disabled.py
@@ -0,0 +1,69 @@
+# Copyright 2024 Memgraph Ltd.
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+# License, and you may not use this file except in compliance with the Business Source License.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+import json
+import sys
+
+import gqlalchemy
+import pytest
+from common import memgraph
+
+TEXT_SEARCH_DISABLED_ERROR = (
+    "To use text indices and text search, start Memgraph with the experimental text search feature enabled."
+)
+
+
+def test_create_index(memgraph):
+    with pytest.raises(gqlalchemy.exceptions.GQLAlchemyDatabaseError, match=TEXT_SEARCH_DISABLED_ERROR) as _:
+        memgraph.execute("""CREATE TEXT INDEX exampleIndex ON :Document;""")
+
+
+def test_drop_index(memgraph):
+    with pytest.raises(gqlalchemy.exceptions.GQLAlchemyDatabaseError, match=TEXT_SEARCH_DISABLED_ERROR) as _:
+        memgraph.execute("""DROP TEXT INDEX exampleIndex;""")
+
+
+def test_text_search_given_property(memgraph):
+    with pytest.raises(gqlalchemy.exceptions.GQLAlchemyDatabaseError, match=TEXT_SEARCH_DISABLED_ERROR) as _:
+        memgraph.execute(
+            """CALL libtext.search("complianceDocuments", "data.title:Rules2024") YIELD node
+             RETURN node;"""
+        )
+
+
+def test_text_search_all_properties(memgraph):
+    with pytest.raises(gqlalchemy.exceptions.GQLAlchemyDatabaseError, match=TEXT_SEARCH_DISABLED_ERROR) as _:
+        memgraph.execute(
+            """CALL libtext.search_all("complianceDocuments", "Rules2024") YIELD node
+             RETURN node;"""
+        )
+
+
+def test_regex_text_search(memgraph):
+    with pytest.raises(gqlalchemy.exceptions.GQLAlchemyDatabaseError, match=TEXT_SEARCH_DISABLED_ERROR) as _:
+        memgraph.execute(
+            """CALL libtext.regex_search("complianceDocuments", "wor.*s") YIELD node
+             RETURN node;"""
+        )
+
+
+def test_text_search_aggregate(memgraph):
+    with pytest.raises(gqlalchemy.exceptions.GQLAlchemyDatabaseError, match=TEXT_SEARCH_DISABLED_ERROR) as _:
+        input_aggregation = json.dumps({"count": {"value_count": {"field": "metadata.gid"}}}, separators=(",", ":"))
+
+        memgraph.execute(
+            f"""CALL libtext.aggregate("complianceDocuments", "wor.*s", '{input_aggregation}') YIELD aggregation
+            RETURN aggregation;"""
+        )
+
+
+if __name__ == "__main__":
+    sys.exit(pytest.main([__file__, "-rA"]))
diff --git a/tests/e2e/text_search/workloads.yaml b/tests/e2e/text_search/workloads.yaml
new file mode 100644
index 000000000..5b1640715
--- /dev/null
+++ b/tests/e2e/text_search/workloads.yaml
@@ -0,0 +1,33 @@
+text_search_cluster: &text_search_cluster
+  cluster:
+    main:
+      args:
+        [
+          "--bolt-port",
+          "7687",
+          "--log-level=TRACE",
+          "--experimental-enabled=text-search",
+        ]
+      log_file: "text_search.log"
+      setup_queries: []
+      validation_queries: []
+
+text_search_disabled_cluster: &text_search_disabled_cluster
+  cluster:
+    main:
+      args: ["--bolt-port", "7687", "--log-level=TRACE"]
+      log_file: "text_search.log"
+      setup_queries: []
+      validation_queries: []
+
+workloads:
+  - name: "Test behavior of text search in Memgraph"
+    binary: "tests/e2e/pytest_runner.sh"
+    proc: "tests/e2e/text_search/query_modules/"
+    args: ["text_search/test_text_search.py"]
+    <<: *text_search_cluster
+  - name: "Test behavior of text search in Memgraph when disabled"
+    binary: "tests/e2e/pytest_runner.sh"
+    proc: "tests/e2e/text_search/query_modules/"
+    args: ["text_search/test_text_search_disabled.py"]
+    <<: *text_search_disabled_cluster
diff --git a/tests/jepsen/run.sh b/tests/jepsen/run.sh
index 040491c3f..d94dbf8de 100755
--- a/tests/jepsen/run.sh
+++ b/tests/jepsen/run.sh
@@ -178,8 +178,16 @@ PROCESS_RESULTS() {
 
 CLUSTER_UP() {
   PRINT_CONTEXT
-  "$script_dir/jepsen/docker/bin/up" --daemon
-  sleep 10
+  local cnt=0
+  while [[ "$cnt" -lt 5 ]]; do
+    if ! "$script_dir/jepsen/docker/bin/up" --daemon; then
+      cnt=$((cnt + 1))
+      continue
+    else
+      sleep 10
+      break
+    fi
+  done
   # Ensure all SSH connections between Jepsen containers work
   for node in $(docker ps --filter name=jepsen* --filter status=running --format "{{.Names}}"); do
       if [ "$node" == "jepsen-control" ]; then
diff --git a/tests/mgbench/benchmark.py b/tests/mgbench/benchmark.py
index cd3fb846f..9c8f1a7d2 100755
--- a/tests/mgbench/benchmark.py
+++ b/tests/mgbench/benchmark.py
@@ -632,10 +632,12 @@ def run_isolated_workload_without_authorization(vendor_runner, client, queries,
 
 
 def setup_indices_and_import_dataset(client, vendor_runner, generated_queries, workload, storage_mode):
-    vendor_runner.start_db_init(VENDOR_RUNNER_IMPORT)
+    if benchmark_context.vendor_name == "memgraph":
+        # Neo4j will get started just before import -> without this if statement it would try to start it twice
+        vendor_runner.start_db_init(VENDOR_RUNNER_IMPORT)
     log.info("Executing database index setup")
     start_time = time.time()
-
+    import_results = None
     if generated_queries:
         client.execute(queries=workload.indexes_generator(), num_workers=1)
         log.info("Finished setting up indexes.")
diff --git a/tests/mgbench/graph_bench.py b/tests/mgbench/graph_bench.py
index f329cfcb7..bcba55324 100644
--- a/tests/mgbench/graph_bench.py
+++ b/tests/mgbench/graph_bench.py
@@ -127,8 +127,6 @@ def run_full_benchmarks(
         ],
     ]
 
-    assert not realistic or not mixed, "Cannot run both realistic and mixed workload, please select one!"
-
     if realistic:
         # Configurations for full workload
         for count, write, read, update, analytical in realistic:
diff --git a/tests/mgbench/mg_ondisk_vs_neo4j_pokec.sh b/tests/mgbench/mg_ondisk_vs_neo4j_pokec.sh
new file mode 100644
index 000000000..0381448fa
--- /dev/null
+++ b/tests/mgbench/mg_ondisk_vs_neo4j_pokec.sh
@@ -0,0 +1,99 @@
+#!/bin/bash
+
+# Currently only pokec dataset is modified to be used with memgraph on-disk storage
+
+pushd () { command pushd "$@" > /dev/null; }
+popd () { command popd "$@" > /dev/null; }
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+pushd "$SCRIPT_DIR"
+
+# Help function
+function show_help() {
+    echo "Usage: $0 [OPTIONS]"
+    echo "Options:"
+    echo "  -n, --neo4j-path     Path to Neo4j binary"
+    echo "  -m, --memgraph-path  Path to Memgraph binary"
+    echo "  -w, --num-workers    Number of workers for benchmark and import"
+    echo "  -d, --dataset_size   dataset_size (small, medium, large)"
+    echo "  -h, --help           Show this help message"
+    exit 0
+}
+
+# Default values
+neo4j_path="/usr/share/neo4j/bin/neo4j"
+memgraph_path="../../build/memgraph"
+num_workers=12
+dataset_size="small"
+
+# Parse command line arguments
+while [[ $# -gt 0 ]]; do
+    key="$1"
+    case $key in
+        -n|--neo4j-path)
+            neo4j_path="$2"
+            shift
+            shift
+            ;;
+        -m|--memgraph-path)
+            memgraph_path="$2"
+            shift
+            shift
+            ;;
+        -w|--num-workers)
+            num_workers="$2"
+            shift
+            shift
+            ;;
+        -d|--dataset_size)
+            dataset_size="$2"
+            shift
+            shift
+            ;;
+        -h|--help)
+            show_help
+            ;;
+        *)
+            echo "Invalid option: $1"
+            show_help
+            ;;
+    esac
+done
+
+if [ ! -d "pokec_${dataset_size}_results" ]; then
+    mkdir "pokec_${dataset_size}_results"
+fi
+
+# Run Python: Mgbench - Neo4j
+echo "Running Python: Mgbench - Neo4j"
+python3  benchmark.py vendor-native \
+    --vendor-binary "$neo4j_path" \
+    --vendor-name neo4j \
+    --num-workers-for-benchmark "$num_workers" \
+    --num-workers-for-import "$num_workers" \
+    --no-load-query-counts \
+    --export-results "pokec_${dataset_size}_results/neo4j_${dataset_size}_pokec.json" \
+    "pokec_disk/${dataset_size}/*/*" \
+    --vendor-specific "config=$(dirname "$(dirname "$neo4j_path")")/conf/neo4j.conf" \
+    --no-authorization
+
+# Run Python: Mgbench - Memgraph - on-disk
+echo "Running Python: Mgbench - Memgraph - on-disk"
+python3 benchmark.py vendor-native \
+    --vendor-binary "$memgraph_path" \
+    --vendor-name memgraph \
+    --num-workers-for-benchmark "$num_workers" \
+    --num-workers-for-import "$num_workers" \
+    --no-load-query-counts \
+    --export-results-on-disk-txn "pokec_${dataset_size}_results/on_disk_${dataset_size}_pokec.json" \
+    --export-results "pokec_${dataset_size}_results/on_disk_export_${dataset_size}_pokec.json" \
+    "pokec_disk/${dataset_size}/*/*" \
+    --no-authorization \
+    --vendor-specific "data-directory=benchmark_datadir" "storage-mode=ON_DISK_TRANSACTIONAL"
+
+echo "Comparing results"
+python3 compare_results.py --compare \
+    "pokec_${dataset_size}_results/neo4j_${dataset_size}_pokec.json" \
+    "pokec_${dataset_size}_results/on_disk_${dataset_size}_pokec.json" \
+    --output \
+    "pokec_${dataset_size}_results/neo4j_vs_mg_ondisk_results.html" \
+    --different-vendors
diff --git a/tests/mgbench/runners.py b/tests/mgbench/runners.py
index 155ceac06..005bcb60f 100644
--- a/tests/mgbench/runners.py
+++ b/tests/mgbench/runners.py
@@ -634,7 +634,7 @@ class Neo4j(BaseRunner):
             exit_proc = subprocess.run(args=[self._neo4j_binary, "stop"], capture_output=True, check=True)
             return exit_proc.returncode, usage
         else:
-            return 0
+            return 0, 0
 
     def start_db_init(self, workload):
         if self._performance_tracking:
diff --git a/tests/mgbench/workloads/base.py b/tests/mgbench/workloads/base.py
index 5264dcba9..ab4c21059 100644
--- a/tests/mgbench/workloads/base.py
+++ b/tests/mgbench/workloads/base.py
@@ -160,12 +160,7 @@ class Workload(ABC):
             raise ValueError("Vendor does not have INDEX for dataset!")
 
     def _set_local_files(self) -> None:
-        if not self.disk_workload:
-            if self.LOCAL_FILE is not None:
-                self._local_file = self.LOCAL_FILE.get(self._variant, None)
-            else:
-                self._local_file = None
-        else:
+        if self.disk_workload and self._vendor != "neo4j":
             if self.LOCAL_FILE_NODES is not None:
                 self._local_file_nodes = self.LOCAL_FILE_NODES.get(self._variant, None)
             else:
@@ -175,14 +170,14 @@ class Workload(ABC):
                 self._local_file_edges = self.LOCAL_FILE_EDGES.get(self._variant, None)
             else:
                 self._local_file_edges = None
+        else:
+            if self.LOCAL_FILE is not None:
+                self._local_file = self.LOCAL_FILE.get(self._variant, None)
+            else:
+                self._local_file = None
 
     def _set_url_files(self) -> None:
-        if not self.disk_workload:
-            if self.URL_FILE is not None:
-                self._url_file = self.URL_FILE.get(self._variant, None)
-            else:
-                self._url_file = None
-        else:
+        if self.disk_workload and self._vendor != "neo4j":
             if self.URL_FILE_NODES is not None:
                 self._url_file_nodes = self.URL_FILE_NODES.get(self._variant, None)
             else:
@@ -191,6 +186,11 @@ class Workload(ABC):
                 self._url_file_edges = self.URL_FILE_EDGES.get(self._variant, None)
             else:
                 self._url_file_edges = None
+        else:
+            if self.URL_FILE is not None:
+                self._url_file = self.URL_FILE.get(self._variant, None)
+            else:
+                self._url_file = None
 
     def _set_local_index_file(self) -> None:
         if self.LOCAL_INDEX_FILE is not None:
@@ -205,10 +205,10 @@ class Workload(ABC):
             self._url_index = None
 
     def prepare(self, directory):
-        if not self.disk_workload:
-            self._prepare_dataset_for_in_memory_workload(directory)
-        else:
+        if self.disk_workload and self._vendor != "neo4j":
             self._prepare_dataset_for_on_disk_workload(directory)
+        else:
+            self._prepare_dataset_for_in_memory_workload(directory)
 
         if self._local_index is not None:
             print("Using local index file:", self._local_index)
diff --git a/tests/mgbench/workloads/disk_pokec.py b/tests/mgbench/workloads/disk_pokec.py
index f19110a0c..a296e4836 100644
--- a/tests/mgbench/workloads/disk_pokec.py
+++ b/tests/mgbench/workloads/disk_pokec.py
@@ -13,7 +13,8 @@ import random
 
 from benchmark_context import BenchmarkContext
 from workloads.base import Workload
-from workloads.importers.disk_importer_pokec import ImporterPokec
+from workloads.importers.disk_importer_pokec import DiskImporterPokec
+from workloads.importers.importer_pokec import ImporterPokec
 
 
 class Pokec(Workload):
@@ -22,6 +23,12 @@ class Pokec(Workload):
     DEFAULT_VARIANT = "small"
     FILE = None
 
+    URL_FILE = {
+        "small": "https://s3.eu-west-1.amazonaws.com/deps.memgraph.io/dataset/pokec/benchmark/pokec_small_import.cypher",
+        "medium": "https://s3.eu-west-1.amazonaws.com/deps.memgraph.io/dataset/pokec/benchmark/pokec_medium_import.cypher",
+        "large": "https://s3.eu-west-1.amazonaws.com/deps.memgraph.io/dataset/pokec/benchmark/pokec_large.setup.cypher.gz",
+    }
+
     URL_FILE_NODES = {
         "small": "https://s3.eu-west-1.amazonaws.com/deps.memgraph.io/dataset/pokec_disk/benchmark/pokec_small_import_nodes.cypher",
         "medium": "https://s3.eu-west-1.amazonaws.com/deps.memgraph.io/dataset/pokec_disk/benchmark/pokec_medium_import_nodes.cypher",
@@ -42,7 +49,7 @@ class Pokec(Workload):
 
     URL_INDEX_FILE = {
         "memgraph": "https://s3.eu-west-1.amazonaws.com/deps.memgraph.io/dataset/pokec_disk/benchmark/memgraph.cypher",
-        "neo4j": "https://s3.eu-west-1.amazonaws.com/deps.memgraph.io/dataset/pokec_disk/benchmark/neo4j.cypher",
+        "neo4j": "https://s3.eu-west-1.amazonaws.com/deps.memgraph.io/dataset/pokec/benchmark/neo4j.cypher",
     }
 
     PROPERTIES_ON_EDGES = False
@@ -51,15 +58,26 @@ class Pokec(Workload):
         super().__init__(variant, benchmark_context=benchmark_context, disk_workload=True)
 
     def custom_import(self) -> bool:
-        importer = ImporterPokec(
-            benchmark_context=self.benchmark_context,
-            dataset_name=self.NAME,
-            index_file=self._file_index,
-            dataset_nodes_file=self._node_file,
-            dataset_edges_file=self._edge_file,
-            variant=self._variant,
-        )
-        return importer.execute_import()
+        if self._vendor == "neo4j":
+            importer = ImporterPokec(
+                benchmark_context=self.benchmark_context,
+                dataset_name=self.NAME,
+                index_file=self._file_index,
+                dataset_file=self._file,
+                variant=self._variant,
+            )
+            return importer.execute_import()
+
+        else:
+            importer = DiskImporterPokec(
+                benchmark_context=self.benchmark_context,
+                dataset_name=self.NAME,
+                index_file=self._file_index,
+                dataset_nodes_file=self._node_file,
+                dataset_edges_file=self._edge_file,
+                variant=self._variant,
+            )
+            return importer.execute_import()
 
     # Helpers used to generate the queries
     def _get_random_vertex(self):
@@ -214,12 +232,22 @@ class Pokec(Workload):
     # OK
     def benchmark__arango__allshortest_paths(self):
         vertex_from, vertex_to = self._get_random_from_to()
-        return (
+        memgraph = (
             "MATCH (n:User {id: $from}), (m:User {id: $to}) WITH n, m "
             "MATCH p=(n)-[*allshortest 2 (r, n | 1) total_weight]->(m) "
             "RETURN extract(n in nodes(p) | n.id) AS path",
             {"from": vertex_from, "to": vertex_to},
         )
+        neo4j = (
+            "MATCH (n:User {id: $from}), (m:User {id: $to}) WITH n, m "
+            "MATCH p = allShortestPaths((n)-[*..2]->(m)) "
+            "RETURN [node in nodes(p) | node.id] AS path",
+            {"from": vertex_from, "to": vertex_to},
+        )
+        if self._vendor == "neo4j":
+            return neo4j
+        else:
+            return memgraph
 
     # Our benchmark queries
 
diff --git a/tests/mgbench/workloads/importers/disk_importer_pokec.py b/tests/mgbench/workloads/importers/disk_importer_pokec.py
index 560d7da9e..f487dc8f3 100644
--- a/tests/mgbench/workloads/importers/disk_importer_pokec.py
+++ b/tests/mgbench/workloads/importers/disk_importer_pokec.py
@@ -17,7 +17,7 @@ from constants import *
 from runners import BaseRunner
 
 
-class ImporterPokec:
+class DiskImporterPokec:
     def __init__(
         self,
         benchmark_context: BenchmarkContext,
diff --git a/tests/mgbench/workloads/pokec.py b/tests/mgbench/workloads/pokec.py
index 6733d38f2..4c05796b2 100644
--- a/tests/mgbench/workloads/pokec.py
+++ b/tests/mgbench/workloads/pokec.py
@@ -167,30 +167,62 @@ class Pokec(Workload):
 
     def benchmark__arango__shortest_path(self):
         vertex_from, vertex_to = self._get_random_from_to()
-        return (
+        memgraph = (
             "MATCH (n:User {id: $from}), (m:User {id: $to}) WITH n, m "
             "MATCH p=(n)-[*bfs..15]->(m) "
             "RETURN extract(n in nodes(p) | n.id) AS path",
             {"from": vertex_from, "to": vertex_to},
         )
+        neo4j = (
+            "MATCH (n:User {id: $from}), (m:User {id: $to}) WITH n, m "
+            "MATCH p=shortestPath((n)-[*..15]->(m)) "
+            "RETURN [n in nodes(p) | n.id] AS path",
+            {"from": vertex_from, "to": vertex_to},
+        )
+        if self._vendor == "memgraph":
+            return memgraph
+        else:
+            return neo4j
 
     def benchmark__arango__shortest_path_with_filter(self):
         vertex_from, vertex_to = self._get_random_from_to()
-        return (
+        memgraph = (
             "MATCH (n:User {id: $from}), (m:User {id: $to}) WITH n, m "
             "MATCH p=(n)-[*bfs..15 (e, n | n.age >= 18)]->(m) "
             "RETURN extract(n in nodes(p) | n.id) AS path",
             {"from": vertex_from, "to": vertex_to},
         )
 
+        neo4j = (
+            "MATCH (n:User {id: $from}), (m:User {id: $to}) WITH n, m "
+            "MATCH p=shortestPath((n)-[*..15]->(m)) "
+            "WHERE all(node in nodes(p) WHERE node.age >= 18) "
+            "RETURN [n in nodes(p) | n.id] AS path",
+            {"from": vertex_from, "to": vertex_to},
+        )
+        if self._vendor == "memgraph":
+            return memgraph
+        else:
+            return neo4j
+
     def benchmark__arango__allshortest_paths(self):
         vertex_from, vertex_to = self._get_random_from_to()
-        return (
+        memgraph = (
             "MATCH (n:User {id: $from}), (m:User {id: $to}) WITH n, m "
             "MATCH p=(n)-[*allshortest 2 (r, n | 1) total_weight]->(m) "
             "RETURN extract(n in nodes(p) | n.id) AS path",
             {"from": vertex_from, "to": vertex_to},
         )
+        neo4j = (
+            "MATCH (n:User {id: $from}), (m:User {id: $to}) WITH n, m "
+            "MATCH p = allShortestPaths((n)-[*..2]->(m)) "
+            "RETURN [node in nodes(p) | node.id] AS path",
+            {"from": vertex_from, "to": vertex_to},
+        )
+        if self._vendor == "memgraph":
+            return memgraph
+        else:
+            return neo4j
 
     # Our benchmark queries
 
diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt
index 44b24b6f6..008211af3 100644
--- a/tests/unit/CMakeLists.txt
+++ b/tests/unit/CMakeLists.txt
@@ -446,9 +446,16 @@ target_link_libraries(${test_prefix}raft_log_serialization gflags mg-coordinatio
 target_include_directories(${test_prefix}raft_log_serialization PRIVATE ${CMAKE_SOURCE_DIR}/include)
 endif()
 
-# Test Raft log serialization
+# Test CoordinatorClusterState
 if(MG_ENTERPRISE)
 add_unit_test(coordinator_cluster_state.cpp)
 target_link_libraries(${test_prefix}coordinator_cluster_state gflags mg-coordination mg-repl_coord_glue)
 target_include_directories(${test_prefix}coordinator_cluster_state PRIVATE ${CMAKE_SOURCE_DIR}/include)
 endif()
+
+# Test routing table
+if(MG_ENTERPRISE)
+add_unit_test(routing_table.cpp)
+target_link_libraries(${test_prefix}routing_table gflags mg-coordination mg-repl_coord_glue)
+target_include_directories(${test_prefix}routing_table PRIVATE ${CMAKE_SOURCE_DIR}/include)
+endif()
diff --git a/tests/unit/bolt_session.cpp b/tests/unit/bolt_session.cpp
index f0f3ae14c..411e13e3d 100644
--- a/tests/unit/bolt_session.cpp
+++ b/tests/unit/bolt_session.cpp
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -114,6 +114,14 @@ class TestSession final : public Session<TestInputStream, TestOutputStream> {
 
   bool Authenticate(const std::string & /*username*/, const std::string & /*password*/) override { return true; }
 
+#ifdef MG_ENTERPRISE
+  auto Route(std::map<std::string, Value> const & /*routing*/,
+             std::vector<memgraph::communication::bolt::Value> const & /*bookmarks*/,
+             std::map<std::string, Value> const & /*extra*/) -> std::map<std::string, Value> override {
+    return {};
+  }
+#endif
+
   std::optional<std::string> GetServerNameForInit() override { return std::nullopt; }
 
   void Configure(const std::map<std::string, memgraph::communication::bolt::Value> &) override {}
@@ -1027,104 +1035,115 @@ TEST(BoltSession, Noop) {
   }
 }
 
-TEST(BoltSession, Route) {
-  // Memgraph does not support route message, but it handles it
-  {
-    SCOPED_TRACE("v1");
-    INIT_VARS;
+TEST(BoltSession, Route){{SCOPED_TRACE("v1");
+INIT_VARS;
 
-    ExecuteHandshake(input_stream, session, output);
-    ExecuteInit(input_stream, session, output);
-    ASSERT_THROW(ExecuteCommand(input_stream, session, v4_3::route, sizeof(v4_3::route)), SessionException);
-    EXPECT_EQ(session.state_, State::Close);
-  }
-  {
-    SCOPED_TRACE("v4");
-    INIT_VARS;
+ExecuteHandshake(input_stream, session, output);
+ExecuteInit(input_stream, session, output);
+ASSERT_THROW(ExecuteCommand(input_stream, session, v4_3::route, sizeof(v4_3::route)), SessionException);
+EXPECT_EQ(session.state_, State::Close);
+}
+#ifdef MG_ENTERPRISE
+{
+  SCOPED_TRACE("v4");
+  INIT_VARS;
 
-    ExecuteHandshake(input_stream, session, output, v4_3::handshake_req, v4_3::handshake_resp);
-    ExecuteInit(input_stream, session, output, true);
-    ASSERT_NO_THROW(ExecuteCommand(input_stream, session, v4_3::route, sizeof(v4_3::route)));
-    static constexpr uint8_t expected_resp[] = {
-        0x00 /*two bytes of chunk header, chunk contains 64 bytes of data*/,
-        0x40,
-        0xb1 /*TinyStruct1*/,
-        0x7f /*Failure*/,
-        0xa2 /*TinyMap with 2 items*/,
-        0x84 /*TinyString with 4 chars*/,
-        'c',
-        'o',
-        'd',
-        'e',
-        0x82 /*TinyString with 2 chars*/,
-        '6',
-        '6',
-        0x87 /*TinyString with 7 chars*/,
-        'm',
-        'e',
-        's',
-        's',
-        'a',
-        'g',
-        'e',
-        0xd0 /*String*/,
-        0x2b /*With 43 chars*/,
-        'R',
-        'o',
-        'u',
-        't',
-        'e',
-        ' ',
-        'm',
-        'e',
-        's',
-        's',
-        'a',
-        'g',
-        'e',
-        ' ',
-        'i',
-        's',
-        ' ',
-        'n',
-        'o',
-        't',
-        ' ',
-        's',
-        'u',
-        'p',
-        'p',
-        'o',
-        'r',
-        't',
-        'e',
-        'd',
-        ' ',
-        'i',
-        'n',
-        ' ',
-        'M',
-        'e',
-        'm',
-        'g',
-        'r',
-        'a',
-        'p',
-        'h',
-        '!',
-        0x00 /*Terminating zeros*/,
-        0x00,
-    };
-    EXPECT_EQ(input_stream.size(), 0U);
-    CheckOutput(output, expected_resp, sizeof(expected_resp));
-    EXPECT_EQ(session.state_, State::Error);
+  ExecuteHandshake(input_stream, session, output, v4_3::handshake_req, v4_3::handshake_resp);
+  ExecuteInit(input_stream, session, output, true);
+  ASSERT_NO_THROW(ExecuteCommand(input_stream, session, v4_3::route, sizeof(v4_3::route)));
 
-    SCOPED_TRACE("Try to reset connection after ROUTE failed");
-    ASSERT_NO_THROW(ExecuteCommand(input_stream, session, v4::reset_req, sizeof(v4::reset_req)));
-    EXPECT_EQ(input_stream.size(), 0U);
-    CheckOutput(output, success_resp, sizeof(success_resp));
-    EXPECT_EQ(session.state_, State::Idle);
-  }
+  EXPECT_EQ(session.state_, State::Idle);
+  CheckSuccessMessage(output);
+}
+#else
+{
+  SCOPED_TRACE("v4");
+  INIT_VARS;
+
+  ExecuteHandshake(input_stream, session, output, v4_3::handshake_req, v4_3::handshake_resp);
+  ExecuteInit(input_stream, session, output, true);
+  ASSERT_NO_THROW(ExecuteCommand(input_stream, session, v4_3::route, sizeof(v4_3::route)));
+  static constexpr uint8_t expected_resp[] = {
+      0x00 /*two bytes of chunk header, chunk contains 64 bytes of data*/,
+      0x40,
+      0xb1 /*TinyStruct1*/,
+      0x7f /*Failure*/,
+      0xa2 /*TinyMap with 2 items*/,
+      0x84 /*TinyString with 4 chars*/,
+      'c',
+      'o',
+      'd',
+      'e',
+      0x82 /*TinyString with 2 chars*/,
+      '6',
+      '6',
+      0x87 /*TinyString with 7 chars*/,
+      'm',
+      'e',
+      's',
+      's',
+      'a',
+      'g',
+      'e',
+      0xd0 /*String*/,
+      0x2b /*With 43 chars*/,
+      'R',
+      'o',
+      'u',
+      't',
+      'e',
+      ' ',
+      'm',
+      'e',
+      's',
+      's',
+      'a',
+      'g',
+      'e',
+      ' ',
+      'i',
+      's',
+      ' ',
+      'n',
+      'o',
+      't',
+      ' ',
+      's',
+      'u',
+      'p',
+      'p',
+      'o',
+      'r',
+      't',
+      'e',
+      'd',
+      ' ',
+      'i',
+      'n',
+      ' ',
+      'M',
+      'e',
+      'm',
+      'g',
+      'r',
+      'a',
+      'p',
+      'h',
+      '!',
+      0x00 /*Terminating zeros*/,
+      0x00,
+  };
+  EXPECT_EQ(input_stream.size(), 0U);
+  CheckOutput(output, expected_resp, sizeof(expected_resp));
+  EXPECT_EQ(session.state_, State::Error);
+
+  SCOPED_TRACE("Try to reset connection after ROUTE failed");
+  ASSERT_NO_THROW(ExecuteCommand(input_stream, session, v4::reset_req, sizeof(v4::reset_req)));
+  EXPECT_EQ(input_stream.size(), 0U);
+  CheckOutput(output, success_resp, sizeof(success_resp));
+  EXPECT_EQ(session.state_, State::Idle);
+}
+#endif
 }
 
 TEST(BoltSession, Rollback) {
diff --git a/tests/unit/clearing_old_disk_data.cpp b/tests/unit/clearing_old_disk_data.cpp
index 395391e12..58682a845 100644
--- a/tests/unit/clearing_old_disk_data.cpp
+++ b/tests/unit/clearing_old_disk_data.cpp
@@ -179,3 +179,35 @@ TEST_F(ClearingOldDiskDataTest, TestNumOfEntriesWithEdgeValueUpdate) {
 
   ASSERT_EQ(disk_test_utils::GetRealNumberOfEntriesInRocksDB(tx_db), 5);
 }
+
+TEST_F(ClearingOldDiskDataTest, TestTimestampAfterCommit) {
+  auto *tx_db = disk_storage->GetRocksDBStorage()->db_;
+  ASSERT_EQ(disk_test_utils::GetRealNumberOfEntriesInRocksDB(tx_db), 0);
+
+  auto acc1 = disk_storage->Access(ReplicationRole::MAIN);
+  auto vertex1 = acc1->CreateVertex();
+  auto label1 = acc1->NameToLabel("DiskLabel");
+  auto property1 = acc1->NameToProperty("DiskProperty");
+  ASSERT_TRUE(vertex1.AddLabel(label1).HasValue());
+  ASSERT_TRUE(vertex1.SetProperty(property1, memgraph::storage::PropertyValue(10)).HasValue());
+  ASSERT_FALSE(acc1->Commit().HasError());
+  ASSERT_EQ(disk_test_utils::GetRealNumberOfEntriesInRocksDB(tx_db), 1);
+
+  auto saved_timestamp = disk_storage->GetDurableMetadata()->LoadTimestampIfExists();
+  ASSERT_EQ(saved_timestamp.has_value(), true);
+  ASSERT_EQ(disk_storage->timestamp_, saved_timestamp);
+
+  auto acc2 = disk_storage->Access(ReplicationRole::MAIN);
+  auto vertex2 = acc2->CreateVertex();
+  auto label2 = acc2->NameToLabel("DiskLabel2");
+  auto property2 = acc2->NameToProperty("DiskProperty2");
+
+  ASSERT_TRUE(vertex2.AddLabel(label2).HasValue());
+  ASSERT_TRUE(vertex2.SetProperty(property2, memgraph::storage::PropertyValue(10)).HasValue());
+  ASSERT_FALSE(acc2->Commit().HasError());
+  ASSERT_EQ(disk_test_utils::GetRealNumberOfEntriesInRocksDB(tx_db), 2);
+
+  saved_timestamp = disk_storage->GetDurableMetadata()->LoadTimestampIfExists();
+  ASSERT_EQ(saved_timestamp.has_value(), true);
+  ASSERT_EQ(disk_storage->timestamp_, saved_timestamp);
+}
diff --git a/tests/unit/coordinator_cluster_state.cpp b/tests/unit/coordinator_cluster_state.cpp
index 8df2797f2..e7ccf2ada 100644
--- a/tests/unit/coordinator_cluster_state.cpp
+++ b/tests/unit/coordinator_cluster_state.cpp
@@ -10,6 +10,7 @@
 // licenses/APL.txt.
 
 #include "nuraft/coordinator_cluster_state.hpp"
+#include "io/network/endpoint.hpp"
 #include "nuraft/coordinator_state_machine.hpp"
 #include "replication_coordination_glue/role.hpp"
 
@@ -21,11 +22,12 @@
 
 #include "libnuraft/nuraft.hxx"
 
-using memgraph::coordination::CoordinatorClientConfig;
 using memgraph::coordination::CoordinatorClusterState;
 using memgraph::coordination::CoordinatorStateMachine;
-using memgraph::coordination::InstanceState;
+using memgraph::coordination::CoordinatorToReplicaConfig;
 using memgraph::coordination::RaftLogAction;
+using memgraph::coordination::ReplicationInstanceState;
+using memgraph::io::network::Endpoint;
 using memgraph::replication_coordination_glue::ReplicationMode;
 using memgraph::replication_coordination_glue::ReplicationRole;
 using nuraft::buffer;
@@ -42,20 +44,22 @@ class CoordinatorClusterStateTest : public ::testing::Test {
                                      "MG_tests_unit_coordinator_cluster_state"};
 };
 
-TEST_F(CoordinatorClusterStateTest, InstanceStateSerialization) {
-  InstanceState instance_state{
-      CoordinatorClientConfig{"instance3",
-                              "127.0.0.1",
-                              10112,
-                              std::chrono::seconds{1},
-                              std::chrono::seconds{5},
-                              std::chrono::seconds{10},
-                              {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10001},
-                              .ssl = std::nullopt},
+TEST_F(CoordinatorClusterStateTest, ReplicationInstanceStateSerialization) {
+  ReplicationInstanceState instance_state{
+      CoordinatorToReplicaConfig{.instance_name = "instance3",
+                                 .mgt_server = Endpoint{"127.0.0.1", 10112},
+                                 .bolt_server = Endpoint{"127.0.0.1", 7687},
+                                 .replication_client_info = {.instance_name = "instance_name",
+                                                             .replication_mode = ReplicationMode::ASYNC,
+                                                             .replication_server = Endpoint{"127.0.0.1", 10001}},
+                                 .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                 .instance_down_timeout_sec = std::chrono::seconds{5},
+                                 .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
+                                 .ssl = std::nullopt},
       ReplicationRole::MAIN};
 
   nlohmann::json j = instance_state;
-  InstanceState deserialized_instance_state = j.get<InstanceState>();
+  ReplicationInstanceState deserialized_instance_state = j.get<ReplicationInstanceState>();
 
   EXPECT_EQ(instance_state.config, deserialized_instance_state.config);
   EXPECT_EQ(instance_state.status, deserialized_instance_state.status);
@@ -65,13 +69,16 @@ TEST_F(CoordinatorClusterStateTest, DoActionRegisterInstances) {
   auto coordinator_cluster_state = memgraph::coordination::CoordinatorClusterState{};
 
   {
-    CoordinatorClientConfig config{"instance1",
-                                   "127.0.0.1",
-                                   10111,
-                                   std::chrono::seconds{1},
-                                   std::chrono::seconds{5},
-                                   std::chrono::seconds{10},
-                                   {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10001},
+    auto config =
+        CoordinatorToReplicaConfig{.instance_name = "instance1",
+                                   .mgt_server = Endpoint{"127.0.0.1", 10111},
+                                   .bolt_server = Endpoint{"127.0.0.1", 7687},
+                                   .replication_client_info = {.instance_name = "instance1",
+                                                               .replication_mode = ReplicationMode::ASYNC,
+                                                               .replication_server = Endpoint{"127.0.0.1", 10001}},
+                                   .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                   .instance_down_timeout_sec = std::chrono::seconds{5},
+                                   .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
                                    .ssl = std::nullopt};
 
     auto buffer = CoordinatorStateMachine::SerializeRegisterInstance(config);
@@ -80,13 +87,16 @@ TEST_F(CoordinatorClusterStateTest, DoActionRegisterInstances) {
     coordinator_cluster_state.DoAction(payload, action);
   }
   {
-    CoordinatorClientConfig config{"instance2",
-                                   "127.0.0.1",
-                                   10112,
-                                   std::chrono::seconds{1},
-                                   std::chrono::seconds{5},
-                                   std::chrono::seconds{10},
-                                   {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10002},
+    auto config =
+        CoordinatorToReplicaConfig{.instance_name = "instance2",
+                                   .mgt_server = Endpoint{"127.0.0.1", 10112},
+                                   .bolt_server = Endpoint{"127.0.0.1", 7688},
+                                   .replication_client_info = {.instance_name = "instance2",
+                                                               .replication_mode = ReplicationMode::ASYNC,
+                                                               .replication_server = Endpoint{"127.0.0.1", 10002}},
+                                   .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                   .instance_down_timeout_sec = std::chrono::seconds{5},
+                                   .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
                                    .ssl = std::nullopt};
 
     auto buffer = CoordinatorStateMachine::SerializeRegisterInstance(config);
@@ -95,13 +105,16 @@ TEST_F(CoordinatorClusterStateTest, DoActionRegisterInstances) {
     coordinator_cluster_state.DoAction(payload, action);
   }
   {
-    CoordinatorClientConfig config{"instance3",
-                                   "127.0.0.1",
-                                   10113,
-                                   std::chrono::seconds{1},
-                                   std::chrono::seconds{5},
-                                   std::chrono::seconds{10},
-                                   {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10003},
+    auto config =
+        CoordinatorToReplicaConfig{.instance_name = "instance3",
+                                   .mgt_server = Endpoint{"127.0.0.1", 10113},
+                                   .bolt_server = Endpoint{"127.0.0.1", 7689},
+                                   .replication_client_info = {.instance_name = "instance3",
+                                                               .replication_mode = ReplicationMode::ASYNC,
+                                                               .replication_server = Endpoint{"127.0.0.1", 10003}},
+                                   .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                   .instance_down_timeout_sec = std::chrono::seconds{5},
+                                   .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
                                    .ssl = std::nullopt};
 
     auto buffer = CoordinatorStateMachine::SerializeRegisterInstance(config);
@@ -110,13 +123,16 @@ TEST_F(CoordinatorClusterStateTest, DoActionRegisterInstances) {
     coordinator_cluster_state.DoAction(payload, action);
   }
   {
-    CoordinatorClientConfig config{"instance4",
-                                   "127.0.0.1",
-                                   10114,
-                                   std::chrono::seconds{1},
-                                   std::chrono::seconds{5},
-                                   std::chrono::seconds{10},
-                                   {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10004},
+    auto config =
+        CoordinatorToReplicaConfig{.instance_name = "instance4",
+                                   .mgt_server = Endpoint{"127.0.0.1", 10114},
+                                   .bolt_server = Endpoint{"127.0.0.1", 7690},
+                                   .replication_client_info = {.instance_name = "instance4",
+                                                               .replication_mode = ReplicationMode::ASYNC,
+                                                               .replication_server = Endpoint{"127.0.0.1", 10004}},
+                                   .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                   .instance_down_timeout_sec = std::chrono::seconds{5},
+                                   .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
                                    .ssl = std::nullopt};
 
     auto buffer = CoordinatorStateMachine::SerializeRegisterInstance(config);
@@ -125,13 +141,16 @@ TEST_F(CoordinatorClusterStateTest, DoActionRegisterInstances) {
     coordinator_cluster_state.DoAction(payload, action);
   }
   {
-    CoordinatorClientConfig config{"instance5",
-                                   "127.0.0.1",
-                                   10115,
-                                   std::chrono::seconds{1},
-                                   std::chrono::seconds{5},
-                                   std::chrono::seconds{10},
-                                   {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10005},
+    auto config =
+        CoordinatorToReplicaConfig{.instance_name = "instance5",
+                                   .mgt_server = Endpoint{"127.0.0.1", 10115},
+                                   .bolt_server = Endpoint{"127.0.0.1", 7691},
+                                   .replication_client_info = {.instance_name = "instance5",
+                                                               .replication_mode = ReplicationMode::ASYNC,
+                                                               .replication_server = Endpoint{"127.0.0.1", 10005}},
+                                   .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                   .instance_down_timeout_sec = std::chrono::seconds{5},
+                                   .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
                                    .ssl = std::nullopt};
 
     auto buffer = CoordinatorStateMachine::SerializeRegisterInstance(config);
@@ -140,13 +159,16 @@ TEST_F(CoordinatorClusterStateTest, DoActionRegisterInstances) {
     coordinator_cluster_state.DoAction(payload, action);
   }
   {
-    CoordinatorClientConfig config{"instance6",
-                                   "127.0.0.1",
-                                   10116,
-                                   std::chrono::seconds{1},
-                                   std::chrono::seconds{5},
-                                   std::chrono::seconds{10},
-                                   {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10006},
+    auto config =
+        CoordinatorToReplicaConfig{.instance_name = "instance6",
+                                   .mgt_server = Endpoint{"127.0.0.1", 10116},
+                                   .bolt_server = Endpoint{"127.0.0.1", 7692},
+                                   .replication_client_info = {.instance_name = "instance6",
+                                                               .replication_mode = ReplicationMode::ASYNC,
+                                                               .replication_server = Endpoint{"127.0.0.1", 10006}},
+                                   .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                   .instance_down_timeout_sec = std::chrono::seconds{5},
+                                   .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
                                    .ssl = std::nullopt};
 
     auto buffer = CoordinatorStateMachine::SerializeRegisterInstance(config);
@@ -159,5 +181,6 @@ TEST_F(CoordinatorClusterStateTest, DoActionRegisterInstances) {
   coordinator_cluster_state.Serialize(data);
 
   auto deserialized_coordinator_cluster_state = CoordinatorClusterState::Deserialize(*data);
-  ASSERT_EQ(coordinator_cluster_state.GetInstances(), deserialized_coordinator_cluster_state.GetInstances());
+  ASSERT_EQ(coordinator_cluster_state.GetReplicationInstances(),
+            deserialized_coordinator_cluster_state.GetReplicationInstances());
 }
diff --git a/tests/unit/query_dump.cpp b/tests/unit/query_dump.cpp
index a2ca2864d..2dd1e7ac7 100644
--- a/tests/unit/query_dump.cpp
+++ b/tests/unit/query_dump.cpp
@@ -71,6 +71,11 @@ struct DatabaseState {
     std::string property;
   };
 
+  struct TextItem {
+    std::string index_name;
+    std::string label;
+  };
+
   struct LabelPropertiesItem {
     std::string label;
     std::set<std::string, std::less<>> properties;
@@ -80,6 +85,7 @@ struct DatabaseState {
   std::set<Edge> edges;
   std::set<LabelItem> label_indices;
   std::set<LabelPropertyItem> label_property_indices;
+  std::set<TextItem> text_indices;
   std::set<LabelPropertyItem> existence_constraints;
   std::set<LabelPropertiesItem> unique_constraints;
 };
@@ -106,6 +112,10 @@ bool operator<(const DatabaseState::LabelPropertyItem &first, const DatabaseStat
   return first.property < second.property;
 }
 
+bool operator<(const DatabaseState::TextItem &first, const DatabaseState::TextItem &second) {
+  return first.index_name != second.index_name ? first.index_name < second.index_name : first.label < second.label;
+}
+
 bool operator<(const DatabaseState::LabelPropertiesItem &first, const DatabaseState::LabelPropertiesItem &second) {
   if (first.label != second.label) return first.label < second.label;
   return first.properties < second.properties;
@@ -128,6 +138,10 @@ bool operator==(const DatabaseState::LabelPropertyItem &first, const DatabaseSta
   return first.label == second.label && first.property == second.property;
 }
 
+bool operator==(const DatabaseState::TextItem &first, const DatabaseState::TextItem &second) {
+  return first.index_name == second.index_name && first.label == second.label;
+}
+
 bool operator==(const DatabaseState::LabelPropertiesItem &first, const DatabaseState::LabelPropertiesItem &second) {
   return first.label == second.label && first.properties == second.properties;
 }
@@ -185,6 +199,7 @@ DatabaseState GetState(memgraph::storage::Storage *db) {
   // Capture all indices
   std::set<DatabaseState::LabelItem> label_indices;
   std::set<DatabaseState::LabelPropertyItem> label_property_indices;
+  std::set<DatabaseState::TextItem> text_indices;
   {
     auto info = dba->ListAllIndices();
     for (const auto &item : info.label) {
@@ -193,6 +208,9 @@ DatabaseState GetState(memgraph::storage::Storage *db) {
     for (const auto &item : info.label_property) {
       label_property_indices.insert({dba->LabelToName(item.first), dba->PropertyToName(item.second)});
     }
+    for (const auto &item : info.text_indices) {
+      text_indices.insert({item.first, dba->LabelToName(item.second)});
+    }
   }
 
   // Capture all constraints
@@ -212,7 +230,8 @@ DatabaseState GetState(memgraph::storage::Storage *db) {
     }
   }
 
-  return {vertices, edges, label_indices, label_property_indices, existence_constraints, unique_constraints};
+  return {vertices,          edges, label_indices, label_property_indices, text_indices, existence_constraints,
+          unique_constraints};
 }
 
 auto Execute(memgraph::query::InterpreterContext *context, memgraph::dbms::DatabaseAccess db,
diff --git a/tests/unit/raft_log_serialization.cpp b/tests/unit/raft_log_serialization.cpp
index 8550cf5b8..3f24b43c7 100644
--- a/tests/unit/raft_log_serialization.cpp
+++ b/tests/unit/raft_log_serialization.cpp
@@ -9,7 +9,8 @@
 // by the Apache License, Version 2.0, included in the file
 // licenses/APL.txt.
 
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
+#include "io/network/endpoint.hpp"
 #include "nuraft/coordinator_state_machine.hpp"
 #include "nuraft/raft_log_action.hpp"
 #include "utils/file.hpp"
@@ -19,10 +20,11 @@
 #include <gtest/gtest.h>
 #include "json/json.hpp"
 
-using memgraph::coordination::CoordinatorClientConfig;
 using memgraph::coordination::CoordinatorStateMachine;
+using memgraph::coordination::CoordinatorToReplicaConfig;
 using memgraph::coordination::RaftLogAction;
-using memgraph::coordination::ReplClientInfo;
+using memgraph::coordination::ReplicationClientInfo;
+using memgraph::io::network::Endpoint;
 using memgraph::replication_coordination_glue::ReplicationMode;
 using memgraph::utils::UUID;
 
@@ -36,26 +38,29 @@ class RaftLogSerialization : public ::testing::Test {
 };
 
 TEST_F(RaftLogSerialization, ReplClientInfo) {
-  ReplClientInfo info{"instance_name", ReplicationMode::SYNC, "127.0.0.1", 10111};
+  ReplicationClientInfo info{.instance_name = "instance_name",
+                             .replication_mode = ReplicationMode::SYNC,
+                             .replication_server = Endpoint{"127.0.0.1", 10111}};
 
   nlohmann::json j = info;
-  ReplClientInfo info2 = j.get<memgraph::coordination::ReplClientInfo>();
+  ReplicationClientInfo info2 = j.get<memgraph::coordination::ReplicationClientInfo>();
 
   ASSERT_EQ(info, info2);
 }
 
-TEST_F(RaftLogSerialization, CoordinatorClientConfig) {
-  CoordinatorClientConfig config{"instance3",
-                                 "127.0.0.1",
-                                 10112,
-                                 std::chrono::seconds{1},
-                                 std::chrono::seconds{5},
-                                 std::chrono::seconds{10},
-                                 {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10001},
-                                 .ssl = std::nullopt};
+TEST_F(RaftLogSerialization, CoordinatorToReplicaConfig) {
+  CoordinatorToReplicaConfig config{.instance_name = "instance3",
+                                    .mgt_server = Endpoint{"127.0.0.1", 10112},
+                                    .replication_client_info = {.instance_name = "instance_name",
+                                                                .replication_mode = ReplicationMode::ASYNC,
+                                                                .replication_server = Endpoint{"127.0.0.1", 10001}},
+                                    .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                    .instance_down_timeout_sec = std::chrono::seconds{5},
+                                    .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
+                                    .ssl = std::nullopt};
 
   nlohmann::json j = config;
-  CoordinatorClientConfig config2 = j.get<memgraph::coordination::CoordinatorClientConfig>();
+  CoordinatorToReplicaConfig config2 = j.get<memgraph::coordination::CoordinatorToReplicaConfig>();
 
   ASSERT_EQ(config, config2);
 }
@@ -96,8 +101,8 @@ TEST_F(RaftLogSerialization, RaftLogActionDemote) {
   ASSERT_EQ(action, action2);
 }
 
-TEST_F(RaftLogSerialization, RaftLogActionUpdateUUID) {
-  auto action = RaftLogAction::UPDATE_UUID;
+TEST_F(RaftLogSerialization, RaftLogActionUpdateUUIDForInstance) {
+  auto action = RaftLogAction::UPDATE_UUID_FOR_INSTANCE;
 
   nlohmann::json j = action;
   RaftLogAction action2 = j.get<memgraph::coordination::RaftLogAction>();
@@ -106,19 +111,20 @@ TEST_F(RaftLogSerialization, RaftLogActionUpdateUUID) {
 }
 
 TEST_F(RaftLogSerialization, RegisterInstance) {
-  CoordinatorClientConfig config{"instance3",
-                                 "127.0.0.1",
-                                 10112,
-                                 std::chrono::seconds{1},
-                                 std::chrono::seconds{5},
-                                 std::chrono::seconds{10},
-                                 {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10001},
-                                 .ssl = std::nullopt};
+  CoordinatorToReplicaConfig config{.instance_name = "instance3",
+                                    .mgt_server = Endpoint{"127.0.0.1", 10112},
+                                    .replication_client_info = {.instance_name = "instance_name",
+                                                                .replication_mode = ReplicationMode::ASYNC,
+                                                                .replication_server = Endpoint{"127.0.0.1", 10001}},
+                                    .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                    .instance_down_timeout_sec = std::chrono::seconds{5},
+                                    .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
+                                    .ssl = std::nullopt};
 
   auto buffer = CoordinatorStateMachine::SerializeRegisterInstance(config);
   auto [payload, action] = CoordinatorStateMachine::DecodeLog(*buffer);
   ASSERT_EQ(action, RaftLogAction::REGISTER_REPLICATION_INSTANCE);
-  ASSERT_EQ(config, std::get<CoordinatorClientConfig>(payload));
+  ASSERT_EQ(config, std::get<CoordinatorToReplicaConfig>(payload));
 }
 
 TEST_F(RaftLogSerialization, UnregisterInstance) {
@@ -129,10 +135,14 @@ TEST_F(RaftLogSerialization, UnregisterInstance) {
 }
 
 TEST_F(RaftLogSerialization, SetInstanceAsMain) {
-  auto buffer = CoordinatorStateMachine::SerializeSetInstanceAsMain("instance3");
+  auto instance_uuid_update =
+      memgraph::coordination::InstanceUUIDUpdate{.instance_name = "instance3", .uuid = memgraph::utils::UUID{}};
+  auto buffer = CoordinatorStateMachine::SerializeSetInstanceAsMain(instance_uuid_update);
   auto [payload, action] = CoordinatorStateMachine::DecodeLog(*buffer);
   ASSERT_EQ(action, RaftLogAction::SET_INSTANCE_AS_MAIN);
-  ASSERT_EQ("instance3", std::get<std::string>(payload));
+  ASSERT_EQ(instance_uuid_update.instance_name,
+            std::get<memgraph::coordination::InstanceUUIDUpdate>(payload).instance_name);
+  ASSERT_EQ(instance_uuid_update.uuid, std::get<memgraph::coordination::InstanceUUIDUpdate>(payload).uuid);
 }
 
 TEST_F(RaftLogSerialization, SetInstanceAsReplica) {
@@ -142,10 +152,10 @@ TEST_F(RaftLogSerialization, SetInstanceAsReplica) {
   ASSERT_EQ("instance3", std::get<std::string>(payload));
 }
 
-TEST_F(RaftLogSerialization, UpdateUUID) {
+TEST_F(RaftLogSerialization, UpdateUUIDForNewMain) {
   UUID uuid;
-  auto buffer = CoordinatorStateMachine::SerializeUpdateUUID(uuid);
+  auto buffer = CoordinatorStateMachine::SerializeUpdateUUIDForNewMain(uuid);
   auto [payload, action] = CoordinatorStateMachine::DecodeLog(*buffer);
-  ASSERT_EQ(action, RaftLogAction::UPDATE_UUID);
+  ASSERT_EQ(action, RaftLogAction::UPDATE_UUID_OF_NEW_MAIN);
   ASSERT_EQ(uuid, std::get<UUID>(payload));
 }
diff --git a/tests/unit/routing_table.cpp b/tests/unit/routing_table.cpp
new file mode 100644
index 000000000..42815d461
--- /dev/null
+++ b/tests/unit/routing_table.cpp
@@ -0,0 +1,176 @@
+// Copyright 2024 Memgraph Ltd.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+// License, and you may not use this file except in compliance with the Business Source License.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+#include "auth/auth.hpp"
+#include "coordination/coordinator_instance.hpp"
+#include "flags/run_time_configurable.hpp"
+#include "interpreter_faker.hpp"
+#include "io/network/endpoint.hpp"
+#include "license/license.hpp"
+#include "replication_handler/replication_handler.hpp"
+#include "storage/v2/config.hpp"
+
+#include "utils/file.hpp"
+
+#include <gflags/gflags.h>
+#include <gtest/gtest.h>
+#include "json/json.hpp"
+
+using memgraph::coordination::CoordinatorInstance;
+using memgraph::coordination::CoordinatorToCoordinatorConfig;
+using memgraph::coordination::CoordinatorToReplicaConfig;
+using memgraph::coordination::RaftState;
+using memgraph::coordination::ReplicationClientInfo;
+using memgraph::io::network::Endpoint;
+using memgraph::replication::ReplicationHandler;
+using memgraph::replication_coordination_glue::ReplicationMode;
+using memgraph::storage::Config;
+
+// class MockCoordinatorInstance : CoordinatorInstance {
+//   auto AddCoordinatorInstance(CoordinatorToCoordinatorConfig const &config) -> void override {}
+// };
+
+class RoutingTableTest : public ::testing::Test {
+ protected:
+  std::filesystem::path main_data_directory{std::filesystem::temp_directory_path() /
+                                            "MG_tests_unit_coordinator_cluster_state"};
+  std::filesystem::path repl1_data_directory{std::filesystem::temp_directory_path() /
+                                             "MG_test_unit_storage_v2_replication_repl"};
+  std::filesystem::path repl2_data_directory{std::filesystem::temp_directory_path() /
+                                             "MG_test_unit_storage_v2_replication_repl2"};
+  void SetUp() override { Clear(); }
+
+  void TearDown() override { Clear(); }
+
+  Config main_conf = [&] {
+    Config config{
+        .durability =
+            {
+                .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            },
+        .salient.items = {.properties_on_edges = true},
+    };
+    UpdatePaths(config, main_data_directory);
+    return config;
+  }();
+  Config repl1_conf = [&] {
+    Config config{
+        .durability =
+            {
+                .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            },
+        .salient.items = {.properties_on_edges = true},
+    };
+    UpdatePaths(config, repl1_data_directory);
+    return config;
+  }();
+  Config repl2_conf = [&] {
+    Config config{
+        .durability =
+            {
+                .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
+            },
+        .salient.items = {.properties_on_edges = true},
+    };
+    UpdatePaths(config, repl2_data_directory);
+    return config;
+  }();
+
+  const std::string local_host = ("127.0.0.1");
+  const std::array<uint16_t, 2> ports{10000, 20000};
+  const std::array<std::string, 2> replicas = {"REPLICA1", "REPLICA2"};
+
+ private:
+  void Clear() {
+    if (std::filesystem::exists(main_data_directory)) std::filesystem::remove_all(main_data_directory);
+    if (std::filesystem::exists(repl1_data_directory)) std::filesystem::remove_all(repl1_data_directory);
+    if (std::filesystem::exists(repl2_data_directory)) std::filesystem::remove_all(repl2_data_directory);
+  }
+};
+
+struct MinMemgraph {
+  MinMemgraph(const memgraph::storage::Config &conf)
+      : auth{conf.durability.storage_directory / "auth", memgraph::auth::Auth::Config{/* default */}},
+        repl_state{ReplicationStateRootPath(conf)},
+        dbms{conf, repl_state
+#ifdef MG_ENTERPRISE
+             ,
+             auth, true
+#endif
+        },
+        db_acc{dbms.Get()},
+        db{*db_acc.get()},
+        repl_handler(repl_state, dbms
+#ifdef MG_ENTERPRISE
+                     ,
+                     system_, auth
+#endif
+        ) {
+  }
+  memgraph::auth::SynchedAuth auth;
+  memgraph::system::System system_;
+  memgraph::replication::ReplicationState repl_state;
+  memgraph::dbms::DbmsHandler dbms;
+  memgraph::dbms::DatabaseAccess db_acc;
+  memgraph::dbms::Database &db;
+  ReplicationHandler repl_handler;
+};
+;
+
+TEST_F(RoutingTableTest, GetSingleRouterRoutingTable) {
+  CoordinatorInstance instance1;
+  auto routing = std::map<std::string, std::string>{{"address", "localhost:7688"}};
+  auto routing_table = instance1.GetRoutingTable(routing);
+
+  ASSERT_EQ(routing_table.size(), 1);
+
+  auto const routers = routing_table[0];
+  ASSERT_EQ(routers.first, std::vector<std::string>{"localhost:7688"});
+  ASSERT_EQ(routers.second, "ROUTE");
+}
+
+TEST_F(RoutingTableTest, GetMixedRoutingTable) {
+  auto instance1 = RaftState::MakeRaftState([]() {}, []() {});
+  auto routing = std::map<std::string, std::string>{{"address", "localhost:7690"}};
+  instance1.AppendRegisterReplicationInstanceLog(CoordinatorToReplicaConfig{
+      .instance_name = "instance2",
+      .mgt_server = Endpoint{"127.0.0.1", 10011},
+      .bolt_server = Endpoint{"127.0.0.1", 7687},
+      .replication_client_info = ReplicationClientInfo{.instance_name = "instance2",
+                                                       .replication_mode = ReplicationMode::ASYNC,
+                                                       .replication_server = Endpoint{"127.0.0.1", 10001}}});
+  instance1.GetAllCoordinators();
+  // auto routing_table = instance1.GetRoutingTable(routing);
+
+  // ASSERT_EQ(routing_table.size(), 1);
+  // auto const routers = routing_table[0];
+  // ASSERT_EQ(routers.second, "ROUTE");
+}
+
+// TEST_F(RoutingTableTest, GetMultipleRoutersRoutingTable) {
+//
+//   CoordinatorInstance instance1;
+//   instance1.AddCoordinatorInstance(CoordinatorToCoordinatorConfig{.coordinator_server_id = 1,
+//                                                                   .bolt_server = Endpoint{"127.0.0.1", 7689},
+//                                                                   .coordinator_server = Endpoint{"127.0.0.1",
+//                                                                   10111}});
+//
+//   auto routing = std::map<std::string, std::string>{{"address", "localhost:7688"}};
+//   auto routing_table = instance1.GetRoutingTable(routing);
+//
+//   ASSERT_EQ(routing_table.size(), 1);
+//
+//   auto const routers = routing_table[0];
+//   ASSERT_EQ(routers.second, "ROUTE");
+//   ASSERT_EQ(routers.first.size(), 2);
+//   auto const expected_routers = std::vector<std::string>{"localhost:7689", "localhost:7688"};
+//   ASSERT_EQ(routers.first, expected_routers);
+// }
diff --git a/tests/unit/slk_advanced.cpp b/tests/unit/slk_advanced.cpp
index f41946388..46254746a 100644
--- a/tests/unit/slk_advanced.cpp
+++ b/tests/unit/slk_advanced.cpp
@@ -11,8 +11,9 @@
 
 #include <gtest/gtest.h>
 
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "coordination/coordinator_slk.hpp"
+#include "io/network/endpoint.hpp"
 #include "replication/config.hpp"
 #include "replication_coordination_glue/mode.hpp"
 #include "slk_common.hpp"
@@ -20,6 +21,8 @@
 #include "storage/v2/replication/slk.hpp"
 #include "storage/v2/temporal.hpp"
 
+using memgraph::io::network::Endpoint;
+
 TEST(SlkAdvanced, PropertyValueList) {
   std::vector<memgraph::storage::PropertyValue> original{
       memgraph::storage::PropertyValue("hello world!"),
@@ -119,24 +122,19 @@ TEST(SlkAdvanced, PropertyValueComplex) {
 }
 
 TEST(SlkAdvanced, ReplicationClientConfigs) {
-  using ReplicationClientInfo = memgraph::coordination::CoordinatorClientConfig::ReplicationClientInfo;
+  using ReplicationClientInfo = memgraph::coordination::ReplicationClientInfo;
   using ReplicationClientInfoVec = std::vector<ReplicationClientInfo>;
   using ReplicationMode = memgraph::replication_coordination_glue::ReplicationMode;
 
   ReplicationClientInfoVec original{ReplicationClientInfo{.instance_name = "replica1",
                                                           .replication_mode = ReplicationMode::SYNC,
-                                                          .replication_ip_address = "127.0.0.1",
-                                                          .replication_port = 10000},
+                                                          .replication_server = Endpoint{"127.0.0.1", 10000}},
                                     ReplicationClientInfo{.instance_name = "replica2",
                                                           .replication_mode = ReplicationMode::ASYNC,
-                                                          .replication_ip_address = "127.0.1.1",
-                                                          .replication_port = 10010},
-                                    ReplicationClientInfo{
-                                        .instance_name = "replica3",
-                                        .replication_mode = ReplicationMode::ASYNC,
-                                        .replication_ip_address = "127.1.1.1",
-                                        .replication_port = 1110,
-                                    }};
+                                                          .replication_server = Endpoint{"127.0.0.1", 10010}},
+                                    ReplicationClientInfo{.instance_name = "replica3",
+                                                          .replication_mode = ReplicationMode::ASYNC,
+                                                          .replication_server = Endpoint{"127.0.0.1", 10011}}};
 
   memgraph::slk::Loopback loopback;
   auto builder = loopback.GetBuilder();
diff --git a/tests/unit/storage_v2_decoder_encoder.cpp b/tests/unit/storage_v2_decoder_encoder.cpp
index 15db49b1c..0264e2287 100644
--- a/tests/unit/storage_v2_decoder_encoder.cpp
+++ b/tests/unit/storage_v2_decoder_encoder.cpp
@@ -358,6 +358,8 @@ TEST_F(DecoderEncoderTest, PropertyValueInvalidMarker) {
         case memgraph::storage::durability::Marker::DELTA_LABEL_PROPERTY_INDEX_STATS_CLEAR:
         case memgraph::storage::durability::Marker::DELTA_EDGE_TYPE_INDEX_CREATE:
         case memgraph::storage::durability::Marker::DELTA_EDGE_TYPE_INDEX_DROP:
+        case memgraph::storage::durability::Marker::DELTA_TEXT_INDEX_CREATE:
+        case memgraph::storage::durability::Marker::DELTA_TEXT_INDEX_DROP:
         case memgraph::storage::durability::Marker::DELTA_EXISTENCE_CONSTRAINT_CREATE:
         case memgraph::storage::durability::Marker::DELTA_EXISTENCE_CONSTRAINT_DROP:
         case memgraph::storage::durability::Marker::DELTA_UNIQUE_CONSTRAINT_CREATE:
diff --git a/tests/unit/storage_v2_get_info.cpp b/tests/unit/storage_v2_get_info.cpp
index 71dbc1a8d..ee5c1bb62 100644
--- a/tests/unit/storage_v2_get_info.cpp
+++ b/tests/unit/storage_v2_get_info.cpp
@@ -146,6 +146,7 @@ TYPED_TEST(InfoTest, InfoCheck) {
   ASSERT_LT(info.disk_usage, 1000'000);
   ASSERT_EQ(info.label_indices, 1);
   ASSERT_EQ(info.label_property_indices, 1);
+  ASSERT_EQ(info.text_indices, 0);
   ASSERT_EQ(info.existence_constraints, 0);
   ASSERT_EQ(info.unique_constraints, 2);
   ASSERT_EQ(info.storage_mode, this->mode);
diff --git a/tests/unit/storage_v2_wal_file.cpp b/tests/unit/storage_v2_wal_file.cpp
index 4094090f5..a94b20590 100644
--- a/tests/unit/storage_v2_wal_file.cpp
+++ b/tests/unit/storage_v2_wal_file.cpp
@@ -53,6 +53,10 @@ memgraph::storage::durability::WalDeltaData::Type StorageMetadataOperationToWalD
       return memgraph::storage::durability::WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_SET;
     case memgraph::storage::durability::StorageMetadataOperation::LABEL_PROPERTY_INDEX_STATS_CLEAR:
       return memgraph::storage::durability::WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_CLEAR;
+    case memgraph::storage::durability::StorageMetadataOperation::TEXT_INDEX_CREATE:
+      return memgraph::storage::durability::WalDeltaData::Type::TEXT_INDEX_CREATE;
+    case memgraph::storage::durability::StorageMetadataOperation::TEXT_INDEX_DROP:
+      return memgraph::storage::durability::WalDeltaData::Type::TEXT_INDEX_DROP;
     case memgraph::storage::durability::StorageMetadataOperation::EXISTENCE_CONSTRAINT_CREATE:
       return memgraph::storage::durability::WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE;
     case memgraph::storage::durability::StorageMetadataOperation::EXISTENCE_CONSTRAINT_DROP:
@@ -252,7 +256,7 @@ class DeltaGenerator final {
         ASSERT_TRUE(false) << "Unexpected statistics operation!";
       }
     }
-    wal_file_.AppendOperation(operation, label_id, property_ids, l_stats, lp_stats, timestamp_);
+    wal_file_.AppendOperation(operation, std::nullopt, label_id, property_ids, l_stats, lp_stats, timestamp_);
     if (valid_) {
       UpdateStats(timestamp_, 1);
       memgraph::storage::durability::WalDeltaData data;
@@ -271,6 +275,8 @@ class DeltaGenerator final {
           break;
         case memgraph::storage::durability::StorageMetadataOperation::LABEL_PROPERTY_INDEX_CREATE:
         case memgraph::storage::durability::StorageMetadataOperation::LABEL_PROPERTY_INDEX_DROP:
+        case memgraph::storage::durability::StorageMetadataOperation::TEXT_INDEX_CREATE:
+        case memgraph::storage::durability::StorageMetadataOperation::TEXT_INDEX_DROP:
         case memgraph::storage::durability::StorageMetadataOperation::EXISTENCE_CONSTRAINT_CREATE:
         case memgraph::storage::durability::StorageMetadataOperation::EXISTENCE_CONSTRAINT_DROP:
           data.operation_label_property.label = label;
@@ -313,6 +319,8 @@ class DeltaGenerator final {
         case memgraph::storage::durability::StorageMetadataOperation::LABEL_INDEX_STATS_SET:
         case memgraph::storage::durability::StorageMetadataOperation::LABEL_PROPERTY_INDEX_CREATE:
         case memgraph::storage::durability::StorageMetadataOperation::LABEL_PROPERTY_INDEX_DROP:
+        case memgraph::storage::durability::StorageMetadataOperation::TEXT_INDEX_CREATE:
+        case memgraph::storage::durability::StorageMetadataOperation::TEXT_INDEX_DROP:
         case memgraph::storage::durability::StorageMetadataOperation::EXISTENCE_CONSTRAINT_CREATE:
         case memgraph::storage::durability::StorageMetadataOperation::EXISTENCE_CONSTRAINT_DROP:;
         case memgraph::storage::durability::StorageMetadataOperation::LABEL_PROPERTY_INDEX_STATS_SET: