Support bolt+routing (#1796)

Andi authored on 2024-03-21 07:41:26 +01:00, committed by GitHub
parent 9629f10166
commit f699c0b37f
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
68 changed files with 2131 additions and 897 deletions


@@ -257,6 +257,17 @@ jobs:
             --organization-name $MEMGRAPH_ORGANIZATION_NAME \
             test-memgraph drivers
+      - name: Run HA driver tests
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph drivers-high-availability
       - name: Run integration tests
         run: |
           ./release/package/mgbuild.sh \
@@ -278,7 +289,7 @@ jobs:
             --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
             --organization-name $MEMGRAPH_ORGANIZATION_NAME \
             test-memgraph cppcheck-and-clang-format
       - name: Save cppcheck and clang-format errors
         uses: actions/upload-artifact@v4
         with:


@@ -48,9 +48,9 @@ SUPPORTED_ARCHS=(
 )
 SUPPORTED_TESTS=(
   clang-tidy cppcheck-and-clang-format code-analysis
-  code-coverage drivers durability e2e gql-behave
+  code-coverage drivers drivers-high-availability durability e2e gql-behave
   integration leftover-CTest macro-benchmark
   mgbench stress-plain stress-ssl
   unit unit-coverage upload-to-bench-graph
 )
@@ -116,7 +116,7 @@ print_help () {
   echo -e "\nToolchain v5 supported OSs:"
   echo -e " \"${SUPPORTED_OS_V5[*]}\""
   echo -e "\nExample usage:"
   echo -e " $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd run"
   echo -e " $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd --build-type RelWithDebInfo build-memgraph --community"
@@ -296,7 +296,7 @@ build_memgraph () {
     docker cp "$PROJECT_ROOT/." "$build_container:$MGBUILD_ROOT_DIR/"
   fi
   # Change ownership of copied files so the mg user inside container can access them
   docker exec -u root $build_container bash -c "chown -R mg:mg $MGBUILD_ROOT_DIR"
   echo "Installing dependencies using '/memgraph/environment/os/$os.sh' script..."
   docker exec -u root "$build_container" bash -c "$MGBUILD_ROOT_DIR/environment/os/$os.sh check TOOLCHAIN_RUN_DEPS || /environment/os/$os.sh install TOOLCHAIN_RUN_DEPS"
@@ -318,10 +318,9 @@ build_memgraph () {
   # Define cmake command
   local cmake_cmd="cmake $build_type_flag $arm_flag $community_flag $telemetry_id_override_flag $coverage_flag $asan_flag $ubsan_flag .."
   docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && $ACTIVATE_CARGO && $cmake_cmd"
   # ' is used instead of " because we need to run make within the allowed
   # container resources.
   # Default value for $threads is 0 instead of $(nproc) because macos
   # doesn't support the nproc command.
   # 0 is set for default value and checked here because mgbuild containers
   # support nproc
@@ -363,7 +362,7 @@ copy_memgraph() {
       local container_output_path="$MGBUILD_ROOT_DIR/build/memgraph"
       local host_output_path="$PROJECT_ROOT/build/memgraph"
       mkdir -p "$PROJECT_ROOT/build"
       docker cp -L $build_container:$container_output_path $host_output_path
       echo "Binary saved to $host_output_path"
     ;;
     --build-logs)
@@ -371,7 +370,7 @@ copy_memgraph() {
       local container_output_path="$MGBUILD_ROOT_DIR/build/logs"
       local host_output_path="$PROJECT_ROOT/build/logs"
       mkdir -p "$PROJECT_ROOT/build"
       docker cp -L $build_container:$container_output_path $host_output_path
       echo "Build logs saved to $host_output_path"
     ;;
     --package)
@@ -418,6 +417,9 @@ test_memgraph() {
     drivers)
       docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR "'&& ./tests/drivers/run.sh'
     ;;
+    drivers-high-availability)
+      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR "'&& ./tests/drivers/run_cluster.sh'
+    ;;
     integration)
       docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR "'&& tests/integration/run.sh'
     ;;
@@ -664,4 +666,4 @@ case $command in
     echo "Error: Unknown command '$command'"
     exit 1
   ;;
 esac


@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -88,6 +88,12 @@ class Session {
   virtual void Configure(const std::map<std::string, memgraph::communication::bolt::Value> &run_time_info) = 0;
+#ifdef MG_ENTERPRISE
+  virtual auto Route(std::map<std::string, Value> const &routing,
+                     std::vector<memgraph::communication::bolt::Value> const &bookmarks,
+                     std::map<std::string, Value> const &extra) -> std::map<std::string, Value> = 0;
+#endif
   /**
    * Put results of the processed query in the `encoder`.
    *
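The Route() hook added above is the seam where a concrete Bolt session answers a driver's ROUTE request. A minimal sketch of an override follows; ExampleSession and BuildRoutingTable() are illustrative assumptions, not part of this change.

#ifdef MG_ENTERPRISE
// Hypothetical override in a concrete session type (sketch only).
auto ExampleSession::Route(std::map<std::string, Value> const &routing,
                           std::vector<memgraph::communication::bolt::Value> const & /*bookmarks*/,
                           std::map<std::string, Value> const & /*extra*/) -> std::map<std::string, Value> {
  // `routing` carries the driver-supplied routing context (for example the
  // "address" it dialed); bookmarks and extra are accepted for protocol
  // completeness and ignored here.
  return BuildRoutingTable(routing);  // assumed helper that assembles the routing metadata
}
#endif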


@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -79,9 +79,9 @@ State RunHandlerV4(Signature signature, TSession &session, State state, Marker m
     }
     case Signature::Route: {
       if constexpr (bolt_minor >= 3) {
-        if (signature == Signature::Route) return HandleRoute<TSession>(session, marker);
+        return HandleRoute<TSession>(session, marker);
       } else {
-        spdlog::trace("Supported only in bolt v4.3");
+        spdlog::trace("Supported only in bolt versions >= 4.3");
         return State::Close;
       }
     }


@@ -478,9 +478,6 @@ State HandleGoodbye() {
 template <typename TSession>
 State HandleRoute(TSession &session, const Marker marker) {
-  // Route message is not implemented since it is Neo4j specific, therefore we will receive it and inform user that
-  // there is no implementation. Before that, we have to read out the fields from the buffer to leave it in a clean
-  // state.
   if (marker != Marker::TinyStruct3) {
     spdlog::trace("Expected TinyStruct3 marker, but received 0x{:02x}!", utils::UnderlyingCast(marker));
     return State::Close;
@@ -496,11 +493,27 @@ State HandleRoute(TSession &session, const Marker marker) {
     spdlog::trace("Couldn't read bookmarks field!");
     return State::Close;
   }
+  // TODO: (andi) Fix Bolt versions
   Value db;
   if (!session.decoder_.ReadValue(&db)) {
     spdlog::trace("Couldn't read db field!");
     return State::Close;
   }
+#ifdef MG_ENTERPRISE
+  try {
+    auto res = session.Route(routing.ValueMap(), bookmarks.ValueList(), {});
+    if (!session.encoder_.MessageSuccess(std::move(res))) {
+      spdlog::trace("Couldn't send result of routing!");
+      return State::Close;
+    }
+    return State::Idle;
+  } catch (const std::exception &e) {
+    return HandleFailure(session, e);
+  }
+#else
   session.encoder_buffer_.Clear();
   bool fail_sent =
       session.encoder_.MessageFailure({{"code", "66"}, {"message", "Route message is not supported in Memgraph!"}});
@@ -509,6 +522,7 @@ State HandleRoute(TSession &session, const Marker marker) {
     return State::Close;
   }
   return State::Error;
+#endif
 }
 template <typename TSession>


@@ -6,7 +6,7 @@ target_sources(mg-coordination
     include/coordination/coordinator_state.hpp
     include/coordination/coordinator_rpc.hpp
     include/coordination/coordinator_server.hpp
-    include/coordination/coordinator_config.hpp
+    include/coordination/coordinator_communication_config.hpp
     include/coordination/coordinator_exceptions.hpp
     include/coordination/coordinator_slk.hpp
     include/coordination/coordinator_instance.hpp
@@ -23,7 +23,7 @@ target_sources(mg-coordination
     include/nuraft/coordinator_state_manager.hpp
     PRIVATE
-    coordinator_config.cpp
+    coordinator_communication_config.cpp
     coordinator_client.cpp
     coordinator_state.cpp
     coordinator_rpc.cpp


@@ -14,7 +14,7 @@
 #include "coordination/coordinator_client.hpp"
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "coordination/coordinator_rpc.hpp"
 #include "replication_coordination_glue/common.hpp"
 #include "replication_coordination_glue/messages.hpp"
@@ -23,18 +23,17 @@
 namespace memgraph::coordination {
 namespace {
-auto CreateClientContext(memgraph::coordination::CoordinatorClientConfig const &config)
+auto CreateClientContext(memgraph::coordination::CoordinatorToReplicaConfig const &config)
     -> communication::ClientContext {
   return (config.ssl) ? communication::ClientContext{config.ssl->key_file, config.ssl->cert_file}
                       : communication::ClientContext{};
 }
 } // namespace
-CoordinatorClient::CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorClientConfig config,
+CoordinatorClient::CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorToReplicaConfig config,
                                      HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb)
     : rpc_context_{CreateClientContext(config)},
-      rpc_client_{io::network::Endpoint(io::network::Endpoint::needs_resolving, config.ip_address, config.port),
-                  &rpc_context_},
+      rpc_client_{config.mgt_server, &rpc_context_},
       config_{std::move(config)},
       coord_instance_{coord_instance},
       succ_cb_{std::move(succ_cb)},
@@ -86,7 +85,9 @@ void CoordinatorClient::StopFrequentCheck() { instance_checker_.Stop(); }
 void CoordinatorClient::PauseFrequentCheck() { instance_checker_.Pause(); }
 void CoordinatorClient::ResumeFrequentCheck() { instance_checker_.Resume(); }
-auto CoordinatorClient::ReplicationClientInfo() const -> ReplClientInfo { return config_.replication_client_info; }
+auto CoordinatorClient::ReplicationClientInfo() const -> coordination::ReplicationClientInfo {
+  return config_.replication_client_info;
+}
 auto CoordinatorClient::SendPromoteReplicaToMainRpc(const utils::UUID &uuid,
                                                     ReplicationClientsInfo replication_clients_info) const -> bool {


@@ -18,86 +18,88 @@
 namespace memgraph::coordination {
-void to_json(nlohmann::json &j, InstanceState const &instance_state) {
+void to_json(nlohmann::json &j, ReplicationInstanceState const &instance_state) {
   j = nlohmann::json{{"config", instance_state.config}, {"status", instance_state.status}};
 }
-void from_json(nlohmann::json const &j, InstanceState &instance_state) {
+void from_json(nlohmann::json const &j, ReplicationInstanceState &instance_state) {
   j.at("config").get_to(instance_state.config);
   j.at("status").get_to(instance_state.status);
 }
-CoordinatorClusterState::CoordinatorClusterState(std::map<std::string, InstanceState, std::less<>> instances)
-    : instances_{std::move(instances)} {}
-CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState const &other) : instances_{other.instances_} {}
+CoordinatorClusterState::CoordinatorClusterState(std::map<std::string, ReplicationInstanceState, std::less<>> instances)
+    : repl_instances_{std::move(instances)} {}
+CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState const &other)
+    : repl_instances_{other.repl_instances_} {}
 CoordinatorClusterState &CoordinatorClusterState::operator=(CoordinatorClusterState const &other) {
   if (this == &other) {
     return *this;
   }
-  instances_ = other.instances_;
+  repl_instances_ = other.repl_instances_;
   return *this;
 }
 CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState &&other) noexcept
-    : instances_{std::move(other.instances_)} {}
+    : repl_instances_{std::move(other.repl_instances_)} {}
 CoordinatorClusterState &CoordinatorClusterState::operator=(CoordinatorClusterState &&other) noexcept {
   if (this == &other) {
     return *this;
   }
-  instances_ = std::move(other.instances_);
+  repl_instances_ = std::move(other.repl_instances_);
   return *this;
 }
 auto CoordinatorClusterState::MainExists() const -> bool {
   auto lock = std::shared_lock{log_lock_};
-  return std::ranges::any_of(instances_,
+  return std::ranges::any_of(repl_instances_,
                              [](auto const &entry) { return entry.second.status == ReplicationRole::MAIN; });
 }
 auto CoordinatorClusterState::IsMain(std::string_view instance_name) const -> bool {
   auto lock = std::shared_lock{log_lock_};
-  auto const it = instances_.find(instance_name);
-  return it != instances_.end() && it->second.status == ReplicationRole::MAIN;
+  auto const it = repl_instances_.find(instance_name);
+  return it != repl_instances_.end() && it->second.status == ReplicationRole::MAIN;
 }
 auto CoordinatorClusterState::IsReplica(std::string_view instance_name) const -> bool {
   auto lock = std::shared_lock{log_lock_};
-  auto const it = instances_.find(instance_name);
-  return it != instances_.end() && it->second.status == ReplicationRole::REPLICA;
+  auto const it = repl_instances_.find(instance_name);
+  return it != repl_instances_.end() && it->second.status == ReplicationRole::REPLICA;
 }
-auto CoordinatorClusterState::InsertInstance(std::string instance_name, InstanceState instance_state) -> void {
+auto CoordinatorClusterState::InsertInstance(std::string instance_name, ReplicationInstanceState instance_state)
+    -> void {
   auto lock = std::lock_guard{log_lock_};
-  instances_.insert_or_assign(std::move(instance_name), std::move(instance_state));
+  repl_instances_.insert_or_assign(std::move(instance_name), std::move(instance_state));
 }
 auto CoordinatorClusterState::DoAction(TRaftLog log_entry, RaftLogAction log_action) -> void {
   auto lock = std::lock_guard{log_lock_};
   switch (log_action) {
     case RaftLogAction::REGISTER_REPLICATION_INSTANCE: {
-      auto const &config = std::get<CoordinatorClientConfig>(log_entry);
-      instances_[config.instance_name] = InstanceState{config, ReplicationRole::REPLICA};
+      auto const &config = std::get<CoordinatorToReplicaConfig>(log_entry);
+      repl_instances_[config.instance_name] = ReplicationInstanceState{config, ReplicationRole::REPLICA};
       break;
     }
     case RaftLogAction::UNREGISTER_REPLICATION_INSTANCE: {
      auto const instance_name = std::get<std::string>(log_entry);
-      instances_.erase(instance_name);
+      repl_instances_.erase(instance_name);
      break;
    }
    case RaftLogAction::SET_INSTANCE_AS_MAIN: {
      auto const instance_name = std::get<std::string>(log_entry);
-      auto it = instances_.find(instance_name);
-      MG_ASSERT(it != instances_.end(), "Instance does not exist as part of raft state!");
+      auto it = repl_instances_.find(instance_name);
+      MG_ASSERT(it != repl_instances_.end(), "Instance does not exist as part of raft state!");
      it->second.status = ReplicationRole::MAIN;
      break;
    }
    case RaftLogAction::SET_INSTANCE_AS_REPLICA: {
      auto const instance_name = std::get<std::string>(log_entry);
-      auto it = instances_.find(instance_name);
-      MG_ASSERT(it != instances_.end(), "Instance does not exist as part of raft state!");
+      auto it = repl_instances_.find(instance_name);
+      MG_ASSERT(it != repl_instances_.end(), "Instance does not exist as part of raft state!");
      it->second.status = ReplicationRole::REPLICA;
      break;
    }
@@ -105,13 +107,18 @@ auto CoordinatorClusterState::DoAction(TRaftLog log_entry, RaftLogAction log_act
       uuid_ = std::get<utils::UUID>(log_entry);
       break;
     }
+    case RaftLogAction::ADD_COORDINATOR_INSTANCE: {
+      auto const &config = std::get<CoordinatorToCoordinatorConfig>(log_entry);
+      coordinators_.emplace_back(CoordinatorInstanceState{config});
+      break;
+    }
   }
 }
 auto CoordinatorClusterState::Serialize(ptr<buffer> &data) -> void {
   auto lock = std::shared_lock{log_lock_};
-  auto const log = nlohmann::json(instances_).dump();
+  auto const log = nlohmann::json(repl_instances_).dump();
   data = buffer::alloc(sizeof(uint32_t) + log.size());
   buffer_serializer bs(data);
@@ -121,27 +128,22 @@ auto CoordinatorClusterState::Serialize(ptr<buffer> &data) -> void {
 auto CoordinatorClusterState::Deserialize(buffer &data) -> CoordinatorClusterState {
   buffer_serializer bs(data);
   auto const j = nlohmann::json::parse(bs.get_str());
-  auto instances = j.get<std::map<std::string, InstanceState, std::less<>>>();
+  auto instances = j.get<std::map<std::string, ReplicationInstanceState, std::less<>>>();
   return CoordinatorClusterState{std::move(instances)};
 }
-auto CoordinatorClusterState::GetInstances() const -> std::vector<InstanceState> {
+auto CoordinatorClusterState::GetReplicationInstances() const -> std::vector<ReplicationInstanceState> {
   auto lock = std::shared_lock{log_lock_};
-  return instances_ | ranges::views::values | ranges::to<std::vector<InstanceState>>;
+  return repl_instances_ | ranges::views::values | ranges::to<std::vector<ReplicationInstanceState>>;
+}
+auto CoordinatorClusterState::GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState> {
+  auto lock = std::shared_lock{log_lock_};
+  return coordinators_;
 }
 auto CoordinatorClusterState::GetUUID() const -> utils::UUID { return uuid_; }
-auto CoordinatorClusterState::FindCurrentMainInstanceName() const -> std::optional<std::string> {
-  auto lock = std::shared_lock{log_lock_};
-  auto const it =
-      std::ranges::find_if(instances_, [](auto const &entry) { return entry.second.status == ReplicationRole::MAIN; });
-  if (it == instances_.end()) {
-    return {};
-  }
-  return it->first;
-}
 } // namespace memgraph::coordination
 #endif


@@ -11,43 +11,53 @@
 #ifdef MG_ENTERPRISE
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 namespace memgraph::coordination {
-void to_json(nlohmann::json &j, ReplClientInfo const &config) {
+void to_json(nlohmann::json &j, CoordinatorToCoordinatorConfig const &config) {
+  j = nlohmann::json{{"coordinator_server_id", config.coordinator_server_id},
+                     {"coordinator_server", config.coordinator_server},
+                     {"bolt_server", config.bolt_server}};
+}
+void from_json(nlohmann::json const &j, CoordinatorToCoordinatorConfig &config) {
+  config.coordinator_server_id = j.at("coordinator_server_id").get<uint32_t>();
+  config.coordinator_server = j.at("coordinator_server").get<io::network::Endpoint>();
+  config.bolt_server = j.at("bolt_server").get<io::network::Endpoint>();
+}
+void to_json(nlohmann::json &j, ReplicationClientInfo const &config) {
   j = nlohmann::json{{"instance_name", config.instance_name},
                      {"replication_mode", config.replication_mode},
-                     {"replication_ip_address", config.replication_ip_address},
-                     {"replication_port", config.replication_port}};
+                     {"replication_server", config.replication_server}};
 }
-void from_json(nlohmann::json const &j, ReplClientInfo &config) {
+void from_json(nlohmann::json const &j, ReplicationClientInfo &config) {
   config.instance_name = j.at("instance_name").get<std::string>();
   config.replication_mode = j.at("replication_mode").get<replication_coordination_glue::ReplicationMode>();
-  config.replication_ip_address = j.at("replication_ip_address").get<std::string>();
-  config.replication_port = j.at("replication_port").get<uint16_t>();
+  config.replication_server = j.at("replication_server").get<io::network::Endpoint>();
 }
-void to_json(nlohmann::json &j, CoordinatorClientConfig const &config) {
+void to_json(nlohmann::json &j, CoordinatorToReplicaConfig const &config) {
   j = nlohmann::json{{"instance_name", config.instance_name},
-                     {"ip_address", config.ip_address},
-                     {"port", config.port},
+                     {"mgt_server", config.mgt_server},
+                     {"bolt_server", config.bolt_server},
                      {"instance_health_check_frequency_sec", config.instance_health_check_frequency_sec.count()},
                      {"instance_down_timeout_sec", config.instance_down_timeout_sec.count()},
                      {"instance_get_uuid_frequency_sec", config.instance_get_uuid_frequency_sec.count()},
                      {"replication_client_info", config.replication_client_info}};
 }
-void from_json(nlohmann::json const &j, CoordinatorClientConfig &config) {
+void from_json(nlohmann::json const &j, CoordinatorToReplicaConfig &config) {
   config.instance_name = j.at("instance_name").get<std::string>();
-  config.ip_address = j.at("ip_address").get<std::string>();
-  config.port = j.at("port").get<uint16_t>();
+  config.mgt_server = j.at("mgt_server").get<io::network::Endpoint>();
+  config.bolt_server = j.at("bolt_server").get<io::network::Endpoint>();
   config.instance_health_check_frequency_sec =
       std::chrono::seconds{j.at("instance_health_check_frequency_sec").get<int>()};
   config.instance_down_timeout_sec = std::chrono::seconds{j.at("instance_down_timeout_sec").get<int>()};
   config.instance_get_uuid_frequency_sec = std::chrono::seconds{j.at("instance_get_uuid_frequency_sec").get<int>()};
-  config.replication_client_info = j.at("replication_client_info").get<ReplClientInfo>();
+  config.replication_client_info = j.at("replication_client_info").get<ReplicationClientInfo>();
 }
 } // namespace memgraph::coordination


@@ -95,8 +95,8 @@ void CoordinatorHandlers::DemoteMainToReplicaHandler(replication::ReplicationHan
   slk::Load(&req, req_reader);
   const replication::ReplicationServerConfig clients_config{
-      .ip_address = req.replication_client_info.replication_ip_address,
-      .port = req.replication_client_info.replication_port};
+      .ip_address = req.replication_client_info.replication_server.address,
+      .port = req.replication_client_info.replication_server.port};
   if (!replication_handler.SetReplicationRoleReplica(clients_config, std::nullopt)) {
     spdlog::error("Demoting main to replica failed!");
@@ -136,8 +136,8 @@ void CoordinatorHandlers::PromoteReplicaToMainHandler(replication::ReplicationHa
     return replication::ReplicationClientConfig{
         .name = repl_info_config.instance_name,
        .mode = repl_info_config.replication_mode,
-        .ip_address = repl_info_config.replication_ip_address,
-        .port = repl_info_config.replication_port,
+        .ip_address = repl_info_config.replication_server.address,
+        .port = repl_info_config.replication_server.port,
     };
   };


@@ -14,7 +14,6 @@
 #include "coordination/coordinator_instance.hpp"
 #include "coordination/coordinator_exceptions.hpp"
-#include "coordination/fmt.hpp"
 #include "dbms/constants.hpp"
 #include "nuraft/coordinator_state_machine.hpp"
 #include "nuraft/coordinator_state_manager.hpp"
@@ -34,7 +33,7 @@ CoordinatorInstance::CoordinatorInstance()
     : raft_state_(RaftState::MakeRaftState(
           [this]() {
             spdlog::info("Leader changed, starting all replication instances!");
-            auto const instances = raft_state_.GetInstances();
+            auto const instances = raft_state_.GetReplicationInstances();
             auto replicas = instances | ranges::views::filter([](auto const &instance) {
                               return instance.status == ReplicationRole::REPLICA;
                             });
@@ -133,7 +132,7 @@ auto CoordinatorInstance::ShowInstances() const -> std::vector<InstanceStatus> {
             .health = "unknown"};
   };
-  std::ranges::transform(raft_state_.GetInstances(), std::back_inserter(instances_status),
+  std::ranges::transform(raft_state_.GetReplicationInstances(), std::back_inserter(instances_status),
                          process_repl_instance_as_follower);
 }
@@ -288,7 +287,7 @@ auto CoordinatorInstance::SetReplicationInstanceToMain(std::string_view instance
   return SetInstanceToMainCoordinatorStatus::SUCCESS;
 }
-auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorClientConfig const &config)
+auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorToReplicaConfig const &config)
     -> RegisterInstanceCoordinatorStatus {
   auto lock = std::lock_guard{coord_instance_lock_};
@@ -382,9 +381,12 @@ auto CoordinatorInstance::UnregisterReplicationInstance(std::string_view instanc
   return UnregisterInstanceCoordinatorStatus::SUCCESS;
 }
-auto CoordinatorInstance::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port,
-                                                 std::string_view raft_address) -> void {
-  raft_state_.AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
+auto CoordinatorInstance::AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void {
+  raft_state_.AddCoordinatorInstance(config);
+  // NOTE: The error is ignored on purpose: in that case the coordinator instance was added to the
+  // networking layer but not to the Raft log.
+  if (!raft_state_.AppendAddCoordinatorInstanceLog(config)) {
+    spdlog::error("Failed to append add coordinator instance log");
+  }
 }
 void CoordinatorInstance::MainFailCallback(std::string_view repl_instance_name) {
@@ -557,5 +559,56 @@ auto CoordinatorInstance::IsReplica(std::string_view instance_name) const -> boo
   return raft_state_.IsReplica(instance_name);
 }
+auto CoordinatorInstance::GetRoutingTable(std::map<std::string, std::string> const &routing) -> RoutingTable {
+  auto res = RoutingTable{};
+  auto const repl_instance_to_bolt = [](ReplicationInstanceState const &instance) {
+    return instance.config.BoltSocketAddress();
+  };
+  // TODO: (andi) This check is wrong; Fico will correct it in #1819.
+  auto const is_instance_main = [&](ReplicationInstanceState const &instance) {
+    return instance.status == ReplicationRole::MAIN;
+  };
+  auto const is_instance_replica = [&](ReplicationInstanceState const &instance) {
+    return instance.status == ReplicationRole::REPLICA;
+  };
+  auto const &raft_log_repl_instances = raft_state_.GetReplicationInstances();
+  auto bolt_mains = raft_log_repl_instances | ranges::views::filter(is_instance_main) |
+                    ranges::views::transform(repl_instance_to_bolt) | ranges::to<std::vector>();
+  MG_ASSERT(bolt_mains.size() <= 1, "There can be at most one main instance active!");
+  if (!std::ranges::empty(bolt_mains)) {
+    res.emplace_back(std::move(bolt_mains), "WRITE");
+  }
+  auto bolt_replicas = raft_log_repl_instances | ranges::views::filter(is_instance_replica) |
+                       ranges::views::transform(repl_instance_to_bolt) | ranges::to<std::vector>();
+  if (!std::ranges::empty(bolt_replicas)) {
+    res.emplace_back(std::move(bolt_replicas), "READ");
+  }
+  auto const coord_instance_to_bolt = [](CoordinatorInstanceState const &instance) {
+    return instance.config.bolt_server.SocketAddress();
+  };
+  auto const &raft_log_coord_instances = raft_state_.GetCoordinatorInstances();
+  auto bolt_coords =
+      raft_log_coord_instances | ranges::views::transform(coord_instance_to_bolt) | ranges::to<std::vector>();
+  auto const &local_bolt_coord = routing.find("address");
+  if (local_bolt_coord == routing.end()) {
+    throw InvalidRoutingTableException("No bolt address found in routing table for the current coordinator!");
+  }
+  bolt_coords.push_back(local_bolt_coord->second);
+  res.emplace_back(std::move(bolt_coords), "ROUTE");
+  return res;
+}
 } // namespace memgraph::coordination
 #endif
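The RoutingTable built by GetRoutingTable() is a plain vector of (bolt addresses, role) pairs, one entry per WRITE/READ/ROUTE role, which the Bolt layer then turns into the routing metadata returned to the driver. A standalone sketch of consuming that shape, using only standard types (addresses are made up for illustration):

#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Same alias as in coordinator_instance.hpp: one entry per role.
using RoutingTable = std::vector<std::pair<std::vector<std::string>, std::string>>;

int main() {
  RoutingTable const table{
      {{"localhost:7687"}, "WRITE"},                                      // current main
      {{"localhost:7688", "localhost:7689"}, "READ"},                     // replicas
      {{"localhost:7690", "localhost:7691", "localhost:7692"}, "ROUTE"},  // coordinators
  };
  // Each (addresses, role) pair becomes one server entry of the ROUTE response.
  for (auto const &[addresses, role] : table) {
    std::cout << role << ":";
    for (auto const &address : addresses) std::cout << ' ' << address;
    std::cout << '\n';
  }
}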


@@ -18,8 +18,7 @@ namespace memgraph::coordination {
 namespace {
-auto CreateServerContext(const memgraph::coordination::CoordinatorServerConfig &config)
-    -> communication::ServerContext {
+auto CreateServerContext(const memgraph::coordination::ManagementServerConfig &config) -> communication::ServerContext {
   return (config.ssl) ? communication::ServerContext{config.ssl->key_file, config.ssl->cert_file, config.ssl->ca_file,
                                                      config.ssl->verify_peer}
                       : communication::ServerContext{};
@@ -32,7 +31,7 @@ constexpr auto kCoordinatorServerThreads = 1;
 } // namespace
-CoordinatorServer::CoordinatorServer(const CoordinatorServerConfig &config)
+CoordinatorServer::CoordinatorServer(const ManagementServerConfig &config)
     : rpc_server_context_{CreateServerContext(config)},
       rpc_server_{io::network::Endpoint{config.ip_address, config.port}, &rpc_server_context_,
                   kCoordinatorServerThreads} {


@@ -13,7 +13,7 @@
 #include "coordination/coordinator_state.hpp"
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "coordination/register_main_replica_coordinator_status.hpp"
 #include "flags/replication.hpp"
 #include "spdlog/spdlog.h"
@@ -31,7 +31,7 @@ CoordinatorState::CoordinatorState() {
   spdlog::info("Executing coordinator constructor");
   if (FLAGS_coordinator_server_port) {
     spdlog::info("Coordinator server port set");
-    auto const config = CoordinatorServerConfig{
+    auto const config = ManagementServerConfig{
         .ip_address = kDefaultReplicationServerIp,
         .port = static_cast<uint16_t>(FLAGS_coordinator_server_port),
     };
@@ -41,7 +41,7 @@ CoordinatorState::CoordinatorState() {
   }
 }
-auto CoordinatorState::RegisterReplicationInstance(CoordinatorClientConfig const &config)
+auto CoordinatorState::RegisterReplicationInstance(CoordinatorToReplicaConfig const &config)
     -> RegisterInstanceCoordinatorStatus {
   MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
             "Coordinator cannot register replica since variant holds wrong alternative");
@@ -98,11 +98,16 @@ auto CoordinatorState::GetCoordinatorServer() const -> CoordinatorServer & {
   return *std::get<CoordinatorMainReplicaData>(data_).coordinator_server_;
 }
-auto CoordinatorState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port,
-                                              std::string_view raft_address) -> void {
+auto CoordinatorState::AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void {
   MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
             "Coordinator cannot register replica since variant holds wrong alternative");
-  return std::get<CoordinatorInstance>(data_).AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
+  return std::get<CoordinatorInstance>(data_).AddCoordinatorInstance(config);
+}
+auto CoordinatorState::GetRoutingTable(std::map<std::string, std::string> const &routing) -> RoutingTable {
+  MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
+            "Coordinator cannot get routing table since variant holds wrong alternative");
+  return std::get<CoordinatorInstance>(data_).GetRoutingTable(routing);
 }
 } // namespace memgraph::coordination


@@ -20,10 +20,6 @@ constexpr int MAX_SNAPSHOTS = 3;
 namespace memgraph::coordination {
-auto CoordinatorStateMachine::FindCurrentMainInstanceName() const -> std::optional<std::string> {
-  return cluster_state_.FindCurrentMainInstanceName();
-}
 auto CoordinatorStateMachine::MainExists() const -> bool { return cluster_state_.MainExists(); }
 auto CoordinatorStateMachine::IsMain(std::string_view instance_name) const -> bool {
@@ -42,7 +38,7 @@ auto CoordinatorStateMachine::CreateLog(nlohmann::json &&log) -> ptr<buffer> {
   return log_buf;
 }
-auto CoordinatorStateMachine::SerializeRegisterInstance(CoordinatorClientConfig const &config) -> ptr<buffer> {
+auto CoordinatorStateMachine::SerializeRegisterInstance(CoordinatorToReplicaConfig const &config) -> ptr<buffer> {
   return CreateLog({{"action", RaftLogAction::REGISTER_REPLICATION_INSTANCE}, {"info", config}});
 }
@@ -62,6 +58,11 @@ auto CoordinatorStateMachine::SerializeUpdateUUID(utils::UUID const &uuid) -> pt
   return CreateLog({{"action", RaftLogAction::UPDATE_UUID}, {"info", uuid}});
 }
+auto CoordinatorStateMachine::SerializeAddCoordinatorInstance(CoordinatorToCoordinatorConfig const &config)
+    -> ptr<buffer> {
+  return CreateLog({{"action", RaftLogAction::ADD_COORDINATOR_INSTANCE}, {"info", config}});
+}
 auto CoordinatorStateMachine::DecodeLog(buffer &data) -> std::pair<TRaftLog, RaftLogAction> {
   buffer_serializer bs(data);
   auto const json = nlohmann::json::parse(bs.get_str());
@@ -71,7 +72,7 @@ auto CoordinatorStateMachine::DecodeLog(buffer &data) -> std::pair<TRaftLog, Raf
   switch (action) {
     case RaftLogAction::REGISTER_REPLICATION_INSTANCE:
-      return {info.get<CoordinatorClientConfig>(), action};
+      return {info.get<CoordinatorToReplicaConfig>(), action};
     case RaftLogAction::UPDATE_UUID:
       return {info.get<utils::UUID>(), action};
     case RaftLogAction::UNREGISTER_REPLICATION_INSTANCE:
@@ -79,6 +80,8 @@ auto CoordinatorStateMachine::DecodeLog(buffer &data) -> std::pair<TRaftLog, Raf
       [[fallthrough]];
     case RaftLogAction::SET_INSTANCE_AS_REPLICA:
       return {info.get<std::string>(), action};
+    case RaftLogAction::ADD_COORDINATOR_INSTANCE:
+      return {info.get<CoordinatorToCoordinatorConfig>(), action};
   }
   throw std::runtime_error("Unknown action");
 }
@@ -133,6 +136,7 @@ auto CoordinatorStateMachine::read_logical_snp_obj(snapshot &snapshot, void *& /
   } else {
     // Object ID > 0: second object, put actual value.
     ctx->cluster_state_.Serialize(data_out);
+    is_last_obj = true;
   }
   return 0;
@@ -155,6 +159,7 @@ auto CoordinatorStateMachine::save_logical_snp_obj(snapshot &snapshot, ulong &ob
     DMG_ASSERT(entry != snapshots_.end());
     entry->second->cluster_state_ = cluster_state;
   }
+  obj_id++;
 }
 auto CoordinatorStateMachine::apply_snapshot(snapshot &s) -> bool {
@@ -205,8 +210,12 @@ auto CoordinatorStateMachine::create_snapshot_internal(ptr<snapshot> snapshot) -
   }
 }
-auto CoordinatorStateMachine::GetInstances() const -> std::vector<InstanceState> {
-  return cluster_state_.GetInstances();
+auto CoordinatorStateMachine::GetReplicationInstances() const -> std::vector<ReplicationInstanceState> {
+  return cluster_state_.GetReplicationInstances();
+}
+auto CoordinatorStateMachine::GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState> {
+  return cluster_state_.GetCoordinatorInstances();
 }
 auto CoordinatorStateMachine::GetUUID() const -> utils::UUID { return cluster_state_.GetUUID(); }
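For reference, the ADD_COORDINATOR_INSTANCE entry created by SerializeAddCoordinatorInstance is JSON pairing an action tag with the CoordinatorToCoordinatorConfig payload. A rough standalone illustration follows; the action string and the endpoint encoding are simplified placeholders, not the exact on-wire format.

#include <iostream>
#include <nlohmann/json.hpp>

int main() {
  // Approximate shape of an ADD_COORDINATOR_INSTANCE Raft log entry. Field names
  // mirror the to_json overload added for CoordinatorToCoordinatorConfig; the
  // "action" value and endpoint strings are illustrative placeholders.
  nlohmann::json const log_entry{
      {"action", "add_coordinator_instance"},
      {"info",
       {{"coordinator_server_id", 2},
        {"coordinator_server", "127.0.0.1:10112"},
        {"bolt_server", "127.0.0.1:7691"}}}};
  std::cout << log_entry.dump(2) << '\n';
}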


@@ -33,6 +33,7 @@ CoordinatorStateManager::CoordinatorStateManager(int srv_id, std::string const &
 auto CoordinatorStateManager::load_config() -> ptr<cluster_config> {
   // Just return in-memory data in this example.
   // May require reading from disk here, if it has been written to disk.
+  spdlog::trace("Loading cluster config");
   return cluster_config_;
 }
@@ -41,6 +42,11 @@ auto CoordinatorStateManager::save_config(cluster_config const &config) -> void
   // Need to write to disk here, if want to make it durable.
   ptr<buffer> buf = config.serialize();
   cluster_config_ = cluster_config::deserialize(*buf);
+  spdlog::info("Saving cluster config.");
+  auto servers = cluster_config_->get_servers();
+  for (auto const &server : servers) {
+    spdlog::trace("Server id: {}, endpoint: {}", server->get_id(), server->get_endpoint());
+  }
 }
 auto CoordinatorStateManager::save_state(srv_state const &state) -> void {


@@ -13,7 +13,7 @@
 #ifdef MG_ENTERPRISE
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "replication_coordination_glue/common.hpp"
 #include "rpc/client.hpp"
 #include "rpc_errors.hpp"
@@ -25,11 +25,11 @@ namespace memgraph::coordination {
 class CoordinatorInstance;
 using HealthCheckClientCallback = std::function<void(CoordinatorInstance *, std::string_view)>;
-using ReplicationClientsInfo = std::vector<ReplClientInfo>;
+using ReplicationClientsInfo = std::vector<ReplicationClientInfo>;
 class CoordinatorClient {
  public:
-  explicit CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorClientConfig config,
+  explicit CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorToReplicaConfig config,
                              HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb);
   ~CoordinatorClient() = default;
@@ -62,7 +62,7 @@ class CoordinatorClient {
   auto SendGetInstanceUUIDRpc() const -> memgraph::utils::BasicResult<GetInstanceUUIDError, std::optional<utils::UUID>>;
-  auto ReplicationClientInfo() const -> ReplClientInfo;
+  auto ReplicationClientInfo() const -> ReplicationClientInfo;
   auto SendGetInstanceTimestampsRpc() const
       -> utils::BasicResult<GetInstanceUUIDError, replication_coordination_glue::DatabaseHistories>;
@@ -83,7 +83,7 @@ class CoordinatorClient {
   communication::ClientContext rpc_context_;
   mutable rpc::Client rpc_client_;
-  CoordinatorClientConfig config_;
+  CoordinatorToReplicaConfig config_;
   CoordinatorInstance *coord_instance_;
   HealthCheckClientCallback succ_cb_;
   HealthCheckClientCallback fail_cb_;


@@ -13,6 +13,7 @@
 #ifdef MG_ENTERPRISE
+#include "io/network/endpoint.hpp"
 #include "replication_coordination_glue/mode.hpp"
 #include "utils/string.hpp"
@@ -28,46 +29,50 @@ namespace memgraph::coordination {
 inline constexpr auto *kDefaultReplicationServerIp = "0.0.0.0";
-struct CoordinatorClientConfig {
-  std::string instance_name;
-  std::string ip_address;
-  uint16_t port{};
+struct ReplicationClientInfo {
+  std::string instance_name{};
+  replication_coordination_glue::ReplicationMode replication_mode{};
+  io::network::Endpoint replication_server;
+  friend bool operator==(ReplicationClientInfo const &, ReplicationClientInfo const &) = default;
+};
+struct CoordinatorToReplicaConfig {
+  auto BoltSocketAddress() const -> std::string { return bolt_server.SocketAddress(); }
+  auto CoordinatorSocketAddress() const -> std::string { return mgt_server.SocketAddress(); }
+  auto ReplicationSocketAddress() const -> std::string {
+    return replication_client_info.replication_server.SocketAddress();
+  }
+  std::string instance_name{};
+  io::network::Endpoint mgt_server;
+  io::network::Endpoint bolt_server;
+  ReplicationClientInfo replication_client_info;
   std::chrono::seconds instance_health_check_frequency_sec{1};
   std::chrono::seconds instance_down_timeout_sec{5};
   std::chrono::seconds instance_get_uuid_frequency_sec{10};
-  auto CoordinatorSocketAddress() const -> std::string { return fmt::format("{}:{}", ip_address, port); }
-  auto ReplicationSocketAddress() const -> std::string {
-    return fmt::format("{}:{}", replication_client_info.replication_ip_address,
-                       replication_client_info.replication_port);
-  }
-  struct ReplicationClientInfo {
-    std::string instance_name;
-    replication_coordination_glue::ReplicationMode replication_mode{};
-    std::string replication_ip_address;
-    uint16_t replication_port{};
-    friend bool operator==(ReplicationClientInfo const &, ReplicationClientInfo const &) = default;
-  };
-  ReplicationClientInfo replication_client_info;
   struct SSL {
     std::string key_file;
     std::string cert_file;
     friend bool operator==(const SSL &, const SSL &) = default;
   };
   std::optional<SSL> ssl;
-  friend bool operator==(CoordinatorClientConfig const &, CoordinatorClientConfig const &) = default;
+  friend bool operator==(CoordinatorToReplicaConfig const &, CoordinatorToReplicaConfig const &) = default;
 };
-using ReplClientInfo = CoordinatorClientConfig::ReplicationClientInfo;
-struct CoordinatorServerConfig {
+struct CoordinatorToCoordinatorConfig {
+  uint32_t coordinator_server_id{0};
+  io::network::Endpoint bolt_server;
+  io::network::Endpoint coordinator_server;
+  friend bool operator==(CoordinatorToCoordinatorConfig const &, CoordinatorToCoordinatorConfig const &) = default;
+};
+struct ManagementServerConfig {
   std::string ip_address;
   uint16_t port{};
   struct SSL {
@@ -80,14 +85,17 @@ struct CoordinatorServerConfig {
   std::optional<SSL> ssl;
-  friend bool operator==(CoordinatorServerConfig const &, CoordinatorServerConfig const &) = default;
+  friend bool operator==(ManagementServerConfig const &, ManagementServerConfig const &) = default;
 };
-void to_json(nlohmann::json &j, CoordinatorClientConfig const &config);
-void from_json(nlohmann::json const &j, CoordinatorClientConfig &config);
-void to_json(nlohmann::json &j, ReplClientInfo const &config);
-void from_json(nlohmann::json const &j, ReplClientInfo &config);
+void to_json(nlohmann::json &j, CoordinatorToReplicaConfig const &config);
+void from_json(nlohmann::json const &j, CoordinatorToReplicaConfig &config);
+void to_json(nlohmann::json &j, CoordinatorToCoordinatorConfig const &config);
+void from_json(nlohmann::json const &j, CoordinatorToCoordinatorConfig &config);
+void to_json(nlohmann::json &j, ReplicationClientInfo const &config);
+void from_json(nlohmann::json const &j, ReplicationClientInfo &config);
 } // namespace memgraph::coordination
 #endif
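To show how the reworked, endpoint-based config types fit together, here is a hedged sketch of assembling a CoordinatorToReplicaConfig; ExampleReplicaConfig() is a hypothetical helper and all names, addresses, ports, and the chosen replication mode are illustrative only.

#include "coordination/coordinator_communication_config.hpp"

namespace memgraph::coordination {
// Sketch only: wiring up the new endpoint-based fields instead of raw ip/port strings.
inline auto ExampleReplicaConfig() -> CoordinatorToReplicaConfig {
  return CoordinatorToReplicaConfig{
      .instance_name = "instance_1",
      .mgt_server = io::network::Endpoint{"127.0.0.1", 10011},
      .bolt_server = io::network::Endpoint{"127.0.0.1", 7687},
      .replication_client_info =
          ReplicationClientInfo{.instance_name = "instance_1",
                                .replication_mode = replication_coordination_glue::ReplicationMode::ASYNC,
                                .replication_server = io::network::Endpoint{"127.0.0.1", 10001}}};
  // CoordinatorSocketAddress() would yield "127.0.0.1:10011" and
  // BoltSocketAddress() "127.0.0.1:7687" for this example.
}
}  // namespace memgraph::coordination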


@@ -94,5 +94,16 @@ class InvalidRaftLogActionException final : public utils::BasicException {
   SPECIALIZE_GET_EXCEPTION_NAME(InvalidRaftLogActionException)
 };
+class InvalidRoutingTableException final : public utils::BasicException {
+ public:
+  explicit InvalidRoutingTableException(std::string_view what) noexcept : BasicException(what) {}
+  template <class... Args>
+  explicit InvalidRoutingTableException(fmt::format_string<Args...> fmt, Args &&...args) noexcept
+      : InvalidRoutingTableException(fmt::format(fmt, std::forward<Args>(args)...)) {}
+  SPECIALIZE_GET_EXCEPTION_NAME(InvalidRoutingTableException)
+};
 } // namespace memgraph::coordination
 #endif


@@ -26,6 +26,8 @@
 namespace memgraph::coordination {
+using RoutingTable = std::vector<std::pair<std::vector<std::string>, std::string>>;
 struct NewMainRes {
   std::string most_up_to_date_instance;
   std::string latest_epoch;
@@ -36,8 +38,14 @@ using InstanceNameDbHistories = std::pair<std::string, replication_coordination_
 class CoordinatorInstance {
  public:
   CoordinatorInstance();
+  CoordinatorInstance(CoordinatorInstance const &) = delete;
+  CoordinatorInstance &operator=(CoordinatorInstance const &) = delete;
+  CoordinatorInstance(CoordinatorInstance &&) noexcept = delete;
+  CoordinatorInstance &operator=(CoordinatorInstance &&) noexcept = delete;
+  ~CoordinatorInstance() = default;
-  [[nodiscard]] auto RegisterReplicationInstance(CoordinatorClientConfig const &config)
+  [[nodiscard]] auto RegisterReplicationInstance(CoordinatorToReplicaConfig const &config)
       -> RegisterInstanceCoordinatorStatus;
   [[nodiscard]] auto UnregisterReplicationInstance(std::string_view instance_name)
       -> UnregisterInstanceCoordinatorStatus;
@@ -48,15 +56,15 @@ class CoordinatorInstance {
   auto TryFailover() -> void;
-  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;
+  auto AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void;
+  auto GetRoutingTable(std::map<std::string, std::string> const &routing) -> RoutingTable;
   static auto ChooseMostUpToDateInstance(std::span<InstanceNameDbHistories> histories) -> NewMainRes;
  private:
   HealthCheckClientCallback client_succ_cb_, client_fail_cb_;
-  auto OnRaftCommitCallback(TRaftLog const &log_entry, RaftLogAction log_action) -> void;
   auto FindReplicationInstance(std::string_view replication_instance_name) -> ReplicationInstance &;
   void MainFailCallback(std::string_view);
@@ -71,7 +79,6 @@ class CoordinatorInstance {
   auto IsReplica(std::string_view instance_name) const -> bool;
   // NOTE: Must be std::list because we rely on pointer stability.
-  // Leader and followers should both have same view on repl_instances_
   std::list<ReplicationInstance> repl_instances_;
   mutable utils::ResourceLock coord_instance_lock_{};


@@ -14,7 +14,7 @@
 #include "utils/uuid.hpp"
 #ifdef MG_ENTERPRISE
-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
 #include "replication_coordination_glue/common.hpp"
 #include "rpc/messages.hpp"
 #include "slk/serialization.hpp"
@@ -28,14 +28,13 @@ struct PromoteReplicaToMainReq {
   static void Load(PromoteReplicaToMainReq *self, memgraph::slk::Reader *reader);
   static void Save(const PromoteReplicaToMainReq &self, memgraph::slk::Builder *builder);
-  explicit PromoteReplicaToMainReq(const utils::UUID &uuid,
-                                   std::vector<CoordinatorClientConfig::ReplicationClientInfo> replication_clients_info)
+  explicit PromoteReplicaToMainReq(const utils::UUID &uuid, std::vector<ReplicationClientInfo> replication_clients_info)
       : main_uuid_(uuid), replication_clients_info(std::move(replication_clients_info)) {}
   PromoteReplicaToMainReq() = default;
   // get uuid here
   utils::UUID main_uuid_;
-  std::vector<CoordinatorClientConfig::ReplicationClientInfo> replication_clients_info;
+  std::vector<ReplicationClientInfo> replication_clients_info;
 };
 struct PromoteReplicaToMainRes {
@@ -60,12 +59,12 @@ struct DemoteMainToReplicaReq {
   static void Load(DemoteMainToReplicaReq *self, memgraph::slk::Reader *reader);
   static void Save(const DemoteMainToReplicaReq &self, memgraph::slk::Builder *builder);
-  explicit DemoteMainToReplicaReq(CoordinatorClientConfig::ReplicationClientInfo replication_client_info)
+  explicit DemoteMainToReplicaReq(ReplicationClientInfo replication_client_info)
       : replication_client_info(std::move(replication_client_info)) {}
   DemoteMainToReplicaReq() = default;
-  CoordinatorClientConfig::ReplicationClientInfo replication_client_info;
+  ReplicationClientInfo replication_client_info;
 };
 struct DemoteMainToReplicaRes {


@ -13,14 +13,14 @@
#ifdef MG_ENTERPRISE #ifdef MG_ENTERPRISE
#include "coordination/coordinator_config.hpp" #include "coordination/coordinator_communication_config.hpp"
#include "rpc/server.hpp" #include "rpc/server.hpp"
namespace memgraph::coordination { namespace memgraph::coordination {
class CoordinatorServer { class CoordinatorServer {
public: public:
explicit CoordinatorServer(const CoordinatorServerConfig &config); explicit CoordinatorServer(const ManagementServerConfig &config);
CoordinatorServer(const CoordinatorServer &) = delete; CoordinatorServer(const CoordinatorServer &) = delete;
CoordinatorServer(CoordinatorServer &&) = delete; CoordinatorServer(CoordinatorServer &&) = delete;
CoordinatorServer &operator=(const CoordinatorServer &) = delete; CoordinatorServer &operator=(const CoordinatorServer &) = delete;


@ -13,27 +13,37 @@
#ifdef MG_ENTERPRISE #ifdef MG_ENTERPRISE
#include "coordination/coordinator_config.hpp" #include "coordination/coordinator_communication_config.hpp"
#include "replication_coordination_glue/common.hpp" #include "replication_coordination_glue/common.hpp"
#include "slk/serialization.hpp" #include "slk/serialization.hpp"
#include "slk/streams.hpp" #include "slk/streams.hpp"
namespace memgraph::slk { namespace memgraph::slk {
using ReplicationClientInfo = coordination::CoordinatorClientConfig::ReplicationClientInfo; using ReplicationClientInfo = coordination::ReplicationClientInfo;
inline void Save(const ReplicationClientInfo &obj, Builder *builder) { inline void Save(io::network::Endpoint const &obj, Builder *builder) {
Save(obj.address, builder);
Save(obj.port, builder);
Save(obj.family, builder);
}
inline void Load(io::network::Endpoint *obj, Reader *reader) {
Load(&obj->address, reader);
Load(&obj->port, reader);
Load(&obj->family, reader);
}
inline void Save(ReplicationClientInfo const &obj, Builder *builder) {
Save(obj.instance_name, builder); Save(obj.instance_name, builder);
Save(obj.replication_mode, builder); Save(obj.replication_mode, builder);
Save(obj.replication_ip_address, builder); Save(obj.replication_server, builder);
Save(obj.replication_port, builder);
} }
inline void Load(ReplicationClientInfo *obj, Reader *reader) { inline void Load(ReplicationClientInfo *obj, Reader *reader) {
Load(&obj->instance_name, reader); Load(&obj->instance_name, reader);
Load(&obj->replication_mode, reader); Load(&obj->replication_mode, reader);
Load(&obj->replication_ip_address, reader); Load(&obj->replication_server, reader);
Load(&obj->replication_port, reader);
} }
inline void Save(const replication_coordination_glue::DatabaseHistory &obj, Builder *builder) { inline void Save(const replication_coordination_glue::DatabaseHistory &obj, Builder *builder) {


@ -33,7 +33,7 @@ class CoordinatorState {
CoordinatorState(CoordinatorState &&) noexcept = delete; CoordinatorState(CoordinatorState &&) noexcept = delete;
CoordinatorState &operator=(CoordinatorState &&) noexcept = delete; CoordinatorState &operator=(CoordinatorState &&) noexcept = delete;
[[nodiscard]] auto RegisterReplicationInstance(CoordinatorClientConfig const &config) [[nodiscard]] auto RegisterReplicationInstance(CoordinatorToReplicaConfig const &config)
-> RegisterInstanceCoordinatorStatus; -> RegisterInstanceCoordinatorStatus;
[[nodiscard]] auto UnregisterReplicationInstance(std::string_view instance_name) [[nodiscard]] auto UnregisterReplicationInstance(std::string_view instance_name)
-> UnregisterInstanceCoordinatorStatus; -> UnregisterInstanceCoordinatorStatus;
@ -42,11 +42,13 @@ class CoordinatorState {
auto ShowInstances() const -> std::vector<InstanceStatus>; auto ShowInstances() const -> std::vector<InstanceStatus>;
auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void; auto AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void;
// NOTE: The client code must check that the server exists before calling this method. // NOTE: The client code must check that the server exists before calling this method.
auto GetCoordinatorServer() const -> CoordinatorServer &; auto GetCoordinatorServer() const -> CoordinatorServer &;
auto GetRoutingTable(std::map<std::string, std::string> const &routing) -> RoutingTable;
private: private:
struct CoordinatorMainReplicaData { struct CoordinatorMainReplicaData {
std::unique_ptr<CoordinatorServer> coordinator_server_; std::unique_ptr<CoordinatorServer> coordinator_server_;


@ -23,7 +23,7 @@
namespace memgraph::coordination { namespace memgraph::coordination {
class CoordinatorInstance; class CoordinatorInstance;
struct CoordinatorClientConfig; struct CoordinatorToReplicaConfig;
using BecomeLeaderCb = std::function<void()>; using BecomeLeaderCb = std::function<void()>;
using BecomeFollowerCb = std::function<void()>; using BecomeFollowerCb = std::function<void()>;
@ -58,24 +58,27 @@ class RaftState {
auto InstanceName() const -> std::string; auto InstanceName() const -> std::string;
auto RaftSocketAddress() const -> std::string; auto RaftSocketAddress() const -> std::string;
auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void; auto AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void;
auto GetAllCoordinators() const -> std::vector<ptr<srv_config>>; auto GetAllCoordinators() const -> std::vector<ptr<srv_config>>;
auto RequestLeadership() -> bool; auto RequestLeadership() -> bool;
auto IsLeader() const -> bool; auto IsLeader() const -> bool;
auto FindCurrentMainInstanceName() const -> std::optional<std::string>;
auto MainExists() const -> bool; auto MainExists() const -> bool;
auto IsMain(std::string_view instance_name) const -> bool; auto IsMain(std::string_view instance_name) const -> bool;
auto IsReplica(std::string_view instance_name) const -> bool; auto IsReplica(std::string_view instance_name) const -> bool;
auto AppendRegisterReplicationInstanceLog(CoordinatorClientConfig const &config) -> bool; auto AppendRegisterReplicationInstanceLog(CoordinatorToReplicaConfig const &config) -> bool;
auto AppendUnregisterReplicationInstanceLog(std::string_view instance_name) -> bool; auto AppendUnregisterReplicationInstanceLog(std::string_view instance_name) -> bool;
auto AppendSetInstanceAsMainLog(std::string_view instance_name) -> bool; auto AppendSetInstanceAsMainLog(std::string_view instance_name) -> bool;
auto AppendSetInstanceAsReplicaLog(std::string_view instance_name) -> bool; auto AppendSetInstanceAsReplicaLog(std::string_view instance_name) -> bool;
auto AppendUpdateUUIDLog(utils::UUID const &uuid) -> bool; auto AppendUpdateUUIDLog(utils::UUID const &uuid) -> bool;
auto AppendAddCoordinatorInstanceLog(CoordinatorToCoordinatorConfig const &config) -> bool;
auto GetReplicationInstances() const -> std::vector<ReplicationInstanceState>;
// TODO: (andi) Do we need then GetAllCoordinators?
auto GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState>;
auto GetInstances() const -> std::vector<InstanceState>;
auto GetUUID() const -> utils::UUID; auto GetUUID() const -> utils::UUID;
private: private:


@ -32,7 +32,7 @@ using HealthCheckInstanceCallback = void (CoordinatorInstance::*)(std::string_vi
class ReplicationInstance { class ReplicationInstance {
public: public:
ReplicationInstance(CoordinatorInstance *peer, CoordinatorClientConfig config, HealthCheckClientCallback succ_cb, ReplicationInstance(CoordinatorInstance *peer, CoordinatorToReplicaConfig config, HealthCheckClientCallback succ_cb,
HealthCheckClientCallback fail_cb, HealthCheckInstanceCallback succ_instance_cb, HealthCheckClientCallback fail_cb, HealthCheckInstanceCallback succ_instance_cb,
HealthCheckInstanceCallback fail_instance_cb); HealthCheckInstanceCallback fail_instance_cb);
@ -67,7 +67,7 @@ class ReplicationInstance {
auto PauseFrequentCheck() -> void; auto PauseFrequentCheck() -> void;
auto ResumeFrequentCheck() -> void; auto ResumeFrequentCheck() -> void;
auto ReplicationClientInfo() const -> ReplClientInfo; auto ReplicationClientInfo() const -> ReplicationClientInfo;
auto EnsureReplicaHasCorrectMainUUID(utils::UUID const &curr_main_uuid) -> bool; auto EnsureReplicaHasCorrectMainUUID(utils::UUID const &curr_main_uuid) -> bool;


@ -13,7 +13,7 @@
#ifdef MG_ENTERPRISE #ifdef MG_ENTERPRISE
#include "coordination/coordinator_config.hpp" #include "coordination/coordinator_communication_config.hpp"
#include "nuraft/raft_log_action.hpp" #include "nuraft/raft_log_action.hpp"
#include "replication_coordination_glue/role.hpp" #include "replication_coordination_glue/role.hpp"
#include "utils/resource_lock.hpp" #include "utils/resource_lock.hpp"
@ -32,19 +32,29 @@ namespace memgraph::coordination {
using replication_coordination_glue::ReplicationRole; using replication_coordination_glue::ReplicationRole;
struct InstanceState { struct ReplicationInstanceState {
CoordinatorClientConfig config; CoordinatorToReplicaConfig config;
ReplicationRole status; ReplicationRole status;
friend auto operator==(InstanceState const &lhs, InstanceState const &rhs) -> bool { friend auto operator==(ReplicationInstanceState const &lhs, ReplicationInstanceState const &rhs) -> bool {
return lhs.config == rhs.config && lhs.status == rhs.status; return lhs.config == rhs.config && lhs.status == rhs.status;
} }
}; };
void to_json(nlohmann::json &j, InstanceState const &instance_state); // NOTE: Currently instance of coordinator doesn't change from the registration. Hence, just wrap
void from_json(nlohmann::json const &j, InstanceState &instance_state); // CoordinatorToCoordinatorConfig.
struct CoordinatorInstanceState {
CoordinatorToCoordinatorConfig config;
using TRaftLog = std::variant<CoordinatorClientConfig, std::string, utils::UUID>; friend auto operator==(CoordinatorInstanceState const &lhs, CoordinatorInstanceState const &rhs) -> bool {
return lhs.config == rhs.config;
}
};
void to_json(nlohmann::json &j, ReplicationInstanceState const &instance_state);
void from_json(nlohmann::json const &j, ReplicationInstanceState &instance_state);
using TRaftLog = std::variant<CoordinatorToReplicaConfig, CoordinatorToCoordinatorConfig, std::string, utils::UUID>;
using nuraft::buffer; using nuraft::buffer;
using nuraft::buffer_serializer; using nuraft::buffer_serializer;
@ -53,7 +63,7 @@ using nuraft::ptr;
class CoordinatorClusterState { class CoordinatorClusterState {
public: public:
CoordinatorClusterState() = default; CoordinatorClusterState() = default;
explicit CoordinatorClusterState(std::map<std::string, InstanceState, std::less<>> instances); explicit CoordinatorClusterState(std::map<std::string, ReplicationInstanceState, std::less<>> instances);
CoordinatorClusterState(CoordinatorClusterState const &); CoordinatorClusterState(CoordinatorClusterState const &);
CoordinatorClusterState &operator=(CoordinatorClusterState const &); CoordinatorClusterState &operator=(CoordinatorClusterState const &);
@ -62,15 +72,13 @@ class CoordinatorClusterState {
CoordinatorClusterState &operator=(CoordinatorClusterState &&other) noexcept; CoordinatorClusterState &operator=(CoordinatorClusterState &&other) noexcept;
~CoordinatorClusterState() = default; ~CoordinatorClusterState() = default;
auto FindCurrentMainInstanceName() const -> std::optional<std::string>;
auto MainExists() const -> bool; auto MainExists() const -> bool;
auto IsMain(std::string_view instance_name) const -> bool; auto IsMain(std::string_view instance_name) const -> bool;
auto IsReplica(std::string_view instance_name) const -> bool; auto IsReplica(std::string_view instance_name) const -> bool;
auto InsertInstance(std::string instance_name, InstanceState instance_state) -> void; auto InsertInstance(std::string instance_name, ReplicationInstanceState instance_state) -> void;
auto DoAction(TRaftLog log_entry, RaftLogAction log_action) -> void; auto DoAction(TRaftLog log_entry, RaftLogAction log_action) -> void;
@ -78,12 +86,15 @@ class CoordinatorClusterState {
static auto Deserialize(buffer &data) -> CoordinatorClusterState; static auto Deserialize(buffer &data) -> CoordinatorClusterState;
auto GetInstances() const -> std::vector<InstanceState>; auto GetReplicationInstances() const -> std::vector<ReplicationInstanceState>;
auto GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState>;
auto GetUUID() const -> utils::UUID; auto GetUUID() const -> utils::UUID;
private: private:
std::map<std::string, InstanceState, std::less<>> instances_{}; std::vector<CoordinatorInstanceState> coordinators_{};
std::map<std::string, ReplicationInstanceState, std::less<>> repl_instances_{};
utils::UUID uuid_{}; utils::UUID uuid_{};
mutable utils::ResourceLock log_lock_{}; mutable utils::ResourceLock log_lock_{};
}; };


@ -13,7 +13,7 @@
#ifdef MG_ENTERPRISE #ifdef MG_ENTERPRISE
#include "coordination/coordinator_config.hpp" #include "coordination/coordinator_communication_config.hpp"
#include "nuraft/coordinator_cluster_state.hpp" #include "nuraft/coordinator_cluster_state.hpp"
#include "nuraft/raft_log_action.hpp" #include "nuraft/raft_log_action.hpp"
@ -42,17 +42,18 @@ class CoordinatorStateMachine : public state_machine {
CoordinatorStateMachine &operator=(CoordinatorStateMachine &&) = delete; CoordinatorStateMachine &operator=(CoordinatorStateMachine &&) = delete;
~CoordinatorStateMachine() override {} ~CoordinatorStateMachine() override {}
auto FindCurrentMainInstanceName() const -> std::optional<std::string>; // TODO: (andi) Check API of this class.
auto MainExists() const -> bool; auto MainExists() const -> bool;
auto IsMain(std::string_view instance_name) const -> bool; auto IsMain(std::string_view instance_name) const -> bool;
auto IsReplica(std::string_view instance_name) const -> bool; auto IsReplica(std::string_view instance_name) const -> bool;
static auto CreateLog(nlohmann::json &&log) -> ptr<buffer>; static auto CreateLog(nlohmann::json &&log) -> ptr<buffer>;
static auto SerializeRegisterInstance(CoordinatorClientConfig const &config) -> ptr<buffer>; static auto SerializeRegisterInstance(CoordinatorToReplicaConfig const &config) -> ptr<buffer>;
static auto SerializeUnregisterInstance(std::string_view instance_name) -> ptr<buffer>; static auto SerializeUnregisterInstance(std::string_view instance_name) -> ptr<buffer>;
static auto SerializeSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer>; static auto SerializeSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer>;
static auto SerializeSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer>; static auto SerializeSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer>;
static auto SerializeUpdateUUID(utils::UUID const &uuid) -> ptr<buffer>; static auto SerializeUpdateUUID(utils::UUID const &uuid) -> ptr<buffer>;
static auto SerializeAddCoordinatorInstance(CoordinatorToCoordinatorConfig const &config) -> ptr<buffer>;
static auto DecodeLog(buffer &data) -> std::pair<TRaftLog, RaftLogAction>; static auto DecodeLog(buffer &data) -> std::pair<TRaftLog, RaftLogAction>;
@ -80,7 +81,10 @@ class CoordinatorStateMachine : public state_machine {
auto create_snapshot(snapshot &s, async_result<bool>::handler_type &when_done) -> void override; auto create_snapshot(snapshot &s, async_result<bool>::handler_type &when_done) -> void override;
auto GetInstances() const -> std::vector<InstanceState>; auto GetReplicationInstances() const -> std::vector<ReplicationInstanceState>;
auto GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState>;
auto GetUUID() const -> utils::UUID; auto GetUUID() const -> utils::UUID;
private: private:


@ -27,16 +27,16 @@ enum class RaftLogAction : uint8_t {
UNREGISTER_REPLICATION_INSTANCE, UNREGISTER_REPLICATION_INSTANCE,
SET_INSTANCE_AS_MAIN, SET_INSTANCE_AS_MAIN,
SET_INSTANCE_AS_REPLICA, SET_INSTANCE_AS_REPLICA,
UPDATE_UUID UPDATE_UUID,
ADD_COORDINATOR_INSTANCE
}; };
NLOHMANN_JSON_SERIALIZE_ENUM(RaftLogAction, { NLOHMANN_JSON_SERIALIZE_ENUM(RaftLogAction, {{RaftLogAction::REGISTER_REPLICATION_INSTANCE, "register"},
{RaftLogAction::REGISTER_REPLICATION_INSTANCE, "register"}, {RaftLogAction::UNREGISTER_REPLICATION_INSTANCE, "unregister"},
{RaftLogAction::UNREGISTER_REPLICATION_INSTANCE, "unregister"}, {RaftLogAction::SET_INSTANCE_AS_MAIN, "promote"},
{RaftLogAction::SET_INSTANCE_AS_MAIN, "promote"}, {RaftLogAction::SET_INSTANCE_AS_REPLICA, "demote"},
{RaftLogAction::SET_INSTANCE_AS_REPLICA, "demote"}, {RaftLogAction::UPDATE_UUID, "update_uuid"},
{RaftLogAction::UPDATE_UUID, "update_uuid"}, {RaftLogAction::ADD_COORDINATOR_INSTANCE, "add_coordinator_instance"}})
})
} // namespace memgraph::coordination
#endif
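A quick illustration (not from this change set) of what the extended NLOHMANN_JSON_SERIALIZE_ENUM mapping provides: the new ADD_COORDINATOR_INSTANCE action round-trips through its string form. Assumptions: an MG_ENTERPRISE build and the include path used elsewhere in this diff.

#include <cassert>
#include <string>

#include "nuraft/raft_log_action.hpp"  // header shown above; pulls in the vendored nlohmann::json

int main() {
  using memgraph::coordination::RaftLogAction;
  nlohmann::json const j = RaftLogAction::ADD_COORDINATOR_INSTANCE;  // to_json generated by the macro
  assert(j.get<std::string>() == "add_coordinator_instance");
  assert(j.get<RaftLogAction>() == RaftLogAction::ADD_COORDINATOR_INSTANCE);  // from_json maps it back
}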


@ -13,7 +13,7 @@
#include <chrono> #include <chrono>
#include <spdlog/spdlog.h> #include <spdlog/spdlog.h>
#include "coordination/coordinator_config.hpp" #include "coordination/coordinator_communication_config.hpp"
#include "coordination/coordinator_exceptions.hpp" #include "coordination/coordinator_exceptions.hpp"
#include "coordination/raft_state.hpp" #include "coordination/raft_state.hpp"
#include "utils/counter.hpp" #include "utils/counter.hpp"
@ -113,10 +113,9 @@ auto RaftState::InstanceName() const -> std::string {
auto RaftState::RaftSocketAddress() const -> std::string { return raft_endpoint_.SocketAddress(); } auto RaftState::RaftSocketAddress() const -> std::string { return raft_endpoint_.SocketAddress(); }
auto RaftState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) auto RaftState::AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void {
-> void { auto const endpoint = config.coordinator_server.SocketAddress();
auto const endpoint = fmt::format("{}:{}", raft_address, raft_port); srv_config const srv_config_to_add(static_cast<int>(config.coordinator_server_id), endpoint);
srv_config const srv_config_to_add(static_cast<int>(raft_server_id), endpoint);
auto cmd_result = raft_server_->add_srv(srv_config_to_add); auto cmd_result = raft_server_->add_srv(srv_config_to_add);
@ -134,9 +133,9 @@ auto RaftState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_po
bool added{false}; bool added{false};
while (!maybe_stop()) { while (!maybe_stop()) {
std::this_thread::sleep_for(std::chrono::milliseconds(waiting_period)); std::this_thread::sleep_for(std::chrono::milliseconds(waiting_period));
const auto server_config = raft_server_->get_srv_config(static_cast<nuraft::int32>(raft_server_id)); const auto server_config = raft_server_->get_srv_config(static_cast<nuraft::int32>(config.coordinator_server_id));
if (server_config) { if (server_config) {
spdlog::trace("Server with id {} added to cluster", raft_server_id); spdlog::trace("Server with id {} added to cluster", config.coordinator_server_id);
added = true; added = true;
break; break;
} }
@ -158,7 +157,7 @@ auto RaftState::IsLeader() const -> bool { return raft_server_->is_leader(); }
auto RaftState::RequestLeadership() -> bool { return raft_server_->is_leader() || raft_server_->request_leadership(); } auto RaftState::RequestLeadership() -> bool { return raft_server_->is_leader() || raft_server_->request_leadership(); }
auto RaftState::AppendRegisterReplicationInstanceLog(CoordinatorClientConfig const &config) -> bool { auto RaftState::AppendRegisterReplicationInstanceLog(CoordinatorToReplicaConfig const &config) -> bool {
auto new_log = CoordinatorStateMachine::SerializeRegisterInstance(config); auto new_log = CoordinatorStateMachine::SerializeRegisterInstance(config);
auto const res = raft_server_->append_entries({new_log}); auto const res = raft_server_->append_entries({new_log});
@ -261,8 +260,26 @@ auto RaftState::AppendUpdateUUIDLog(utils::UUID const &uuid) -> bool {
return true; return true;
} }
auto RaftState::FindCurrentMainInstanceName() const -> std::optional<std::string> { auto RaftState::AppendAddCoordinatorInstanceLog(CoordinatorToCoordinatorConfig const &config) -> bool {
return state_machine_->FindCurrentMainInstanceName(); auto new_log = CoordinatorStateMachine::SerializeAddCoordinatorInstance(config);
auto const res = raft_server_->append_entries({new_log});
if (!res->get_accepted()) {
spdlog::error(
"Failed to accept request for adding coordinator instance {}. Most likely the reason is that the instance is "
"not the leader.",
config.coordinator_server_id);
return false;
}
spdlog::info("Request for adding coordinator instance {} accepted", config.coordinator_server_id);
if (res->get_result_code() != nuraft::cmd_result_code::OK) {
spdlog::error("Failed to add coordinator instance {} with error code {}", config.coordinator_server_id,
static_cast<int>(res->get_result_code()));
return false;
}
return true;
} }
auto RaftState::MainExists() const -> bool { return state_machine_->MainExists(); } auto RaftState::MainExists() const -> bool { return state_machine_->MainExists(); }
@ -273,7 +290,13 @@ auto RaftState::IsReplica(std::string_view instance_name) const -> bool {
return state_machine_->IsReplica(instance_name); return state_machine_->IsReplica(instance_name);
} }
auto RaftState::GetInstances() const -> std::vector<InstanceState> { return state_machine_->GetInstances(); } auto RaftState::GetReplicationInstances() const -> std::vector<ReplicationInstanceState> {
return state_machine_->GetReplicationInstances();
}
auto RaftState::GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState> {
return state_machine_->GetCoordinatorInstances();
}
auto RaftState::GetUUID() const -> utils::UUID { return state_machine_->GetUUID(); } auto RaftState::GetUUID() const -> utils::UUID { return state_machine_->GetUUID(); }


@ -20,7 +20,7 @@
namespace memgraph::coordination { namespace memgraph::coordination {
ReplicationInstance::ReplicationInstance(CoordinatorInstance *peer, CoordinatorClientConfig config, ReplicationInstance::ReplicationInstance(CoordinatorInstance *peer, CoordinatorToReplicaConfig config,
HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb, HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb,
HealthCheckInstanceCallback succ_instance_cb, HealthCheckInstanceCallback succ_instance_cb,
HealthCheckInstanceCallback fail_instance_cb) HealthCheckInstanceCallback fail_instance_cb)
@ -82,7 +82,7 @@ auto ReplicationInstance::StopFrequentCheck() -> void { client_.StopFrequentChec
auto ReplicationInstance::PauseFrequentCheck() -> void { client_.PauseFrequentCheck(); } auto ReplicationInstance::PauseFrequentCheck() -> void { client_.PauseFrequentCheck(); }
auto ReplicationInstance::ResumeFrequentCheck() -> void { client_.ResumeFrequentCheck(); } auto ReplicationInstance::ResumeFrequentCheck() -> void { client_.ResumeFrequentCheck(); }
auto ReplicationInstance::ReplicationClientInfo() const -> CoordinatorClientConfig::ReplicationClientInfo { auto ReplicationInstance::ReplicationClientInfo() const -> coordination::ReplicationClientInfo {
return client_.ReplicationClientInfo(); return client_.ReplicationClientInfo();
} }


@ -20,7 +20,7 @@ namespace memgraph::dbms {
CoordinatorHandler::CoordinatorHandler(coordination::CoordinatorState &coordinator_state) CoordinatorHandler::CoordinatorHandler(coordination::CoordinatorState &coordinator_state)
: coordinator_state_(coordinator_state) {} : coordinator_state_(coordinator_state) {}
auto CoordinatorHandler::RegisterReplicationInstance(coordination::CoordinatorClientConfig const &config) auto CoordinatorHandler::RegisterReplicationInstance(coordination::CoordinatorToReplicaConfig const &config)
-> coordination::RegisterInstanceCoordinatorStatus { -> coordination::RegisterInstanceCoordinatorStatus {
return coordinator_state_.RegisterReplicationInstance(config); return coordinator_state_.RegisterReplicationInstance(config);
} }
@ -39,9 +39,8 @@ auto CoordinatorHandler::ShowInstances() const -> std::vector<coordination::Inst
return coordinator_state_.ShowInstances(); return coordinator_state_.ShowInstances();
} }
auto CoordinatorHandler::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, auto CoordinatorHandler::AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void {
std::string_view raft_address) -> void { coordinator_state_.AddCoordinatorInstance(config);
coordinator_state_.AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
} }
} // namespace memgraph::dbms } // namespace memgraph::dbms


@ -13,7 +13,7 @@
#ifdef MG_ENTERPRISE #ifdef MG_ENTERPRISE
#include "coordination/coordinator_config.hpp" #include "coordination/coordinator_communication_config.hpp"
#include "coordination/coordinator_state.hpp" #include "coordination/coordinator_state.hpp"
#include "coordination/instance_status.hpp" #include "coordination/instance_status.hpp"
#include "coordination/register_main_replica_coordinator_status.hpp" #include "coordination/register_main_replica_coordinator_status.hpp"
@ -30,7 +30,7 @@ class CoordinatorHandler {
// TODO: (andi) When moving coordinator state on same instances, rename from RegisterReplicationInstance to // TODO: (andi) When moving coordinator state on same instances, rename from RegisterReplicationInstance to
// RegisterInstance // RegisterInstance
auto RegisterReplicationInstance(coordination::CoordinatorClientConfig const &config) auto RegisterReplicationInstance(coordination::CoordinatorToReplicaConfig const &config)
-> coordination::RegisterInstanceCoordinatorStatus; -> coordination::RegisterInstanceCoordinatorStatus;
auto UnregisterReplicationInstance(std::string_view instance_name) auto UnregisterReplicationInstance(std::string_view instance_name)
@ -40,7 +40,7 @@ class CoordinatorHandler {
auto ShowInstances() const -> std::vector<coordination::InstanceStatus>; auto ShowInstances() const -> std::vector<coordination::InstanceStatus>;
auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void; auto AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void;
private: private:
coordination::CoordinatorState &coordinator_state_; coordination::CoordinatorState &coordinator_state_;


@ -249,6 +249,40 @@ std::pair<std::vector<std::string>, std::optional<int>> SessionHL::Interpret(
} }
} }
using memgraph::communication::bolt::Value;
#ifdef MG_ENTERPRISE
auto SessionHL::Route(std::map<std::string, Value> const &routing,
std::vector<memgraph::communication::bolt::Value> const & /*bookmarks*/,
std::map<std::string, Value> const & /*extra*/) -> std::map<std::string, Value> {
auto routing_map = ranges::views::transform(
routing, [](auto const &pair) { return std::pair(pair.first, pair.second.ValueString()); }) |
ranges::to<std::map<std::string, std::string>>();
auto routing_table_res = interpreter_.Route(routing_map);
auto create_server = [](auto const &server_info) -> Value {
auto const &[addresses, role] = server_info;
std::map<std::string, Value> server_map;
auto bolt_addresses = ranges::views::transform(addresses, [](auto const &addr) { return Value{addr}; }) |
ranges::to<std::vector<Value>>();
server_map["addresses"] = std::move(bolt_addresses);
server_map["role"] = memgraph::communication::bolt::Value{role};
return Value{std::move(server_map)};
};
std::map<std::string, Value> communication_res;
communication_res["ttl"] = Value{routing_table_res.ttl};
communication_res["db"] = Value{};
auto servers = ranges::views::transform(routing_table_res.servers, create_server) | ranges::to<std::vector<Value>>();
communication_res["servers"] = memgraph::communication::bolt::Value{std::move(servers)};
return {{"rt", memgraph::communication::bolt::Value{std::move(communication_res)}}};
}
#endif
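For readers unfamiliar with the bolt ROUTE response, the handler above nests the routing data under an "rt" key with "ttl", "db" and "servers" entries, each server carrying "addresses" and a "role". A rough rendering of that shape as JSON, with made-up addresses (the real code builds bolt Values, not JSON; the upstream nlohmann include path is an assumption):

#include <iostream>

#include <nlohmann/json.hpp>  // assumed upstream path; Memgraph vendors the same library as "json/json.hpp"

int main() {
  nlohmann::json writer, reader, rt;
  writer["addresses"] = {"127.0.0.1:7687"};  // hypothetical MAIN bolt address
  writer["role"] = "WRITE";
  reader["addresses"] = {"127.0.0.1:7688"};  // hypothetical REPLICA bolt address
  reader["role"] = "READ";
  rt["ttl"] = 300;
  rt["db"] = nullptr;                        // the handler leaves "db" empty
  rt["servers"] = {writer, reader};
  std::cout << nlohmann::json{{"rt", rt}}.dump(2) << '\n';
}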
void SessionHL::RollbackTransaction() {
try {
interpreter_.RollbackTransaction();


@ -55,6 +55,13 @@ class SessionHL final : public memgraph::communication::bolt::Session<memgraph::
const std::string &query, const std::map<std::string, memgraph::communication::bolt::Value> &params, const std::string &query, const std::map<std::string, memgraph::communication::bolt::Value> &params,
const std::map<std::string, memgraph::communication::bolt::Value> &extra) override; const std::map<std::string, memgraph::communication::bolt::Value> &extra) override;
#ifdef MG_ENTERPRISE
auto Route(std::map<std::string, memgraph::communication::bolt::Value> const &routing,
std::vector<memgraph::communication::bolt::Value> const &bookmarks,
std::map<std::string, memgraph::communication::bolt::Value> const &extra)
-> std::map<std::string, memgraph::communication::bolt::Value> override;
#endif
std::map<std::string, memgraph::communication::bolt::Value> Pull(TEncoder *encoder, std::optional<int> n, std::map<std::string, memgraph::communication::bolt::Value> Pull(TEncoder *encoder, std::optional<int> n,
std::optional<int> qid) override; std::optional<int> qid) override;


@ -82,8 +82,7 @@ bool Endpoint::IsResolvableAddress(std::string_view address, uint16_t port) {
return status == 0; return status == 0;
} }
std::optional<ParsedAddress> Endpoint::ParseSocketOrAddress(std::string_view address, std::optional<Endpoint> Endpoint::ParseSocketOrAddress(std::string_view address, std::optional<uint16_t> default_port) {
std::optional<uint16_t> default_port) {
auto const parts = utils::SplitView(address, delimiter); auto const parts = utils::SplitView(address, delimiter);
if (parts.size() > 2) { if (parts.size() > 2) {
@ -109,13 +108,13 @@ std::optional<ParsedAddress> Endpoint::ParseSocketOrAddress(std::string_view add
}(); }();
if (GetIpFamily(addr) == IpFamily::NONE) { if (GetIpFamily(addr) == IpFamily::NONE) {
if (IsResolvableAddress(addr, *port)) { // NOLINT if (IsResolvableAddress(addr, *port)) { // NOLINT
return std::pair{addr, *port}; // NOLINT return Endpoint{std::string(addr), *port}; // NOLINT
} }
return std::nullopt; return std::nullopt;
} }
return std::pair{addr, *port}; // NOLINT return Endpoint{std::string(addr), *port}; // NOLINT
} }
auto Endpoint::ValidatePort(std::optional<uint16_t> port) -> bool { auto Endpoint::ValidatePort(std::optional<uint16_t> port) -> bool {
@ -138,4 +137,14 @@ auto Endpoint::ValidatePort(std::optional<uint16_t> port) -> bool {
return true; return true;
} }
void to_json(nlohmann::json &j, Endpoint const &config) {
j = nlohmann::json{{"address", config.address}, {"port", config.port}, {"family", config.family}};
}
void from_json(nlohmann::json const &j, Endpoint &config) {
config.address = j.at("address").get<std::string>();
config.port = j.at("port").get<uint16_t>();
config.family = j.at("family").get<Endpoint::IpFamily>();
}
} // namespace memgraph::io::network
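A small usage sketch of the JSON round trip added above (assumptions: the include path below and a default-constructible Endpoint):

#include <cassert>

#include "io/network/endpoint.hpp"  // assumed include path for the Endpoint header

int main() {
  using memgraph::io::network::Endpoint;
  Endpoint const original{"127.0.0.1", 7687};  // constructor used by ParseSocketOrAddress above
  nlohmann::json const j = original;           // to_json
  Endpoint restored;                           // assumes Endpoint is default-constructible
  from_json(j, restored);                      // from_json, found via ADL
  assert(restored.address == original.address && restored.port == original.port);
}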


@ -17,9 +17,9 @@
#include <optional> #include <optional>
#include <string> #include <string>
namespace memgraph::io::network { #include "json/json.hpp"
using ParsedAddress = std::pair<std::string_view, uint16_t>; namespace memgraph::io::network {
struct Endpoint { struct Endpoint {
static const struct needs_resolving_t { static const struct needs_resolving_t {
@ -39,8 +39,8 @@ struct Endpoint {
enum class IpFamily : std::uint8_t { NONE, IP4, IP6 }; enum class IpFamily : std::uint8_t { NONE, IP4, IP6 };
static std::optional<ParsedAddress> ParseSocketOrAddress(std::string_view address, static std::optional<Endpoint> ParseSocketOrAddress(std::string_view address,
std::optional<uint16_t> default_port = {}); std::optional<uint16_t> default_port = {});
std::string SocketAddress() const; std::string SocketAddress() const;
@ -59,4 +59,7 @@ struct Endpoint {
static auto ValidatePort(std::optional<uint16_t> port) -> bool; static auto ValidatePort(std::optional<uint16_t> port) -> bool;
}; };
void to_json(nlohmann::json &j, Endpoint const &config);
void from_json(nlohmann::json const &j, Endpoint &config);
} // namespace memgraph::io::network } // namespace memgraph::io::network


@ -328,15 +328,14 @@ class ReplQueryHandler {
const auto repl_mode = convertToReplicationMode(sync_mode); const auto repl_mode = convertToReplicationMode(sync_mode);
const auto maybe_ip_and_port = auto maybe_endpoint =
io::network::Endpoint::ParseSocketOrAddress(socket_address, memgraph::replication::kDefaultReplicationPort); io::network::Endpoint::ParseSocketOrAddress(socket_address, memgraph::replication::kDefaultReplicationPort);
if (maybe_ip_and_port) { if (maybe_endpoint) {
const auto [ip, port] = *maybe_ip_and_port;
const auto replication_config = const auto replication_config =
replication::ReplicationClientConfig{.name = name, replication::ReplicationClientConfig{.name = name,
.mode = repl_mode, .mode = repl_mode,
.ip_address = std::string(ip), .ip_address = std::move(maybe_endpoint->address),
.port = port, .port = maybe_endpoint->port,
.replica_check_frequency = replica_check_frequency, .replica_check_frequency = replica_check_frequency,
.ssl = std::nullopt}; .ssl = std::nullopt};
@ -413,39 +412,41 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
} }
} }
void RegisterReplicationInstance(std::string_view coordinator_socket_address, void RegisterReplicationInstance(std::string_view bolt_server, std::string_view management_server,
std::string_view replication_socket_address, std::string_view replication_server,
std::chrono::seconds const &instance_check_frequency, std::chrono::seconds const &instance_check_frequency,
std::chrono::seconds const &instance_down_timeout, std::chrono::seconds const &instance_down_timeout,
std::chrono::seconds const &instance_get_uuid_frequency, std::chrono::seconds const &instance_get_uuid_frequency,
std::string_view instance_name, CoordinatorQuery::SyncMode sync_mode) override { std::string_view instance_name, CoordinatorQuery::SyncMode sync_mode) override {
const auto maybe_replication_ip_port = io::network::Endpoint::ParseSocketOrAddress(replication_socket_address); auto const maybe_bolt_server = io::network::Endpoint::ParseSocketOrAddress(bolt_server);
if (!maybe_replication_ip_port) { if (!maybe_bolt_server) {
throw QueryRuntimeException("Invalid bolt socket address!");
}
auto const maybe_management_server = io::network::Endpoint::ParseSocketOrAddress(management_server);
if (!maybe_management_server) {
throw QueryRuntimeException("Invalid management socket address!");
}
auto const maybe_replication_server = io::network::Endpoint::ParseSocketOrAddress(replication_server);
if (!maybe_replication_server) {
throw QueryRuntimeException("Invalid replication socket address!"); throw QueryRuntimeException("Invalid replication socket address!");
} }
const auto maybe_coordinator_ip_port = io::network::Endpoint::ParseSocketOrAddress(coordinator_socket_address); auto const repl_config =
if (!maybe_replication_ip_port) { coordination::ReplicationClientInfo{.instance_name = std::string(instance_name),
throw QueryRuntimeException("Invalid replication socket address!"); .replication_mode = convertFromCoordinatorToReplicationMode(sync_mode),
} .replication_server = *maybe_replication_server};
const auto [replication_ip, replication_port] = *maybe_replication_ip_port;
const auto [coordinator_server_ip, coordinator_server_port] = *maybe_coordinator_ip_port;
const auto repl_config = coordination::CoordinatorClientConfig::ReplicationClientInfo{
.instance_name = std::string(instance_name),
.replication_mode = convertFromCoordinatorToReplicationMode(sync_mode),
.replication_ip_address = std::string(replication_ip),
.replication_port = replication_port};
auto coordinator_client_config = auto coordinator_client_config =
coordination::CoordinatorClientConfig{.instance_name = std::string(instance_name), coordination::CoordinatorToReplicaConfig{.instance_name = std::string(instance_name),
.ip_address = std::string(coordinator_server_ip), .mgt_server = *maybe_management_server,
.port = coordinator_server_port, .bolt_server = *maybe_bolt_server,
.instance_health_check_frequency_sec = instance_check_frequency, .replication_client_info = repl_config,
.instance_down_timeout_sec = instance_down_timeout, .instance_health_check_frequency_sec = instance_check_frequency,
.instance_get_uuid_frequency_sec = instance_get_uuid_frequency, .instance_down_timeout_sec = instance_down_timeout,
.replication_client_info = repl_config, .instance_get_uuid_frequency_sec = instance_get_uuid_frequency,
.ssl = std::nullopt}; .ssl = std::nullopt};
auto status = coordinator_handler_.RegisterReplicationInstance(coordinator_client_config); auto status = coordinator_handler_.RegisterReplicationInstance(coordinator_client_config);
switch (status) { switch (status) {
@ -473,15 +474,25 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
} }
} }
auto AddCoordinatorInstance(uint32_t raft_server_id, std::string_view raft_socket_address) -> void override { auto AddCoordinatorInstance(uint32_t raft_server_id, std::string_view bolt_server,
auto const maybe_ip_and_port = io::network::Endpoint::ParseSocketOrAddress(raft_socket_address); std::string_view coordinator_server) -> void override {
if (maybe_ip_and_port) { auto const maybe_coordinator_server = io::network::Endpoint::ParseSocketOrAddress(coordinator_server);
auto const [ip, port] = *maybe_ip_and_port; if (!maybe_coordinator_server) {
spdlog::info("Adding instance {} with raft socket address {}:{}.", raft_server_id, ip, port); throw QueryRuntimeException("Invalid coordinator socket address!");
coordinator_handler_.AddCoordinatorInstance(raft_server_id, port, ip);
} else {
spdlog::error("Invalid raft socket address {}.", raft_socket_address);
} }
auto const maybe_bolt_server = io::network::Endpoint::ParseSocketOrAddress(bolt_server);
if (!maybe_bolt_server) {
throw QueryRuntimeException("Invalid bolt socket address!");
}
auto const coord_coord_config =
coordination::CoordinatorToCoordinatorConfig{.coordinator_server_id = raft_server_id,
.bolt_server = *maybe_bolt_server,
.coordinator_server = *maybe_coordinator_server};
coordinator_handler_.AddCoordinatorInstance(coord_coord_config);
spdlog::info("Added instance on coordinator server {}", maybe_coordinator_server->SocketAddress());
} }
void SetReplicationInstanceToMain(std::string_view instance_name) override { void SetReplicationInstanceToMain(std::string_view instance_name) override {
@ -1197,8 +1208,9 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
auto coord_server_id = coordinator_query->coordinator_server_id_->Accept(evaluator).ValueInt(); auto coord_server_id = coordinator_query->coordinator_server_id_->Accept(evaluator).ValueInt();
callback.fn = [handler = CoordQueryHandler{*coordinator_state}, coord_server_id, callback.fn = [handler = CoordQueryHandler{*coordinator_state}, coord_server_id,
bolt_server = bolt_server_it->second,
coordinator_server = coordinator_server_it->second]() mutable { coordinator_server = coordinator_server_it->second]() mutable {
handler.AddCoordinatorInstance(coord_server_id, coordinator_server); handler.AddCoordinatorInstance(coord_server_id, bolt_server, coordinator_server);
return std::vector<std::vector<TypedValue>>(); return std::vector<std::vector<TypedValue>>();
}; };
@ -1243,15 +1255,15 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
callback.fn = [handler = CoordQueryHandler{*coordinator_state}, callback.fn = [handler = CoordQueryHandler{*coordinator_state},
instance_health_check_frequency_sec = config.instance_health_check_frequency_sec, instance_health_check_frequency_sec = config.instance_health_check_frequency_sec,
management_server = management_server_it->second, bolt_server = bolt_server_it->second, management_server = management_server_it->second,
replication_server = replication_server_it->second, bolt_server = bolt_server_it->second, replication_server = replication_server_it->second,
instance_name = coordinator_query->instance_name_, instance_name = coordinator_query->instance_name_,
instance_down_timeout_sec = config.instance_down_timeout_sec, instance_down_timeout_sec = config.instance_down_timeout_sec,
instance_get_uuid_frequency_sec = config.instance_get_uuid_frequency_sec, instance_get_uuid_frequency_sec = config.instance_get_uuid_frequency_sec,
sync_mode = coordinator_query->sync_mode_]() mutable { sync_mode = coordinator_query->sync_mode_]() mutable {
handler.RegisterReplicationInstance(management_server, replication_server, instance_health_check_frequency_sec, handler.RegisterReplicationInstance(bolt_server, management_server, replication_server,
instance_down_timeout_sec, instance_get_uuid_frequency_sec, instance_name, instance_health_check_frequency_sec, instance_down_timeout_sec,
sync_mode); instance_get_uuid_frequency_sec, instance_name, sync_mode);
return std::vector<std::vector<TypedValue>>(); return std::vector<std::vector<TypedValue>>();
}; };
@ -4266,6 +4278,28 @@ void Interpreter::RollbackTransaction() {
ResetInterpreter(); ResetInterpreter();
} }
#ifdef MG_ENTERPRISE
auto Interpreter::Route(std::map<std::string, std::string> const &routing) -> RouteResult {
// TODO: (andi) Test
if (!FLAGS_raft_server_id) {
auto const &address = routing.find("address");
if (address == routing.end()) {
throw QueryException("Routing table must contain address field.");
}
auto result = RouteResult{};
if (interpreter_context_->repl_state->IsMain()) {
result.servers.emplace_back(std::vector<std::string>{address->second}, "WRITE");
} else {
result.servers.emplace_back(std::vector<std::string>{address->second}, "READ");
}
return result;
}
return RouteResult{.servers = interpreter_context_->coordinator_state_->GetRoutingTable(routing)};
}
#endif
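A minimal sketch, not the real implementation, of the single routing-table entry Interpreter::Route builds on a data instance (when FLAGS_raft_server_id is not set, i.e. not a coordinator): the only entry is the address supplied in the routing context, and the role follows the instance's replication role. The RoutingTable shape and the ttl of 300 come from the declarations earlier in this diff.

#include <cassert>
#include <map>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

// Same shape as coordination::RoutingTable in this change set.
using RoutingTable = std::vector<std::pair<std::vector<std::string>, std::string>>;

RoutingTable SingleInstanceRoutingTable(std::map<std::string, std::string> const &routing, bool const is_main) {
  auto const address = routing.find("address");
  if (address == routing.end()) {
    throw std::invalid_argument{"Routing table must contain address field."};
  }
  RoutingTable servers;
  servers.emplace_back(std::vector<std::string>{address->second}, is_main ? "WRITE" : "READ");
  return servers;
}

int main() {
  auto const table = SingleInstanceRoutingTable({{"address", "127.0.0.1:7687"}}, /*is_main=*/true);
  assert(table.size() == 1 && table.front().second == "WRITE");  // Route serves this with ttl 300
}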
#if MG_ENTERPRISE
// Before Prepare or during Prepare, but single-threaded.
// TODO: Is there any cleanup?


@ -143,8 +143,8 @@ class CoordinatorQueryHandler {
}; };
/// @throw QueryRuntimeException if an error ocurred. /// @throw QueryRuntimeException if an error ocurred.
virtual void RegisterReplicationInstance(std::string_view coordinator_socket_address, virtual void RegisterReplicationInstance(std::string_view bolt_server, std::string_view management_server,
std::string_view replication_socket_address, std::string_view replication_server,
std::chrono::seconds const &instance_health_check_frequency, std::chrono::seconds const &instance_health_check_frequency,
std::chrono::seconds const &instance_down_timeout, std::chrono::seconds const &instance_down_timeout,
std::chrono::seconds const &instance_get_uuid_frequency, std::chrono::seconds const &instance_get_uuid_frequency,
@ -160,7 +160,8 @@ class CoordinatorQueryHandler {
virtual std::vector<coordination::InstanceStatus> ShowInstances() const = 0; virtual std::vector<coordination::InstanceStatus> ShowInstances() const = 0;
/// @throw QueryRuntimeException if an error ocurred. /// @throw QueryRuntimeException if an error ocurred.
virtual auto AddCoordinatorInstance(uint32_t raft_server_id, std::string_view coordinator_socket_address) -> void = 0; virtual auto AddCoordinatorInstance(uint32_t raft_server_id, std::string_view bolt_server,
std::string_view coordinator_server) -> void = 0;
}; };
#endif #endif
@ -247,6 +248,14 @@ class Interpreter final {
std::optional<std::string> db; std::optional<std::string> db;
}; };
#ifdef MG_ENTERPRISE
struct RouteResult {
int ttl{300};
std::string db{}; // Currently not used since we don't have any specific replication groups etc.
coordination::RoutingTable servers{};
};
#endif
std::shared_ptr<QueryUserOrRole> user_or_role_{}; std::shared_ptr<QueryUserOrRole> user_or_role_{};
bool in_explicit_transaction_{false}; bool in_explicit_transaction_{false};
CurrentDB current_db_; CurrentDB current_db_;
@ -272,6 +281,10 @@ class Interpreter final {
const std::map<std::string, storage::PropertyValue> &params, const std::map<std::string, storage::PropertyValue> &params,
QueryExtras const &extras); QueryExtras const &extras);
#ifdef MG_ENTERPRISE
auto Route(std::map<std::string, std::string> const &routing) -> RouteResult;
#endif
/** /**
* Execute the last prepared query and stream *all* of the results into the * Execute the last prepared query and stream *all* of the results into the
* given stream. * given stream.


@ -13,12 +13,13 @@ func handle_if_error(err error) {
} }
func main() { func main() {
dbUri := "bolt://localhost:7687" fmt.Println("Started running docs_quick_start.go test")
driver, err := neo4j.NewDriver(dbUri, neo4j.BasicAuth("", "", "")) dbUri := "bolt://localhost:7687"
if err != nil { driver, err := neo4j.NewDriver(dbUri, neo4j.BasicAuth("", "", ""))
log.Fatal("An error occurred opening conn: %s", err) if err != nil {
} log.Fatal("An error occurred opening conn: %s", err)
defer driver.Close() }
defer driver.Close()
session := driver.NewSession(neo4j.SessionConfig{}) session := driver.NewSession(neo4j.SessionConfig{})
defer session.Close() defer session.Close()
@ -33,7 +34,7 @@ func main() {
_,err = session.WriteTransaction(testAll) _,err = session.WriteTransaction(testAll)
handle_if_error(err) handle_if_error(err)
fmt.Println("All ok!") fmt.Println("doc_quick_start.go test finished successfully.")
} }
func clearDatabase(tx neo4j.Transaction) (interface{}, error) { func clearDatabase(tx neo4j.Transaction) (interface{}, error) {
@ -75,15 +76,14 @@ func testAll(tx neo4j.Transaction) (interface{}, error) {
handle_if_error(err) handle_if_error(err)
age, err := neo4j.GetProperty[int64](node_value, "age") age, err := neo4j.GetProperty[int64](node_value, "age")
handle_if_error(err) handle_if_error(err)
if label != "Person" && name != "Alice" && age != 22 { if label != "Person" && name != "Alice" && age != 22 {
return nil, fmt.Errorf("Data doesn't match.") return nil, fmt.Errorf("Data doesn't match.")
} }
fmt.Println("Label", label) fmt.Println("Label", label)
fmt.Println("name", name) fmt.Println("name", name)
fmt.Println("age", age) fmt.Println("age", age)
return result.Consume() return result.Consume()
} }


@ -3,6 +3,6 @@ module bolt-test
go 1.18 go 1.18
require ( require (
github.com/neo4j/neo4j-go-driver/v5 v5.13.0 // indirect github.com/neo4j/neo4j-go-driver/v5 v5.18.0 // indirect
golang.org/dl v0.0.0-20230502172222-5216546bad51 // indirect golang.org/dl v0.0.0-20230502172222-5216546bad51 // indirect
) )


@ -8,5 +8,7 @@ github.com/neo4j/neo4j-go-driver/v5 v5.9.0 h1:TYxT0RSiwnvVFia90V7TLnRXv8HkdQQ6rT
github.com/neo4j/neo4j-go-driver/v5 v5.9.0/go.mod h1:Vff8OwT7QpLm7L2yYr85XNWe9Rbqlbeb9asNXJTHO4k= github.com/neo4j/neo4j-go-driver/v5 v5.9.0/go.mod h1:Vff8OwT7QpLm7L2yYr85XNWe9Rbqlbeb9asNXJTHO4k=
github.com/neo4j/neo4j-go-driver/v5 v5.13.0 h1:NmyUxh4LYTdcJdI6EnazHyUKu1f0/BPiHCYUZUZIGQw= github.com/neo4j/neo4j-go-driver/v5 v5.13.0 h1:NmyUxh4LYTdcJdI6EnazHyUKu1f0/BPiHCYUZUZIGQw=
github.com/neo4j/neo4j-go-driver/v5 v5.13.0/go.mod h1:Vff8OwT7QpLm7L2yYr85XNWe9Rbqlbeb9asNXJTHO4k= github.com/neo4j/neo4j-go-driver/v5 v5.13.0/go.mod h1:Vff8OwT7QpLm7L2yYr85XNWe9Rbqlbeb9asNXJTHO4k=
github.com/neo4j/neo4j-go-driver/v5 v5.18.0 h1:3dmYsCYt/Fc/bPeSyGRGGfn/T6h06/OmHm72OFQKa3c=
github.com/neo4j/neo4j-go-driver/v5 v5.18.0/go.mod h1:Vff8OwT7QpLm7L2yYr85XNWe9Rbqlbeb9asNXJTHO4k=
golang.org/dl v0.0.0-20230502172222-5216546bad51 h1:Bmo/kmR2hzyhGt3jjtl1ghkCqa5LINbB9D3QTkiLJIY= golang.org/dl v0.0.0-20230502172222-5216546bad51 h1:Bmo/kmR2hzyhGt3jjtl1ghkCqa5LINbB9D3QTkiLJIY=
golang.org/dl v0.0.0-20230502172222-5216546bad51/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ= golang.org/dl v0.0.0-20230502172222-5216546bad51/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=


@ -0,0 +1,51 @@
package main
import (
"fmt"
"github.com/neo4j/neo4j-go-driver/v5/neo4j"
)
func read_messages(uri string) {
username := ""
password := ""
// Connect to Memgraph
driver, err := neo4j.NewDriver(uri, neo4j.BasicAuth(username, password, ""))
if err != nil {
panic(err)
}
defer driver.Close()
// Use AccessModeRead for read transactions
session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
greeting, err := session.ReadTransaction(func(transaction neo4j.Transaction) (interface{}, error) {
result, err := transaction.Run("MATCH (n:Greeting) RETURN n.message AS message LIMIT 1", nil)
if err != nil {
return nil, err
}
if result.Next() {
return result.Record().Values[0], nil
}
return nil, result.Err()
})
if err != nil {
panic(err)
}
fmt.Println(greeting)
}
// Test checks that you can use bolt+routing for connecting to main and coordinators for reading.
func main() {
fmt.Println("Started running read_route.go test")
read_messages("neo4j://localhost:7690") // coordinator_1
read_messages("neo4j://localhost:7691") // coordinator_2
read_messages("neo4j://localhost:7692") // coordinator_3
fmt.Println("Successfully finished running coordinator_route.go test")
}


@ -18,4 +18,3 @@ done
go get github.com/neo4j/neo4j-go-driver/v5 go get github.com/neo4j/neo4j-go-driver/v5
go run docs_quick_start.go go run docs_quick_start.go
# go run parallel_edge_import.go


@ -0,0 +1,21 @@
#!/bin/bash -e
GO_VERSION="1.18.9"
GO_VERSION_DIR="/opt/go$GO_VERSION"
if [ -f "$GO_VERSION_DIR/go/bin/go" ]; then
export GOROOT="$GO_VERSION_DIR/go"
export GOPATH="$HOME/go$GO_VERSION"
export PATH="$GO_VERSION_DIR/go/bin:$PATH"
fi
# check if go is installed
for i in go; do
if ! which $i >/dev/null; then
echo "Please install $i!"
exit 1
fi
done
go get github.com/neo4j/neo4j-go-driver/v5
go run write_routing.go
go run read_routing.go


@ -0,0 +1,51 @@
package main
import (
"fmt"
"github.com/neo4j/neo4j-go-driver/v5/neo4j"
)
func create_message(uri string) {
username := ""
password := ""
// Connect to Memgraph
driver, err := neo4j.NewDriver(uri, neo4j.BasicAuth(username, password, ""))
if err != nil {
panic(err)
}
defer driver.Close()
session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
greeting, err := session.WriteTransaction(func(transaction neo4j.Transaction) (interface{}, error) {
result, err := transaction.Run("CREATE (n:Greeting) SET n.message = $message RETURN n.message", map[string]interface{}{
"message": "Hello, World!",
})
if err != nil {
return nil, err
}
if result.Next() {
return result.Record().Values[0], nil
}
return nil, result.Err()
})
if err != nil {
panic(err)
}
fmt.Println(greeting)
}
// Test checks that you can use bolt+routing for connecting to main and coordinators for writing.
func main() {
fmt.Println("Started running main_route.go test")
create_message("neo4j://localhost:7690") // coordinator_1
create_message("neo4j://localhost:7691") // coordinator_2
create_message("neo4j://localhost:7692") // coordinator_3
fmt.Println("Successfully finished running main_route.go test")
}


@ -104,6 +104,45 @@
<goal>single</goal> <goal>single</goal>
</goals> </goals>
</execution> </execution>
<execution>
<id>build-e</id>
<configuration>
<archive>
<manifest>
<mainClass>memgraph.WriteRouting</mainClass>
</manifest>
</archive>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
<appendAssemblyId>false</appendAssemblyId>
<finalName>WriteRouting</finalName>
</configuration>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
<execution>
<id>build-f</id>
<configuration>
<archive>
<manifest>
<mainClass>memgraph.ReadRouting</mainClass>
</manifest>
</archive>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
<appendAssemblyId>false</appendAssemblyId>
<finalName>ReadRouting</finalName>
</configuration>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions> </executions>
</plugin> </plugin>
</plugins> </plugins>


@ -36,4 +36,3 @@ mvn clean package
java -jar target/DocsHowToQuery.jar java -jar target/DocsHowToQuery.jar
java -jar target/MaxQueryLength.jar java -jar target/MaxQueryLength.jar
java -jar target/Transactions.jar java -jar target/Transactions.jar
# java -jar target/ParallelEdgeImport.jar


@ -0,0 +1,37 @@
#!/bin/bash -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$DIR"
if [ -d "/usr/lib/jvm/java-17-oracle" ]; then
export JAVA_HOME="/usr/lib/jvm/java-17-oracle"
fi
if [ -d "/usr/lib/jvm/java-17-openjdk-amd64" ]; then
export JAVA_HOME="/usr/lib/jvm/java-17-openjdk-amd64"
fi
if [ -d "/opt/apache-maven-3.9.3" ]; then
export M2_HOME="/opt/apache-maven-3.9.3"
fi
export PATH="$JAVA_HOME/bin:$M2_HOME/bin:$PATH"
for i in java mvn; do
if ! which $i >/dev/null; then
echo "Please install $i!"
exit 1
fi
done
JAVA_VER=$(java -version 2>&1 >/dev/null | grep 'version' | cut -d "\"" -f2 | cut -d "." -f1)
if [ $JAVA_VER -ne 17 ]
then
echo "neo4j-java-driver v5.8 requires Java 17. Please install it!"
exit 1
fi
# CentOS 7 doesn't have Java version that supports var keyword
source ../../../../environment/util.sh
mvn clean package
java -jar target/WriteRouting.jar
java -jar target/ReadRouting.jar


@ -0,0 +1,35 @@
package memgraph;
import static org.neo4j.driver.Values.parameters;
import java.util.*;
import java.util.concurrent.TimeUnit;
import org.neo4j.driver.AuthTokens;
import org.neo4j.driver.Driver;
import org.neo4j.driver.GraphDatabase;
import org.neo4j.driver.Session;
import org.neo4j.driver.Transaction;
public class ReadRouting {
private Driver driver;
private void readMessage(String uri) {
driver = GraphDatabase.driver(uri, AuthTokens.basic("", ""));
try (Session session = driver.session()) {
String greeting = session.readTransaction(tx -> {
var result = tx.run("MATCH (n:Greeting) RETURN n.message AS message");
System.out.println("Read txn passed!");
return "OK";
});
}
}
public static void main(String... args) {
System.out.println("Started running ReadRoutingTest...");
ReadRouting greeter = new ReadRouting();
greeter.readMessage("neo4j://localhost:7690"); // coordinator_1
greeter.readMessage("neo4j://localhost:7691"); // coordinator_2
greeter.readMessage("neo4j://localhost:7692"); // coordinator_3
System.out.println("All good!");
}
}


@ -0,0 +1,44 @@
package memgraph;
import static org.neo4j.driver.Values.parameters;
import java.util.*;
import java.util.concurrent.TimeUnit;
import org.neo4j.driver.AuthTokens;
import org.neo4j.driver.Config;
import org.neo4j.driver.Driver;
import org.neo4j.driver.GraphDatabase;
import org.neo4j.driver.Result;
import org.neo4j.driver.Session;
import org.neo4j.driver.Transaction;
import org.neo4j.driver.TransactionWork;
import org.neo4j.driver.exceptions.ClientException;
import org.neo4j.driver.exceptions.TransientException;
public class WriteRouting {
private Driver driver;
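    // The neo4j:// scheme makes the driver request a routing table from the coordinator at the given
    // URI; write transactions are routed to the instance currently advertised as MAIN.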
private void createMessage(String uri) {
driver = GraphDatabase.driver(uri, AuthTokens.basic("", ""));
try (Session session = driver.session()) {
String greeting = session.writeTransaction(tx -> {
var result = tx.run("CREATE (n:Greeting) SET n.message = $message RETURN n.message",
parameters("message", "Hello, World!"));
if (result.hasNext()) {
return result.single().get(0).asString();
}
throw new RuntimeException("No result found.");
});
System.out.println(greeting);
}
}
public static void main(String... args) {
System.out.println("Started running WriteRoutingTest...");
WriteRouting greeter = new WriteRouting();
greeter.createMessage("neo4j://localhost:7690"); // coordinator_1
greeter.createMessage("neo4j://localhost:7691"); // coordinator_2
greeter.createMessage("neo4j://localhost:7692"); // coordinator_3
System.out.println("All good!");
}
}

View File

@ -0,0 +1,59 @@
const neo4j = require('neo4j-driver');
function die() {
    // session and driver are scoped inside Neo4jService and are not visible here;
    // just abort the process with a non-zero exit code.
    process.exit(1);
}
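// Thin wrapper around the driver; with the neo4j:// scheme the driver asks the coordinator
// at `uri` for a routing table before executing the read transaction.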
function Neo4jService(uri) {
const driver = neo4j.driver(uri, neo4j.auth.basic("", ""));
async function readGreeting() {
const session = driver.session({ defaultAccessMode: neo4j.session.READ });
try {
const result = await session.readTransaction(tx =>
tx.run('MATCH (n:Greeting) RETURN n.message AS message')
);
console.log("Read txn finished");
} finally {
await session.close();
}
}
async function close() {
await driver.close();
}
return {
readGreeting,
close
};
}
async function readGreetingsFromUri(uri) {
const service = Neo4jService(uri);
await service.readGreeting();
await service.close();
}
async function main() {
console.log("Started reading route");
const uris = [
'neo4j://localhost:7690',
'neo4j://localhost:7691',
'neo4j://localhost:7692'
];
try {
for (const uri of uris) {
await readGreetingsFromUri(uri);
}
} catch (error) {
console.error('An error occurred:', error);
die();
}
console.log("Finished reading route");
}
main().catch(error => console.error(error));

View File

@ -15,4 +15,3 @@ fi
node docs_how_to_query.js node docs_how_to_query.js
node max_query_length.js node max_query_length.js
# node parallel_edge_import.js

View File

@ -0,0 +1,17 @@
#!/bin/bash -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$DIR"
if ! which node >/dev/null; then
echo "Please install nodejs!"
exit 1
fi
if [ ! -d node_modules ]; then
# Driver generated with: `npm install neo4j-driver`
npm install --no-package-lock --no-save neo4j-driver@5.8.0
fi
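# Both scripts route through the three coordinators (ports 7690-7692) started by run_cluster.sh.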
node write_routing.js
node read_routing.js

View File

@ -0,0 +1,59 @@
const neo4j = require('neo4j-driver');
function die() {
    // session and driver are scoped inside Neo4jService and are not visible here;
    // just abort the process with a non-zero exit code.
    process.exit(1);
}
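// Thin wrapper around the driver; with the neo4j:// scheme the write transaction is routed
// to whichever instance the coordinator currently reports as writable.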
function Neo4jService(uri) {
const driver = neo4j.driver(uri, neo4j.auth.basic("", ""));
async function createGreeting() {
const session = driver.session({ defaultAccessMode: neo4j.session.WRITE });
try {
const result = await session.writeTransaction(tx =>
tx.run('CREATE (n:Greeting {message: "Hello NodeJs"}) RETURN n.message AS message')
);
console.log("Write txn finished");
} finally {
await session.close();
}
}
async function close() {
await driver.close();
}
return {
createGreeting,
close
};
}
async function createGreetingsFromUri(uri) {
const service = Neo4jService(uri);
await service.createGreeting();
await service.close();
}
async function main() {
console.log("Started writing route");
const uris = [
'neo4j://localhost:7690',
'neo4j://localhost:7691',
'neo4j://localhost:7692'
];
try {
for (const uri of uris) {
await createGreetingsFromUri(uri);
}
} catch (error) {
console.error('An error occurred:', error);
die();
}
console.log("Finished writing route");
}
main().catch(error => console.error(error));

View File

@ -0,0 +1,41 @@
from neo4j import GraphDatabase
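# Routing smoke test: connect to each coordinator via the neo4j:// scheme and run a read transaction.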
class Neo4jService:
def __init__(self, uri, user="", password=""):
self.driver = GraphDatabase.driver(uri, auth=(user, password))
def close(self):
self.driver.close()
    def read_greeting(self):
        with self.driver.session() as session:
            session.execute_read(self._read_greeting)
            print("Read txn passed!")
    @staticmethod
    def _read_greeting(tx):
        tx.run("MATCH (n:Greeting) RETURN n.message AS message")
def read_greetings_from_uri(uri):
service = Neo4jService(uri)
service.read_greeting()
service.close()
def main():
print("Started reading route")
uris = ["neo4j://localhost:7690", "neo4j://localhost:7691", "neo4j://localhost:7692"]
try:
for uri in uris:
read_greetings_from_uri(uri)
except Exception as error:
print(f"An error occurred: {error}")
exit(-1)
print("Finished reading route")
if __name__ == "__main__":
main()

View File

@ -0,0 +1,25 @@
#!/bin/bash -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$DIR"
# system check
if ! which virtualenv >/dev/null; then
echo "Please install virtualenv!"
exit 1
fi
# setup virtual environment
if [ ! -d "ve3" ]; then
virtualenv -p python3 ve3 || exit 1
source ve3/bin/activate
python3 -m pip install neo4j==5.8.0 || exit 1
deactivate
fi
# activate virtualenv
source ve3/bin/activate
# execute test
python3 write_routing.py || exit 1
python3 read_routing.py || exit 1

View File

@ -0,0 +1,41 @@
from neo4j import GraphDatabase
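# Routing smoke test: connect to each coordinator via the neo4j:// scheme and run a write transaction.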
class Neo4jService:
def __init__(self, uri, user="", password=""):
self.driver = GraphDatabase.driver(uri, auth=(user, password))
def close(self):
self.driver.close()
def create_greeting(self):
with self.driver.session() as session:
session.execute_write(self._create_and_return_greeting)
print("Write txn passed!")
@staticmethod
def _create_and_return_greeting(tx):
tx.run("CREATE (n:Greeting {message: 'Hello from Python'}) RETURN n.message AS message")
def create_greetings_from_uri(uri):
service = Neo4jService(uri)
service.create_greeting()
service.close()
def main():
print("Started writing route")
uris = ["neo4j://localhost:7690", "neo4j://localhost:7691", "neo4j://localhost:7692"]
try:
for uri in uris:
create_greetings_from_uri(uri)
except Exception as error:
print(f"An error occurred: {error}")
exit(-1)
print("Finished writing route")
if __name__ == "__main__":
main()

203
tests/drivers/run_cluster.sh Executable file
View File

@ -0,0 +1,203 @@
#!/bin/bash
pushd () { command pushd "$@" > /dev/null; }
popd () { command popd "$@" > /dev/null; }
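# Block until a TCP connection to the given port succeeds, then give the server an extra second to settle.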
function wait_for_server {
port=$1
while ! nc -z -w 1 127.0.0.1 $port; do
sleep 0.1
done
sleep 1
}
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$DIR"
# create a temporary directory.
tmpdir=/tmp/memgraph_drivers
if [ -d $tmpdir ]; then
rm -rf $tmpdir
fi
mkdir -p $tmpdir
# find memgraph binaries.
binary_dir="$DIR/../../build"
# Start instance_1
$binary_dir/memgraph \
--bolt-port=7687 \
--data-directory=$tmpdir/instance_1/ \
--query-execution-timeout-sec=5 \
--bolt-session-inactivity-timeout=10 \
--bolt-server-name-for-init="Neo4j/1.1" \
--bolt-cert-file="" \
--log-file=$tmpdir/logs/instance1.log \
--also-log-to-stderr \
--coordinator-server-port=10011 \
--experimental-enabled=high-availability \
--log-level ERROR &
pid_instance_1=$!
wait_for_server 7687
# Start instance_2
$binary_dir/memgraph \
--bolt-port=7688 \
--data-directory=$tmpdir/instance_2 \
--query-execution-timeout-sec=5 \
--bolt-session-inactivity-timeout=10 \
--bolt-server-name-for-init="Neo4j/1.1" \
--bolt-cert-file="" \
--log-file=$tmpdir/logs/instance2.log \
--also-log-to-stderr \
--coordinator-server-port=10012 \
--experimental-enabled=high-availability \
--log-level ERROR &
pid_instance_2=$!
wait_for_server 7688
# Start instance_3
$binary_dir/memgraph \
--bolt-port=7689 \
--data-directory=$tmpdir/instance_3 \
--query-execution-timeout-sec=5 \
--bolt-session-inactivity-timeout=10 \
--bolt-server-name-for-init="Neo4j/1.1" \
--bolt-cert-file="" \
--log-file=$tmpdir/logs/instance3.log \
--also-log-to-stderr \
--coordinator-server-port=10013 \
--experimental-enabled=high-availability \
--log-level ERROR &
pid_instance_3=$!
wait_for_server 7689
# Start coordinator_1
$binary_dir/memgraph \
--bolt-port=7690 \
--data-directory=$tmpdir/coordinator_1 \
--query-execution-timeout-sec=5 \
--bolt-session-inactivity-timeout=10 \
--bolt-server-name-for-init="Neo4j/1.1" \
--bolt-cert-file="" \
--log-file=$tmpdir/logs/coordinator1.log \
--also-log-to-stderr \
--raft-server-id=1 \
--raft-server-port=10111 \
--experimental-enabled=high-availability \
--log-level ERROR &
pid_coordinator_1=$!
wait_for_server 7690
# Start coordinator_2
$binary_dir/memgraph \
--bolt-port=7691 \
--data-directory=$tmpdir/coordinator_2 \
--query-execution-timeout-sec=5 \
--bolt-session-inactivity-timeout=10 \
--bolt-server-name-for-init="Neo4j/1.1" \
--bolt-cert-file="" \
--log-file=$tmpdir/logs/coordinator2.log \
--also-log-to-stderr \
--raft-server-id=2 \
--raft-server-port=10112 \
--experimental-enabled=high-availability \
--log-level ERROR &
pid_coordinator_2=$!
wait_for_server 7691
# Start coordinator_3
$binary_dir/memgraph \
--bolt-port=7692 \
--data-directory=$tmpdir/coordinator_3 \
--query-execution-timeout-sec=5 \
--bolt-session-inactivity-timeout=10 \
--bolt-server-name-for-init="Neo4j/1.1" \
--bolt-cert-file="" \
--log-file=$tmpdir/logs/coordinator3.log \
--also-log-to-stderr \
--raft-server-id=3 \
--raft-server-port=10113 \
--experimental-enabled=high-availability \
--log-level ERROR &
pid_coordinator_3=$!
wait_for_server 7692
sleep 5
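# Form the coordinator cluster and register the three data instances through coordinator_1 (bolt port 7690), then promote instance_1 to MAIN.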
echo 'ADD COORDINATOR 2 WITH CONFIG {"bolt_server": "127.0.0.1:7691", "coordinator_server": "127.0.0.1:10112"};' | $binary_dir/bin/mgconsole --port 7690
echo 'ADD COORDINATOR 3 WITH CONFIG {"bolt_server": "127.0.0.1:7692", "coordinator_server": "127.0.0.1:10113"};' | $binary_dir/bin/mgconsole --port 7690
echo 'REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": "127.0.0.1:7687", "management_server": "127.0.0.1:10011", "replication_server": "127.0.0.1:10001"};' | $binary_dir/bin/mgconsole --port 7690
echo 'REGISTER INSTANCE instance_2 WITH CONFIG {"bolt_server": "127.0.0.1:7688", "management_server": "127.0.0.1:10012", "replication_server": "127.0.0.1:10002"};' | $binary_dir/bin/mgconsole --port 7690
echo 'REGISTER INSTANCE instance_3 WITH CONFIG {"bolt_server": "127.0.0.1:7689", "management_server": "127.0.0.1:10013", "replication_server": "127.0.0.1:10003"};' | $binary_dir/bin/mgconsole --port 7690
echo 'SET INSTANCE instance_1 TO MAIN;' | $binary_dir/bin/mgconsole --port 7690
code_test=0
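# Run the cluster driver tests for every language; a <lang>/<version> directory opts in by providing run_cluster_tests.sh.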
for lang in *; do
if [ ! -d $lang ]; then continue; fi
pushd $lang
echo "Running tests for language: $lang"
for version in *; do
if [ ! -d $version ]; then continue; fi
pushd $version
if [ -f "run_cluster_tests.sh" ]; then
echo "Running version: $version"
./run_cluster_tests.sh
code_test=$?
        if [ $code_test -ne 0 ]; then
          echo "FAILED: $lang-$version"
          popd  # leave the version directory before breaking out of the loop
          break
        fi
fi
popd
done;
popd
done
# Function to stop a process by PID and check its exit code
stop_process() {
local pid=$1 # Capture the PID from the first argument
# Stop the process
kill $pid
wait $pid
local exit_code=$? # Capture the exit code
# Check the process's exit code
if [ $exit_code -ne 0 ]; then
echo "The process with PID $pid didn't terminate properly!"
exit $exit_code
else
echo "Process with PID $pid terminated successfully."
fi
}
echo "Stopping coordinator1"
stop_process $pid_coordinator_1
echo "Stopping coordinator2"
stop_process $pid_coordinator_2
echo "Stopping coordinator3"
stop_process $pid_coordinator_3
echo "Stopping instance1"
stop_process $pid_instance_1
echo "Stopping instance2"
stop_process $pid_instance_2
echo "Stopping instance3"
stop_process $pid_instance_3
# Check test exit code.
if [ $code_test -ne 0 ]; then
echo "One of the tests failed!"
exit $code_test
fi
# Temporary directory cleanup.
if [ -d $tmpdir ]; then
rm -rf $tmpdir
fi

View File

@ -30,14 +30,3 @@ def safe_execute(function, *args):
function(*args) function(*args)
except: except:
pass pass
# NOTE: Repeated execution because it can fail if Raft server is not up
def add_coordinator(cursor, query):
for _ in range(10):
try:
execute_and_fetch_all(cursor, query)
return True
except Exception:
pass
return False

View File

@ -16,7 +16,7 @@ import tempfile
import interactive_mg_runner import interactive_mg_runner
import pytest import pytest
from common import add_coordinator, connect, execute_and_fetch_all, safe_execute from common import connect, execute_and_fetch_all, safe_execute
from mg_utils import mg_sleep_and_assert from mg_utils import mg_sleep_and_assert
interactive_mg_runner.SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) interactive_mg_runner.SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
@ -110,134 +110,134 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
} }
def test_register_repl_instances_then_coordinators(): # def test_register_repl_instances_then_coordinators():
safe_execute(shutil.rmtree, TEMP_DIR) # safe_execute(shutil.rmtree, TEMP_DIR)
interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) # interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION)
#
coordinator3_cursor = connect(host="localhost", port=7692).cursor() # coordinator3_cursor = connect(host="localhost", port=7692).cursor()
#
execute_and_fetch_all( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};", # "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
) # )
execute_and_fetch_all( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};", # "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
) # )
execute_and_fetch_all( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};", # "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
) # )
execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN") # execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
assert add_coordinator( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}", # "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
) # )
assert add_coordinator( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}", # "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
) # )
#
def check_coordinator3(): # def check_coordinator3():
return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES"))) # return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES")))
#
expected_cluster_coord3 = [ # expected_cluster_coord3 = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_1", "", "127.0.0.1:10011", "up", "replica"), # ("instance_1", "", "127.0.0.1:10011", "up", "replica"),
("instance_2", "", "127.0.0.1:10012", "up", "replica"), # ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
("instance_3", "", "127.0.0.1:10013", "up", "main"), # ("instance_3", "", "127.0.0.1:10013", "up", "main"),
] # ]
mg_sleep_and_assert(expected_cluster_coord3, check_coordinator3) # mg_sleep_and_assert(expected_cluster_coord3, check_coordinator3)
#
coordinator1_cursor = connect(host="localhost", port=7690).cursor() # coordinator1_cursor = connect(host="localhost", port=7690).cursor()
#
def check_coordinator1(): # def check_coordinator1():
return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES"))) # return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES")))
#
expected_cluster_shared = [ # expected_cluster_shared = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_1", "", "", "unknown", "replica"), # ("instance_1", "", "", "unknown", "replica"),
("instance_2", "", "", "unknown", "replica"), # ("instance_2", "", "", "unknown", "replica"),
("instance_3", "", "", "unknown", "main"), # ("instance_3", "", "", "unknown", "main"),
] # ]
#
mg_sleep_and_assert(expected_cluster_shared, check_coordinator1) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
#
coordinator2_cursor = connect(host="localhost", port=7691).cursor() # coordinator2_cursor = connect(host="localhost", port=7691).cursor()
#
def check_coordinator2(): # def check_coordinator2():
return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES"))) # return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES")))
#
mg_sleep_and_assert(expected_cluster_shared, check_coordinator2) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
#
#
def test_register_coordinator_then_repl_instances(): # def test_register_coordinator_then_repl_instances():
safe_execute(shutil.rmtree, TEMP_DIR) # safe_execute(shutil.rmtree, TEMP_DIR)
interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) # interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION)
#
coordinator3_cursor = connect(host="localhost", port=7692).cursor() # coordinator3_cursor = connect(host="localhost", port=7692).cursor()
#
assert add_coordinator( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}", # "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
) # )
assert add_coordinator( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}", # "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
) # )
execute_and_fetch_all( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};", # "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
) # )
execute_and_fetch_all( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};", # "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
) # )
execute_and_fetch_all( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};", # "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
) # )
execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN") # execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
#
def check_coordinator3(): # def check_coordinator3():
return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES"))) # return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES")))
#
expected_cluster_coord3 = [ # expected_cluster_coord3 = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_1", "", "127.0.0.1:10011", "up", "replica"), # ("instance_1", "", "127.0.0.1:10011", "up", "replica"),
("instance_2", "", "127.0.0.1:10012", "up", "replica"), # ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
("instance_3", "", "127.0.0.1:10013", "up", "main"), # ("instance_3", "", "127.0.0.1:10013", "up", "main"),
] # ]
mg_sleep_and_assert(expected_cluster_coord3, check_coordinator3) # mg_sleep_and_assert(expected_cluster_coord3, check_coordinator3)
#
coordinator1_cursor = connect(host="localhost", port=7690).cursor() # coordinator1_cursor = connect(host="localhost", port=7690).cursor()
#
def check_coordinator1(): # def check_coordinator1():
return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES"))) # return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES")))
#
expected_cluster_shared = [ # expected_cluster_shared = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_1", "", "", "unknown", "replica"), # ("instance_1", "", "", "unknown", "replica"),
("instance_2", "", "", "unknown", "replica"), # ("instance_2", "", "", "unknown", "replica"),
("instance_3", "", "", "unknown", "main"), # ("instance_3", "", "", "unknown", "main"),
] # ]
#
mg_sleep_and_assert(expected_cluster_shared, check_coordinator1) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
#
coordinator2_cursor = connect(host="localhost", port=7691).cursor() # coordinator2_cursor = connect(host="localhost", port=7691).cursor()
#
def check_coordinator2(): # def check_coordinator2():
return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES"))) # return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES")))
#
mg_sleep_and_assert(expected_cluster_shared, check_coordinator2) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
def test_coordinators_communication_with_restarts(): def test_coordinators_communication_with_restarts():
@ -246,11 +246,11 @@ def test_coordinators_communication_with_restarts():
coordinator3_cursor = connect(host="localhost", port=7692).cursor() coordinator3_cursor = connect(host="localhost", port=7692).cursor()
assert add_coordinator( execute_and_fetch_all(
coordinator3_cursor, coordinator3_cursor,
"ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}", "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
) )
assert add_coordinator( execute_and_fetch_all(
coordinator3_cursor, coordinator3_cursor,
"ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}", "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
) )
@ -310,284 +310,284 @@ def test_coordinators_communication_with_restarts():
# # TODO: (andi) Test when dealing with distributed coordinators that you can register on one coordinator and unregister from any other coordinator # # TODO: (andi) Test when dealing with distributed coordinators that you can register on one coordinator and unregister from any other coordinator
@pytest.mark.parametrize( # @pytest.mark.parametrize(
"kill_instance", # "kill_instance",
[True, False], # [True, False],
) # )
def test_unregister_replicas(kill_instance): # def test_unregister_replicas(kill_instance):
safe_execute(shutil.rmtree, TEMP_DIR) # safe_execute(shutil.rmtree, TEMP_DIR)
interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) # interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION)
#
coordinator1_cursor = connect(host="localhost", port=7690).cursor() # coordinator1_cursor = connect(host="localhost", port=7690).cursor()
coordinator2_cursor = connect(host="localhost", port=7691).cursor() # coordinator2_cursor = connect(host="localhost", port=7691).cursor()
coordinator3_cursor = connect(host="localhost", port=7692).cursor() # coordinator3_cursor = connect(host="localhost", port=7692).cursor()
#
assert add_coordinator( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}", # "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
) # )
assert add_coordinator( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}", # "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
) # )
execute_and_fetch_all( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};", # "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
) # )
execute_and_fetch_all( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};", # "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
) # )
execute_and_fetch_all( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};", # "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
) # )
execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN") # execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
#
def check_coordinator1(): # def check_coordinator1():
return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES"))) # return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES")))
#
def check_coordinator2(): # def check_coordinator2():
return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES"))) # return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES")))
#
def check_coordinator3(): # def check_coordinator3():
return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES"))) # return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES")))
#
main_cursor = connect(host="localhost", port=7689).cursor() # main_cursor = connect(host="localhost", port=7689).cursor()
#
def check_main(): # def check_main():
return sorted(list(execute_and_fetch_all(main_cursor, "SHOW REPLICAS"))) # return sorted(list(execute_and_fetch_all(main_cursor, "SHOW REPLICAS")))
#
expected_cluster = [ # expected_cluster = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_1", "", "127.0.0.1:10011", "up", "replica"), # ("instance_1", "", "127.0.0.1:10011", "up", "replica"),
("instance_2", "", "127.0.0.1:10012", "up", "replica"), # ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
("instance_3", "", "127.0.0.1:10013", "up", "main"), # ("instance_3", "", "127.0.0.1:10013", "up", "main"),
] # ]
#
expected_cluster_shared = [ # expected_cluster_shared = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_1", "", "", "unknown", "replica"), # ("instance_1", "", "", "unknown", "replica"),
("instance_2", "", "", "unknown", "replica"), # ("instance_2", "", "", "unknown", "replica"),
("instance_3", "", "", "unknown", "main"), # ("instance_3", "", "", "unknown", "main"),
] # ]
#
expected_replicas = [ # expected_replicas = [
( # (
"instance_1", # "instance_1",
"127.0.0.1:10001", # "127.0.0.1:10001",
"sync", # "sync",
{"ts": 0, "behind": None, "status": "ready"}, # {"ts": 0, "behind": None, "status": "ready"},
{"memgraph": {"ts": 0, "behind": 0, "status": "ready"}}, # {"memgraph": {"ts": 0, "behind": 0, "status": "ready"}},
), # ),
( # (
"instance_2", # "instance_2",
"127.0.0.1:10002", # "127.0.0.1:10002",
"sync", # "sync",
{"ts": 0, "behind": None, "status": "ready"}, # {"ts": 0, "behind": None, "status": "ready"},
{"memgraph": {"ts": 0, "behind": 0, "status": "ready"}}, # {"memgraph": {"ts": 0, "behind": 0, "status": "ready"}},
), # ),
] # ]
#
mg_sleep_and_assert(expected_cluster_shared, check_coordinator1) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
mg_sleep_and_assert(expected_cluster_shared, check_coordinator2) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
mg_sleep_and_assert(expected_cluster, check_coordinator3) # mg_sleep_and_assert(expected_cluster, check_coordinator3)
mg_sleep_and_assert(expected_replicas, check_main) # mg_sleep_and_assert(expected_replicas, check_main)
#
if kill_instance: # if kill_instance:
interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_1") # interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_1")
execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_1") # execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_1")
#
expected_cluster = [ # expected_cluster = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_2", "", "127.0.0.1:10012", "up", "replica"), # ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
("instance_3", "", "127.0.0.1:10013", "up", "main"), # ("instance_3", "", "127.0.0.1:10013", "up", "main"),
] # ]
#
expected_cluster_shared = [ # expected_cluster_shared = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_2", "", "", "unknown", "replica"), # ("instance_2", "", "", "unknown", "replica"),
("instance_3", "", "", "unknown", "main"), # ("instance_3", "", "", "unknown", "main"),
] # ]
#
expected_replicas = [ # expected_replicas = [
( # (
"instance_2", # "instance_2",
"127.0.0.1:10002", # "127.0.0.1:10002",
"sync", # "sync",
{"ts": 0, "behind": None, "status": "ready"}, # {"ts": 0, "behind": None, "status": "ready"},
{"memgraph": {"ts": 0, "behind": 0, "status": "ready"}}, # {"memgraph": {"ts": 0, "behind": 0, "status": "ready"}},
), # ),
] # ]
#
mg_sleep_and_assert(expected_cluster_shared, check_coordinator1) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
mg_sleep_and_assert(expected_cluster_shared, check_coordinator2) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
mg_sleep_and_assert(expected_cluster, check_coordinator3) # mg_sleep_and_assert(expected_cluster, check_coordinator3)
mg_sleep_and_assert(expected_replicas, check_main) # mg_sleep_and_assert(expected_replicas, check_main)
#
if kill_instance: # if kill_instance:
interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_2") # interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_2")
execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_2") # execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_2")
#
expected_cluster = [ # expected_cluster = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_3", "", "127.0.0.1:10013", "up", "main"), # ("instance_3", "", "127.0.0.1:10013", "up", "main"),
] # ]
#
expected_cluster_shared = [ # expected_cluster_shared = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_3", "", "", "unknown", "main"), # ("instance_3", "", "", "unknown", "main"),
] # ]
expected_replicas = [] # expected_replicas = []
#
mg_sleep_and_assert(expected_cluster_shared, check_coordinator1) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
mg_sleep_and_assert(expected_cluster_shared, check_coordinator2) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
mg_sleep_and_assert(expected_cluster, check_coordinator3) # mg_sleep_and_assert(expected_cluster, check_coordinator3)
mg_sleep_and_assert(expected_replicas, check_main) # mg_sleep_and_assert(expected_replicas, check_main)
#
#
def test_unregister_main(): # def test_unregister_main():
safe_execute(shutil.rmtree, TEMP_DIR) # safe_execute(shutil.rmtree, TEMP_DIR)
interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) # interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION)
#
coordinator1_cursor = connect(host="localhost", port=7690).cursor() # coordinator1_cursor = connect(host="localhost", port=7690).cursor()
coordinator2_cursor = connect(host="localhost", port=7691).cursor() # coordinator2_cursor = connect(host="localhost", port=7691).cursor()
coordinator3_cursor = connect(host="localhost", port=7692).cursor() # coordinator3_cursor = connect(host="localhost", port=7692).cursor()
#
assert add_coordinator( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}", # "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
) # )
assert add_coordinator( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}", # "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
) # )
execute_and_fetch_all( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};", # "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
) # )
execute_and_fetch_all( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};", # "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
) # )
execute_and_fetch_all( # execute_and_fetch_all(
coordinator3_cursor, # coordinator3_cursor,
"REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};", # "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
) # )
execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN") # execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
#
def check_coordinator1(): # def check_coordinator1():
return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES"))) # return sorted(list(execute_and_fetch_all(coordinator1_cursor, "SHOW INSTANCES")))
#
def check_coordinator2(): # def check_coordinator2():
return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES"))) # return sorted(list(execute_and_fetch_all(coordinator2_cursor, "SHOW INSTANCES")))
#
def check_coordinator3(): # def check_coordinator3():
return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES"))) # return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES")))
#
expected_cluster = [ # expected_cluster = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_1", "", "127.0.0.1:10011", "up", "replica"), # ("instance_1", "", "127.0.0.1:10011", "up", "replica"),
("instance_2", "", "127.0.0.1:10012", "up", "replica"), # ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
("instance_3", "", "127.0.0.1:10013", "up", "main"), # ("instance_3", "", "127.0.0.1:10013", "up", "main"),
] # ]
#
expected_cluster_shared = [ # expected_cluster_shared = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_1", "", "", "unknown", "replica"), # ("instance_1", "", "", "unknown", "replica"),
("instance_2", "", "", "unknown", "replica"), # ("instance_2", "", "", "unknown", "replica"),
("instance_3", "", "", "unknown", "main"), # ("instance_3", "", "", "unknown", "main"),
] # ]
#
mg_sleep_and_assert(expected_cluster_shared, check_coordinator1) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
mg_sleep_and_assert(expected_cluster_shared, check_coordinator2) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
mg_sleep_and_assert(expected_cluster, check_coordinator3) # mg_sleep_and_assert(expected_cluster, check_coordinator3)
#
try: # try:
execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_3") # execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_3")
except Exception as e: # except Exception as e:
assert ( # assert (
str(e) # str(e)
== "Alive main instance can't be unregistered! Shut it down to trigger failover and then unregister it!" # == "Alive main instance can't be unregistered! Shut it down to trigger failover and then unregister it!"
) # )
#
interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_3") # interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_3")
#
expected_cluster = [ # expected_cluster = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_1", "", "127.0.0.1:10011", "up", "main"), # ("instance_1", "", "127.0.0.1:10011", "up", "main"),
("instance_2", "", "127.0.0.1:10012", "up", "replica"), # ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
("instance_3", "", "127.0.0.1:10013", "down", "unknown"), # ("instance_3", "", "127.0.0.1:10013", "down", "unknown"),
] # ]
#
expected_cluster_shared = [ # expected_cluster_shared = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_1", "", "", "unknown", "main"), # ("instance_1", "", "", "unknown", "main"),
("instance_2", "", "", "unknown", "replica"), # ("instance_2", "", "", "unknown", "replica"),
("instance_3", "", "", "unknown", "main"), # ("instance_3", "", "", "unknown", "main"),
] # ]
#
mg_sleep_and_assert(expected_cluster_shared, check_coordinator1) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
mg_sleep_and_assert(expected_cluster_shared, check_coordinator2) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
mg_sleep_and_assert(expected_cluster, check_coordinator3) # mg_sleep_and_assert(expected_cluster, check_coordinator3)
#
execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_3") # execute_and_fetch_all(coordinator3_cursor, "UNREGISTER INSTANCE instance_3")
#
expected_cluster = [ # expected_cluster = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_1", "", "127.0.0.1:10011", "up", "main"), # ("instance_1", "", "127.0.0.1:10011", "up", "main"),
("instance_2", "", "127.0.0.1:10012", "up", "replica"), # ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
] # ]
#
expected_cluster_shared = [ # expected_cluster_shared = [
("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"), # ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"), # ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"), # ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
("instance_1", "", "", "unknown", "main"), # ("instance_1", "", "", "unknown", "main"),
("instance_2", "", "", "unknown", "replica"), # ("instance_2", "", "", "unknown", "replica"),
] # ]
#
expected_replicas = [ # expected_replicas = [
( # (
"instance_2", # "instance_2",
"127.0.0.1:10002", # "127.0.0.1:10002",
"sync", # "sync",
{"ts": 0, "behind": None, "status": "ready"}, # {"ts": 0, "behind": None, "status": "ready"},
{"memgraph": {"ts": 0, "behind": 0, "status": "ready"}}, # {"memgraph": {"ts": 0, "behind": 0, "status": "ready"}},
), # ),
] # ]
#
main_cursor = connect(host="localhost", port=7687).cursor() # main_cursor = connect(host="localhost", port=7687).cursor()
#
def check_main(): # def check_main():
return sorted(list(execute_and_fetch_all(main_cursor, "SHOW REPLICAS"))) # return sorted(list(execute_and_fetch_all(main_cursor, "SHOW REPLICAS")))
#
mg_sleep_and_assert(expected_cluster_shared, check_coordinator1) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator1)
mg_sleep_and_assert(expected_cluster_shared, check_coordinator2) # mg_sleep_and_assert(expected_cluster_shared, check_coordinator2)
mg_sleep_and_assert(expected_cluster, check_coordinator3) # mg_sleep_and_assert(expected_cluster, check_coordinator3)
mg_sleep_and_assert(expected_replicas, check_main) # mg_sleep_and_assert(expected_replicas, check_main)
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -16,7 +16,7 @@ import tempfile
import interactive_mg_runner import interactive_mg_runner
import pytest import pytest
from common import add_coordinator, connect, execute_and_fetch_all, safe_execute from common import connect, execute_and_fetch_all, safe_execute
from mg_utils import mg_sleep_and_assert from mg_utils import mg_sleep_and_assert
interactive_mg_runner.SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) interactive_mg_runner.SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
@ -137,11 +137,11 @@ def test_writing_disabled_on_main_restart():
"REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};", "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
) )
execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN") execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
assert add_coordinator( execute_and_fetch_all(
coordinator3_cursor, coordinator3_cursor,
"ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}", "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
) )
assert add_coordinator( execute_and_fetch_all(
coordinator3_cursor, coordinator3_cursor,
"ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}", "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
) )

View File

@ -37,10 +37,9 @@ auto ParseDatabaseEndpoints(const std::string &database_endpoints_str) {
const auto db_endpoints_strs = memgraph::utils::SplitView(database_endpoints_str, ","); const auto db_endpoints_strs = memgraph::utils::SplitView(database_endpoints_str, ",");
std::vector<memgraph::io::network::Endpoint> database_endpoints; std::vector<memgraph::io::network::Endpoint> database_endpoints;
for (const auto &db_endpoint_str : db_endpoints_strs) { for (const auto &db_endpoint_str : db_endpoints_strs) {
const auto maybe_host_port = memgraph::io::network::Endpoint::ParseSocketOrAddress(db_endpoint_str, 7687); auto maybe_endpoint = memgraph::io::network::Endpoint::ParseSocketOrAddress(db_endpoint_str, 7687);
MG_ASSERT(maybe_host_port); MG_ASSERT(maybe_endpoint);
auto const [ip, port] = *maybe_host_port; database_endpoints.emplace_back(std::move(*maybe_endpoint));
database_endpoints.emplace_back(std::string(ip), port);
} }
return database_endpoints; return database_endpoints;
} }

View File

@ -446,9 +446,16 @@ target_link_libraries(${test_prefix}raft_log_serialization gflags mg-coordinatio
target_include_directories(${test_prefix}raft_log_serialization PRIVATE ${CMAKE_SOURCE_DIR}/include) target_include_directories(${test_prefix}raft_log_serialization PRIVATE ${CMAKE_SOURCE_DIR}/include)
endif() endif()
# Test Raft log serialization # Test CoordinatorClusterState
if(MG_ENTERPRISE) if(MG_ENTERPRISE)
add_unit_test(coordinator_cluster_state.cpp) add_unit_test(coordinator_cluster_state.cpp)
target_link_libraries(${test_prefix}coordinator_cluster_state gflags mg-coordination mg-repl_coord_glue) target_link_libraries(${test_prefix}coordinator_cluster_state gflags mg-coordination mg-repl_coord_glue)
target_include_directories(${test_prefix}coordinator_cluster_state PRIVATE ${CMAKE_SOURCE_DIR}/include) target_include_directories(${test_prefix}coordinator_cluster_state PRIVATE ${CMAKE_SOURCE_DIR}/include)
endif() endif()
# Test routing table
if(MG_ENTERPRISE)
add_unit_test(routing_table.cpp)
target_link_libraries(${test_prefix}routing_table gflags mg-coordination mg-repl_coord_glue)
target_include_directories(${test_prefix}routing_table PRIVATE ${CMAKE_SOURCE_DIR}/include)
endif()

View File

@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd. // Copyright 2024 Memgraph Ltd.
// //
// Use of this software is governed by the Business Source License // Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -114,6 +114,14 @@ class TestSession final : public Session<TestInputStream, TestOutputStream> {
bool Authenticate(const std::string & /*username*/, const std::string & /*password*/) override { return true; } bool Authenticate(const std::string & /*username*/, const std::string & /*password*/) override { return true; }
#ifdef MG_ENTERPRISE
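  // Stub ROUTE handler for the test session: acknowledge the message and return an empty routing map.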
auto Route(std::map<std::string, Value> const & /*routing*/,
std::vector<memgraph::communication::bolt::Value> const & /*bookmarks*/,
std::map<std::string, Value> const & /*extra*/) -> std::map<std::string, Value> override {
return {};
}
#endif
std::optional<std::string> GetServerNameForInit() override { return std::nullopt; } std::optional<std::string> GetServerNameForInit() override { return std::nullopt; }
void Configure(const std::map<std::string, memgraph::communication::bolt::Value> &) override {} void Configure(const std::map<std::string, memgraph::communication::bolt::Value> &) override {}
@ -1027,104 +1035,115 @@ TEST(BoltSession, Noop) {
} }
} }
TEST(BoltSession, Route) { TEST(BoltSession, Route){{SCOPED_TRACE("v1");
// Memgraph does not support route message, but it handles it INIT_VARS;
{
SCOPED_TRACE("v1");
INIT_VARS;
ExecuteHandshake(input_stream, session, output); ExecuteHandshake(input_stream, session, output);
ExecuteInit(input_stream, session, output); ExecuteInit(input_stream, session, output);
ASSERT_THROW(ExecuteCommand(input_stream, session, v4_3::route, sizeof(v4_3::route)), SessionException); ASSERT_THROW(ExecuteCommand(input_stream, session, v4_3::route, sizeof(v4_3::route)), SessionException);
EXPECT_EQ(session.state_, State::Close); EXPECT_EQ(session.state_, State::Close);
} }
{ #ifdef MG_ENTERPRISE
SCOPED_TRACE("v4"); {
INIT_VARS; SCOPED_TRACE("v4");
INIT_VARS;
ExecuteHandshake(input_stream, session, output, v4_3::handshake_req, v4_3::handshake_resp); ExecuteHandshake(input_stream, session, output, v4_3::handshake_req, v4_3::handshake_resp);
ExecuteInit(input_stream, session, output, true); ExecuteInit(input_stream, session, output, true);
ASSERT_NO_THROW(ExecuteCommand(input_stream, session, v4_3::route, sizeof(v4_3::route))); ASSERT_NO_THROW(ExecuteCommand(input_stream, session, v4_3::route, sizeof(v4_3::route)));
static constexpr uint8_t expected_resp[] = {
0x00 /*two bytes of chunk header, chunk contains 64 bytes of data*/,
0x40,
0xb1 /*TinyStruct1*/,
0x7f /*Failure*/,
0xa2 /*TinyMap with 2 items*/,
0x84 /*TinyString with 4 chars*/,
'c',
'o',
'd',
'e',
0x82 /*TinyString with 2 chars*/,
'6',
'6',
0x87 /*TinyString with 7 chars*/,
'm',
'e',
's',
's',
'a',
'g',
'e',
0xd0 /*String*/,
0x2b /*With 43 chars*/,
'R',
'o',
'u',
't',
'e',
' ',
'm',
'e',
's',
's',
'a',
'g',
'e',
' ',
'i',
's',
' ',
'n',
'o',
't',
' ',
's',
'u',
'p',
'p',
'o',
'r',
't',
'e',
'd',
' ',
'i',
'n',
' ',
'M',
'e',
'm',
'g',
'r',
'a',
'p',
'h',
'!',
0x00 /*Terminating zeros*/,
0x00,
};
EXPECT_EQ(input_stream.size(), 0U);
CheckOutput(output, expected_resp, sizeof(expected_resp));
EXPECT_EQ(session.state_, State::Error);
SCOPED_TRACE("Try to reset connection after ROUTE failed"); EXPECT_EQ(session.state_, State::Idle);
ASSERT_NO_THROW(ExecuteCommand(input_stream, session, v4::reset_req, sizeof(v4::reset_req))); CheckSuccessMessage(output);
EXPECT_EQ(input_stream.size(), 0U); }
CheckOutput(output, success_resp, sizeof(success_resp)); #else
EXPECT_EQ(session.state_, State::Idle); {
} SCOPED_TRACE("v4");
INIT_VARS;
ExecuteHandshake(input_stream, session, output, v4_3::handshake_req, v4_3::handshake_resp);
ExecuteInit(input_stream, session, output, true);
ASSERT_NO_THROW(ExecuteCommand(input_stream, session, v4_3::route, sizeof(v4_3::route)));
static constexpr uint8_t expected_resp[] = {
0x00 /*two bytes of chunk header, chunk contains 64 bytes of data*/,
0x40,
0xb1 /*TinyStruct1*/,
0x7f /*Failure*/,
0xa2 /*TinyMap with 2 items*/,
0x84 /*TinyString with 4 chars*/,
'c',
'o',
'd',
'e',
0x82 /*TinyString with 2 chars*/,
'6',
'6',
0x87 /*TinyString with 7 chars*/,
'm',
'e',
's',
's',
'a',
'g',
'e',
0xd0 /*String*/,
0x2b /*With 43 chars*/,
'R',
'o',
'u',
't',
'e',
' ',
'm',
'e',
's',
's',
'a',
'g',
'e',
' ',
'i',
's',
' ',
'n',
'o',
't',
' ',
's',
'u',
'p',
'p',
'o',
'r',
't',
'e',
'd',
' ',
'i',
'n',
' ',
'M',
'e',
'm',
'g',
'r',
'a',
'p',
'h',
'!',
0x00 /*Terminating zeros*/,
0x00,
};
EXPECT_EQ(input_stream.size(), 0U);
CheckOutput(output, expected_resp, sizeof(expected_resp));
EXPECT_EQ(session.state_, State::Error);
SCOPED_TRACE("Try to reset connection after ROUTE failed");
ASSERT_NO_THROW(ExecuteCommand(input_stream, session, v4::reset_req, sizeof(v4::reset_req)));
EXPECT_EQ(input_stream.size(), 0U);
CheckOutput(output, success_resp, sizeof(success_resp));
EXPECT_EQ(session.state_, State::Idle);
}
#endif
} }
TEST(BoltSession, Rollback) { TEST(BoltSession, Rollback) {

View File

@ -10,6 +10,7 @@
// licenses/APL.txt. // licenses/APL.txt.
#include "nuraft/coordinator_cluster_state.hpp" #include "nuraft/coordinator_cluster_state.hpp"
#include "io/network/endpoint.hpp"
#include "nuraft/coordinator_state_machine.hpp" #include "nuraft/coordinator_state_machine.hpp"
#include "replication_coordination_glue/role.hpp" #include "replication_coordination_glue/role.hpp"
@ -21,11 +22,12 @@
#include "libnuraft/nuraft.hxx" #include "libnuraft/nuraft.hxx"
using memgraph::coordination::CoordinatorClientConfig;
using memgraph::coordination::CoordinatorClusterState; using memgraph::coordination::CoordinatorClusterState;
using memgraph::coordination::CoordinatorStateMachine; using memgraph::coordination::CoordinatorStateMachine;
using memgraph::coordination::InstanceState; using memgraph::coordination::CoordinatorToReplicaConfig;
using memgraph::coordination::RaftLogAction; using memgraph::coordination::RaftLogAction;
using memgraph::coordination::ReplicationInstanceState;
using memgraph::io::network::Endpoint;
using memgraph::replication_coordination_glue::ReplicationMode; using memgraph::replication_coordination_glue::ReplicationMode;
using memgraph::replication_coordination_glue::ReplicationRole; using memgraph::replication_coordination_glue::ReplicationRole;
using nuraft::buffer; using nuraft::buffer;
@ -42,20 +44,22 @@ class CoordinatorClusterStateTest : public ::testing::Test {
"MG_tests_unit_coordinator_cluster_state"}; "MG_tests_unit_coordinator_cluster_state"};
}; };
TEST_F(CoordinatorClusterStateTest, InstanceStateSerialization) { TEST_F(CoordinatorClusterStateTest, ReplicationInstanceStateSerialization) {
InstanceState instance_state{ ReplicationInstanceState instance_state{
CoordinatorClientConfig{"instance3", CoordinatorToReplicaConfig{.instance_name = "instance3",
"127.0.0.1", .mgt_server = Endpoint{"127.0.0.1", 10112},
10112, .bolt_server = Endpoint{"127.0.0.1", 7687},
std::chrono::seconds{1}, .replication_client_info = {.instance_name = "instance_name",
std::chrono::seconds{5}, .replication_mode = ReplicationMode::ASYNC,
std::chrono::seconds{10}, .replication_server = Endpoint{"127.0.0.1", 10001}},
{"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10001}, .instance_health_check_frequency_sec = std::chrono::seconds{1},
.ssl = std::nullopt}, .instance_down_timeout_sec = std::chrono::seconds{5},
.instance_get_uuid_frequency_sec = std::chrono::seconds{10},
.ssl = std::nullopt},
ReplicationRole::MAIN}; ReplicationRole::MAIN};
nlohmann::json j = instance_state; nlohmann::json j = instance_state;
InstanceState deserialized_instance_state = j.get<InstanceState>(); ReplicationInstanceState deserialized_instance_state = j.get<ReplicationInstanceState>();
EXPECT_EQ(instance_state.config, deserialized_instance_state.config); EXPECT_EQ(instance_state.config, deserialized_instance_state.config);
EXPECT_EQ(instance_state.status, deserialized_instance_state.status); EXPECT_EQ(instance_state.status, deserialized_instance_state.status);
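The round trip through nlohmann::json here works because the coordination types provide to_json/from_json overloads that the library finds via ADL. A minimal sketch of that pattern, using an illustrative struct rather than the real ReplicationInstanceState, and the same "json/json.hpp" include these tests use:

#include <cstdint>
#include <string>
#include "json/json.hpp"

struct ExampleConfig {
  std::string instance_name;
  std::uint16_t bolt_port;
  friend bool operator==(const ExampleConfig &, const ExampleConfig &) = default;
};

// Serialization hook picked up by nlohmann::json when constructing a json from ExampleConfig.
inline void to_json(nlohmann::json &j, const ExampleConfig &c) {
  j = nlohmann::json{{"instance_name", c.instance_name}, {"bolt_port", c.bolt_port}};
}

// Deserialization hook used by j.get<ExampleConfig>().
inline void from_json(const nlohmann::json &j, ExampleConfig &c) {
  j.at("instance_name").get_to(c.instance_name);
  j.at("bolt_port").get_to(c.bolt_port);
}

// Usage mirrors the test: ExampleConfig a{"instance3", 7687};
// nlohmann::json j = a; auto b = j.get<ExampleConfig>(); assert(a == b);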
@ -65,13 +69,16 @@ TEST_F(CoordinatorClusterStateTest, DoActionRegisterInstances) {
  auto coordinator_cluster_state = memgraph::coordination::CoordinatorClusterState{};
  {
-    CoordinatorClientConfig config{"instance1",
-                                   "127.0.0.1",
-                                   10111,
-                                   std::chrono::seconds{1},
-                                   std::chrono::seconds{5},
-                                   std::chrono::seconds{10},
-                                   {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10001},
-                                   .ssl = std::nullopt};
+    auto config =
+        CoordinatorToReplicaConfig{.instance_name = "instance1",
+                                   .mgt_server = Endpoint{"127.0.0.1", 10111},
+                                   .bolt_server = Endpoint{"127.0.0.1", 7687},
+                                   .replication_client_info = {.instance_name = "instance1",
+                                                               .replication_mode = ReplicationMode::ASYNC,
+                                                               .replication_server = Endpoint{"127.0.0.1", 10001}},
+                                   .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                   .instance_down_timeout_sec = std::chrono::seconds{5},
+                                   .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
+                                   .ssl = std::nullopt};

    auto buffer = CoordinatorStateMachine::SerializeRegisterInstance(config);
@ -80,13 +87,16 @@ TEST_F(CoordinatorClusterStateTest, DoActionRegisterInstances) {
    coordinator_cluster_state.DoAction(payload, action);
  }
  {
-    CoordinatorClientConfig config{"instance2",
-                                   "127.0.0.1",
-                                   10112,
-                                   std::chrono::seconds{1},
-                                   std::chrono::seconds{5},
-                                   std::chrono::seconds{10},
-                                   {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10002},
-                                   .ssl = std::nullopt};
+    auto config =
+        CoordinatorToReplicaConfig{.instance_name = "instance2",
+                                   .mgt_server = Endpoint{"127.0.0.1", 10112},
+                                   .bolt_server = Endpoint{"127.0.0.1", 7688},
+                                   .replication_client_info = {.instance_name = "instance2",
+                                                               .replication_mode = ReplicationMode::ASYNC,
+                                                               .replication_server = Endpoint{"127.0.0.1", 10002}},
+                                   .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                   .instance_down_timeout_sec = std::chrono::seconds{5},
+                                   .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
+                                   .ssl = std::nullopt};

    auto buffer = CoordinatorStateMachine::SerializeRegisterInstance(config);
@ -95,13 +105,16 @@ TEST_F(CoordinatorClusterStateTest, DoActionRegisterInstances) {
    coordinator_cluster_state.DoAction(payload, action);
  }
  {
-    CoordinatorClientConfig config{"instance3",
-                                   "127.0.0.1",
-                                   10113,
-                                   std::chrono::seconds{1},
-                                   std::chrono::seconds{5},
-                                   std::chrono::seconds{10},
-                                   {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10003},
-                                   .ssl = std::nullopt};
+    auto config =
+        CoordinatorToReplicaConfig{.instance_name = "instance3",
+                                   .mgt_server = Endpoint{"127.0.0.1", 10113},
+                                   .bolt_server = Endpoint{"127.0.0.1", 7689},
+                                   .replication_client_info = {.instance_name = "instance3",
+                                                               .replication_mode = ReplicationMode::ASYNC,
+                                                               .replication_server = Endpoint{"127.0.0.1", 10003}},
+                                   .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                   .instance_down_timeout_sec = std::chrono::seconds{5},
+                                   .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
+                                   .ssl = std::nullopt};

    auto buffer = CoordinatorStateMachine::SerializeRegisterInstance(config);
@ -110,13 +123,16 @@ TEST_F(CoordinatorClusterStateTest, DoActionRegisterInstances) {
    coordinator_cluster_state.DoAction(payload, action);
  }
  {
-    CoordinatorClientConfig config{"instance4",
-                                   "127.0.0.1",
-                                   10114,
-                                   std::chrono::seconds{1},
-                                   std::chrono::seconds{5},
-                                   std::chrono::seconds{10},
-                                   {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10004},
-                                   .ssl = std::nullopt};
+    auto config =
+        CoordinatorToReplicaConfig{.instance_name = "instance4",
+                                   .mgt_server = Endpoint{"127.0.0.1", 10114},
+                                   .bolt_server = Endpoint{"127.0.0.1", 7690},
+                                   .replication_client_info = {.instance_name = "instance4",
+                                                               .replication_mode = ReplicationMode::ASYNC,
+                                                               .replication_server = Endpoint{"127.0.0.1", 10004}},
+                                   .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                   .instance_down_timeout_sec = std::chrono::seconds{5},
+                                   .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
+                                   .ssl = std::nullopt};

    auto buffer = CoordinatorStateMachine::SerializeRegisterInstance(config);
@ -125,13 +141,16 @@ TEST_F(CoordinatorClusterStateTest, DoActionRegisterInstances) {
    coordinator_cluster_state.DoAction(payload, action);
  }
  {
-    CoordinatorClientConfig config{"instance5",
-                                   "127.0.0.1",
-                                   10115,
-                                   std::chrono::seconds{1},
-                                   std::chrono::seconds{5},
-                                   std::chrono::seconds{10},
-                                   {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10005},
-                                   .ssl = std::nullopt};
+    auto config =
+        CoordinatorToReplicaConfig{.instance_name = "instance5",
+                                   .mgt_server = Endpoint{"127.0.0.1", 10115},
+                                   .bolt_server = Endpoint{"127.0.0.1", 7691},
+                                   .replication_client_info = {.instance_name = "instance5",
+                                                               .replication_mode = ReplicationMode::ASYNC,
+                                                               .replication_server = Endpoint{"127.0.0.1", 10005}},
+                                   .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                   .instance_down_timeout_sec = std::chrono::seconds{5},
+                                   .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
+                                   .ssl = std::nullopt};

    auto buffer = CoordinatorStateMachine::SerializeRegisterInstance(config);
@ -140,13 +159,16 @@ TEST_F(CoordinatorClusterStateTest, DoActionRegisterInstances) {
    coordinator_cluster_state.DoAction(payload, action);
  }
  {
-    CoordinatorClientConfig config{"instance6",
-                                   "127.0.0.1",
-                                   10116,
-                                   std::chrono::seconds{1},
-                                   std::chrono::seconds{5},
-                                   std::chrono::seconds{10},
-                                   {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10006},
-                                   .ssl = std::nullopt};
+    auto config =
+        CoordinatorToReplicaConfig{.instance_name = "instance6",
+                                   .mgt_server = Endpoint{"127.0.0.1", 10116},
+                                   .bolt_server = Endpoint{"127.0.0.1", 7692},
+                                   .replication_client_info = {.instance_name = "instance6",
+                                                               .replication_mode = ReplicationMode::ASYNC,
+                                                               .replication_server = Endpoint{"127.0.0.1", 10006}},
+                                   .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                   .instance_down_timeout_sec = std::chrono::seconds{5},
+                                   .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
+                                   .ssl = std::nullopt};

    auto buffer = CoordinatorStateMachine::SerializeRegisterInstance(config);
@ -159,5 +181,6 @@ TEST_F(CoordinatorClusterStateTest, DoActionRegisterInstances) {
  coordinator_cluster_state.Serialize(data);
  auto deserialized_coordinator_cluster_state = CoordinatorClusterState::Deserialize(*data);
-  ASSERT_EQ(coordinator_cluster_state.GetInstances(), deserialized_coordinator_cluster_state.GetInstances());
+  ASSERT_EQ(coordinator_cluster_state.GetReplicationInstances(),
+            deserialized_coordinator_cluster_state.GetReplicationInstances());
}
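The six registration blocks above differ only in the instance name and ports. If that duplication ever becomes a maintenance burden, a small test-local factory could build the configs; the sketch below assumes the types already used in this file (CoordinatorToReplicaConfig, Endpoint, ReplicationMode) plus <string_view>, and is not part of the commit.

namespace {
// Hypothetical helper: reproduces one register block, e.g. MakeConfig("instance1", 10111, 7687, 10001).
CoordinatorToReplicaConfig MakeConfig(std::string_view name, uint16_t mgt_port, uint16_t bolt_port,
                                      uint16_t repl_port) {
  return CoordinatorToReplicaConfig{
      .instance_name = std::string{name},
      .mgt_server = Endpoint{"127.0.0.1", mgt_port},
      .bolt_server = Endpoint{"127.0.0.1", bolt_port},
      .replication_client_info = {.instance_name = std::string{name},
                                  .replication_mode = ReplicationMode::ASYNC,
                                  .replication_server = Endpoint{"127.0.0.1", repl_port}},
      .instance_health_check_frequency_sec = std::chrono::seconds{1},
      .instance_down_timeout_sec = std::chrono::seconds{5},
      .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
      .ssl = std::nullopt};
}
}  // namespace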

View File

@ -9,7 +9,8 @@
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
+#include "io/network/endpoint.hpp"
#include "nuraft/coordinator_state_machine.hpp"
#include "nuraft/raft_log_action.hpp"
#include "utils/file.hpp"
@ -19,10 +20,11 @@
#include <gtest/gtest.h>
#include "json/json.hpp"

-using memgraph::coordination::CoordinatorClientConfig;
using memgraph::coordination::CoordinatorStateMachine;
+using memgraph::coordination::CoordinatorToReplicaConfig;
using memgraph::coordination::RaftLogAction;
-using memgraph::coordination::ReplClientInfo;
+using memgraph::coordination::ReplicationClientInfo;
+using memgraph::io::network::Endpoint;
using memgraph::replication_coordination_glue::ReplicationMode;
using memgraph::utils::UUID;
@ -36,26 +38,29 @@ class RaftLogSerialization : public ::testing::Test {
};

TEST_F(RaftLogSerialization, ReplClientInfo) {
-  ReplClientInfo info{"instance_name", ReplicationMode::SYNC, "127.0.0.1", 10111};
+  ReplicationClientInfo info{.instance_name = "instance_name",
+                             .replication_mode = ReplicationMode::SYNC,
+                             .replication_server = Endpoint{"127.0.0.1", 10111}};

  nlohmann::json j = info;
-  ReplClientInfo info2 = j.get<memgraph::coordination::ReplClientInfo>();
+  ReplicationClientInfo info2 = j.get<memgraph::coordination::ReplicationClientInfo>();

  ASSERT_EQ(info, info2);
}
-TEST_F(RaftLogSerialization, CoordinatorClientConfig) {
-  CoordinatorClientConfig config{"instance3",
-                                 "127.0.0.1",
-                                 10112,
-                                 std::chrono::seconds{1},
-                                 std::chrono::seconds{5},
-                                 std::chrono::seconds{10},
-                                 {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10001},
-                                 .ssl = std::nullopt};
+TEST_F(RaftLogSerialization, CoordinatorToReplicaConfig) {
+  CoordinatorToReplicaConfig config{.instance_name = "instance3",
+                                    .mgt_server = Endpoint{"127.0.0.1", 10112},
+                                    .replication_client_info = {.instance_name = "instance_name",
+                                                                .replication_mode = ReplicationMode::ASYNC,
+                                                                .replication_server = Endpoint{"127.0.0.1", 10001}},
+                                    .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                    .instance_down_timeout_sec = std::chrono::seconds{5},
+                                    .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
+                                    .ssl = std::nullopt};

  nlohmann::json j = config;
-  CoordinatorClientConfig config2 = j.get<memgraph::coordination::CoordinatorClientConfig>();
+  CoordinatorToReplicaConfig config2 = j.get<memgraph::coordination::CoordinatorToReplicaConfig>();

  ASSERT_EQ(config, config2);
}
@ -106,19 +111,20 @@ TEST_F(RaftLogSerialization, RaftLogActionUpdateUUID) {
}

TEST_F(RaftLogSerialization, RegisterInstance) {
-  CoordinatorClientConfig config{"instance3",
-                                 "127.0.0.1",
-                                 10112,
-                                 std::chrono::seconds{1},
-                                 std::chrono::seconds{5},
-                                 std::chrono::seconds{10},
-                                 {"instance_name", ReplicationMode::ASYNC, "replication_ip_address", 10001},
-                                 .ssl = std::nullopt};
+  CoordinatorToReplicaConfig config{.instance_name = "instance3",
+                                    .mgt_server = Endpoint{"127.0.0.1", 10112},
+                                    .replication_client_info = {.instance_name = "instance_name",
+                                                                .replication_mode = ReplicationMode::ASYNC,
+                                                                .replication_server = Endpoint{"127.0.0.1", 10001}},
+                                    .instance_health_check_frequency_sec = std::chrono::seconds{1},
+                                    .instance_down_timeout_sec = std::chrono::seconds{5},
+                                    .instance_get_uuid_frequency_sec = std::chrono::seconds{10},
+                                    .ssl = std::nullopt};

  auto buffer = CoordinatorStateMachine::SerializeRegisterInstance(config);
  auto [payload, action] = CoordinatorStateMachine::DecodeLog(*buffer);

  ASSERT_EQ(action, RaftLogAction::REGISTER_REPLICATION_INSTANCE);
-  ASSERT_EQ(config, std::get<CoordinatorClientConfig>(payload));
+  ASSERT_EQ(config, std::get<CoordinatorToReplicaConfig>(payload));
}
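The last two assertions rely on DecodeLog handing back the payload as a variant-like value and on the config type being equality-comparable. A self-contained sketch of that pattern, with stand-in types rather than the real Raft log definitions:

#include <cassert>
#include <string>
#include <variant>

enum class ExampleAction { kRegister, kUnregister };

struct ExampleConfig {
  std::string instance_name;
  bool operator==(const ExampleConfig &) const = default;  // what ASSERT_EQ on configs needs
};

// The decoded payload is one alternative per action kind; std::get must match the action.
using ExamplePayload = std::variant<ExampleConfig, std::string /* instance name only */>;

int main() {
  ExamplePayload payload = ExampleConfig{"instance3"};
  ExampleAction action = ExampleAction::kRegister;
  assert(action == ExampleAction::kRegister);
  assert(std::get<ExampleConfig>(payload) == ExampleConfig{"instance3"});
  return 0;
}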
TEST_F(RaftLogSerialization, UnregisterInstance) { TEST_F(RaftLogSerialization, UnregisterInstance) {

View File

@ -0,0 +1,176 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#include "auth/auth.hpp"
#include "coordination/coordinator_instance.hpp"
#include "flags/run_time_configurable.hpp"
#include "interpreter_faker.hpp"
#include "io/network/endpoint.hpp"
#include "license/license.hpp"
#include "replication_handler/replication_handler.hpp"
#include "storage/v2/config.hpp"
#include "utils/file.hpp"
#include <gflags/gflags.h>
#include <gtest/gtest.h>
#include "json/json.hpp"
using memgraph::coordination::CoordinatorInstance;
using memgraph::coordination::CoordinatorToCoordinatorConfig;
using memgraph::coordination::CoordinatorToReplicaConfig;
using memgraph::coordination::RaftState;
using memgraph::coordination::ReplicationClientInfo;
using memgraph::io::network::Endpoint;
using memgraph::replication::ReplicationHandler;
using memgraph::replication_coordination_glue::ReplicationMode;
using memgraph::storage::Config;
// class MockCoordinatorInstance : CoordinatorInstance {
// auto AddCoordinatorInstance(CoordinatorToCoordinatorConfig const &config) -> void override {}
// };
class RoutingTableTest : public ::testing::Test {
protected:
std::filesystem::path main_data_directory{std::filesystem::temp_directory_path() /
"MG_tests_unit_coordinator_cluster_state"};
std::filesystem::path repl1_data_directory{std::filesystem::temp_directory_path() /
"MG_test_unit_storage_v2_replication_repl"};
std::filesystem::path repl2_data_directory{std::filesystem::temp_directory_path() /
"MG_test_unit_storage_v2_replication_repl2"};
void SetUp() override { Clear(); }
void TearDown() override { Clear(); }
Config main_conf = [&] {
Config config{
.durability =
{
.snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
},
.salient.items = {.properties_on_edges = true},
};
UpdatePaths(config, main_data_directory);
return config;
}();
Config repl1_conf = [&] {
Config config{
.durability =
{
.snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
},
.salient.items = {.properties_on_edges = true},
};
UpdatePaths(config, repl1_data_directory);
return config;
}();
Config repl2_conf = [&] {
Config config{
.durability =
{
.snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
},
.salient.items = {.properties_on_edges = true},
};
UpdatePaths(config, repl2_data_directory);
return config;
}();
const std::string local_host = ("127.0.0.1");
const std::array<uint16_t, 2> ports{10000, 20000};
const std::array<std::string, 2> replicas = {"REPLICA1", "REPLICA2"};
private:
void Clear() {
if (std::filesystem::exists(main_data_directory)) std::filesystem::remove_all(main_data_directory);
if (std::filesystem::exists(repl1_data_directory)) std::filesystem::remove_all(repl1_data_directory);
if (std::filesystem::exists(repl2_data_directory)) std::filesystem::remove_all(repl2_data_directory);
}
};
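The fixture wipes its three temporary directories in both SetUp and TearDown. An RAII guard is an alternative worth considering here; the sketch below is a generic illustration under that assumption, not part of the commit.

#include <filesystem>
#include <system_error>
#include <utility>

// Removes the directory on construction and again on destruction, even if a test exits early.
class ScopedTestDir {
 public:
  explicit ScopedTestDir(std::filesystem::path path) : path_(std::move(path)) { Remove(); }
  ~ScopedTestDir() { Remove(); }
  ScopedTestDir(const ScopedTestDir &) = delete;
  ScopedTestDir &operator=(const ScopedTestDir &) = delete;
  const std::filesystem::path &path() const { return path_; }

 private:
  void Remove() const {
    std::error_code ec;
    std::filesystem::remove_all(path_, ec);  // ignore errors: the directory may not exist yet
  }
  std::filesystem::path path_;
};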
struct MinMemgraph {
MinMemgraph(const memgraph::storage::Config &conf)
: auth{conf.durability.storage_directory / "auth", memgraph::auth::Auth::Config{/* default */}},
repl_state{ReplicationStateRootPath(conf)},
dbms{conf, repl_state
#ifdef MG_ENTERPRISE
,
auth, true
#endif
},
db_acc{dbms.Get()},
db{*db_acc.get()},
repl_handler(repl_state, dbms
#ifdef MG_ENTERPRISE
,
system_, auth
#endif
) {
}
memgraph::auth::SynchedAuth auth;
memgraph::system::System system_;
memgraph::replication::ReplicationState repl_state;
memgraph::dbms::DbmsHandler dbms;
memgraph::dbms::DatabaseAccess db_acc;
memgraph::dbms::Database &db;
ReplicationHandler repl_handler;
};
TEST_F(RoutingTableTest, GetSingleRouterRoutingTable) {
CoordinatorInstance instance1;
auto routing = std::map<std::string, std::string>{{"address", "localhost:7688"}};
auto routing_table = instance1.GetRoutingTable(routing);
ASSERT_EQ(routing_table.size(), 1);
auto const routers = routing_table[0];
ASSERT_EQ(routers.first, std::vector<std::string>{"localhost:7688"});
ASSERT_EQ(routers.second, "ROUTE");
}
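The routing table checked here is a list of (addresses, role) pairs, using the Bolt routing roles ROUTE, READ and WRITE. A sketch of the shape a mixed cluster could produce follows; the RoutingTable alias and the concrete entries are illustrative, not the production definition.

#include <string>
#include <utility>
#include <vector>

using RoutingTable = std::vector<std::pair<std::vector<std::string>, std::string>>;

RoutingTable ExampleMixedCluster() {
  return {
      {{"localhost:7690"}, "ROUTE"},                   // coordinators answer ROUTE requests
      {{"127.0.0.1:7687"}, "WRITE"},                   // current MAIN
      {{"127.0.0.1:7688", "127.0.0.1:7689"}, "READ"},  // replicas
  };
}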
TEST_F(RoutingTableTest, GetMixedRoutingTable) {
auto instance1 = RaftState::MakeRaftState([]() {}, []() {});
auto routing = std::map<std::string, std::string>{{"address", "localhost:7690"}};
instance1.AppendRegisterReplicationInstanceLog(CoordinatorToReplicaConfig{
.instance_name = "instance2",
.mgt_server = Endpoint{"127.0.0.1", 10011},
.bolt_server = Endpoint{"127.0.0.1", 7687},
.replication_client_info = ReplicationClientInfo{.instance_name = "instance2",
.replication_mode = ReplicationMode::ASYNC,
.replication_server = Endpoint{"127.0.0.1", 10001}}});
instance1.GetAllCoordinators();
// auto routing_table = instance1.GetRoutingTable(routing);
// ASSERT_EQ(routing_table.size(), 1);
// auto const routers = routing_table[0];
// ASSERT_EQ(routers.second, "ROUTE");
}
// TEST_F(RoutingTableTest, GetMultipleRoutersRoutingTable) {
//
// CoordinatorInstance instance1;
// instance1.AddCoordinatorInstance(CoordinatorToCoordinatorConfig{.coordinator_server_id = 1,
// .bolt_server = Endpoint{"127.0.0.1", 7689},
// .coordinator_server = Endpoint{"127.0.0.1",
// 10111}});
//
// auto routing = std::map<std::string, std::string>{{"address", "localhost:7688"}};
// auto routing_table = instance1.GetRoutingTable(routing);
//
// ASSERT_EQ(routing_table.size(), 1);
//
// auto const routers = routing_table[0];
// ASSERT_EQ(routers.second, "ROUTE");
// ASSERT_EQ(routers.first.size(), 2);
// auto const expected_routers = std::vector<std::string>{"localhost:7689", "localhost:7688"};
// ASSERT_EQ(routers.first, expected_routers);
// }

View File

@ -11,8 +11,9 @@
#include <gtest/gtest.h>

-#include "coordination/coordinator_config.hpp"
+#include "coordination/coordinator_communication_config.hpp"
#include "coordination/coordinator_slk.hpp"
+#include "io/network/endpoint.hpp"
#include "replication/config.hpp"
#include "replication_coordination_glue/mode.hpp"
#include "slk_common.hpp"
@ -20,6 +21,8 @@
#include "storage/v2/replication/slk.hpp" #include "storage/v2/replication/slk.hpp"
#include "storage/v2/temporal.hpp" #include "storage/v2/temporal.hpp"
using memgraph::io::network::Endpoint;
TEST(SlkAdvanced, PropertyValueList) { TEST(SlkAdvanced, PropertyValueList) {
std::vector<memgraph::storage::PropertyValue> original{ std::vector<memgraph::storage::PropertyValue> original{
memgraph::storage::PropertyValue("hello world!"), memgraph::storage::PropertyValue("hello world!"),
@ -119,24 +122,19 @@ TEST(SlkAdvanced, PropertyValueComplex) {
}

TEST(SlkAdvanced, ReplicationClientConfigs) {
-  using ReplicationClientInfo = memgraph::coordination::CoordinatorClientConfig::ReplicationClientInfo;
+  using ReplicationClientInfo = memgraph::coordination::ReplicationClientInfo;
  using ReplicationClientInfoVec = std::vector<ReplicationClientInfo>;
  using ReplicationMode = memgraph::replication_coordination_glue::ReplicationMode;

-  ReplicationClientInfoVec original{ReplicationClientInfo{.instance_name = "replica1",
-                                                          .replication_mode = ReplicationMode::SYNC,
-                                                          .replication_ip_address = "127.0.0.1",
-                                                          .replication_port = 10000},
-                                    ReplicationClientInfo{.instance_name = "replica2",
-                                                          .replication_mode = ReplicationMode::ASYNC,
-                                                          .replication_ip_address = "127.0.1.1",
-                                                          .replication_port = 10010},
-                                    ReplicationClientInfo{
-                                        .instance_name = "replica3",
-                                        .replication_mode = ReplicationMode::ASYNC,
-                                        .replication_ip_address = "127.1.1.1",
-                                        .replication_port = 1110,
-                                    }};
+  ReplicationClientInfoVec original{ReplicationClientInfo{.instance_name = "replica1",
+                                                          .replication_mode = ReplicationMode::SYNC,
+                                                          .replication_server = Endpoint{"127.0.0.1", 10000}},
+                                    ReplicationClientInfo{.instance_name = "replica2",
+                                                          .replication_mode = ReplicationMode::ASYNC,
+                                                          .replication_server = Endpoint{"127.0.0.1", 10010}},
+                                    ReplicationClientInfo{.instance_name = "replica3",
+                                                          .replication_mode = ReplicationMode::ASYNC,
+                                                          .replication_server = Endpoint{"127.0.0.1", 10011}}};

  memgraph::slk::Loopback loopback;
  auto builder = loopback.GetBuilder();