Add generic CoordinatorInstance
This commit is contained in:
parent
567e1fa1cb
commit
a0ecea7d1c
@ -8,6 +8,7 @@ target_sources(mg-coordination
|
||||
include/coordination/coordinator_server.hpp
|
||||
include/coordination/coordinator_config.hpp
|
||||
include/coordination/coordinator_exceptions.hpp
|
||||
include/coordination/coordinator_instance.hpp
|
||||
include/coordination/coordinator_slk.hpp
|
||||
include/coordination/coordinator_data.hpp
|
||||
include/coordination/constants.hpp
|
||||
@ -25,5 +26,5 @@ target_sources(mg-coordination
|
||||
target_include_directories(mg-coordination PUBLIC include)
|
||||
|
||||
target_link_libraries(mg-coordination
|
||||
PUBLIC mg::utils mg::rpc mg::slk mg::io mg::repl_coord_glue
|
||||
PUBLIC mg::utils mg::rpc mg::slk mg::io mg::repl_coord_glue rangev3
|
||||
)
|
||||
|
@ -13,78 +13,46 @@
|
||||
|
||||
#include "coordination/coordinator_data.hpp"
|
||||
|
||||
#include <range/v3/view.hpp>
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
namespace {
|
||||
|
||||
bool ReplicaWithNameExists(const std::list<CoordinatorClientInfo> &replicas, const CoordinatorClientConfig &config) {
|
||||
auto name_matches = [&instance_name = config.instance_name](auto const &replica) {
|
||||
return replica.InstanceName() == instance_name;
|
||||
};
|
||||
return std::ranges::any_of(replicas, name_matches);
|
||||
};
|
||||
|
||||
bool ReplicaWithEndpointExists(const std::list<CoordinatorClientInfo> &replicas,
|
||||
const CoordinatorClientConfig &config) {
|
||||
auto address_matches = [socket_address = fmt::format("{}:{}", config.ip_address, config.port)](auto const &replica) {
|
||||
return replica.SocketAddress() == socket_address;
|
||||
};
|
||||
return std::ranges::any_of(replicas, address_matches);
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
CoordinatorData::CoordinatorData() {
|
||||
auto replica_find_client_info = [](CoordinatorData *coord_data,
|
||||
std::string_view instance_name) -> CoordinatorClientInfo & {
|
||||
auto find_instance = [](CoordinatorData *coord_data, std::string_view instance_name) -> CoordinatorInstance & {
|
||||
std::shared_lock<utils::RWLock> lock{coord_data->coord_data_lock_};
|
||||
|
||||
auto replica_client_info = std::ranges::find_if(
|
||||
coord_data->registered_replicas_info_,
|
||||
[instance_name](const CoordinatorClientInfo &replica) { return replica.InstanceName() == instance_name; });
|
||||
auto instance =
|
||||
std::ranges::find_if(coord_data->registered_instances_, [instance_name](const CoordinatorInstance &instance) {
|
||||
return instance.client_info_.InstanceName() == instance_name;
|
||||
});
|
||||
|
||||
if (replica_client_info != coord_data->registered_replicas_info_.end()) {
|
||||
return *replica_client_info;
|
||||
}
|
||||
|
||||
MG_ASSERT(coord_data->registered_main_info_->InstanceName() == instance_name,
|
||||
"Instance is neither a replica nor main...");
|
||||
return *coord_data->registered_main_info_;
|
||||
MG_ASSERT(instance != coord_data->registered_instances_.end(), "Instance {} not found during callback!",
|
||||
instance_name);
|
||||
return *instance;
|
||||
};
|
||||
|
||||
replica_succ_cb_ = [replica_find_client_info](CoordinatorData *coord_data, std::string_view instance_name) -> void {
|
||||
auto &client_info = replica_find_client_info(coord_data, instance_name);
|
||||
client_info.UpdateLastResponseTime();
|
||||
replica_succ_cb_ = [find_instance](CoordinatorData *coord_data, std::string_view instance_name) -> void {
|
||||
auto &instance = find_instance(coord_data, instance_name);
|
||||
MG_ASSERT(instance.IsReplica(), "Instance {} is not a replica!", instance_name);
|
||||
instance.client_info_.UpdateLastResponseTime();
|
||||
};
|
||||
|
||||
replica_fail_cb_ = [replica_find_client_info](CoordinatorData *coord_data, std::string_view instance_name) -> void {
|
||||
auto &client_info = replica_find_client_info(coord_data, instance_name);
|
||||
client_info.UpdateInstanceStatus();
|
||||
replica_fail_cb_ = [find_instance](CoordinatorData *coord_data, std::string_view instance_name) -> void {
|
||||
auto &instance = find_instance(coord_data, instance_name);
|
||||
MG_ASSERT(instance.IsReplica(), "Instance {} is not a replica!", instance_name);
|
||||
instance.client_info_.UpdateInstanceStatus();
|
||||
};
|
||||
|
||||
auto get_main_client_info = [](CoordinatorData *coord_data,
|
||||
std::string_view instance_name) -> CoordinatorClientInfo & {
|
||||
MG_ASSERT(coord_data->registered_main_info_.has_value(), "Main info is not set, but callback is called");
|
||||
std::shared_lock<utils::RWLock> lock{coord_data->coord_data_lock_};
|
||||
|
||||
// TODO When we will support restoration of main, we have to assert that the instance is main or replica, not at
|
||||
// this point....
|
||||
auto ®istered_main_info = coord_data->registered_main_info_;
|
||||
MG_ASSERT(registered_main_info->InstanceName() == instance_name,
|
||||
"Callback called for wrong instance name: {}, expected: {}", instance_name,
|
||||
registered_main_info->InstanceName());
|
||||
return *registered_main_info;
|
||||
main_succ_cb_ = [find_instance](CoordinatorData *coord_data, std::string_view instance_name) -> void {
|
||||
auto &instance = find_instance(coord_data, instance_name);
|
||||
MG_ASSERT(instance.IsMain(), "Instance {} is not a main!", instance_name);
|
||||
instance.client_info_.UpdateLastResponseTime();
|
||||
};
|
||||
|
||||
main_succ_cb_ = [get_main_client_info](CoordinatorData *coord_data, std::string_view instance_name) -> void {
|
||||
auto ®istered_main_info = get_main_client_info(coord_data, instance_name);
|
||||
registered_main_info.UpdateLastResponseTime();
|
||||
};
|
||||
|
||||
main_fail_cb_ = [this, get_main_client_info](CoordinatorData *coord_data, std::string_view instance_name) -> void {
|
||||
auto ®istered_main_info = get_main_client_info(coord_data, instance_name);
|
||||
if (bool main_alive = registered_main_info.UpdateInstanceStatus(); !main_alive) {
|
||||
spdlog::warn("Main is not alive, starting failover");
|
||||
main_fail_cb_ = [this, find_instance](CoordinatorData *coord_data, std::string_view instance_name) -> void {
|
||||
auto &instance = find_instance(coord_data, instance_name);
|
||||
if (bool main_alive = instance.client_info_.UpdateInstanceStatus(); !main_alive) {
|
||||
// spdlog::warn("Main is not alive, starting failover");
|
||||
// switch (auto failover_status = DoFailover(); failover_status) {
|
||||
// using enum DoFailoverStatus;
|
||||
// case ALL_REPLICAS_DOWN:
|
||||
@ -102,115 +70,127 @@ CoordinatorData::CoordinatorData() {
|
||||
|
||||
auto CoordinatorData::DoFailover() -> DoFailoverStatus {
|
||||
using ReplicationClientInfo = CoordinatorClientConfig::ReplicationClientInfo;
|
||||
|
||||
// std::lock_guard<utils::RWLock> lock{coord_data_lock_};
|
||||
|
||||
if (!registered_main_info_.has_value()) {
|
||||
// TODO: (andi) Make const what is possible to make const
|
||||
|
||||
auto main_instance = std::ranges::find_if(registered_instances_, &CoordinatorInstance::IsMain);
|
||||
|
||||
if (main_instance == registered_instances_.end()) {
|
||||
return DoFailoverStatus::CLUSTER_UNINITIALIZED;
|
||||
}
|
||||
|
||||
if (registered_main_info_->IsAlive()) {
|
||||
if (main_instance->client_info_.IsAlive()) {
|
||||
return DoFailoverStatus::MAIN_ALIVE;
|
||||
}
|
||||
|
||||
registered_main_->StopFrequentCheck();
|
||||
main_instance->client_.StopFrequentCheck();
|
||||
|
||||
const auto chosen_replica_info = std::ranges::find_if(
|
||||
registered_replicas_info_, [](const CoordinatorClientInfo &client_info) { return client_info.IsAlive(); });
|
||||
if (chosen_replica_info == registered_replicas_info_.end()) {
|
||||
auto replica_instances = registered_instances_ | ranges::views::filter(&CoordinatorInstance::IsReplica);
|
||||
|
||||
auto chosen_replica_instance = std::ranges::find_if(
|
||||
replica_instances, [](const CoordinatorInstance &instance) { return instance.client_info_.IsAlive(); });
|
||||
|
||||
if (chosen_replica_instance == replica_instances.end()) {
|
||||
return DoFailoverStatus::ALL_REPLICAS_DOWN;
|
||||
}
|
||||
|
||||
auto chosen_replica =
|
||||
std::ranges::find_if(registered_replicas_, [&chosen_replica_info](const CoordinatorClient &replica) {
|
||||
return replica.InstanceName() == chosen_replica_info->InstanceName();
|
||||
});
|
||||
MG_ASSERT(chosen_replica != registered_replicas_.end(), "Chosen replica {} not found in registered replicas",
|
||||
chosen_replica_info->InstanceName());
|
||||
chosen_replica->PauseFrequentCheck();
|
||||
chosen_replica_instance->client_.PauseFrequentCheck();
|
||||
|
||||
std::vector<ReplicationClientInfo> repl_clients_info;
|
||||
repl_clients_info.reserve(registered_replicas_.size() - 1);
|
||||
std::ranges::for_each(registered_replicas_, [&chosen_replica, &repl_clients_info](const CoordinatorClient &replica) {
|
||||
if (replica != *chosen_replica) {
|
||||
repl_clients_info.emplace_back(replica.ReplicationClientInfo());
|
||||
}
|
||||
});
|
||||
repl_clients_info.reserve(std::ranges::distance(replica_instances));
|
||||
|
||||
if (!chosen_replica->SendPromoteReplicaToMainRpc(std::move(repl_clients_info))) {
|
||||
auto not_chosen_replica_instance = [&chosen_replica_instance](const CoordinatorInstance &instance) {
|
||||
return instance != *chosen_replica_instance;
|
||||
};
|
||||
|
||||
for (const auto &unchosen_replica_instance : replica_instances | ranges::views::filter(not_chosen_replica_instance)) {
|
||||
repl_clients_info.emplace_back(unchosen_replica_instance.client_.ReplicationClientInfo());
|
||||
}
|
||||
|
||||
if (!chosen_replica_instance->client_.SendPromoteReplicaToMainRpc(std::move(repl_clients_info))) {
|
||||
spdlog::error("Sent RPC message, but exception was caught, aborting Failover");
|
||||
// TODO: new status and rollback all changes that were done...
|
||||
MG_ASSERT(false, "RPC message failed");
|
||||
}
|
||||
|
||||
registered_replicas_.erase(chosen_replica);
|
||||
registered_replicas_info_.erase(chosen_replica_info);
|
||||
chosen_replica_instance->client_.SetSuccCallback(main_succ_cb_);
|
||||
chosen_replica_instance->client_.SetFailCallback(main_fail_cb_);
|
||||
chosen_replica_instance->replication_role_ = replication_coordination_glue::ReplicationRole::MAIN;
|
||||
// TODO: (andi) Is this correct
|
||||
chosen_replica_instance->client_.ReplicationClientInfo().reset();
|
||||
chosen_replica_instance->client_.ResumeFrequentCheck();
|
||||
|
||||
registered_main_ = std::make_unique<CoordinatorClient>(this, chosen_replica->Config(), chosen_replica->SuccCallback(),
|
||||
chosen_replica->FailCallback());
|
||||
registered_main_->ReplicationClientInfo().reset();
|
||||
registered_main_info_.emplace(*chosen_replica_info);
|
||||
registered_main_->StartFrequentCheck();
|
||||
registered_instances_.erase(main_instance);
|
||||
|
||||
return DoFailoverStatus::SUCCESS;
|
||||
}
|
||||
|
||||
auto CoordinatorData::ShowMain() const -> std::optional<CoordinatorInstanceStatus> {
|
||||
if (!registered_main_info_.has_value()) {
|
||||
auto main_instance = std::ranges::find_if(registered_instances_, &CoordinatorInstance::IsMain);
|
||||
if (main_instance == registered_instances_.end()) {
|
||||
return std::nullopt;
|
||||
}
|
||||
return CoordinatorInstanceStatus{.instance_name = registered_main_info_->InstanceName(),
|
||||
.socket_address = registered_main_info_->SocketAddress(),
|
||||
.is_alive = registered_main_info_->IsAlive()};
|
||||
|
||||
return CoordinatorInstanceStatus{.instance_name = main_instance->client_info_.InstanceName(),
|
||||
.socket_address = main_instance->client_info_.SocketAddress(),
|
||||
.is_alive = main_instance->client_info_.IsAlive()};
|
||||
};
|
||||
|
||||
auto CoordinatorData::ShowReplicas() const -> std::vector<CoordinatorInstanceStatus> {
|
||||
std::vector<CoordinatorInstanceStatus> instances_status;
|
||||
instances_status.reserve(registered_replicas_info_.size());
|
||||
|
||||
std::ranges::transform(registered_replicas_info_, std::back_inserter(instances_status),
|
||||
[](const CoordinatorClientInfo &coord_client_info) {
|
||||
return CoordinatorInstanceStatus{.instance_name = coord_client_info.InstanceName(),
|
||||
.socket_address = coord_client_info.SocketAddress(),
|
||||
.is_alive = coord_client_info.IsAlive()};
|
||||
});
|
||||
for (const auto &replica_instance : registered_instances_ | ranges::views::filter(&CoordinatorInstance::IsReplica)) {
|
||||
instances_status.emplace_back(
|
||||
CoordinatorInstanceStatus{.instance_name = replica_instance.client_info_.InstanceName(),
|
||||
.socket_address = replica_instance.client_info_.SocketAddress(),
|
||||
.is_alive = replica_instance.client_info_.IsAlive()});
|
||||
}
|
||||
|
||||
return instances_status;
|
||||
}
|
||||
|
||||
auto CoordinatorData::RegisterMain(CoordinatorClientConfig config) -> RegisterMainReplicaCoordinatorStatus {
|
||||
// TODO: (andi) test this
|
||||
if (ReplicaWithNameExists(registered_replicas_info_, config)) {
|
||||
|
||||
if (std::ranges::any_of(registered_instances_, [&config](const CoordinatorInstance &instance) {
|
||||
return instance.client_info_.InstanceName() == config.instance_name;
|
||||
})) {
|
||||
return RegisterMainReplicaCoordinatorStatus::NAME_EXISTS;
|
||||
}
|
||||
if (ReplicaWithEndpointExists(registered_replicas_info_, config)) {
|
||||
|
||||
if (std::ranges::any_of(registered_instances_, [&config](const CoordinatorInstance &instance) {
|
||||
return instance.client_info_.SocketAddress() == config.ip_address + ":" + std::to_string(config.port);
|
||||
})) {
|
||||
return RegisterMainReplicaCoordinatorStatus::ENDPOINT_EXISTS;
|
||||
}
|
||||
|
||||
registered_main_ = std::make_unique<CoordinatorClient>(this, std::move(config), main_succ_cb_, main_fail_cb_);
|
||||
|
||||
registered_main_info_.emplace(registered_main_->InstanceName(), registered_main_->SocketAddress());
|
||||
registered_main_->StartFrequentCheck();
|
||||
// TODO: (andi) Improve this
|
||||
auto *instance = ®istered_instances_.emplace_back(this, std::move(config), main_succ_cb_, main_fail_cb_,
|
||||
replication_coordination_glue::ReplicationRole::MAIN);
|
||||
instance->client_.StartFrequentCheck();
|
||||
|
||||
return RegisterMainReplicaCoordinatorStatus::SUCCESS;
|
||||
}
|
||||
|
||||
auto CoordinatorData::RegisterReplica(CoordinatorClientConfig config) -> RegisterMainReplicaCoordinatorStatus {
|
||||
// TODO: (andi) Test it
|
||||
if (ReplicaWithNameExists(registered_replicas_info_, config)) {
|
||||
if (std::ranges::any_of(registered_instances_, [&config](const CoordinatorInstance &instance) {
|
||||
return instance.client_info_.InstanceName() == config.instance_name;
|
||||
})) {
|
||||
return RegisterMainReplicaCoordinatorStatus::NAME_EXISTS;
|
||||
}
|
||||
|
||||
if (registered_main_info_ && registered_main_info_->InstanceName() == config.instance_name) {
|
||||
return RegisterMainReplicaCoordinatorStatus::NAME_EXISTS;
|
||||
}
|
||||
|
||||
if (ReplicaWithEndpointExists(registered_replicas_info_, config)) {
|
||||
if (std::ranges::any_of(registered_instances_, [&config](const CoordinatorInstance &instance) {
|
||||
return instance.client_info_.SocketAddress() == config.ip_address + ":" + std::to_string(config.port);
|
||||
})) {
|
||||
return RegisterMainReplicaCoordinatorStatus::ENDPOINT_EXISTS;
|
||||
}
|
||||
|
||||
auto *coord_client = ®istered_replicas_.emplace_back(this, std::move(config), replica_succ_cb_, replica_fail_cb_);
|
||||
registered_replicas_info_.emplace_back(coord_client->InstanceName(), coord_client->SocketAddress());
|
||||
coord_client->StartFrequentCheck();
|
||||
// TODO: (andi) Improve this
|
||||
auto *instance = ®istered_instances_.emplace_back(this, std::move(config), replica_succ_cb_, replica_fail_cb_,
|
||||
replication_coordination_glue::ReplicationRole::REPLICA);
|
||||
instance->client_.StartFrequentCheck();
|
||||
|
||||
return RegisterMainReplicaCoordinatorStatus::SUCCESS;
|
||||
}
|
||||
|
@ -49,6 +49,7 @@ class CoordinatorClient {
|
||||
|
||||
auto SendPromoteReplicaToMainRpc(ReplicationClientsInfo replication_clients_info) const -> bool;
|
||||
|
||||
// TODO: (andi) These several methods are probably not needed, Instance should own this info
|
||||
auto InstanceName() const -> std::string_view;
|
||||
auto SocketAddress() const -> std::string;
|
||||
auto Config() const -> CoordinatorClientConfig const &;
|
||||
|
@ -21,9 +21,10 @@
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
// TODO: (andi) Fix ownerships with std::string_view
|
||||
class CoordinatorClientInfo {
|
||||
public:
|
||||
CoordinatorClientInfo(std::string_view instance_name, std::string_view socket_address)
|
||||
CoordinatorClientInfo(std::string instance_name, std::string socket_address)
|
||||
: last_response_time_(std::chrono::system_clock::now()),
|
||||
is_alive_(true), // TODO: (andi) Maybe it should be false until the first ping
|
||||
instance_name_(instance_name),
|
||||
@ -50,15 +51,15 @@ class CoordinatorClientInfo {
|
||||
CoordinatorClientInfo(CoordinatorClientInfo &&other) noexcept
|
||||
: last_response_time_(other.last_response_time_.load()),
|
||||
is_alive_(other.is_alive_.load()),
|
||||
instance_name_(other.instance_name_),
|
||||
socket_address_(other.socket_address_) {}
|
||||
instance_name_(std::move(other.instance_name_)),
|
||||
socket_address_(std::move(other.socket_address_)) {}
|
||||
|
||||
CoordinatorClientInfo &operator=(CoordinatorClientInfo &&other) noexcept {
|
||||
if (this != &other) {
|
||||
last_response_time_.store(other.last_response_time_.load());
|
||||
is_alive_ = other.is_alive_.load();
|
||||
instance_name_ = other.instance_name_;
|
||||
socket_address_ = other.socket_address_;
|
||||
instance_name_ = std::move(other.instance_name_);
|
||||
socket_address_ = std::move(other.socket_address_);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
@ -78,7 +79,8 @@ class CoordinatorClientInfo {
|
||||
private:
|
||||
std::atomic<std::chrono::system_clock::time_point> last_response_time_{};
|
||||
std::atomic<bool> is_alive_{false};
|
||||
std::string_view instance_name_;
|
||||
// TODO: (andi) Who owns this info?
|
||||
std::string instance_name_;
|
||||
std::string socket_address_;
|
||||
};
|
||||
|
||||
|
@ -15,6 +15,7 @@
|
||||
|
||||
#include "coordination/coordinator_client.hpp"
|
||||
#include "coordination/coordinator_client_info.hpp"
|
||||
#include "coordination/coordinator_instance.hpp"
|
||||
#include "coordination/coordinator_instance_status.hpp"
|
||||
#include "coordination/coordinator_server.hpp"
|
||||
#include "coordination/failover_status.hpp"
|
||||
@ -48,10 +49,7 @@ class CoordinatorData {
|
||||
std::function<void(CoordinatorData *, std::string_view)> replica_succ_cb_;
|
||||
std::function<void(CoordinatorData *, std::string_view)> replica_fail_cb_;
|
||||
|
||||
std::list<CoordinatorClient> registered_replicas_;
|
||||
std::list<CoordinatorClientInfo> registered_replicas_info_;
|
||||
std::unique_ptr<CoordinatorClient> registered_main_;
|
||||
std::optional<CoordinatorClientInfo> registered_main_info_;
|
||||
std::list<CoordinatorInstance> registered_instances_;
|
||||
};
|
||||
|
||||
struct CoordinatorMainReplicaData {
|
||||
|
@ -0,0 +1,55 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "coordination/coordinator_client.hpp"
|
||||
#include "coordination/coordinator_client_info.hpp"
|
||||
#include "replication_coordination_glue/role.hpp"
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
class CoordinatorData;
|
||||
using HealthCheckCallback = std::function<void(CoordinatorData *, std::string_view)>;
|
||||
|
||||
struct CoordinatorInstance {
|
||||
// TODO: (andi) Capture by const reference functions
|
||||
CoordinatorInstance(CoordinatorData *data, CoordinatorClientConfig config, HealthCheckCallback succ_cb,
|
||||
HealthCheckCallback fail_cb, replication_coordination_glue::ReplicationRole replication_role)
|
||||
: client_(data, config, succ_cb, fail_cb),
|
||||
client_info_(config.instance_name, config.ip_address + ":" + std::to_string(config.port)),
|
||||
replication_role_(replication_role) {}
|
||||
|
||||
CoordinatorInstance(CoordinatorInstance const &other) = delete;
|
||||
CoordinatorInstance &operator=(CoordinatorInstance const &other) = delete;
|
||||
CoordinatorInstance(CoordinatorInstance &&other) noexcept = delete;
|
||||
CoordinatorInstance &operator=(CoordinatorInstance &&other) noexcept = delete;
|
||||
~CoordinatorInstance() = default;
|
||||
|
||||
auto IsReplica() const -> bool {
|
||||
return replication_role_ == replication_coordination_glue::ReplicationRole::REPLICA;
|
||||
}
|
||||
auto IsMain() const -> bool { return replication_role_ == replication_coordination_glue::ReplicationRole::MAIN; }
|
||||
|
||||
CoordinatorClient client_;
|
||||
CoordinatorClientInfo client_info_;
|
||||
replication_coordination_glue::ReplicationRole replication_role_;
|
||||
|
||||
// TODO: (andi) Make this better
|
||||
friend bool operator==(CoordinatorInstance const &first, CoordinatorInstance const &second) {
|
||||
return first.client_ == second.client_ && first.replication_role_ == second.replication_role_;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
#endif
|
@ -110,7 +110,7 @@ class Database {
|
||||
* @param force_directory Use the configured directory, do not try to decipher the multi-db version
|
||||
* @return DatabaseInfo
|
||||
*/
|
||||
DatabaseInfo GetInfo(bool force_directory, replication::ReplicationRole replication_role) const {
|
||||
DatabaseInfo GetInfo(bool force_directory, replication_coordination_glue::ReplicationRole replication_role) const {
|
||||
DatabaseInfo info;
|
||||
info.storage_info = storage_->GetInfo(force_directory, replication_role);
|
||||
info.triggers = trigger_store_.GetTriggerInfo().size();
|
||||
|
@ -23,7 +23,7 @@
|
||||
#include "storage/v2/inmemory/storage.hpp"
|
||||
#include "storage/v2/inmemory/unique_constraints.hpp"
|
||||
|
||||
using memgraph::replication::ReplicationRole;
|
||||
using memgraph::replication_coordination_glue::ReplicationRole;
|
||||
using memgraph::storage::Delta;
|
||||
using memgraph::storage::EdgeAccessor;
|
||||
using memgraph::storage::EdgeRef;
|
||||
|
@ -169,7 +169,7 @@ auto ReplicationHandler::UnregisterReplica(std::string_view name) -> UnregisterR
|
||||
dbms_handler_.ReplicationState().ReplicationData());
|
||||
}
|
||||
|
||||
auto ReplicationHandler::GetRole() const -> memgraph::replication::ReplicationRole {
|
||||
auto ReplicationHandler::GetRole() const -> memgraph::replication_coordination_glue::ReplicationRole {
|
||||
return dbms_handler_.ReplicationState().GetRole();
|
||||
}
|
||||
|
||||
|
@ -11,8 +11,8 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "replication_coordination_glue/role.hpp"
|
||||
#include "dbms/database.hpp"
|
||||
#include "replication/role.hpp"
|
||||
#include "utils/result.hpp"
|
||||
|
||||
namespace memgraph::replication {
|
||||
@ -53,7 +53,7 @@ struct ReplicationHandler {
|
||||
auto UnregisterReplica(std::string_view name) -> UnregisterReplicaResult;
|
||||
|
||||
// Helper pass-through (TODO: remove)
|
||||
auto GetRole() const -> memgraph::replication::ReplicationRole;
|
||||
auto GetRole() const -> memgraph::replication_coordination_glue::ReplicationRole;
|
||||
bool IsMain() const;
|
||||
bool IsReplica() const;
|
||||
|
||||
|
@ -32,7 +32,7 @@
|
||||
#include "utils/timer.hpp"
|
||||
#include "version.hpp"
|
||||
|
||||
using memgraph::replication::ReplicationRole;
|
||||
using memgraph::replication_coordination_glue::ReplicationRole;
|
||||
|
||||
bool ValidateControlCharacter(const char *flagname, const std::string &value) {
|
||||
if (value.empty()) {
|
||||
|
@ -336,9 +336,9 @@ class ReplQueryHandler {
|
||||
/// @throw QueryRuntimeException if an error ocurred.
|
||||
ReplicationQuery::ReplicationRole ShowReplicationRole() const {
|
||||
switch (handler_.GetRole()) {
|
||||
case memgraph::replication::ReplicationRole::MAIN:
|
||||
case memgraph::replication_coordination_glue::ReplicationRole::MAIN:
|
||||
return ReplicationQuery::ReplicationRole::MAIN;
|
||||
case memgraph::replication::ReplicationRole::REPLICA:
|
||||
case memgraph::replication_coordination_glue::ReplicationRole::REPLICA:
|
||||
return ReplicationQuery::ReplicationRole::REPLICA;
|
||||
}
|
||||
throw QueryRuntimeException("Couldn't show replication role - invalid role set!");
|
||||
@ -3171,7 +3171,7 @@ PreparedQuery PrepareEdgeImportModeQuery(ParsedQuery parsed_query, CurrentDB &cu
|
||||
}
|
||||
|
||||
PreparedQuery PrepareCreateSnapshotQuery(ParsedQuery parsed_query, bool in_explicit_transaction, CurrentDB ¤t_db,
|
||||
replication::ReplicationRole replication_role) {
|
||||
replication_coordination_glue::ReplicationRole replication_role) {
|
||||
if (in_explicit_transaction) {
|
||||
throw CreateSnapshotInMulticommandTxException();
|
||||
}
|
||||
|
@ -5,7 +5,6 @@ target_sources(mg-replication
|
||||
include/replication/state.hpp
|
||||
include/replication/epoch.hpp
|
||||
include/replication/config.hpp
|
||||
include/replication/role.hpp
|
||||
include/replication/status.hpp
|
||||
include/replication/messages.hpp
|
||||
include/replication/replication_client.hpp
|
||||
|
@ -15,8 +15,8 @@
|
||||
#include "replication/config.hpp"
|
||||
#include "replication/epoch.hpp"
|
||||
#include "replication/replication_client.hpp"
|
||||
#include "replication/role.hpp"
|
||||
#include "replication_coordination_glue/mode.hpp"
|
||||
#include "replication_coordination_glue/role.hpp"
|
||||
#include "replication_server.hpp"
|
||||
#include "status.hpp"
|
||||
#include "utils/result.hpp"
|
||||
@ -72,12 +72,13 @@ struct ReplicationState {
|
||||
using FetchReplicationResult_t = utils::BasicResult<FetchReplicationError, ReplicationData_t>;
|
||||
auto FetchReplicationData() -> FetchReplicationResult_t;
|
||||
|
||||
auto GetRole() const -> ReplicationRole {
|
||||
return std::holds_alternative<RoleReplicaData>(replication_data_) ? ReplicationRole::REPLICA
|
||||
: ReplicationRole::MAIN;
|
||||
auto GetRole() const -> replication_coordination_glue::ReplicationRole {
|
||||
return std::holds_alternative<RoleReplicaData>(replication_data_)
|
||||
? replication_coordination_glue::ReplicationRole::REPLICA
|
||||
: replication_coordination_glue::ReplicationRole::MAIN;
|
||||
}
|
||||
bool IsMain() const { return GetRole() == ReplicationRole::MAIN; }
|
||||
bool IsReplica() const { return GetRole() == ReplicationRole::REPLICA; }
|
||||
bool IsMain() const { return GetRole() == replication_coordination_glue::ReplicationRole::MAIN; }
|
||||
bool IsReplica() const { return GetRole() == replication_coordination_glue::ReplicationRole::REPLICA; }
|
||||
|
||||
bool HasDurability() const { return nullptr != durability_; }
|
||||
|
||||
|
@ -21,7 +21,7 @@
|
||||
|
||||
#include "replication/config.hpp"
|
||||
#include "replication/epoch.hpp"
|
||||
#include "replication/role.hpp"
|
||||
#include "replication_coordination_glue/role.hpp"
|
||||
|
||||
namespace memgraph::replication::durability {
|
||||
|
||||
|
@ -29,12 +29,14 @@ constexpr auto *kVersion = "durability_version";
|
||||
|
||||
void to_json(nlohmann::json &j, const ReplicationRoleEntry &p) {
|
||||
auto processMAIN = [&](MainRole const &main) {
|
||||
j = nlohmann::json{{kVersion, p.version}, {kReplicationRole, ReplicationRole::MAIN}, {kEpoch, main.epoch.id()}};
|
||||
j = nlohmann::json{{kVersion, p.version},
|
||||
{kReplicationRole, replication_coordination_glue::ReplicationRole::MAIN},
|
||||
{kEpoch, main.epoch.id()}};
|
||||
};
|
||||
auto processREPLICA = [&](ReplicaRole const &replica) {
|
||||
j = nlohmann::json{
|
||||
{kVersion, p.version},
|
||||
{kReplicationRole, ReplicationRole::REPLICA},
|
||||
{kReplicationRole, replication_coordination_glue::ReplicationRole::REPLICA},
|
||||
{kIpAddress, replica.config.ip_address},
|
||||
{kPort, replica.config.port}
|
||||
// TODO: SSL
|
||||
@ -47,17 +49,17 @@ void from_json(const nlohmann::json &j, ReplicationRoleEntry &p) {
|
||||
// This value did not exist in V1, hence default DurabilityVersion::V1
|
||||
DurabilityVersion version = j.value(kVersion, DurabilityVersion::V1);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
|
||||
ReplicationRole role;
|
||||
replication_coordination_glue::ReplicationRole role;
|
||||
j.at(kReplicationRole).get_to(role);
|
||||
switch (role) {
|
||||
case ReplicationRole::MAIN: {
|
||||
case replication_coordination_glue::ReplicationRole::MAIN: {
|
||||
auto json_epoch = j.value(kEpoch, std::string{});
|
||||
auto epoch = ReplicationEpoch{};
|
||||
if (!json_epoch.empty()) epoch.SetEpoch(json_epoch);
|
||||
p = ReplicationRoleEntry{.version = version, .role = MainRole{.epoch = std::move(epoch)}};
|
||||
break;
|
||||
}
|
||||
case ReplicationRole::REPLICA: {
|
||||
case memgraph::replication_coordination_glue::ReplicationRole::REPLICA: {
|
||||
std::string ip_address;
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
|
||||
uint16_t port;
|
||||
|
@ -5,6 +5,7 @@ target_sources(mg-repl_coord_glue
|
||||
PUBLIC
|
||||
messages.hpp
|
||||
mode.hpp
|
||||
role.hpp
|
||||
|
||||
PRIVATE
|
||||
messages.cpp
|
||||
|
@ -12,8 +12,8 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
namespace memgraph::replication {
|
||||
namespace memgraph::replication_coordination_glue {
|
||||
|
||||
// TODO: figure out a way of ensuring that usage of this type is never uninitialed/defaulted incorrectly to MAIN
|
||||
enum class ReplicationRole : uint8_t { MAIN, REPLICA };
|
||||
} // namespace memgraph::replication
|
||||
} // namespace memgraph::replication_coordination_glue
|
@ -288,7 +288,8 @@ DiskStorage::~DiskStorage() {
|
||||
|
||||
DiskStorage::DiskAccessor::DiskAccessor(auto tag, DiskStorage *storage, IsolationLevel isolation_level,
|
||||
StorageMode storage_mode)
|
||||
: Accessor(tag, storage, isolation_level, storage_mode, memgraph::replication::ReplicationRole::MAIN) {
|
||||
: Accessor(tag, storage, isolation_level, storage_mode,
|
||||
memgraph::replication_coordination_glue::ReplicationRole::MAIN) {
|
||||
rocksdb::WriteOptions write_options;
|
||||
auto txOptions = rocksdb::TransactionOptions{.set_snapshot = true};
|
||||
transaction_.disk_transaction_ = storage->kvstore_->db_->BeginTransaction(write_options, txOptions);
|
||||
@ -837,7 +838,8 @@ StorageInfo DiskStorage::GetBaseInfo(bool /* unused */) {
|
||||
return info;
|
||||
}
|
||||
|
||||
StorageInfo DiskStorage::GetInfo(bool force_dir, memgraph::replication::ReplicationRole replication_role) {
|
||||
StorageInfo DiskStorage::GetInfo(bool force_dir,
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role) {
|
||||
StorageInfo info = GetBaseInfo(force_dir);
|
||||
{
|
||||
auto access = Access(replication_role);
|
||||
@ -2007,7 +2009,7 @@ UniqueConstraints::DeletionStatus DiskStorage::DiskAccessor::DropUniqueConstrain
|
||||
}
|
||||
|
||||
Transaction DiskStorage::CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode,
|
||||
memgraph::replication::ReplicationRole /*is_main*/) {
|
||||
memgraph::replication_coordination_glue::ReplicationRole /*is_main*/) {
|
||||
/// We acquire the transaction engine lock here because we access (and
|
||||
/// modify) the transaction engine variables (`transaction_id` and
|
||||
/// `timestamp`) below.
|
||||
@ -2032,8 +2034,9 @@ uint64_t DiskStorage::CommitTimestamp(const std::optional<uint64_t> desired_comm
|
||||
return *desired_commit_timestamp;
|
||||
}
|
||||
|
||||
std::unique_ptr<Storage::Accessor> DiskStorage::Access(memgraph::replication::ReplicationRole /*replication_role*/,
|
||||
std::optional<IsolationLevel> override_isolation_level) {
|
||||
std::unique_ptr<Storage::Accessor> DiskStorage::Access(
|
||||
memgraph::replication_coordination_glue::ReplicationRole /*replication_role*/,
|
||||
std::optional<IsolationLevel> override_isolation_level) {
|
||||
auto isolation_level = override_isolation_level.value_or(isolation_level_);
|
||||
if (isolation_level != IsolationLevel::SNAPSHOT_ISOLATION) {
|
||||
throw utils::NotYetImplemented("Disk storage supports only SNAPSHOT isolation level.");
|
||||
@ -2042,7 +2045,7 @@ std::unique_ptr<Storage::Accessor> DiskStorage::Access(memgraph::replication::Re
|
||||
new DiskAccessor{Storage::Accessor::shared_access, this, isolation_level, storage_mode_});
|
||||
}
|
||||
std::unique_ptr<Storage::Accessor> DiskStorage::UniqueAccess(
|
||||
memgraph::replication::ReplicationRole /*replication_role*/,
|
||||
memgraph::replication_coordination_glue::ReplicationRole /*replication_role*/,
|
||||
std::optional<IsolationLevel> override_isolation_level) {
|
||||
auto isolation_level = override_isolation_level.value_or(isolation_level_);
|
||||
if (isolation_level != IsolationLevel::SNAPSHOT_ISOLATION) {
|
||||
|
@ -176,11 +176,11 @@ class DiskStorage final : public Storage {
|
||||
};
|
||||
|
||||
using Storage::Access;
|
||||
std::unique_ptr<Accessor> Access(memgraph::replication::ReplicationRole replication_role,
|
||||
std::unique_ptr<Accessor> Access(memgraph::replication_coordination_glue::ReplicationRole replication_role,
|
||||
std::optional<IsolationLevel> override_isolation_level) override;
|
||||
|
||||
using Storage::UniqueAccess;
|
||||
std::unique_ptr<Accessor> UniqueAccess(memgraph::replication::ReplicationRole replication_role,
|
||||
std::unique_ptr<Accessor> UniqueAccess(memgraph::replication_coordination_glue::ReplicationRole replication_role,
|
||||
std::optional<IsolationLevel> override_isolation_level) override;
|
||||
|
||||
/// Flushing methods
|
||||
@ -285,7 +285,7 @@ class DiskStorage final : public Storage {
|
||||
RocksDBStorage *GetRocksDBStorage() const { return kvstore_.get(); }
|
||||
|
||||
Transaction CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode,
|
||||
memgraph::replication::ReplicationRole replication_role) override;
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role) override;
|
||||
|
||||
void SetEdgeImportMode(EdgeImportMode edge_import_status);
|
||||
|
||||
@ -308,7 +308,8 @@ class DiskStorage final : public Storage {
|
||||
PropertyId property);
|
||||
|
||||
StorageInfo GetBaseInfo(bool force_directory) override;
|
||||
StorageInfo GetInfo(bool force_directory, memgraph::replication::ReplicationRole replication_role) override;
|
||||
StorageInfo GetInfo(bool force_directory,
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role) override;
|
||||
|
||||
void FreeMemory(std::unique_lock<utils::ResourceLock> /*lock*/) override {}
|
||||
|
||||
|
@ -176,7 +176,7 @@ InMemoryStorage::~InMemoryStorage() {
|
||||
|
||||
InMemoryStorage::InMemoryAccessor::InMemoryAccessor(auto tag, InMemoryStorage *storage, IsolationLevel isolation_level,
|
||||
StorageMode storage_mode,
|
||||
memgraph::replication::ReplicationRole replication_role)
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role)
|
||||
: Accessor(tag, storage, isolation_level, storage_mode, replication_role),
|
||||
config_(storage->config_.salient.items) {}
|
||||
InMemoryStorage::InMemoryAccessor::InMemoryAccessor(InMemoryAccessor &&other) noexcept
|
||||
@ -1278,8 +1278,9 @@ VerticesIterable InMemoryStorage::InMemoryAccessor::Vertices(
|
||||
mem_label_property_index->Vertices(label, property, lower_bound, upper_bound, view, storage_, &transaction_));
|
||||
}
|
||||
|
||||
Transaction InMemoryStorage::CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode,
|
||||
memgraph::replication::ReplicationRole replication_role) {
|
||||
Transaction InMemoryStorage::CreateTransaction(
|
||||
IsolationLevel isolation_level, StorageMode storage_mode,
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role) {
|
||||
// We acquire the transaction engine lock here because we access (and
|
||||
// modify) the transaction engine variables (`transaction_id` and
|
||||
// `timestamp`) below.
|
||||
@ -1294,7 +1295,7 @@ Transaction InMemoryStorage::CreateTransaction(IsolationLevel isolation_level, S
|
||||
// of any query on replica to the last commited transaction
|
||||
// which is timestamp_ as only commit of transaction with writes
|
||||
// can change the value of it.
|
||||
if (replication_role == memgraph::replication::ReplicationRole::MAIN) {
|
||||
if (replication_role == memgraph::replication_coordination_glue::ReplicationRole::MAIN) {
|
||||
start_timestamp = timestamp_++;
|
||||
} else {
|
||||
start_timestamp = timestamp_;
|
||||
@ -1678,7 +1679,8 @@ StorageInfo InMemoryStorage::GetBaseInfo(bool force_directory) {
|
||||
return info;
|
||||
}
|
||||
|
||||
StorageInfo InMemoryStorage::GetInfo(bool force_directory, memgraph::replication::ReplicationRole replication_role) {
|
||||
StorageInfo InMemoryStorage::GetInfo(bool force_directory,
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role) {
|
||||
StorageInfo info = GetBaseInfo(force_directory);
|
||||
{
|
||||
auto access = Access(replication_role); // TODO: override isolation level?
|
||||
@ -1999,15 +2001,15 @@ void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOpera
|
||||
}
|
||||
|
||||
utils::BasicResult<InMemoryStorage::CreateSnapshotError> InMemoryStorage::CreateSnapshot(
|
||||
memgraph::replication::ReplicationRole replication_role) {
|
||||
if (replication_role == memgraph::replication::ReplicationRole::REPLICA) {
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role) {
|
||||
if (replication_role == memgraph::replication_coordination_glue::ReplicationRole::REPLICA) {
|
||||
return InMemoryStorage::CreateSnapshotError::DisabledForReplica;
|
||||
}
|
||||
auto const &epoch = repl_storage_state_.epoch_;
|
||||
auto snapshot_creator = [this, &epoch]() {
|
||||
utils::Timer timer;
|
||||
auto transaction = CreateTransaction(IsolationLevel::SNAPSHOT_ISOLATION, storage_mode_,
|
||||
memgraph::replication::ReplicationRole::MAIN);
|
||||
memgraph::replication_coordination_glue::ReplicationRole::MAIN);
|
||||
durability::CreateSnapshot(this, &transaction, recovery_.snapshot_directory_, recovery_.wal_directory_, &vertices_,
|
||||
&edges_, uuid_, epoch, repl_storage_state_.history, &file_retainer_);
|
||||
// Finalize snapshot transaction.
|
||||
@ -2095,14 +2097,16 @@ utils::FileRetainer::FileLockerAccessor::ret_type InMemoryStorage::UnlockPath()
|
||||
return true;
|
||||
}
|
||||
|
||||
std::unique_ptr<Storage::Accessor> InMemoryStorage::Access(memgraph::replication::ReplicationRole replication_role,
|
||||
std::optional<IsolationLevel> override_isolation_level) {
|
||||
std::unique_ptr<Storage::Accessor> InMemoryStorage::Access(
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role,
|
||||
std::optional<IsolationLevel> override_isolation_level) {
|
||||
return std::unique_ptr<InMemoryAccessor>(new InMemoryAccessor{Storage::Accessor::shared_access, this,
|
||||
override_isolation_level.value_or(isolation_level_),
|
||||
storage_mode_, replication_role});
|
||||
}
|
||||
std::unique_ptr<Storage::Accessor> InMemoryStorage::UniqueAccess(
|
||||
memgraph::replication::ReplicationRole replication_role, std::optional<IsolationLevel> override_isolation_level) {
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role,
|
||||
std::optional<IsolationLevel> override_isolation_level) {
|
||||
return std::unique_ptr<InMemoryAccessor>(new InMemoryAccessor{Storage::Accessor::unique_access, this,
|
||||
override_isolation_level.value_or(isolation_level_),
|
||||
storage_mode_, replication_role});
|
||||
|
@ -73,7 +73,8 @@ class InMemoryStorage final : public Storage {
|
||||
friend class InMemoryStorage;
|
||||
|
||||
explicit InMemoryAccessor(auto tag, InMemoryStorage *storage, IsolationLevel isolation_level,
|
||||
StorageMode storage_mode, memgraph::replication::ReplicationRole replication_role);
|
||||
StorageMode storage_mode,
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role);
|
||||
|
||||
public:
|
||||
InMemoryAccessor(const InMemoryAccessor &) = delete;
|
||||
@ -322,10 +323,10 @@ class InMemoryStorage final : public Storage {
|
||||
};
|
||||
|
||||
using Storage::Access;
|
||||
std::unique_ptr<Accessor> Access(memgraph::replication::ReplicationRole replication_role,
|
||||
std::unique_ptr<Accessor> Access(memgraph::replication_coordination_glue::ReplicationRole replication_role,
|
||||
std::optional<IsolationLevel> override_isolation_level) override;
|
||||
using Storage::UniqueAccess;
|
||||
std::unique_ptr<Accessor> UniqueAccess(memgraph::replication::ReplicationRole replication_role,
|
||||
std::unique_ptr<Accessor> UniqueAccess(memgraph::replication_coordination_glue::ReplicationRole replication_role,
|
||||
std::optional<IsolationLevel> override_isolation_level) override;
|
||||
|
||||
void FreeMemory(std::unique_lock<utils::ResourceLock> main_guard) override;
|
||||
@ -335,12 +336,12 @@ class InMemoryStorage final : public Storage {
|
||||
utils::FileRetainer::FileLockerAccessor::ret_type UnlockPath();
|
||||
|
||||
utils::BasicResult<InMemoryStorage::CreateSnapshotError> CreateSnapshot(
|
||||
memgraph::replication::ReplicationRole replication_role);
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role);
|
||||
|
||||
void CreateSnapshotHandler(std::function<utils::BasicResult<InMemoryStorage::CreateSnapshotError>()> cb);
|
||||
|
||||
Transaction CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode,
|
||||
memgraph::replication::ReplicationRole replication_role) override;
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role) override;
|
||||
|
||||
void SetStorageMode(StorageMode storage_mode);
|
||||
|
||||
@ -365,7 +366,8 @@ class InMemoryStorage final : public Storage {
|
||||
void FinalizeWalFile();
|
||||
|
||||
StorageInfo GetBaseInfo(bool force_directory) override;
|
||||
StorageInfo GetInfo(bool force_directory, memgraph::replication::ReplicationRole replication_role) override;
|
||||
StorageInfo GetInfo(bool force_directory,
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role) override;
|
||||
|
||||
/// Return true in all cases excepted if any sync replicas have not sent confirmation.
|
||||
[[nodiscard]] bool AppendToWal(const Transaction &transaction, uint64_t final_commit_timestamp,
|
||||
|
@ -49,7 +49,8 @@ Storage::Storage(Config config, StorageMode storage_mode)
|
||||
}
|
||||
|
||||
Storage::Accessor::Accessor(SharedAccess /* tag */, Storage *storage, IsolationLevel isolation_level,
|
||||
StorageMode storage_mode, memgraph::replication::ReplicationRole replication_role)
|
||||
StorageMode storage_mode,
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role)
|
||||
: storage_(storage),
|
||||
// The lock must be acquired before creating the transaction object to
|
||||
// prevent freshly created transactions from dangling in an active state
|
||||
@ -61,7 +62,8 @@ Storage::Accessor::Accessor(SharedAccess /* tag */, Storage *storage, IsolationL
|
||||
creation_storage_mode_(storage_mode) {}
|
||||
|
||||
Storage::Accessor::Accessor(UniqueAccess /* tag */, Storage *storage, IsolationLevel isolation_level,
|
||||
StorageMode storage_mode, memgraph::replication::ReplicationRole replication_role)
|
||||
StorageMode storage_mode,
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role)
|
||||
: storage_(storage),
|
||||
// The lock must be acquired before creating the transaction object to
|
||||
// prevent freshly created transactions from dangling in an active state
|
||||
|
@ -145,9 +145,9 @@ class Storage {
|
||||
} unique_access;
|
||||
|
||||
Accessor(SharedAccess /* tag */, Storage *storage, IsolationLevel isolation_level, StorageMode storage_mode,
|
||||
memgraph::replication::ReplicationRole replication_role);
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role);
|
||||
Accessor(UniqueAccess /* tag */, Storage *storage, IsolationLevel isolation_level, StorageMode storage_mode,
|
||||
memgraph::replication::ReplicationRole replication_role);
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role);
|
||||
Accessor(const Accessor &) = delete;
|
||||
Accessor &operator=(const Accessor &) = delete;
|
||||
Accessor &operator=(Accessor &&other) = delete;
|
||||
@ -328,16 +328,17 @@ class Storage {
|
||||
|
||||
void FreeMemory() { FreeMemory({}); }
|
||||
|
||||
virtual std::unique_ptr<Accessor> Access(memgraph::replication::ReplicationRole replication_role,
|
||||
virtual std::unique_ptr<Accessor> Access(memgraph::replication_coordination_glue::ReplicationRole replication_role,
|
||||
std::optional<IsolationLevel> override_isolation_level) = 0;
|
||||
|
||||
std::unique_ptr<Accessor> Access(memgraph::replication::ReplicationRole replication_role) {
|
||||
std::unique_ptr<Accessor> Access(memgraph::replication_coordination_glue::ReplicationRole replication_role) {
|
||||
return Access(replication_role, {});
|
||||
}
|
||||
|
||||
virtual std::unique_ptr<Accessor> UniqueAccess(memgraph::replication::ReplicationRole replication_role,
|
||||
std::optional<IsolationLevel> override_isolation_level) = 0;
|
||||
std::unique_ptr<Accessor> UniqueAccess(memgraph::replication::ReplicationRole replication_role) {
|
||||
virtual std::unique_ptr<Accessor> UniqueAccess(
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role,
|
||||
std::optional<IsolationLevel> override_isolation_level) = 0;
|
||||
std::unique_ptr<Accessor> UniqueAccess(memgraph::replication_coordination_glue::ReplicationRole replication_role) {
|
||||
return UniqueAccess(replication_role, {});
|
||||
}
|
||||
|
||||
@ -356,10 +357,11 @@ class Storage {
|
||||
return GetBaseInfo(force_dir);
|
||||
}
|
||||
|
||||
virtual StorageInfo GetInfo(bool force_directory, memgraph::replication::ReplicationRole replication_role) = 0;
|
||||
virtual StorageInfo GetInfo(bool force_directory,
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role) = 0;
|
||||
|
||||
virtual Transaction CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode,
|
||||
memgraph::replication::ReplicationRole replication_role) = 0;
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role) = 0;
|
||||
|
||||
virtual void PrepareForNewEpoch() = 0;
|
||||
|
||||
|
@ -17,7 +17,7 @@
|
||||
#include "storage/v2/inmemory/storage.hpp"
|
||||
#include "storage/v2/storage.hpp"
|
||||
|
||||
using memgraph::replication::ReplicationRole;
|
||||
using memgraph::replication_coordination_glue::ReplicationRole;
|
||||
// The following classes are wrappers for memgraph::utils::MemoryResource, so that we can
|
||||
// use BENCHMARK_TEMPLATE
|
||||
|
||||
|
@ -33,7 +33,7 @@
|
||||
#include "query/interpreter.hpp"
|
||||
#include "storage/v2/inmemory/storage.hpp"
|
||||
|
||||
using memgraph::replication::ReplicationRole;
|
||||
using memgraph::replication_coordination_glue::ReplicationRole;
|
||||
|
||||
// The following classes are wrappers for memgraph::utils::MemoryResource, so that we can
|
||||
// use BENCHMARK_TEMPLATE
|
||||
|
@ -20,7 +20,7 @@
|
||||
#include "query/plan/vertex_count_cache.hpp"
|
||||
#include "storage/v2/inmemory/storage.hpp"
|
||||
|
||||
using memgraph::replication::ReplicationRole;
|
||||
using memgraph::replication_coordination_glue::ReplicationRole;
|
||||
|
||||
// Add chained MATCH (node1) -- (node2), MATCH (node2) -- (node3) ... clauses.
|
||||
static memgraph::query::CypherQuery *AddChainedMatches(int num_matches, memgraph::query::AstStorage &storage) {
|
||||
|
@ -17,7 +17,7 @@
|
||||
#include "storage/v2/storage.hpp"
|
||||
#include "utils/timer.hpp"
|
||||
|
||||
using memgraph::replication::ReplicationRole;
|
||||
using memgraph::replication_coordination_glue::ReplicationRole;
|
||||
|
||||
// This benchmark should be run for a fixed amount of time that is
|
||||
// large compared to GC interval to make the output relevant.
|
||||
|
@ -17,7 +17,7 @@
|
||||
#include "storage/v2/storage.hpp"
|
||||
#include "utils/timer.hpp"
|
||||
|
||||
using memgraph::replication::ReplicationRole;
|
||||
using memgraph::replication_coordination_glue::ReplicationRole;
|
||||
|
||||
// This benchmark should be run for a fixed amount of time that is
|
||||
// large compared to GC interval to make the output relevant.
|
||||
|
@ -19,7 +19,7 @@
|
||||
#include "storage/v2/storage_error.hpp"
|
||||
#include "utils/thread.hpp"
|
||||
|
||||
using memgraph::replication::ReplicationRole;
|
||||
using memgraph::replication_coordination_glue::ReplicationRole;
|
||||
|
||||
const uint64_t kNumVerifiers = 5;
|
||||
const uint64_t kNumMutators = 1;
|
||||
|
@ -16,7 +16,7 @@
|
||||
#include "storage/v2/constraints/constraints.hpp"
|
||||
#include "storage/v2/inmemory/storage.hpp"
|
||||
|
||||
using memgraph::replication::ReplicationRole;
|
||||
using memgraph::replication_coordination_glue::ReplicationRole;
|
||||
|
||||
const int kNumThreads = 8;
|
||||
|
||||
|
@ -71,33 +71,33 @@ def test_show_replication_cluster(connection):
|
||||
|
||||
# We leave some time for the coordinator to realise the replicas are down.
|
||||
def retrieve_data():
|
||||
return set(execute_and_fetch_all(cursor, "SHOW REPLICATION CLUSTER;"))
|
||||
return sorted(list(execute_and_fetch_all(cursor, "SHOW REPLICATION CLUSTER;")))
|
||||
|
||||
expected_data = {
|
||||
expected_data = [
|
||||
("instance_1", "127.0.0.1:10011", True, "replica"),
|
||||
("instance_2", "127.0.0.1:10012", True, "replica"),
|
||||
("instance_3", "127.0.0.1:10013", True, "main"),
|
||||
}
|
||||
]
|
||||
mg_sleep_and_assert(expected_data, retrieve_data)
|
||||
|
||||
# 3.
|
||||
interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_1")
|
||||
|
||||
expected_data = {
|
||||
expected_data = [
|
||||
("instance_1", "127.0.0.1:10011", False, "replica"),
|
||||
("instance_2", "127.0.0.1:10012", True, "replica"),
|
||||
("instance_3", "127.0.0.1:10013", True, "main"),
|
||||
("instance_1", "127.0.0.1:10011", False, "replica"),
|
||||
}
|
||||
]
|
||||
mg_sleep_and_assert(expected_data, retrieve_data)
|
||||
|
||||
# 4.
|
||||
interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_3")
|
||||
|
||||
expected_data = {
|
||||
("instance_2", "127.0.0.1:10012", True, "replica"),
|
||||
expected_data = [
|
||||
("instance_1", "127.0.0.1:10011", False, "replica"),
|
||||
("instance_2", "127.0.0.1:10012", True, "replica"),
|
||||
("instance_3", "127.0.0.1:10013", False, "main"),
|
||||
}
|
||||
]
|
||||
mg_sleep_and_assert(expected_data, retrieve_data)
|
||||
|
||||
|
||||
@ -113,44 +113,44 @@ def test_simple_client_initiated_failover(connection):
|
||||
|
||||
# 2.
|
||||
main_cursor = connection(7687, "instance_3").cursor()
|
||||
expected_data_on_main = {
|
||||
expected_data_on_main = [
|
||||
("instance_1", "127.0.0.1:10001", "sync", 0, 0, "ready"),
|
||||
("instance_2", "127.0.0.1:10002", "sync", 0, 0, "ready"),
|
||||
}
|
||||
actual_data_on_main = set(execute_and_fetch_all(main_cursor, "SHOW REPLICAS;"))
|
||||
]
|
||||
actual_data_on_main = sorted(list(execute_and_fetch_all(main_cursor, "SHOW REPLICAS;")))
|
||||
assert actual_data_on_main == expected_data_on_main
|
||||
|
||||
interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_3")
|
||||
coord_cursor = connection(7690, "coordinator").cursor()
|
||||
|
||||
def retrieve_data_show_repl_cluster():
|
||||
return set(execute_and_fetch_all(coord_cursor, "SHOW REPLICATION CLUSTER;"))
|
||||
return sorted(list(execute_and_fetch_all(coord_cursor, "SHOW REPLICATION CLUSTER;")))
|
||||
|
||||
expected_data_on_coord = {
|
||||
expected_data_on_coord = [
|
||||
("instance_1", "127.0.0.1:10011", True, "replica"),
|
||||
("instance_2", "127.0.0.1:10012", True, "replica"),
|
||||
("instance_3", "127.0.0.1:10013", False, "main"),
|
||||
}
|
||||
]
|
||||
mg_sleep_and_assert(expected_data_on_coord, retrieve_data_show_repl_cluster)
|
||||
|
||||
# 3.
|
||||
execute_and_fetch_all(coord_cursor, "DO FAILOVER")
|
||||
|
||||
expected_data_on_coord = {
|
||||
expected_data_on_coord = [
|
||||
("instance_1", "127.0.0.1:10011", True, "main"),
|
||||
("instance_2", "127.0.0.1:10012", True, "replica"),
|
||||
}
|
||||
]
|
||||
mg_sleep_and_assert(expected_data_on_coord, retrieve_data_show_repl_cluster)
|
||||
|
||||
# 4.
|
||||
new_main_cursor = connection(7688, "instance_1").cursor()
|
||||
|
||||
def retrieve_data_show_replicas():
|
||||
return set(execute_and_fetch_all(new_main_cursor, "SHOW REPLICAS;"))
|
||||
return sorted(list(execute_and_fetch_all(new_main_cursor, "SHOW REPLICAS;")))
|
||||
|
||||
expected_data_on_new_main = {
|
||||
expected_data_on_new_main = [
|
||||
("instance_2", "127.0.0.1:10002", "sync", 0, 0, "ready"),
|
||||
}
|
||||
]
|
||||
mg_sleep_and_assert(expected_data_on_new_main, retrieve_data_show_replicas)
|
||||
|
||||
|
||||
@ -164,13 +164,13 @@ def test_failover_fails_all_replicas_down(connection):
|
||||
coord_cursor = connection(7690, "coordinator").cursor()
|
||||
|
||||
def retrieve_data():
|
||||
return set(execute_and_fetch_all(coord_cursor, "SHOW REPLICATION CLUSTER;"))
|
||||
return sorted(list(execute_and_fetch_all(coord_cursor, "SHOW REPLICATION CLUSTER;")))
|
||||
|
||||
expected_data_on_coord = {
|
||||
expected_data_on_coord = [
|
||||
("instance_1", "127.0.0.1:10011", False, "replica"),
|
||||
("instance_2", "127.0.0.1:10012", False, "replica"),
|
||||
("instance_3", "127.0.0.1:10013", False, "main"),
|
||||
}
|
||||
]
|
||||
mg_sleep_and_assert(expected_data_on_coord, retrieve_data)
|
||||
|
||||
# 4.
|
||||
@ -187,13 +187,13 @@ def test_failover_fails_main_is_alive(connection):
|
||||
coord_cursor = connection(7690, "coordinator").cursor()
|
||||
|
||||
def retrieve_data():
|
||||
return set(execute_and_fetch_all(coord_cursor, "SHOW REPLICATION CLUSTER;"))
|
||||
return sorted(list(execute_and_fetch_all(coord_cursor, "SHOW REPLICATION CLUSTER;")))
|
||||
|
||||
expected_data_on_coord = {
|
||||
expected_data_on_coord = [
|
||||
("instance_1", "127.0.0.1:10011", True, "replica"),
|
||||
("instance_2", "127.0.0.1:10012", True, "replica"),
|
||||
("instance_3", "127.0.0.1:10013", True, "main"),
|
||||
}
|
||||
]
|
||||
mg_sleep_and_assert(expected_data_on_coord, retrieve_data)
|
||||
|
||||
with pytest.raises(Exception) as e:
|
||||
|
@ -14,7 +14,7 @@
|
||||
#include <gflags/gflags.h>
|
||||
|
||||
#include "storage/v2/inmemory/storage.hpp"
|
||||
using memgraph::replication::ReplicationRole;
|
||||
using memgraph::replication_coordination_glue::ReplicationRole;
|
||||
DECLARE_int32(min_log_level);
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
|
@ -23,7 +23,7 @@
#include "storage/v2/inmemory/storage.hpp"
#include "storage/v2/storage.hpp"
#include "storage/v2/vertex_accessor.hpp"
using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;
/**
 * It is possible to run test with custom seed with:
 * RC_PARAMS="seed=1" ./random_graph

@ -22,7 +22,7 @@
#include "storage/v2/disk/storage.hpp"
#include "storage/v2/inmemory/storage.hpp"
#include "storage/v2/view.hpp"
using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;
#ifdef MG_ENTERPRISE
template <typename StorageType>
class FineGrainedAuthCheckerFixture : public testing::Test {

@ -43,7 +43,7 @@ class VertexDb : public Database {
}

std::unique_ptr<memgraph::storage::Storage::Accessor> Access() override {
return db_->Access(memgraph::replication::ReplicationRole::MAIN);
return db_->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN);
}

std::unique_ptr<LogicalOperator> MakeBfsOperator(Symbol source_sym, Symbol sink_sym, Symbol edge_sym,

@ -32,7 +32,7 @@ class SingleNodeDb : public Database {
}

std::unique_ptr<memgraph::storage::Storage::Accessor> Access() override {
return db_->Access(memgraph::replication::ReplicationRole::MAIN);
return db_->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN);
}

std::unique_ptr<LogicalOperator> MakeBfsOperator(Symbol source_sym, Symbol sink_sym, Symbol edge_sym,

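The fixtures that open storage accessors change only at the call site. The snippet below is a self-contained stand-in (FakeStorage and FakeAccessor are hypothetical, not the memgraph::storage API) showing the call shape these tests end up with.

#include <cstdint>
#include <memory>

namespace memgraph::replication_coordination_glue {
// Assumed enum shape, for illustration only.
enum class ReplicationRole : std::uint8_t { MAIN, REPLICA };
}  // namespace memgraph::replication_coordination_glue

// Hypothetical stand-ins for the real storage types in this sketch.
struct FakeAccessor {};
struct FakeStorage {
  std::unique_ptr<FakeAccessor> Access(memgraph::replication_coordination_glue::ReplicationRole /*role*/) {
    return std::make_unique<FakeAccessor>();
  }
};

int main() {
  FakeStorage db;
  // The tests now spell the full glue namespace at every Access() call:
  auto acc = db.Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN);
  return acc ? 0 : 1;
}
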
@ -182,7 +182,7 @@ void TestVertexAndEdgeWithDifferentStorages(std::unique_ptr<memgraph::storage::S
output.clear();

// create vertex
auto dba = db->Access(memgraph::replication::ReplicationRole::MAIN);
auto dba = db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN);
auto va1 = dba->CreateVertex();
auto va2 = dba->CreateVertex();
auto l1 = dba->NameToLabel("label1");

@ -20,7 +20,7 @@
#include "storage/v2/property_value.hpp"
#include "storage/v2/view.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;

class ClearingOldDiskDataTest : public ::testing::Test {
public:

@ -43,7 +43,8 @@ struct CppApiTestFixture : public ::testing::Test {
}

memgraph::query::DbAccessor &CreateDbAccessor(const memgraph::storage::IsolationLevel isolationLevel) {
accessors_.push_back(storage->Access(memgraph::replication::ReplicationRole::MAIN, isolationLevel));
accessors_.push_back(
storage->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN, isolationLevel));
db_accessors_.emplace_back(accessors_.back().get());
return db_accessors_.back();
}

@ -165,8 +165,8 @@ TYPED_TEST(InfoTest, InfoCheck) {
ASSERT_FALSE(unique_acc->Commit().HasError());
}

const auto &info =
db_acc->GetInfo(true, memgraph::replication::ReplicationRole::MAIN);  // force to use configured directory
const auto &info = db_acc->GetInfo(
true, memgraph::replication_coordination_glue::ReplicationRole::MAIN);  // force to use configured directory

ASSERT_EQ(info.storage_info.vertex_count, 5);
ASSERT_EQ(info.storage_info.edge_count, 2);

@ -43,7 +43,7 @@ class PrintToJsonTest : public ::testing::Test {
PrintToJsonTest()
: config(disk_test_utils::GenerateOnDiskConfig(testSuite)),
db(new StorageType(config)),
dba_storage(db->Access(memgraph::replication::ReplicationRole::MAIN)),
dba_storage(db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)),
dba(dba_storage.get()) {}

~PrintToJsonTest() override {

@ -23,7 +23,7 @@

using namespace memgraph::query;
using namespace memgraph::query::plan;
using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;
using CardParam = CostEstimator<memgraph::query::DbAccessor>::CardParam;
using CostParam = CostEstimator<memgraph::query::DbAccessor>::CostParam;
using MiscParam = CostEstimator<memgraph::query::DbAccessor>::MiscParam;

@ -141,7 +141,7 @@ DatabaseState GetState(memgraph::storage::Storage *db) {
// Capture all vertices
std::map<memgraph::storage::Gid, int64_t> gid_mapping;
std::set<DatabaseState::Vertex> vertices;
auto dba = db->Access(memgraph::replication::ReplicationRole::MAIN);
auto dba = db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN);
for (const auto &vertex : dba->Vertices(memgraph::storage::View::NEW)) {
std::set<std::string, std::less<>> labels;
auto maybe_labels = vertex.Labels(memgraph::storage::View::NEW);

@ -1105,7 +1105,7 @@ TYPED_TEST(DumpTest, MultiplePartialPulls) {
}

TYPED_TEST(DumpTest, DumpDatabaseWithTriggers) {
auto acc = this->db->storage()->Access(memgraph::replication::ReplicationRole::MAIN);
auto acc = this->db->storage()->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN);
memgraph::query::DbAccessor dba(acc.get());
{
auto trigger_store = this->db.get()->trigger_store();

@ -67,7 +67,7 @@ class ExpressionEvaluatorTest : public ::testing::Test {
ExpressionEvaluatorTest()
: config(disk_test_utils::GenerateOnDiskConfig(testSuite)),
db(new StorageType(config)),
storage_dba(db->Access(memgraph::replication::ReplicationRole::MAIN)),
storage_dba(db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)),
dba(storage_dba.get()) {}

~ExpressionEvaluatorTest() override {

@ -39,7 +39,7 @@ class HintProviderSuite : public ::testing::Test {
int symbol_count = 0;

void SetUp() {
storage_dba.emplace(db->Access(memgraph::replication::ReplicationRole::MAIN));
storage_dba.emplace(db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN));
dba.emplace(storage_dba->get());
}

@ -25,7 +25,7 @@
#include "storage/v2/disk/storage.hpp"
#include "storage/v2/inmemory/storage.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;

using namespace memgraph::query;
using namespace memgraph::query::plan;

@ -31,7 +31,7 @@

#include "query_plan_common.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;
using namespace memgraph::query;
using namespace memgraph::query::plan;

@ -38,7 +38,7 @@

using namespace memgraph::query;
using namespace memgraph::query::plan;
using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;

template <typename StorageType>
class QueryPlanTest : public testing::Test {

@ -42,7 +42,7 @@

using namespace memgraph::query;
using namespace memgraph::query::plan;
using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;

const std::string testSuite = "query_plan_match_filter_return";

@ -37,7 +37,7 @@ class OperatorToStringTest : public ::testing::Test {
OperatorToStringTest()
: config(disk_test_utils::GenerateOnDiskConfig(testSuite)),
db(new StorageType(config)),
dba_storage(db->Access(memgraph::replication::ReplicationRole::MAIN)),
dba_storage(db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)),
dba(dba_storage.get()) {}

~OperatorToStringTest() override {

@ -37,7 +37,7 @@ class ReadWriteTypeCheckTest : public ::testing::Test {
memgraph::storage::Config config = disk_test_utils::GenerateOnDiskConfig(testSuite);
std::unique_ptr<memgraph::storage::Storage> db{new StorageType(config)};
std::unique_ptr<memgraph::storage::Storage::Accessor> dba_storage{
db->Access(memgraph::replication::ReplicationRole::MAIN)};
db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)};
memgraph::query::DbAccessor dba{dba_storage.get()};

void TearDown() override {

@ -18,7 +18,7 @@
#include "query/plan/operator.hpp"
#include "storage/v2/disk/storage.hpp"
#include "storage/v2/inmemory/storage.hpp"
using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;

template <typename StorageType>
class QueryPlan : public testing::Test {

@ -37,7 +37,7 @@ class ExpressionPrettyPrinterTest : public ::testing::Test {
memgraph::storage::Config config = disk_test_utils::GenerateOnDiskConfig(testSuite);
std::unique_ptr<memgraph::storage::Storage> db{new StorageType(config)};
std::unique_ptr<memgraph::storage::Storage::Accessor> storage_dba{
db->Access(memgraph::replication::ReplicationRole::MAIN)};
db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)};
memgraph::query::DbAccessor dba{storage_dba.get()};
AstStorage storage;

@ -23,7 +23,7 @@
#include "disk_test_utils.hpp"
#include "test_utils.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;

template <typename StorageType>
class CypherType : public testing::Test {

@ -21,7 +21,7 @@
#include "storage/v2/inmemory/storage.hpp"
#include "test_utils.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;

template <typename StorageType>
class PyModule : public testing::Test {

@ -34,7 +34,7 @@
#include "utils/memory.hpp"
#include "utils/variant_helpers.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;

#define EXPECT_SUCCESS(...) EXPECT_EQ(__VA_ARGS__, mgp_error::MGP_ERROR_NO_ERROR)

@ -35,7 +35,7 @@ class TestSymbolGenerator : public ::testing::Test {
memgraph::storage::Config config = disk_test_utils::GenerateOnDiskConfig(testSuite);
std::unique_ptr<memgraph::storage::Storage> db{new StorageType(config)};
std::unique_ptr<memgraph::storage::Storage::Accessor> storage_dba{
db->Access(memgraph::replication::ReplicationRole::MAIN)};
db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)};
memgraph::query::DbAccessor dba{storage_dba.get()};
AstStorage storage;

@ -29,7 +29,7 @@
#include "utils/exceptions.hpp"
#include "utils/memory.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;

namespace {
const std::unordered_set<memgraph::query::TriggerEventType> kAllEventTypes{

@ -28,7 +28,7 @@

#include "formatters.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;
using namespace memgraph::query::plan;
using memgraph::query::AstStorage;
using Type = memgraph::query::EdgeAtom::Type;

@ -30,7 +30,7 @@
#include "storage/v2/view.hpp"
#include "utils/rocksdb_serialization.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;

// NOLINTNEXTLINE(google-build-using-namespace)
using namespace memgraph::storage;

@ -23,7 +23,7 @@
#include "storage/v2/vertex_accessor.hpp"
#include "storage_test_utils.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;

using testing::Types;
using testing::UnorderedElementsAre;

@ -23,7 +23,7 @@

#include "disk_test_utils.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;

// NOLINTNEXTLINE(google-build-using-namespace)
using namespace memgraph::storage;

@ -48,7 +48,7 @@
#include "utils/timer.hpp"
#include "utils/uuid.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;
using testing::Contains;
using testing::UnorderedElementsAre;

@ -17,7 +17,7 @@
#include "storage/v2/inmemory/storage.hpp"
#include "storage/v2/storage.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;
using testing::UnorderedElementsAre;

class StorageEdgeTest : public ::testing::TestWithParam<bool> {};

@ -18,7 +18,7 @@
#include "storage/v2/disk/storage.hpp"
#include "storage/v2/storage.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;
using testing::UnorderedElementsAre;

class StorageEdgeTest : public ::testing::TestWithParam<bool> {};

@ -14,7 +14,7 @@

#include "storage/v2/inmemory/storage.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;
using testing::UnorderedElementsAre;

// TODO: The point of these is not to test GC fully, these are just simple

@ -22,7 +22,7 @@

// NOLINTNEXTLINE(google-build-using-namespace)
using namespace memgraph::storage;
using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;
constexpr auto testSuite = "storage_v2_get_info";
const std::filesystem::path storage_directory{std::filesystem::temp_directory_path() / testSuite};

@ -25,7 +25,7 @@

// NOLINTNEXTLINE(google-build-using-namespace)
using namespace memgraph::storage;
using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;
using testing::IsEmpty;
using testing::Types;
using testing::UnorderedElementsAre;

@ -16,7 +16,7 @@
#include "storage/v2/inmemory/storage.hpp"
#include "storage/v2/isolation_level.hpp"
#include "utils/on_scope_exit.hpp"
using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;

namespace {
int64_t VerticesCount(memgraph::storage::Storage::Accessor *accessor) {

@ -39,9 +39,9 @@ using memgraph::dbms::RegisterReplicaError;
using memgraph::dbms::ReplicationHandler;
using memgraph::dbms::UnregisterReplicaResult;
using memgraph::replication::ReplicationClientConfig;
using memgraph::replication::ReplicationRole;
using memgraph::replication::ReplicationServerConfig;
using memgraph::replication_coordination_glue::ReplicationMode;
using memgraph::replication_coordination_glue::ReplicationRole;
using memgraph::storage::Config;
using memgraph::storage::EdgeAccessor;
using memgraph::storage::Gid;

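This hunk is the one place where the relocated names sit next to using-declarations that stay in memgraph::replication (the client/server config types). The sketch below mirrors that split with stand-in types; the enum and struct bodies are invented for illustration and are not the real headers.

#include <cstdint>
#include <string>

namespace memgraph::replication_coordination_glue {
// Assumed enum shapes, for illustration only.
enum class ReplicationRole : std::uint8_t { MAIN, REPLICA };
enum class ReplicationMode : std::uint8_t { SYNC, ASYNC };
}  // namespace memgraph::replication_coordination_glue

namespace memgraph::replication {
// Hypothetical stand-ins for the config types that keep their old namespace.
struct ReplicationClientConfig { std::string name; };
struct ReplicationServerConfig { std::string ip_address; };
}  // namespace memgraph::replication

using memgraph::replication::ReplicationClientConfig;
using memgraph::replication::ReplicationServerConfig;
using memgraph::replication_coordination_glue::ReplicationMode;
using memgraph::replication_coordination_glue::ReplicationRole;

int main() {
  ReplicationClientConfig client{"instance_1"};
  ReplicationServerConfig server{"127.0.0.1"};
  // Roles and modes now resolve through the glue namespace; configs do not.
  return (ReplicationRole::REPLICA != ReplicationRole::MAIN &&
          ReplicationMode::SYNC != ReplicationMode::ASYNC &&
          !client.name.empty() && !server.ip_address.empty())
             ? 0
             : 1;
}
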
@ -44,7 +44,7 @@ class ShowStorageInfoTest : public testing::Test {
};

TEST_F(ShowStorageInfoTest, CountOnAbort) {
auto acc = this->storage->Access(memgraph::replication::ReplicationRole::MAIN);
auto acc = this->storage->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN);
auto src_vertex = acc->CreateVertex();
auto dest_vertex = acc->CreateVertex();
auto et = acc->NameToEdgeType("et5");

@ -44,8 +44,8 @@ TEST_P(StorageModeTest, Mode) {
.transaction{.isolation_level = memgraph::storage::IsolationLevel::SNAPSHOT_ISOLATION}});

static_cast<memgraph::storage::InMemoryStorage *>(storage.get())->SetStorageMode(storage_mode);
auto creator = storage->Access(memgraph::replication::ReplicationRole::MAIN);
auto other_analytics_mode_reader = storage->Access(memgraph::replication::ReplicationRole::MAIN);
auto creator = storage->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN);
auto other_analytics_mode_reader = storage->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN);

ASSERT_EQ(CountVertices(*creator, memgraph::storage::View::OLD), 0);
ASSERT_EQ(CountVertices(*other_analytics_mode_reader, memgraph::storage::View::OLD), 0);

@ -38,7 +38,7 @@ class AllTypesFixture : public testing::Test {
memgraph::storage::Config config_{disk_test_utils::GenerateOnDiskConfig(testSuite)};
std::unique_ptr<memgraph::storage::Storage> db{new StorageType(config_)};
std::unique_ptr<memgraph::storage::Storage::Accessor> storage_dba{
db->Access(memgraph::replication::ReplicationRole::MAIN)};
db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)};
memgraph::query::DbAccessor dba{storage_dba.get()};

void SetUp() override {