Merge branch 'master' into split_empty_delimiter_fix
commit 9211c9fc7d
@@ -20,14 +20,18 @@ if [ ! -f "$INPUT" ]; then
fi

echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} BEGIN and COMMIT are required because variables share the same name (e.g. row)"
echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} CONSTRAINTS are just skipped -> ${COLOR_RED}please create consraints manually if needed${COLOR_NULL}"
echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} CONSTRAINTS are just skipped -> ${COLOR_RED}please create constraints manually if needed${COLOR_NULL}"

echo 'CREATE INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' > "$OUTPUT"

sed -e 's/^:begin/BEGIN/g; s/^BEGIN$/BEGIN;/g;' \
-e 's/^:commit/COMMIT/g; s/^COMMIT$/COMMIT;/g;' \
-e '/^CALL/d; /^SCHEMA AWAIT/d;' \
-e 's/CREATE RANGE INDEX FOR (n:/CREATE INDEX ON :/g;' \
-e 's/) ON (n./(/g;' \
-e '/^CREATE CONSTRAINT/d; /^DROP CONSTRAINT/d;' "$INPUT" > "$OUTPUT"
-e '/^CREATE CONSTRAINT/d; /^DROP CONSTRAINT/d;' "$INPUT" >> "$OUTPUT"

echo 'DROP INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' >> "$OUTPUT"

echo ""
echo -e "${COLOR_GREEN}DONE!${COLOR_NULL} Please find Memgraph compatible cypherl|.cypher file under $OUTPUT"
import/n2mg_separate_files_cypherl.sh (new executable file, 61 lines)
@@ -0,0 +1,61 @@
#!/bin/bash -e
COLOR_ORANGE="\e[38;5;208m"
COLOR_GREEN="\e[38;5;35m"
COLOR_RED="\e[0;31m"
COLOR_NULL="\e[0m"

print_help() {
echo -e "${COLOR_ORANGE}HOW TO RUN:${COLOR_NULL} $0 input_file_schema_path input_file_nodes_path input_file_relationships_path input_file_cleanup_path output_file_path"
exit 1
}

if [ "$#" -ne 5 ]; then
print_help
fi
INPUT_SCHEMA="$1"
INPUT_NODES="$2"
INPUT_RELATIONSHIPS="$3"
INPUT_CLEANUP="$4"
OUTPUT="$5"

if [ ! -f "$INPUT_SCHEMA" ]; then
echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
print_help
fi

if [ ! -f "$INPUT_NODES" ]; then
echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
print_help
fi

if [ ! -f "$INPUT_RELATIONSHIPS" ]; then
echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
print_help
fi

if [ ! -f "$INPUT_CLEANUP" ]; then
echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
print_help
fi

echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} BEGIN and COMMIT are required because variables share the same name (e.g. row)"
echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} CONSTRAINTS are just skipped -> ${COLOR_RED}please create constraints manually if needed${COLOR_NULL}"


echo 'CREATE INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' > "$OUTPUT"

sed -e 's/CREATE RANGE INDEX FOR (n:/CREATE INDEX ON :/g;' \
-e 's/) ON (n./(/g;' \
-e '/^CREATE CONSTRAINT/d' $INPUT_SCHEMA >> "$OUTPUT"

cat "$INPUT_NODES" >> "$OUTPUT"
cat "$INPUT_RELATIONSHIPS" >> "$OUTPUT"

sed -e '/^DROP CONSTRAINT/d' "$INPUT_CLEANUP" >> "$OUTPUT"

echo 'DROP INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' >> "$OUTPUT"

echo ""
echo -e "${COLOR_GREEN}DONE!${COLOR_NULL} Please find Memgraph compatible cypherl|.cypher file under $OUTPUT"
echo ""
echo "Please import data by executing => \`cat $OUTPUT | mgconsole\`"
import/n2mg_separate_files_cypherls.sh (new executable file, 64 lines)
@@ -0,0 +1,64 @@
#!/bin/bash -e
COLOR_ORANGE="\e[38;5;208m"
COLOR_GREEN="\e[38;5;35m"
COLOR_RED="\e[0;31m"
COLOR_NULL="\e[0m"

print_help() {
echo -e "${COLOR_ORANGE}HOW TO RUN:${COLOR_NULL} $0 input_file_schema_path input_file_nodes_path input_file_relationships_path input_file_cleanup_path output_file_schema_path output_file_nodes_path output_file_relationships_path output_file_cleanup_path"
exit 1
}

if [ "$#" -ne 8 ]; then
print_help
fi
INPUT_SCHEMA="$1"
INPUT_NODES="$2"
INPUT_RELATIONSHIPS="$3"
INPUT_CLEANUP="$4"
OUTPUT_SCHEMA="$5"
OUTPUT_NODES="$6"
OUTPUT_RELATIONSHIPS="$7"
OUTPUT_CLEANUP="$8"

if [ ! -f "$INPUT_SCHEMA" ]; then
echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
print_help
fi

if [ ! -f "$INPUT_NODES" ]; then
echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
print_help
fi

if [ ! -f "$INPUT_RELATIONSHIPS" ]; then
echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
print_help
fi

if [ ! -f "$INPUT_CLEANUP" ]; then
echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
print_help
fi

echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} BEGIN and COMMIT are required because variables share the same name (e.g. row)"
echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} CONSTRAINTS are just skipped -> ${COLOR_RED}please create constraints manually if needed${COLOR_NULL}"


echo 'CREATE INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' > "$OUTPUT_SCHEMA"

sed -e 's/CREATE RANGE INDEX FOR (n:/CREATE INDEX ON :/g;' \
-e 's/) ON (n./(/g;' \
-e '/^CREATE CONSTRAINT/d' $INPUT_SCHEMA >> "$OUTPUT_SCHEMA"

cat "$INPUT_NODES" > "$OUTPUT_NODES"
cat "$INPUT_RELATIONSHIPS" > "$OUTPUT_RELATIONSHIPS"

sed -e '/^DROP CONSTRAINT/d' "$INPUT_CLEANUP" >> "$OUTPUT_CLEANUP"

echo 'DROP INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' >> "$OUTPUT_CLEANUP"

echo ""
echo -e "${COLOR_GREEN}DONE!${COLOR_NULL} Please find Memgraph compatible cypherl|.cypher files under $OUTPUT_SCHEMA, $OUTPUT_NODES, $OUTPUT_RELATIONSHIPS and $OUTPUT_CLEANUP"
echo ""
echo "Please import data by executing => \`cat $OUTPUT_SCHEMA | mgconsole\`, \`cat $OUTPUT_NODES | mgconsole\`, \`cat $OUTPUT_RELATIONSHIPS | mgconsole\` and \`cat $OUTPUT_CLEANUP | mgconsole\`"
@@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -23,6 +23,7 @@ target_sources(mg-coordination
include/nuraft/coordinator_state_manager.hpp

PRIVATE
coordinator_config.cpp
coordinator_client.cpp
coordinator_state.cpp
coordinator_rpc.cpp
@@ -135,8 +135,7 @@ auto CoordinatorClient::SendSwapMainUUIDRpc(utils::UUID const &uuid) const -> bo

auto CoordinatorClient::SendUnregisterReplicaRpc(std::string_view instance_name) const -> bool {
try {
auto stream{rpc_client_.Stream<UnregisterReplicaRpc>(
std::string(instance_name))}; // TODO: (andi) Try to change to stream string_view and do just one copy later
auto stream{rpc_client_.Stream<UnregisterReplicaRpc>(instance_name)};
if (!stream.AwaitResponse().success) {
spdlog::error("Failed to receive successful RPC response for unregistering replica!");
return false;
@@ -18,78 +18,87 @@

namespace memgraph::coordination {

using replication_coordination_glue::ReplicationRole;
void to_json(nlohmann::json &j, InstanceState const &instance_state) {
j = nlohmann::json{{"config", instance_state.config}, {"status", instance_state.status}};
}

CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState const &other)
: instance_roles_{other.instance_roles_} {}
void from_json(nlohmann::json const &j, InstanceState &instance_state) {
j.at("config").get_to(instance_state.config);
j.at("status").get_to(instance_state.status);
}

CoordinatorClusterState::CoordinatorClusterState(std::map<std::string, InstanceState, std::less<>> instances)
: instances_{std::move(instances)} {}

CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState const &other) : instances_{other.instances_} {}

CoordinatorClusterState &CoordinatorClusterState::operator=(CoordinatorClusterState const &other) {
if (this == &other) {
return *this;
}
instance_roles_ = other.instance_roles_;
instances_ = other.instances_;
return *this;
}

CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState &&other) noexcept
: instance_roles_{std::move(other.instance_roles_)} {}
: instances_{std::move(other.instances_)} {}

CoordinatorClusterState &CoordinatorClusterState::operator=(CoordinatorClusterState &&other) noexcept {
if (this == &other) {
return *this;
}
instance_roles_ = std::move(other.instance_roles_);
instances_ = std::move(other.instances_);
return *this;
}

auto CoordinatorClusterState::MainExists() const -> bool {
auto lock = std::shared_lock{log_lock_};
return std::ranges::any_of(instance_roles_,
[](auto const &entry) { return entry.second.role == ReplicationRole::MAIN; });
return std::ranges::any_of(instances_,
[](auto const &entry) { return entry.second.status == ReplicationRole::MAIN; });
}

auto CoordinatorClusterState::IsMain(std::string_view instance_name) const -> bool {
auto lock = std::shared_lock{log_lock_};
auto const it = instance_roles_.find(instance_name);
return it != instance_roles_.end() && it->second.role == ReplicationRole::MAIN;
auto const it = instances_.find(instance_name);
return it != instances_.end() && it->second.status == ReplicationRole::MAIN;
}

auto CoordinatorClusterState::IsReplica(std::string_view instance_name) const -> bool {
auto lock = std::shared_lock{log_lock_};
auto const it = instance_roles_.find(instance_name);
return it != instance_roles_.end() && it->second.role == ReplicationRole::REPLICA;
auto const it = instances_.find(instance_name);
return it != instances_.end() && it->second.status == ReplicationRole::REPLICA;
}

auto CoordinatorClusterState::InsertInstance(std::string_view instance_name, ReplicationRole role) -> void {
auto lock = std::unique_lock{log_lock_};
instance_roles_[instance_name.data()].role = role;
auto CoordinatorClusterState::InsertInstance(std::string instance_name, InstanceState instance_state) -> void {
auto lock = std::lock_guard{log_lock_};
instances_.insert_or_assign(std::move(instance_name), std::move(instance_state));
}

auto CoordinatorClusterState::DoAction(TRaftLog log_entry, RaftLogAction log_action) -> void {
auto lock = std::unique_lock{log_lock_};
auto lock = std::lock_guard{log_lock_};
switch (log_action) {
case RaftLogAction::REGISTER_REPLICATION_INSTANCE: {
auto const &config = std::get<CoordinatorClientConfig>(log_entry);
instance_roles_[config.instance_name] = InstanceState{config, ReplicationRole::REPLICA};
instances_[config.instance_name] = InstanceState{config, ReplicationRole::REPLICA};
break;
}
case RaftLogAction::UNREGISTER_REPLICATION_INSTANCE: {
auto const instance_name = std::get<std::string>(log_entry);
instance_roles_.erase(instance_name);
instances_.erase(instance_name);
break;
}
case RaftLogAction::SET_INSTANCE_AS_MAIN: {
auto const instance_name = std::get<std::string>(log_entry);
auto it = instance_roles_.find(instance_name);
MG_ASSERT(it != instance_roles_.end(), "Instance does not exist as part of raft state!");
it->second.role = ReplicationRole::MAIN;
auto it = instances_.find(instance_name);
MG_ASSERT(it != instances_.end(), "Instance does not exist as part of raft state!");
it->second.status = ReplicationRole::MAIN;
break;
}
case RaftLogAction::SET_INSTANCE_AS_REPLICA: {
auto const instance_name = std::get<std::string>(log_entry);
auto it = instance_roles_.find(instance_name);
MG_ASSERT(it != instance_roles_.end(), "Instance does not exist as part of raft state!");
it->second.role = ReplicationRole::REPLICA;
auto it = instances_.find(instance_name);
MG_ASSERT(it != instances_.end(), "Instance does not exist as part of raft state!");
it->second.status = ReplicationRole::REPLICA;
break;
}
case RaftLogAction::UPDATE_UUID: {
@@ -99,64 +108,36 @@ auto CoordinatorClusterState::DoAction(TRaftLog log_entry, RaftLogAction log_act
}
}

// TODO: (andi) Improve based on Gareth's comments
auto CoordinatorClusterState::Serialize(ptr<buffer> &data) -> void {
auto lock = std::shared_lock{log_lock_};
auto const role_to_string = [](auto const &role) -> std::string_view {
switch (role) {
case ReplicationRole::MAIN:
return "main";
case ReplicationRole::REPLICA:
return "replica";
}
};

auto const entry_to_string = [&role_to_string](auto const &entry) {
return fmt::format("{}_{}", entry.first, role_to_string(entry.second.role));
};
auto const log = nlohmann::json(instances_).dump();

auto instances_str_view = instance_roles_ | ranges::views::transform(entry_to_string);
uint32_t size =
std::accumulate(instances_str_view.begin(), instances_str_view.end(), 0,
[](uint32_t acc, auto const &entry) { return acc + sizeof(uint32_t) + entry.size(); });

data = buffer::alloc(size);
data = buffer::alloc(sizeof(uint32_t) + log.size());
buffer_serializer bs(data);
std::for_each(instances_str_view.begin(), instances_str_view.end(), [&bs](auto const &entry) { bs.put_str(entry); });
bs.put_str(log);
}

auto CoordinatorClusterState::Deserialize(buffer &data) -> CoordinatorClusterState {
auto const str_to_role = [](auto const &str) -> ReplicationRole {
if (str == "main") {
return ReplicationRole::MAIN;
}
return ReplicationRole::REPLICA;
};

CoordinatorClusterState cluster_state;
buffer_serializer bs(data);
while (bs.size() > 0) {
auto const entry = bs.get_str();
auto const first_dash = entry.find('_');
auto const instance_name = entry.substr(0, first_dash);
auto const role_str = entry.substr(first_dash + 1);
cluster_state.InsertInstance(instance_name, str_to_role(role_str));
}
return cluster_state;
auto const j = nlohmann::json::parse(bs.get_str());
auto instances = j.get<std::map<std::string, InstanceState, std::less<>>>();

return CoordinatorClusterState{std::move(instances)};
}

auto CoordinatorClusterState::GetInstances() const -> std::vector<InstanceState> {
auto lock = std::shared_lock{log_lock_};
return instance_roles_ | ranges::views::values | ranges::to<std::vector<InstanceState>>;
return instances_ | ranges::views::values | ranges::to<std::vector<InstanceState>>;
}

auto CoordinatorClusterState::GetUUID() const -> utils::UUID { return uuid_; }

auto CoordinatorClusterState::FindCurrentMainInstanceName() const -> std::optional<std::string> {
auto lock = std::shared_lock{log_lock_};
auto const it = std::ranges::find_if(instance_roles_,
[](auto const &entry) { return entry.second.role == ReplicationRole::MAIN; });
if (it == instance_roles_.end()) {
auto const it =
std::ranges::find_if(instances_, [](auto const &entry) { return entry.second.status == ReplicationRole::MAIN; });
if (it == instances_.end()) {
return {};
}
return it->first;
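Reviewer note (not part of the diff): the rewritten Serialize/Deserialize pair above round-trips the whole instance map through a single JSON string instead of hand-packed "name_role" entries. A minimal, self-contained sketch of that round trip, using a simplified stand-in struct rather than Memgraph's real InstanceState and the upstream nlohmann header path:

#include <cassert>
#include <map>
#include <string>
#include <nlohmann/json.hpp>

// Simplified, hypothetical stand-in for InstanceState (illustration only).
struct InstanceLike {
  std::string ip;
  int status{0};
};

void to_json(nlohmann::json &j, InstanceLike const &i) { j = nlohmann::json{{"ip", i.ip}, {"status", i.status}}; }
void from_json(nlohmann::json const &j, InstanceLike &i) {
  j.at("ip").get_to(i.ip);
  j.at("status").get_to(i.status);
}

int main() {
  std::map<std::string, InstanceLike> instances{{"instance_1", {"127.0.0.1", 1}}};
  // Serialize: the whole map becomes one JSON string (what the diff stores via bs.put_str(log)).
  std::string const log = nlohmann::json(instances).dump();
  // Deserialize: parse the string back and rebuild the map (what Deserialize now does).
  auto const restored = nlohmann::json::parse(log).get<std::map<std::string, InstanceLike>>();
  assert(restored.at("instance_1").ip == "127.0.0.1");
}

The same ADL to_json/from_json hooks are what nlohmann::json picks up when the diff calls nlohmann::json(instances_).dump() and j.get<std::map<...>>().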
src/coordination/coordinator_config.cpp (new file, 54 lines)
@@ -0,0 +1,54 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"

namespace memgraph::coordination {

void to_json(nlohmann::json &j, ReplClientInfo const &config) {
j = nlohmann::json{{"instance_name", config.instance_name},
{"replication_mode", config.replication_mode},
{"replication_ip_address", config.replication_ip_address},
{"replication_port", config.replication_port}};
}

void from_json(nlohmann::json const &j, ReplClientInfo &config) {
config.instance_name = j.at("instance_name").get<std::string>();
config.replication_mode = j.at("replication_mode").get<replication_coordination_glue::ReplicationMode>();
config.replication_ip_address = j.at("replication_ip_address").get<std::string>();
config.replication_port = j.at("replication_port").get<uint16_t>();
}

void to_json(nlohmann::json &j, CoordinatorClientConfig const &config) {
j = nlohmann::json{{"instance_name", config.instance_name},
{"ip_address", config.ip_address},
{"port", config.port},
{"instance_health_check_frequency_sec", config.instance_health_check_frequency_sec.count()},
{"instance_down_timeout_sec", config.instance_down_timeout_sec.count()},
{"instance_get_uuid_frequency_sec", config.instance_get_uuid_frequency_sec.count()},
{"replication_client_info", config.replication_client_info}};
}

void from_json(nlohmann::json const &j, CoordinatorClientConfig &config) {
config.instance_name = j.at("instance_name").get<std::string>();
config.ip_address = j.at("ip_address").get<std::string>();
config.port = j.at("port").get<uint16_t>();
config.instance_health_check_frequency_sec =
std::chrono::seconds{j.at("instance_health_check_frequency_sec").get<int>()};
config.instance_down_timeout_sec = std::chrono::seconds{j.at("instance_down_timeout_sec").get<int>()};
config.instance_get_uuid_frequency_sec = std::chrono::seconds{j.at("instance_get_uuid_frequency_sec").get<int>()};
config.replication_client_info = j.at("replication_client_info").get<ReplClientInfo>();
}

} // namespace memgraph::coordination
#endif
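Note (not part of the diff): the config (de)serialization above stores durations as plain integer seconds, calling count() on write and rebuilding std::chrono::seconds on read. A small, self-contained sketch of that convention, with an illustrative struct and field name rather than Memgraph's actual types:

#include <cassert>
#include <chrono>
#include <nlohmann/json.hpp>

// Hypothetical config fragment mirroring the seconds-as-integer convention above.
struct TimeoutsLike {
  std::chrono::seconds health_check_frequency{1};
};

void to_json(nlohmann::json &j, TimeoutsLike const &t) {
  // Write the raw tick count; the JSON field carries a "_sec" suffix by convention.
  j = nlohmann::json{{"health_check_frequency_sec", t.health_check_frequency.count()}};
}
void from_json(nlohmann::json const &j, TimeoutsLike &t) {
  // Rebuild the duration from the integer value.
  t.health_check_frequency = std::chrono::seconds{j.at("health_check_frequency_sec").get<int>()};
}

int main() {
  TimeoutsLike const original{std::chrono::seconds{5}};
  auto const restored = nlohmann::json(original).get<TimeoutsLike>();
  assert(restored.health_check_frequency == original.health_check_frequency);
}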
@@ -36,31 +36,28 @@ CoordinatorInstance::CoordinatorInstance()
spdlog::info("Leader changed, starting all replication instances!");
auto const instances = raft_state_.GetInstances();
auto replicas = instances | ranges::views::filter([](auto const &instance) {
return instance.role == ReplicationRole::REPLICA;
return instance.status == ReplicationRole::REPLICA;
});

std::ranges::for_each(replicas, [this](auto &replica) {
spdlog::info("Starting replication instance {}", replica.config.instance_name);
spdlog::info("Started pinging replication instance {}", replica.config.instance_name);
repl_instances_.emplace_back(this, replica.config, client_succ_cb_, client_fail_cb_,
&CoordinatorInstance::ReplicaSuccessCallback,
&CoordinatorInstance::ReplicaFailCallback);
});

auto main = instances | ranges::views::filter(
[](auto const &instance) { return instance.role == ReplicationRole::MAIN; });

// TODO: (andi) Add support for this
// MG_ASSERT(std::ranges::distance(main) == 1, "There should be exactly one main instance");
[](auto const &instance) { return instance.status == ReplicationRole::MAIN; });

std::ranges::for_each(main, [this](auto &main_instance) {
spdlog::info("Starting main instance {}", main_instance.config.instance_name);
spdlog::info("Started pinging main instance {}", main_instance.config.instance_name);
repl_instances_.emplace_back(this, main_instance.config, client_succ_cb_, client_fail_cb_,
&CoordinatorInstance::MainSuccessCallback,
&CoordinatorInstance::MainFailCallback);
});

std::ranges::for_each(repl_instances_, [this](auto &instance) {
instance.SetNewMainUUID(raft_state_.GetUUID()); // TODO: (andi) Rename
instance.SetNewMainUUID(raft_state_.GetUUID());
instance.StartFrequentCheck();
});
},
@@ -69,13 +66,13 @@ CoordinatorInstance::CoordinatorInstance()
repl_instances_.clear();
})) {
client_succ_cb_ = [](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
auto lock = std::unique_lock{self->coord_instance_lock_};
auto lock = std::lock_guard{self->coord_instance_lock_};
auto &repl_instance = self->FindReplicationInstance(repl_instance_name);
std::invoke(repl_instance.GetSuccessCallback(), self, repl_instance_name);
};

client_fail_cb_ = [](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
auto lock = std::unique_lock{self->coord_instance_lock_};
auto lock = std::lock_guard{self->coord_instance_lock_};
auto &repl_instance = self->FindReplicationInstance(repl_instance_name);
std::invoke(repl_instance.GetFailCallback(), self, repl_instance_name);
};
@@ -98,10 +95,8 @@ auto CoordinatorInstance::ShowInstances() const -> std::vector<InstanceStatus> {
.raft_socket_address = instance->get_endpoint(),
.cluster_role = "coordinator",
.health = "unknown"}; // TODO: (andi) Get this info from RAFT and test it or when we will move
// CoordinatorState to every instance, we can be smarter about this using our RPC.
};

auto instances_status = utils::fmap(coord_instance_to_status, raft_state_.GetAllCoordinators());
auto instances_status = utils::fmap(raft_state_.GetAllCoordinators(), coord_instance_to_status);

if (raft_state_.IsLeader()) {
auto const stringify_repl_role = [this](ReplicationInstance const &instance) -> std::string {
@@ -127,14 +122,14 @@ auto CoordinatorInstance::ShowInstances() const -> std::vector<InstanceStatus> {
std::ranges::transform(repl_instances_, std::back_inserter(instances_status), process_repl_instance_as_leader);
}
} else {
auto const stringify_repl_role = [](ReplicationRole role) -> std::string {
return role == ReplicationRole::MAIN ? "main" : "replica";
auto const stringify_inst_status = [](ReplicationRole status) -> std::string {
return status == ReplicationRole::MAIN ? "main" : "replica";
};

// TODO: (andi) Add capability that followers can also return socket addresses
auto process_repl_instance_as_follower = [&stringify_repl_role](auto const &instance) -> InstanceStatus {
auto process_repl_instance_as_follower = [&stringify_inst_status](auto const &instance) -> InstanceStatus {
return {.instance_name = instance.config.instance_name,
.cluster_role = stringify_repl_role(instance.role),
.cluster_role = stringify_inst_status(instance.status),
.health = "unknown"};
};
@@ -319,17 +314,20 @@ auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorClientConfig co
return RegisterInstanceCoordinatorStatus::NOT_LEADER;
}

auto const undo_action_ = [this]() { repl_instances_.pop_back(); };

auto *new_instance = &repl_instances_.emplace_back(this, config, client_succ_cb_, client_fail_cb_,
&CoordinatorInstance::ReplicaSuccessCallback,
&CoordinatorInstance::ReplicaFailCallback);

if (!new_instance->SendDemoteToReplicaRpc()) {
spdlog::error("Failed to send demote to replica rpc for instance {}", config.instance_name);
repl_instances_.pop_back();
undo_action_();
return RegisterInstanceCoordinatorStatus::RPC_FAILED;
}

if (!raft_state_.AppendRegisterReplicationInstanceLog(config)) {
undo_action_();
return RegisterInstanceCoordinatorStatus::RAFT_LOG_ERROR;
}
@@ -356,11 +354,11 @@ auto CoordinatorInstance::UnregisterReplicationInstance(std::string_view instanc
return UnregisterInstanceCoordinatorStatus::NO_INSTANCE_WITH_NAME;
}

// TODO: (andi) Change so that RaftLogState is the central place for asking who is main...
auto const is_main = [this](ReplicationInstance const &instance) {
return IsMain(instance.InstanceName()) && instance.GetMainUUID() == raft_state_.GetUUID() && instance.IsAlive();
};

auto const is_main = [this](ReplicationInstance const &instance) { return IsMain(instance.InstanceName()); };

if (is_main(*inst_to_remove) && inst_to_remove->IsAlive()) {
if (is_main(*inst_to_remove)) {
return UnregisterInstanceCoordinatorStatus::IS_MAIN;
}
@@ -62,34 +62,33 @@ ptr<log_entry> CoordinatorLogStore::last_entry() const {

uint64_t CoordinatorLogStore::append(ptr<log_entry> &entry) {
ptr<log_entry> clone = MakeClone(entry);
uint64_t next_slot{0};
{
auto lock = std::lock_guard{logs_lock_};
next_slot = start_idx_ + logs_.size() - 1;
logs_[next_slot] = clone;
}

auto lock = std::lock_guard{logs_lock_};
uint64_t next_slot = start_idx_ + logs_.size() - 1;
logs_[next_slot] = clone;

return next_slot;
}

// TODO: (andi) I think this is used for resolving conflicts inside NuRaft, check...
// different compared to in_memory_log_store.cxx
void CoordinatorLogStore::write_at(uint64_t index, ptr<log_entry> &entry) {
ptr<log_entry> clone = MakeClone(entry);

// Discard all logs equal to or greater than `index.
{
auto lock = std::lock_guard{logs_lock_};
auto itr = logs_.lower_bound(index);
while (itr != logs_.end()) {
itr = logs_.erase(itr);
}
logs_[index] = clone;
auto lock = std::lock_guard{logs_lock_};
auto itr = logs_.lower_bound(index);
while (itr != logs_.end()) {
itr = logs_.erase(itr);
}
logs_[index] = clone;
}

ptr<std::vector<ptr<log_entry>>> CoordinatorLogStore::log_entries(uint64_t start, uint64_t end) {
auto ret = cs_new<std::vector<ptr<log_entry>>>();
ret->resize(end - start);

for (uint64_t i = start, curr_index = 0; i < end; ++i, ++curr_index) {
for (uint64_t i = start, curr_index = 0; i < end; i++, curr_index++) {
ptr<log_entry> src = nullptr;
{
auto lock = std::lock_guard{logs_lock_};
@@ -105,21 +104,14 @@ ptr<std::vector<ptr<log_entry>>> CoordinatorLogStore::log_entries(uint64_t start
}

ptr<log_entry> CoordinatorLogStore::entry_at(uint64_t index) {
ptr<log_entry> src = nullptr;
{
auto lock = std::lock_guard{logs_lock_};
src = FindOrDefault_(index);
}
auto lock = std::lock_guard{logs_lock_};
ptr<log_entry> src = FindOrDefault_(index);
return MakeClone(src);
}

uint64_t CoordinatorLogStore::term_at(uint64_t index) {
uint64_t term = 0;
{
auto lock = std::lock_guard{logs_lock_};
term = FindOrDefault_(index)->get_term();
}
return term;
auto lock = std::lock_guard{logs_lock_};
return FindOrDefault_(index)->get_term();
}

ptr<buffer> CoordinatorLogStore::pack(uint64_t index, int32 cnt) {
@@ -14,6 +14,10 @@
#include "nuraft/coordinator_state_machine.hpp"
#include "utils/logging.hpp"

namespace {
constexpr int MAX_SNAPSHOTS = 3;
} // namespace

namespace memgraph::coordination {

auto CoordinatorStateMachine::FindCurrentMainInstanceName() const -> std::optional<std::string> {
@@ -30,90 +34,83 @@ auto CoordinatorStateMachine::IsReplica(std::string_view instance_name) const ->
return cluster_state_.IsReplica(instance_name);
}

auto CoordinatorStateMachine::CreateLog(std::string_view log) -> ptr<buffer> {
ptr<buffer> log_buf = buffer::alloc(sizeof(uint32_t) + log.size());
auto CoordinatorStateMachine::CreateLog(nlohmann::json &&log) -> ptr<buffer> {
auto const log_dump = log.dump();
ptr<buffer> log_buf = buffer::alloc(sizeof(uint32_t) + log_dump.size());
buffer_serializer bs(log_buf);
bs.put_str(log.data());
bs.put_str(log_dump);
return log_buf;
}

auto CoordinatorStateMachine::SerializeRegisterInstance(CoordinatorClientConfig const &config) -> ptr<buffer> {
auto const str_log = fmt::format("{}*register", config.ToString());
return CreateLog(str_log);
return CreateLog({{"action", RaftLogAction::REGISTER_REPLICATION_INSTANCE}, {"info", config}});
}

auto CoordinatorStateMachine::SerializeUnregisterInstance(std::string_view instance_name) -> ptr<buffer> {
auto const str_log = fmt::format("{}*unregister", instance_name);
return CreateLog(str_log);
return CreateLog({{"action", RaftLogAction::UNREGISTER_REPLICATION_INSTANCE}, {"info", instance_name}});
}

auto CoordinatorStateMachine::SerializeSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer> {
auto const str_log = fmt::format("{}*promote", instance_name);
return CreateLog(str_log);
return CreateLog({{"action", RaftLogAction::SET_INSTANCE_AS_MAIN}, {"info", instance_name}});
}

auto CoordinatorStateMachine::SerializeSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer> {
auto const str_log = fmt::format("{}*demote", instance_name);
return CreateLog(str_log);
return CreateLog({{"action", RaftLogAction::SET_INSTANCE_AS_REPLICA}, {"info", instance_name}});
}

auto CoordinatorStateMachine::SerializeUpdateUUID(utils::UUID const &uuid) -> ptr<buffer> {
auto const str_log = fmt::format("{}*update_uuid", nlohmann::json{{"uuid", uuid}}.dump());
return CreateLog(str_log);
return CreateLog({{"action", RaftLogAction::UPDATE_UUID}, {"info", uuid}});
}

auto CoordinatorStateMachine::DecodeLog(buffer &data) -> std::pair<TRaftLog, RaftLogAction> {
buffer_serializer bs(data);
auto const json = nlohmann::json::parse(bs.get_str());

auto const log_str = bs.get_str();
auto const sep = log_str.find('*');
auto const action = log_str.substr(sep + 1);
auto const info = log_str.substr(0, sep);
auto const action = json["action"].get<RaftLogAction>();
auto const &info = json["info"];

if (action == "register") {
return {CoordinatorClientConfig::FromString(info), RaftLogAction::REGISTER_REPLICATION_INSTANCE};
switch (action) {
case RaftLogAction::REGISTER_REPLICATION_INSTANCE:
return {info.get<CoordinatorClientConfig>(), action};
case RaftLogAction::UPDATE_UUID:
return {info.get<utils::UUID>(), action};
case RaftLogAction::UNREGISTER_REPLICATION_INSTANCE:
case RaftLogAction::SET_INSTANCE_AS_MAIN:
[[fallthrough]];
case RaftLogAction::SET_INSTANCE_AS_REPLICA:
return {info.get<std::string>(), action};
}
if (action == "unregister") {
return {info, RaftLogAction::UNREGISTER_REPLICATION_INSTANCE};
}
if (action == "promote") {
return {info, RaftLogAction::SET_INSTANCE_AS_MAIN};
}
if (action == "demote") {
return {info, RaftLogAction::SET_INSTANCE_AS_REPLICA};
}
if (action == "update_uuid") {
auto const json = nlohmann::json::parse(info);
return {json.at("uuid").get<utils::UUID>(), RaftLogAction::UPDATE_UUID};
}

throw std::runtime_error("Unknown action");
}
auto CoordinatorStateMachine::pre_commit(ulong const /*log_idx*/, buffer & /*data*/) -> ptr<buffer> { return nullptr; }

auto CoordinatorStateMachine::commit(ulong const log_idx, buffer &data) -> ptr<buffer> {
buffer_serializer bs(data);

spdlog::debug("Commit: log_idx={}, data.size()={}", log_idx, data.size());
auto const [parsed_data, log_action] = DecodeLog(data);
cluster_state_.DoAction(parsed_data, log_action);

last_committed_idx_ = log_idx;
// TODO: (andi) Don't return nullptr
return nullptr;

// Return raft log number
ptr<buffer> ret = buffer::alloc(sizeof(log_idx));
buffer_serializer bs_ret(ret);
bs_ret.put_u64(log_idx);
return ret;
}

auto CoordinatorStateMachine::commit_config(ulong const log_idx, ptr<cluster_config> & /*new_conf*/) -> void {
last_committed_idx_ = log_idx;
spdlog::debug("Commit config: log_idx={}", log_idx);
}

auto CoordinatorStateMachine::rollback(ulong const log_idx, buffer &data) -> void {
// NOTE: Nothing since we don't do anything in pre_commit
spdlog::debug("Rollback: log_idx={}, data.size()={}", log_idx, data.size());
}

auto CoordinatorStateMachine::read_logical_snp_obj(snapshot &snapshot, void *& /*user_snp_ctx*/, ulong obj_id,
ptr<buffer> &data_out, bool &is_last_obj) -> int {
spdlog::info("read logical snapshot object, obj_id: {}", obj_id);
spdlog::debug("read logical snapshot object, obj_id: {}", obj_id);

ptr<SnapshotCtx> ctx = nullptr;
{
@@ -126,20 +123,33 @@ auto CoordinatorStateMachine::read_logical_snp_obj(snapshot &snapshot, void *& /
}
ctx = entry->second;
}
ctx->cluster_state_.Serialize(data_out);
is_last_obj = true;

if (obj_id == 0) {
// Object ID == 0: first object, put dummy data.
data_out = buffer::alloc(sizeof(int32));
buffer_serializer bs(data_out);
bs.put_i32(0);
is_last_obj = false;
} else {
// Object ID > 0: second object, put actual value.
ctx->cluster_state_.Serialize(data_out);
}

return 0;
}

auto CoordinatorStateMachine::save_logical_snp_obj(snapshot &snapshot, ulong &obj_id, buffer &data, bool is_first_obj,
bool is_last_obj) -> void {
spdlog::info("save logical snapshot object, obj_id: {}, is_first_obj: {}, is_last_obj: {}", obj_id, is_first_obj,
is_last_obj);
spdlog::debug("save logical snapshot object, obj_id: {}, is_first_obj: {}, is_last_obj: {}", obj_id, is_first_obj,
is_last_obj);

buffer_serializer bs(data);
auto cluster_state = CoordinatorClusterState::Deserialize(data);
if (obj_id == 0) {
ptr<buffer> snp_buf = snapshot.serialize();
auto ss = snapshot::deserialize(*snp_buf);
create_snapshot_internal(ss);
} else {
auto cluster_state = CoordinatorClusterState::Deserialize(data);

{
auto ll = std::lock_guard{snapshots_lock_};
auto entry = snapshots_.find(snapshot.get_last_log_idx());
DMG_ASSERT(entry != snapshots_.end());
@@ -149,6 +159,7 @@ auto CoordinatorStateMachine::save_logical_snp_obj(snapshot &snapshot, ulong &ob

auto CoordinatorStateMachine::apply_snapshot(snapshot &s) -> bool {
auto ll = std::lock_guard{snapshots_lock_};
spdlog::debug("apply snapshot, last_log_idx: {}", s.get_last_log_idx());

auto entry = snapshots_.find(s.get_last_log_idx());
if (entry == snapshots_.end()) return false;
@@ -161,6 +172,7 @@ auto CoordinatorStateMachine::free_user_snp_ctx(void *&user_snp_ctx) -> void {}

auto CoordinatorStateMachine::last_snapshot() -> ptr<snapshot> {
auto ll = std::lock_guard{snapshots_lock_};
spdlog::debug("last_snapshot");
auto entry = snapshots_.rbegin();
if (entry == snapshots_.rend()) return nullptr;

@@ -171,6 +183,7 @@ auto CoordinatorStateMachine::last_snapshot() -> ptr<snapshot> {
auto CoordinatorStateMachine::last_commit_index() -> ulong { return last_committed_idx_; }

auto CoordinatorStateMachine::create_snapshot(snapshot &s, async_result<bool>::handler_type &when_done) -> void {
spdlog::debug("create_snapshot, last_log_idx: {}", s.get_last_log_idx());
ptr<buffer> snp_buf = s.serialize();
ptr<snapshot> ss = snapshot::deserialize(*snp_buf);
create_snapshot_internal(ss);
@@ -182,11 +195,11 @@ auto CoordinatorStateMachine::create_snapshot(snapshot &s, async_result<bool>::h

auto CoordinatorStateMachine::create_snapshot_internal(ptr<snapshot> snapshot) -> void {
auto ll = std::lock_guard{snapshots_lock_};
spdlog::debug("create_snapshot_internal, last_log_idx: {}", snapshot->get_last_log_idx());

auto ctx = cs_new<SnapshotCtx>(snapshot, cluster_state_);
snapshots_[snapshot->get_last_log_idx()] = ctx;

constexpr int MAX_SNAPSHOTS = 3;
while (snapshots_.size() > MAX_SNAPSHOTS) {
snapshots_.erase(snapshots_.begin());
}
@@ -22,12 +22,12 @@
#include <string>

#include <fmt/format.h>
#include "json/json.hpp"

namespace memgraph::coordination {

inline constexpr auto *kDefaultReplicationServerIp = "0.0.0.0";

// TODO: (andi) JSON serialization for RAFT log.
struct CoordinatorClientConfig {
std::string instance_name;
std::string ip_address;
@@ -43,28 +43,11 @@ struct CoordinatorClientConfig {
}

struct ReplicationClientInfo {
// TODO: (andi) Do we even need here instance_name for this struct?
std::string instance_name;
replication_coordination_glue::ReplicationMode replication_mode{};
std::string replication_ip_address;
uint16_t replication_port{};

auto ToString() const -> std::string {
return fmt::format("{}#{}#{}#{}", instance_name, replication_ip_address, replication_port,
replication_coordination_glue::ReplicationModeToString(replication_mode));
}

// TODO: (andi) How can I make use of monadic parsers here?
static auto FromString(std::string_view log) -> ReplicationClientInfo {
ReplicationClientInfo replication_client_info;
auto splitted = utils::Split(log, "#");
replication_client_info.instance_name = splitted[0];
replication_client_info.replication_ip_address = splitted[1];
replication_client_info.replication_port = std::stoi(splitted[2]);
replication_client_info.replication_mode = replication_coordination_glue::ReplicationModeFromString(splitted[3]);
return replication_client_info;
}

friend bool operator==(ReplicationClientInfo const &, ReplicationClientInfo const &) = default;
};

@@ -79,25 +62,6 @@ struct CoordinatorClientConfig {

std::optional<SSL> ssl;

auto ToString() const -> std::string {
return fmt::format("{}|{}|{}|{}|{}|{}|{}", instance_name, ip_address, port,
instance_health_check_frequency_sec.count(), instance_down_timeout_sec.count(),
instance_get_uuid_frequency_sec.count(), replication_client_info.ToString());
}

static auto FromString(std::string_view log) -> CoordinatorClientConfig {
CoordinatorClientConfig config;
auto splitted = utils::Split(log, "|");
config.instance_name = splitted[0];
config.ip_address = splitted[1];
config.port = std::stoi(splitted[2]);
config.instance_health_check_frequency_sec = std::chrono::seconds(std::stoi(splitted[3]));
config.instance_down_timeout_sec = std::chrono::seconds(std::stoi(splitted[4]));
config.instance_get_uuid_frequency_sec = std::chrono::seconds(std::stoi(splitted[5]));
config.replication_client_info = ReplicationClientInfo::FromString(splitted[6]);
return config;
}

friend bool operator==(CoordinatorClientConfig const &, CoordinatorClientConfig const &) = default;
};

@@ -119,5 +83,11 @@ struct CoordinatorServerConfig {
friend bool operator==(CoordinatorServerConfig const &, CoordinatorServerConfig const &) = default;
};

void to_json(nlohmann::json &j, CoordinatorClientConfig const &config);
void from_json(nlohmann::json const &j, CoordinatorClientConfig &config);

void to_json(nlohmann::json &j, ReplClientInfo const &config);
void from_json(nlohmann::json const &j, ReplClientInfo &config);

} // namespace memgraph::coordination
#endif
@@ -90,7 +90,7 @@ struct UnregisterReplicaReq {
static void Load(UnregisterReplicaReq *self, memgraph::slk::Reader *reader);
static void Save(UnregisterReplicaReq const &self, memgraph::slk::Builder *builder);

explicit UnregisterReplicaReq(std::string instance_name) : instance_name(std::move(instance_name)) {}
explicit UnregisterReplicaReq(std::string_view inst_name) : instance_name(inst_name) {}

UnregisterReplicaReq() = default;
@@ -14,6 +14,7 @@
#ifdef MG_ENTERPRISE

#include <flags/replication.hpp>
#include "io/network/endpoint.hpp"
#include "nuraft/coordinator_state_machine.hpp"
#include "nuraft/coordinator_state_manager.hpp"

@@ -79,9 +80,8 @@ class RaftState {

private:
// TODO: (andi) I think variables below can be abstracted/clean them.
io::network::Endpoint raft_endpoint_;
uint32_t raft_server_id_;
uint32_t raft_port_;
std::string raft_address_;

ptr<CoordinatorStateMachine> state_machine_;
ptr<CoordinatorStateManager> state_manager_;
@@ -21,6 +21,7 @@

#include <libnuraft/nuraft.hxx>
#include <range/v3/view.hpp>
#include "json/json.hpp"

#include <map>
#include <numeric>
@@ -33,9 +34,16 @@ using replication_coordination_glue::ReplicationRole;

struct InstanceState {
CoordinatorClientConfig config;
ReplicationRole role;
ReplicationRole status;

friend auto operator==(InstanceState const &lhs, InstanceState const &rhs) -> bool {
return lhs.config == rhs.config && lhs.status == rhs.status;
}
};

void to_json(nlohmann::json &j, InstanceState const &instance_state);
void from_json(nlohmann::json const &j, InstanceState &instance_state);

using TRaftLog = std::variant<CoordinatorClientConfig, std::string, utils::UUID>;

using nuraft::buffer;
@@ -45,6 +53,8 @@ using nuraft::ptr;
class CoordinatorClusterState {
public:
CoordinatorClusterState() = default;
explicit CoordinatorClusterState(std::map<std::string, InstanceState, std::less<>> instances);

CoordinatorClusterState(CoordinatorClusterState const &);
CoordinatorClusterState &operator=(CoordinatorClusterState const &);

@@ -60,7 +70,7 @@ class CoordinatorClusterState {

auto IsReplica(std::string_view instance_name) const -> bool;

auto InsertInstance(std::string_view instance_name, ReplicationRole role) -> void;
auto InsertInstance(std::string instance_name, InstanceState instance_state) -> void;

auto DoAction(TRaftLog log_entry, RaftLogAction log_action) -> void;

@@ -73,7 +83,7 @@ class CoordinatorClusterState {
auto GetUUID() const -> utils::UUID;

private:
std::map<std::string, InstanceState, std::less<>> instance_roles_;
std::map<std::string, InstanceState, std::less<>> instances_{};
utils::UUID uuid_{};
mutable utils::ResourceLock log_lock_{};
};
@@ -47,7 +47,7 @@ class CoordinatorStateMachine : public state_machine {
auto IsMain(std::string_view instance_name) const -> bool;
auto IsReplica(std::string_view instance_name) const -> bool;

static auto CreateLog(std::string_view log) -> ptr<buffer>;
static auto CreateLog(nlohmann::json &&log) -> ptr<buffer>;
static auto SerializeRegisterInstance(CoordinatorClientConfig const &config) -> ptr<buffer>;
static auto SerializeUnregisterInstance(std::string_view instance_name) -> ptr<buffer>;
static auto SerializeSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer>;
@@ -95,12 +95,8 @@ class CoordinatorStateMachine : public state_machine {
auto create_snapshot_internal(ptr<snapshot> snapshot) -> void;

CoordinatorClusterState cluster_state_;

// mutable utils::RWLock lock{utils::RWLock::Priority::READ};

std::atomic<uint64_t> last_committed_idx_{0};

// TODO: (andi) Maybe not needed, remove it
std::map<uint64_t, ptr<SnapshotCtx>> snapshots_;
std::mutex snapshots_lock_;
@@ -18,6 +18,8 @@
#include <cstdint>
#include <string>

#include "json/json.hpp"

namespace memgraph::coordination {

enum class RaftLogAction : uint8_t {
@@ -28,26 +30,13 @@ enum class RaftLogAction : uint8_t {
UPDATE_UUID
};

inline auto ParseRaftLogAction(std::string_view action) -> RaftLogAction {
if (action == "register") {
return RaftLogAction::REGISTER_REPLICATION_INSTANCE;
}
if (action == "unregister") {
return RaftLogAction::UNREGISTER_REPLICATION_INSTANCE;
}
if (action == "promote") {
return RaftLogAction::SET_INSTANCE_AS_MAIN;
}
if (action == "demote") {
return RaftLogAction::SET_INSTANCE_AS_REPLICA;
}

if (action == "update_uuid") {
return RaftLogAction::UPDATE_UUID;
}

throw InvalidRaftLogActionException("Invalid Raft log action: {}.", action);
}
NLOHMANN_JSON_SERIALIZE_ENUM(RaftLogAction, {
{RaftLogAction::REGISTER_REPLICATION_INSTANCE, "register"},
{RaftLogAction::UNREGISTER_REPLICATION_INSTANCE, "unregister"},
{RaftLogAction::SET_INSTANCE_AS_MAIN, "promote"},
{RaftLogAction::SET_INSTANCE_AS_REPLICA, "demote"},
{RaftLogAction::UPDATE_UUID, "update_uuid"},
})

} // namespace memgraph::coordination
#endif
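Note (not part of the diff): NLOHMANN_JSON_SERIALIZE_ENUM above is what lets a raft log entry carry its action as a short string, and together with the {"action", "info"} envelope built in CoordinatorStateMachine::CreateLog and consumed in DecodeLog, a log entry is just a small JSON object. A minimal sketch with a simplified stand-in enum and payload (one caveat worth knowing: the macro maps an unrecognized string to the first listed pair, so the first entry effectively acts as the fallback):

#include <cassert>
#include <string>
#include <nlohmann/json.hpp>

// Simplified, hypothetical stand-in for RaftLogAction (illustration only).
enum class ActionLike { kRegister, kUnregister };

NLOHMANN_JSON_SERIALIZE_ENUM(ActionLike, {
    {ActionLike::kRegister, "register"},
    {ActionLike::kUnregister, "unregister"},
})

int main() {
  // Encode: the action enum serializes as its mapped string, next to an "info" payload.
  nlohmann::json const log{{"action", ActionLike::kUnregister}, {"info", "instance_1"}};
  std::string const wire = log.dump();  // what would be written into the NuRaft buffer

  // Decode: parse the envelope and dispatch on the recovered action.
  auto const parsed = nlohmann::json::parse(wire);
  auto const action = parsed["action"].get<ActionLike>();
  assert(action == ActionLike::kUnregister);
  assert(parsed["info"].get<std::string>() == "instance_1");
}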
@@ -10,11 +10,11 @@
// licenses/APL.txt.

#ifdef MG_ENTERPRISE

#include "coordination/raft_state.hpp"
#include <chrono>

#include "coordination/coordinator_config.hpp"
#include "coordination/coordinator_exceptions.hpp"
#include "coordination/raft_state.hpp"
#include "utils/counter.hpp"

namespace memgraph::coordination {
@@ -32,31 +32,35 @@ using raft_result = cmd_result<ptr<buffer>>;

RaftState::RaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb, uint32_t raft_server_id,
uint32_t raft_port, std::string raft_address)
: raft_server_id_(raft_server_id),
raft_port_(raft_port),
raft_address_(std::move(raft_address)),
: raft_endpoint_(raft_address, raft_port),
raft_server_id_(raft_server_id),
state_machine_(cs_new<CoordinatorStateMachine>()),
state_manager_(
cs_new<CoordinatorStateManager>(raft_server_id_, raft_address_ + ":" + std::to_string(raft_port_))),
state_manager_(cs_new<CoordinatorStateManager>(raft_server_id_, raft_endpoint_.SocketAddress())),
logger_(nullptr),
become_leader_cb_(std::move(become_leader_cb)),
become_follower_cb_(std::move(become_follower_cb)) {}

auto RaftState::InitRaftServer() -> void {
asio_service::options asio_opts;
asio_opts.thread_pool_size_ = 1; // TODO: (andi) Improve this
asio_opts.thread_pool_size_ = 1;

raft_params params;
params.heart_beat_interval_ = 100;
params.election_timeout_lower_bound_ = 200;
params.election_timeout_upper_bound_ = 400;
// 5 logs are preserved before the last snapshot
params.reserved_log_items_ = 5;
// Create snapshot for every 5 log appends
params.snapshot_distance_ = 5;
params.client_req_timeout_ = 3000;
params.return_method_ = raft_params::blocking;

// If the leader doesn't receive any response from quorum nodes
// in 200ms, it will step down.
// This allows us to achieve strong consistency even if network partition
// happens between the current leader and followers.
// The value must be <= election_timeout_lower_bound_ so that cluster can never
// have multiple leaders.
params.leadership_expiry_ = 200;

raft_server::init_options init_opts;
init_opts.raft_callback_ = [this](cb_func::Type event_type, cb_func::Param *param) -> nuraft::CbReturnCode {
if (event_type == cb_func::BecomeLeader) {
@@ -71,11 +75,11 @@ auto RaftState::InitRaftServer() -> void {

raft_launcher launcher;

raft_server_ = launcher.init(state_machine_, state_manager_, logger_, static_cast<int>(raft_port_), asio_opts, params,
init_opts);
raft_server_ =
launcher.init(state_machine_, state_manager_, logger_, raft_endpoint_.port, asio_opts, params, init_opts);

if (!raft_server_) {
throw RaftServerStartException("Failed to launch raft server on {}:{}", raft_address_, raft_port_);
throw RaftServerStartException("Failed to launch raft server on {}", raft_endpoint_.SocketAddress());
}

auto maybe_stop = utils::ResettableCounter<20>();
@@ -86,7 +90,7 @@ auto RaftState::InitRaftServer() -> void {
std::this_thread::sleep_for(std::chrono::milliseconds(250));
} while (!maybe_stop());

throw RaftServerStartException("Failed to initialize raft server on {}:{}", raft_address_, raft_port_);
throw RaftServerStartException("Failed to initialize raft server on {}", raft_endpoint_.SocketAddress());
}

auto RaftState::MakeRaftState(BecomeLeaderCb &&become_leader_cb, BecomeFollowerCb &&become_follower_cb) -> RaftState {
@@ -102,18 +106,45 @@ auto RaftState::MakeRaftState(BecomeLeaderCb &&become_leader_cb, BecomeFollowerC

RaftState::~RaftState() { launcher_.shutdown(); }

auto RaftState::InstanceName() const -> std::string { return "coordinator_" + std::to_string(raft_server_id_); }
auto RaftState::InstanceName() const -> std::string {
return fmt::format("coordinator_{}", std::to_string(raft_server_id_));
}

auto RaftState::RaftSocketAddress() const -> std::string { return raft_address_ + ":" + std::to_string(raft_port_); }
auto RaftState::RaftSocketAddress() const -> std::string { return raft_endpoint_.SocketAddress(); }

auto RaftState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address)
-> void {
auto const endpoint = fmt::format("{}:{}", raft_address, raft_port);
srv_config const srv_config_to_add(static_cast<int>(raft_server_id), endpoint);
if (!raft_server_->add_srv(srv_config_to_add)->get_accepted()) {
throw RaftAddServerException("Failed to add server {} to the cluster", endpoint);

auto cmd_result = raft_server_->add_srv(srv_config_to_add);

if (cmd_result->get_result_code() == nuraft::cmd_result_code::OK) {
spdlog::info("Request to add server {} to the cluster accepted", endpoint);
} else {
throw RaftAddServerException("Failed to accept request to add server {} to the cluster with error code {}",
endpoint, cmd_result->get_result_code());
}

// Waiting for server to join
constexpr int max_tries{10};
auto maybe_stop = utils::ResettableCounter<max_tries>();
constexpr int waiting_period{200};
bool added{false};
while (!maybe_stop()) {
std::this_thread::sleep_for(std::chrono::milliseconds(waiting_period));
const auto server_config = raft_server_->get_srv_config(static_cast<nuraft::int32>(raft_server_id));
if (server_config) {
spdlog::trace("Server with id {} added to cluster", raft_server_id);
added = true;
break;
}
}

if (!added) {
throw RaftAddServerException("Failed to add server {} to the cluster in {}ms", endpoint,
max_tries * waiting_period);
}
spdlog::info("Request to add server {} to the cluster accepted", endpoint);
}

auto RaftState::GetAllCoordinators() const -> std::vector<ptr<srv_config>> {
@@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -118,9 +118,14 @@ void InMemoryReplicationHandlers::Register(dbms::DbmsHandler *dbms_handler, repl
});
server.rpc_server_.Register<replication_coordination_glue::SwapMainUUIDRpc>(
[&data, dbms_handler](auto *req_reader, auto *res_builder) {
spdlog::debug("Received SwapMainUUIDHandler");
spdlog::debug("Received SwapMainUUIDRpc");
InMemoryReplicationHandlers::SwapMainUUIDHandler(dbms_handler, data, req_reader, res_builder);
});
server.rpc_server_.Register<storage::replication::ForceResetStorageRpc>(
[&data, dbms_handler](auto *req_reader, auto *res_builder) {
spdlog::debug("Received ForceResetStorageRpc");
InMemoryReplicationHandlers::ForceResetStorageHandler(dbms_handler, data.uuid_, req_reader, res_builder);
});
}

void InMemoryReplicationHandlers::SwapMainUUIDHandler(dbms::DbmsHandler *dbms_handler,
@ -329,6 +334,78 @@ void InMemoryReplicationHandlers::SnapshotHandler(dbms::DbmsHandler *dbms_handle
|
||||
spdlog::debug("Replication recovery from snapshot finished!");
|
||||
}
|
||||
|
||||
void InMemoryReplicationHandlers::ForceResetStorageHandler(dbms::DbmsHandler *dbms_handler,
                                                           const std::optional<utils::UUID> &current_main_uuid,
                                                           slk::Reader *req_reader, slk::Builder *res_builder) {
  storage::replication::ForceResetStorageReq req;
  slk::Load(&req, req_reader);
  auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_uuid);
  if (!db_acc) {
    storage::replication::ForceResetStorageRes res{false, 0};
    slk::Save(res, res_builder);
    return;
  }
  if (!current_main_uuid.has_value() || req.main_uuid != current_main_uuid) [[unlikely]] {
    LogWrongMain(current_main_uuid, req.main_uuid, storage::replication::ForceResetStorageReq::kType.name);
    storage::replication::ForceResetStorageRes res{false, 0};
    slk::Save(res, res_builder);
    return;
  }

  storage::replication::Decoder decoder(req_reader);

  auto *storage = static_cast<storage::InMemoryStorage *>(db_acc->get()->storage());

  auto storage_guard = std::unique_lock{storage->main_lock_};

  // Clear the database
  storage->vertices_.clear();
  storage->edges_.clear();
  storage->commit_log_.reset();
  storage->commit_log_.emplace();

  storage->constraints_.existence_constraints_ = std::make_unique<storage::ExistenceConstraints>();
  storage->constraints_.unique_constraints_ = std::make_unique<storage::InMemoryUniqueConstraints>();
  storage->indices_.label_index_ = std::make_unique<storage::InMemoryLabelIndex>();
  storage->indices_.label_property_index_ = std::make_unique<storage::InMemoryLabelPropertyIndex>();

  // Fine because, when reading from the WAL, we will force-push just a random epoch with timestamp 0,
  // as it should be if this instance acted as MAIN before
  storage->repl_storage_state_.epoch_.SetEpoch(std::string(utils::UUID{}));
  storage->repl_storage_state_.last_commit_timestamp_ = 0;

  storage->repl_storage_state_.history.clear();
  storage->vertex_id_ = 0;
  storage->edge_id_ = 0;
  storage->timestamp_ = storage::kTimestampInitialId;

  storage->CollectGarbage<true>(std::move(storage_guard), false);
  storage->vertices_.run_gc();
  storage->edges_.run_gc();

  storage::replication::ForceResetStorageRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()};
  slk::Save(res, res_builder);

  spdlog::trace("Deleting old snapshot files.");
  // Delete other durability files
  auto snapshot_files = storage::durability::GetSnapshotFiles(storage->recovery_.snapshot_directory_, storage->uuid_);
  for (const auto &[path, uuid, _] : snapshot_files) {
    spdlog::trace("Deleting snapshot file {}", path);
    storage->file_retainer_.DeleteFile(path);
  }

  spdlog::trace("Deleting old WAL files.");
  auto wal_files = storage::durability::GetWalFiles(storage->recovery_.wal_directory_, storage->uuid_);
  if (wal_files) {
    for (const auto &wal_file : *wal_files) {
      spdlog::trace("Deleting WAL file {}", wal_file.path);
      storage->file_retainer_.DeleteFile(wal_file.path);
    }

    storage->wal_file_.reset();
  }
}
|
||||
|
||||
void InMemoryReplicationHandlers::WalFilesHandler(dbms::DbmsHandler *dbms_handler,
|
||||
const std::optional<utils::UUID> &current_main_uuid,
|
||||
slk::Reader *req_reader, slk::Builder *res_builder) {
|
||||
@ -763,6 +840,20 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
transaction->DeleteLabelPropertyIndexStats(storage->NameToLabel(info.label));
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::EDGE_INDEX_CREATE: {
|
||||
spdlog::trace(" Create edge index on :{}", delta.operation_edge_type.edge_type);
|
||||
auto *transaction = get_transaction(timestamp, kUniqueAccess);
|
||||
if (transaction->CreateIndex(storage->NameToEdgeType(delta.operation_label.label)).HasError())
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::EDGE_INDEX_DROP: {
|
||||
spdlog::trace(" Drop edge index on :{}", delta.operation_edge_type.edge_type);
|
||||
auto *transaction = get_transaction(timestamp, kUniqueAccess);
|
||||
if (transaction->DropIndex(storage->NameToEdgeType(delta.operation_label.label)).HasError())
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE: {
|
||||
spdlog::trace(" Create existence constraint on :{} ({})", delta.operation_label_property.label,
|
||||
delta.operation_label_property.property);
|
||||
|
@ -48,6 +48,9 @@ class InMemoryReplicationHandlers {
|
||||
|
||||
static void SwapMainUUIDHandler(dbms::DbmsHandler *dbms_handler, replication::RoleReplicaData &role_replica_data,
|
||||
slk::Reader *req_reader, slk::Builder *res_builder);
|
||||
static void ForceResetStorageHandler(dbms::DbmsHandler *dbms_handler,
|
||||
const std::optional<utils::UUID> &current_main_uuid, slk::Reader *req_reader,
|
||||
slk::Builder *res_builder);
|
||||
|
||||
static void LoadWal(storage::InMemoryStorage *storage, storage::replication::Decoder *decoder);
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -22,113 +22,15 @@
|
||||
#include "utils/message.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
||||
namespace {
|
||||
constexpr std::string_view delimiter = ":";
|
||||
} // namespace
|
||||
|
||||
namespace memgraph::io::network {
|
||||
|
||||
Endpoint::IpFamily Endpoint::GetIpFamily(std::string_view address) {
|
||||
in_addr addr4;
|
||||
in6_addr addr6;
|
||||
int ipv4_result = inet_pton(AF_INET, address.data(), &addr4);
|
||||
int ipv6_result = inet_pton(AF_INET6, address.data(), &addr6);
|
||||
if (ipv4_result == 1) {
|
||||
return IpFamily::IP4;
|
||||
}
|
||||
if (ipv6_result == 1) {
|
||||
return IpFamily::IP6;
|
||||
}
|
||||
return IpFamily::NONE;
|
||||
}
|
||||
|
||||
std::optional<std::pair<std::string, uint16_t>> Endpoint::ParseSocketOrIpAddress(
|
||||
std::string_view address, const std::optional<uint16_t> default_port) {
|
||||
/// expected address format:
|
||||
/// - "ip_address:port_number"
|
||||
/// - "ip_address"
|
||||
/// We parse the address first. If it's an IP address, a default port must
|
||||
// be given, or we return nullopt. If it's a socket address, we try to parse
|
||||
// it into an ip address and a port number; even if a default port is given,
|
||||
// it won't be used, as we expect that it is given in the address string.
|
||||
const std::string delimiter = ":";
|
||||
std::string ip_address;
|
||||
|
||||
std::vector<std::string> parts = utils::Split(address, delimiter);
|
||||
if (parts.size() == 1) {
|
||||
if (default_port) {
|
||||
if (GetIpFamily(address) == IpFamily::NONE) {
|
||||
return std::nullopt;
|
||||
}
|
||||
return std::pair{std::string(address), *default_port}; // TODO: (andi) Optimize throughout the code
|
||||
}
|
||||
} else if (parts.size() == 2) {
|
||||
ip_address = std::move(parts[0]);
|
||||
if (GetIpFamily(ip_address) == IpFamily::NONE) {
|
||||
return std::nullopt;
|
||||
}
|
||||
int64_t int_port{0};
|
||||
try {
|
||||
int_port = utils::ParseInt(parts[1]);
|
||||
} catch (utils::BasicException &e) {
|
||||
spdlog::error(utils::MessageWithLink("Invalid port number {}.", parts[1], "https://memgr.ph/ports"));
|
||||
return std::nullopt;
|
||||
}
|
||||
if (int_port < 0) {
|
||||
spdlog::error(utils::MessageWithLink("Invalid port number {}. The port number must be a positive integer.",
|
||||
int_port, "https://memgr.ph/ports"));
|
||||
return std::nullopt;
|
||||
}
|
||||
if (int_port > std::numeric_limits<uint16_t>::max()) {
|
||||
spdlog::error(utils::MessageWithLink("Invalid port number. The port number exceedes the maximum possible size.",
|
||||
"https://memgr.ph/ports"));
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
return std::pair{ip_address, static_cast<uint16_t>(int_port)};
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
std::optional<std::pair<std::string, uint16_t>> Endpoint::ParseHostname(
|
||||
std::string_view address, const std::optional<uint16_t> default_port = {}) {
|
||||
const std::string delimiter = ":";
|
||||
std::string ip_address;
|
||||
std::vector<std::string> parts = utils::Split(address, delimiter);
|
||||
if (parts.size() == 1) {
|
||||
if (default_port) {
|
||||
if (!IsResolvableAddress(address, *default_port)) {
|
||||
return std::nullopt;
|
||||
}
|
||||
return std::pair{std::string(address), *default_port}; // TODO: (andi) Optimize throughout the code
|
||||
}
|
||||
} else if (parts.size() == 2) {
|
||||
int64_t int_port{0};
|
||||
auto hostname = std::move(parts[0]);
|
||||
try {
|
||||
int_port = utils::ParseInt(parts[1]);
|
||||
} catch (utils::BasicException &e) {
|
||||
spdlog::error(utils::MessageWithLink("Invalid port number {}.", parts[1], "https://memgr.ph/ports"));
|
||||
return std::nullopt;
|
||||
}
|
||||
if (int_port < 0) {
|
||||
spdlog::error(utils::MessageWithLink("Invalid port number {}. The port number must be a positive integer.",
|
||||
int_port, "https://memgr.ph/ports"));
|
||||
return std::nullopt;
|
||||
}
|
||||
if (int_port > std::numeric_limits<uint16_t>::max()) {
|
||||
spdlog::error(utils::MessageWithLink("Invalid port number. The port number exceedes the maximum possible size.",
|
||||
"https://memgr.ph/ports"));
|
||||
return std::nullopt;
|
||||
}
|
||||
if (IsResolvableAddress(hostname, static_cast<uint16_t>(int_port))) {
|
||||
return std::pair{hostname, static_cast<u_int16_t>(int_port)};
|
||||
}
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
std::string Endpoint::SocketAddress() const {
|
||||
auto ip_address = address.empty() ? "EMPTY" : address;
|
||||
return ip_address + ":" + std::to_string(port);
|
||||
}
|
||||
// NOLINTNEXTLINE
|
||||
Endpoint::Endpoint(needs_resolving_t, std::string hostname, uint16_t port)
|
||||
: address(std::move(hostname)), port(port), family{GetIpFamily(address)} {}
|
||||
|
||||
Endpoint::Endpoint(std::string ip_address, uint16_t port) : address(std::move(ip_address)), port(port) {
|
||||
IpFamily ip_family = GetIpFamily(address);
|
||||
@ -138,9 +40,23 @@ Endpoint::Endpoint(std::string ip_address, uint16_t port) : address(std::move(ip
|
||||
family = ip_family;
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE
|
||||
Endpoint::Endpoint(needs_resolving_t, std::string hostname, uint16_t port)
|
||||
: address(std::move(hostname)), port(port), family{GetIpFamily(address)} {}
|
||||
std::string Endpoint::SocketAddress() const { return fmt::format("{}:{}", address, port); }
|
||||
|
||||
Endpoint::IpFamily Endpoint::GetIpFamily(std::string_view address) {
|
||||
// Ensure null-terminated
|
||||
auto const tmp = std::string(address);
|
||||
in_addr addr4;
|
||||
in6_addr addr6;
|
||||
int ipv4_result = inet_pton(AF_INET, tmp.c_str(), &addr4);
|
||||
int ipv6_result = inet_pton(AF_INET6, tmp.c_str(), &addr6);
|
||||
if (ipv4_result == 1) {
|
||||
return IpFamily::IP4;
|
||||
}
|
||||
if (ipv6_result == 1) {
|
||||
return IpFamily::IP6;
|
||||
}
|
||||
return IpFamily::NONE;
|
||||
}
|
||||
|
||||
std::ostream &operator<<(std::ostream &os, const Endpoint &endpoint) {
|
||||
// no need to cover the IpFamily::NONE case, as you can't even construct an
|
||||
@ -153,6 +69,7 @@ std::ostream &operator<<(std::ostream &os, const Endpoint &endpoint) {
|
||||
return os << endpoint.address << ":" << endpoint.port;
|
||||
}
|
||||
|
||||
// NOTE: Intentional copy to ensure null-terminated string
|
||||
bool Endpoint::IsResolvableAddress(std::string_view address, uint16_t port) {
|
||||
addrinfo hints{
|
||||
.ai_flags = AI_PASSIVE,
|
||||
@ -160,28 +77,65 @@ bool Endpoint::IsResolvableAddress(std::string_view address, uint16_t port) {
|
||||
.ai_socktype = SOCK_STREAM // TCP socket
|
||||
};
|
||||
addrinfo *info = nullptr;
|
||||
auto status = getaddrinfo(address.data(), std::to_string(port).c_str(), &hints, &info);
|
||||
auto status = getaddrinfo(std::string(address).c_str(), std::to_string(port).c_str(), &hints, &info);
|
||||
if (info) freeaddrinfo(info);
|
||||
return status == 0;
|
||||
}
|
||||
|
||||
std::optional<std::pair<std::string, uint16_t>> Endpoint::ParseSocketOrAddress(
|
||||
std::string_view address, const std::optional<uint16_t> default_port) {
|
||||
const std::string delimiter = ":";
|
||||
std::vector<std::string> parts = utils::Split(address, delimiter);
|
||||
if (parts.size() == 1) {
|
||||
if (GetIpFamily(address) == IpFamily::NONE) {
|
||||
return ParseHostname(address, default_port);
|
||||
}
|
||||
return ParseSocketOrIpAddress(address, default_port);
|
||||
std::optional<ParsedAddress> Endpoint::ParseSocketOrAddress(std::string_view address,
|
||||
std::optional<uint16_t> default_port) {
|
||||
auto const parts = utils::SplitView(address, delimiter);
|
||||
|
||||
if (parts.size() > 2) {
|
||||
return std::nullopt;
|
||||
}
|
||||
if (parts.size() == 2) {
|
||||
if (GetIpFamily(parts[0]) == IpFamily::NONE) {
|
||||
return ParseHostname(address, default_port);
|
||||
|
||||
auto const port = [default_port, &parts]() -> std::optional<uint16_t> {
|
||||
if (parts.size() == 2) {
|
||||
return static_cast<uint16_t>(utils::ParseInt(parts[1]));
|
||||
}
|
||||
return ParseSocketOrIpAddress(address, default_port);
|
||||
return default_port;
|
||||
}();
|
||||
|
||||
if (!ValidatePort(port)) {
|
||||
return std::nullopt;
|
||||
}
|
||||
return std::nullopt;
|
||||
|
||||
auto const addr = [address, &parts]() {
|
||||
if (parts.size() == 2) {
|
||||
return parts[0];
|
||||
}
|
||||
return address;
|
||||
}();
|
||||
|
||||
if (GetIpFamily(addr) == IpFamily::NONE) {
|
||||
if (IsResolvableAddress(addr, *port)) { // NOLINT
|
||||
return std::pair{addr, *port}; // NOLINT
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
return std::pair{addr, *port}; // NOLINT
|
||||
}
|
||||
|
||||
auto Endpoint::ValidatePort(std::optional<uint16_t> port) -> bool {
|
||||
if (!port) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (port < 0) {
|
||||
spdlog::error(utils::MessageWithLink("Invalid port number {}. The port number must be a positive integer.", *port,
|
||||
"https://memgr.ph/ports"));
|
||||
return false;
|
||||
}
|
||||
|
||||
if (port > std::numeric_limits<uint16_t>::max()) {
|
||||
spdlog::error(utils::MessageWithLink("Invalid port number. The port number exceedes the maximum possible size.",
|
||||
"https://memgr.ph/ports"));
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace memgraph::io::network
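
A minimal usage sketch of the consolidated parsing entry point above, assuming only what this diff shows (ParseSocketOrAddress returns std::optional<ParsedAddress>, i.e. a {string_view, uint16_t} pair viewing into the input); the concrete addresses and include path are illustrative:

#include <cstdint>
#include <optional>
#include "io/network/endpoint.hpp"

using memgraph::io::network::Endpoint;

void ParseExamples() {
  // Socket address: the port comes from the string itself, any default_port is ignored.
  auto const with_port = Endpoint::ParseSocketOrAddress("127.0.0.1:7687");
  // Bare IP or hostname: a default port must be supplied, otherwise the nullopt port is rejected.
  auto const with_default = Endpoint::ParseSocketOrAddress("localhost", 7687);
  if (with_port && with_default) {
    // ParsedAddress views into the argument string, so consume it while that string is alive.
    [[maybe_unused]] auto const [addr, port] = *with_port;
  }
}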
|
||||
|
@ -19,11 +19,8 @@
|
||||
|
||||
namespace memgraph::io::network {
|
||||
|
||||
/**
|
||||
* This class represents a network endpoint that is used in Socket.
|
||||
* It is used when connecting to an address and to get the current
|
||||
* connection address.
|
||||
*/
|
||||
using ParsedAddress = std::pair<std::string_view, uint16_t>;
|
||||
|
||||
struct Endpoint {
|
||||
static const struct needs_resolving_t {
|
||||
} needs_resolving;
|
||||
@ -31,59 +28,35 @@ struct Endpoint {
|
||||
Endpoint() = default;
|
||||
Endpoint(std::string ip_address, uint16_t port);
|
||||
Endpoint(needs_resolving_t, std::string hostname, uint16_t port);
|
||||
|
||||
Endpoint(Endpoint const &) = default;
|
||||
Endpoint(Endpoint &&) noexcept = default;
|
||||
|
||||
Endpoint &operator=(Endpoint const &) = default;
|
||||
Endpoint &operator=(Endpoint &&) noexcept = default;
|
||||
|
||||
~Endpoint() = default;
|
||||
|
||||
enum class IpFamily : std::uint8_t { NONE, IP4, IP6 };
|
||||
|
||||
std::string SocketAddress() const;
|
||||
static std::optional<ParsedAddress> ParseSocketOrAddress(std::string_view address,
|
||||
std::optional<uint16_t> default_port = {});
|
||||
|
||||
bool operator==(const Endpoint &other) const = default;
|
||||
friend std::ostream &operator<<(std::ostream &os, const Endpoint &endpoint);
|
||||
std::string SocketAddress() const;
|
||||
|
||||
std::string address;
|
||||
uint16_t port{0};
|
||||
IpFamily family{IpFamily::NONE};
|
||||
|
||||
static std::optional<std::pair<std::string, uint16_t>> ParseSocketOrAddress(
|
||||
std::string_view address, std::optional<uint16_t> default_port = {});
|
||||
|
||||
/**
|
||||
* Tries to parse the given string as either a socket address or ip address.
|
||||
* Expected address format:
|
||||
* - "ip_address:port_number"
|
||||
* - "ip_address"
|
||||
* We parse the address first. If it's an IP address, a default port must
|
||||
* be given, or we return nullopt. If it's a socket address, we try to parse
|
||||
* it into an ip address and a port number; even if a default port is given,
|
||||
* it won't be used, as we expect that it is given in the address string.
|
||||
*/
|
||||
static std::optional<std::pair<std::string, uint16_t>> ParseSocketOrIpAddress(
|
||||
std::string_view address, std::optional<uint16_t> default_port = {});
|
||||
|
||||
/**
|
||||
* Tries to parse given string as either socket address or hostname.
|
||||
* Expected address format:
|
||||
* - "hostname:port_number"
|
||||
* - "hostname"
|
||||
* After we parse hostname and port we try to resolve the hostname into an ip_address.
|
||||
*/
|
||||
static std::optional<std::pair<std::string, uint16_t>> ParseHostname(std::string_view address,
|
||||
std::optional<uint16_t> default_port);
|
||||
bool operator==(const Endpoint &other) const = default;
|
||||
friend std::ostream &operator<<(std::ostream &os, const Endpoint &endpoint);
|
||||
|
||||
private:
|
||||
static IpFamily GetIpFamily(std::string_view address);
|
||||
|
||||
static bool IsResolvableAddress(std::string_view address, uint16_t port);
|
||||
|
||||
/**
|
||||
* Tries to resolve hostname to its corresponding IP address.
|
||||
* Given a DNS hostname, this function performs resolution and returns
|
||||
* the IP address associated with the hostname.
|
||||
*/
|
||||
static std::string ResolveHostnameIntoIpAddress(const std::string &address, uint16_t port);
|
||||
static auto ValidatePort(std::optional<uint16_t> port) -> bool;
|
||||
};
|
||||
|
||||
} // namespace memgraph::io::network
|
||||
|
@ -371,6 +371,62 @@ class VerticesIterable final {
|
||||
}
|
||||
};
|
||||
|
||||
class EdgesIterable final {
|
||||
std::variant<storage::EdgesIterable, std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
|
||||
utils::Allocator<EdgeAccessor>> *>
|
||||
iterable_;
|
||||
|
||||
public:
|
||||
class Iterator final {
|
||||
std::variant<storage::EdgesIterable::Iterator,
|
||||
std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
|
||||
utils::Allocator<EdgeAccessor>>::iterator>
|
||||
it_;
|
||||
|
||||
public:
|
||||
explicit Iterator(storage::EdgesIterable::Iterator it) : it_(std::move(it)) {}
|
||||
explicit Iterator(std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
|
||||
utils::Allocator<EdgeAccessor>>::iterator it)
|
||||
: it_(it) {}
|
||||
|
||||
EdgeAccessor operator*() const {
|
||||
return std::visit([](auto &it_) { return EdgeAccessor(*it_); }, it_);
|
||||
}
|
||||
|
||||
Iterator &operator++() {
|
||||
std::visit([](auto &it_) { ++it_; }, it_);
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool operator==(const Iterator &other) const { return it_ == other.it_; }
|
||||
|
||||
bool operator!=(const Iterator &other) const { return !(other == *this); }
|
||||
};
|
||||
|
||||
explicit EdgesIterable(storage::EdgesIterable iterable) : iterable_(std::move(iterable)) {}
|
||||
explicit EdgesIterable(std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
|
||||
utils::Allocator<EdgeAccessor>> *edges)
|
||||
: iterable_(edges) {}
|
||||
|
||||
Iterator begin() {
|
||||
return std::visit(
|
||||
memgraph::utils::Overloaded{
|
||||
[](storage::EdgesIterable &iterable_) { return Iterator(iterable_.begin()); },
|
||||
[](std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
|
||||
utils::Allocator<EdgeAccessor>> *iterable_) { return Iterator(iterable_->begin()); }},
|
||||
iterable_);
|
||||
}
|
||||
|
||||
Iterator end() {
|
||||
return std::visit(
|
||||
memgraph::utils::Overloaded{
|
||||
[](storage::EdgesIterable &iterable_) { return Iterator(iterable_.end()); },
|
||||
[](std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
|
||||
utils::Allocator<EdgeAccessor>> *iterable_) { return Iterator(iterable_->end()); }},
|
||||
iterable_);
|
||||
}
|
||||
};
|
||||
|
||||
class DbAccessor final {
|
||||
storage::Storage::Accessor *accessor_;
|
||||
|
||||
@ -416,6 +472,10 @@ class DbAccessor final {
|
||||
return VerticesIterable(accessor_->Vertices(label, property, lower, upper, view));
|
||||
}
|
||||
|
||||
EdgesIterable Edges(storage::View view, storage::EdgeTypeId edge_type) {
|
||||
return EdgesIterable(accessor_->Edges(edge_type, view));
|
||||
}
|
||||
|
||||
VertexAccessor InsertVertex() { return VertexAccessor(accessor_->CreateVertex()); }
|
||||
|
||||
storage::Result<EdgeAccessor> InsertEdge(VertexAccessor *from, VertexAccessor *to,
|
||||
@ -572,6 +632,8 @@ class DbAccessor final {
|
||||
return accessor_->LabelPropertyIndexExists(label, prop);
|
||||
}
|
||||
|
||||
bool EdgeTypeIndexExists(storage::EdgeTypeId edge_type) const { return accessor_->EdgeTypeIndexExists(edge_type); }
|
||||
|
||||
std::optional<storage::LabelIndexStats> GetIndexStats(const storage::LabelId &label) const {
|
||||
return accessor_->GetIndexStats(label);
|
||||
}
|
||||
@ -638,6 +700,10 @@ class DbAccessor final {
|
||||
return accessor_->CreateIndex(label, property);
|
||||
}
|
||||
|
||||
utils::BasicResult<storage::StorageIndexDefinitionError, void> CreateIndex(storage::EdgeTypeId edge_type) {
|
||||
return accessor_->CreateIndex(edge_type);
|
||||
}
|
||||
|
||||
utils::BasicResult<storage::StorageIndexDefinitionError, void> DropIndex(storage::LabelId label) {
|
||||
return accessor_->DropIndex(label);
|
||||
}
|
||||
@ -647,6 +713,10 @@ class DbAccessor final {
|
||||
return accessor_->DropIndex(label, property);
|
||||
}
|
||||
|
||||
utils::BasicResult<storage::StorageIndexDefinitionError, void> DropIndex(storage::EdgeTypeId edge_type) {
|
||||
return accessor_->DropIndex(edge_type);
|
||||
}
|
||||
|
||||
utils::BasicResult<storage::StorageExistenceConstraintDefinitionError, void> CreateExistenceConstraint(
|
||||
storage::LabelId label, storage::PropertyId property) {
|
||||
return accessor_->CreateExistenceConstraint(label, property);
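
A short sketch of how the Edges() overload and the EdgesIterable wrapper added to DbAccessor above are meant to be consumed; the edge-type name "KNOWS" and the NameToEdgeType helper are assumptions used only for illustration:

#include <cstddef>
#include "query/db_accessor.hpp"

std::size_t CountEdgesOfType(memgraph::query::DbAccessor &dba) {
  // NameToEdgeType is assumed to mirror the existing NameToLabel/NameToProperty helpers.
  auto const edge_type = dba.NameToEdgeType("KNOWS");
  std::size_t count = 0;
  // Iterates every edge of the given type, regardless of the labels on its endpoints;
  // this is the same iterable that ScanAllByEdgeType's cursor pulls from.
  for (auto edge : dba.Edges(memgraph::storage::View::OLD, edge_type)) {
    (void)edge;  // each element is an EdgeAccessor of the requested type
    ++count;
  }
  return count;
}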
|
||||
|
@ -242,6 +242,10 @@ void DumpLabelIndex(std::ostream *os, query::DbAccessor *dba, const storage::Lab
|
||||
*os << "CREATE INDEX ON :" << EscapeName(dba->LabelToName(label)) << ";";
|
||||
}
|
||||
|
||||
void DumpEdgeTypeIndex(std::ostream *os, query::DbAccessor *dba, const storage::EdgeTypeId edge_type) {
|
||||
*os << "CREATE EDGE INDEX ON :" << EscapeName(dba->EdgeTypeToName(edge_type)) << ";";
|
||||
}
|
||||
|
||||
void DumpLabelPropertyIndex(std::ostream *os, query::DbAccessor *dba, storage::LabelId label,
|
||||
storage::PropertyId property) {
|
||||
*os << "CREATE INDEX ON :" << EscapeName(dba->LabelToName(label)) << "(" << EscapeName(dba->PropertyToName(property))
|
||||
@ -297,7 +301,9 @@ PullPlanDump::PullPlanDump(DbAccessor *dba, dbms::DatabaseAccess db_acc)
|
||||
// Internal index cleanup
|
||||
CreateInternalIndexCleanupPullChunk(),
|
||||
// Dump all triggers
|
||||
CreateTriggersPullChunk()} {}
|
||||
CreateTriggersPullChunk(),
|
||||
// Dump all edge-type indices
|
||||
CreateEdgeTypeIndicesPullChunk()} {}
|
||||
|
||||
bool PullPlanDump::Pull(AnyStream *stream, std::optional<int> n) {
|
||||
// Iterate all functions that stream some results.
|
||||
@ -352,6 +358,33 @@ PullPlanDump::PullChunk PullPlanDump::CreateLabelIndicesPullChunk() {
|
||||
};
|
||||
}
|
||||
|
||||
PullPlanDump::PullChunk PullPlanDump::CreateEdgeTypeIndicesPullChunk() {
|
||||
// Dump all edge-type indices
|
||||
return [this, global_index = 0U](AnyStream *stream, std::optional<int> n) mutable -> std::optional<size_t> {
|
||||
// Delay the construction of indices vectors
|
||||
if (!indices_info_) {
|
||||
indices_info_.emplace(dba_->ListAllIndices());
|
||||
}
|
||||
const auto &edge_type = indices_info_->edge_type;
|
||||
|
||||
size_t local_counter = 0;
|
||||
while (global_index < edge_type.size() && (!n || local_counter < *n)) {
|
||||
std::ostringstream os;
|
||||
DumpEdgeTypeIndex(&os, dba_, edge_type[global_index]);
|
||||
stream->Result({TypedValue(os.str())});
|
||||
|
||||
++global_index;
|
||||
++local_counter;
|
||||
}
|
||||
|
||||
if (global_index == edge_type.size()) {
|
||||
return local_counter;
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
};
|
||||
}
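
For orientation, a rough, self-contained sketch of the single-column row this new chunk streams for each edge-type index returned by ListAllIndices(); the edge type "KNOWS" is hypothetical and the exact escaping is whatever EscapeName produces:

#include <sstream>
#include <string>

std::string ExampleEdgeTypeIndexDumpRow() {
  // Same shape as the statement DumpEdgeTypeIndex builds above.
  std::ostringstream os;
  os << "CREATE EDGE INDEX ON :" << "KNOWS" << ";";
  return os.str();  // "CREATE EDGE INDEX ON :KNOWS;"
}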
|
||||
|
||||
PullPlanDump::PullChunk PullPlanDump::CreateLabelPropertyIndicesPullChunk() {
|
||||
return [this, global_index = 0U](AnyStream *stream, std::optional<int> n) mutable -> std::optional<size_t> {
|
||||
// Delay the construction of indices vectors
|
||||
|
@ -63,5 +63,6 @@ struct PullPlanDump {
|
||||
PullChunk CreateDropInternalIndexPullChunk();
|
||||
PullChunk CreateInternalIndexCleanupPullChunk();
|
||||
PullChunk CreateTriggersPullChunk();
|
||||
PullChunk CreateEdgeTypeIndicesPullChunk();
|
||||
};
|
||||
} // namespace memgraph::query
|
||||
|
@ -186,6 +186,9 @@ constexpr utils::TypeInfo query::ProfileQuery::kType{utils::TypeId::AST_PROFILE_
|
||||
|
||||
constexpr utils::TypeInfo query::IndexQuery::kType{utils::TypeId::AST_INDEX_QUERY, "IndexQuery", &query::Query::kType};
|
||||
|
||||
constexpr utils::TypeInfo query::EdgeIndexQuery::kType{utils::TypeId::AST_EDGE_INDEX_QUERY, "EdgeIndexQuery",
|
||||
&query::Query::kType};
|
||||
|
||||
constexpr utils::TypeInfo query::Create::kType{utils::TypeId::AST_CREATE, "Create", &query::Clause::kType};
|
||||
|
||||
constexpr utils::TypeInfo query::CallProcedure::kType{utils::TypeId::AST_CALL_PROCEDURE, "CallProcedure",
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include "query/interpret/awesome_memgraph_functions.hpp"
|
||||
#include "query/typed_value.hpp"
|
||||
#include "storage/v2/property_value.hpp"
|
||||
#include "utils/exceptions.hpp"
|
||||
#include "utils/typeinfo.hpp"
|
||||
|
||||
namespace memgraph::query {
|
||||
@ -2223,6 +2224,34 @@ class IndexQuery : public memgraph::query::Query {
|
||||
friend class AstStorage;
|
||||
};
|
||||
|
||||
class EdgeIndexQuery : public memgraph::query::Query {
|
||||
public:
|
||||
static const utils::TypeInfo kType;
|
||||
const utils::TypeInfo &GetTypeInfo() const override { return kType; }
|
||||
|
||||
enum class Action { CREATE, DROP };
|
||||
|
||||
EdgeIndexQuery() = default;
|
||||
|
||||
DEFVISITABLE(QueryVisitor<void>);
|
||||
|
||||
memgraph::query::EdgeIndexQuery::Action action_;
|
||||
memgraph::query::EdgeTypeIx edge_type_;
|
||||
|
||||
EdgeIndexQuery *Clone(AstStorage *storage) const override {
|
||||
EdgeIndexQuery *object = storage->Create<EdgeIndexQuery>();
|
||||
object->action_ = action_;
|
||||
object->edge_type_ = storage->GetEdgeTypeIx(edge_type_.name);
|
||||
return object;
|
||||
}
|
||||
|
||||
protected:
|
||||
EdgeIndexQuery(Action action, EdgeTypeIx edge_type) : action_(action), edge_type_(edge_type) {}
|
||||
|
||||
private:
|
||||
friend class AstStorage;
|
||||
};
|
||||
|
||||
class Create : public memgraph::query::Clause {
|
||||
public:
|
||||
static const utils::TypeInfo kType;
|
||||
@ -3586,7 +3615,7 @@ class PatternComprehension : public memgraph::query::Expression {
|
||||
bool Accept(HierarchicalTreeVisitor &visitor) override {
|
||||
if (visitor.PreVisit(*this)) {
|
||||
if (variable_) {
|
||||
variable_->Accept(visitor);
|
||||
throw utils::NotYetImplemented("Variable in pattern comprehension.");
|
||||
}
|
||||
pattern_->Accept(visitor);
|
||||
if (filter_) {
|
||||
@ -3615,7 +3644,8 @@ class PatternComprehension : public memgraph::query::Expression {
|
||||
int32_t symbol_pos_{-1};
|
||||
|
||||
PatternComprehension *Clone(AstStorage *storage) const override {
|
||||
PatternComprehension *object = storage->Create<PatternComprehension>();
|
||||
auto *object = storage->Create<PatternComprehension>();
|
||||
object->variable_ = variable_ ? variable_->Clone(storage) : nullptr;
|
||||
object->pattern_ = pattern_ ? pattern_->Clone(storage) : nullptr;
|
||||
object->filter_ = filter_ ? filter_->Clone(storage) : nullptr;
|
||||
object->resultExpr_ = resultExpr_ ? resultExpr_->Clone(storage) : nullptr;
|
||||
@ -3625,7 +3655,8 @@ class PatternComprehension : public memgraph::query::Expression {
|
||||
}
|
||||
|
||||
protected:
|
||||
PatternComprehension(Identifier *variable, Pattern *pattern) : variable_(variable), pattern_(pattern) {}
|
||||
PatternComprehension(Identifier *variable, Pattern *pattern, Where *filter, Expression *resultExpr)
|
||||
: variable_(variable), pattern_(pattern), filter_(filter), resultExpr_(resultExpr) {}
|
||||
|
||||
private:
|
||||
friend class AstStorage;
|
||||
|
@ -82,6 +82,7 @@ class AuthQuery;
|
||||
class ExplainQuery;
|
||||
class ProfileQuery;
|
||||
class IndexQuery;
|
||||
class EdgeIndexQuery;
|
||||
class DatabaseInfoQuery;
|
||||
class SystemInfoQuery;
|
||||
class ConstraintQuery;
|
||||
@ -143,11 +144,11 @@ class ExpressionVisitor
|
||||
|
||||
template <class TResult>
|
||||
class QueryVisitor
|
||||
: public utils::Visitor<TResult, CypherQuery, ExplainQuery, ProfileQuery, IndexQuery, AuthQuery, DatabaseInfoQuery,
|
||||
SystemInfoQuery, ConstraintQuery, DumpQuery, ReplicationQuery, LockPathQuery,
|
||||
FreeMemoryQuery, TriggerQuery, IsolationLevelQuery, CreateSnapshotQuery, StreamQuery,
|
||||
SettingQuery, VersionQuery, ShowConfigQuery, TransactionQueueQuery, StorageModeQuery,
|
||||
AnalyzeGraphQuery, MultiDatabaseQuery, ShowDatabasesQuery, EdgeImportModeQuery,
|
||||
CoordinatorQuery> {};
|
||||
: public utils::Visitor<TResult, CypherQuery, ExplainQuery, ProfileQuery, IndexQuery, EdgeIndexQuery, AuthQuery,
|
||||
DatabaseInfoQuery, SystemInfoQuery, ConstraintQuery, DumpQuery, ReplicationQuery,
|
||||
LockPathQuery, FreeMemoryQuery, TriggerQuery, IsolationLevelQuery, CreateSnapshotQuery,
|
||||
StreamQuery, SettingQuery, VersionQuery, ShowConfigQuery, TransactionQueueQuery,
|
||||
StorageModeQuery, AnalyzeGraphQuery, MultiDatabaseQuery, ShowDatabasesQuery,
|
||||
EdgeImportModeQuery, CoordinatorQuery> {};
|
||||
|
||||
} // namespace memgraph::query
|
||||
|
@ -265,6 +265,27 @@ antlrcpp::Any CypherMainVisitor::visitDropIndex(MemgraphCypher::DropIndexContext
|
||||
return index_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitEdgeIndexQuery(MemgraphCypher::EdgeIndexQueryContext *ctx) {
|
||||
MG_ASSERT(ctx->children.size() == 1, "EdgeIndexQuery should have exactly one child!");
|
||||
auto *index_query = std::any_cast<EdgeIndexQuery *>(ctx->children[0]->accept(this));
|
||||
query_ = index_query;
|
||||
return index_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitCreateEdgeIndex(MemgraphCypher::CreateEdgeIndexContext *ctx) {
|
||||
auto *index_query = storage_->Create<EdgeIndexQuery>();
|
||||
index_query->action_ = EdgeIndexQuery::Action::CREATE;
|
||||
index_query->edge_type_ = AddEdgeType(std::any_cast<std::string>(ctx->labelName()->accept(this)));
|
||||
return index_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitDropEdgeIndex(MemgraphCypher::DropEdgeIndexContext *ctx) {
|
||||
auto *index_query = storage_->Create<EdgeIndexQuery>();
|
||||
index_query->action_ = EdgeIndexQuery::Action::DROP;
|
||||
index_query->edge_type_ = AddEdgeType(std::any_cast<std::string>(ctx->labelName()->accept(this)));
|
||||
return index_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitAuthQuery(MemgraphCypher::AuthQueryContext *ctx) {
|
||||
MG_ASSERT(ctx->children.size() == 1, "AuthQuery should have exactly one child!");
|
||||
auto *auth_query = std::any_cast<AuthQuery *>(ctx->children[0]->accept(this));
|
||||
|
@ -148,6 +148,11 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
|
||||
*/
|
||||
antlrcpp::Any visitIndexQuery(MemgraphCypher::IndexQueryContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return IndexQuery*
|
||||
*/
|
||||
antlrcpp::Any visitEdgeIndexQuery(MemgraphCypher::EdgeIndexQueryContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return ExplainQuery*
|
||||
*/
|
||||
@ -499,6 +504,16 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
|
||||
*/
|
||||
antlrcpp::Any visitDropIndex(MemgraphCypher::DropIndexContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return EdgeIndexQuery*
|
||||
*/
|
||||
antlrcpp::Any visitCreateEdgeIndex(MemgraphCypher::CreateEdgeIndexContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return EdgeIndexQuery*
|
||||
*/
|
||||
antlrcpp::Any visitDropEdgeIndex(MemgraphCypher::DropEdgeIndexContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
|
@ -133,6 +133,7 @@ symbolicName : UnescapedSymbolicName
|
||||
|
||||
query : cypherQuery
|
||||
| indexQuery
|
||||
| edgeIndexQuery
|
||||
| explainQuery
|
||||
| profileQuery
|
||||
| databaseInfoQuery
|
||||
@ -527,3 +528,9 @@ showDatabase : SHOW DATABASE ;
|
||||
showDatabases : SHOW DATABASES ;
|
||||
|
||||
edgeImportModeQuery : EDGE IMPORT MODE ( ACTIVE | INACTIVE ) ;
|
||||
|
||||
createEdgeIndex : CREATE EDGE INDEX ON ':' labelName ;
|
||||
|
||||
dropEdgeIndex : DROP EDGE INDEX ON ':' labelName ;
|
||||
|
||||
edgeIndexQuery : createEdgeIndex | dropEdgeIndex ;
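
Concretely, the new rules accept statements of the form CREATE EDGE INDEX ON :KNOWS; and DROP EDGE INDEX ON :KNOWS; (the edge-type name is illustrative). A minimal sketch of the AST node visitCreateEdgeIndex builds for the CREATE form, using only the EdgeIndexQuery members shown earlier in this diff; the include path follows the repository layout:

#include "query/frontend/ast/ast.hpp"

memgraph::query::EdgeIndexQuery *BuildCreateEdgeIndexAst(memgraph::query::AstStorage &storage) {
  auto *query = storage.Create<memgraph::query::EdgeIndexQuery>();
  query->action_ = memgraph::query::EdgeIndexQuery::Action::CREATE;  // Action::DROP for the second rule
  query->edge_type_ = storage.GetEdgeTypeIx("KNOWS");                // hypothetical edge type
  return query;
}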
|
||||
|
@ -27,6 +27,8 @@ class PrivilegeExtractor : public QueryVisitor<void>, public HierarchicalTreeVis
|
||||
|
||||
void Visit(IndexQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }
|
||||
|
||||
void Visit(EdgeIndexQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }
|
||||
|
||||
void Visit(AnalyzeGraphQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }
|
||||
|
||||
void Visit(AuthQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::AUTH); }
|
||||
|
@ -53,6 +53,8 @@ class Symbol {
|
||||
bool user_declared() const { return user_declared_; }
|
||||
int token_position() const { return token_position_; }
|
||||
|
||||
bool IsSymbolAnonym() const { return name_.substr(0U, 4U) == "anon"; }
|
||||
|
||||
std::string name_;
|
||||
int64_t position_;
|
||||
bool user_declared_{true};
|
||||
|
@ -721,6 +721,32 @@ bool SymbolGenerator::PostVisit(EdgeAtom &) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool SymbolGenerator::PreVisit(PatternComprehension &pc) {
|
||||
auto &scope = scopes_.back();
|
||||
|
||||
if (scope.in_set_property) {
|
||||
throw utils::NotYetImplemented("Pattern Comprehension cannot be used within SET clause.!");
|
||||
}
|
||||
|
||||
if (scope.in_with) {
|
||||
throw utils::NotYetImplemented("Pattern Comprehension cannot be used within WITH!");
|
||||
}
|
||||
|
||||
if (scope.in_reduce) {
|
||||
throw utils::NotYetImplemented("Pattern Comprehension cannot be used within REDUCE!");
|
||||
}
|
||||
|
||||
if (scope.num_if_operators) {
|
||||
throw utils::NotYetImplemented("IF operator cannot be used with Pattern Comprehension!");
|
||||
}
|
||||
|
||||
const auto &symbol = CreateAnonymousSymbol();
|
||||
pc.MapTo(symbol);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool SymbolGenerator::PostVisit(PatternComprehension & /*pc*/) { return true; }
|
||||
|
||||
void SymbolGenerator::VisitWithIdentifiers(Expression *expr, const std::vector<Identifier *> &identifiers) {
|
||||
auto &scope = scopes_.back();
|
||||
std::vector<std::pair<std::optional<Symbol>, Identifier *>> prev_symbols;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -97,6 +97,8 @@ class SymbolGenerator : public HierarchicalTreeVisitor {
|
||||
bool PostVisit(NodeAtom &) override;
|
||||
bool PreVisit(EdgeAtom &) override;
|
||||
bool PostVisit(EdgeAtom &) override;
|
||||
bool PreVisit(PatternComprehension &) override;
|
||||
bool PostVisit(PatternComprehension &) override;
|
||||
|
||||
private:
|
||||
// Scope stores the state of where we are when visiting the AST and a map of
|
||||
|
@ -355,7 +355,7 @@ class ReplQueryHandler {
|
||||
const auto replication_config =
|
||||
replication::ReplicationClientConfig{.name = name,
|
||||
.mode = repl_mode,
|
||||
.ip_address = ip,
|
||||
.ip_address = std::string(ip),
|
||||
.port = port,
|
||||
.replica_check_frequency = replica_check_frequency,
|
||||
.ssl = std::nullopt};
|
||||
@ -454,12 +454,12 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
|
||||
const auto repl_config = coordination::CoordinatorClientConfig::ReplicationClientInfo{
|
||||
.instance_name = std::string(instance_name),
|
||||
.replication_mode = convertFromCoordinatorToReplicationMode(sync_mode),
|
||||
.replication_ip_address = replication_ip,
|
||||
.replication_ip_address = std::string(replication_ip),
|
||||
.replication_port = replication_port};
|
||||
|
||||
auto coordinator_client_config =
|
||||
coordination::CoordinatorClientConfig{.instance_name = std::string(instance_name),
|
||||
.ip_address = coordinator_server_ip,
|
||||
.ip_address = std::string(coordinator_server_ip),
|
||||
.port = coordinator_server_port,
|
||||
.instance_health_check_frequency_sec = instance_check_frequency,
|
||||
.instance_down_timeout_sec = instance_down_timeout,
|
||||
@ -497,7 +497,7 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
|
||||
auto const maybe_ip_and_port = io::network::Endpoint::ParseSocketOrAddress(raft_socket_address);
|
||||
if (maybe_ip_and_port) {
|
||||
auto const [ip, port] = *maybe_ip_and_port;
|
||||
spdlog::info("Adding instance {} with raft socket address {}:{}.", raft_server_id, port, ip);
|
||||
spdlog::info("Adding instance {} with raft socket address {}:{}.", raft_server_id, ip, port);
|
||||
coordinator_handler_.AddCoordinatorInstance(raft_server_id, port, ip);
|
||||
} else {
|
||||
spdlog::error("Invalid raft socket address {}.", raft_socket_address);
|
||||
@ -1212,7 +1212,7 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
|
||||
};
|
||||
|
||||
notifications->emplace_back(
|
||||
SeverityLevel::INFO, NotificationCode::REGISTER_COORDINATOR_SERVER,
|
||||
SeverityLevel::INFO, NotificationCode::REGISTER_REPLICATION_INSTANCE,
|
||||
fmt::format("Coordinator has registered coordinator server on {} for instance {}.",
|
||||
coordinator_socket_address_tv.ValueString(), coordinator_query->instance_name_));
|
||||
return callback;
|
||||
@ -1263,7 +1263,7 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
|
||||
TypedValue{status.coord_socket_address}, TypedValue{status.health}, TypedValue{status.cluster_role}};
|
||||
};
|
||||
|
||||
return utils::fmap(converter, instances);
|
||||
return utils::fmap(instances, converter);
|
||||
};
|
||||
return callback;
|
||||
}
|
||||
@ -2679,6 +2679,75 @@ PreparedQuery PrepareIndexQuery(ParsedQuery parsed_query, bool in_explicit_trans
|
||||
RWType::W};
|
||||
}
|
||||
|
||||
PreparedQuery PrepareEdgeIndexQuery(ParsedQuery parsed_query, bool in_explicit_transaction,
|
||||
std::vector<Notification> *notifications, CurrentDB ¤t_db) {
|
||||
if (in_explicit_transaction) {
|
||||
throw IndexInMulticommandTxException();
|
||||
}
|
||||
|
||||
auto *index_query = utils::Downcast<EdgeIndexQuery>(parsed_query.query);
|
||||
std::function<void(Notification &)> handler;
|
||||
|
||||
MG_ASSERT(current_db.db_acc_, "Index query expects a current DB");
|
||||
auto &db_acc = *current_db.db_acc_;
|
||||
|
||||
MG_ASSERT(current_db.db_transactional_accessor_, "Index query expects a current DB transaction");
|
||||
auto *dba = &*current_db.execution_db_accessor_;
|
||||
|
||||
auto invalidate_plan_cache = [plan_cache = db_acc->plan_cache()] {
|
||||
plan_cache->WithLock([&](auto &cache) { cache.reset(); });
|
||||
};
|
||||
|
||||
auto *storage = db_acc->storage();
|
||||
auto edge_type = storage->NameToEdgeType(index_query->edge_type_.name);
|
||||
|
||||
Notification index_notification(SeverityLevel::INFO);
|
||||
switch (index_query->action_) {
|
||||
case EdgeIndexQuery::Action::CREATE: {
|
||||
index_notification.code = NotificationCode::CREATE_INDEX;
|
||||
index_notification.title = fmt::format("Created index on edge-type {}.", index_query->edge_type_.name);
|
||||
|
||||
handler = [dba, edge_type, label_name = index_query->edge_type_.name,
|
||||
invalidate_plan_cache = std::move(invalidate_plan_cache)](Notification &index_notification) {
|
||||
auto maybe_index_error = dba->CreateIndex(edge_type);
|
||||
utils::OnScopeExit invalidator(invalidate_plan_cache);
|
||||
|
||||
if (maybe_index_error.HasError()) {
|
||||
index_notification.code = NotificationCode::EXISTENT_INDEX;
|
||||
index_notification.title = fmt::format("Index on edge-type {} already exists.", label_name);
|
||||
}
|
||||
};
|
||||
break;
|
||||
}
|
||||
case EdgeIndexQuery::Action::DROP: {
|
||||
index_notification.code = NotificationCode::DROP_INDEX;
|
||||
index_notification.title = fmt::format("Dropped index on edge-type {}.", index_query->edge_type_.name);
|
||||
handler = [dba, edge_type, label_name = index_query->edge_type_.name,
|
||||
invalidate_plan_cache = std::move(invalidate_plan_cache)](Notification &index_notification) {
|
||||
auto maybe_index_error = dba->DropIndex(edge_type);
|
||||
utils::OnScopeExit invalidator(invalidate_plan_cache);
|
||||
|
||||
if (maybe_index_error.HasError()) {
|
||||
index_notification.code = NotificationCode::NONEXISTENT_INDEX;
|
||||
index_notification.title = fmt::format("Index on edge-type {} doesn't exist.", label_name);
|
||||
}
|
||||
};
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return PreparedQuery{
|
||||
{},
|
||||
std::move(parsed_query.required_privileges),
|
||||
[handler = std::move(handler), notifications, index_notification = std::move(index_notification)](
|
||||
AnyStream * /*stream*/, std::optional<int> /*unused*/) mutable {
|
||||
handler(index_notification);
|
||||
notifications->push_back(index_notification);
|
||||
return QueryHandlerResult::COMMIT;
|
||||
},
|
||||
RWType::W};
|
||||
}
|
||||
|
||||
PreparedQuery PrepareAuthQuery(ParsedQuery parsed_query, bool in_explicit_transaction,
|
||||
InterpreterContext *interpreter_context, Interpreter &interpreter) {
|
||||
if (in_explicit_transaction) {
|
||||
@ -3483,6 +3552,7 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici
|
||||
auto *storage = database->storage();
|
||||
const std::string_view label_index_mark{"label"};
|
||||
const std::string_view label_property_index_mark{"label+property"};
|
||||
const std::string_view edge_type_index_mark{"edge-type"};
|
||||
auto info = dba->ListAllIndices();
|
||||
auto storage_acc = database->Access();
|
||||
std::vector<std::vector<TypedValue>> results;
|
||||
@ -3497,6 +3567,10 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici
|
||||
TypedValue(storage->PropertyToName(item.second)),
|
||||
TypedValue(static_cast<int>(storage_acc->ApproximateVertexCount(item.first, item.second)))});
|
||||
}
|
||||
for (const auto &item : info.edge_type) {
|
||||
results.push_back({TypedValue(edge_type_index_mark), TypedValue(storage->EdgeTypeToName(item)), TypedValue(),
|
||||
TypedValue(static_cast<int>(storage_acc->ApproximateEdgeCount(item)))});
|
||||
}
|
||||
std::sort(results.begin(), results.end(), [&label_index_mark](const auto &record_1, const auto &record_2) {
|
||||
const auto type_1 = record_1[0].ValueString();
|
||||
const auto type_2 = record_2[0].ValueString();
|
||||
@ -4283,13 +4357,14 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
|
||||
utils::Downcast<CypherQuery>(parsed_query.query) || utils::Downcast<ExplainQuery>(parsed_query.query) ||
|
||||
utils::Downcast<ProfileQuery>(parsed_query.query) || utils::Downcast<DumpQuery>(parsed_query.query) ||
|
||||
utils::Downcast<TriggerQuery>(parsed_query.query) || utils::Downcast<AnalyzeGraphQuery>(parsed_query.query) ||
|
||||
utils::Downcast<IndexQuery>(parsed_query.query) || utils::Downcast<DatabaseInfoQuery>(parsed_query.query) ||
|
||||
utils::Downcast<ConstraintQuery>(parsed_query.query);
|
||||
utils::Downcast<IndexQuery>(parsed_query.query) || utils::Downcast<EdgeIndexQuery>(parsed_query.query) ||
|
||||
utils::Downcast<DatabaseInfoQuery>(parsed_query.query) || utils::Downcast<ConstraintQuery>(parsed_query.query);
|
||||
|
||||
if (!in_explicit_transaction_ && requires_db_transaction) {
|
||||
// TODO: ATM only a single database, will change when we have multiple database transactions
|
||||
bool could_commit = utils::Downcast<CypherQuery>(parsed_query.query) != nullptr;
|
||||
bool unique = utils::Downcast<IndexQuery>(parsed_query.query) != nullptr ||
|
||||
utils::Downcast<EdgeIndexQuery>(parsed_query.query) != nullptr ||
|
||||
utils::Downcast<ConstraintQuery>(parsed_query.query) != nullptr ||
|
||||
upper_case_query.find(kSchemaAssert) != std::string::npos;
|
||||
SetupDatabaseTransaction(could_commit, unique);
|
||||
@ -4326,6 +4401,9 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
|
||||
} else if (utils::Downcast<IndexQuery>(parsed_query.query)) {
|
||||
prepared_query = PrepareIndexQuery(std::move(parsed_query), in_explicit_transaction_,
|
||||
&query_execution->notifications, current_db_);
|
||||
} else if (utils::Downcast<EdgeIndexQuery>(parsed_query.query)) {
|
||||
prepared_query = PrepareEdgeIndexQuery(std::move(parsed_query), in_explicit_transaction_,
|
||||
&query_execution->notifications, current_db_);
|
||||
} else if (utils::Downcast<AnalyzeGraphQuery>(parsed_query.query)) {
|
||||
prepared_query = PrepareAnalyzeGraphQuery(std::move(parsed_query), in_explicit_transaction_, current_db_);
|
||||
} else if (utils::Downcast<AuthQuery>(parsed_query.query)) {
|
||||
|
@ -67,8 +67,8 @@ constexpr std::string_view GetCodeString(const NotificationCode code) {
|
||||
case NotificationCode::REGISTER_REPLICA:
|
||||
return "RegisterReplica"sv;
|
||||
#ifdef MG_ENTERPRISE
|
||||
case NotificationCode::REGISTER_COORDINATOR_SERVER:
|
||||
return "RegisterCoordinatorServer"sv;
|
||||
case NotificationCode::REGISTER_REPLICATION_INSTANCE:
|
||||
return "RegisterReplicationInstance"sv;
|
||||
case NotificationCode::ADD_COORDINATOR_INSTANCE:
|
||||
return "AddCoordinatorInstance"sv;
|
||||
case NotificationCode::UNREGISTER_INSTANCE:
|
||||
|
@ -43,7 +43,7 @@ enum class NotificationCode : uint8_t {
|
||||
REPLICA_PORT_WARNING,
|
||||
REGISTER_REPLICA,
|
||||
#ifdef MG_ENTERPRISE
|
||||
REGISTER_COORDINATOR_SERVER, // TODO: (andi) What is this?
|
||||
REGISTER_REPLICATION_INSTANCE,
|
||||
ADD_COORDINATOR_INSTANCE,
|
||||
UNREGISTER_INSTANCE,
|
||||
#endif
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -114,6 +114,9 @@ class PlanHintsProvider final : public HierarchicalLogicalOperatorVisitor {
|
||||
bool PreVisit(ScanAllById & /*unused*/) override { return true; }
|
||||
bool PostVisit(ScanAllById & /*unused*/) override { return true; }
|
||||
|
||||
bool PreVisit(ScanAllByEdgeType & /*unused*/) override { return true; }
|
||||
bool PostVisit(ScanAllByEdgeType & /*unused*/) override { return true; }
|
||||
|
||||
bool PreVisit(ConstructNamedPath & /*unused*/) override { return true; }
|
||||
bool PostVisit(ConstructNamedPath & /*unused*/) override { return true; }
|
||||
|
||||
@ -206,6 +209,14 @@ class PlanHintsProvider final : public HierarchicalLogicalOperatorVisitor {
|
||||
|
||||
bool PostVisit(IndexedJoin & /*unused*/) override { return true; }
|
||||
|
||||
bool PreVisit(RollUpApply &op) override {
|
||||
op.input()->Accept(*this);
|
||||
op.list_collection_branch_->Accept(*this);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(RollUpApply & /*unused*/) override { return true; }
|
||||
|
||||
private:
|
||||
const SymbolTable &symbol_table_;
|
||||
std::vector<std::string> hints_;
|
||||
|
@ -105,6 +105,7 @@ extern const Event ScanAllByLabelPropertyRangeOperator;
|
||||
extern const Event ScanAllByLabelPropertyValueOperator;
|
||||
extern const Event ScanAllByLabelPropertyOperator;
|
||||
extern const Event ScanAllByIdOperator;
|
||||
extern const Event ScanAllByEdgeTypeOperator;
|
||||
extern const Event ExpandOperator;
|
||||
extern const Event ExpandVariableOperator;
|
||||
extern const Event ConstructNamedPathOperator;
|
||||
@ -517,6 +518,60 @@ class ScanAllCursor : public Cursor {
|
||||
const char *op_name_;
|
||||
};
|
||||
|
||||
template <typename TEdgesFun>
|
||||
class ScanAllByEdgeTypeCursor : public Cursor {
|
||||
public:
|
||||
explicit ScanAllByEdgeTypeCursor(const ScanAllByEdgeType &self, Symbol output_symbol, UniqueCursorPtr input_cursor,
|
||||
storage::View view, TEdgesFun get_edges, const char *op_name)
|
||||
: self_(self),
|
||||
output_symbol_(std::move(output_symbol)),
|
||||
input_cursor_(std::move(input_cursor)),
|
||||
view_(view),
|
||||
get_edges_(std::move(get_edges)),
|
||||
op_name_(op_name) {}
|
||||
|
||||
bool Pull(Frame &frame, ExecutionContext &context) override {
|
||||
OOMExceptionEnabler oom_exception;
|
||||
SCOPED_PROFILE_OP_BY_REF(self_);
|
||||
|
||||
AbortCheck(context);
|
||||
|
||||
while (!vertices_ || vertices_it_.value() == vertices_end_it_.value()) {
|
||||
if (!input_cursor_->Pull(frame, context)) return false;
|
||||
auto next_vertices = get_edges_(frame, context);
|
||||
if (!next_vertices) continue;
|
||||
|
||||
vertices_.emplace(std::move(next_vertices.value()));
|
||||
vertices_it_.emplace(vertices_.value().begin());
|
||||
vertices_end_it_.emplace(vertices_.value().end());
|
||||
}
|
||||
|
||||
frame[output_symbol_] = *vertices_it_.value();
|
||||
++vertices_it_.value();
|
||||
return true;
|
||||
}
|
||||
|
||||
void Shutdown() override { input_cursor_->Shutdown(); }
|
||||
|
||||
void Reset() override {
|
||||
input_cursor_->Reset();
|
||||
vertices_ = std::nullopt;
|
||||
vertices_it_ = std::nullopt;
|
||||
vertices_end_it_ = std::nullopt;
|
||||
}
|
||||
|
||||
private:
|
||||
const ScanAllByEdgeType &self_;
|
||||
const Symbol output_symbol_;
|
||||
const UniqueCursorPtr input_cursor_;
|
||||
storage::View view_;
|
||||
TEdgesFun get_edges_;
|
||||
std::optional<typename std::result_of<TEdgesFun(Frame &, ExecutionContext &)>::type::value_type> vertices_;
|
||||
std::optional<decltype(vertices_.value().begin())> vertices_it_;
|
||||
std::optional<decltype(vertices_.value().end())> vertices_end_it_;
|
||||
const char *op_name_;
|
||||
};
|
||||
|
||||
ScanAll::ScanAll(const std::shared_ptr<LogicalOperator> &input, Symbol output_symbol, storage::View view)
|
||||
: input_(input ? input : std::make_shared<Once>()), output_symbol_(std::move(output_symbol)), view_(view) {}
|
||||
|
||||
@ -556,6 +611,33 @@ UniqueCursorPtr ScanAllByLabel::MakeCursor(utils::MemoryResource *mem) const {
|
||||
view_, std::move(vertices), "ScanAllByLabel");
|
||||
}
|
||||
|
||||
ScanAllByEdgeType::ScanAllByEdgeType(const std::shared_ptr<LogicalOperator> &input, Symbol output_symbol,
|
||||
storage::EdgeTypeId edge_type, storage::View view)
|
||||
: input_(input ? input : std::make_shared<Once>()),
|
||||
output_symbol_(std::move(output_symbol)),
|
||||
view_(view),
|
||||
edge_type_(edge_type) {}
|
||||
|
||||
ACCEPT_WITH_INPUT(ScanAllByEdgeType)
|
||||
|
||||
UniqueCursorPtr ScanAllByEdgeType::MakeCursor(utils::MemoryResource *mem) const {
|
||||
memgraph::metrics::IncrementCounter(memgraph::metrics::ScanAllByEdgeTypeOperator);
|
||||
|
||||
auto edges = [this](Frame &, ExecutionContext &context) {
|
||||
auto *db = context.db_accessor;
|
||||
return std::make_optional(db->Edges(view_, edge_type_));
|
||||
};
|
||||
|
||||
return MakeUniqueCursorPtr<ScanAllByEdgeTypeCursor<decltype(edges)>>(
|
||||
mem, *this, output_symbol_, input_->MakeCursor(mem), view_, std::move(edges), "ScanAllByEdgeType");
|
||||
}
|
||||
|
||||
std::vector<Symbol> ScanAllByEdgeType::ModifiedSymbols(const SymbolTable &table) const {
|
||||
auto symbols = input_->ModifiedSymbols(table);
|
||||
symbols.emplace_back(output_symbol_);
|
||||
return symbols;
|
||||
}
|
||||
|
||||
// TODO(buda): Implement ScanAllByLabelProperty operator to iterate over
|
||||
// vertices that have the label and some value for the given property.
|
||||
|
||||
@ -5624,4 +5706,25 @@ UniqueCursorPtr HashJoin::MakeCursor(utils::MemoryResource *mem) const {
|
||||
return MakeUniqueCursorPtr<HashJoinCursor>(mem, *this, mem);
|
||||
}
|
||||
|
||||
RollUpApply::RollUpApply(const std::shared_ptr<LogicalOperator> &input,
|
||||
std::shared_ptr<LogicalOperator> &&second_branch)
|
||||
: input_(input), list_collection_branch_(second_branch) {}
|
||||
|
||||
std::vector<Symbol> RollUpApply::OutputSymbols(const SymbolTable & /*symbol_table*/) const {
|
||||
std::vector<Symbol> symbols;
|
||||
return symbols;
|
||||
}
|
||||
|
||||
std::vector<Symbol> RollUpApply::ModifiedSymbols(const SymbolTable &table) const { return OutputSymbols(table); }
|
||||
|
||||
bool RollUpApply::Accept(HierarchicalLogicalOperatorVisitor &visitor) {
|
||||
if (visitor.PreVisit(*this)) {
|
||||
if (!input_ || !list_collection_branch_) {
|
||||
throw utils::NotYetImplemented("One of the branches in pattern comprehension is null! Please contact support.");
|
||||
}
|
||||
input_->Accept(visitor) && list_collection_branch_->Accept(visitor);
|
||||
}
|
||||
return visitor.PostVisit(*this);
|
||||
}
|
||||
|
||||
} // namespace memgraph::query::plan
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -99,6 +99,7 @@ class ScanAllByLabelPropertyRange;
|
||||
class ScanAllByLabelPropertyValue;
|
||||
class ScanAllByLabelProperty;
|
||||
class ScanAllById;
|
||||
class ScanAllByEdgeType;
|
||||
class Expand;
|
||||
class ExpandVariable;
|
||||
class ConstructNamedPath;
|
||||
@ -130,14 +131,15 @@ class EvaluatePatternFilter;
|
||||
class Apply;
|
||||
class IndexedJoin;
|
||||
class HashJoin;
|
||||
class RollUpApply;
|
||||
|
||||
using LogicalOperatorCompositeVisitor =
|
||||
utils::CompositeVisitor<Once, CreateNode, CreateExpand, ScanAll, ScanAllByLabel, ScanAllByLabelPropertyRange,
|
||||
ScanAllByLabelPropertyValue, ScanAllByLabelProperty, ScanAllById, Expand, ExpandVariable,
|
||||
ConstructNamedPath, Filter, Produce, Delete, SetProperty, SetProperties, SetLabels,
|
||||
RemoveProperty, RemoveLabels, EdgeUniquenessFilter, Accumulate, Aggregate, Skip, Limit,
|
||||
OrderBy, Merge, Optional, Unwind, Distinct, Union, Cartesian, CallProcedure, LoadCsv,
|
||||
Foreach, EmptyResult, EvaluatePatternFilter, Apply, IndexedJoin, HashJoin>;
|
||||
ScanAllByLabelPropertyValue, ScanAllByLabelProperty, ScanAllById, ScanAllByEdgeType, Expand,
|
||||
ExpandVariable, ConstructNamedPath, Filter, Produce, Delete, SetProperty, SetProperties,
|
||||
SetLabels, RemoveProperty, RemoveLabels, EdgeUniquenessFilter, Accumulate, Aggregate, Skip,
|
||||
Limit, OrderBy, Merge, Optional, Unwind, Distinct, Union, Cartesian, CallProcedure, LoadCsv,
|
||||
Foreach, EmptyResult, EvaluatePatternFilter, Apply, IndexedJoin, HashJoin, RollUpApply>;
|
||||
|
||||
using LogicalOperatorLeafVisitor = utils::LeafVisitor<Once>;
|
||||
|
||||
@ -591,6 +593,42 @@ class ScanAllByLabel : public memgraph::query::plan::ScanAll {
|
||||
}
|
||||
};
|
||||
|
||||
class ScanAllByEdgeType : public memgraph::query::plan::LogicalOperator {
|
||||
public:
|
||||
static const utils::TypeInfo kType;
|
||||
const utils::TypeInfo &GetTypeInfo() const override { return kType; }
|
||||
|
||||
ScanAllByEdgeType() = default;
|
||||
ScanAllByEdgeType(const std::shared_ptr<LogicalOperator> &input, Symbol output_symbol, storage::EdgeTypeId edge_type,
|
||||
storage::View view = storage::View::OLD);
|
||||
bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override;
|
||||
UniqueCursorPtr MakeCursor(utils::MemoryResource *) const override;
|
||||
std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override;
|
||||
|
||||
bool HasSingleInput() const override { return true; }
|
||||
std::shared_ptr<LogicalOperator> input() const override { return input_; }
|
||||
void set_input(std::shared_ptr<LogicalOperator> input) override { input_ = input; }
|
||||
|
||||
std::string ToString() const override {
|
||||
return fmt::format("ScanAllByEdgeType ({} :{})", output_symbol_.name(), dba_->EdgeTypeToName(edge_type_));
|
||||
}
|
||||
|
||||
std::shared_ptr<memgraph::query::plan::LogicalOperator> input_;
|
||||
Symbol output_symbol_;
|
||||
storage::View view_;
|
||||
|
||||
storage::EdgeTypeId edge_type_;
|
||||
|
||||
std::unique_ptr<LogicalOperator> Clone(AstStorage *storage) const override {
|
||||
auto object = std::make_unique<ScanAllByEdgeType>();
|
||||
object->input_ = input_ ? input_->Clone(storage) : nullptr;
|
||||
object->output_symbol_ = output_symbol_;
|
||||
object->view_ = view_;
|
||||
object->edge_type_ = edge_type_;
|
||||
return object;
|
||||
}
|
||||
};
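A short construction sketch (not part of this diff) may help when reading the rewriter added later. The SymbolTable::CreateSymbol call and the NameToEdgeType accessor are assumptions about the surrounding Memgraph query engine; only the ScanAllByEdgeType constructor comes from the class above.

// Hedged sketch: build a standalone edge-type scan over all :KNOWS edges.
auto once = std::make_shared<Once>();
Symbol edge_symbol = symbol_table.CreateSymbol("r", /*user_declared=*/true);  // assumed API
storage::EdgeTypeId knows = dba.NameToEdgeType("KNOWS");                      // assumed API
auto scan = std::make_unique<ScanAllByEdgeType>(once, edge_symbol, knows, storage::View::OLD);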
|
||||
|
||||
/// Behaves like @c ScanAll, but produces only vertices with given label and
|
||||
/// property value which is inside a range (inclusive or exclusive).
|
||||
///
|
||||
@ -2634,5 +2672,38 @@ class HashJoin : public memgraph::query::plan::LogicalOperator {
|
||||
}
|
||||
};
|
||||
|
||||
/// The RollUpApply operator executes an expression that takes a pattern as input
/// and returns a list with the content from the matched pattern.
/// It is used for a pattern expression or pattern comprehension in a query.
|
||||
class RollUpApply : public memgraph::query::plan::LogicalOperator {
|
||||
public:
|
||||
static const utils::TypeInfo kType;
|
||||
const utils::TypeInfo &GetTypeInfo() const override { return kType; }
|
||||
|
||||
RollUpApply() = default;
|
||||
RollUpApply(const std::shared_ptr<LogicalOperator> &input, std::shared_ptr<LogicalOperator> &&second_branch);
|
||||
|
||||
bool HasSingleInput() const override { return false; }
|
||||
std::shared_ptr<LogicalOperator> input() const override { return input_; }
|
||||
void set_input(std::shared_ptr<LogicalOperator> input) override { input_ = input; }
|
||||
|
||||
bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override;
|
||||
UniqueCursorPtr MakeCursor(utils::MemoryResource *) const override {
|
||||
throw utils::NotYetImplemented("Execution of Pattern comprehension is currently unsupported.");
|
||||
}
|
||||
std::vector<Symbol> OutputSymbols(const SymbolTable &) const override;
|
||||
std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override;
|
||||
|
||||
std::unique_ptr<LogicalOperator> Clone(AstStorage *storage) const override {
|
||||
auto object = std::make_unique<RollUpApply>();
|
||||
object->input_ = input_ ? input_->Clone(storage) : nullptr;
|
||||
object->list_collection_branch_ = list_collection_branch_ ? list_collection_branch_->Clone(storage) : nullptr;
|
||||
return object;
|
||||
}
|
||||
|
||||
std::shared_ptr<memgraph::query::plan::LogicalOperator> input_;
|
||||
std::shared_ptr<memgraph::query::plan::LogicalOperator> list_collection_branch_;
|
||||
};
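To connect this operator with the planner changes further down in the diff: for a pattern comprehension such as MATCH (n) RETURN [(n)-[:LIKES]->(m) | m.name] AS names, GenReturnBody wraps the main input with a RollUpApply whose second branch produces the list elements. A hedged sketch of that composition, with placeholder inputs that are assumptions:

// Sketch only; both sub-plans would normally come out of PlanMatching and Produce.
std::unique_ptr<LogicalOperator> last_op = /* plan of the enclosing MATCH */ nullptr;
std::shared_ptr<LogicalOperator> list_branch = /* Produce over the comprehension's matching */ nullptr;
last_op = std::make_unique<RollUpApply>(std::move(last_op), std::move(list_branch));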
|
||||
|
||||
} // namespace plan
|
||||
} // namespace memgraph::query
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -49,6 +49,8 @@ constexpr utils::TypeInfo query::plan::ScanAllByLabelProperty::kType{
|
||||
|
||||
constexpr utils::TypeInfo query::plan::ScanAllById::kType{utils::TypeId::SCAN_ALL_BY_ID, "ScanAllById",
|
||||
&query::plan::ScanAll::kType};
|
||||
constexpr utils::TypeInfo query::plan::ScanAllByEdgeType::kType{utils::TypeId::SCAN_ALL_BY_EDGE_TYPE,
|
||||
"ScanAllByEdgeType", &query::plan::ScanAll::kType};
|
||||
|
||||
constexpr utils::TypeInfo query::plan::ExpandCommon::kType{utils::TypeId::EXPAND_COMMON, "ExpandCommon", nullptr};
|
||||
|
||||
@ -154,4 +156,7 @@ constexpr utils::TypeInfo query::plan::IndexedJoin::kType{utils::TypeId::INDEXED
|
||||
|
||||
constexpr utils::TypeInfo query::plan::HashJoin::kType{utils::TypeId::HASH_JOIN, "HashJoin",
|
||||
&query::plan::LogicalOperator::kType};
|
||||
|
||||
constexpr utils::TypeInfo query::plan::RollUpApply::kType{utils::TypeId::ROLLUP_APPLY, "RollUpApply",
|
||||
&query::plan::LogicalOperator::kType};
|
||||
} // namespace memgraph
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include "query/plan/operator.hpp"
|
||||
#include "query/plan/preprocess.hpp"
|
||||
#include "query/plan/pretty_print.hpp"
|
||||
#include "query/plan/rewrite/edge_type_index_lookup.hpp"
|
||||
#include "query/plan/rewrite/index_lookup.hpp"
|
||||
#include "query/plan/rewrite/join.hpp"
|
||||
#include "query/plan/rule_based_planner.hpp"
|
||||
@ -54,8 +55,11 @@ class PostProcessor final {
|
||||
std::unique_ptr<LogicalOperator> Rewrite(std::unique_ptr<LogicalOperator> plan, TPlanningContext *context) {
|
||||
auto index_lookup_plan =
|
||||
RewriteWithIndexLookup(std::move(plan), context->symbol_table, context->ast_storage, context->db, index_hints_);
|
||||
return RewriteWithJoinRewriter(std::move(index_lookup_plan), context->symbol_table, context->ast_storage,
|
||||
context->db);
|
||||
auto join_plan =
|
||||
RewriteWithJoinRewriter(std::move(index_lookup_plan), context->symbol_table, context->ast_storage, context->db);
|
||||
auto edge_index_plan = RewriteWithEdgeTypeIndexRewriter(std::move(join_plan), context->symbol_table,
|
||||
context->ast_storage, context->db);
|
||||
return edge_index_plan;
|
||||
}
|
||||
|
||||
template <class TVertexCounts>
|
||||
|
@ -632,20 +632,20 @@ void AddMatching(const Match &match, SymbolTable &symbol_table, AstStorage &stor
|
||||
|
||||
// If there are any pattern filters, we add those as well
|
||||
for (auto &filter : matching.filters) {
|
||||
PatternFilterVisitor visitor(symbol_table, storage);
|
||||
PatternVisitor visitor(symbol_table, storage);
|
||||
|
||||
filter.expression->Accept(visitor);
|
||||
filter.matchings = visitor.getMatchings();
|
||||
filter.matchings = visitor.getFilterMatchings();
|
||||
}
|
||||
}
|
||||
|
||||
PatternFilterVisitor::PatternFilterVisitor(SymbolTable &symbol_table, AstStorage &storage)
|
||||
PatternVisitor::PatternVisitor(SymbolTable &symbol_table, AstStorage &storage)
|
||||
: symbol_table_(symbol_table), storage_(storage) {}
|
||||
PatternFilterVisitor::PatternFilterVisitor(const PatternFilterVisitor &) = default;
|
||||
PatternFilterVisitor::PatternFilterVisitor(PatternFilterVisitor &&) noexcept = default;
|
||||
PatternFilterVisitor::~PatternFilterVisitor() = default;
|
||||
PatternVisitor::PatternVisitor(const PatternVisitor &) = default;
|
||||
PatternVisitor::PatternVisitor(PatternVisitor &&) noexcept = default;
|
||||
PatternVisitor::~PatternVisitor() = default;
|
||||
|
||||
void PatternFilterVisitor::Visit(Exists &op) {
|
||||
void PatternVisitor::Visit(Exists &op) {
|
||||
std::vector<Pattern *> patterns;
|
||||
patterns.push_back(op.pattern_);
|
||||
|
||||
@ -655,10 +655,14 @@ void PatternFilterVisitor::Visit(Exists &op) {
|
||||
filter_matching.type = PatternFilterType::EXISTS;
|
||||
filter_matching.symbol = std::make_optional<Symbol>(symbol_table_.at(op));
|
||||
|
||||
matchings_.push_back(std::move(filter_matching));
|
||||
filter_matchings_.push_back(std::move(filter_matching));
|
||||
}
|
||||
|
||||
std::vector<FilterMatching> PatternFilterVisitor::getMatchings() { return matchings_; }
|
||||
std::vector<FilterMatching> PatternVisitor::getFilterMatchings() { return filter_matchings_; }
|
||||
|
||||
std::vector<PatternComprehensionMatching> PatternVisitor::getPatternComprehensionMatchings() {
|
||||
return pattern_comprehension_matchings_;
|
||||
}
|
||||
|
||||
static void ParseForeach(query::Foreach &foreach, SingleQueryPart &query_part, AstStorage &storage,
|
||||
SymbolTable &symbol_table) {
|
||||
@ -672,6 +676,30 @@ static void ParseForeach(query::Foreach &foreach, SingleQueryPart &query_part, A
|
||||
}
|
||||
}
|
||||
|
||||
static void ParseReturn(query::Return &ret, AstStorage &storage, SymbolTable &symbol_table,
|
||||
std::unordered_map<std::string, PatternComprehensionMatching> &matchings) {
|
||||
PatternVisitor visitor(symbol_table, storage);
|
||||
|
||||
for (auto *expr : ret.body_.named_expressions) {
|
||||
expr->Accept(visitor);
|
||||
auto pattern_comprehension_matchings = visitor.getPatternComprehensionMatchings();
|
||||
for (auto &matching : pattern_comprehension_matchings) {
|
||||
matchings.emplace(expr->name_, matching);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void PatternVisitor::Visit(NamedExpression &op) { op.expression_->Accept(*this); }
|
||||
|
||||
void PatternVisitor::Visit(PatternComprehension &op) {
|
||||
PatternComprehensionMatching matching;
|
||||
AddMatching({op.pattern_}, op.filter_, symbol_table_, storage_, matching);
|
||||
matching.result_expr = storage_.Create<NamedExpression>(symbol_table_.at(op).name(), op.resultExpr_);
|
||||
matching.result_expr->MapTo(symbol_table_.at(op));
|
||||
|
||||
pattern_comprehension_matchings_.push_back(std::move(matching));
|
||||
}
|
||||
|
||||
// Converts a Query to multiple QueryParts. In the process new Ast nodes may be
|
||||
// created, e.g. filter expressions.
|
||||
std::vector<SingleQueryPart> CollectSingleQueryParts(SymbolTable &symbol_table, AstStorage &storage,
|
||||
@ -703,7 +731,8 @@ std::vector<SingleQueryPart> CollectSingleQueryParts(SymbolTable &symbol_table,
|
||||
// This query part is done, continue with a new one.
|
||||
query_parts.emplace_back(SingleQueryPart{});
|
||||
query_part = &query_parts.back();
|
||||
} else if (utils::IsSubtype(*clause, Return::kType)) {
|
||||
} else if (auto *ret = utils::Downcast<Return>(clause)) {
|
||||
ParseReturn(*ret, storage, symbol_table, query_part->pattern_comprehension_matchings);
|
||||
return query_parts;
|
||||
}
|
||||
}
|
||||
|
@ -153,19 +153,20 @@ struct Expansion {
|
||||
ExpansionGroupId expansion_group_id = ExpansionGroupId();
|
||||
};
|
||||
|
||||
struct PatternComprehensionMatching;
|
||||
struct FilterMatching;
|
||||
|
||||
enum class PatternFilterType { EXISTS };
|
||||
|
||||
/// Collects matchings from filters that include patterns
|
||||
class PatternFilterVisitor : public ExpressionVisitor<void> {
|
||||
/// Collects matchings that include patterns
|
||||
class PatternVisitor : public ExpressionVisitor<void> {
|
||||
public:
|
||||
explicit PatternFilterVisitor(SymbolTable &symbol_table, AstStorage &storage);
|
||||
PatternFilterVisitor(const PatternFilterVisitor &);
|
||||
PatternFilterVisitor &operator=(const PatternFilterVisitor &) = delete;
|
||||
PatternFilterVisitor(PatternFilterVisitor &&) noexcept;
|
||||
PatternFilterVisitor &operator=(PatternFilterVisitor &&) noexcept = delete;
|
||||
~PatternFilterVisitor() override;
|
||||
explicit PatternVisitor(SymbolTable &symbol_table, AstStorage &storage);
|
||||
PatternVisitor(const PatternVisitor &);
|
||||
PatternVisitor &operator=(const PatternVisitor &) = delete;
|
||||
PatternVisitor(PatternVisitor &&) noexcept;
|
||||
PatternVisitor &operator=(PatternVisitor &&) noexcept = delete;
|
||||
~PatternVisitor() override;
|
||||
|
||||
using ExpressionVisitor<void>::Visit;
|
||||
|
||||
@ -233,18 +234,22 @@ class PatternFilterVisitor : public ExpressionVisitor<void> {
|
||||
void Visit(PropertyLookup &op) override{};
|
||||
void Visit(AllPropertiesLookup &op) override{};
|
||||
void Visit(ParameterLookup &op) override{};
|
||||
void Visit(NamedExpression &op) override{};
|
||||
void Visit(RegexMatch &op) override{};
|
||||
void Visit(PatternComprehension &op) override{};
|
||||
void Visit(NamedExpression &op) override;
|
||||
void Visit(PatternComprehension &op) override;
|
||||
|
||||
std::vector<FilterMatching> getMatchings();
|
||||
std::vector<FilterMatching> getFilterMatchings();
|
||||
std::vector<PatternComprehensionMatching> getPatternComprehensionMatchings();
|
||||
|
||||
SymbolTable &symbol_table_;
|
||||
AstStorage &storage_;
|
||||
|
||||
private:
|
||||
/// Collection of matchings in the filter expression being analyzed.
|
||||
std::vector<FilterMatching> matchings_;
|
||||
std::vector<FilterMatching> filter_matchings_;
|
||||
|
||||
/// Collection of matchings in the pattern comprehension being analyzed.
|
||||
std::vector<PatternComprehensionMatching> pattern_comprehension_matchings_;
|
||||
};
|
||||
|
||||
/// Stores the symbols and expression used to filter a property.
|
||||
@ -495,6 +500,11 @@ inline auto Filters::IdFilters(const Symbol &symbol) const -> std::vector<Filter
|
||||
return filters;
|
||||
}
|
||||
|
||||
struct PatternComprehensionMatching : Matching {
|
||||
/// Pattern comprehension result named expression
|
||||
NamedExpression *result_expr = nullptr;
|
||||
};
|
||||
|
||||
/// @brief Represents a read (+ write) part of a query. Parts are split on
|
||||
/// `WITH` clauses.
|
||||
///
|
||||
@ -537,6 +547,14 @@ struct SingleQueryPart {
|
||||
/// in the `remaining_clauses` but rather in the `Foreach` itself and are guaranteed
|
||||
/// to be processed in the same order by the semantics of the `RuleBasedPlanner`.
|
||||
std::vector<Matching> merge_matching{};
|
||||
|
||||
/// @brief Maps a @c NamedExpression name to the @c PatternComprehensionMatching of each pattern comprehension.
///
/// Storing the normalized pattern of a @c PatternComprehension does not preclude storing the
/// @c PatternComprehension clause itself inside `remaining_clauses`. The reason is that we
/// need to have access to other parts of the clause, such as the pattern and filter clauses.
|
||||
std::unordered_map<std::string, PatternComprehensionMatching> pattern_comprehension_matchings{};
|
||||
|
||||
/// @brief All the remaining clauses (without @c Match).
|
||||
std::vector<Clause *> remaining_clauses{};
|
||||
/// The subqueries vector are all the subqueries in this query part ordered in a list by
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -76,6 +76,13 @@ bool PlanPrinter::PreVisit(ScanAllById &op) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PlanPrinter::PreVisit(query::plan::ScanAllByEdgeType &op) {
|
||||
op.dba_ = dba_;
|
||||
WithPrintLn([&op](auto &out) { out << "* " << op.ToString(); });
|
||||
op.dba_ = nullptr;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PlanPrinter::PreVisit(query::plan::Expand &op) {
|
||||
op.dba_ = dba_;
|
||||
WithPrintLn([&op](auto &out) { out << "* " << op.ToString(); });
|
||||
@ -143,6 +150,13 @@ bool PlanPrinter::PreVisit(query::plan::Union &op) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PlanPrinter::PreVisit(query::plan::RollUpApply &op) {
|
||||
WithPrintLn([&op](auto &out) { out << "* " << op.ToString(); });
|
||||
Branch(*op.list_collection_branch_);
|
||||
op.input_->Accept(*this);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PlanPrinter::PreVisit(query::plan::CallProcedure &op) {
|
||||
WithPrintLn([&op](auto &out) { out << "* " << op.ToString(); });
|
||||
return true;
|
||||
@ -457,6 +471,19 @@ bool PlanToJsonVisitor::PreVisit(ScanAllById &op) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PlanToJsonVisitor::PreVisit(ScanAllByEdgeType &op) {
|
||||
json self;
|
||||
self["name"] = "ScanAllByEdgeType";
|
||||
self["edge_type"] = ToJson(op.edge_type_, *dba_);
|
||||
self["output_symbol"] = ToJson(op.output_symbol_);
|
||||
|
||||
op.input_->Accept(*this);
|
||||
self["input"] = PopOutput();
|
||||
|
||||
output_ = std::move(self);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PlanToJsonVisitor::PreVisit(CreateNode &op) {
|
||||
json self;
|
||||
self["name"] = "CreateNode";
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -67,6 +67,7 @@ class PlanPrinter : public virtual HierarchicalLogicalOperatorVisitor {
|
||||
bool PreVisit(ScanAllByLabelPropertyRange &) override;
|
||||
bool PreVisit(ScanAllByLabelProperty &) override;
|
||||
bool PreVisit(ScanAllById &) override;
|
||||
bool PreVisit(ScanAllByEdgeType &) override;
|
||||
|
||||
bool PreVisit(Expand &) override;
|
||||
bool PreVisit(ExpandVariable &) override;
|
||||
@ -91,6 +92,7 @@ class PlanPrinter : public virtual HierarchicalLogicalOperatorVisitor {
|
||||
bool PreVisit(OrderBy &) override;
|
||||
bool PreVisit(Distinct &) override;
|
||||
bool PreVisit(Union &) override;
|
||||
bool PreVisit(RollUpApply &) override;
|
||||
|
||||
bool PreVisit(Unwind &) override;
|
||||
bool PreVisit(CallProcedure &) override;
|
||||
@ -203,6 +205,7 @@ class PlanToJsonVisitor : public virtual HierarchicalLogicalOperatorVisitor {
|
||||
bool PreVisit(ScanAllByLabelPropertyValue &) override;
|
||||
bool PreVisit(ScanAllByLabelProperty &) override;
|
||||
bool PreVisit(ScanAllById &) override;
|
||||
bool PreVisit(ScanAllByEdgeType &) override;
|
||||
|
||||
bool PreVisit(EmptyResult &) override;
|
||||
bool PreVisit(Produce &) override;
|
||||
|
534
src/query/plan/rewrite/edge_type_index_lookup.hpp
Normal file
@ -0,0 +1,534 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
/// @file
|
||||
/// This file provides a plan rewriter which replaces `ScanAll` and `Expand`
|
||||
/// operations with `ScanAllByEdgeType` if possible. The public entrypoint is
|
||||
/// `RewriteWithEdgeTypeIndexRewriter`.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <algorithm>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
#include <vector>
|
||||
|
||||
#include <gflags/gflags.h>
|
||||
|
||||
#include "query/plan/operator.hpp"
|
||||
#include "query/plan/preprocess.hpp"
|
||||
#include "query/plan/rewrite/index_lookup.hpp"
|
||||
#include "utils/algorithm.hpp"
|
||||
|
||||
namespace memgraph::query::plan {
|
||||
|
||||
namespace impl {
|
||||
|
||||
template <class TDbAccessor>
|
||||
class EdgeTypeIndexRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
public:
|
||||
EdgeTypeIndexRewriter(SymbolTable *symbol_table, AstStorage *ast_storage, TDbAccessor *db)
|
||||
: symbol_table_(symbol_table), ast_storage_(ast_storage), db_(db) {}
|
||||
|
||||
using HierarchicalLogicalOperatorVisitor::PostVisit;
|
||||
using HierarchicalLogicalOperatorVisitor::PreVisit;
|
||||
using HierarchicalLogicalOperatorVisitor::Visit;
|
||||
|
||||
bool Visit(Once &) override { return true; }
|
||||
|
||||
bool PreVisit(Filter &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(Filter & /*op*/) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAll &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
|
||||
if (op.input()->GetTypeInfo() == Once::kType) {
|
||||
const bool is_node_anon = op.output_symbol_.IsSymbolAnonym();
|
||||
once_under_scanall_ = is_node_anon;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(ScanAll &op) override {
|
||||
prev_ops_.pop_back();
|
||||
|
||||
if (EdgeTypeIndexingPossible()) {
|
||||
SetOnParent(op.input());
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Expand &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
|
||||
if (op.input()->GetTypeInfo() == ScanAll::kType) {
|
||||
const bool only_one_edge_type = (op.common_.edge_types.size() == 1U);
|
||||
const bool expansion_is_named = !(op.common_.edge_symbol.IsSymbolAnonym());
|
||||
const bool expanded_node_not_named = op.common_.node_symbol.IsSymbolAnonym();

edge_type_index_exist = only_one_edge_type ? db_->EdgeTypeIndexExists(op.common_.edge_types.front()) : false;

scanall_under_expand_ = only_one_edge_type && expansion_is_named && expanded_node_not_named;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(Expand &op) override {
|
||||
prev_ops_.pop_back();
|
||||
|
||||
if (EdgeTypeIndexingPossible()) {
|
||||
auto indexed_scan = GenEdgeTypeScan(op);
|
||||
SetOnParent(std::move(indexed_scan));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ExpandVariable &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(ExpandVariable &expand) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Merge &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
op.input()->Accept(*this);
|
||||
RewriteBranch(&op.merge_match_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(Merge &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Optional &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
op.input()->Accept(*this);
|
||||
RewriteBranch(&op.optional_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(Optional &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Cartesian &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(Cartesian &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(IndexedJoin &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
RewriteBranch(&op.main_branch_);
|
||||
RewriteBranch(&op.sub_branch_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(IndexedJoin &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(HashJoin &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(HashJoin &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Union &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
RewriteBranch(&op.left_op_);
|
||||
RewriteBranch(&op.right_op_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(Union &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(CreateNode &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(CreateNode &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(CreateExpand &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(CreateExpand &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAllByLabel &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ScanAllByLabel &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAllByLabelPropertyRange &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ScanAllByLabelPropertyRange &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAllByLabelPropertyValue &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ScanAllByLabelPropertyValue &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAllByLabelProperty &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ScanAllByLabelProperty &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAllById &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ScanAllById &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAllByEdgeType &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ScanAllByEdgeType &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ConstructNamedPath &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ConstructNamedPath &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Produce &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
|
||||
if (op.input()->GetTypeInfo() == Expand::kType) {
|
||||
expand_under_produce_ = true;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Produce &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(EmptyResult &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(EmptyResult &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Delete &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Delete &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(SetProperty &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(SetProperty &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(SetProperties &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(SetProperties &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(SetLabels &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(SetLabels &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(RemoveProperty &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(RemoveProperty &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(RemoveLabels &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(RemoveLabels &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(EdgeUniquenessFilter &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(EdgeUniquenessFilter &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Accumulate &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Accumulate &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Aggregate &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Aggregate &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Skip &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Skip &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Limit &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Limit &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(OrderBy &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(OrderBy &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Unwind &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Unwind &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Distinct &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Distinct &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(CallProcedure &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(CallProcedure &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Foreach &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
op.input()->Accept(*this);
|
||||
RewriteBranch(&op.update_clauses_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(Foreach &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(EvaluatePatternFilter &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(EvaluatePatternFilter & /*op*/) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Apply &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
op.input()->Accept(*this);
|
||||
RewriteBranch(&op.subquery_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(Apply & /*op*/) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(LoadCsv &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(LoadCsv & /*op*/) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
std::shared_ptr<LogicalOperator> new_root_;
|
||||
|
||||
private:
|
||||
SymbolTable *symbol_table_;
|
||||
AstStorage *ast_storage_;
|
||||
TDbAccessor *db_;
|
||||
// Collected filters, pending for examination if they can be used for advanced
|
||||
// lookup operations (by index, node ID, ...).
|
||||
Filters filters_;
|
||||
// Expressions which no longer need a plain Filter operator.
|
||||
std::unordered_set<Expression *> filter_exprs_for_removal_;
|
||||
std::vector<LogicalOperator *> prev_ops_;
|
||||
std::unordered_set<Symbol> cartesian_symbols_;
|
||||
|
||||
bool EdgeTypeIndexingPossible() const {
|
||||
return expand_under_produce_ && scanall_under_expand_ && once_under_scanall_ && edge_type_index_exist;
|
||||
}
|
||||
bool expand_under_produce_ = false;
|
||||
bool scanall_under_expand_ = false;
|
||||
bool once_under_scanall_ = false;
|
||||
bool edge_type_index_exist = false;
|
||||
|
||||
bool DefaultPreVisit() override {
|
||||
throw utils::NotYetImplemented("Operator not yet covered by EdgeTypeIndexRewriter");
|
||||
}
|
||||
|
||||
std::unique_ptr<ScanAllByEdgeType> GenEdgeTypeScan(const Expand &expand) {
|
||||
const auto &input = expand.input();
|
||||
const auto &output_symbol = expand.common_.edge_symbol;
|
||||
const auto &view = expand.view_;
|
||||
|
||||
// Extract the single edge type from the expansion.
|
||||
auto edge_type = expand.common_.edge_types.front();
|
||||
return std::make_unique<ScanAllByEdgeType>(input, output_symbol, edge_type, view);
|
||||
}
|
||||
|
||||
void SetOnParent(const std::shared_ptr<LogicalOperator> &input) {
|
||||
MG_ASSERT(input);
|
||||
if (prev_ops_.empty()) {
|
||||
MG_ASSERT(!new_root_);
|
||||
new_root_ = input;
|
||||
return;
|
||||
}
|
||||
prev_ops_.back()->set_input(input);
|
||||
}
|
||||
|
||||
void RewriteBranch(std::shared_ptr<LogicalOperator> *branch) {
|
||||
EdgeTypeIndexRewriter<TDbAccessor> rewriter(symbol_table_, ast_storage_, db_);
|
||||
(*branch)->Accept(rewriter);
|
||||
if (rewriter.new_root_) {
|
||||
*branch = rewriter.new_root_;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace impl
|
||||
|
||||
template <class TDbAccessor>
|
||||
std::unique_ptr<LogicalOperator> RewriteWithEdgeTypeIndexRewriter(std::unique_ptr<LogicalOperator> root_op,
|
||||
SymbolTable *symbol_table, AstStorage *ast_storage,
|
||||
TDbAccessor *db) {
|
||||
impl::EdgeTypeIndexRewriter<TDbAccessor> rewriter(symbol_table, ast_storage, db);
|
||||
root_op->Accept(rewriter);
|
||||
return root_op;
|
||||
}
|
||||
|
||||
} // namespace memgraph::query::plan
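For reference, this entrypoint is meant to run as the last rewriting pass, after the index-lookup and join rewriters, as the PostProcessor::Rewrite change earlier in the diff shows. A condensed sketch using the context fields from that change:

// Sketch of the rewrite pipeline inside PostProcessor::Rewrite.
auto index_lookup_plan =
    RewriteWithIndexLookup(std::move(plan), context->symbol_table, context->ast_storage, context->db, index_hints_);
auto join_plan =
    RewriteWithJoinRewriter(std::move(index_lookup_plan), context->symbol_table, context->ast_storage, context->db);
auto edge_index_plan = RewriteWithEdgeTypeIndexRewriter(std::move(join_plan), context->symbol_table,
                                                        context->ast_storage, context->db);

As the EdgeTypeIndexingPossible check above suggests, the rewrite only fires for a Produce <- Expand <- ScanAll <- Once chain whose expansion has exactly one edge type with an existing edge-type index, a named edge symbol, and anonymous node symbols, for example MATCH ()-[r:KNOWS]->() RETURN r.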
|
@ -595,6 +595,18 @@ class IndexLookupRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(RollUpApply &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
op.input()->Accept(*this);
|
||||
RewriteBranch(&op.list_collection_branch_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(RollUpApply &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
std::shared_ptr<LogicalOperator> new_root_;
|
||||
|
||||
private:
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -455,6 +455,18 @@ class JoinRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(RollUpApply &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
op.input()->Accept(*this);
|
||||
RewriteBranch(&op.list_collection_branch_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(RollUpApply &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
std::shared_ptr<LogicalOperator> new_root_;
|
||||
|
||||
private:
|
||||
|
@ -14,9 +14,12 @@
|
||||
#include <algorithm>
|
||||
#include <functional>
|
||||
#include <limits>
|
||||
#include <memory>
|
||||
#include <stack>
|
||||
#include <unordered_set>
|
||||
|
||||
#include "query/frontend/ast/ast.hpp"
|
||||
#include "query/plan/operator.hpp"
|
||||
#include "query/plan/preprocess.hpp"
|
||||
#include "utils/algorithm.hpp"
|
||||
#include "utils/exceptions.hpp"
|
||||
@ -40,7 +43,8 @@ namespace {
|
||||
class ReturnBodyContext : public HierarchicalTreeVisitor {
|
||||
public:
|
||||
ReturnBodyContext(const ReturnBody &body, SymbolTable &symbol_table, const std::unordered_set<Symbol> &bound_symbols,
|
||||
AstStorage &storage, Where *where = nullptr)
|
||||
AstStorage &storage, std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops,
|
||||
Where *where = nullptr)
|
||||
: body_(body), symbol_table_(symbol_table), bound_symbols_(bound_symbols), storage_(storage), where_(where) {
|
||||
// Collect symbols from named expressions.
|
||||
output_symbols_.reserve(body_.named_expressions.size());
|
||||
@ -53,6 +57,14 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
|
||||
output_symbols_.emplace_back(symbol_table_.at(*named_expr));
|
||||
named_expr->Accept(*this);
|
||||
named_expressions_.emplace_back(named_expr);
|
||||
if (pattern_comprehension_) {
|
||||
if (auto it = pc_ops.find(named_expr->name_); it != pc_ops.end()) {
|
||||
pattern_comprehension_op_ = std::move(it->second);
|
||||
pc_ops.erase(it);
|
||||
} else {
|
||||
throw utils::NotYetImplemented("Operation on top of pattern comprehension");
|
||||
}
|
||||
}
|
||||
}
|
||||
// Collect symbols used in group by expressions.
|
||||
if (!aggregations_.empty()) {
|
||||
@ -386,8 +398,20 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(PatternComprehension & /*unused*/) override {
|
||||
throw utils::NotYetImplemented("Planner can not handle pattern comprehension.");
|
||||
bool PreVisit(PatternComprehension & /*unused*/) override {
|
||||
pattern_comprehension_aggregations_start_index_ = has_aggregation_.size();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(PatternComprehension &pattern_comprehension) override {
|
||||
bool has_aggr = false;
|
||||
for (auto i = has_aggregation_.size(); i > pattern_compression_aggregations_start_index_; --i) {
|
||||
has_aggr |= has_aggregation_.back();
|
||||
has_aggregation_.pop_back();
|
||||
}
|
||||
has_aggregation_.emplace_back(has_aggr);
|
||||
pattern_comprehension_ = &pattern_comprehension;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Creates NamedExpression with an Identifier for each user declared symbol.
|
||||
@ -444,6 +468,10 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
|
||||
// named_expressions.
|
||||
const auto &output_symbols() const { return output_symbols_; }
|
||||
|
||||
const auto *pattern_comprehension() const { return pattern_comprehension_; }
|
||||
|
||||
std::shared_ptr<LogicalOperator> pattern_comprehension_op() const { return pattern_comprehension_op_; }
|
||||
|
||||
private:
|
||||
const ReturnBody &body_;
|
||||
SymbolTable &symbol_table_;
|
||||
@ -465,10 +493,13 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
|
||||
// group by it.
|
||||
std::list<bool> has_aggregation_;
|
||||
std::vector<NamedExpression *> named_expressions_;
|
||||
PatternComprehension *pattern_comprehension_ = nullptr;
|
||||
std::shared_ptr<LogicalOperator> pattern_comprehension_op_;
|
||||
size_t pattern_comprehension_aggregations_start_index_ = 0;
|
||||
};
|
||||
|
||||
std::unique_ptr<LogicalOperator> GenReturnBody(std::unique_ptr<LogicalOperator> input_op, bool advance_command,
|
||||
const ReturnBodyContext &body, bool accumulate = false) {
|
||||
const ReturnBodyContext &body, bool accumulate) {
|
||||
std::vector<Symbol> used_symbols(body.used_symbols().begin(), body.used_symbols().end());
|
||||
auto last_op = std::move(input_op);
|
||||
if (accumulate) {
|
||||
@ -482,6 +513,11 @@ std::unique_ptr<LogicalOperator> GenReturnBody(std::unique_ptr<LogicalOperator>
|
||||
std::vector<Symbol> remember(body.group_by_used_symbols().begin(), body.group_by_used_symbols().end());
|
||||
last_op = std::make_unique<Aggregate>(std::move(last_op), body.aggregations(), body.group_by(), remember);
|
||||
}
|
||||
|
||||
if (body.pattern_comprehension()) {
|
||||
last_op = std::make_unique<RollUpApply>(std::move(last_op), body.pattern_comprehension_op());
|
||||
}
|
||||
|
||||
last_op = std::make_unique<Produce>(std::move(last_op), body.named_expressions());
|
||||
// Distinct in ReturnBody only makes Produce values unique, so plan after it.
|
||||
if (body.distinct()) {
|
||||
@ -506,6 +542,7 @@ std::unique_ptr<LogicalOperator> GenReturnBody(std::unique_ptr<LogicalOperator>
|
||||
last_op = std::make_unique<Filter>(std::move(last_op), std::vector<std::shared_ptr<LogicalOperator>>{},
|
||||
body.where()->expression_);
|
||||
}
|
||||
|
||||
return last_op;
|
||||
}
|
||||
|
||||
@ -543,8 +580,9 @@ Expression *ExtractFilters(const std::unordered_set<Symbol> &bound_symbols, Filt
|
||||
return filter_expr;
|
||||
}
|
||||
|
||||
std::unordered_set<Symbol> GetSubqueryBoundSymbols(const std::vector<SingleQueryPart> &single_query_parts,
|
||||
SymbolTable &symbol_table, AstStorage &storage) {
|
||||
std::unordered_set<Symbol> GetSubqueryBoundSymbols(
|
||||
const std::vector<SingleQueryPart> &single_query_parts, SymbolTable &symbol_table, AstStorage &storage,
|
||||
std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops) {
|
||||
const auto &query = single_query_parts[0];
|
||||
|
||||
if (!query.matching.expansions.empty() || query.remaining_clauses.empty()) {
|
||||
@ -552,7 +590,7 @@ std::unordered_set<Symbol> GetSubqueryBoundSymbols(const std::vector<SingleQuery
|
||||
}
|
||||
|
||||
if (std::unordered_set<Symbol> bound_symbols; auto *with = utils::Downcast<query::With>(query.remaining_clauses[0])) {
|
||||
auto input_op = impl::GenWith(*with, nullptr, symbol_table, false, bound_symbols, storage);
|
||||
auto input_op = impl::GenWith(*with, nullptr, symbol_table, false, bound_symbols, storage, pc_ops);
|
||||
return bound_symbols;
|
||||
}
|
||||
|
||||
@ -583,7 +621,8 @@ std::unique_ptr<LogicalOperator> GenNamedPaths(std::unique_ptr<LogicalOperator>
|
||||
|
||||
std::unique_ptr<LogicalOperator> GenReturn(Return &ret, std::unique_ptr<LogicalOperator> input_op,
|
||||
SymbolTable &symbol_table, bool is_write,
|
||||
const std::unordered_set<Symbol> &bound_symbols, AstStorage &storage) {
|
||||
const std::unordered_set<Symbol> &bound_symbols, AstStorage &storage,
|
||||
std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops) {
|
||||
// Similar to WITH clause, but we want to accumulate when the query writes to
|
||||
// the database. This way we handle the case when we want to return
|
||||
// expressions with the latest updated results. For example, `MATCH (n) -- ()
|
||||
@ -592,13 +631,14 @@ std::unique_ptr<LogicalOperator> GenReturn(Return &ret, std::unique_ptr<LogicalO
|
||||
// final result of 'k' increments.
|
||||
bool accumulate = is_write;
|
||||
bool advance_command = false;
|
||||
ReturnBodyContext body(ret.body_, symbol_table, bound_symbols, storage);
|
||||
ReturnBodyContext body(ret.body_, symbol_table, bound_symbols, storage, pc_ops);
|
||||
return GenReturnBody(std::move(input_op), advance_command, body, accumulate);
|
||||
}
|
||||
|
||||
std::unique_ptr<LogicalOperator> GenWith(With &with, std::unique_ptr<LogicalOperator> input_op,
|
||||
SymbolTable &symbol_table, bool is_write,
|
||||
std::unordered_set<Symbol> &bound_symbols, AstStorage &storage) {
|
||||
std::unordered_set<Symbol> &bound_symbols, AstStorage &storage,
|
||||
std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops) {
|
||||
// WITH clause is Accumulate/Aggregate (advance_command) + Produce and
|
||||
// optional Filter. In case of update and aggregation, we want to accumulate
|
||||
// first, so that when aggregating, we get the latest results. Similar to
|
||||
@ -606,7 +646,7 @@ std::unique_ptr<LogicalOperator> GenWith(With &with, std::unique_ptr<LogicalOper
|
||||
bool accumulate = is_write;
|
||||
// No need to advance the command if we only performed reads.
|
||||
bool advance_command = is_write;
|
||||
ReturnBodyContext body(with.body_, symbol_table, bound_symbols, storage, with.where_);
|
||||
ReturnBodyContext body(with.body_, symbol_table, bound_symbols, storage, pc_ops, with.where_);
|
||||
auto last_op = GenReturnBody(std::move(input_op), advance_command, body, accumulate);
|
||||
// Reset bound symbols, so that only those in WITH are exposed.
|
||||
bound_symbols.clear();
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include "query/frontend/ast/ast_visitor.hpp"
|
||||
#include "query/plan/operator.hpp"
|
||||
#include "query/plan/preprocess.hpp"
|
||||
#include "utils/exceptions.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
#include "utils/typeinfo.hpp"
|
||||
|
||||
@ -87,8 +88,9 @@ bool HasBoundFilterSymbols(const std::unordered_set<Symbol> &bound_symbols, cons
|
||||
|
||||
// Returns the set of symbols for the subquery that are actually referenced from the outer scope and
|
||||
// used in the subquery.
|
||||
std::unordered_set<Symbol> GetSubqueryBoundSymbols(const std::vector<SingleQueryPart> &single_query_parts,
|
||||
SymbolTable &symbol_table, AstStorage &storage);
|
||||
std::unordered_set<Symbol> GetSubqueryBoundSymbols(
|
||||
const std::vector<SingleQueryPart> &single_query_parts, SymbolTable &symbol_table, AstStorage &storage,
|
||||
std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops);
|
||||
|
||||
Symbol GetSymbol(NodeAtom *atom, const SymbolTable &symbol_table);
|
||||
Symbol GetSymbol(EdgeAtom *atom, const SymbolTable &symbol_table);
|
||||
@ -142,11 +144,13 @@ std::unique_ptr<LogicalOperator> GenNamedPaths(std::unique_ptr<LogicalOperator>
|
||||
|
||||
std::unique_ptr<LogicalOperator> GenReturn(Return &ret, std::unique_ptr<LogicalOperator> input_op,
|
||||
SymbolTable &symbol_table, bool is_write,
|
||||
const std::unordered_set<Symbol> &bound_symbols, AstStorage &storage);
|
||||
const std::unordered_set<Symbol> &bound_symbols, AstStorage &storage,
|
||||
std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops);
|
||||
|
||||
std::unique_ptr<LogicalOperator> GenWith(With &with, std::unique_ptr<LogicalOperator> input_op,
|
||||
SymbolTable &symbol_table, bool is_write,
|
||||
std::unordered_set<Symbol> &bound_symbols, AstStorage &storage);
|
||||
std::unordered_set<Symbol> &bound_symbols, AstStorage &storage,
|
||||
std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops);
|
||||
|
||||
std::unique_ptr<LogicalOperator> GenUnion(const CypherUnion &cypher_union, std::shared_ptr<LogicalOperator> left_op,
|
||||
std::shared_ptr<LogicalOperator> right_op, SymbolTable &symbol_table);
|
||||
@ -190,11 +194,24 @@ class RuleBasedPlanner {
|
||||
uint64_t merge_id = 0;
|
||||
uint64_t subquery_id = 0;
|
||||
|
||||
std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pattern_comprehension_ops;
|
||||
|
||||
if (single_query_part.pattern_comprehension_matchings.size() > 1) {
|
||||
throw utils::NotYetImplemented("Multiple pattern comprehensions.");
|
||||
}
|
||||
for (const auto &matching : single_query_part.pattern_comprehension_matchings) {
|
||||
std::unique_ptr<LogicalOperator> new_input;
|
||||
MatchContext match_ctx{matching.second, *context.symbol_table, context.bound_symbols};
|
||||
new_input = PlanMatching(match_ctx, std::move(new_input));
|
||||
new_input = std::make_unique<Produce>(std::move(new_input), std::vector{matching.second.result_expr});
|
||||
pattern_comprehension_ops.emplace(matching.first, std::move(new_input));
|
||||
}
|
||||
|
||||
for (const auto &clause : single_query_part.remaining_clauses) {
|
||||
MG_ASSERT(!utils::IsSubtype(*clause, Match::kType), "Unexpected Match in remaining clauses");
|
||||
if (auto *ret = utils::Downcast<Return>(clause)) {
|
||||
input_op = impl::GenReturn(*ret, std::move(input_op), *context.symbol_table, context.is_write_query,
|
||||
context.bound_symbols, *context.ast_storage);
|
||||
context.bound_symbols, *context.ast_storage, pattern_comprehension_ops);
|
||||
} else if (auto *merge = utils::Downcast<query::Merge>(clause)) {
|
||||
input_op = GenMerge(*merge, std::move(input_op), single_query_part.merge_matching[merge_id++]);
|
||||
// Treat MERGE clause as write, because we do not know if it will
|
||||
@ -202,7 +219,7 @@ class RuleBasedPlanner {
|
||||
context.is_write_query = true;
|
||||
} else if (auto *with = utils::Downcast<query::With>(clause)) {
|
||||
input_op = impl::GenWith(*with, std::move(input_op), *context.symbol_table, context.is_write_query,
|
||||
context.bound_symbols, *context.ast_storage);
|
||||
context.bound_symbols, *context.ast_storage, pattern_comprehension_ops);
|
||||
// WITH clause advances the command, so reset the flag.
|
||||
context.is_write_query = false;
|
||||
} else if (auto op = HandleWriteClause(clause, input_op, *context.symbol_table, context.bound_symbols)) {
|
||||
@ -241,7 +258,7 @@ class RuleBasedPlanner {
|
||||
single_query_part, merge_id);
|
||||
} else if (auto *call_sub = utils::Downcast<query::CallSubquery>(clause)) {
|
||||
input_op = HandleSubquery(std::move(input_op), single_query_part.subqueries[subquery_id++],
|
||||
*context.symbol_table, *context_->ast_storage);
|
||||
*context.symbol_table, *context_->ast_storage, pattern_comprehension_ops);
|
||||
} else {
|
||||
throw utils::NotYetImplemented("clause '{}' conversion to operator(s)", clause->GetTypeInfo().name);
|
||||
}
|
||||
@ -860,15 +877,15 @@ class RuleBasedPlanner {
|
||||
symbol);
|
||||
}
|
||||
|
||||
std::unique_ptr<LogicalOperator> HandleSubquery(std::unique_ptr<LogicalOperator> last_op,
|
||||
std::shared_ptr<QueryParts> subquery, SymbolTable &symbol_table,
|
||||
AstStorage &storage) {
|
||||
std::unique_ptr<LogicalOperator> HandleSubquery(
|
||||
std::unique_ptr<LogicalOperator> last_op, std::shared_ptr<QueryParts> subquery, SymbolTable &symbol_table,
|
||||
AstStorage &storage, std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops) {
|
||||
std::unordered_set<Symbol> outer_scope_bound_symbols;
|
||||
outer_scope_bound_symbols.insert(std::make_move_iterator(context_->bound_symbols.begin()),
|
||||
std::make_move_iterator(context_->bound_symbols.end()));
|
||||
|
||||
context_->bound_symbols =
|
||||
impl::GetSubqueryBoundSymbols(subquery->query_parts[0].single_query_parts, symbol_table, storage);
|
||||
impl::GetSubqueryBoundSymbols(subquery->query_parts[0].single_query_parts, symbol_table, storage, pc_ops);
|
||||
|
||||
auto subquery_op = Plan(*subquery);
|
||||
|
||||
|
@ -78,6 +78,8 @@ class VertexCountCache {
|
||||
return db_->LabelPropertyIndexExists(label, property);
|
||||
}
|
||||
|
||||
bool EdgeTypeIndexExists(storage::EdgeTypeId edge_type) { return db_->EdgeTypeIndexExists(edge_type); }
|
||||
|
||||
std::optional<storage::LabelIndexStats> GetIndexStats(const storage::LabelId &label) const {
|
||||
return db_->GetIndexStats(label);
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -16,28 +16,15 @@
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
|
||||
#include "json/json.hpp"
|
||||
|
||||
namespace memgraph::replication_coordination_glue {
|
||||
|
||||
enum class ReplicationMode : std::uint8_t { SYNC, ASYNC };
|
||||
|
||||
inline auto ReplicationModeToString(ReplicationMode mode) -> std::string {
|
||||
switch (mode) {
|
||||
case ReplicationMode::SYNC:
|
||||
return "SYNC";
|
||||
case ReplicationMode::ASYNC:
|
||||
return "ASYNC";
|
||||
}
|
||||
throw std::invalid_argument("Invalid replication mode");
|
||||
}
|
||||
|
||||
inline auto ReplicationModeFromString(std::string_view mode) -> ReplicationMode {
|
||||
if (mode == "SYNC") {
|
||||
return ReplicationMode::SYNC;
|
||||
}
|
||||
if (mode == "ASYNC") {
|
||||
return ReplicationMode::ASYNC;
|
||||
}
|
||||
throw std::invalid_argument("Invalid replication mode");
|
||||
}
|
||||
NLOHMANN_JSON_SERIALIZE_ENUM(ReplicationMode, {
|
||||
{ReplicationMode::SYNC, "sync"},
|
||||
{ReplicationMode::ASYNC, "async"},
|
||||
})
|
||||
|
||||
} // namespace memgraph::replication_coordination_glue
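A small, self-contained illustration of what the NLOHMANN_JSON_SERIALIZE_ENUM macro provides in place of the removed string helpers; the include path of the header above is an assumption.

#include <cassert>
#include <string>

#include "json/json.hpp"
#include "replication_coordination_glue/mode.hpp"  // assumed path of the header above

int main() {
  using memgraph::replication_coordination_glue::ReplicationMode;
  nlohmann::json j = ReplicationMode::ASYNC;                   // generated to_json
  assert(j.get<std::string>() == "async");
  assert(j.get<ReplicationMode>() == ReplicationMode::ASYNC);  // generated from_json
  return 0;
}

One behavioural difference worth keeping in mind: with this macro an unrecognized string deserializes to the first listed enumerator (SYNC here) instead of throwing, unlike the removed ReplicationModeFromString.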
|
||||
|
@ -12,8 +12,14 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
#include "json/json.hpp"
|
||||
|
||||
namespace memgraph::replication_coordination_glue {
|
||||
|
||||
// TODO: figure out a way of ensuring that usage of this type is never uninitialized/defaulted incorrectly to MAIN
|
||||
enum class ReplicationRole : uint8_t { MAIN, REPLICA };
|
||||
|
||||
NLOHMANN_JSON_SERIALIZE_ENUM(ReplicationRole, {{ReplicationRole::MAIN, "main"}, {ReplicationRole::REPLICA, "replica"}})
|
||||
|
||||
} // namespace memgraph::replication_coordination_glue
|
||||
|
@ -210,8 +210,13 @@ struct ReplicationHandler : public memgraph::query::ReplicationQueryHandler {
|
||||
auto client = std::make_unique<storage::ReplicationStorageClient>(*instance_client_ptr, main_uuid);
|
||||
client->Start(storage, std::move(db_acc));
|
||||
bool const success = std::invoke([state = client->State()]() {
|
||||
// We force sync replicas in other situations
|
||||
if (state == storage::replication::ReplicaState::DIVERGED_FROM_MAIN) {
|
||||
#ifdef MG_ENTERPRISE
|
||||
return FLAGS_coordinator_server_port != 0;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
return true;
|
||||
});
|
||||
|
@ -271,8 +271,7 @@ auto ReplicationHandler::GetDatabasesHistories() -> replication_coordination_glu
|
||||
dbms_handler_.ForEach([&results](memgraph::dbms::DatabaseAccess db_acc) {
|
||||
auto &repl_storage_state = db_acc->storage()->repl_storage_state_;
|
||||
|
||||
std::vector<std::pair<std::string, uint64_t>> history = utils::fmap(
|
||||
[](const auto &elem) { return std::make_pair(elem.first, elem.second); }, repl_storage_state.history);
|
||||
std::vector<std::pair<std::string, uint64_t>> history = utils::fmap(repl_storage_state.history);
|
||||
|
||||
history.emplace_back(std::string(repl_storage_state.epoch_.id()), repl_storage_state.last_commit_timestamp_.load());
|
||||
replication_coordination_glue::DatabaseHistory repl{
|
||||
|
@ -21,8 +21,10 @@ add_library(mg-storage-v2 STATIC
|
||||
storage.cpp
|
||||
indices/indices.cpp
|
||||
all_vertices_iterable.cpp
|
||||
edges_iterable.cpp
|
||||
vertices_iterable.cpp
|
||||
inmemory/storage.cpp
|
||||
inmemory/edge_type_index.cpp
|
||||
inmemory/label_index.cpp
|
||||
inmemory/label_property_index.cpp
|
||||
inmemory/unique_constraints.cpp
|
||||
@ -30,6 +32,7 @@ add_library(mg-storage-v2 STATIC
|
||||
disk/edge_import_mode_cache.cpp
|
||||
disk/storage.cpp
|
||||
disk/rocksdb_storage.cpp
|
||||
disk/edge_type_index.cpp
|
||||
disk/label_index.cpp
|
||||
disk/label_property_index.cpp
|
||||
disk/unique_constraints.cpp
|
||||
|
49
src/storage/v2/disk/edge_type_index.cpp
Normal file
@ -0,0 +1,49 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "edge_type_index.hpp"
|
||||
|
||||
#include "utils/exceptions.hpp"
|
||||
|
||||
namespace memgraph::storage {
|
||||
|
||||
bool DiskEdgeTypeIndex::DropIndex(EdgeTypeId /*edge_type*/) {
|
||||
spdlog::warn("Edge-type index related operations are not yet supported using on-disk storage mode.");
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DiskEdgeTypeIndex::IndexExists(EdgeTypeId /*edge_type*/) const {
|
||||
spdlog::warn("Edge-type index related operations are not yet supported using on-disk storage mode.");
|
||||
return false;
|
||||
}
|
||||
|
||||
std::vector<EdgeTypeId> DiskEdgeTypeIndex::ListIndices() const {
|
||||
spdlog::warn("Edge-type index related operations are not yet supported using on-disk storage mode.");
|
||||
return {};
|
||||
}
|
||||
|
||||
uint64_t DiskEdgeTypeIndex::ApproximateEdgeCount(EdgeTypeId /*edge_type*/) const {
|
||||
spdlog::warn("Edge-type index related operations are not yet supported using on-disk storage mode.");
|
||||
return 0U;
|
||||
}
|
||||
|
||||
void DiskEdgeTypeIndex::UpdateOnEdgeCreation(Vertex * /*from*/, Vertex * /*to*/, EdgeRef /*edge_ref*/,
|
||||
EdgeTypeId /*edge_type*/, const Transaction & /*tx*/) {
|
||||
spdlog::warn("Edge-type index related operations are not yet supported using on-disk storage mode.");
|
||||
}
|
||||
|
||||
void DiskEdgeTypeIndex::UpdateOnEdgeModification(Vertex * /*old_from*/, Vertex * /*old_to*/, Vertex * /*new_from*/,
|
||||
Vertex * /*new_to*/, EdgeRef /*edge_ref*/, EdgeTypeId /*edge_type*/,
|
||||
const Transaction & /*tx*/) {
|
||||
spdlog::warn("Edge-type index related operations are not yet supported using on-disk storage mode.");
|
||||
}
|
||||
|
||||
} // namespace memgraph::storage
|
35
src/storage/v2/disk/edge_type_index.hpp
Normal file
@ -0,0 +1,35 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#include "storage/v2/indices/edge_type_index.hpp"

namespace memgraph::storage {

class DiskEdgeTypeIndex : public storage::EdgeTypeIndex {
 public:
  bool DropIndex(EdgeTypeId edge_type) override;

  bool IndexExists(EdgeTypeId edge_type) const override;

  std::vector<EdgeTypeId> ListIndices() const override;

  uint64_t ApproximateEdgeCount(EdgeTypeId edge_type) const override;

  void UpdateOnEdgeCreation(Vertex *from, Vertex *to, EdgeRef edge_ref, EdgeTypeId edge_type,
                            const Transaction &tx) override;

  void UpdateOnEdgeModification(Vertex *old_from, Vertex *old_to, Vertex *new_from, Vertex *new_to, EdgeRef edge_ref,
                                EdgeTypeId edge_type, const Transaction &tx) override;
};

}  // namespace memgraph::storage
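The on-disk variant above is a pure stub: every operation logs a warning and returns a neutral value, so the rest of the storage layer sees "no edge-type indices" whenever disk mode is active. A minimal sketch of what a caller observes (illustrative only; assumes the Memgraph source tree is on the include path and spdlog is linked, and uses an arbitrary edge-type id):

// Illustrative sketch, not part of the change: exercises the stub behaviour shown above.
#include "storage/v2/disk/edge_type_index.hpp"

#include <cassert>

int main() {
  memgraph::storage::DiskEdgeTypeIndex index;
  const auto knows = memgraph::storage::EdgeTypeId::FromUint(1);  // arbitrary id for illustration

  assert(!index.IndexExists(knows));               // always false; only a warning is logged
  assert(index.ListIndices().empty());             // always empty
  assert(index.ApproximateEdgeCount(knows) == 0);  // always 0
  return 0;
}
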
@ -41,6 +41,7 @@
|
||||
#include "storage/v2/edge_accessor.hpp"
|
||||
#include "storage/v2/edge_import_mode.hpp"
|
||||
#include "storage/v2/edge_ref.hpp"
|
||||
#include "storage/v2/edges_iterable.hpp"
|
||||
#include "storage/v2/id_types.hpp"
|
||||
#include "storage/v2/modified_edge.hpp"
|
||||
#include "storage/v2/mvcc.hpp"
|
||||
@ -807,11 +808,21 @@ void DiskStorage::LoadVerticesFromDiskLabelPropertyIndexForIntervalSearch(
|
||||
}
|
||||
}
|
||||
|
||||
EdgesIterable DiskStorage::DiskAccessor::Edges(EdgeTypeId /*edge_type*/, View /*view*/) {
|
||||
throw utils::NotYetImplemented(
|
||||
"Edge-type index related operations are not yet supported using on-disk storage mode.");
|
||||
}
|
||||
|
||||
uint64_t DiskStorage::DiskAccessor::ApproximateVertexCount() const {
|
||||
auto *disk_storage = static_cast<DiskStorage *>(storage_);
|
||||
return disk_storage->vertex_count_.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
uint64_t DiskStorage::DiskAccessor::ApproximateEdgeCount(EdgeTypeId /*edge_type*/) const {
|
||||
spdlog::info("Edge-type index related operations are not yet supported using on-disk storage mode.");
|
||||
return 0U;
|
||||
}
|
||||
|
||||
uint64_t DiskStorage::GetDiskSpaceUsage() const {
|
||||
uint64_t main_disk_storage_size = utils::GetDirDiskUsage(config_.disk.main_storage_directory);
|
||||
uint64_t index_disk_storage_size = utils::GetDirDiskUsage(config_.disk.label_index_directory) +
|
||||
@ -1629,6 +1640,9 @@ utils::BasicResult<StorageManipulationError, void> DiskStorage::DiskAccessor::Co
|
||||
return StorageManipulationError{PersistenceError{}};
|
||||
}
|
||||
} break;
|
||||
case MetadataDelta::Action::EDGE_INDEX_CREATE: {
|
||||
throw utils::NotYetImplemented("Edge-type indexing is not yet implemented on on-disk storage mode.");
|
||||
}
|
||||
case MetadataDelta::Action::LABEL_INDEX_DROP: {
|
||||
if (!disk_storage->durable_metadata_.PersistLabelIndexDeletion(md_delta.label)) {
|
||||
return StorageManipulationError{PersistenceError{}};
|
||||
@ -1641,6 +1655,9 @@ utils::BasicResult<StorageManipulationError, void> DiskStorage::DiskAccessor::Co
|
||||
return StorageManipulationError{PersistenceError{}};
|
||||
}
|
||||
} break;
|
||||
case MetadataDelta::Action::EDGE_INDEX_DROP: {
|
||||
throw utils::NotYetImplemented("Edge-type indexing is not yet implemented on on-disk storage mode.");
|
||||
}
|
||||
case MetadataDelta::Action::LABEL_INDEX_STATS_SET: {
|
||||
throw utils::NotYetImplemented("SetIndexStats(stats) is not implemented for DiskStorage.");
|
||||
} break;
|
||||
@ -1917,6 +1934,11 @@ utils::BasicResult<StorageIndexDefinitionError, void> DiskStorage::DiskAccessor:
|
||||
return {};
|
||||
}
|
||||
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> DiskStorage::DiskAccessor::CreateIndex(EdgeTypeId /*edge_type*/) {
|
||||
throw utils::NotYetImplemented(
|
||||
"Edge-type index related operations are not yet supported using on-disk storage mode.");
|
||||
}
|
||||
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> DiskStorage::DiskAccessor::DropIndex(LabelId label) {
|
||||
MG_ASSERT(unique_guard_.owns_lock(), "Create index requires a unique access to the storage!");
|
||||
auto *on_disk = static_cast<DiskStorage *>(storage_);
|
||||
@ -1945,6 +1967,11 @@ utils::BasicResult<StorageIndexDefinitionError, void> DiskStorage::DiskAccessor:
|
||||
return {};
|
||||
}
|
||||
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> DiskStorage::DiskAccessor::DropIndex(EdgeTypeId /*edge_type*/) {
|
||||
throw utils::NotYetImplemented(
|
||||
"Edge-type index related operations are not yet supported using on-disk storage mode.");
|
||||
}
|
||||
|
||||
utils::BasicResult<StorageExistenceConstraintDefinitionError, void>
|
||||
DiskStorage::DiskAccessor::CreateExistenceConstraint(LabelId label, PropertyId property) {
|
||||
MG_ASSERT(unique_guard_.owns_lock(), "Create existence constraint requires a unique access to the storage!");
|
||||
@ -2053,6 +2080,12 @@ std::unique_ptr<Storage::Accessor> DiskStorage::UniqueAccess(
|
||||
return std::unique_ptr<DiskAccessor>(
|
||||
new DiskAccessor{Storage::Accessor::unique_access, this, isolation_level, storage_mode_});
|
||||
}
|
||||
|
||||
bool DiskStorage::DiskAccessor::EdgeTypeIndexExists(EdgeTypeId /*edge_type*/) const {
|
||||
spdlog::info("Edge-type index related operations are not yet supported using on-disk storage mode.");
|
||||
return false;
|
||||
}
|
||||
|
||||
IndicesInfo DiskStorage::DiskAccessor::ListAllIndices() const {
|
||||
auto *on_disk = static_cast<DiskStorage *>(storage_);
|
||||
auto *disk_label_index = static_cast<DiskLabelIndex *>(on_disk->indices_.label_index_.get());
|
||||
|
@ -72,6 +72,8 @@ class DiskStorage final : public Storage {
|
||||
const std::optional<utils::Bound<PropertyValue>> &lower_bound,
|
||||
const std::optional<utils::Bound<PropertyValue>> &upper_bound, View view) override;
|
||||
|
||||
EdgesIterable Edges(EdgeTypeId edge_type, View view) override;
|
||||
|
||||
uint64_t ApproximateVertexCount() const override;
|
||||
|
||||
uint64_t ApproximateVertexCount(LabelId /*label*/) const override { return 10; }
|
||||
@ -89,6 +91,8 @@ class DiskStorage final : public Storage {
|
||||
return 10;
|
||||
}
|
||||
|
||||
uint64_t ApproximateEdgeCount(EdgeTypeId edge_type) const override;
|
||||
|
||||
std::optional<storage::LabelIndexStats> GetIndexStats(const storage::LabelId & /*label*/) const override {
|
||||
return {};
|
||||
}
|
||||
@ -140,6 +144,8 @@ class DiskStorage final : public Storage {
|
||||
return disk_storage->indices_.label_property_index_->IndexExists(label, property);
|
||||
}
|
||||
|
||||
bool EdgeTypeIndexExists(EdgeTypeId edge_type) const override;
|
||||
|
||||
IndicesInfo ListAllIndices() const override;
|
||||
|
||||
ConstraintsInfo ListAllConstraints() const override;
|
||||
@ -158,10 +164,14 @@ class DiskStorage final : public Storage {
|
||||
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> CreateIndex(LabelId label, PropertyId property) override;
|
||||
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> CreateIndex(EdgeTypeId edge_type) override;
|
||||
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> DropIndex(LabelId label) override;
|
||||
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> DropIndex(LabelId label, PropertyId property) override;
|
||||
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> DropIndex(EdgeTypeId edge_type) override;
|
||||
|
||||
utils::BasicResult<StorageExistenceConstraintDefinitionError, void> CreateExistenceConstraint(
|
||||
LabelId label, PropertyId property) override;
|
||||
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include "storage/v2/durability/paths.hpp"
|
||||
#include "storage/v2/durability/snapshot.hpp"
|
||||
#include "storage/v2/durability/wal.hpp"
|
||||
#include "storage/v2/inmemory/edge_type_index.hpp"
|
||||
#include "storage/v2/inmemory/label_index.hpp"
|
||||
#include "storage/v2/inmemory/label_property_index.hpp"
|
||||
#include "storage/v2/inmemory/unique_constraints.hpp"
|
||||
@ -199,9 +200,18 @@ void RecoverIndicesAndStats(const RecoveredIndicesAndConstraints::IndicesMetadat
|
||||
}
|
||||
spdlog::info("Label+property indices statistics are recreated.");
|
||||
|
||||
spdlog::info("Indices are recreated.");
|
||||
// Recover edge-type indices.
|
||||
spdlog::info("Recreating {} edge-type indices from metadata.", indices_metadata.edge.size());
|
||||
auto *mem_edge_type_index = static_cast<InMemoryEdgeTypeIndex *>(indices->edge_type_index_.get());
|
||||
for (const auto &item : indices_metadata.edge) {
|
||||
if (!mem_edge_type_index->CreateIndex(item, vertices->access())) {
|
||||
throw RecoveryFailure("The edge-type index must be created here!");
|
||||
}
|
||||
spdlog::info("Index on :{} is recreated from metadata", name_id_mapper->IdToName(item.AsUint()));
|
||||
}
|
||||
spdlog::info("Edge-type indices are recreated.");
|
||||
|
||||
spdlog::info("Recreating constraints from metadata.");
|
||||
spdlog::info("Indices are recreated.");
|
||||
}
|
||||
|
||||
void RecoverExistenceConstraints(const RecoveredIndicesAndConstraints::ConstraintsMetadata &constraints_metadata,
|
||||
@ -358,7 +368,6 @@ std::optional<RecoveryInfo> Recovery::RecoverData(std::string *uuid, Replication
|
||||
spdlog::warn(utils::MessageWithLink("No snapshot or WAL file found.", "https://memgr.ph/durability"));
|
||||
return std::nullopt;
|
||||
}
|
||||
// TODO(antoniofilipovic) What is the logic here?
|
||||
std::sort(wal_files.begin(), wal_files.end());
|
||||
// UUID used for durability is the UUID of the last WAL file.
|
||||
// Same for the epoch id.
|
||||
@ -437,17 +446,13 @@ std::optional<RecoveryInfo> Recovery::RecoverData(std::string *uuid, Replication
|
||||
last_loaded_timestamp.emplace(recovery_info.next_timestamp - 1);
|
||||
}
|
||||
|
||||
bool epoch_history_empty = epoch_history->empty();
|
||||
bool epoch_not_recorded = !epoch_history_empty && epoch_history->back().first != wal_file.epoch_id;
|
||||
auto last_loaded_timestamp_value = last_loaded_timestamp.value_or(0);
|
||||
|
||||
if (epoch_history_empty || epoch_not_recorded) {
|
||||
epoch_history->emplace_back(std::string(wal_file.epoch_id), last_loaded_timestamp_value);
|
||||
}
|
||||
|
||||
auto last_epoch_updated = !epoch_history_empty && epoch_history->back().first == wal_file.epoch_id &&
|
||||
epoch_history->back().second < last_loaded_timestamp_value;
|
||||
if (last_epoch_updated) {
|
||||
if (epoch_history->empty() || epoch_history->back().first != wal_file.epoch_id) {
|
||||
// no history or new epoch, add it
|
||||
epoch_history->emplace_back(wal_file.epoch_id, last_loaded_timestamp_value);
|
||||
repl_storage_state.epoch_.SetEpoch(wal_file.epoch_id);
|
||||
} else if (epoch_history->back().second < last_loaded_timestamp_value) {
|
||||
// existing epoch, update with newer timestamp
|
||||
epoch_history->back().second = last_loaded_timestamp_value;
|
||||
}
|
||||
|
||||
@ -469,11 +474,11 @@ std::optional<RecoveryInfo> Recovery::RecoverData(std::string *uuid, Replication
|
||||
|
||||
memgraph::metrics::Measure(memgraph::metrics::SnapshotRecoveryLatency_us,
|
||||
std::chrono::duration_cast<std::chrono::microseconds>(timer.Elapsed()).count());
|
||||
spdlog::info("Set epoch id: {} with commit timestamp {}", std::string(repl_storage_state.epoch_.id()),
|
||||
repl_storage_state.last_commit_timestamp_);
|
||||
spdlog::trace("Set epoch id: {} with commit timestamp {}", std::string(repl_storage_state.epoch_.id()),
|
||||
repl_storage_state.last_commit_timestamp_);
|
||||
|
||||
std::for_each(repl_storage_state.history.begin(), repl_storage_state.history.end(), [](auto &history) {
|
||||
spdlog::info("epoch id: {} with commit timestamp {}", std::string(history.first), history.second);
|
||||
spdlog::trace("epoch id: {} with commit timestamp {}", std::string(history.first), history.second);
|
||||
});
|
||||
return recovery_info;
|
||||
}
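The reworked epoch handling above follows a single rule: keep one (epoch id, last commit timestamp) entry per epoch, appending an entry for a new epoch and only ever moving an existing trailing entry's timestamp forward. A standalone sketch of that rule (illustrative only; the real code additionally records the new epoch in repl_storage_state):

#include <cstdint>
#include <deque>
#include <string>
#include <utility>

// One entry per epoch: a new epoch is appended; the current trailing epoch only
// has its timestamp bumped when a newer one is seen.
void UpdateEpochHistory(std::deque<std::pair<std::string, uint64_t>> &history,
                        const std::string &epoch_id, uint64_t last_loaded_timestamp) {
  if (history.empty() || history.back().first != epoch_id) {
    history.emplace_back(epoch_id, last_loaded_timestamp);
  } else if (history.back().second < last_loaded_timestamp) {
    history.back().second = last_loaded_timestamp;
  }
}
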
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -37,6 +37,8 @@ enum class Marker : uint8_t {
|
||||
SECTION_CONSTRAINTS = 0x25,
|
||||
SECTION_DELTA = 0x26,
|
||||
SECTION_EPOCH_HISTORY = 0x27,
|
||||
SECTION_EDGE_INDICES = 0x28,
|
||||
|
||||
SECTION_OFFSETS = 0x42,
|
||||
|
||||
DELTA_VERTEX_CREATE = 0x50,
|
||||
@ -60,6 +62,8 @@ enum class Marker : uint8_t {
|
||||
DELTA_LABEL_INDEX_STATS_CLEAR = 0x62,
|
||||
DELTA_LABEL_PROPERTY_INDEX_STATS_SET = 0x63,
|
||||
DELTA_LABEL_PROPERTY_INDEX_STATS_CLEAR = 0x64,
|
||||
DELTA_EDGE_TYPE_INDEX_CREATE = 0x65,
|
||||
DELTA_EDGE_TYPE_INDEX_DROP = 0x66,
|
||||
|
||||
VALUE_FALSE = 0x00,
|
||||
VALUE_TRUE = 0xff,
|
||||
@ -85,6 +89,7 @@ static const Marker kMarkersAll[] = {
|
||||
Marker::SECTION_CONSTRAINTS,
|
||||
Marker::SECTION_DELTA,
|
||||
Marker::SECTION_EPOCH_HISTORY,
|
||||
Marker::SECTION_EDGE_INDICES,
|
||||
Marker::SECTION_OFFSETS,
|
||||
Marker::DELTA_VERTEX_CREATE,
|
||||
Marker::DELTA_VERTEX_DELETE,
|
||||
@ -103,6 +108,8 @@ static const Marker kMarkersAll[] = {
|
||||
Marker::DELTA_LABEL_PROPERTY_INDEX_STATS_CLEAR,
|
||||
Marker::DELTA_LABEL_PROPERTY_INDEX_CREATE,
|
||||
Marker::DELTA_LABEL_PROPERTY_INDEX_DROP,
|
||||
Marker::DELTA_EDGE_TYPE_INDEX_CREATE,
|
||||
Marker::DELTA_EDGE_TYPE_INDEX_DROP,
|
||||
Marker::DELTA_EXISTENCE_CONSTRAINT_CREATE,
|
||||
Marker::DELTA_EXISTENCE_CONSTRAINT_DROP,
|
||||
Marker::DELTA_UNIQUE_CONSTRAINT_CREATE,
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -43,6 +43,7 @@ struct RecoveredIndicesAndConstraints {
|
||||
std::vector<std::pair<LabelId, PropertyId>> label_property;
|
||||
std::vector<std::pair<LabelId, LabelIndexStats>> label_stats;
|
||||
std::vector<std::pair<LabelId, std::pair<PropertyId, LabelPropertyIndexStats>>> label_property_stats;
|
||||
std::vector<EdgeTypeId> edge;
|
||||
} indices;
|
||||
|
||||
struct ConstraintsMetadata {
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -332,6 +332,7 @@ std::optional<PropertyValue> Decoder::ReadPropertyValue() {
|
||||
case Marker::SECTION_CONSTRAINTS:
|
||||
case Marker::SECTION_DELTA:
|
||||
case Marker::SECTION_EPOCH_HISTORY:
|
||||
case Marker::SECTION_EDGE_INDICES:
|
||||
case Marker::SECTION_OFFSETS:
|
||||
case Marker::DELTA_VERTEX_CREATE:
|
||||
case Marker::DELTA_VERTEX_DELETE:
|
||||
@ -350,6 +351,8 @@ std::optional<PropertyValue> Decoder::ReadPropertyValue() {
|
||||
case Marker::DELTA_LABEL_PROPERTY_INDEX_STATS_CLEAR:
|
||||
case Marker::DELTA_LABEL_PROPERTY_INDEX_CREATE:
|
||||
case Marker::DELTA_LABEL_PROPERTY_INDEX_DROP:
|
||||
case Marker::DELTA_EDGE_TYPE_INDEX_CREATE:
|
||||
case Marker::DELTA_EDGE_TYPE_INDEX_DROP:
|
||||
case Marker::DELTA_EXISTENCE_CONSTRAINT_CREATE:
|
||||
case Marker::DELTA_EXISTENCE_CONSTRAINT_DROP:
|
||||
case Marker::DELTA_UNIQUE_CONSTRAINT_CREATE:
|
||||
@ -435,6 +438,7 @@ bool Decoder::SkipPropertyValue() {
|
||||
case Marker::SECTION_CONSTRAINTS:
|
||||
case Marker::SECTION_DELTA:
|
||||
case Marker::SECTION_EPOCH_HISTORY:
|
||||
case Marker::SECTION_EDGE_INDICES:
|
||||
case Marker::SECTION_OFFSETS:
|
||||
case Marker::DELTA_VERTEX_CREATE:
|
||||
case Marker::DELTA_VERTEX_DELETE:
|
||||
@ -453,6 +457,8 @@ bool Decoder::SkipPropertyValue() {
|
||||
case Marker::DELTA_LABEL_PROPERTY_INDEX_STATS_CLEAR:
|
||||
case Marker::DELTA_LABEL_PROPERTY_INDEX_CREATE:
|
||||
case Marker::DELTA_LABEL_PROPERTY_INDEX_DROP:
|
||||
case Marker::DELTA_EDGE_TYPE_INDEX_CREATE:
|
||||
case Marker::DELTA_EDGE_TYPE_INDEX_DROP:
|
||||
case Marker::DELTA_EXISTENCE_CONSTRAINT_CREATE:
|
||||
case Marker::DELTA_EXISTENCE_CONSTRAINT_DROP:
|
||||
case Marker::DELTA_UNIQUE_CONSTRAINT_CREATE:
|
||||
|
@ -153,6 +153,11 @@ SnapshotInfo ReadSnapshotInfo(const std::filesystem::path &path) {
|
||||
info.offset_edges = read_offset();
|
||||
info.offset_vertices = read_offset();
|
||||
info.offset_indices = read_offset();
|
||||
if (*version >= 17) {
|
||||
info.offset_edge_indices = read_offset();
|
||||
} else {
|
||||
info.offset_edge_indices = 0U;
|
||||
}
|
||||
info.offset_constraints = read_offset();
|
||||
info.offset_mapper = read_offset();
|
||||
info.offset_epoch_history = read_offset();
|
||||
@ -1379,10 +1384,11 @@ RecoveredSnapshot LoadSnapshotVersion15(const std::filesystem::path &path, utils
|
||||
return {info, recovery_info, std::move(indices_constraints)};
|
||||
}
|
||||
|
||||
RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipList<Vertex> *vertices,
|
||||
utils::SkipList<Edge> *edges,
|
||||
std::deque<std::pair<std::string, uint64_t>> *epoch_history,
|
||||
NameIdMapper *name_id_mapper, std::atomic<uint64_t> *edge_count, const Config &config) {
|
||||
RecoveredSnapshot LoadSnapshotVersion16(const std::filesystem::path &path, utils::SkipList<Vertex> *vertices,
|
||||
utils::SkipList<Edge> *edges,
|
||||
std::deque<std::pair<std::string, uint64_t>> *epoch_history,
|
||||
NameIdMapper *name_id_mapper, std::atomic<uint64_t> *edge_count,
|
||||
const Config &config) {
|
||||
RecoveryInfo recovery_info;
|
||||
RecoveredIndicesAndConstraints indices_constraints;
|
||||
|
||||
@ -1391,13 +1397,7 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis
|
||||
if (!version) throw RecoveryFailure("Couldn't read snapshot magic and/or version!");
|
||||
|
||||
if (!IsVersionSupported(*version)) throw RecoveryFailure(fmt::format("Invalid snapshot version {}", *version));
|
||||
if (*version == 14U) {
|
||||
return LoadSnapshotVersion14(path, vertices, edges, epoch_history, name_id_mapper, edge_count,
|
||||
config.salient.items);
|
||||
}
|
||||
if (*version == 15U) {
|
||||
return LoadSnapshotVersion15(path, vertices, edges, epoch_history, name_id_mapper, edge_count, config);
|
||||
}
|
||||
if (*version != 16U) throw RecoveryFailure(fmt::format("Expected snapshot version is 16, but got {}", *version));
|
||||
|
||||
// Cleanup of loaded data in case of failure.
|
||||
bool success = false;
|
||||
@ -1727,6 +1727,380 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis
|
||||
return {info, recovery_info, std::move(indices_constraints)};
|
||||
}
|
||||
|
||||
RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipList<Vertex> *vertices,
|
||||
utils::SkipList<Edge> *edges,
|
||||
std::deque<std::pair<std::string, uint64_t>> *epoch_history,
|
||||
NameIdMapper *name_id_mapper, std::atomic<uint64_t> *edge_count, const Config &config) {
|
||||
RecoveryInfo recovery_info;
|
||||
RecoveredIndicesAndConstraints indices_constraints;
|
||||
|
||||
Decoder snapshot;
|
||||
const auto version = snapshot.Initialize(path, kSnapshotMagic);
|
||||
if (!version) throw RecoveryFailure("Couldn't read snapshot magic and/or version!");
|
||||
|
||||
if (!IsVersionSupported(*version)) throw RecoveryFailure(fmt::format("Invalid snapshot version {}", *version));
|
||||
if (*version == 14U) {
|
||||
return LoadSnapshotVersion14(path, vertices, edges, epoch_history, name_id_mapper, edge_count,
|
||||
config.salient.items);
|
||||
}
|
||||
if (*version == 15U) {
|
||||
return LoadSnapshotVersion15(path, vertices, edges, epoch_history, name_id_mapper, edge_count, config);
|
||||
}
|
||||
if (*version == 16U) {
|
||||
return LoadSnapshotVersion16(path, vertices, edges, epoch_history, name_id_mapper, edge_count, config);
|
||||
}
|
||||
|
||||
// Cleanup of loaded data in case of failure.
|
||||
bool success = false;
|
||||
utils::OnScopeExit cleanup([&] {
|
||||
if (!success) {
|
||||
edges->clear();
|
||||
vertices->clear();
|
||||
epoch_history->clear();
|
||||
}
|
||||
});
|
||||
|
||||
// Read snapshot info.
|
||||
const auto info = ReadSnapshotInfo(path);
|
||||
spdlog::info("Recovering {} vertices and {} edges.", info.vertices_count, info.edges_count);
|
||||
// Check for edges.
|
||||
bool snapshot_has_edges = info.offset_edges != 0;
|
||||
|
||||
// Recover mapper.
|
||||
std::unordered_map<uint64_t, uint64_t> snapshot_id_map;
|
||||
{
|
||||
spdlog::info("Recovering mapper metadata.");
|
||||
if (!snapshot.SetPosition(info.offset_mapper)) throw RecoveryFailure("Couldn't read data from snapshot!");
|
||||
|
||||
auto marker = snapshot.ReadMarker();
|
||||
if (!marker || *marker != Marker::SECTION_MAPPER) throw RecoveryFailure("Failed to read section mapper!");
|
||||
|
||||
auto size = snapshot.ReadUint();
|
||||
if (!size) throw RecoveryFailure("Failed to read name-id mapper size!");
|
||||
|
||||
for (uint64_t i = 0; i < *size; ++i) {
|
||||
auto id = snapshot.ReadUint();
|
||||
if (!id) throw RecoveryFailure("Failed to read id for name-id mapper!");
|
||||
auto name = snapshot.ReadString();
|
||||
if (!name) throw RecoveryFailure("Failed to read name for name-id mapper!");
|
||||
auto my_id = name_id_mapper->NameToId(*name);
|
||||
snapshot_id_map.emplace(*id, my_id);
|
||||
SPDLOG_TRACE("Mapping \"{}\"from snapshot id {} to actual id {}.", *name, *id, my_id);
|
||||
}
|
||||
}
|
||||
auto get_label_from_id = [&snapshot_id_map](uint64_t label_id) {
|
||||
auto it = snapshot_id_map.find(label_id);
|
||||
if (it == snapshot_id_map.end()) throw RecoveryFailure("Couldn't find label id in snapshot_id_map!");
|
||||
return LabelId::FromUint(it->second);
|
||||
};
|
||||
auto get_property_from_id = [&snapshot_id_map](uint64_t property_id) {
|
||||
auto it = snapshot_id_map.find(property_id);
|
||||
if (it == snapshot_id_map.end()) throw RecoveryFailure("Couldn't find property id in snapshot_id_map!");
|
||||
return PropertyId::FromUint(it->second);
|
||||
};
|
||||
auto get_edge_type_from_id = [&snapshot_id_map](uint64_t edge_type_id) {
|
||||
auto it = snapshot_id_map.find(edge_type_id);
|
||||
if (it == snapshot_id_map.end()) throw RecoveryFailure("Couldn't find edge type id in snapshot_id_map!");
|
||||
return EdgeTypeId::FromUint(it->second);
|
||||
};
|
||||
|
||||
// Reset current edge count.
|
||||
edge_count->store(0, std::memory_order_release);
|
||||
|
||||
{
|
||||
spdlog::info("Recovering edges.");
|
||||
// Recover edges.
|
||||
if (snapshot_has_edges) {
|
||||
// We don't need to check whether we store properties on edge or not, because `LoadPartialEdges` will always
|
||||
// iterate over the edges in the snapshot (if they exist) and the current configuration of properties on edge only
|
||||
// affect what it does:
|
||||
// 1. If properties are allowed on edges, then it loads the edges.
|
||||
// 2. If properties are not allowed on edges, then it checks that none of the edges have any properties.
|
||||
if (!snapshot.SetPosition(info.offset_edge_batches)) {
|
||||
throw RecoveryFailure("Couldn't read data from snapshot!");
|
||||
}
|
||||
const auto edge_batches = ReadBatchInfos(snapshot);
|
||||
|
||||
RecoverOnMultipleThreads(
|
||||
config.durability.recovery_thread_count,
|
||||
[path, edges, items = config.salient.items, &get_property_from_id](const size_t /*batch_index*/,
|
||||
const BatchInfo &batch) {
|
||||
LoadPartialEdges(path, *edges, batch.offset, batch.count, items, get_property_from_id);
|
||||
},
|
||||
edge_batches);
|
||||
}
|
||||
spdlog::info("Edges are recovered.");
|
||||
|
||||
// Recover vertices (labels and properties).
|
||||
spdlog::info("Recovering vertices.", info.vertices_count);
|
||||
uint64_t last_vertex_gid{0};
|
||||
|
||||
if (!snapshot.SetPosition(info.offset_vertex_batches)) {
|
||||
throw RecoveryFailure("Couldn't read data from snapshot!");
|
||||
}
|
||||
|
||||
const auto vertex_batches = ReadBatchInfos(snapshot);
|
||||
RecoverOnMultipleThreads(
|
||||
config.durability.recovery_thread_count,
|
||||
[path, vertices, &vertex_batches, &get_label_from_id, &get_property_from_id, &last_vertex_gid](
|
||||
const size_t batch_index, const BatchInfo &batch) {
|
||||
const auto last_vertex_gid_in_batch =
|
||||
LoadPartialVertices(path, *vertices, batch.offset, batch.count, get_label_from_id, get_property_from_id);
|
||||
if (batch_index == vertex_batches.size() - 1) {
|
||||
last_vertex_gid = last_vertex_gid_in_batch;
|
||||
}
|
||||
},
|
||||
vertex_batches);
|
||||
|
||||
spdlog::info("Vertices are recovered.");
|
||||
|
||||
// Recover vertices (in/out edges).
|
||||
spdlog::info("Recover connectivity.");
|
||||
recovery_info.vertex_batches.reserve(vertex_batches.size());
|
||||
for (const auto batch : vertex_batches) {
|
||||
recovery_info.vertex_batches.emplace_back(Gid::FromUint(0), batch.count);
|
||||
}
|
||||
std::atomic<uint64_t> highest_edge_gid{0};
|
||||
|
||||
RecoverOnMultipleThreads(
|
||||
config.durability.recovery_thread_count,
|
||||
[path, vertices, edges, edge_count, items = config.salient.items, snapshot_has_edges, &get_edge_type_from_id,
|
||||
&highest_edge_gid, &recovery_info](const size_t batch_index, const BatchInfo &batch) {
|
||||
const auto result = LoadPartialConnectivity(path, *vertices, *edges, batch.offset, batch.count, items,
|
||||
snapshot_has_edges, get_edge_type_from_id);
|
||||
edge_count->fetch_add(result.edge_count);
|
||||
auto known_highest_edge_gid = highest_edge_gid.load();
|
||||
while (known_highest_edge_gid < result.highest_edge_id) {
|
||||
highest_edge_gid.compare_exchange_weak(known_highest_edge_gid, result.highest_edge_id);
|
||||
}
|
||||
recovery_info.vertex_batches[batch_index].first = result.first_vertex_gid;
|
||||
},
|
||||
vertex_batches);
|
||||
|
||||
spdlog::info("Connectivity is recovered.");
|
||||
|
||||
// Set initial values for edge/vertex ID generators.
|
||||
recovery_info.next_edge_id = highest_edge_gid + 1;
|
||||
recovery_info.next_vertex_id = last_vertex_gid + 1;
|
||||
}
|
||||
|
||||
// Recover indices.
|
||||
{
|
||||
spdlog::info("Recovering metadata of indices.");
|
||||
if (!snapshot.SetPosition(info.offset_indices)) throw RecoveryFailure("Couldn't read data from snapshot!");
|
||||
|
||||
auto marker = snapshot.ReadMarker();
|
||||
if (!marker || *marker != Marker::SECTION_INDICES) throw RecoveryFailure("Couldn't read section indices!");
|
||||
|
||||
// Recover label indices.
|
||||
{
|
||||
auto size = snapshot.ReadUint();
|
||||
if (!size) throw RecoveryFailure("Couldn't read the number of label indices");
|
||||
spdlog::info("Recovering metadata of {} label indices.", *size);
|
||||
for (uint64_t i = 0; i < *size; ++i) {
|
||||
auto label = snapshot.ReadUint();
|
||||
if (!label) throw RecoveryFailure("Couldn't read label of label index!");
|
||||
AddRecoveredIndexConstraint(&indices_constraints.indices.label, get_label_from_id(*label),
|
||||
"The label index already exists!");
|
||||
SPDLOG_TRACE("Recovered metadata of label index for :{}", name_id_mapper->IdToName(snapshot_id_map.at(*label)));
|
||||
}
|
||||
spdlog::info("Metadata of label indices are recovered.");
|
||||
}
|
||||
|
||||
// Recover label indices statistics.
|
||||
{
|
||||
auto size = snapshot.ReadUint();
|
||||
if (!size) throw RecoveryFailure("Couldn't read the number of entries for label index statistics!");
|
||||
spdlog::info("Recovering metadata of {} label indices statistics.", *size);
|
||||
for (uint64_t i = 0; i < *size; ++i) {
|
||||
const auto label = snapshot.ReadUint();
|
||||
if (!label) throw RecoveryFailure("Couldn't read label while recovering label index statistics!");
|
||||
const auto count = snapshot.ReadUint();
|
||||
if (!count) throw RecoveryFailure("Couldn't read count for label index statistics!");
|
||||
const auto avg_degree = snapshot.ReadDouble();
|
||||
if (!avg_degree) throw RecoveryFailure("Couldn't read average degree for label index statistics");
|
||||
const auto label_id = get_label_from_id(*label);
|
||||
indices_constraints.indices.label_stats.emplace_back(label_id, LabelIndexStats{*count, *avg_degree});
|
||||
SPDLOG_TRACE("Recovered metadata of label index statistics for :{}",
|
||||
name_id_mapper->IdToName(snapshot_id_map.at(*label)));
|
||||
}
|
||||
spdlog::info("Metadata of label indices are recovered.");
|
||||
}
|
||||
|
||||
// Recover label+property indices.
|
||||
{
|
||||
auto size = snapshot.ReadUint();
|
||||
if (!size) throw RecoveryFailure("Couldn't recover the number of label property indices!");
|
||||
spdlog::info("Recovering metadata of {} label+property indices.", *size);
|
||||
for (uint64_t i = 0; i < *size; ++i) {
|
||||
auto label = snapshot.ReadUint();
|
||||
if (!label) throw RecoveryFailure("Couldn't read label for label property index!");
|
||||
auto property = snapshot.ReadUint();
|
||||
if (!property) throw RecoveryFailure("Couldn't read property for label property index");
|
||||
AddRecoveredIndexConstraint(&indices_constraints.indices.label_property,
|
||||
{get_label_from_id(*label), get_property_from_id(*property)},
|
||||
"The label+property index already exists!");
|
||||
SPDLOG_TRACE("Recovered metadata of label+property index for :{}({})",
|
||||
name_id_mapper->IdToName(snapshot_id_map.at(*label)),
|
||||
name_id_mapper->IdToName(snapshot_id_map.at(*property)));
|
||||
}
|
||||
spdlog::info("Metadata of label+property indices are recovered.");
|
||||
}
|
||||
|
||||
// Recover label+property indices statistics.
|
||||
{
|
||||
auto size = snapshot.ReadUint();
|
||||
if (!size) throw RecoveryFailure("Couldn't recover the number of entries for label property statistics!");
|
||||
spdlog::info("Recovering metadata of {} label+property indices statistics.", *size);
|
||||
for (uint64_t i = 0; i < *size; ++i) {
|
||||
const auto label = snapshot.ReadUint();
|
||||
if (!label) throw RecoveryFailure("Couldn't read label for label property index statistics!");
|
||||
const auto property = snapshot.ReadUint();
|
||||
if (!property) throw RecoveryFailure("Couldn't read property for label property index statistics!");
|
||||
const auto count = snapshot.ReadUint();
|
||||
if (!count) throw RecoveryFailure("Couldn't read count for label property index statistics!");
|
||||
const auto distinct_values_count = snapshot.ReadUint();
|
||||
if (!distinct_values_count)
|
||||
throw RecoveryFailure("Couldn't read distinct values count for label property index statistics!");
|
||||
const auto statistic = snapshot.ReadDouble();
|
||||
if (!statistic) throw RecoveryFailure("Couldn't read statistics value for label-property index statistics!");
|
||||
const auto avg_group_size = snapshot.ReadDouble();
|
||||
if (!avg_group_size)
|
||||
throw RecoveryFailure("Couldn't read average group size for label property index statistics!");
|
||||
const auto avg_degree = snapshot.ReadDouble();
|
||||
if (!avg_degree) throw RecoveryFailure("Couldn't read average degree for label property index statistics!");
|
||||
const auto label_id = get_label_from_id(*label);
|
||||
const auto property_id = get_property_from_id(*property);
|
||||
indices_constraints.indices.label_property_stats.emplace_back(
|
||||
label_id, std::make_pair(property_id, LabelPropertyIndexStats{*count, *distinct_values_count, *statistic,
|
||||
*avg_group_size, *avg_degree}));
|
||||
SPDLOG_TRACE("Recovered metadata of label+property index statistics for :{}({})",
|
||||
name_id_mapper->IdToName(snapshot_id_map.at(*label)),
|
||||
name_id_mapper->IdToName(snapshot_id_map.at(*property)));
|
||||
}
|
||||
spdlog::info("Metadata of label+property indices are recovered.");
|
||||
}
|
||||
|
||||
// Recover edge-type indices.
|
||||
spdlog::info("Recovering metadata of indices.");
|
||||
if (!snapshot.SetPosition(info.offset_edge_indices)) throw RecoveryFailure("Couldn't read data from snapshot!");
|
||||
|
||||
marker = snapshot.ReadMarker();
|
||||
if (!marker || *marker != Marker::SECTION_EDGE_INDICES)
|
||||
throw RecoveryFailure("Couldn't read section edge-indices!");
|
||||
|
||||
{
|
||||
auto size = snapshot.ReadUint();
|
||||
if (!size) throw RecoveryFailure("Couldn't read the number of edge-type indices");
|
||||
spdlog::info("Recovering metadata of {} edge-type indices.", *size);
|
||||
for (uint64_t i = 0; i < *size; ++i) {
|
||||
auto edge_type = snapshot.ReadUint();
|
||||
if (!edge_type) throw RecoveryFailure("Couldn't read edge-type of edge-type index!");
|
||||
AddRecoveredIndexConstraint(&indices_constraints.indices.edge, get_edge_type_from_id(*edge_type),
|
||||
"The edge-type index already exists!");
|
||||
SPDLOG_TRACE("Recovered metadata of edge-type index for :{}",
|
||||
name_id_mapper->IdToName(snapshot_id_map.at(*edge_type)));
|
||||
}
|
||||
spdlog::info("Metadata of edge-type indices are recovered.");
|
||||
}
|
||||
|
||||
spdlog::info("Metadata of indices are recovered.");
|
||||
}
|
||||
|
||||
// Recover constraints.
|
||||
{
|
||||
spdlog::info("Recovering metadata of constraints.");
|
||||
if (!snapshot.SetPosition(info.offset_constraints)) throw RecoveryFailure("Couldn't read data from snapshot!");
|
||||
|
||||
auto marker = snapshot.ReadMarker();
|
||||
if (!marker || *marker != Marker::SECTION_CONSTRAINTS)
|
||||
throw RecoveryFailure("Couldn't read section constraints marker!");
|
||||
|
||||
// Recover existence constraints.
|
||||
{
|
||||
auto size = snapshot.ReadUint();
|
||||
if (!size) throw RecoveryFailure("Couldn't read the number of existence constraints!");
|
||||
spdlog::info("Recovering metadata of {} existence constraints.", *size);
|
||||
for (uint64_t i = 0; i < *size; ++i) {
|
||||
auto label = snapshot.ReadUint();
|
||||
if (!label) throw RecoveryFailure("Couldn't read label of existence constraints!");
|
||||
auto property = snapshot.ReadUint();
|
||||
if (!property) throw RecoveryFailure("Couldn't read property of existence constraints!");
|
||||
AddRecoveredIndexConstraint(&indices_constraints.constraints.existence,
|
||||
{get_label_from_id(*label), get_property_from_id(*property)},
|
||||
"The existence constraint already exists!");
|
||||
SPDLOG_TRACE("Recovered metadata of existence constraint for :{}({})",
|
||||
name_id_mapper->IdToName(snapshot_id_map.at(*label)),
|
||||
name_id_mapper->IdToName(snapshot_id_map.at(*property)));
|
||||
}
|
||||
spdlog::info("Metadata of existence constraints are recovered.");
|
||||
}
|
||||
|
||||
// Recover unique constraints.
|
||||
// Snapshot version should be checked since unique constraints were
|
||||
// implemented in later versions of snapshot.
|
||||
if (*version >= kUniqueConstraintVersion) {
|
||||
auto size = snapshot.ReadUint();
|
||||
if (!size) throw RecoveryFailure("Couldn't read the number of unique constraints!");
|
||||
spdlog::info("Recovering metadata of {} unique constraints.", *size);
|
||||
for (uint64_t i = 0; i < *size; ++i) {
|
||||
auto label = snapshot.ReadUint();
|
||||
if (!label) throw RecoveryFailure("Couldn't read label of unique constraints!");
|
||||
auto properties_count = snapshot.ReadUint();
|
||||
if (!properties_count) throw RecoveryFailure("Couldn't read the number of properties in unique constraint!");
|
||||
std::set<PropertyId> properties;
|
||||
for (uint64_t j = 0; j < *properties_count; ++j) {
|
||||
auto property = snapshot.ReadUint();
|
||||
if (!property) throw RecoveryFailure("Couldn't read property of unique constraint!");
|
||||
properties.insert(get_property_from_id(*property));
|
||||
}
|
||||
AddRecoveredIndexConstraint(&indices_constraints.constraints.unique, {get_label_from_id(*label), properties},
|
||||
"The unique constraint already exists!");
|
||||
SPDLOG_TRACE("Recovered metadata of unique constraints for :{}",
|
||||
name_id_mapper->IdToName(snapshot_id_map.at(*label)));
|
||||
}
|
||||
spdlog::info("Metadata of unique constraints are recovered.");
|
||||
}
|
||||
spdlog::info("Metadata of constraints are recovered.");
|
||||
}
|
||||
|
||||
spdlog::info("Recovering metadata.");
|
||||
// Recover epoch history
|
||||
{
|
||||
if (!snapshot.SetPosition(info.offset_epoch_history)) throw RecoveryFailure("Couldn't read data from snapshot!");
|
||||
|
||||
const auto marker = snapshot.ReadMarker();
|
||||
if (!marker || *marker != Marker::SECTION_EPOCH_HISTORY)
|
||||
throw RecoveryFailure("Couldn't read section epoch history marker!");
|
||||
|
||||
const auto history_size = snapshot.ReadUint();
|
||||
if (!history_size) {
|
||||
throw RecoveryFailure("Couldn't read history size!");
|
||||
}
|
||||
|
||||
for (int i = 0; i < *history_size; ++i) {
|
||||
auto maybe_epoch_id = snapshot.ReadString();
|
||||
if (!maybe_epoch_id) {
|
||||
throw RecoveryFailure("Couldn't read maybe epoch id!");
|
||||
}
|
||||
const auto maybe_last_commit_timestamp = snapshot.ReadUint();
|
||||
if (!maybe_last_commit_timestamp) {
|
||||
throw RecoveryFailure("Couldn't read maybe last commit timestamp!");
|
||||
}
|
||||
epoch_history->emplace_back(std::move(*maybe_epoch_id), *maybe_last_commit_timestamp);
|
||||
}
|
||||
}
|
||||
|
||||
spdlog::info("Metadata recovered.");
|
||||
// Recover timestamp.
|
||||
recovery_info.next_timestamp = info.start_timestamp + 1;
|
||||
|
||||
// Set success flag (to disable cleanup).
|
||||
success = true;
|
||||
|
||||
return {info, recovery_info, std::move(indices_constraints)};
|
||||
}
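During connectivity recovery above, worker threads race to record the highest edge GID, and the loop around compare_exchange_weak is a lock-free "fetch max". A standalone sketch of that idiom (illustrative only):

#include <atomic>
#include <cstdint>

// Install `value` only if it exceeds the current maximum; on a failed CAS,
// compare_exchange_weak reloads `current` and the loop re-checks the condition.
void AtomicFetchMax(std::atomic<uint64_t> &target, uint64_t value) {
  auto current = target.load();
  while (current < value && !target.compare_exchange_weak(current, value)) {
  }
}
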
|
||||
|
||||
using OldSnapshotFiles = std::vector<std::pair<uint64_t, std::filesystem::path>>;
|
||||
void EnsureNecessaryWalFilesExist(const std::filesystem::path &wal_directory, const std::string &uuid,
|
||||
OldSnapshotFiles old_snapshot_files, Transaction *transaction,
|
||||
@ -1835,6 +2209,7 @@ void CreateSnapshot(Storage *storage, Transaction *transaction, const std::files
|
||||
uint64_t offset_edges = 0;
|
||||
uint64_t offset_vertices = 0;
|
||||
uint64_t offset_indices = 0;
|
||||
uint64_t offset_edge_indices = 0;
|
||||
uint64_t offset_constraints = 0;
|
||||
uint64_t offset_mapper = 0;
|
||||
uint64_t offset_metadata = 0;
|
||||
@ -1847,6 +2222,7 @@ void CreateSnapshot(Storage *storage, Transaction *transaction, const std::files
|
||||
snapshot.WriteUint(offset_edges);
|
||||
snapshot.WriteUint(offset_vertices);
|
||||
snapshot.WriteUint(offset_indices);
|
||||
snapshot.WriteUint(offset_edge_indices);
|
||||
snapshot.WriteUint(offset_constraints);
|
||||
snapshot.WriteUint(offset_mapper);
|
||||
snapshot.WriteUint(offset_epoch_history);
|
||||
@ -2106,6 +2482,17 @@ void CreateSnapshot(Storage *storage, Transaction *transaction, const std::files
|
||||
snapshot.SetPosition(last_pos);
|
||||
}
|
||||
}
|
||||
|
||||
// Write edge-type indices.
|
||||
offset_edge_indices = snapshot.GetPosition();
|
||||
snapshot.WriteMarker(Marker::SECTION_EDGE_INDICES);
|
||||
{
|
||||
auto edge_type = storage->indices_.edge_type_index_->ListIndices();
|
||||
snapshot.WriteUint(edge_type.size());
|
||||
for (const auto &item : edge_type) {
|
||||
write_mapping(item);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Write constraints.
|
||||
@ -2196,6 +2583,7 @@ void CreateSnapshot(Storage *storage, Transaction *transaction, const std::files
|
||||
snapshot.WriteUint(offset_edges);
|
||||
snapshot.WriteUint(offset_vertices);
|
||||
snapshot.WriteUint(offset_indices);
|
||||
snapshot.WriteUint(offset_edge_indices);
|
||||
snapshot.WriteUint(offset_constraints);
|
||||
snapshot.WriteUint(offset_mapper);
|
||||
snapshot.WriteUint(offset_epoch_history);
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -34,6 +34,7 @@ struct SnapshotInfo {
|
||||
uint64_t offset_edges;
|
||||
uint64_t offset_vertices;
|
||||
uint64_t offset_indices;
|
||||
uint64_t offset_edge_indices;
|
||||
uint64_t offset_constraints;
|
||||
uint64_t offset_mapper;
|
||||
uint64_t offset_epoch_history;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -23,6 +23,8 @@ enum class StorageMetadataOperation {
|
||||
LABEL_PROPERTY_INDEX_DROP,
|
||||
LABEL_PROPERTY_INDEX_STATS_SET,
|
||||
LABEL_PROPERTY_INDEX_STATS_CLEAR,
|
||||
EDGE_TYPE_INDEX_CREATE,
|
||||
EDGE_TYPE_INDEX_DROP,
|
||||
EXISTENCE_CONSTRAINT_CREATE,
|
||||
EXISTENCE_CONSTRAINT_DROP,
|
||||
UNIQUE_CONSTRAINT_CREATE,
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -20,7 +20,7 @@ namespace memgraph::storage::durability {
|
||||
// The current version of snapshot and WAL encoding / decoding.
|
||||
// IMPORTANT: Please bump this version for every snapshot and/or WAL format
|
||||
// change!!!
|
||||
const uint64_t kVersion{16};
|
||||
const uint64_t kVersion{17};
|
||||
|
||||
const uint64_t kOldestSupportedVersion{14};
|
||||
const uint64_t kUniqueConstraintVersion{13};
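With the snapshot/WAL format bumped from version 16 to 17 for the new edge-index section, files down to version 14 remain readable. A hedged sketch of the compatibility window these constants imply (illustrative; the actual IsVersionSupported check used during recovery may differ):

#include <cstdint>

constexpr uint64_t kCurrentVersion{17};   // mirrors kVersion above
constexpr uint64_t kOldestSupported{14};  // mirrors kOldestSupportedVersion above

constexpr bool IsVersionInSupportedWindow(uint64_t version) {
  return version >= kOldestSupported && version <= kCurrentVersion;
}

static_assert(IsVersionInSupportedWindow(14));   // oldest format still accepted
static_assert(IsVersionInSupportedWindow(17));   // format written by this build
static_assert(!IsVersionInSupportedWindow(13));  // older than the supported window
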
|
||||
|
@ -95,6 +95,10 @@ Marker OperationToMarker(StorageMetadataOperation operation) {
|
||||
return Marker::DELTA_LABEL_PROPERTY_INDEX_STATS_SET;
|
||||
case StorageMetadataOperation::LABEL_PROPERTY_INDEX_STATS_CLEAR:
|
||||
return Marker::DELTA_LABEL_PROPERTY_INDEX_STATS_CLEAR;
|
||||
case StorageMetadataOperation::EDGE_TYPE_INDEX_CREATE:
|
||||
return Marker::DELTA_EDGE_TYPE_INDEX_CREATE;
|
||||
case StorageMetadataOperation::EDGE_TYPE_INDEX_DROP:
|
||||
return Marker::DELTA_EDGE_TYPE_INDEX_DROP;
|
||||
case StorageMetadataOperation::EXISTENCE_CONSTRAINT_CREATE:
|
||||
return Marker::DELTA_EXISTENCE_CONSTRAINT_CREATE;
|
||||
case StorageMetadataOperation::EXISTENCE_CONSTRAINT_DROP:
|
||||
@ -172,6 +176,10 @@ WalDeltaData::Type MarkerToWalDeltaDataType(Marker marker) {
|
||||
return WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_SET;
|
||||
case Marker::DELTA_LABEL_PROPERTY_INDEX_STATS_CLEAR:
|
||||
return WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_CLEAR;
|
||||
case Marker::DELTA_EDGE_TYPE_INDEX_CREATE:
|
||||
return WalDeltaData::Type::EDGE_INDEX_CREATE;
|
||||
case Marker::DELTA_EDGE_TYPE_INDEX_DROP:
|
||||
return WalDeltaData::Type::EDGE_INDEX_DROP;
|
||||
case Marker::DELTA_EXISTENCE_CONSTRAINT_CREATE:
|
||||
return WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE;
|
||||
case Marker::DELTA_EXISTENCE_CONSTRAINT_DROP:
|
||||
@ -198,6 +206,7 @@ WalDeltaData::Type MarkerToWalDeltaDataType(Marker marker) {
|
||||
case Marker::SECTION_CONSTRAINTS:
|
||||
case Marker::SECTION_DELTA:
|
||||
case Marker::SECTION_EPOCH_HISTORY:
|
||||
case Marker::SECTION_EDGE_INDICES:
|
||||
case Marker::SECTION_OFFSETS:
|
||||
case Marker::VALUE_FALSE:
|
||||
case Marker::VALUE_TRUE:
|
||||
@ -280,6 +289,7 @@ WalDeltaData ReadSkipWalDeltaData(BaseDecoder *decoder) {
|
||||
}
|
||||
case WalDeltaData::Type::TRANSACTION_END:
|
||||
break;
|
||||
// NOLINTNEXTLINE(bugprone-branch-clone)
|
||||
case WalDeltaData::Type::LABEL_INDEX_CREATE:
|
||||
case WalDeltaData::Type::LABEL_INDEX_DROP:
|
||||
case WalDeltaData::Type::LABEL_INDEX_STATS_CLEAR:
|
||||
@ -295,6 +305,17 @@ WalDeltaData ReadSkipWalDeltaData(BaseDecoder *decoder) {
|
||||
}
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::EDGE_INDEX_CREATE:
|
||||
case WalDeltaData::Type::EDGE_INDEX_DROP: {
|
||||
if constexpr (read_data) {
|
||||
auto edge_type = decoder->ReadString();
|
||||
if (!edge_type) throw RecoveryFailure("Invalid WAL data!");
|
||||
delta.operation_edge_type.edge_type = std::move(*edge_type);
|
||||
} else {
|
||||
if (!decoder->SkipString()) throw RecoveryFailure("Invalid WAL data!");
|
||||
}
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::LABEL_INDEX_STATS_SET: {
|
||||
if constexpr (read_data) {
|
||||
auto label = decoder->ReadString();
|
||||
@ -522,6 +543,9 @@ bool operator==(const WalDeltaData &a, const WalDeltaData &b) {
|
||||
case WalDeltaData::Type::UNIQUE_CONSTRAINT_DROP:
|
||||
return a.operation_label_properties.label == b.operation_label_properties.label &&
|
||||
a.operation_label_properties.properties == b.operation_label_properties.properties;
|
||||
case WalDeltaData::Type::EDGE_INDEX_CREATE:
|
||||
case WalDeltaData::Type::EDGE_INDEX_DROP:
|
||||
return a.operation_edge_type.edge_type == b.operation_edge_type.edge_type;
|
||||
}
|
||||
}
|
||||
bool operator!=(const WalDeltaData &a, const WalDeltaData &b) { return !(a == b); }
|
||||
@ -703,6 +727,37 @@ void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, Storage
|
||||
}
|
||||
break;
|
||||
}
|
||||
case StorageMetadataOperation::EDGE_TYPE_INDEX_CREATE:
|
||||
case StorageMetadataOperation::EDGE_TYPE_INDEX_DROP: {
|
||||
MG_ASSERT(false, "Invalid function call!");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, StorageMetadataOperation operation,
|
||||
EdgeTypeId edge_type, uint64_t timestamp) {
|
||||
encoder->WriteMarker(Marker::SECTION_DELTA);
|
||||
encoder->WriteUint(timestamp);
|
||||
switch (operation) {
|
||||
case StorageMetadataOperation::EDGE_TYPE_INDEX_CREATE:
|
||||
case StorageMetadataOperation::EDGE_TYPE_INDEX_DROP: {
|
||||
encoder->WriteMarker(OperationToMarker(operation));
|
||||
encoder->WriteString(name_id_mapper->IdToName(edge_type.AsUint()));
|
||||
break;
|
||||
}
|
||||
case StorageMetadataOperation::LABEL_INDEX_CREATE:
|
||||
case StorageMetadataOperation::LABEL_INDEX_DROP:
|
||||
case StorageMetadataOperation::LABEL_INDEX_STATS_CLEAR:
|
||||
case StorageMetadataOperation::LABEL_PROPERTY_INDEX_STATS_CLEAR:
|
||||
case StorageMetadataOperation::LABEL_INDEX_STATS_SET:
|
||||
case StorageMetadataOperation::LABEL_PROPERTY_INDEX_CREATE:
|
||||
case StorageMetadataOperation::LABEL_PROPERTY_INDEX_DROP:
|
||||
case StorageMetadataOperation::EXISTENCE_CONSTRAINT_CREATE:
|
||||
case StorageMetadataOperation::EXISTENCE_CONSTRAINT_DROP:
|
||||
case StorageMetadataOperation::LABEL_PROPERTY_INDEX_STATS_SET:
|
||||
case StorageMetadataOperation::UNIQUE_CONSTRAINT_CREATE:
|
||||
case StorageMetadataOperation::UNIQUE_CONSTRAINT_DROP:
|
||||
MG_ASSERT(false, "Invalid function call!");
|
||||
}
|
||||
}
|
||||
|
||||
@ -887,6 +942,18 @@ RecoveryInfo LoadWal(const std::filesystem::path &path, RecoveredIndicesAndConst
|
||||
"The label index doesn't exist!");
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::EDGE_INDEX_CREATE: {
|
||||
auto edge_type_id = EdgeTypeId::FromUint(name_id_mapper->NameToId(delta.operation_edge_type.edge_type));
|
||||
AddRecoveredIndexConstraint(&indices_constraints->indices.edge, edge_type_id,
|
||||
"The edge-type index already exists!");
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::EDGE_INDEX_DROP: {
|
||||
auto edge_type_id = EdgeTypeId::FromUint(name_id_mapper->NameToId(delta.operation_edge_type.edge_type));
|
||||
RemoveRecoveredIndexConstraint(&indices_constraints->indices.edge, edge_type_id,
|
||||
"The edge-type index doesn't exist!");
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::LABEL_INDEX_STATS_SET: {
|
||||
auto label_id = LabelId::FromUint(name_id_mapper->NameToId(delta.operation_label_stats.label));
|
||||
LabelIndexStats stats{};
|
||||
@ -1088,6 +1155,11 @@ void WalFile::AppendOperation(StorageMetadataOperation operation, LabelId label,
|
||||
UpdateStats(timestamp);
|
||||
}
|
||||
|
||||
void WalFile::AppendOperation(StorageMetadataOperation operation, EdgeTypeId edge_type, uint64_t timestamp) {
|
||||
EncodeOperation(&wal_, name_id_mapper_, operation, edge_type, timestamp);
|
||||
UpdateStats(timestamp);
|
||||
}
|
||||
|
||||
void WalFile::Sync() { wal_.Sync(); }
|
||||
|
||||
uint64_t WalFile::GetSize() { return wal_.GetSize(); }
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -67,6 +67,8 @@ struct WalDeltaData {
|
||||
LABEL_PROPERTY_INDEX_DROP,
|
||||
LABEL_PROPERTY_INDEX_STATS_SET,
|
||||
LABEL_PROPERTY_INDEX_STATS_CLEAR,
|
||||
EDGE_INDEX_CREATE,
|
||||
EDGE_INDEX_DROP,
|
||||
EXISTENCE_CONSTRAINT_CREATE,
|
||||
EXISTENCE_CONSTRAINT_DROP,
|
||||
UNIQUE_CONSTRAINT_CREATE,
|
||||
@ -111,6 +113,10 @@ struct WalDeltaData {
|
||||
std::set<std::string, std::less<>> properties;
|
||||
} operation_label_properties;
|
||||
|
||||
struct {
|
||||
std::string edge_type;
|
||||
} operation_edge_type;
|
||||
|
||||
struct {
|
||||
std::string label;
|
||||
std::string stats;
|
||||
@ -155,6 +161,8 @@ constexpr bool IsWalDeltaDataTypeTransactionEndVersion15(const WalDeltaData::Typ
|
||||
case WalDeltaData::Type::LABEL_PROPERTY_INDEX_DROP:
|
||||
case WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_SET:
|
||||
case WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_CLEAR:
|
||||
case WalDeltaData::Type::EDGE_INDEX_CREATE:
|
||||
case WalDeltaData::Type::EDGE_INDEX_DROP:
|
||||
case WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE:
|
||||
case WalDeltaData::Type::EXISTENCE_CONSTRAINT_DROP:
|
||||
case WalDeltaData::Type::UNIQUE_CONSTRAINT_CREATE:
|
||||
@ -164,7 +172,7 @@ constexpr bool IsWalDeltaDataTypeTransactionEndVersion15(const WalDeltaData::Typ
|
||||
}
|
||||
|
||||
constexpr bool IsWalDeltaDataTypeTransactionEnd(const WalDeltaData::Type type, const uint64_t version = kVersion) {
|
||||
if (version < 16U) {
|
||||
if (version < 17U) {
|
||||
return IsWalDeltaDataTypeTransactionEndVersion15(type);
|
||||
}
|
||||
// All deltas are now handled in a transactional scope
|
||||
@ -208,6 +216,9 @@ void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, Storage
|
||||
LabelId label, const std::set<PropertyId> &properties, const LabelIndexStats &stats,
|
||||
const LabelPropertyIndexStats &property_stats, uint64_t timestamp);
|
||||
|
||||
void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, StorageMetadataOperation operation,
|
||||
EdgeTypeId edge_type, uint64_t timestamp);
|
||||
|
||||
/// Function used to load the WAL data into the storage.
|
||||
/// @throw RecoveryFailure
|
||||
RecoveryInfo LoadWal(const std::filesystem::path &path, RecoveredIndicesAndConstraints *indices_constraints,
|
||||
@ -240,6 +251,8 @@ class WalFile {
|
||||
void AppendOperation(StorageMetadataOperation operation, LabelId label, const std::set<PropertyId> &properties,
|
||||
const LabelIndexStats &stats, const LabelPropertyIndexStats &property_stats, uint64_t timestamp);
|
||||
|
||||
void AppendOperation(StorageMetadataOperation operation, EdgeTypeId edge_type, uint64_t timestamp);
|
||||
|
||||
void Sync();
|
||||
|
||||
uint64_t GetSize();
|
||||
|
149
src/storage/v2/edges_iterable.cpp
Normal file
@ -0,0 +1,149 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "storage/v2/edges_iterable.hpp"
|
||||
|
||||
namespace memgraph::storage {
|
||||
|
||||
EdgesIterable::EdgesIterable(InMemoryEdgeTypeIndex::Iterable edges) : type_(Type::BY_EDGE_TYPE_IN_MEMORY) {
|
||||
new (&in_memory_edges_by_edge_type_) InMemoryEdgeTypeIndex::Iterable(std::move(edges));
|
||||
}
|
||||
|
||||
EdgesIterable::EdgesIterable(EdgesIterable &&other) noexcept : type_(other.type_) {
|
||||
switch (other.type_) {
|
||||
case Type::BY_EDGE_TYPE_IN_MEMORY:
|
||||
new (&in_memory_edges_by_edge_type_)
|
||||
InMemoryEdgeTypeIndex::Iterable(std::move(other.in_memory_edges_by_edge_type_));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
EdgesIterable &EdgesIterable::operator=(EdgesIterable &&other) noexcept {
|
||||
Destroy();
|
||||
type_ = other.type_;
|
||||
switch (other.type_) {
|
||||
case Type::BY_EDGE_TYPE_IN_MEMORY:
|
||||
new (&in_memory_edges_by_edge_type_)
|
||||
InMemoryEdgeTypeIndex::Iterable(std::move(other.in_memory_edges_by_edge_type_));
|
||||
break;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
EdgesIterable::~EdgesIterable() { Destroy(); }
|
||||
|
||||
void EdgesIterable::Destroy() noexcept {
|
||||
switch (type_) {
|
||||
case Type::BY_EDGE_TYPE_IN_MEMORY:
|
||||
in_memory_edges_by_edge_type_.InMemoryEdgeTypeIndex::Iterable::~Iterable();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
EdgesIterable::Iterator EdgesIterable::begin() {
|
||||
switch (type_) {
|
||||
case Type::BY_EDGE_TYPE_IN_MEMORY:
|
||||
return Iterator(in_memory_edges_by_edge_type_.begin());
|
||||
}
|
||||
}
|
||||
|
||||
EdgesIterable::Iterator EdgesIterable::end() {
|
||||
switch (type_) {
|
||||
case Type::BY_EDGE_TYPE_IN_MEMORY:
|
||||
return Iterator(in_memory_edges_by_edge_type_.end());
|
||||
}
|
||||
}
|
||||
|
||||
EdgesIterable::Iterator::Iterator(InMemoryEdgeTypeIndex::Iterable::Iterator it) : type_(Type::BY_EDGE_TYPE_IN_MEMORY) {
|
||||
// NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg)
|
||||
new (&in_memory_edges_by_edge_type_) InMemoryEdgeTypeIndex::Iterable::Iterator(std::move(it));
|
||||
}
|
||||
|
||||
EdgesIterable::Iterator::Iterator(const EdgesIterable::Iterator &other) : type_(other.type_) {
|
||||
switch (other.type_) {
|
||||
case Type::BY_EDGE_TYPE_IN_MEMORY:
|
||||
new (&in_memory_edges_by_edge_type_)
|
||||
InMemoryEdgeTypeIndex::Iterable::Iterator(other.in_memory_edges_by_edge_type_);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cert-oop54-cpp)
|
||||
EdgesIterable::Iterator &EdgesIterable::Iterator::operator=(const EdgesIterable::Iterator &other) {
|
||||
Destroy();
|
||||
type_ = other.type_;
|
||||
switch (other.type_) {
|
||||
case Type::BY_EDGE_TYPE_IN_MEMORY:
|
||||
new (&in_memory_edges_by_edge_type_)
|
||||
InMemoryEdgeTypeIndex::Iterable::Iterator(other.in_memory_edges_by_edge_type_);
|
||||
break;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
EdgesIterable::Iterator::Iterator(EdgesIterable::Iterator &&other) noexcept : type_(other.type_) {
|
||||
switch (other.type_) {
|
||||
case Type::BY_EDGE_TYPE_IN_MEMORY:
|
||||
new (&in_memory_edges_by_edge_type_)
|
||||
// NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg)
|
||||
InMemoryEdgeTypeIndex::Iterable::Iterator(std::move(other.in_memory_edges_by_edge_type_));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
EdgesIterable::Iterator &EdgesIterable::Iterator::operator=(EdgesIterable::Iterator &&other) noexcept {
|
||||
Destroy();
|
||||
type_ = other.type_;
|
||||
switch (other.type_) {
|
||||
case Type::BY_EDGE_TYPE_IN_MEMORY:
|
||||
new (&in_memory_edges_by_edge_type_)
|
||||
// NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg)
|
||||
InMemoryEdgeTypeIndex::Iterable::Iterator(std::move(other.in_memory_edges_by_edge_type_));
|
||||
break;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
EdgesIterable::Iterator::~Iterator() { Destroy(); }
|
||||
|
||||
void EdgesIterable::Iterator::Destroy() noexcept {
|
||||
switch (type_) {
|
||||
case Type::BY_EDGE_TYPE_IN_MEMORY:
|
||||
in_memory_edges_by_edge_type_.InMemoryEdgeTypeIndex::Iterable::Iterator::~Iterator();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
EdgeAccessor const &EdgesIterable::Iterator::operator*() const {
|
||||
switch (type_) {
|
||||
;
|
||||
case Type::BY_EDGE_TYPE_IN_MEMORY:
|
||||
return *in_memory_edges_by_edge_type_;
|
||||
}
|
||||
}
|
||||
|
||||
EdgesIterable::Iterator &EdgesIterable::Iterator::operator++() {
|
||||
switch (type_) {
|
||||
case Type::BY_EDGE_TYPE_IN_MEMORY:
|
||||
++in_memory_edges_by_edge_type_;
|
||||
break;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool EdgesIterable::Iterator::operator==(const Iterator &other) const {
|
||||
switch (type_) {
|
||||
case Type::BY_EDGE_TYPE_IN_MEMORY:
|
||||
return in_memory_edges_by_edge_type_ == other.in_memory_edges_by_edge_type_;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace memgraph::storage
|
73
src/storage/v2/edges_iterable.hpp
Normal file
@ -0,0 +1,73 @@
|
||||
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#include "storage/v2/all_vertices_iterable.hpp"
#include "storage/v2/inmemory/edge_type_index.hpp"

namespace memgraph::storage {

class InMemoryEdgeTypeIndex;

class EdgesIterable final {
  enum class Type { BY_EDGE_TYPE_IN_MEMORY };

  Type type_;
  union {
    InMemoryEdgeTypeIndex::Iterable in_memory_edges_by_edge_type_;
  };

  void Destroy() noexcept;

 public:
  explicit EdgesIterable(InMemoryEdgeTypeIndex::Iterable);

  EdgesIterable(const EdgesIterable &) = delete;
  EdgesIterable &operator=(const EdgesIterable &) = delete;

  EdgesIterable(EdgesIterable &&) noexcept;
  EdgesIterable &operator=(EdgesIterable &&) noexcept;

  ~EdgesIterable();

  class Iterator final {
    Type type_;
    union {
      InMemoryEdgeTypeIndex::Iterable::Iterator in_memory_edges_by_edge_type_;
    };

    void Destroy() noexcept;

   public:
    explicit Iterator(InMemoryEdgeTypeIndex::Iterable::Iterator);

    Iterator(const Iterator &);
    Iterator &operator=(const Iterator &);

    Iterator(Iterator &&) noexcept;
    Iterator &operator=(Iterator &&) noexcept;

    ~Iterator();

    EdgeAccessor const &operator*() const;

    Iterator &operator++();

    bool operator==(const Iterator &other) const;
    bool operator!=(const Iterator &other) const { return !(*this == other); }
  };

  Iterator begin();
  Iterator end();
};

} // namespace memgraph::storage
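For orientation, a minimal sketch of how this iterable is meant to be consumed; the accessor setup (Access(), NameToEdgeType()) is assumed from the surrounding codebase and is not part of this diff, only the Edges() call and the range-for over EdgesIterable come from the API declared above.

// Sketch only: `storage` is a hypothetical memgraph::storage::Storage instance.
// EdgesIterable's begin()/end() make the edge-type index scan usable in a range-for loop.
uint64_t CountEdgesOfType(memgraph::storage::Storage *storage, const std::string &type_name) {
  auto acc = storage->Access();  // assumed accessor factory
  const auto edge_type = acc->NameToEdgeType(type_name);  // assumed name-to-id mapping helper
  uint64_t count = 0;
  for (const auto &edge : acc->Edges(edge_type, memgraph::storage::View::OLD)) {
    (void)edge;  // each element dereferences to an EdgeAccessor
    ++count;
  }
  return count;
}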
46
src/storage/v2/indices/edge_type_index.hpp
Normal file
@ -0,0 +1,46 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#include <vector>

#include "storage/v2/transaction.hpp"

namespace memgraph::storage {

class EdgeTypeIndex {
 public:
  EdgeTypeIndex() = default;

  EdgeTypeIndex(const EdgeTypeIndex &) = delete;
  EdgeTypeIndex(EdgeTypeIndex &&) = delete;
  EdgeTypeIndex &operator=(const EdgeTypeIndex &) = delete;
  EdgeTypeIndex &operator=(EdgeTypeIndex &&) = delete;

  virtual ~EdgeTypeIndex() = default;

  virtual bool DropIndex(EdgeTypeId edge_type) = 0;

  virtual bool IndexExists(EdgeTypeId edge_type) const = 0;

  virtual std::vector<EdgeTypeId> ListIndices() const = 0;

  virtual uint64_t ApproximateEdgeCount(EdgeTypeId edge_type) const = 0;

  virtual void UpdateOnEdgeCreation(Vertex *from, Vertex *to, EdgeRef edge_ref, EdgeTypeId edge_type,
                                    const Transaction &tx) = 0;

  virtual void UpdateOnEdgeModification(Vertex *old_from, Vertex *old_to, Vertex *new_from, Vertex *new_to,
                                        EdgeRef edge_ref, EdgeTypeId edge_type, const Transaction &tx) = 0;
};

} // namespace memgraph::storage
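To make the contract concrete, a stub implementation is sketched below; NoopEdgeTypeIndex is invented purely for illustration and does not appear in this diff. The real implementations are InMemoryEdgeTypeIndex and DiskEdgeTypeIndex introduced elsewhere in this change.

// Illustrative stub only (not part of the change): the minimum a backend must override.
#include "storage/v2/indices/edge_type_index.hpp"

namespace ms = memgraph::storage;

class NoopEdgeTypeIndex final : public ms::EdgeTypeIndex {
 public:
  bool DropIndex(ms::EdgeTypeId /*edge_type*/) override { return false; }
  bool IndexExists(ms::EdgeTypeId /*edge_type*/) const override { return false; }
  std::vector<ms::EdgeTypeId> ListIndices() const override { return {}; }
  uint64_t ApproximateEdgeCount(ms::EdgeTypeId /*edge_type*/) const override { return 0; }
  void UpdateOnEdgeCreation(ms::Vertex * /*from*/, ms::Vertex * /*to*/, ms::EdgeRef /*edge_ref*/,
                            ms::EdgeTypeId /*edge_type*/, const ms::Transaction & /*tx*/) override {}
  void UpdateOnEdgeModification(ms::Vertex * /*old_from*/, ms::Vertex * /*old_to*/, ms::Vertex * /*new_from*/,
                                ms::Vertex * /*new_to*/, ms::EdgeRef /*edge_ref*/, ms::EdgeTypeId /*edge_type*/,
                                const ms::Transaction & /*tx*/) override {}
};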
@ -10,8 +10,10 @@
// licenses/APL.txt.

#include "storage/v2/indices/indices.hpp"
#include "storage/v2/disk/edge_type_index.hpp"
#include "storage/v2/disk/label_index.hpp"
#include "storage/v2/disk/label_property_index.hpp"
#include "storage/v2/inmemory/edge_type_index.hpp"
#include "storage/v2/inmemory/label_index.hpp"
#include "storage/v2/inmemory/label_property_index.hpp"

@ -35,6 +37,8 @@ void Indices::AbortEntries(LabelId label, std::span<std::pair<PropertyValue, Ver
void Indices::RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp, std::stop_token token) const {
  static_cast<InMemoryLabelIndex *>(label_index_.get())->RemoveObsoleteEntries(oldest_active_start_timestamp, token);
  static_cast<InMemoryLabelPropertyIndex *>(label_property_index_.get())
      ->RemoveObsoleteEntries(oldest_active_start_timestamp, token);
  static_cast<InMemoryEdgeTypeIndex *>(edge_type_index_.get())
      ->RemoveObsoleteEntries(oldest_active_start_timestamp, std::move(token));
}

@ -53,14 +57,21 @@ void Indices::UpdateOnSetProperty(PropertyId property, const PropertyValue &valu
  label_property_index_->UpdateOnSetProperty(property, value, vertex, tx);
}

void Indices::UpdateOnEdgeCreation(Vertex *from, Vertex *to, EdgeRef edge_ref, EdgeTypeId edge_type,
                                   const Transaction &tx) const {
  edge_type_index_->UpdateOnEdgeCreation(from, to, edge_ref, edge_type, tx);
}

Indices::Indices(const Config &config, StorageMode storage_mode) {
  std::invoke([this, config, storage_mode]() {
    if (storage_mode == StorageMode::IN_MEMORY_TRANSACTIONAL || storage_mode == StorageMode::IN_MEMORY_ANALYTICAL) {
      label_index_ = std::make_unique<InMemoryLabelIndex>();
      label_property_index_ = std::make_unique<InMemoryLabelPropertyIndex>();
      edge_type_index_ = std::make_unique<InMemoryEdgeTypeIndex>();
    } else {
      label_index_ = std::make_unique<DiskLabelIndex>(config);
      label_property_index_ = std::make_unique<DiskLabelPropertyIndex>(config);
      edge_type_index_ = std::make_unique<DiskEdgeTypeIndex>();
    }
  });
}

@ -15,6 +15,7 @@
#include <span>

#include "storage/v2/id_types.hpp"
#include "storage/v2/indices/edge_type_index.hpp"
#include "storage/v2/indices/label_index.hpp"
#include "storage/v2/indices/label_property_index.hpp"
#include "storage/v2/storage_mode.hpp"
@ -64,8 +65,12 @@ struct Indices {
  void UpdateOnSetProperty(PropertyId property, const PropertyValue &value, Vertex *vertex,
                           const Transaction &tx) const;

  void UpdateOnEdgeCreation(Vertex *from, Vertex *to, EdgeRef edge_ref, EdgeTypeId edge_type,
                            const Transaction &tx) const;

  std::unique_ptr<LabelIndex> label_index_;
  std::unique_ptr<LabelPropertyIndex> label_property_index_;
  std::unique_ptr<EdgeTypeIndex> edge_type_index_;
};

} // namespace memgraph::storage
318
src/storage/v2/inmemory/edge_type_index.cpp
Normal file
@ -0,0 +1,318 @@
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "storage/v2/inmemory/edge_type_index.hpp"
|
||||
|
||||
#include "storage/v2/constraints/constraints.hpp"
|
||||
#include "storage/v2/indices/indices_utils.hpp"
|
||||
#include "utils/counter.hpp"
|
||||
|
||||
namespace {
|
||||
|
||||
using Delta = memgraph::storage::Delta;
|
||||
using Vertex = memgraph::storage::Vertex;
|
||||
using Edge = memgraph::storage::Edge;
|
||||
using EdgeRef = memgraph::storage::EdgeRef;
|
||||
using EdgeTypeId = memgraph::storage::EdgeTypeId;
|
||||
using Transaction = memgraph::storage::Transaction;
|
||||
using View = memgraph::storage::View;
|
||||
|
||||
bool IsIndexEntryVisible(Edge *edge, const Transaction *transaction, View view) {
|
||||
bool exists = true;
|
||||
bool deleted = true;
|
||||
Delta *delta = nullptr;
|
||||
{
|
||||
auto guard = std::shared_lock{edge->lock};
|
||||
deleted = edge->deleted;
|
||||
delta = edge->delta;
|
||||
}
|
||||
ApplyDeltasForRead(transaction, delta, view, [&](const Delta &delta) {
|
||||
switch (delta.action) {
|
||||
case Delta::Action::ADD_LABEL:
|
||||
case Delta::Action::REMOVE_LABEL:
|
||||
case Delta::Action::SET_PROPERTY:
|
||||
case Delta::Action::ADD_IN_EDGE:
|
||||
case Delta::Action::ADD_OUT_EDGE:
|
||||
case Delta::Action::REMOVE_IN_EDGE:
|
||||
case Delta::Action::REMOVE_OUT_EDGE:
|
||||
break;
|
||||
case Delta::Action::RECREATE_OBJECT: {
|
||||
deleted = false;
|
||||
break;
|
||||
}
|
||||
case Delta::Action::DELETE_DESERIALIZED_OBJECT:
|
||||
case Delta::Action::DELETE_OBJECT: {
|
||||
exists = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
return exists && !deleted;
|
||||
}
|
||||
|
||||
using ReturnType = std::optional<std::tuple<EdgeTypeId, Vertex *, EdgeRef>>;
|
||||
ReturnType VertexDeletedConnectedEdges(Vertex *vertex, Edge *edge, const Transaction *transaction, View view) {
|
||||
ReturnType link;
|
||||
Delta *delta = nullptr;
|
||||
{
|
||||
auto guard = std::shared_lock{vertex->lock};
|
||||
delta = vertex->delta;
|
||||
}
|
||||
ApplyDeltasForRead(transaction, delta, view, [&](const Delta &delta) {
|
||||
switch (delta.action) {
|
||||
case Delta::Action::ADD_LABEL:
|
||||
case Delta::Action::REMOVE_LABEL:
|
||||
case Delta::Action::SET_PROPERTY:
|
||||
break;
|
||||
case Delta::Action::ADD_IN_EDGE: {
|
||||
if (edge == delta.vertex_edge.edge.ptr) {
|
||||
link = {delta.vertex_edge.edge_type, delta.vertex_edge.vertex, delta.vertex_edge.edge};
|
||||
auto it = std::find(vertex->in_edges.begin(), vertex->in_edges.end(), link);
|
||||
MG_ASSERT(it == vertex->in_edges.end(), "Invalid database state!");
|
||||
break;
|
||||
}
|
||||
}
|
||||
case Delta::Action::ADD_OUT_EDGE: {
|
||||
if (edge == delta.vertex_edge.edge.ptr) {
|
||||
link = {delta.vertex_edge.edge_type, delta.vertex_edge.vertex, delta.vertex_edge.edge};
|
||||
auto it = std::find(vertex->out_edges.begin(), vertex->out_edges.end(), link);
|
||||
MG_ASSERT(it == vertex->out_edges.end(), "Invalid database state!");
|
||||
break;
|
||||
}
|
||||
}
|
||||
case Delta::Action::REMOVE_IN_EDGE:
|
||||
case Delta::Action::REMOVE_OUT_EDGE:
|
||||
case Delta::Action::RECREATE_OBJECT:
|
||||
case Delta::Action::DELETE_DESERIALIZED_OBJECT:
|
||||
case Delta::Action::DELETE_OBJECT:
|
||||
break;
|
||||
}
|
||||
});
|
||||
return link;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
namespace memgraph::storage {
|
||||
|
||||
bool InMemoryEdgeTypeIndex::CreateIndex(EdgeTypeId edge_type, utils::SkipList<Vertex>::Accessor vertices) {
|
||||
auto [it, emplaced] = index_.try_emplace(edge_type);
|
||||
if (!emplaced) {
|
||||
return false;
|
||||
}
|
||||
|
||||
utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception;
|
||||
try {
|
||||
auto edge_acc = it->second.access();
|
||||
for (auto &from_vertex : vertices) {
|
||||
if (from_vertex.deleted) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (auto &edge : from_vertex.out_edges) {
|
||||
const auto type = std::get<kEdgeTypeIdPos>(edge);
|
||||
if (type == edge_type) {
|
||||
auto *to_vertex = std::get<kVertexPos>(edge);
|
||||
if (to_vertex->deleted) {
|
||||
continue;
|
||||
}
|
||||
edge_acc.insert({&from_vertex, to_vertex, std::get<kEdgeRefPos>(edge).ptr, 0});
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (const utils::OutOfMemoryException &) {
|
||||
utils::MemoryTracker::OutOfMemoryExceptionBlocker oom_exception_blocker;
|
||||
index_.erase(it);
|
||||
throw;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool InMemoryEdgeTypeIndex::DropIndex(EdgeTypeId edge_type) { return index_.erase(edge_type) > 0; }
|
||||
|
||||
bool InMemoryEdgeTypeIndex::IndexExists(EdgeTypeId edge_type) const { return index_.find(edge_type) != index_.end(); }
|
||||
|
||||
std::vector<EdgeTypeId> InMemoryEdgeTypeIndex::ListIndices() const {
|
||||
std::vector<EdgeTypeId> ret;
|
||||
ret.reserve(index_.size());
|
||||
for (const auto &item : index_) {
|
||||
ret.push_back(item.first);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void InMemoryEdgeTypeIndex::RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp, std::stop_token token) {
|
||||
auto maybe_stop = utils::ResettableCounter<2048>();
|
||||
|
||||
for (auto &label_storage : index_) {
|
||||
if (token.stop_requested()) return;
|
||||
|
||||
auto edges_acc = label_storage.second.access();
|
||||
for (auto it = edges_acc.begin(); it != edges_acc.end();) {
|
||||
if (maybe_stop() && token.stop_requested()) return;
|
||||
|
||||
auto next_it = it;
|
||||
++next_it;
|
||||
|
||||
if (it->timestamp >= oldest_active_start_timestamp) {
|
||||
it = next_it;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (next_it != edges_acc.end() || it->from_vertex->deleted || it->to_vertex->deleted ||
|
||||
!std::ranges::all_of(it->from_vertex->out_edges, [&](const auto &edge) {
|
||||
auto *to_vertex = std::get<InMemoryEdgeTypeIndex::kVertexPos>(edge);
|
||||
return to_vertex != it->to_vertex;
|
||||
})) {
|
||||
edges_acc.remove(*it);
|
||||
}
|
||||
|
||||
it = next_it;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t InMemoryEdgeTypeIndex::ApproximateEdgeCount(EdgeTypeId edge_type) const {
|
||||
if (auto it = index_.find(edge_type); it != index_.end()) {
|
||||
return it->second.size();
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void InMemoryEdgeTypeIndex::UpdateOnEdgeCreation(Vertex *from, Vertex *to, EdgeRef edge_ref, EdgeTypeId edge_type,
|
||||
const Transaction &tx) {
|
||||
auto it = index_.find(edge_type);
|
||||
if (it == index_.end()) {
|
||||
return;
|
||||
}
|
||||
auto acc = it->second.access();
|
||||
acc.insert(Entry{from, to, edge_ref.ptr, tx.start_timestamp});
|
||||
}
|
||||
|
||||
void InMemoryEdgeTypeIndex::UpdateOnEdgeModification(Vertex *old_from, Vertex *old_to, Vertex *new_from, Vertex *new_to,
|
||||
EdgeRef edge_ref, EdgeTypeId edge_type, const Transaction &tx) {
|
||||
auto it = index_.find(edge_type);
|
||||
if (it == index_.end()) {
|
||||
return;
|
||||
}
|
||||
auto acc = it->second.access();
|
||||
|
||||
auto entry_to_update = std::ranges::find_if(acc, [&](const auto &entry) {
|
||||
return entry.from_vertex == old_from && entry.to_vertex == old_to && entry.edge == edge_ref.ptr;
|
||||
});
|
||||
|
||||
acc.remove(Entry{entry_to_update->from_vertex, entry_to_update->to_vertex, entry_to_update->edge,
|
||||
entry_to_update->timestamp});
|
||||
acc.insert(Entry{new_from, new_to, edge_ref.ptr, tx.start_timestamp});
|
||||
}
|
||||
|
||||
InMemoryEdgeTypeIndex::Iterable::Iterable(utils::SkipList<Entry>::Accessor index_accessor, EdgeTypeId edge_type,
|
||||
View view, Storage *storage, Transaction *transaction)
|
||||
: index_accessor_(std::move(index_accessor)),
|
||||
edge_type_(edge_type),
|
||||
view_(view),
|
||||
storage_(storage),
|
||||
transaction_(transaction) {}
|
||||
|
||||
InMemoryEdgeTypeIndex::Iterable::Iterator::Iterator(Iterable *self, utils::SkipList<Entry>::Iterator index_iterator)
|
||||
: self_(self),
|
||||
index_iterator_(index_iterator),
|
||||
current_edge_accessor_(EdgeRef{nullptr}, EdgeTypeId::FromInt(0), nullptr, nullptr, self_->storage_, nullptr),
|
||||
current_edge_(nullptr) {
|
||||
AdvanceUntilValid();
|
||||
}
|
||||
|
||||
InMemoryEdgeTypeIndex::Iterable::Iterator &InMemoryEdgeTypeIndex::Iterable::Iterator::operator++() {
|
||||
++index_iterator_;
|
||||
AdvanceUntilValid();
|
||||
return *this;
|
||||
}
|
||||
|
||||
void InMemoryEdgeTypeIndex::Iterable::Iterator::AdvanceUntilValid() {
|
||||
for (; index_iterator_ != self_->index_accessor_.end(); ++index_iterator_) {
|
||||
auto *from_vertex = index_iterator_->from_vertex;
|
||||
auto *to_vertex = index_iterator_->to_vertex;
|
||||
|
||||
if (!IsIndexEntryVisible(index_iterator_->edge, self_->transaction_, self_->view_) || from_vertex->deleted ||
|
||||
to_vertex->deleted) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const bool edge_was_deleted = index_iterator_->edge->deleted;
|
||||
auto [edge_ref, edge_type, deleted_from_vertex, deleted_to_vertex] = GetEdgeInfo();
|
||||
MG_ASSERT(edge_ref != EdgeRef(nullptr), "Invalid database state!");
|
||||
|
||||
if (edge_was_deleted) {
|
||||
from_vertex = deleted_from_vertex;
|
||||
to_vertex = deleted_to_vertex;
|
||||
}
|
||||
|
||||
auto accessor = EdgeAccessor{edge_ref, edge_type, from_vertex, to_vertex, self_->storage_, self_->transaction_};
|
||||
if (!accessor.IsVisible(self_->view_)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
current_edge_accessor_ = accessor;
|
||||
current_edge_ = edge_ref;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
std::tuple<EdgeRef, EdgeTypeId, Vertex *, Vertex *> InMemoryEdgeTypeIndex::Iterable::Iterator::GetEdgeInfo() {
|
||||
auto *from_vertex = index_iterator_->from_vertex;
|
||||
auto *to_vertex = index_iterator_->to_vertex;
|
||||
|
||||
if (index_iterator_->edge->deleted) {
|
||||
const auto missing_in_edge =
|
||||
VertexDeletedConnectedEdges(from_vertex, index_iterator_->edge, self_->transaction_, self_->view_);
|
||||
const auto missing_out_edge =
|
||||
VertexDeletedConnectedEdges(to_vertex, index_iterator_->edge, self_->transaction_, self_->view_);
|
||||
if (missing_in_edge && missing_out_edge &&
|
||||
std::get<kEdgeRefPos>(*missing_in_edge) == std::get<kEdgeRefPos>(*missing_out_edge)) {
|
||||
return std::make_tuple(std::get<kEdgeRefPos>(*missing_in_edge), std::get<kEdgeTypeIdPos>(*missing_in_edge),
|
||||
to_vertex, from_vertex);
|
||||
}
|
||||
}
|
||||
|
||||
const auto &from_edges = from_vertex->out_edges;
|
||||
const auto &to_edges = to_vertex->in_edges;
|
||||
|
||||
auto it = std::find_if(from_edges.begin(), from_edges.end(), [&](const auto &from_entry) {
|
||||
const auto &from_edge = std::get<kEdgeRefPos>(from_entry);
|
||||
return std::any_of(to_edges.begin(), to_edges.end(), [&](const auto &to_entry) {
|
||||
const auto &to_edge = std::get<kEdgeRefPos>(to_entry);
|
||||
return index_iterator_->edge->gid == from_edge.ptr->gid && from_edge.ptr->gid == to_edge.ptr->gid;
|
||||
});
|
||||
});
|
||||
|
||||
if (it != from_edges.end()) {
|
||||
const auto &from_edge = std::get<kEdgeRefPos>(*it);
|
||||
return std::make_tuple(from_edge, std::get<kEdgeTypeIdPos>(*it), from_vertex, to_vertex);
|
||||
}
|
||||
|
||||
return {EdgeRef(nullptr), EdgeTypeId::FromUint(0U), nullptr, nullptr};
|
||||
}
|
||||
|
||||
void InMemoryEdgeTypeIndex::RunGC() {
|
||||
for (auto &index_entry : index_) {
|
||||
index_entry.second.run_gc();
|
||||
}
|
||||
}
|
||||
|
||||
InMemoryEdgeTypeIndex::Iterable InMemoryEdgeTypeIndex::Edges(EdgeTypeId edge_type, View view, Storage *storage,
|
||||
Transaction *transaction) {
|
||||
const auto it = index_.find(edge_type);
|
||||
MG_ASSERT(it != index_.end(), "Index for edge-type {} doesn't exist", edge_type.AsUint());
|
||||
return {it->second.access(), edge_type, view, storage, transaction};
|
||||
}
|
||||
|
||||
} // namespace memgraph::storage
|
113
src/storage/v2/inmemory/edge_type_index.hpp
Normal file
@ -0,0 +1,113 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#include <map>
#include <utility>

#include "storage/v2/constraints/constraints.hpp"
#include "storage/v2/edge_accessor.hpp"
#include "storage/v2/id_types.hpp"
#include "storage/v2/indices/edge_type_index.hpp"
#include "storage/v2/indices/label_index_stats.hpp"
#include "utils/rw_lock.hpp"
#include "utils/synchronized.hpp"

namespace memgraph::storage {

class InMemoryEdgeTypeIndex : public storage::EdgeTypeIndex {
 private:
  struct Entry {
    Vertex *from_vertex;
    Vertex *to_vertex;

    Edge *edge;

    uint64_t timestamp;

    bool operator<(const Entry &rhs) const { return edge->gid < rhs.edge->gid; }
    bool operator==(const Entry &rhs) const { return edge->gid == rhs.edge->gid; }
  };

 public:
  InMemoryEdgeTypeIndex() = default;

  /// @throw std::bad_alloc
  bool CreateIndex(EdgeTypeId edge_type, utils::SkipList<Vertex>::Accessor vertices);

  /// Returns false if there was no index to drop
  bool DropIndex(EdgeTypeId edge_type) override;

  bool IndexExists(EdgeTypeId edge_type) const override;

  std::vector<EdgeTypeId> ListIndices() const override;

  void RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp, std::stop_token token);

  uint64_t ApproximateEdgeCount(EdgeTypeId edge_type) const override;

  void UpdateOnEdgeCreation(Vertex *from, Vertex *to, EdgeRef edge_ref, EdgeTypeId edge_type,
                            const Transaction &tx) override;

  void UpdateOnEdgeModification(Vertex *old_from, Vertex *old_to, Vertex *new_from, Vertex *new_to, EdgeRef edge_ref,
                                EdgeTypeId edge_type, const Transaction &tx) override;

  static constexpr std::size_t kEdgeTypeIdPos = 0U;
  static constexpr std::size_t kVertexPos = 1U;
  static constexpr std::size_t kEdgeRefPos = 2U;

  class Iterable {
   public:
    Iterable(utils::SkipList<Entry>::Accessor index_accessor, EdgeTypeId edge_type, View view, Storage *storage,
             Transaction *transaction);

    class Iterator {
     public:
      Iterator(Iterable *self, utils::SkipList<Entry>::Iterator index_iterator);

      EdgeAccessor const &operator*() const { return current_edge_accessor_; }

      bool operator==(const Iterator &other) const { return index_iterator_ == other.index_iterator_; }
      bool operator!=(const Iterator &other) const { return index_iterator_ != other.index_iterator_; }

      Iterator &operator++();

     private:
      void AdvanceUntilValid();
      std::tuple<EdgeRef, EdgeTypeId, Vertex *, Vertex *> GetEdgeInfo();

      Iterable *self_;
      utils::SkipList<Entry>::Iterator index_iterator_;
      EdgeAccessor current_edge_accessor_;
      EdgeRef current_edge_{nullptr};
    };

    Iterator begin() { return {this, index_accessor_.begin()}; }
    Iterator end() { return {this, index_accessor_.end()}; }

   private:
    utils::SkipList<Entry>::Accessor index_accessor_;
    EdgeTypeId edge_type_;
    View view_;
    Storage *storage_;
    Transaction *transaction_;
  };

  void RunGC();

  Iterable Edges(EdgeTypeId edge_type, View view, Storage *storage, Transaction *transaction);

 private:
  std::map<EdgeTypeId, utils::SkipList<Entry>> index_;
};

} // namespace memgraph::storage
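A hedged sketch of how the pieces above fit together: the vertex accessor, storage pointer, and transaction are assumed to come from an InMemoryStorage instance, as wired up in the inmemory_storage.cpp hunks later in this diff; only calls declared in the header above are used.

// Sketch under assumptions; not a definitive call sequence from this change.
using namespace memgraph::storage;

void SketchEdgeTypeScan(InMemoryEdgeTypeIndex &index, memgraph::utils::SkipList<Vertex>::Accessor vertices,
                        Storage *storage, Transaction *tx, EdgeTypeId edge_type) {
  // CreateIndex scans existing out-edges once and returns false if the index already exists.
  if (!index.CreateIndex(edge_type, std::move(vertices))) return;

  // Edges() returns an Iterable whose Iterator skips entries not visible to `tx` in the given View.
  for (const auto &edge : index.Edges(edge_type, View::OLD, storage, tx)) {
    (void)edge;  // each element is an EdgeAccessor
  }
}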
@ -20,6 +20,7 @@
|
||||
#include "storage/v2/durability/snapshot.hpp"
|
||||
#include "storage/v2/edge_direction.hpp"
|
||||
#include "storage/v2/id_types.hpp"
|
||||
#include "storage/v2/inmemory/edge_type_index.hpp"
|
||||
#include "storage/v2/metadata_delta.hpp"
|
||||
|
||||
/// REPLICATION ///
|
||||
@ -109,7 +110,7 @@ InMemoryStorage::InMemoryStorage(Config config)
|
||||
timestamp_ = std::max(timestamp_, info->next_timestamp);
|
||||
if (info->last_commit_timestamp) {
|
||||
repl_storage_state_.last_commit_timestamp_ = *info->last_commit_timestamp;
|
||||
spdlog::info("Recovering last commit timestamp {}", *info->last_commit_timestamp);
|
||||
spdlog::trace("Recovering last commit timestamp {}", *info->last_commit_timestamp);
|
||||
}
|
||||
}
|
||||
} else if (config_.durability.snapshot_wal_mode != Config::Durability::SnapshotWalMode::DISABLED ||
|
||||
@ -350,6 +351,9 @@ Result<EdgeAccessor> InMemoryStorage::InMemoryAccessor::CreateEdge(VertexAccesso
|
||||
transaction_.manyDeltasCache.Invalidate(from_vertex, edge_type, EdgeDirection::OUT);
|
||||
transaction_.manyDeltasCache.Invalidate(to_vertex, edge_type, EdgeDirection::IN);
|
||||
|
||||
// Update indices if they exist.
|
||||
storage_->indices_.UpdateOnEdgeCreation(from_vertex, to_vertex, edge, edge_type, transaction_);
|
||||
|
||||
// Increment edge count.
|
||||
storage_->edge_count_.fetch_add(1, std::memory_order_acq_rel);
|
||||
}};
|
||||
@ -553,6 +557,11 @@ Result<EdgeAccessor> InMemoryStorage::InMemoryAccessor::EdgeSetFrom(EdgeAccessor
|
||||
CreateAndLinkDelta(&transaction_, to_vertex, Delta::RemoveInEdgeTag(), edge_type, new_from_vertex, edge_ref);
|
||||
to_vertex->in_edges.emplace_back(edge_type, new_from_vertex, edge_ref);
|
||||
|
||||
auto *in_memory = static_cast<InMemoryStorage *>(storage_);
|
||||
auto *mem_edge_type_index = static_cast<InMemoryEdgeTypeIndex *>(in_memory->indices_.edge_type_index_.get());
|
||||
mem_edge_type_index->UpdateOnEdgeModification(old_from_vertex, to_vertex, new_from_vertex, to_vertex, edge_ref,
|
||||
edge_type, transaction_);
|
||||
|
||||
transaction_.manyDeltasCache.Invalidate(new_from_vertex, edge_type, EdgeDirection::OUT);
|
||||
transaction_.manyDeltasCache.Invalidate(old_from_vertex, edge_type, EdgeDirection::OUT);
|
||||
transaction_.manyDeltasCache.Invalidate(to_vertex, edge_type, EdgeDirection::IN);
|
||||
@ -659,6 +668,11 @@ Result<EdgeAccessor> InMemoryStorage::InMemoryAccessor::EdgeSetTo(EdgeAccessor *
|
||||
CreateAndLinkDelta(&transaction_, new_to_vertex, Delta::RemoveInEdgeTag(), edge_type, from_vertex, edge_ref);
|
||||
new_to_vertex->in_edges.emplace_back(edge_type, from_vertex, edge_ref);
|
||||
|
||||
auto *in_memory = static_cast<InMemoryStorage *>(storage_);
|
||||
auto *mem_edge_type_index = static_cast<InMemoryEdgeTypeIndex *>(in_memory->indices_.edge_type_index_.get());
|
||||
mem_edge_type_index->UpdateOnEdgeModification(from_vertex, old_to_vertex, from_vertex, new_to_vertex, edge_ref,
|
||||
edge_type, transaction_);
|
||||
|
||||
transaction_.manyDeltasCache.Invalidate(from_vertex, edge_type, EdgeDirection::OUT);
|
||||
transaction_.manyDeltasCache.Invalidate(old_to_vertex, edge_type, EdgeDirection::IN);
|
||||
transaction_.manyDeltasCache.Invalidate(new_to_vertex, edge_type, EdgeDirection::IN);
|
||||
@ -1264,6 +1278,18 @@ utils::BasicResult<StorageIndexDefinitionError, void> InMemoryStorage::InMemoryA
|
||||
return {};
|
||||
}
|
||||
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> InMemoryStorage::InMemoryAccessor::CreateIndex(
|
||||
EdgeTypeId edge_type) {
|
||||
MG_ASSERT(unique_guard_.owns_lock(), "Create index requires a unique access to the storage!");
|
||||
auto *in_memory = static_cast<InMemoryStorage *>(storage_);
|
||||
auto *mem_edge_type_index = static_cast<InMemoryEdgeTypeIndex *>(in_memory->indices_.edge_type_index_.get());
|
||||
if (!mem_edge_type_index->CreateIndex(edge_type, in_memory->vertices_.access())) {
|
||||
return StorageIndexDefinitionError{IndexDefinitionError{}};
|
||||
}
|
||||
transaction_.md_deltas.emplace_back(MetadataDelta::edge_index_create, edge_type);
|
||||
return {};
|
||||
}
|
||||
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> InMemoryStorage::InMemoryAccessor::DropIndex(LabelId label) {
|
||||
MG_ASSERT(unique_guard_.owns_lock(), "Dropping label index requires a unique access to the storage!");
|
||||
auto *in_memory = static_cast<InMemoryStorage *>(storage_);
|
||||
@ -1292,6 +1318,18 @@ utils::BasicResult<StorageIndexDefinitionError, void> InMemoryStorage::InMemoryA
|
||||
return {};
|
||||
}
|
||||
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> InMemoryStorage::InMemoryAccessor::DropIndex(
|
||||
EdgeTypeId edge_type) {
|
||||
MG_ASSERT(unique_guard_.owns_lock(), "Drop index requires a unique access to the storage!");
|
||||
auto *in_memory = static_cast<InMemoryStorage *>(storage_);
|
||||
auto *mem_edge_type_index = static_cast<InMemoryEdgeTypeIndex *>(in_memory->indices_.edge_type_index_.get());
|
||||
if (!mem_edge_type_index->DropIndex(edge_type)) {
|
||||
return StorageIndexDefinitionError{IndexDefinitionError{}};
|
||||
}
|
||||
transaction_.md_deltas.emplace_back(MetadataDelta::edge_index_drop, edge_type);
|
||||
return {};
|
||||
}
|
||||
|
||||
utils::BasicResult<StorageExistenceConstraintDefinitionError, void>
|
||||
InMemoryStorage::InMemoryAccessor::CreateExistenceConstraint(LabelId label, PropertyId property) {
|
||||
MG_ASSERT(unique_guard_.owns_lock(), "Creating existence requires a unique access to the storage!");
|
||||
@ -1383,6 +1421,11 @@ VerticesIterable InMemoryStorage::InMemoryAccessor::Vertices(
|
||||
mem_label_property_index->Vertices(label, property, lower_bound, upper_bound, view, storage_, &transaction_));
|
||||
}
|
||||
|
||||
EdgesIterable InMemoryStorage::InMemoryAccessor::Edges(EdgeTypeId edge_type, View view) {
|
||||
auto *mem_edge_type_index = static_cast<InMemoryEdgeTypeIndex *>(storage_->indices_.edge_type_index_.get());
|
||||
return EdgesIterable(mem_edge_type_index->Edges(edge_type, view, storage_, &transaction_));
|
||||
}
|
||||
|
||||
Transaction InMemoryStorage::CreateTransaction(
|
||||
IsolationLevel isolation_level, StorageMode storage_mode,
|
||||
memgraph::replication_coordination_glue::ReplicationRole replication_role) {
|
||||
@ -2017,6 +2060,10 @@ bool InMemoryStorage::AppendToWal(const Transaction &transaction, uint64_t final
|
||||
AppendToWalDataDefinition(durability::StorageMetadataOperation::LABEL_INDEX_CREATE, md_delta.label,
|
||||
final_commit_timestamp);
|
||||
} break;
|
||||
case MetadataDelta::Action::EDGE_INDEX_CREATE: {
|
||||
AppendToWalDataDefinition(durability::StorageMetadataOperation::EDGE_TYPE_INDEX_CREATE, md_delta.edge_type,
|
||||
final_commit_timestamp);
|
||||
} break;
|
||||
case MetadataDelta::Action::LABEL_PROPERTY_INDEX_CREATE: {
|
||||
const auto &info = md_delta.label_property;
|
||||
AppendToWalDataDefinition(durability::StorageMetadataOperation::LABEL_PROPERTY_INDEX_CREATE, info.label,
|
||||
@ -2026,6 +2073,10 @@ bool InMemoryStorage::AppendToWal(const Transaction &transaction, uint64_t final
|
||||
AppendToWalDataDefinition(durability::StorageMetadataOperation::LABEL_INDEX_DROP, md_delta.label,
|
||||
final_commit_timestamp);
|
||||
} break;
|
||||
case MetadataDelta::Action::EDGE_INDEX_DROP: {
|
||||
AppendToWalDataDefinition(durability::StorageMetadataOperation::EDGE_TYPE_INDEX_DROP, md_delta.edge_type,
|
||||
final_commit_timestamp);
|
||||
} break;
|
||||
case MetadataDelta::Action::LABEL_PROPERTY_INDEX_DROP: {
|
||||
const auto &info = md_delta.label_property;
|
||||
AppendToWalDataDefinition(durability::StorageMetadataOperation::LABEL_PROPERTY_INDEX_DROP, info.label,
|
||||
@ -2091,6 +2142,12 @@ void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOpera
|
||||
repl_storage_state_.AppendOperation(operation, label, properties, stats, property_stats, final_commit_timestamp);
|
||||
}
|
||||
|
||||
void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOperation operation, EdgeTypeId edge_type,
|
||||
uint64_t final_commit_timestamp) {
|
||||
wal_file_->AppendOperation(operation, edge_type, final_commit_timestamp);
|
||||
repl_storage_state_.AppendOperation(operation, edge_type, final_commit_timestamp);
|
||||
}
|
||||
|
||||
void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label,
|
||||
const std::set<PropertyId> &properties,
|
||||
LabelPropertyIndexStats property_stats,
|
||||
@ -2240,7 +2297,8 @@ IndicesInfo InMemoryStorage::InMemoryAccessor::ListAllIndices() const {
|
||||
auto *mem_label_index = static_cast<InMemoryLabelIndex *>(in_memory->indices_.label_index_.get());
|
||||
auto *mem_label_property_index =
|
||||
static_cast<InMemoryLabelPropertyIndex *>(in_memory->indices_.label_property_index_.get());
|
||||
return {mem_label_index->ListIndices(), mem_label_property_index->ListIndices()};
|
||||
auto *mem_edge_type_index = static_cast<InMemoryEdgeTypeIndex *>(in_memory->indices_.edge_type_index_.get());
|
||||
return {mem_label_index->ListIndices(), mem_label_property_index->ListIndices(), mem_edge_type_index->ListIndices()};
|
||||
}
|
||||
ConstraintsInfo InMemoryStorage::InMemoryAccessor::ListAllConstraints() const {
|
||||
const auto *mem_storage = static_cast<InMemoryStorage *>(storage_);
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include "storage/v2/indices/label_index_stats.hpp"
|
||||
#include "storage/v2/inmemory/edge_type_index.hpp"
|
||||
#include "storage/v2/inmemory/label_index.hpp"
|
||||
#include "storage/v2/inmemory/label_property_index.hpp"
|
||||
#include "storage/v2/inmemory/replication/recovery.hpp"
|
||||
@ -53,6 +54,7 @@ class InMemoryStorage final : public Storage {
|
||||
const InMemoryStorage *storage);
|
||||
friend class InMemoryLabelIndex;
|
||||
friend class InMemoryLabelPropertyIndex;
|
||||
friend class InMemoryEdgeTypeIndex;
|
||||
|
||||
public:
|
||||
enum class CreateSnapshotError : uint8_t { DisabledForReplica, ReachedMaxNumTries };
|
||||
@ -107,6 +109,8 @@ class InMemoryStorage final : public Storage {
|
||||
const std::optional<utils::Bound<PropertyValue>> &lower_bound,
|
||||
const std::optional<utils::Bound<PropertyValue>> &upper_bound, View view) override;
|
||||
|
||||
EdgesIterable Edges(EdgeTypeId edge_type, View view) override;
|
||||
|
||||
/// Return approximate number of all vertices in the database.
|
||||
/// Note that this is always an over-estimate and never an under-estimate.
|
||||
uint64_t ApproximateVertexCount() const override {
|
||||
@ -145,6 +149,10 @@ class InMemoryStorage final : public Storage {
|
||||
label, property, lower, upper);
|
||||
}
|
||||
|
||||
uint64_t ApproximateEdgeCount(EdgeTypeId id) const override {
|
||||
return static_cast<InMemoryStorage *>(storage_)->indices_.edge_type_index_->ApproximateEdgeCount(id);
|
||||
}
|
||||
|
||||
template <typename TResult, typename TIndex, typename TIndexKey>
|
||||
std::optional<TResult> GetIndexStatsForIndex(TIndex *index, TIndexKey &&key) const {
|
||||
return index->GetIndexStats(key);
|
||||
@ -204,6 +212,10 @@ class InMemoryStorage final : public Storage {
|
||||
return static_cast<InMemoryStorage *>(storage_)->indices_.label_property_index_->IndexExists(label, property);
|
||||
}
|
||||
|
||||
bool EdgeTypeIndexExists(EdgeTypeId edge_type) const override {
|
||||
return static_cast<InMemoryStorage *>(storage_)->indices_.edge_type_index_->IndexExists(edge_type);
|
||||
}
|
||||
|
||||
IndicesInfo ListAllIndices() const override;
|
||||
|
||||
ConstraintsInfo ListAllConstraints() const override;
|
||||
@ -239,6 +251,14 @@ class InMemoryStorage final : public Storage {
|
||||
/// @throw std::bad_alloc
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> CreateIndex(LabelId label, PropertyId property) override;
|
||||
|
||||
/// Create an index.
|
||||
/// Returns void if the index has been created.
|
||||
/// Returns `StorageIndexDefinitionError` if an error occures. Error can be:
|
||||
/// * `ReplicationError`: there is at least one SYNC replica that has not confirmed receiving the transaction.
|
||||
/// * `IndexDefinitionError`: the index already exists.
|
||||
/// @throw std::bad_alloc
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> CreateIndex(EdgeTypeId edge_type) override;
|
||||
|
||||
/// Drop an existing index.
|
||||
/// Returns void if the index has been dropped.
|
||||
/// Returns `StorageIndexDefinitionError` if an error occures. Error can be:
|
||||
@ -253,6 +273,13 @@ class InMemoryStorage final : public Storage {
|
||||
/// * `IndexDefinitionError`: the index does not exist.
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> DropIndex(LabelId label, PropertyId property) override;
|
||||
|
||||
/// Drop an existing index.
|
||||
/// Returns void if the index has been dropped.
|
||||
/// Returns `StorageIndexDefinitionError` if an error occures. Error can be:
|
||||
/// * `ReplicationError`: there is at least one SYNC replica that has not confirmed receiving the transaction.
|
||||
/// * `IndexDefinitionError`: the index does not exist.
|
||||
utils::BasicResult<StorageIndexDefinitionError, void> DropIndex(EdgeTypeId edge_type) override;
|
||||
|
||||
/// Returns void if the existence constraint has been created.
|
||||
/// Returns `StorageExistenceConstraintDefinitionError` if an error occures. Error can be:
|
||||
/// * `ReplicationError`: there is at least one SYNC replica that has not confirmed receiving the transaction.
|
||||
@ -374,20 +401,17 @@ class InMemoryStorage final : public Storage {
|
||||
/// Return true in all cases excepted if any sync replicas have not sent confirmation.
|
||||
[[nodiscard]] bool AppendToWal(const Transaction &transaction, uint64_t final_commit_timestamp,
|
||||
DatabaseAccessProtector db_acc);
|
||||
/// Return true in all cases excepted if any sync replicas have not sent confirmation.
|
||||
void AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label,
|
||||
uint64_t final_commit_timestamp);
|
||||
/// Return true in all cases excepted if any sync replicas have not sent confirmation.
|
||||
void AppendToWalDataDefinition(durability::StorageMetadataOperation operation, EdgeTypeId edge_type,
|
||||
uint64_t final_commit_timestamp);
|
||||
void AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label,
|
||||
const std::set<PropertyId> &properties, uint64_t final_commit_timestamp);
|
||||
/// Return true in all cases excepted if any sync replicas have not sent confirmation.
|
||||
void AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label, LabelIndexStats stats,
|
||||
uint64_t final_commit_timestamp);
|
||||
/// Return true in all cases excepted if any sync replicas have not sent confirmation.
|
||||
void AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label,
|
||||
const std::set<PropertyId> &properties, LabelPropertyIndexStats property_stats,
|
||||
uint64_t final_commit_timestamp);
|
||||
/// Return true in all cases excepted if any sync replicas have not sent confirmation.
|
||||
void AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label,
|
||||
const std::set<PropertyId> &properties, LabelIndexStats stats,
|
||||
LabelPropertyIndexStats property_stats, uint64_t final_commit_timestamp);
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -35,6 +35,8 @@ struct MetadataDelta {
|
||||
LABEL_PROPERTY_INDEX_DROP,
|
||||
LABEL_PROPERTY_INDEX_STATS_SET,
|
||||
LABEL_PROPERTY_INDEX_STATS_CLEAR,
|
||||
EDGE_INDEX_CREATE,
|
||||
EDGE_INDEX_DROP,
|
||||
EXISTENCE_CONSTRAINT_CREATE,
|
||||
EXISTENCE_CONSTRAINT_DROP,
|
||||
UNIQUE_CONSTRAINT_CREATE,
|
||||
@ -57,6 +59,10 @@ struct MetadataDelta {
|
||||
} label_property_index_stats_set;
|
||||
static constexpr struct LabelPropertyIndexStatsClear {
|
||||
} label_property_index_stats_clear;
|
||||
static constexpr struct EdgeIndexCreate {
|
||||
} edge_index_create;
|
||||
static constexpr struct EdgeIndexDrop {
|
||||
} edge_index_drop;
|
||||
static constexpr struct ExistenceConstraintCreate {
|
||||
} existence_constraint_create;
|
||||
static constexpr struct ExistenceConstraintDrop {
|
||||
@ -87,6 +93,11 @@ struct MetadataDelta {
|
||||
MetadataDelta(LabelPropertyIndexStatsClear /*tag*/, LabelId label)
|
||||
: action(Action::LABEL_PROPERTY_INDEX_STATS_CLEAR), label{label} {}
|
||||
|
||||
MetadataDelta(EdgeIndexCreate /*tag*/, EdgeTypeId edge_type)
|
||||
: action(Action::EDGE_INDEX_CREATE), edge_type(edge_type) {}
|
||||
|
||||
MetadataDelta(EdgeIndexDrop /*tag*/, EdgeTypeId edge_type) : action(Action::EDGE_INDEX_DROP), edge_type(edge_type) {}
|
||||
|
||||
MetadataDelta(ExistenceConstraintCreate /*tag*/, LabelId label, PropertyId property)
|
||||
: action(Action::EXISTENCE_CONSTRAINT_CREATE), label_property{label, property} {}
|
||||
|
||||
@ -114,6 +125,8 @@ struct MetadataDelta {
|
||||
case Action::LABEL_PROPERTY_INDEX_DROP:
|
||||
case Action::LABEL_PROPERTY_INDEX_STATS_SET:
|
||||
case Action::LABEL_PROPERTY_INDEX_STATS_CLEAR:
|
||||
case Action::EDGE_INDEX_CREATE:
|
||||
case Action::EDGE_INDEX_DROP:
|
||||
case Action::EXISTENCE_CONSTRAINT_CREATE:
|
||||
case Action::EXISTENCE_CONSTRAINT_DROP:
|
||||
break;
|
||||
@ -129,6 +142,8 @@ struct MetadataDelta {
|
||||
union {
|
||||
LabelId label;
|
||||
|
||||
EdgeTypeId edge_type;
|
||||
|
||||
struct {
|
||||
LabelId label;
|
||||
PropertyId property;
|
||||
|
@ -76,13 +76,37 @@ void ReplicationStorageClient::UpdateReplicaState(Storage *storage, DatabaseAcce
|
||||
}
|
||||
}
|
||||
if (branching_point) {
|
||||
spdlog::error(
|
||||
"You cannot register Replica {} to this Main because at one point "
|
||||
"Replica {} acted as the Main instance. Both the Main and Replica {} "
|
||||
"now hold unique data. Please resolve data conflicts and start the "
|
||||
"replication on a clean instance.",
|
||||
client_.name_, client_.name_, client_.name_);
|
||||
replica_state_.WithLock([](auto &val) { val = replication::ReplicaState::DIVERGED_FROM_MAIN; });
|
||||
auto replica_state = replica_state_.Lock();
|
||||
if (*replica_state == replication::ReplicaState::DIVERGED_FROM_MAIN) {
|
||||
return;
|
||||
}
|
||||
*replica_state = replication::ReplicaState::DIVERGED_FROM_MAIN;
|
||||
|
||||
auto log_error = [client_name = client_.name_]() {
|
||||
spdlog::error(
|
||||
"You cannot register Replica {} to this Main because at one point "
|
||||
"Replica {} acted as the Main instance. Both the Main and Replica {} "
|
||||
"now hold unique data. Please resolve data conflicts and start the "
|
||||
"replication on a clean instance.",
|
||||
client_name, client_name, client_name);
|
||||
};
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (!FLAGS_coordinator_server_port) {
|
||||
log_error();
|
||||
return;
|
||||
}
|
||||
client_.thread_pool_.AddTask([storage, gk = std::move(db_acc), this] {
|
||||
const auto [success, timestamp] = this->ForceResetStorage(storage);
|
||||
if (success) {
|
||||
spdlog::info("Successfully reset storage of REPLICA {} to timestamp {}.", client_.name_, timestamp);
|
||||
return;
|
||||
}
|
||||
spdlog::error("You cannot register REPLICA {} to this MAIN because MAIN couldn't reset REPLICA's storage.",
|
||||
client_.name_);
|
||||
});
|
||||
#else
|
||||
log_error();
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
|
||||
@ -265,8 +289,6 @@ void ReplicationStorageClient::RecoverReplica(uint64_t replica_commit, memgraph:
|
||||
spdlog::debug("Starting replica recovery");
|
||||
auto *mem_storage = static_cast<InMemoryStorage *>(storage);
|
||||
|
||||
// TODO(antoniofilipovic): Can we get stuck here in while loop if replica commit timestamp is not updated when using
|
||||
// only snapshot
|
||||
while (true) {
|
||||
auto file_locker = mem_storage->file_retainer_.AddLocker();
|
||||
|
||||
@ -335,6 +357,21 @@ void ReplicationStorageClient::RecoverReplica(uint64_t replica_commit, memgraph:
|
||||
}
|
||||
}
|
||||
|
||||
std::pair<bool, uint64_t> ReplicationStorageClient::ForceResetStorage(memgraph::storage::Storage *storage) {
|
||||
utils::OnScopeExit set_to_maybe_behind{
|
||||
[this]() { replica_state_.WithLock([](auto &state) { state = replication::ReplicaState::MAYBE_BEHIND; }); }};
|
||||
try {
|
||||
auto stream{client_.rpc_client_.Stream<replication::ForceResetStorageRpc>(main_uuid_, storage->uuid())};
|
||||
const auto res = stream.AwaitResponse();
|
||||
return std::pair{res.success, res.current_commit_timestamp};
|
||||
} catch (const rpc::RpcFailedException &) {
|
||||
spdlog::error(
|
||||
utils::MessageWithLink("Couldn't ForceReset data to {}.", client_.name_, "https://memgr.ph/replication"));
|
||||
}
|
||||
|
||||
return {false, 0};
|
||||
}
|
||||
|
||||
////// ReplicaStream //////
|
||||
ReplicaStream::ReplicaStream(Storage *storage, rpc::Client &rpc_client, const uint64_t current_seq_num,
|
||||
utils::UUID main_uuid)
|
||||
@ -370,6 +407,12 @@ void ReplicaStream::AppendOperation(durability::StorageMetadataOperation operati
|
||||
timestamp);
|
||||
}
|
||||
|
||||
void ReplicaStream::AppendOperation(durability::StorageMetadataOperation operation, EdgeTypeId edge_type,
|
||||
uint64_t timestamp) {
|
||||
replication::Encoder encoder(stream_.GetBuilder());
|
||||
EncodeOperation(&encoder, storage_->name_id_mapper_.get(), operation, edge_type, timestamp);
|
||||
}
|
||||
|
||||
replication::AppendDeltasRes ReplicaStream::Finalize() { return stream_.AwaitResponse(); }
|
||||
|
||||
} // namespace memgraph::storage
|
||||
|
@ -65,6 +65,9 @@ class ReplicaStream {
|
||||
const std::set<PropertyId> &properties, const LabelIndexStats &stats,
|
||||
const LabelPropertyIndexStats &property_stats, uint64_t timestamp);
|
||||
|
||||
/// @throw rpc::RpcFailedException
|
||||
void AppendOperation(durability::StorageMetadataOperation operation, EdgeTypeId edge_type, uint64_t timestamp);
|
||||
|
||||
/// @throw rpc::RpcFailedException
|
||||
replication::AppendDeltasRes Finalize();
|
||||
|
||||
@ -188,6 +191,13 @@ class ReplicationStorageClient {
|
||||
*/
|
||||
void UpdateReplicaState(Storage *storage, DatabaseAccessProtector db_acc);
|
||||
|
||||
/**
|
||||
* @brief Forcefully reset storage to as it is when started from scratch.
|
||||
*
|
||||
* @param storage pointer to the storage associated with the client
|
||||
*/
|
||||
std::pair<bool, uint64_t> ForceResetStorage(Storage *storage);
|
||||
|
||||
void LogRpcFailure();
|
||||
|
||||
/**
|
||||
|
@ -53,6 +53,16 @@ void ReplicationStorageState::AppendOperation(durability::StorageMetadataOperati
|
||||
});
|
||||
}
|
||||
|
||||
void ReplicationStorageState::AppendOperation(durability::StorageMetadataOperation operation, EdgeTypeId edge_type,
|
||||
uint64_t final_commit_timestamp) {
|
||||
replication_clients_.WithLock([&](auto &clients) {
|
||||
for (auto &client : clients) {
|
||||
client->IfStreamingTransaction(
|
||||
[&](auto &stream) { stream.AppendOperation(operation, edge_type, final_commit_timestamp); });
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
bool ReplicationStorageState::FinalizeTransaction(uint64_t timestamp, Storage *storage,
|
||||
DatabaseAccessProtector db_acc) {
|
||||
return replication_clients_.WithLock([=, db_acc = std::move(db_acc)](auto &clients) mutable {
|
||||
|
@ -46,6 +46,8 @@ struct ReplicationStorageState {
|
||||
void AppendOperation(durability::StorageMetadataOperation operation, LabelId label,
|
||||
const std::set<PropertyId> &properties, const LabelIndexStats &stats,
|
||||
const LabelPropertyIndexStats &property_stats, uint64_t final_commit_timestamp);
|
||||
void AppendOperation(durability::StorageMetadataOperation operation, EdgeTypeId edge_type,
|
||||
uint64_t final_commit_timestamp);
|
||||
bool FinalizeTransaction(uint64_t timestamp, Storage *storage, DatabaseAccessProtector db_acc);
|
||||
|
||||
// Getters
|
||||
|
@ -59,6 +59,19 @@ void TimestampRes::Save(const TimestampRes &self, memgraph::slk::Builder *builde
|
||||
memgraph::slk::Save(self, builder);
|
||||
}
|
||||
void TimestampRes::Load(TimestampRes *self, memgraph::slk::Reader *reader) { memgraph::slk::Load(self, reader); }
|
||||
|
||||
void ForceResetStorageReq::Save(const ForceResetStorageReq &self, memgraph::slk::Builder *builder) {
|
||||
memgraph::slk::Save(self, builder);
|
||||
}
|
||||
void ForceResetStorageReq::Load(ForceResetStorageReq *self, memgraph::slk::Reader *reader) {
|
||||
memgraph::slk::Load(self, reader);
|
||||
}
|
||||
void ForceResetStorageRes::Save(const ForceResetStorageRes &self, memgraph::slk::Builder *builder) {
|
||||
memgraph::slk::Save(self, builder);
|
||||
}
|
||||
void ForceResetStorageRes::Load(ForceResetStorageRes *self, memgraph::slk::Reader *reader) {
|
||||
memgraph::slk::Load(self, reader);
|
||||
}
|
||||
} // namespace storage::replication
|
||||
|
||||
constexpr utils::TypeInfo storage::replication::AppendDeltasReq::kType{utils::TypeId::REP_APPEND_DELTAS_REQ,
|
||||
@ -97,6 +110,12 @@ constexpr utils::TypeInfo storage::replication::TimestampReq::kType{utils::TypeI
|
||||
constexpr utils::TypeInfo storage::replication::TimestampRes::kType{utils::TypeId::REP_TIMESTAMP_RES, "TimestampRes",
|
||||
nullptr};
|
||||
|
||||
constexpr utils::TypeInfo storage::replication::ForceResetStorageReq::kType{utils::TypeId::REP_FORCE_RESET_STORAGE_REQ,
|
||||
"ForceResetStorageReq", nullptr};
|
||||
|
||||
constexpr utils::TypeInfo storage::replication::ForceResetStorageRes::kType{utils::TypeId::REP_FORCE_RESET_STORAGE_RES,
|
||||
"ForceResetStorageRes", nullptr};
|
||||
|
||||
// Autogenerated SLK serialization code
|
||||
namespace slk {
|
||||
// Serialize code for TimestampRes
|
||||
@ -255,6 +274,30 @@ void Load(memgraph::storage::replication::AppendDeltasReq *self, memgraph::slk::
|
||||
memgraph::slk::Load(&self->seq_num, reader);
|
||||
}
|
||||
|
||||
// Serialize code for ForceResetStorageReq
|
||||
|
||||
void Save(const memgraph::storage::replication::ForceResetStorageReq &self, memgraph::slk::Builder *builder) {
|
||||
memgraph::slk::Save(self.main_uuid, builder);
|
||||
memgraph::slk::Save(self.db_uuid, builder);
|
||||
}
|
||||
|
||||
void Load(memgraph::storage::replication::ForceResetStorageReq *self, memgraph::slk::Reader *reader) {
|
||||
memgraph::slk::Load(&self->main_uuid, reader);
|
||||
memgraph::slk::Load(&self->db_uuid, reader);
|
||||
}
|
||||
|
||||
// Serialize code for ForceResetStorageRes
|
||||
|
||||
void Save(const memgraph::storage::replication::ForceResetStorageRes &self, memgraph::slk::Builder *builder) {
|
||||
memgraph::slk::Save(self.success, builder);
|
||||
memgraph::slk::Save(self.current_commit_timestamp, builder);
|
||||
}
|
||||
|
||||
void Load(memgraph::storage::replication::ForceResetStorageRes *self, memgraph::slk::Reader *reader) {
|
||||
memgraph::slk::Load(&self->success, reader);
|
||||
memgraph::slk::Load(&self->current_commit_timestamp, reader);
|
||||
}
|
||||
|
||||
// Serialize SalientConfig
|
||||
|
||||
void Save(const memgraph::storage::SalientConfig &self, memgraph::slk::Builder *builder) {
|
||||
|
@ -210,6 +210,36 @@ struct TimestampRes {
|
||||
|
||||
using TimestampRpc = rpc::RequestResponse<TimestampReq, TimestampRes>;
|
||||
|
||||
struct ForceResetStorageReq {
|
||||
static const utils::TypeInfo kType;
|
||||
static const utils::TypeInfo &GetTypeInfo() { return kType; }
|
||||
|
||||
static void Load(ForceResetStorageReq *self, memgraph::slk::Reader *reader);
|
||||
static void Save(const ForceResetStorageReq &self, memgraph::slk::Builder *builder);
|
||||
ForceResetStorageReq() = default;
|
||||
explicit ForceResetStorageReq(const utils::UUID &main_uuid, const utils::UUID &db_uuid)
|
||||
: main_uuid{main_uuid}, db_uuid{db_uuid} {}
|
||||
|
||||
utils::UUID main_uuid;
|
||||
utils::UUID db_uuid;
|
||||
};
|
||||
|
||||
struct ForceResetStorageRes {
|
||||
static const utils::TypeInfo kType;
|
||||
static const utils::TypeInfo &GetTypeInfo() { return kType; }
|
||||
|
||||
static void Load(ForceResetStorageRes *self, memgraph::slk::Reader *reader);
|
||||
static void Save(const ForceResetStorageRes &self, memgraph::slk::Builder *builder);
|
||||
ForceResetStorageRes() = default;
|
||||
ForceResetStorageRes(bool success, uint64_t current_commit_timestamp)
|
||||
: success(success), current_commit_timestamp(current_commit_timestamp) {}
|
||||
|
||||
bool success;
|
||||
uint64_t current_commit_timestamp;
|
||||
};
|
||||
|
||||
using ForceResetStorageRpc = rpc::RequestResponse<ForceResetStorageReq, ForceResetStorageRes>;
|
||||
|
||||
} // namespace memgraph::storage::replication
|
||||
|
||||
// SLK serialization declarations
|
||||
@ -267,4 +297,12 @@ void Save(const memgraph::storage::SalientConfig &self, memgraph::slk::Builder *
|
||||
|
||||
void Load(memgraph::storage::SalientConfig *self, memgraph::slk::Reader *reader);
|
||||
|
||||
void Save(const memgraph::storage::replication::ForceResetStorageReq &self, memgraph::slk::Builder *builder);
|
||||
|
||||
void Load(memgraph::storage::replication::ForceResetStorageReq *self, memgraph::slk::Reader *reader);
|
||||
|
||||
void Save(const memgraph::storage::replication::ForceResetStorageRes &self, memgraph::slk::Builder *builder);
|
||||
|
||||
void Load(memgraph::storage::replication::ForceResetStorageRes *self, memgraph::slk::Reader *reader);
|
||||
|
||||
} // namespace memgraph::slk
|
||||
|
@ -30,6 +30,7 @@
|
||||
#include "storage/v2/durability/paths.hpp"
|
||||
#include "storage/v2/durability/wal.hpp"
|
||||
#include "storage/v2/edge_accessor.hpp"
|
||||
#include "storage/v2/edges_iterable.hpp"
|
||||
#include "storage/v2/indices/indices.hpp"
|
||||
#include "storage/v2/mvcc.hpp"
|
||||
#include "storage/v2/replication/enums.hpp"
|
||||
@ -61,6 +62,7 @@ class EdgeAccessor;
|
||||
struct IndicesInfo {
|
||||
std::vector<LabelId> label;
|
||||
std::vector<std::pair<LabelId, PropertyId>> label_property;
|
||||
std::vector<EdgeTypeId> edge_type;
|
||||
};
|
||||
|
||||
struct ConstraintsInfo {
|
||||
@ -172,6 +174,8 @@ class Storage {
|
||||
const std::optional<utils::Bound<PropertyValue>> &lower_bound,
|
||||
const std::optional<utils::Bound<PropertyValue>> &upper_bound, View view) = 0;
|
||||
|
||||
virtual EdgesIterable Edges(EdgeTypeId edge_type, View view) = 0;
|
||||
|
||||
virtual Result<std::optional<VertexAccessor>> DeleteVertex(VertexAccessor *vertex);
|
||||
|
||||
virtual Result<std::optional<std::pair<VertexAccessor, std::vector<EdgeAccessor>>>> DetachDeleteVertex(
|
||||
@ -192,6 +196,8 @@ class Storage {
|
||||
const std::optional<utils::Bound<PropertyValue>> &lower,
|
||||
const std::optional<utils::Bound<PropertyValue>> &upper) const = 0;
|
||||
|
||||
virtual uint64_t ApproximateEdgeCount(EdgeTypeId id) const = 0;
|
||||
|
||||
virtual std::optional<storage::LabelIndexStats> GetIndexStats(const storage::LabelId &label) const = 0;
|
||||
|
||||
virtual std::optional<storage::LabelPropertyIndexStats> GetIndexStats(
|
||||
@ -224,6 +230,8 @@ class Storage {
|
||||
|
||||
virtual bool LabelPropertyIndexExists(LabelId label, PropertyId property) const = 0;
|
||||
|
||||
virtual bool EdgeTypeIndexExists(EdgeTypeId edge_type) const = 0;
|
||||
|
||||
virtual IndicesInfo ListAllIndices() const = 0;
|
||||
|
||||
virtual ConstraintsInfo ListAllConstraints() const = 0;
|
||||
@ -268,10 +276,14 @@ class Storage {
|
||||
|
||||
virtual utils::BasicResult<StorageIndexDefinitionError, void> CreateIndex(LabelId label, PropertyId property) = 0;
|
||||
|
||||
virtual utils::BasicResult<StorageIndexDefinitionError, void> CreateIndex(EdgeTypeId edge_type) = 0;
|
||||
|
||||
virtual utils::BasicResult<StorageIndexDefinitionError, void> DropIndex(LabelId label) = 0;
|
||||
|
||||
virtual utils::BasicResult<StorageIndexDefinitionError, void> DropIndex(LabelId label, PropertyId property) = 0;
|
||||
|
||||
virtual utils::BasicResult<StorageIndexDefinitionError, void> DropIndex(EdgeTypeId edge_type) = 0;
|
||||
|
||||
virtual utils::BasicResult<StorageExistenceConstraintDefinitionError, void> CreateExistenceConstraint(
|
||||
LabelId label, PropertyId property) = 0;
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -10,7 +10,6 @@
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "storage/v2/vertices_iterable.hpp"
|
||||
|
||||
namespace memgraph::storage {
|
||||
|
||||
VerticesIterable::VerticesIterable(AllVerticesIterable vertices) : type_(Type::ALL) {
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -26,6 +26,7 @@
|
||||
M(ScanAllByLabelPropertyValueOperator, Operator, "Number of times ScanAllByLabelPropertyValue operator was used.") \
|
||||
M(ScanAllByLabelPropertyOperator, Operator, "Number of times ScanAllByLabelProperty operator was used.") \
|
||||
M(ScanAllByIdOperator, Operator, "Number of times ScanAllById operator was used.") \
|
||||
M(ScanAllByEdgeTypeOperator, Operator, "Number of times ScanAllByEdgeTypeOperator operator was used.") \
|
||||
M(ExpandOperator, Operator, "Number of times Expand operator was used.") \
|
||||
M(ExpandVariableOperator, Operator, "Number of times ExpandVariable operator was used.") \
|
||||
M(ConstructNamedPathOperator, Operator, "Number of times ConstructNamedPath operator was used.") \
|
||||
|
@ -19,9 +19,9 @@
namespace memgraph::utils {

template <template <typename, typename...> class Container, typename T, typename Allocator = std::allocator<T>,
          typename F, typename R = std::invoke_result_t<F, T>>
          typename F = std::identity, typename R = std::decay_t<std::invoke_result_t<F, T>>>
requires ranges::range<Container<T, Allocator>> &&
    (!std::same_as<Container<T, Allocator>, std::string>)auto fmap(F &&f, const Container<T, Allocator> &v)
    (!std::same_as<Container<T, Allocator>, std::string>)auto fmap(const Container<T, Allocator> &v, F &&f = {})
    -> std::vector<R> {
  return v | ranges::views::transform(std::forward<F>(f)) | ranges::to<std::vector<R>>();
}
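The hunk above flips fmap to take the container first, defaults the callable to std::identity, and decays the result type so that callables returning references still yield vectors of values. A hypothetical call site is sketched below; the IdToName usage and the header path are assumptions for illustration, not taken from this diff.

// Hypothetical usage of the new call shape.
#include <string>
#include <vector>

#include "utils/functional.hpp"  // assumed location of memgraph::utils::fmap

std::vector<std::string> EdgeTypeNames(const std::vector<memgraph::storage::EdgeTypeId> &types,
                                       memgraph::storage::NameIdMapper &mapper) {
  // Old shape: fmap(lambda, types). New shape: fmap(types, lambda);
  // fmap(types) alone now copies the container element-wise via std::identity.
  return memgraph::utils::fmap(
      types, [&](memgraph::storage::EdgeTypeId id) { return mapper.IdToName(id.AsUint()); });
}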
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -32,6 +32,7 @@ enum class TypeId : uint64_t {
|
||||
SCAN_ALL_BY_LABEL_PROPERTY_VALUE,
|
||||
SCAN_ALL_BY_LABEL_PROPERTY,
|
||||
SCAN_ALL_BY_ID,
|
||||
SCAN_ALL_BY_EDGE_TYPE,
|
||||
EXPAND_COMMON,
|
||||
EXPAND,
|
||||
EXPANSION_LAMBDA,
|
||||
@ -68,6 +69,7 @@ enum class TypeId : uint64_t {
|
||||
APPLY,
|
||||
INDEXED_JOIN,
|
||||
HASH_JOIN,
|
||||
ROLLUP_APPLY,
|
||||
|
||||
// Replication
|
||||
// NOTE: these NEED to be stable in the 2000+ range (see rpc version)
|
||||
@ -99,6 +101,8 @@ enum class TypeId : uint64_t {
|
||||
REP_DROP_AUTH_DATA_RES,
|
||||
REP_TRY_SET_MAIN_UUID_REQ,
|
||||
REP_TRY_SET_MAIN_UUID_RES,
|
||||
REP_FORCE_RESET_STORAGE_REQ,
|
||||
REP_FORCE_RESET_STORAGE_RES,
|
||||
|
||||
// Coordinator
|
||||
COORD_FAILOVER_REQ,
|
||||
@ -182,6 +186,7 @@ enum class TypeId : uint64_t {
|
||||
AST_EXPLAIN_QUERY,
|
||||
AST_PROFILE_QUERY,
|
||||
AST_INDEX_QUERY,
|
||||
AST_EDGE_INDEX_QUERY,
|
||||
AST_CREATE,
|
||||
AST_CALL_PROCEDURE,
|
||||
AST_MATCH,
|
||||