Merge branch 'master' into fix-valuetype-function-on-all-data-types

commit 3c4faa8663

.github/workflows/release_build_test.yaml (vendored): 60 changed lines
@@ -56,6 +56,15 @@ jobs:
      - name: "Build package"
        run: |
          ./release/package/run.sh package debian-10 $BUILD_TYPE
      - name: Upload to S3
        uses: jakejarvis/s3-sync-action@v0.5.1
        env:
          AWS_S3_BUCKET: "deps.memgraph.io"
          AWS_ACCESS_KEY_ID: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
          AWS_REGION: "eu-west-1"
          SOURCE_DIR: "build/output"
          DEST_DIR: "memgraph-unofficial/${{ github.ref_name }}/"
      - name: "Upload package"
        uses: actions/upload-artifact@v4
        with:
@@ -75,6 +84,15 @@ jobs:
      - name: "Build package"
        run: |
          ./release/package/run.sh package ubuntu-22.04 $BUILD_TYPE
      - name: Upload to S3
        uses: jakejarvis/s3-sync-action@v0.5.1
        env:
          AWS_S3_BUCKET: "deps.memgraph.io"
          AWS_ACCESS_KEY_ID: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
          AWS_REGION: "eu-west-1"
          SOURCE_DIR: "build/output"
          DEST_DIR: "memgraph-unofficial/${{ github.ref_name }}/"
      - name: "Upload package"
        uses: actions/upload-artifact@v4
        with:
@@ -86,7 +104,7 @@ jobs:
    needs: [Ubuntu20_04]
    runs-on: [self-hosted, DockerMgBuild, ARM64]
    # M1 Mac mini is sometimes slower
    timeout-minutes: 90
    timeout-minutes: 150
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v4
@@ -101,6 +119,26 @@ jobs:
          name: ubuntu-22.04-aarch64
          path: build/output/ubuntu-22.04-arm/memgraph*.deb

  PushToS3Ubuntu20_04_ARM:
    if: github.ref_type == 'tag'
    needs: [PackageUbuntu20_04_ARM]
    runs-on: ubuntu-latest
    steps:
      - name: Download package
        uses: actions/download-artifact@v4
        with:
          name: ubuntu-22.04-aarch64
          path: build/output/release
      - name: Upload to S3
        uses: jakejarvis/s3-sync-action@v0.5.1
        env:
          AWS_S3_BUCKET: "deps.memgraph.io"
          AWS_ACCESS_KEY_ID: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
          AWS_REGION: "eu-west-1"
          SOURCE_DIR: "build/output/release"
          DEST_DIR: "memgraph-unofficial/${{ github.ref_name }}/"

  PackageDebian11:
    if: github.ref_type == 'tag'
    needs: [Debian10, Ubuntu20_04]
@@ -114,6 +152,15 @@ jobs:
      - name: "Build package"
        run: |
          ./release/package/run.sh package debian-11 $BUILD_TYPE
      - name: Upload to S3
        uses: jakejarvis/s3-sync-action@v0.5.1
        env:
          AWS_S3_BUCKET: "deps.memgraph.io"
          AWS_ACCESS_KEY_ID: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
          AWS_REGION: "eu-west-1"
          SOURCE_DIR: "build/output"
          DEST_DIR: "memgraph-unofficial/${{ github.ref_name }}/"
      - name: "Upload package"
        uses: actions/upload-artifact@v4
        with:
@@ -125,7 +172,7 @@ jobs:
    needs: [Debian10, Ubuntu20_04]
    runs-on: [self-hosted, DockerMgBuild, ARM64]
    # M1 Mac mini is sometimes slower
    timeout-minutes: 90
    timeout-minutes: 150
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v4
@@ -140,16 +187,15 @@ jobs:
          name: debian-11-aarch64
          path: build/output/debian-11-arm/memgraph*.deb

  PushToS3:
  PushToS3Debian11_ARM:
    if: github.ref_type == 'tag'
    needs: [PackageDebian10, PackageDebian11, PackageDebian11_ARM, PackageUbuntu20_04, PackageUbuntu20_04_ARM]
    needs: [PackageDebian11_ARM]
    runs-on: ubuntu-latest
    steps:
      - name: Download artifacts
      - name: Download package
        uses: actions/download-artifact@v4
        with:
          # name: # if name input parameter is not provided, all artifacts are downloaded
          # and put in directories named after each one.
          name: debian-11-aarch64
          path: build/output/release
      - name: Upload to S3
        uses: jakejarvis/s3-sync-action@v0.5.1
@@ -45,6 +45,7 @@ MEMGRAPH_BUILD_DEPS=(
    readline-devel # for memgraph console
    python3-devel # for query modules
    openssl-devel
    openssl
    libseccomp-devel
    python3 python3-pip nmap-ncat # for tests
    #

@@ -43,6 +43,7 @@ MEMGRAPH_BUILD_DEPS=(
    readline-devel # for memgraph console
    python3-devel # for query modules
    openssl-devel
    openssl
    libseccomp-devel
    python3 python-virtualenv python3-pip nmap-ncat # for qa, macro_benchmark and stress tests
    #
@@ -20,14 +20,18 @@ if [ ! -f "$INPUT" ]; then
fi

echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} BEGIN and COMMIT are required because variables share the same name (e.g. row)"
echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} CONSTRAINTS are just skipped -> ${COLOR_RED}please create consraints manually if needed${COLOR_NULL}"
echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} CONSTRAINTS are just skipped -> ${COLOR_RED}please create constraints manually if needed${COLOR_NULL}"

echo 'CREATE INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' > "$OUTPUT"

sed -e 's/^:begin/BEGIN/g; s/^BEGIN$/BEGIN;/g;' \
    -e 's/^:commit/COMMIT/g; s/^COMMIT$/COMMIT;/g;' \
    -e '/^CALL/d; /^SCHEMA AWAIT/d;' \
    -e 's/CREATE RANGE INDEX FOR (n:/CREATE INDEX ON :/g;' \
    -e 's/) ON (n./(/g;' \
    -e '/^CREATE CONSTRAINT/d; /^DROP CONSTRAINT/d;' "$INPUT" > "$OUTPUT"
    -e '/^CREATE CONSTRAINT/d; /^DROP CONSTRAINT/d;' "$INPUT" >> "$OUTPUT"

echo 'DROP INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' >> "$OUTPUT"

echo ""
echo -e "${COLOR_GREEN}DONE!${COLOR_NULL} Please find Memgraph compatible cypherl|.cypher file under $OUTPUT"
import/n2mg_separate_files_cypherl.sh (new executable file, 61 lines)
@@ -0,0 +1,61 @@
#!/bin/bash -e
COLOR_ORANGE="\e[38;5;208m"
COLOR_GREEN="\e[38;5;35m"
COLOR_RED="\e[0;31m"
COLOR_NULL="\e[0m"

print_help() {
  echo -e "${COLOR_ORANGE}HOW TO RUN:${COLOR_NULL} $0 input_file_schema_path input_file_nodes_path input_file_relationships_path input_file_cleanup_path output_file_path"
  exit 1
}

if [ "$#" -ne 5 ]; then
  print_help
fi
INPUT_SCHEMA="$1"
INPUT_NODES="$2"
INPUT_RELATIONSHIPS="$3"
INPUT_CLEANUP="$4"
OUTPUT="$5"

if [ ! -f "$INPUT_SCHEMA" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

if [ ! -f "$INPUT_NODES" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

if [ ! -f "$INPUT_RELATIONSHIPS" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

if [ ! -f "$INPUT_CLEANUP" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} BEGIN and COMMIT are required because variables share the same name (e.g. row)"
echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} CONSTRAINTS are just skipped -> ${COLOR_RED}please create constraints manually if needed${COLOR_NULL}"


echo 'CREATE INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' > "$OUTPUT"

sed -e 's/CREATE RANGE INDEX FOR (n:/CREATE INDEX ON :/g;' \
    -e 's/) ON (n./(/g;' \
    -e '/^CREATE CONSTRAINT/d' $INPUT_SCHEMA >> "$OUTPUT"

cat "$INPUT_NODES" >> "$OUTPUT"
cat "$INPUT_RELATIONSHIPS" >> "$OUTPUT"

sed -e '/^DROP CONSTRAINT/d' "$INPUT_CLEANUP" >> "$OUTPUT"

echo 'DROP INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' >> "$OUTPUT"

echo ""
echo -e "${COLOR_GREEN}DONE!${COLOR_NULL} Please find Memgraph compatible cypherl|.cypher file under $OUTPUT"
echo ""
echo "Please import data by executing => \`cat $OUTPUT | mgconsole\`"
import/n2mg_separate_files_cypherls.sh (new executable file, 64 lines)
@@ -0,0 +1,64 @@
#!/bin/bash -e
COLOR_ORANGE="\e[38;5;208m"
COLOR_GREEN="\e[38;5;35m"
COLOR_RED="\e[0;31m"
COLOR_NULL="\e[0m"

print_help() {
  echo -e "${COLOR_ORANGE}HOW TO RUN:${COLOR_NULL} $0 input_file_schema_path input_file_nodes_path input_file_relationships_path input_file_cleanup_path output_file_schema_path output_file_nodes_path output_file_relationships_path output_file_cleanup_path"
  exit 1
}

if [ "$#" -ne 8 ]; then
  print_help
fi
INPUT_SCHEMA="$1"
INPUT_NODES="$2"
INPUT_RELATIONSHIPS="$3"
INPUT_CLEANUP="$4"
OUTPUT_SCHEMA="$5"
OUTPUT_NODES="$6"
OUTPUT_RELATIONSHIPS="$7"
OUTPUT_CLEANUP="$8"

if [ ! -f "$INPUT_SCHEMA" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

if [ ! -f "$INPUT_NODES" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

if [ ! -f "$INPUT_RELATIONSHIPS" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

if [ ! -f "$INPUT_CLEANUP" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} BEGIN and COMMIT are required because variables share the same name (e.g. row)"
echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} CONSTRAINTS are just skipped -> ${COLOR_RED}please create constraints manually if needed${COLOR_NULL}"


echo 'CREATE INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' > "$OUTPUT_SCHEMA"

sed -e 's/CREATE RANGE INDEX FOR (n:/CREATE INDEX ON :/g;' \
    -e 's/) ON (n./(/g;' \
    -e '/^CREATE CONSTRAINT/d' $INPUT_SCHEMA >> "$OUTPUT_SCHEMA"

cat "$INPUT_NODES" > "$OUTPUT_NODES"
cat "$INPUT_RELATIONSHIPS" > "$OUTPUT_RELATIONSHIPS"

sed -e '/^DROP CONSTRAINT/d' "$INPUT_CLEANUP" >> "$OUTPUT_CLEANUP"

echo 'DROP INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' >> "$OUTPUT_CLEANUP"

echo ""
echo -e "${COLOR_GREEN}DONE!${COLOR_NULL} Please find Memgraph compatible cypherl|.cypher files under $OUTPUT_SCHEMA, $OUTPUT_NODES, $OUTPUT_RELATIONSHIPS and $OUTPUT_CLEANUP"
echo ""
echo "Please import data by executing => \`cat $OUTPUT_SCHEMA | mgconsole\`, \`cat $OUTPUT_NODES | mgconsole\`, \`cat $OUTPUT_RELATIONSHIPS | mgconsole\` and \`cat $OUTPUT_CLEANUP | mgconsole\`"
@@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -9,10 +9,11 @@
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#include <boost/functional/hash.hpp>
#include <mgp.hpp>
#include "utils/string.hpp"

#include <optional>
#include <unordered_set>

namespace Schema {

@@ -37,6 +38,7 @@ constexpr std::string_view kParameterIndices = "indices";
constexpr std::string_view kParameterUniqueConstraints = "unique_constraints";
constexpr std::string_view kParameterExistenceConstraints = "existence_constraints";
constexpr std::string_view kParameterDropExisting = "drop_existing";
constexpr int kInitialNumberOfPropertyOccurances = 1;

std::string TypeOf(const mgp::Type &type);

@@ -108,83 +110,79 @@ void Schema::ProcessPropertiesRel(mgp::Record &record, const std::string_view &t
  record.Insert(std::string(kReturnMandatory).c_str(), mandatory);
}

struct Property {
  std::string name;
  mgp::Value value;
struct PropertyInfo {
  std::unordered_set<std::string> property_types;  // property types
  int64_t number_of_property_occurrences = 0;

  Property(const std::string &name, mgp::Value &&value) : name(name), value(std::move(value)) {}
  PropertyInfo() = default;
  explicit PropertyInfo(std::string &&property_type)
      : property_types({std::move(property_type)}),
        number_of_property_occurrences(Schema::kInitialNumberOfPropertyOccurances) {}
};

struct LabelsInfo {
  std::unordered_map<std::string, PropertyInfo> properties;  // key is a property name
  int64_t number_of_label_occurrences = 0;
};

struct LabelsHash {
  std::size_t operator()(const std::set<std::string> &set) const {
    std::size_t seed = set.size();
    for (const auto &i : set) {
      seed ^= std::hash<std::string>{}(i) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
    }
    return seed;
  }
  std::size_t operator()(const std::set<std::string> &s) const { return boost::hash_range(s.begin(), s.end()); }
};

struct LabelsComparator {
  bool operator()(const std::set<std::string> &lhs, const std::set<std::string> &rhs) const { return lhs == rhs; }
};

struct PropertyComparator {
  bool operator()(const Property &lhs, const Property &rhs) const { return lhs.name < rhs.name; }
};

struct PropertyInfo {
  std::set<Property, PropertyComparator> properties;
  bool mandatory;
};

void Schema::NodeTypeProperties(mgp_list * /*args*/, mgp_graph *memgraph_graph, mgp_result *result,
                                mgp_memory *memory) {
  mgp::MemoryDispatcherGuard guard{memory};
  const auto record_factory = mgp::RecordFactory(result);
  try {
    std::unordered_map<std::set<std::string>, PropertyInfo, LabelsHash, LabelsComparator> node_types_properties;
    std::unordered_map<std::set<std::string>, LabelsInfo, LabelsHash, LabelsComparator> node_types_properties;

    for (auto node : mgp::Graph(memgraph_graph).Nodes()) {
    for (const auto node : mgp::Graph(memgraph_graph).Nodes()) {
      std::set<std::string> labels_set = {};
      for (auto label : node.Labels()) {
      for (const auto label : node.Labels()) {
        labels_set.emplace(label);
      }

      if (node_types_properties.find(labels_set) == node_types_properties.end()) {
        node_types_properties[labels_set] = PropertyInfo{std::set<Property, PropertyComparator>(), true};
      }
      node_types_properties[labels_set].number_of_label_occurrences++;

      if (node.Properties().empty()) {
        node_types_properties[labels_set].mandatory = false;  // if there is node with no property, it is not mandatory
        continue;
      }

      auto &property_info = node_types_properties.at(labels_set);
      for (auto &[key, prop] : node.Properties()) {
        property_info.properties.emplace(key, std::move(prop));
        if (property_info.mandatory) {
          property_info.mandatory =
              property_info.properties.size() == 1;  // if there is only one property, it is mandatory
      auto &labels_info = node_types_properties.at(labels_set);
      for (const auto &[key, prop] : node.Properties()) {
        auto prop_type = TypeOf(prop.Type());
        if (labels_info.properties.find(key) == labels_info.properties.end()) {
          labels_info.properties[key] = PropertyInfo{std::move(prop_type)};
        } else {
          labels_info.properties[key].property_types.emplace(prop_type);
          labels_info.properties[key].number_of_property_occurrences++;
        }
      }
    }

    for (auto &[labels, property_info] : node_types_properties) {
    for (auto &[node_type, labels_info] : node_types_properties) {  // node type is a set of labels
      std::string label_type;
      mgp::List labels_list = mgp::List();
      for (auto const &label : labels) {
      auto labels_list = mgp::List();
      for (const auto &label : node_type) {
        label_type += ":`" + std::string(label) + "`";
        labels_list.AppendExtend(mgp::Value(label));
      }
      for (auto const &prop : property_info.properties) {
      for (const auto &prop : labels_info.properties) {
        auto prop_types = mgp::List();
        for (const auto &prop_type : prop.second.property_types) {
          prop_types.AppendExtend(mgp::Value(prop_type));
        }
        bool mandatory = prop.second.number_of_property_occurrences == labels_info.number_of_label_occurrences;
        auto record = record_factory.NewRecord();
        ProcessPropertiesNode(record, label_type, labels_list, prop.name, TypeOf(prop.value.Type()),
                              property_info.mandatory);
        ProcessPropertiesNode(record, label_type, labels_list, prop.first, prop_types, mandatory);
      }
      if (property_info.properties.empty()) {
      if (labels_info.properties.empty()) {
        auto record = record_factory.NewRecord();
        ProcessPropertiesNode<std::string>(record, label_type, labels_list, "", "", false);
        ProcessPropertiesNode<mgp::List>(record, label_type, labels_list, "", mgp::List(), false);
      }
    }

@@ -197,40 +195,45 @@ void Schema::NodeTypeProperties(mgp_list * /*args*/, mgp_graph *memgraph_graph,
void Schema::RelTypeProperties(mgp_list * /*args*/, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory) {
  mgp::MemoryDispatcherGuard guard{memory};

  std::unordered_map<std::string, PropertyInfo> rel_types_properties;
  std::unordered_map<std::string, LabelsInfo> rel_types_properties;
  const auto record_factory = mgp::RecordFactory(result);
  try {
    const mgp::Graph graph = mgp::Graph(memgraph_graph);
    for (auto rel : graph.Relationships()) {
    const auto graph = mgp::Graph(memgraph_graph);
    for (const auto rel : graph.Relationships()) {
      std::string rel_type = std::string(rel.Type());
      if (rel_types_properties.find(rel_type) == rel_types_properties.end()) {
        rel_types_properties[rel_type] = PropertyInfo{std::set<Property, PropertyComparator>(), true};
      }

      rel_types_properties[rel_type].number_of_label_occurrences++;

      if (rel.Properties().empty()) {
        rel_types_properties[rel_type].mandatory = false;  // if there is rel with no property, it is not mandatory
        continue;
      }

      auto &property_info = rel_types_properties.at(rel_type);
      auto &labels_info = rel_types_properties.at(rel_type);
      for (auto &[key, prop] : rel.Properties()) {
        property_info.properties.emplace(key, std::move(prop));
        if (property_info.mandatory) {
          property_info.mandatory =
              property_info.properties.size() == 1;  // if there is only one property, it is mandatory
        auto prop_type = TypeOf(prop.Type());
        if (labels_info.properties.find(key) == labels_info.properties.end()) {
          labels_info.properties[key] = PropertyInfo{std::move(prop_type)};
        } else {
          labels_info.properties[key].property_types.emplace(prop_type);
          labels_info.properties[key].number_of_property_occurrences++;
        }
      }
    }

    for (auto &[type, property_info] : rel_types_properties) {
      std::string type_str = ":`" + std::string(type) + "`";
      for (auto const &prop : property_info.properties) {
    for (auto &[rel_type, labels_info] : rel_types_properties) {
      std::string type_str = ":`" + std::string(rel_type) + "`";
      for (const auto &prop : labels_info.properties) {
        auto prop_types = mgp::List();
        for (const auto &prop_type : prop.second.property_types) {
          prop_types.AppendExtend(mgp::Value(prop_type));
        }
        bool mandatory = prop.second.number_of_property_occurrences == labels_info.number_of_label_occurrences;
        auto record = record_factory.NewRecord();
        ProcessPropertiesRel(record, type_str, prop.name, TypeOf(prop.value.Type()), property_info.mandatory);
        ProcessPropertiesRel(record, type_str, prop.first, prop_types, mandatory);
      }
      if (property_info.properties.empty()) {
      if (labels_info.properties.empty()) {
        auto record = record_factory.NewRecord();
        ProcessPropertiesRel<std::string>(record, type_str, "", "", false);
        ProcessPropertiesRel<mgp::List>(record, type_str, "", mgp::List(), false);
      }
    }

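The reworked Schema::NodeTypeProperties and Schema::RelTypeProperties above stop storing whole property values and instead count occurrences: a property is reported as mandatory only when it occurred on every node (or relationship) carrying the given label set, and every value type seen for it is returned as a list. The following is a minimal, self-contained sketch of that rule; it uses only the standard library and hypothetical names, not the mgp API, so it is an illustration rather than the module's actual code.

// Illustrative sketch of the mandatory-property rule used by the new schema code.
#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <string>

struct PropertyCount {
  std::set<std::string> types;  // every type observed for this property
  int64_t occurrences = 0;      // in how many nodes of this node type it appeared
};

int main() {
  // Suppose three nodes share the label set {Person}; "name" appears on all three,
  // "age" only on two, so "name" is mandatory and "age" is not.
  int64_t label_occurrences = 3;
  std::map<std::string, PropertyCount> props{
      {"name", {{"String"}, 3}},
      {"age", {{"Int"}, 2}},
  };
  for (auto const &[name, info] : props) {
    bool mandatory = info.occurrences == label_occurrences;  // same comparison the procedure performs
    std::cout << name << " mandatory=" << std::boolalpha << mandatory << '\n';
  }
}

With the counts above, name is reported as mandatory and age is not, which matches how the procedure now derives the mandatory flag per property.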
@@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@@ -16,11 +16,14 @@ target_sources(mg-coordination
        include/coordination/raft_state.hpp
        include/coordination/rpc_errors.hpp

        include/nuraft/raft_log_action.hpp
        include/nuraft/coordinator_cluster_state.hpp
        include/nuraft/coordinator_log_store.hpp
        include/nuraft/coordinator_state_machine.hpp
        include/nuraft/coordinator_state_manager.hpp

    PRIVATE
        coordinator_config.cpp
        coordinator_client.cpp
        coordinator_state.cpp
        coordinator_rpc.cpp
@@ -33,6 +36,7 @@ target_sources(mg-coordination
        coordinator_log_store.cpp
        coordinator_state_machine.cpp
        coordinator_state_manager.cpp
        coordinator_cluster_state.cpp
)
target_include_directories(mg-coordination PUBLIC include)

@@ -16,6 +16,7 @@

#include "coordination/coordinator_config.hpp"
#include "coordination/coordinator_rpc.hpp"
#include "replication_coordination_glue/common.hpp"
#include "replication_coordination_glue/messages.hpp"
#include "utils/result.hpp"

@@ -30,7 +31,7 @@ auto CreateClientContext(memgraph::coordination::CoordinatorClientConfig const &
}  // namespace

CoordinatorClient::CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorClientConfig config,
                                     HealthCheckCallback succ_cb, HealthCheckCallback fail_cb)
                                     HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb)
    : rpc_context_{CreateClientContext(config)},
      rpc_client_{io::network::Endpoint(io::network::Endpoint::needs_resolving, config.ip_address, config.port),
                  &rpc_context_},
@@ -40,7 +41,9 @@ CoordinatorClient::CoordinatorClient(CoordinatorInstance *coord_instance, Coordi
      fail_cb_{std::move(fail_cb)} {}

auto CoordinatorClient::InstanceName() const -> std::string { return config_.instance_name; }
auto CoordinatorClient::SocketAddress() const -> std::string { return rpc_client_.Endpoint().SocketAddress(); }

auto CoordinatorClient::CoordinatorSocketAddress() const -> std::string { return config_.CoordinatorSocketAddress(); }
auto CoordinatorClient::ReplicationSocketAddress() const -> std::string { return config_.ReplicationSocketAddress(); }

auto CoordinatorClient::InstanceDownTimeoutSec() const -> std::chrono::seconds {
  return config_.instance_down_timeout_sec;
@@ -63,11 +66,15 @@ void CoordinatorClient::StartFrequentCheck() {
      [this, instance_name = config_.instance_name] {
        try {
          spdlog::trace("Sending frequent heartbeat to machine {} on {}", instance_name,
                        rpc_client_.Endpoint().SocketAddress());
                        config_.CoordinatorSocketAddress());
          {  // NOTE: This is intentionally scoped so that stream lock could get released.
            auto stream{rpc_client_.Stream<memgraph::replication_coordination_glue::FrequentHeartbeatRpc>()};
            stream.AwaitResponse();
          }
          // Subtle race condition:
          // acquiring of lock needs to happen before function call, as function callback can be changed
          // for instance after lock is already acquired
          // (failover case when instance is promoted to MAIN)
          succ_cb_(coord_instance_, instance_name);
        } catch (rpc::RpcFailedException const &) {
          fail_cb_(coord_instance_, instance_name);
@@ -79,11 +86,6 @@ void CoordinatorClient::StopFrequentCheck() { instance_checker_.Stop(); }
void CoordinatorClient::PauseFrequentCheck() { instance_checker_.Pause(); }
void CoordinatorClient::ResumeFrequentCheck() { instance_checker_.Resume(); }

auto CoordinatorClient::SetCallbacks(HealthCheckCallback succ_cb, HealthCheckCallback fail_cb) -> void {
  succ_cb_ = std::move(succ_cb);
  fail_cb_ = std::move(fail_cb);
}

auto CoordinatorClient::ReplicationClientInfo() const -> ReplClientInfo { return config_.replication_client_info; }

auto CoordinatorClient::SendPromoteReplicaToMainRpc(const utils::UUID &uuid,
@@ -117,7 +119,7 @@ auto CoordinatorClient::DemoteToReplica() const -> bool {
  return false;
}

auto CoordinatorClient::SendSwapMainUUIDRpc(const utils::UUID &uuid) const -> bool {
auto CoordinatorClient::SendSwapMainUUIDRpc(utils::UUID const &uuid) const -> bool {
  try {
    auto stream{rpc_client_.Stream<replication_coordination_glue::SwapMainUUIDRpc>(uuid)};
    if (!stream.AwaitResponse().success) {
@@ -131,7 +133,7 @@ auto CoordinatorClient::SendSwapMainUUIDRpc(const utils::UUID &uuid) const -> bo
  return false;
}

auto CoordinatorClient::SendUnregisterReplicaRpc(std::string const &instance_name) const -> bool {
auto CoordinatorClient::SendUnregisterReplicaRpc(std::string_view instance_name) const -> bool {
  try {
    auto stream{rpc_client_.Stream<UnregisterReplicaRpc>(instance_name)};
    if (!stream.AwaitResponse().success) {
@@ -171,5 +173,17 @@ auto CoordinatorClient::SendEnableWritingOnMainRpc() const -> bool {
  return false;
}

auto CoordinatorClient::SendGetInstanceTimestampsRpc() const
    -> utils::BasicResult<GetInstanceUUIDError, replication_coordination_glue::DatabaseHistories> {
  try {
    auto stream{rpc_client_.Stream<coordination::GetDatabaseHistoriesRpc>()};
    return stream.AwaitResponse().database_histories;

  } catch (const rpc::RpcFailedException &) {
    spdlog::error("RPC error occurred while sending GetInstance UUID RPC");
    return GetInstanceUUIDError::RPC_EXCEPTION;
  }
}

}  // namespace memgraph::coordination
#endif
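A point worth noting in the CoordinatorClient changes above is the "subtle race condition" comment: the coordinator lock must be acquired before the health-check callback is looked up and invoked, because a failover can swap an instance's callbacks (for example when it is promoted to MAIN) between the ping and the call. The sketch below illustrates that pattern with purely hypothetical names and none of the Memgraph types; it is a simplified illustration, not Memgraph code.

// Sketch of the lock-then-invoke callback pattern used by the frequent health check.
#include <functional>
#include <iostream>
#include <mutex>
#include <string_view>

struct Coordinator {
  std::mutex lock;
  std::function<void(std::string_view)> on_success = [](std::string_view name) {
    std::cout << "replica " << name << " is healthy\n";
  };
};

void FrequentCheckTick(Coordinator &coord, std::string_view instance_name, bool ping_ok) {
  // Acquire the lock first, then read the (possibly swapped) callback and call it under the same lock.
  std::lock_guard guard{coord.lock};
  if (ping_ok) coord.on_success(instance_name);
}

int main() {
  Coordinator c;
  FrequentCheckTick(c, "instance_1", true);
}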
src/coordination/coordinator_cluster_state.cpp (new file, 147 lines)
@@ -0,0 +1,147 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#ifdef MG_ENTERPRISE

#include "nuraft/coordinator_cluster_state.hpp"
#include "utils/logging.hpp"

#include <shared_mutex>

namespace memgraph::coordination {

void to_json(nlohmann::json &j, InstanceState const &instance_state) {
  j = nlohmann::json{{"config", instance_state.config}, {"status", instance_state.status}};
}

void from_json(nlohmann::json const &j, InstanceState &instance_state) {
  j.at("config").get_to(instance_state.config);
  j.at("status").get_to(instance_state.status);
}

CoordinatorClusterState::CoordinatorClusterState(std::map<std::string, InstanceState, std::less<>> instances)
    : instances_{std::move(instances)} {}

CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState const &other) : instances_{other.instances_} {}

CoordinatorClusterState &CoordinatorClusterState::operator=(CoordinatorClusterState const &other) {
  if (this == &other) {
    return *this;
  }
  instances_ = other.instances_;
  return *this;
}

CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState &&other) noexcept
    : instances_{std::move(other.instances_)} {}

CoordinatorClusterState &CoordinatorClusterState::operator=(CoordinatorClusterState &&other) noexcept {
  if (this == &other) {
    return *this;
  }
  instances_ = std::move(other.instances_);
  return *this;
}

auto CoordinatorClusterState::MainExists() const -> bool {
  auto lock = std::shared_lock{log_lock_};
  return std::ranges::any_of(instances_,
                             [](auto const &entry) { return entry.second.status == ReplicationRole::MAIN; });
}

auto CoordinatorClusterState::IsMain(std::string_view instance_name) const -> bool {
  auto lock = std::shared_lock{log_lock_};
  auto const it = instances_.find(instance_name);
  return it != instances_.end() && it->second.status == ReplicationRole::MAIN;
}

auto CoordinatorClusterState::IsReplica(std::string_view instance_name) const -> bool {
  auto lock = std::shared_lock{log_lock_};
  auto const it = instances_.find(instance_name);
  return it != instances_.end() && it->second.status == ReplicationRole::REPLICA;
}

auto CoordinatorClusterState::InsertInstance(std::string instance_name, InstanceState instance_state) -> void {
  auto lock = std::lock_guard{log_lock_};
  instances_.insert_or_assign(std::move(instance_name), std::move(instance_state));
}

auto CoordinatorClusterState::DoAction(TRaftLog log_entry, RaftLogAction log_action) -> void {
  auto lock = std::lock_guard{log_lock_};
  switch (log_action) {
    case RaftLogAction::REGISTER_REPLICATION_INSTANCE: {
      auto const &config = std::get<CoordinatorClientConfig>(log_entry);
      instances_[config.instance_name] = InstanceState{config, ReplicationRole::REPLICA};
      break;
    }
    case RaftLogAction::UNREGISTER_REPLICATION_INSTANCE: {
      auto const instance_name = std::get<std::string>(log_entry);
      instances_.erase(instance_name);
      break;
    }
    case RaftLogAction::SET_INSTANCE_AS_MAIN: {
      auto const instance_name = std::get<std::string>(log_entry);
      auto it = instances_.find(instance_name);
      MG_ASSERT(it != instances_.end(), "Instance does not exist as part of raft state!");
      it->second.status = ReplicationRole::MAIN;
      break;
    }
    case RaftLogAction::SET_INSTANCE_AS_REPLICA: {
      auto const instance_name = std::get<std::string>(log_entry);
      auto it = instances_.find(instance_name);
      MG_ASSERT(it != instances_.end(), "Instance does not exist as part of raft state!");
      it->second.status = ReplicationRole::REPLICA;
      break;
    }
    case RaftLogAction::UPDATE_UUID: {
      uuid_ = std::get<utils::UUID>(log_entry);
      break;
    }
  }
}

auto CoordinatorClusterState::Serialize(ptr<buffer> &data) -> void {
  auto lock = std::shared_lock{log_lock_};

  auto const log = nlohmann::json(instances_).dump();

  data = buffer::alloc(sizeof(uint32_t) + log.size());
  buffer_serializer bs(data);
  bs.put_str(log);
}

auto CoordinatorClusterState::Deserialize(buffer &data) -> CoordinatorClusterState {
  buffer_serializer bs(data);
  auto const j = nlohmann::json::parse(bs.get_str());
  auto instances = j.get<std::map<std::string, InstanceState, std::less<>>>();

  return CoordinatorClusterState{std::move(instances)};
}

auto CoordinatorClusterState::GetInstances() const -> std::vector<InstanceState> {
  auto lock = std::shared_lock{log_lock_};
  return instances_ | ranges::views::values | ranges::to<std::vector<InstanceState>>;
}

auto CoordinatorClusterState::GetUUID() const -> utils::UUID { return uuid_; }

auto CoordinatorClusterState::FindCurrentMainInstanceName() const -> std::optional<std::string> {
  auto lock = std::shared_lock{log_lock_};
  auto const it =
      std::ranges::find_if(instances_, [](auto const &entry) { return entry.second.status == ReplicationRole::MAIN; });
  if (it == instances_.end()) {
    return {};
  }
  return it->first;
}

}  // namespace memgraph::coordination
#endif
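CoordinatorClusterState::Serialize and Deserialize above persist the instance map by dumping it to JSON and storing the string in a NuRaft buffer. The following sketch shows the same nlohmann::json round trip with a simplified InstanceState (the real struct also carries the full CoordinatorClientConfig); the names and values are illustrative only.

// Minimal sketch of the JSON serialization pattern used by CoordinatorClusterState.
#include <iostream>
#include <map>
#include <string>
#include <nlohmann/json.hpp>

struct InstanceState {
  std::string status;  // "MAIN" or "REPLICA" stands in for ReplicationRole here
};

void to_json(nlohmann::json &j, InstanceState const &s) { j = nlohmann::json{{"status", s.status}}; }
void from_json(nlohmann::json const &j, InstanceState &s) { j.at("status").get_to(s.status); }

int main() {
  std::map<std::string, InstanceState> instances{{"instance_1", {"MAIN"}}, {"instance_2", {"REPLICA"}}};
  auto const log = nlohmann::json(instances).dump();  // what Serialize() writes into the buffer
  auto const restored =
      nlohmann::json::parse(log).get<std::map<std::string, InstanceState>>();  // what Deserialize() reads back
  std::cout << log << '\n' << restored.at("instance_1").status << '\n';
}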
src/coordination/coordinator_config.cpp (new file, 54 lines)
@@ -0,0 +1,54 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"

namespace memgraph::coordination {

void to_json(nlohmann::json &j, ReplClientInfo const &config) {
  j = nlohmann::json{{"instance_name", config.instance_name},
                     {"replication_mode", config.replication_mode},
                     {"replication_ip_address", config.replication_ip_address},
                     {"replication_port", config.replication_port}};
}

void from_json(nlohmann::json const &j, ReplClientInfo &config) {
  config.instance_name = j.at("instance_name").get<std::string>();
  config.replication_mode = j.at("replication_mode").get<replication_coordination_glue::ReplicationMode>();
  config.replication_ip_address = j.at("replication_ip_address").get<std::string>();
  config.replication_port = j.at("replication_port").get<uint16_t>();
}

void to_json(nlohmann::json &j, CoordinatorClientConfig const &config) {
  j = nlohmann::json{{"instance_name", config.instance_name},
                     {"ip_address", config.ip_address},
                     {"port", config.port},
                     {"instance_health_check_frequency_sec", config.instance_health_check_frequency_sec.count()},
                     {"instance_down_timeout_sec", config.instance_down_timeout_sec.count()},
                     {"instance_get_uuid_frequency_sec", config.instance_get_uuid_frequency_sec.count()},
                     {"replication_client_info", config.replication_client_info}};
}

void from_json(nlohmann::json const &j, CoordinatorClientConfig &config) {
  config.instance_name = j.at("instance_name").get<std::string>();
  config.ip_address = j.at("ip_address").get<std::string>();
  config.port = j.at("port").get<uint16_t>();
  config.instance_health_check_frequency_sec =
      std::chrono::seconds{j.at("instance_health_check_frequency_sec").get<int>()};
  config.instance_down_timeout_sec = std::chrono::seconds{j.at("instance_down_timeout_sec").get<int>()};
  config.instance_get_uuid_frequency_sec = std::chrono::seconds{j.at("instance_get_uuid_frequency_sec").get<int>()};
  config.replication_client_info = j.at("replication_client_info").get<ReplClientInfo>();
}

}  // namespace memgraph::coordination
#endif
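The from_json overloads above restore durations with std::chrono::seconds{j.at(...).get<int>()}, so each interval is stored in JSON as a plain integer count of seconds. A tiny sketch of that convention, assuming nothing beyond nlohmann::json and <chrono> (the key name is taken from the code above, the value is made up):

// Sketch of the seconds-as-integer round trip used by the config serialization.
#include <chrono>
#include <iostream>
#include <nlohmann/json.hpp>

int main() {
  std::chrono::seconds down_timeout{5};
  nlohmann::json j{{"instance_down_timeout_sec", down_timeout.count()}};  // store as a plain count
  auto const restored = std::chrono::seconds{j.at("instance_down_timeout_sec").get<int>()};  // restore on load
  std::cout << j.dump() << " -> " << restored.count() << "s\n";
}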
@@ -57,6 +57,17 @@ void CoordinatorHandlers::Register(memgraph::coordination::CoordinatorServer &se
        spdlog::info("Received GetInstanceUUIDRpc on coordinator server");
        CoordinatorHandlers::GetInstanceUUIDHandler(replication_handler, req_reader, res_builder);
      });

  server.Register<coordination::GetDatabaseHistoriesRpc>(
      [&replication_handler](slk::Reader *req_reader, slk::Builder *res_builder) -> void {
        spdlog::info("Received GetDatabasesHistoryRpc on coordinator server");
        CoordinatorHandlers::GetDatabaseHistoriesHandler(replication_handler, req_reader, res_builder);
      });
}

void CoordinatorHandlers::GetDatabaseHistoriesHandler(replication::ReplicationHandler &replication_handler,
                                                      slk::Reader * /*req_reader*/, slk::Builder *res_builder) {
  slk::Save(coordination::GetDatabaseHistoriesRes{replication_handler.GetDatabasesHistories()}, res_builder);
}

void CoordinatorHandlers::SwapMainUUIDHandler(replication::ReplicationHandler &replication_handler,
@@ -15,10 +15,12 @@

#include "coordination/coordinator_exceptions.hpp"
#include "coordination/fmt.hpp"
#include "dbms/constants.hpp"
#include "nuraft/coordinator_state_machine.hpp"
#include "nuraft/coordinator_state_manager.hpp"
#include "utils/counter.hpp"
#include "utils/functional.hpp"
#include "utils/resource_lock.hpp"

#include <range/v3/view.hpp>
#include <shared_mutex>
@@ -30,144 +32,156 @@ using nuraft::srv_config;

CoordinatorInstance::CoordinatorInstance()
    : raft_state_(RaftState::MakeRaftState(
          [this] { std::ranges::for_each(repl_instances_, &ReplicationInstance::StartFrequentCheck); },
          [this] { std::ranges::for_each(repl_instances_, &ReplicationInstance::StopFrequentCheck); })) {
  auto find_repl_instance = [](CoordinatorInstance *self,
                               std::string_view repl_instance_name) -> ReplicationInstance & {
    auto repl_instance =
        std::ranges::find_if(self->repl_instances_, [repl_instance_name](ReplicationInstance const &instance) {
          return instance.InstanceName() == repl_instance_name;
        });
          [this]() {
            spdlog::info("Leader changed, starting all replication instances!");
            auto const instances = raft_state_.GetInstances();
            auto replicas = instances | ranges::views::filter([](auto const &instance) {
                              return instance.status == ReplicationRole::REPLICA;
                            });

    MG_ASSERT(repl_instance != self->repl_instances_.end(), "Instance {} not found during callback!",
              repl_instance_name);
    return *repl_instance;
            std::ranges::for_each(replicas, [this](auto &replica) {
              spdlog::info("Started pinging replication instance {}", replica.config.instance_name);
              repl_instances_.emplace_back(this, replica.config, client_succ_cb_, client_fail_cb_,
                                           &CoordinatorInstance::ReplicaSuccessCallback,
                                           &CoordinatorInstance::ReplicaFailCallback);
            });

            auto main = instances | ranges::views::filter(
                            [](auto const &instance) { return instance.status == ReplicationRole::MAIN; });

            std::ranges::for_each(main, [this](auto &main_instance) {
              spdlog::info("Started pinging main instance {}", main_instance.config.instance_name);
              repl_instances_.emplace_back(this, main_instance.config, client_succ_cb_, client_fail_cb_,
                                           &CoordinatorInstance::MainSuccessCallback,
                                           &CoordinatorInstance::MainFailCallback);
            });

            std::ranges::for_each(repl_instances_, [this](auto &instance) {
              instance.SetNewMainUUID(raft_state_.GetUUID());
              instance.StartFrequentCheck();
            });
          },
          [this]() {
            spdlog::info("Leader changed, stopping all replication instances!");
            repl_instances_.clear();
          })) {
  client_succ_cb_ = [](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
    auto lock = std::lock_guard{self->coord_instance_lock_};
    auto &repl_instance = self->FindReplicationInstance(repl_instance_name);
    std::invoke(repl_instance.GetSuccessCallback(), self, repl_instance_name);
  };

  replica_succ_cb_ = [find_repl_instance](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
  client_fail_cb_ = [](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
    auto lock = std::lock_guard{self->coord_instance_lock_};
    spdlog::trace("Instance {} performing replica successful callback", repl_instance_name);
    auto &repl_instance = find_repl_instance(self, repl_instance_name);

    // We need to get replicas UUID from time to time to ensure replica is listening to correct main
    // and that it didn't go down for less time than we could notice
    // We need to get id of main replica is listening to
    // and swap if necessary
    if (!repl_instance.EnsureReplicaHasCorrectMainUUID(self->GetMainUUID())) {
      spdlog::error("Failed to swap uuid for replica instance {} which is alive", repl_instance.InstanceName());
      return;
    }

    repl_instance.OnSuccessPing();
  };

  replica_fail_cb_ = [find_repl_instance](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
    auto lock = std::lock_guard{self->coord_instance_lock_};
    spdlog::trace("Instance {} performing replica failure callback", repl_instance_name);
    auto &repl_instance = find_repl_instance(self, repl_instance_name);
    repl_instance.OnFailPing();
  };

  main_succ_cb_ = [find_repl_instance](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
    auto lock = std::lock_guard{self->coord_instance_lock_};
    spdlog::trace("Instance {} performing main successful callback", repl_instance_name);

    auto &repl_instance = find_repl_instance(self, repl_instance_name);

    if (repl_instance.IsAlive()) {
      repl_instance.OnSuccessPing();
      return;
    }

    const auto &repl_instance_uuid = repl_instance.GetMainUUID();
    MG_ASSERT(repl_instance_uuid.has_value(), "Instance must have uuid set.");

    auto const curr_main_uuid = self->GetMainUUID();
    if (curr_main_uuid == repl_instance_uuid.value()) {
      if (!repl_instance.EnableWritingOnMain()) {
        spdlog::error("Failed to enable writing on main instance {}", repl_instance_name);
        return;
      }

      repl_instance.OnSuccessPing();
      return;
    }

    // TODO(antoniof) make demoteToReplica idempotent since main can be demoted to replica but
    // swapUUID can fail
    if (repl_instance.DemoteToReplica(self->replica_succ_cb_, self->replica_fail_cb_)) {
      repl_instance.OnSuccessPing();
      spdlog::info("Instance {} demoted to replica", repl_instance_name);
    } else {
      spdlog::error("Instance {} failed to become replica", repl_instance_name);
      return;
    }

    if (!repl_instance.SendSwapAndUpdateUUID(curr_main_uuid)) {
      spdlog::error(fmt::format("Failed to swap uuid for demoted main instance {}", repl_instance.InstanceName()));
      return;
    }
  };

  main_fail_cb_ = [find_repl_instance](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
    auto lock = std::lock_guard{self->coord_instance_lock_};
    spdlog::trace("Instance {} performing main failure callback", repl_instance_name);
    auto &repl_instance = find_repl_instance(self, repl_instance_name);
    repl_instance.OnFailPing();
    const auto &repl_instance_uuid = repl_instance.GetMainUUID();
    MG_ASSERT(repl_instance_uuid.has_value(), "Instance must have uuid set");

    if (!repl_instance.IsAlive() && self->GetMainUUID() == repl_instance_uuid.value()) {
      spdlog::info("Cluster without main instance, trying automatic failover");
      self->TryFailover();  // TODO: (andi) Initiate failover
    }
    auto &repl_instance = self->FindReplicationInstance(repl_instance_name);
    std::invoke(repl_instance.GetFailCallback(), self, repl_instance_name);
  };
}

auto CoordinatorInstance::FindReplicationInstance(std::string_view replication_instance_name) -> ReplicationInstance & {
  auto repl_instance =
      std::ranges::find_if(repl_instances_, [replication_instance_name](ReplicationInstance const &instance) {
        return instance.InstanceName() == replication_instance_name;
      });

  MG_ASSERT(repl_instance != repl_instances_.end(), "Instance {} not found during callback!",
            replication_instance_name);
  return *repl_instance;
}

auto CoordinatorInstance::ShowInstances() const -> std::vector<InstanceStatus> {
  auto const coord_instances = raft_state_.GetAllCoordinators();

  auto const stringify_repl_role = [](ReplicationInstance const &instance) -> std::string {
    if (!instance.IsAlive()) return "unknown";
    if (instance.IsMain()) return "main";
    return "replica";
  };

  auto const repl_instance_to_status = [&stringify_repl_role](ReplicationInstance const &instance) -> InstanceStatus {
    return {.instance_name = instance.InstanceName(),
            .coord_socket_address = instance.SocketAddress(),
            .cluster_role = stringify_repl_role(instance),
            .is_alive = instance.IsAlive()};
  };

  auto const coord_instance_to_status = [](ptr<srv_config> const &instance) -> InstanceStatus {
    return {.instance_name = "coordinator_" + std::to_string(instance->get_id()),
            .raft_socket_address = instance->get_endpoint(),
            .cluster_role = "coordinator",
            .is_alive = true};  // TODO: (andi) Get this info from RAFT and test it or when we will move
            // CoordinatorState to every instance, we can be smarter about this using our RPC.
            .health = "unknown"};  // TODO: (andi) Get this info from RAFT and test it or when we will move
  };
  auto instances_status = utils::fmap(raft_state_.GetAllCoordinators(), coord_instance_to_status);

  auto instances_status = utils::fmap(coord_instance_to_status, coord_instances);
  {
    auto lock = std::shared_lock{coord_instance_lock_};
    std::ranges::transform(repl_instances_, std::back_inserter(instances_status), repl_instance_to_status);
  if (raft_state_.IsLeader()) {
    auto const stringify_repl_role = [this](ReplicationInstance const &instance) -> std::string {
      if (!instance.IsAlive()) return "unknown";
      if (raft_state_.IsMain(instance.InstanceName())) return "main";
      return "replica";
    };

    auto const stringify_repl_health = [](ReplicationInstance const &instance) -> std::string {
      return instance.IsAlive() ? "up" : "down";
    };

    auto process_repl_instance_as_leader =
        [&stringify_repl_role, &stringify_repl_health](ReplicationInstance const &instance) -> InstanceStatus {
      return {.instance_name = instance.InstanceName(),
              .coord_socket_address = instance.CoordinatorSocketAddress(),
              .cluster_role = stringify_repl_role(instance),
              .health = stringify_repl_health(instance)};
    };

    {
      auto lock = std::shared_lock{coord_instance_lock_};
      std::ranges::transform(repl_instances_, std::back_inserter(instances_status), process_repl_instance_as_leader);
    }
  } else {
    auto const stringify_inst_status = [](ReplicationRole status) -> std::string {
      return status == ReplicationRole::MAIN ? "main" : "replica";
    };

    // TODO: (andi) Add capability that followers can also return socket addresses
    auto process_repl_instance_as_follower = [&stringify_inst_status](auto const &instance) -> InstanceStatus {
      return {.instance_name = instance.config.instance_name,
              .cluster_role = stringify_inst_status(instance.status),
              .health = "unknown"};
    };

    std::ranges::transform(raft_state_.GetInstances(), std::back_inserter(instances_status),
                           process_repl_instance_as_follower);
  }

  return instances_status;
}

auto CoordinatorInstance::TryFailover() -> void {
  auto alive_replicas = repl_instances_ | ranges::views::filter(&ReplicationInstance::IsReplica) |
                        ranges::views::filter(&ReplicationInstance::IsAlive);
  auto const is_replica = [this](ReplicationInstance const &instance) { return IsReplica(instance.InstanceName()); };

  auto alive_replicas =
      repl_instances_ | ranges::views::filter(is_replica) | ranges::views::filter(&ReplicationInstance::IsAlive);

  if (ranges::empty(alive_replicas)) {
    spdlog::warn("Failover failed since all replicas are down!");
    return;
  }

  // TODO: Smarter choice
  auto new_main = ranges::begin(alive_replicas);
  if (!raft_state_.RequestLeadership()) {
    spdlog::error("Failover failed since the instance is not the leader!");
    return;
  }

  auto const get_ts = [](ReplicationInstance &replica) { return replica.GetClient().SendGetInstanceTimestampsRpc(); };

  auto maybe_instance_db_histories = alive_replicas | ranges::views::transform(get_ts) | ranges::to<std::vector>();

  auto const ts_has_error = [](auto const &res) -> bool { return res.HasError(); };

  if (std::ranges::any_of(maybe_instance_db_histories, ts_has_error)) {
    spdlog::error("Aborting failover as at least one instance didn't provide per database history.");
    return;
  }

  auto transform_to_pairs = ranges::views::transform([](auto const &zipped) {
    auto &[replica, res] = zipped;
    return std::make_pair(replica.InstanceName(), res.GetValue());
  });

  auto instance_db_histories =
      ranges::views::zip(alive_replicas, maybe_instance_db_histories) | transform_to_pairs | ranges::to<std::vector>();

  auto [most_up_to_date_instance, latest_epoch, latest_commit_timestamp] =
      ChooseMostUpToDateInstance(instance_db_histories);

  spdlog::trace("The most up to date instance is {} with epoch {} and {} latest commit timestamp",
                most_up_to_date_instance, latest_epoch, latest_commit_timestamp);  // NOLINT

  auto *new_main = &FindReplicationInstance(most_up_to_date_instance);

  new_main->PauseFrequentCheck();
  utils::OnScopeExit scope_exit{[&new_main] { new_main->ResumeFrequentCheck(); }};
@@ -177,41 +191,56 @@ auto CoordinatorInstance::TryFailover() -> void {
  };

  auto const new_main_uuid = utils::UUID{};

  auto const failed_to_swap = [&new_main_uuid](ReplicationInstance &instance) {
    return !instance.SendSwapAndUpdateUUID(new_main_uuid);
  };

  // If for some replicas swap fails, for others on successful ping we will revert back on next change
  // or we will do failover first again and then it will be consistent again
  for (auto &other_replica_instance : alive_replicas | ranges::views::filter(is_not_new_main)) {
    if (!other_replica_instance.SendSwapAndUpdateUUID(new_main_uuid)) {
      spdlog::error(fmt::format("Failed to swap uuid for instance {} which is alive, aborting failover",
                                other_replica_instance.InstanceName()));
      return;
    }
  if (std::ranges::any_of(alive_replicas | ranges::views::filter(is_not_new_main), failed_to_swap)) {
    spdlog::error("Failed to swap uuid for all instances");
    return;
  }

  auto repl_clients_info = repl_instances_ | ranges::views::filter(is_not_new_main) |
                           ranges::views::transform(&ReplicationInstance::ReplicationClientInfo) |
                           ranges::to<ReplicationClientsInfo>();

  if (!new_main->PromoteToMain(new_main_uuid, std::move(repl_clients_info), main_succ_cb_, main_fail_cb_)) {
  if (!new_main->PromoteToMain(new_main_uuid, std::move(repl_clients_info), &CoordinatorInstance::MainSuccessCallback,
                               &CoordinatorInstance::MainFailCallback)) {
    spdlog::warn("Failover failed since promoting replica to main failed!");
    return;
  }
  // TODO: (andi) This should be replicated across all coordinator instances with Raft log
  SetMainUUID(new_main_uuid);

  if (!raft_state_.AppendUpdateUUIDLog(new_main_uuid)) {
    return;
  }

  auto const new_main_instance_name = new_main->InstanceName();

  if (!raft_state_.AppendSetInstanceAsMainLog(new_main_instance_name)) {
    return;
  }

  spdlog::info("Failover successful! Instance {} promoted to main.", new_main->InstanceName());
}

// TODO: (andi) Make sure you cannot put coordinator instance to the main
auto CoordinatorInstance::SetReplicationInstanceToMain(std::string instance_name)
auto CoordinatorInstance::SetReplicationInstanceToMain(std::string_view instance_name)
    -> SetInstanceToMainCoordinatorStatus {
  auto lock = std::lock_guard{coord_instance_lock_};

  if (std::ranges::any_of(repl_instances_, &ReplicationInstance::IsMain)) {
  if (raft_state_.MainExists()) {
    return SetInstanceToMainCoordinatorStatus::MAIN_ALREADY_EXISTS;
  }

  if (!raft_state_.RequestLeadership()) {
    return SetInstanceToMainCoordinatorStatus::NOT_LEADER;
  }

  auto const is_new_main = [&instance_name](ReplicationInstance const &instance) {
    return instance.InstanceName() == instance_name;
  };

  auto new_main = std::ranges::find_if(repl_instances_, is_new_main);

  if (new_main == repl_instances_.end()) {
@@ -229,85 +258,93 @@ auto CoordinatorInstance::SetReplicationInstanceToMain(std::string instance_name

  auto const new_main_uuid = utils::UUID{};

  for (auto &other_instance : repl_instances_ | ranges::views::filter(is_not_new_main)) {
    if (!other_instance.SendSwapAndUpdateUUID(new_main_uuid)) {
      spdlog::error(
          fmt::format("Failed to swap uuid for instance {}, aborting failover", other_instance.InstanceName()));
      return SetInstanceToMainCoordinatorStatus::SWAP_UUID_FAILED;
    }
  auto const failed_to_swap = [&new_main_uuid](ReplicationInstance &instance) {
    return !instance.SendSwapAndUpdateUUID(new_main_uuid);
  };

  if (std::ranges::any_of(repl_instances_ | ranges::views::filter(is_not_new_main), failed_to_swap)) {
    spdlog::error("Failed to swap uuid for all instances");
    return SetInstanceToMainCoordinatorStatus::SWAP_UUID_FAILED;
  }

  ReplicationClientsInfo repl_clients_info;
  repl_clients_info.reserve(repl_instances_.size() - 1);
  std::ranges::transform(repl_instances_ | ranges::views::filter(is_not_new_main),
                         std::back_inserter(repl_clients_info), &ReplicationInstance::ReplicationClientInfo);
  auto repl_clients_info = repl_instances_ | ranges::views::filter(is_not_new_main) |
                           ranges::views::transform(&ReplicationInstance::ReplicationClientInfo) |
                           ranges::to<ReplicationClientsInfo>();

  if (!new_main->PromoteToMain(new_main_uuid, std::move(repl_clients_info), main_succ_cb_, main_fail_cb_)) {
  if (!new_main->PromoteToMain(new_main_uuid, std::move(repl_clients_info), &CoordinatorInstance::MainSuccessCallback,
                               &CoordinatorInstance::MainFailCallback)) {
    return SetInstanceToMainCoordinatorStatus::COULD_NOT_PROMOTE_TO_MAIN;
  }

  // TODO: (andi) This should be replicated across all coordinator instances with Raft log
  SetMainUUID(new_main_uuid);
  spdlog::info("Instance {} promoted to main", instance_name);
  if (!raft_state_.AppendUpdateUUIDLog(new_main_uuid)) {
    return SetInstanceToMainCoordinatorStatus::RAFT_LOG_ERROR;
  }

  if (!raft_state_.AppendSetInstanceAsMainLog(instance_name)) {
    return SetInstanceToMainCoordinatorStatus::RAFT_LOG_ERROR;
  }

  spdlog::info("Instance {} promoted to main on leader", instance_name);
  return SetInstanceToMainCoordinatorStatus::SUCCESS;
}

auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorClientConfig config)
auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorClientConfig const &config)
    -> RegisterInstanceCoordinatorStatus {
  auto lock = std::lock_guard{coord_instance_lock_};

  auto instance_name = config.instance_name;

  auto const name_matches = [&instance_name](ReplicationInstance const &instance) {
    return instance.InstanceName() == instance_name;
  };

  if (std::ranges::any_of(repl_instances_, name_matches)) {
  if (std::ranges::any_of(repl_instances_, [instance_name = config.instance_name](ReplicationInstance const &instance) {
        return instance.InstanceName() == instance_name;
      })) {
    return RegisterInstanceCoordinatorStatus::NAME_EXISTS;
  }

  auto const socket_address_matches = [&config](ReplicationInstance const &instance) {
    return instance.SocketAddress() == config.SocketAddress();
  };
  if (std::ranges::any_of(repl_instances_, [&config](ReplicationInstance const &instance) {
        return instance.CoordinatorSocketAddress() == config.CoordinatorSocketAddress();
      })) {
    return RegisterInstanceCoordinatorStatus::COORD_ENDPOINT_EXISTS;
  }

  if (std::ranges::any_of(repl_instances_, socket_address_matches)) {
    return RegisterInstanceCoordinatorStatus::ENDPOINT_EXISTS;
  if (std::ranges::any_of(repl_instances_, [&config](ReplicationInstance const &instance) {
        return instance.ReplicationSocketAddress() == config.ReplicationSocketAddress();
      })) {
    return RegisterInstanceCoordinatorStatus::REPL_ENDPOINT_EXISTS;
  }

  if (!raft_state_.RequestLeadership()) {
    return RegisterInstanceCoordinatorStatus::NOT_LEADER;
  }

  auto const res = raft_state_.AppendRegisterReplicationInstance(instance_name);
  if (!res->get_accepted()) {
    spdlog::error(
        "Failed to accept request for registering instance {}. Most likely the reason is that the instance is not "
        "the "
        "leader.",
        config.instance_name);
    return RegisterInstanceCoordinatorStatus::RAFT_COULD_NOT_ACCEPT;
  }
  auto const undo_action_ = [this]() { repl_instances_.pop_back(); };

  spdlog::info("Request for registering instance {} accepted", instance_name);
  try {
    repl_instances_.emplace_back(this, std::move(config), replica_succ_cb_, replica_fail_cb_);
  } catch (CoordinatorRegisterInstanceException const &) {
  auto *new_instance = &repl_instances_.emplace_back(this, config, client_succ_cb_, client_fail_cb_,
                                                     &CoordinatorInstance::ReplicaSuccessCallback,
                                                     &CoordinatorInstance::ReplicaFailCallback);

  if (!new_instance->SendDemoteToReplicaRpc()) {
    spdlog::error("Failed to send demote to replica rpc for instance {}", config.instance_name);
    undo_action_();
    return RegisterInstanceCoordinatorStatus::RPC_FAILED;
  }

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to register instance {} with error code {}", instance_name, res->get_result_code());
|
||||
return RegisterInstanceCoordinatorStatus::RAFT_COULD_NOT_APPEND;
|
||||
if (!raft_state_.AppendRegisterReplicationInstanceLog(config)) {
|
||||
undo_action_();
|
||||
return RegisterInstanceCoordinatorStatus::RAFT_LOG_ERROR;
|
||||
}
|
||||
|
||||
spdlog::info("Instance {} registered", instance_name);
|
||||
new_instance->StartFrequentCheck();
|
||||
|
||||
spdlog::info("Instance {} registered", config.instance_name);
|
||||
return RegisterInstanceCoordinatorStatus::SUCCESS;
|
||||
}
|
||||
|
||||
auto CoordinatorInstance::UnregisterReplicationInstance(std::string instance_name)
|
||||
auto CoordinatorInstance::UnregisterReplicationInstance(std::string_view instance_name)
|
||||
-> UnregisterInstanceCoordinatorStatus {
|
||||
auto lock = std::lock_guard{coord_instance_lock_};
|
||||
|
||||
if (!raft_state_.RequestLeadership()) {
|
||||
return UnregisterInstanceCoordinatorStatus::NOT_LEADER;
|
||||
}
|
||||
|
||||
auto const name_matches = [&instance_name](ReplicationInstance const &instance) {
|
||||
return instance.InstanceName() == instance_name;
|
||||
};
|
||||
@ -317,31 +354,208 @@ auto CoordinatorInstance::UnregisterReplicationInstance(std::string instance_nam
|
||||
return UnregisterInstanceCoordinatorStatus::NO_INSTANCE_WITH_NAME;
|
||||
}
|
||||
|
||||
if (inst_to_remove->IsMain() && inst_to_remove->IsAlive()) {
|
||||
auto const is_main = [this](ReplicationInstance const &instance) {
|
||||
return IsMain(instance.InstanceName()) && instance.GetMainUUID() == raft_state_.GetUUID() && instance.IsAlive();
|
||||
};
|
||||
|
||||
if (is_main(*inst_to_remove)) {
|
||||
return UnregisterInstanceCoordinatorStatus::IS_MAIN;
|
||||
}
|
||||
|
||||
inst_to_remove->StopFrequentCheck();
|
||||
auto curr_main = std::ranges::find_if(repl_instances_, &ReplicationInstance::IsMain);
|
||||
MG_ASSERT(curr_main != repl_instances_.end(), "There must be a main instance when unregistering a replica");
|
||||
if (!curr_main->SendUnregisterReplicaRpc(instance_name)) {
|
||||
inst_to_remove->StartFrequentCheck();
|
||||
return UnregisterInstanceCoordinatorStatus::RPC_FAILED;
|
||||
|
||||
auto curr_main = std::ranges::find_if(repl_instances_, is_main);
|
||||
|
||||
if (curr_main != repl_instances_.end() && curr_main->IsAlive()) {
|
||||
if (!curr_main->SendUnregisterReplicaRpc(instance_name)) {
|
||||
inst_to_remove->StartFrequentCheck();
|
||||
return UnregisterInstanceCoordinatorStatus::RPC_FAILED;
|
||||
}
|
||||
}
|
||||
|
||||
std::erase_if(repl_instances_, name_matches);
|
||||
|
||||
if (!raft_state_.AppendUnregisterReplicationInstanceLog(instance_name)) {
|
||||
return UnregisterInstanceCoordinatorStatus::RAFT_LOG_ERROR;
|
||||
}
|
||||
|
||||
return UnregisterInstanceCoordinatorStatus::SUCCESS;
|
||||
}
|
||||
|
||||
auto CoordinatorInstance::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address)
|
||||
-> void {
|
||||
raft_state_.AddCoordinatorInstance(raft_server_id, raft_port, std::move(raft_address));
|
||||
auto CoordinatorInstance::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port,
|
||||
std::string_view raft_address) -> void {
|
||||
raft_state_.AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
|
||||
}
|
||||
|
||||
auto CoordinatorInstance::GetMainUUID() const -> utils::UUID { return main_uuid_; }
|
||||
void CoordinatorInstance::MainFailCallback(std::string_view repl_instance_name) {
|
||||
spdlog::trace("Instance {} performing main fail callback", repl_instance_name);
|
||||
auto &repl_instance = FindReplicationInstance(repl_instance_name);
|
||||
repl_instance.OnFailPing();
|
||||
const auto &repl_instance_uuid = repl_instance.GetMainUUID();
|
||||
MG_ASSERT(repl_instance_uuid.has_value(), "Replication instance must have uuid set");
|
||||
|
||||
// TODO: (andi) Add to the RAFT log.
|
||||
auto CoordinatorInstance::SetMainUUID(utils::UUID new_uuid) -> void { main_uuid_ = new_uuid; }
|
||||
// NOLINTNEXTLINE
|
||||
if (!repl_instance.IsAlive() && raft_state_.GetUUID() == repl_instance_uuid.value()) {
|
||||
spdlog::info("Cluster without main instance, trying automatic failover");
|
||||
TryFailover();
|
||||
}
|
||||
}
|
||||
|
||||
void CoordinatorInstance::MainSuccessCallback(std::string_view repl_instance_name) {
|
||||
spdlog::trace("Instance {} performing main successful callback", repl_instance_name);
|
||||
auto &repl_instance = FindReplicationInstance(repl_instance_name);
|
||||
|
||||
if (repl_instance.IsAlive()) {
|
||||
repl_instance.OnSuccessPing();
|
||||
return;
|
||||
}
|
||||
|
||||
const auto &repl_instance_uuid = repl_instance.GetMainUUID();
|
||||
MG_ASSERT(repl_instance_uuid.has_value(), "Instance must have uuid set.");
|
||||
|
||||
// NOLINTNEXTLINE
|
||||
if (raft_state_.GetUUID() == repl_instance_uuid.value()) {
|
||||
if (!repl_instance.EnableWritingOnMain()) {
|
||||
spdlog::error("Failed to enable writing on main instance {}", repl_instance_name);
|
||||
return;
|
||||
}
|
||||
|
||||
repl_instance.OnSuccessPing();
|
||||
return;
|
||||
}
|
||||
|
||||
if (!raft_state_.RequestLeadership()) {
|
||||
spdlog::error("Demoting main instance {} to replica failed since the instance is not the leader!",
|
||||
repl_instance_name);
|
||||
return;
|
||||
}
|
||||
|
||||
if (repl_instance.DemoteToReplica(&CoordinatorInstance::ReplicaSuccessCallback,
|
||||
&CoordinatorInstance::ReplicaFailCallback)) {
|
||||
repl_instance.OnSuccessPing();
|
||||
spdlog::info("Instance {} demoted to replica", repl_instance_name);
|
||||
} else {
|
||||
spdlog::error("Instance {} failed to become replica", repl_instance_name);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!repl_instance.SendSwapAndUpdateUUID(raft_state_.GetUUID())) {
|
||||
spdlog::error("Failed to swap uuid for demoted main instance {}", repl_instance_name);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!raft_state_.AppendSetInstanceAsReplicaLog(repl_instance_name)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
void CoordinatorInstance::ReplicaSuccessCallback(std::string_view repl_instance_name) {
|
||||
spdlog::trace("Instance {} performing replica successful callback", repl_instance_name);
|
||||
auto &repl_instance = FindReplicationInstance(repl_instance_name);
|
||||
|
||||
if (!IsReplica(repl_instance_name)) {
|
||||
spdlog::error("Aborting replica callback since instance {} is not replica anymore", repl_instance_name);
|
||||
return;
|
||||
}
|
||||
  // We need to fetch the replica's UUID from time to time to ensure the replica is listening to the correct main
  // and that it didn't go down for a period shorter than our checks can notice.
  // We need to get the ID of the main this replica is listening to
  // and swap it if necessary.
  if (!repl_instance.EnsureReplicaHasCorrectMainUUID(raft_state_.GetUUID())) {
    spdlog::error("Failed to swap uuid for replica instance {} which is alive", repl_instance.InstanceName());
    return;
  }

  repl_instance.OnSuccessPing();
}

void CoordinatorInstance::ReplicaFailCallback(std::string_view repl_instance_name) {
  spdlog::trace("Instance {} performing replica failure callback", repl_instance_name);
  auto &repl_instance = FindReplicationInstance(repl_instance_name);

  if (!IsReplica(repl_instance_name)) {
    spdlog::error("Aborting replica fail callback since instance {} is not replica anymore", repl_instance_name);
    return;
  }

  repl_instance.OnFailPing();
}

auto CoordinatorInstance::ChooseMostUpToDateInstance(std::span<InstanceNameDbHistories> instance_database_histories)
    -> NewMainRes {
  std::optional<NewMainRes> new_main_res;
  std::for_each(
      instance_database_histories.begin(), instance_database_histories.end(),
      [&new_main_res](const InstanceNameDbHistories &instance_res_pair) {
        const auto &[instance_name, instance_db_histories] = instance_res_pair;

        // Find default db for instance and its history
        auto default_db_history_data = std::ranges::find_if(
            instance_db_histories, [default_db = memgraph::dbms::kDefaultDB](
                                       const replication_coordination_glue::DatabaseHistory &db_timestamps) {
              return db_timestamps.name == default_db;
            });

        std::ranges::for_each(
            instance_db_histories,
            [&instance_name = instance_name](const replication_coordination_glue::DatabaseHistory &db_history) {
              spdlog::debug("Instance {}: name {}, default db {}", instance_name, db_history.name,
                            memgraph::dbms::kDefaultDB);
            });

        MG_ASSERT(default_db_history_data != instance_db_histories.end(), "No history for instance");

        const auto &instance_default_db_history = default_db_history_data->history;

        std::ranges::for_each(instance_default_db_history | ranges::views::reverse,
                              [&instance_name = instance_name](const auto &epoch_history_it) {
                                spdlog::debug("Instance {}: epoch {}, last_commit_timestamp: {}", instance_name,
                                              std::get<0>(epoch_history_it), std::get<1>(epoch_history_it));
                              });

        // get latest epoch
        // get latest timestamp

        if (!new_main_res) {
          const auto &[epoch, timestamp] = *instance_default_db_history.crbegin();
          new_main_res = std::make_optional<NewMainRes>({instance_name, epoch, timestamp});
          spdlog::debug("Currently the most up to date instance is {} with epoch {} and {} latest commit timestamp",
                        instance_name, epoch, timestamp);
          return;
        }

        bool found_same_point{false};
        std::string last_most_up_to_date_epoch{new_main_res->latest_epoch};
        for (auto [epoch, timestamp] : ranges::reverse_view(instance_default_db_history)) {
          if (new_main_res->latest_commit_timestamp < timestamp) {
            new_main_res = std::make_optional<NewMainRes>({instance_name, epoch, timestamp});
            spdlog::trace("Found the new most up to date instance {} with epoch {} and {} latest commit timestamp",
                          instance_name, epoch, timestamp);
          }

          // we found point at which they were same
          if (epoch == last_most_up_to_date_epoch) {
            found_same_point = true;
            break;
          }
        }

        if (!found_same_point) {
          spdlog::error("Didn't find same history epoch {} for instance {} and instance {}", last_most_up_to_date_epoch,
                        new_main_res->most_up_to_date_instance, instance_name);
        }
      });

  return std::move(*new_main_res);
}

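// Editor's note (illustrative, not part of the original change): ChooseMostUpToDateInstance keeps the
// instance whose default-db history carries the highest last_commit_timestamp, provided the walk back
// through its history reaches the epoch at which the current best candidate was chosen (a shared point).
// For example, with hypothetical histories
//   instance_1: (epoch_A, 10), (epoch_B, 15)
//   instance_2: (epoch_A, 10), (epoch_B, 17)
// instance_2 is chosen because 17 > 15 and epoch_B is common to both histories.
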
auto CoordinatorInstance::IsMain(std::string_view instance_name) const -> bool {
  return raft_state_.IsMain(instance_name);
}

auto CoordinatorInstance::IsReplica(std::string_view instance_name) const -> bool {
  return raft_state_.IsReplica(instance_name);
}

} // namespace memgraph::coordination
#endif
@ -62,34 +62,33 @@ ptr<log_entry> CoordinatorLogStore::last_entry() const {

uint64_t CoordinatorLogStore::append(ptr<log_entry> &entry) {
  ptr<log_entry> clone = MakeClone(entry);
  uint64_t next_slot{0};
  {
    auto lock = std::lock_guard{logs_lock_};
    next_slot = start_idx_ + logs_.size() - 1;
    logs_[next_slot] = clone;
  }

  auto lock = std::lock_guard{logs_lock_};
  uint64_t next_slot = start_idx_ + logs_.size() - 1;
  logs_[next_slot] = clone;

  return next_slot;
}
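// Editor's note (assumption, not stated in this diff): the "- 1" appears to mirror NuRaft's
// in_memory_log_store, where slot 0 is presumed to hold a dummy entry, making the next free slot
// start_idx_ + logs_.size() - 1 rather than start_idx_ + logs_.size().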

// TODO: (andi) I think this is used for resolving conflicts inside NuRaft, check...
// different compared to in_memory_log_store.cxx
void CoordinatorLogStore::write_at(uint64_t index, ptr<log_entry> &entry) {
  ptr<log_entry> clone = MakeClone(entry);

  // Discard all logs equal to or greater than `index`.
  {
    auto lock = std::lock_guard{logs_lock_};
    auto itr = logs_.lower_bound(index);
    while (itr != logs_.end()) {
      itr = logs_.erase(itr);
    }
    logs_[index] = clone;
  auto lock = std::lock_guard{logs_lock_};
  auto itr = logs_.lower_bound(index);
  while (itr != logs_.end()) {
    itr = logs_.erase(itr);
  }
  logs_[index] = clone;
}

ptr<std::vector<ptr<log_entry>>> CoordinatorLogStore::log_entries(uint64_t start, uint64_t end) {
  auto ret = cs_new<std::vector<ptr<log_entry>>>();
  ret->resize(end - start);

  for (uint64_t i = start, curr_index = 0; i < end; ++i, ++curr_index) {
  for (uint64_t i = start, curr_index = 0; i < end; i++, curr_index++) {
    ptr<log_entry> src = nullptr;
    {
      auto lock = std::lock_guard{logs_lock_};
@ -105,21 +104,14 @@ ptr<std::vector<ptr<log_entry>>> CoordinatorLogStore::log_entries(uint64_t start
}

ptr<log_entry> CoordinatorLogStore::entry_at(uint64_t index) {
  ptr<log_entry> src = nullptr;
  {
    auto lock = std::lock_guard{logs_lock_};
    src = FindOrDefault_(index);
  }
  auto lock = std::lock_guard{logs_lock_};
  ptr<log_entry> src = FindOrDefault_(index);
  return MakeClone(src);
}

uint64_t CoordinatorLogStore::term_at(uint64_t index) {
  uint64_t term = 0;
  {
    auto lock = std::lock_guard{logs_lock_};
    term = FindOrDefault_(index)->get_term();
  }
  return term;
  auto lock = std::lock_guard{logs_lock_};
  return FindOrDefault_(index)->get_term();
}

ptr<buffer> CoordinatorLogStore::pack(uint64_t index, int32 cnt) {
@ -76,9 +76,9 @@ void EnableWritingOnMainRes::Load(EnableWritingOnMainRes *self, memgraph::slk::R
  memgraph::slk::Load(self, reader);
}

void EnableWritingOnMainReq::Save(EnableWritingOnMainReq const &self, memgraph::slk::Builder *builder) {}
void EnableWritingOnMainReq::Save(EnableWritingOnMainReq const & /*self*/, memgraph::slk::Builder * /*builder*/) {}

void EnableWritingOnMainReq::Load(EnableWritingOnMainReq *self, memgraph::slk::Reader *reader) {}
void EnableWritingOnMainReq::Load(EnableWritingOnMainReq * /*self*/, memgraph::slk::Reader * /*reader*/) {}

// GetInstanceUUID
void GetInstanceUUIDReq::Save(const GetInstanceUUIDReq &self, memgraph::slk::Builder *builder) {
@ -97,6 +97,24 @@ void GetInstanceUUIDRes::Load(GetInstanceUUIDRes *self, memgraph::slk::Reader *r
  memgraph::slk::Load(self, reader);
}

// GetDatabaseHistoriesRpc

void GetDatabaseHistoriesReq::Save(const GetDatabaseHistoriesReq & /*self*/, memgraph::slk::Builder * /*builder*/) {
  /* nothing to serialize */
}

void GetDatabaseHistoriesReq::Load(GetDatabaseHistoriesReq * /*self*/, memgraph::slk::Reader * /*reader*/) {
  /* nothing to serialize */
}

void GetDatabaseHistoriesRes::Save(const GetDatabaseHistoriesRes &self, memgraph::slk::Builder *builder) {
  memgraph::slk::Save(self, builder);
}

void GetDatabaseHistoriesRes::Load(GetDatabaseHistoriesRes *self, memgraph::slk::Reader *reader) {
  memgraph::slk::Load(self, reader);
}

} // namespace coordination

constexpr utils::TypeInfo coordination::PromoteReplicaToMainReq::kType{utils::TypeId::COORD_FAILOVER_REQ,
@ -130,6 +148,12 @@ constexpr utils::TypeInfo coordination::GetInstanceUUIDReq::kType{utils::TypeId:
constexpr utils::TypeInfo coordination::GetInstanceUUIDRes::kType{utils::TypeId::COORD_GET_UUID_RES, "CoordGetUUIDRes",
                                                                  nullptr};

constexpr utils::TypeInfo coordination::GetDatabaseHistoriesReq::kType{utils::TypeId::COORD_GET_INSTANCE_DATABASES_REQ,
                                                                       "GetInstanceDatabasesReq", nullptr};

constexpr utils::TypeInfo coordination::GetDatabaseHistoriesRes::kType{utils::TypeId::COORD_GET_INSTANCE_DATABASES_RES,
                                                                       "GetInstanceDatabasesRes", nullptr};

namespace slk {

// PromoteReplicaToMainRpc
@ -213,6 +237,16 @@ void Load(memgraph::coordination::GetInstanceUUIDRes *self, memgraph::slk::Reade
  memgraph::slk::Load(&self->uuid, reader);
}

// GetInstanceTimestampsReq

void Save(const memgraph::coordination::GetDatabaseHistoriesRes &self, memgraph::slk::Builder *builder) {
  memgraph::slk::Save(self.database_histories, builder);
}

void Load(memgraph::coordination::GetDatabaseHistoriesRes *self, memgraph::slk::Reader *reader) {
  memgraph::slk::Load(&self->database_histories, reader);
}

} // namespace slk

} // namespace memgraph
@ -41,7 +41,7 @@ CoordinatorState::CoordinatorState() {
  }
}

auto CoordinatorState::RegisterReplicationInstance(CoordinatorClientConfig config)
auto CoordinatorState::RegisterReplicationInstance(CoordinatorClientConfig const &config)
    -> RegisterInstanceCoordinatorStatus {
  MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
            "Coordinator cannot register replica since variant holds wrong alternative");
@ -56,7 +56,8 @@ auto CoordinatorState::RegisterReplicationInstance(CoordinatorClientConfig confi
      data_);
}

auto CoordinatorState::UnregisterReplicationInstance(std::string instance_name) -> UnregisterInstanceCoordinatorStatus {
auto CoordinatorState::UnregisterReplicationInstance(std::string_view instance_name)
    -> UnregisterInstanceCoordinatorStatus {
  MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
            "Coordinator cannot unregister instance since variant holds wrong alternative");

@ -70,7 +71,8 @@ auto CoordinatorState::UnregisterReplicationInstance(std::string instance_name)
      data_);
}

auto CoordinatorState::SetReplicationInstanceToMain(std::string instance_name) -> SetInstanceToMainCoordinatorStatus {
auto CoordinatorState::SetReplicationInstanceToMain(std::string_view instance_name)
    -> SetInstanceToMainCoordinatorStatus {
  MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
            "Coordinator cannot register replica since variant holds wrong alternative");

@ -96,8 +98,8 @@ auto CoordinatorState::GetCoordinatorServer() const -> CoordinatorServer & {
  return *std::get<CoordinatorMainReplicaData>(data_).coordinator_server_;
}

auto CoordinatorState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address)
    -> void {
auto CoordinatorState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port,
                                              std::string_view raft_address) -> void {
  MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
            "Coordinator cannot register replica since variant holds wrong alternative");
  return std::get<CoordinatorInstance>(data_).AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
@ -12,100 +12,204 @@
#ifdef MG_ENTERPRISE

#include "nuraft/coordinator_state_machine.hpp"
#include "utils/logging.hpp"

namespace {
constexpr int MAX_SNAPSHOTS = 3;
} // namespace

namespace memgraph::coordination {

auto CoordinatorStateMachine::EncodeRegisterReplicationInstance(const std::string &name) -> ptr<buffer> {
  std::string str_log = name + "_replica";
  ptr<buffer> log = buffer::alloc(sizeof(uint32_t) + str_log.size());
  buffer_serializer bs(log);
  bs.put_str(str_log);
  return log;
auto CoordinatorStateMachine::FindCurrentMainInstanceName() const -> std::optional<std::string> {
  return cluster_state_.FindCurrentMainInstanceName();
}

auto CoordinatorStateMachine::DecodeRegisterReplicationInstance(buffer &data) -> std::string {
auto CoordinatorStateMachine::MainExists() const -> bool { return cluster_state_.MainExists(); }

auto CoordinatorStateMachine::IsMain(std::string_view instance_name) const -> bool {
  return cluster_state_.IsMain(instance_name);
}

auto CoordinatorStateMachine::IsReplica(std::string_view instance_name) const -> bool {
  return cluster_state_.IsReplica(instance_name);
}

auto CoordinatorStateMachine::CreateLog(nlohmann::json &&log) -> ptr<buffer> {
  auto const log_dump = log.dump();
  ptr<buffer> log_buf = buffer::alloc(sizeof(uint32_t) + log_dump.size());
  buffer_serializer bs(log_buf);
  bs.put_str(log_dump);
  return log_buf;
}

auto CoordinatorStateMachine::SerializeRegisterInstance(CoordinatorClientConfig const &config) -> ptr<buffer> {
  return CreateLog({{"action", RaftLogAction::REGISTER_REPLICATION_INSTANCE}, {"info", config}});
}

auto CoordinatorStateMachine::SerializeUnregisterInstance(std::string_view instance_name) -> ptr<buffer> {
  return CreateLog({{"action", RaftLogAction::UNREGISTER_REPLICATION_INSTANCE}, {"info", instance_name}});
}

auto CoordinatorStateMachine::SerializeSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer> {
  return CreateLog({{"action", RaftLogAction::SET_INSTANCE_AS_MAIN}, {"info", instance_name}});
}

auto CoordinatorStateMachine::SerializeSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer> {
  return CreateLog({{"action", RaftLogAction::SET_INSTANCE_AS_REPLICA}, {"info", instance_name}});
}

auto CoordinatorStateMachine::SerializeUpdateUUID(utils::UUID const &uuid) -> ptr<buffer> {
  return CreateLog({{"action", RaftLogAction::UPDATE_UUID}, {"info", uuid}});
}

auto CoordinatorStateMachine::DecodeLog(buffer &data) -> std::pair<TRaftLog, RaftLogAction> {
  buffer_serializer bs(data);
  return bs.get_str();
  auto const json = nlohmann::json::parse(bs.get_str());

  auto const action = json["action"].get<RaftLogAction>();
  auto const &info = json["info"];

  switch (action) {
    case RaftLogAction::REGISTER_REPLICATION_INSTANCE:
      return {info.get<CoordinatorClientConfig>(), action};
    case RaftLogAction::UPDATE_UUID:
      return {info.get<utils::UUID>(), action};
    case RaftLogAction::UNREGISTER_REPLICATION_INSTANCE:
    case RaftLogAction::SET_INSTANCE_AS_MAIN:
      [[fallthrough]];
    case RaftLogAction::SET_INSTANCE_AS_REPLICA:
      return {info.get<std::string>(), action};
  }
  throw std::runtime_error("Unknown action");
}

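// Editor's note (illustrative, not part of the original change): CreateLog/DecodeLog round-trip a
// JSON payload of the shape {"action": <RaftLogAction>, "info": <config | instance name | uuid>},
// so each Raft log entry is self-describing. The exact JSON encoding of RaftLogAction depends on
// its nlohmann::json mapping, which is not shown in this diff.
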
auto CoordinatorStateMachine::pre_commit(ulong const log_idx, buffer &data) -> ptr<buffer> {
  buffer_serializer bs(data);
  std::string str = bs.get_str();

  spdlog::info("pre_commit {} : {}", log_idx, str);
  return nullptr;
}
auto CoordinatorStateMachine::pre_commit(ulong const /*log_idx*/, buffer & /*data*/) -> ptr<buffer> { return nullptr; }

auto CoordinatorStateMachine::commit(ulong const log_idx, buffer &data) -> ptr<buffer> {
  buffer_serializer bs(data);
  std::string str = bs.get_str();

  spdlog::info("commit {} : {}", log_idx, str);

  spdlog::debug("Commit: log_idx={}, data.size()={}", log_idx, data.size());
  auto const [parsed_data, log_action] = DecodeLog(data);
  cluster_state_.DoAction(parsed_data, log_action);
  last_committed_idx_ = log_idx;
  return nullptr;

  // Return raft log number
  ptr<buffer> ret = buffer::alloc(sizeof(log_idx));
  buffer_serializer bs_ret(ret);
  bs_ret.put_u64(log_idx);
  return ret;
}

auto CoordinatorStateMachine::commit_config(ulong const log_idx, ptr<cluster_config> & /*new_conf*/) -> void {
  last_committed_idx_ = log_idx;
  spdlog::debug("Commit config: log_idx={}", log_idx);
}

auto CoordinatorStateMachine::rollback(ulong const log_idx, buffer &data) -> void {
  buffer_serializer bs(data);
  std::string str = bs.get_str();

  spdlog::info("rollback {} : {}", log_idx, str);
  // NOTE: Nothing since we don't do anything in pre_commit
  spdlog::debug("Rollback: log_idx={}, data.size()={}", log_idx, data.size());
}

auto CoordinatorStateMachine::read_logical_snp_obj(snapshot & /*snapshot*/, void *& /*user_snp_ctx*/, ulong /*obj_id*/,
auto CoordinatorStateMachine::read_logical_snp_obj(snapshot &snapshot, void *& /*user_snp_ctx*/, ulong obj_id,
                                                   ptr<buffer> &data_out, bool &is_last_obj) -> int {
  // Put dummy data.
  data_out = buffer::alloc(sizeof(int32));
  buffer_serializer bs(data_out);
  bs.put_i32(0);
  spdlog::debug("read logical snapshot object, obj_id: {}", obj_id);

  ptr<SnapshotCtx> ctx = nullptr;
  {
    auto ll = std::lock_guard{snapshots_lock_};
    auto entry = snapshots_.find(snapshot.get_last_log_idx());
    if (entry == snapshots_.end()) {
      data_out = nullptr;
      is_last_obj = true;
      return 0;
    }
    ctx = entry->second;
  }

  if (obj_id == 0) {
    // Object ID == 0: first object, put dummy data.
    data_out = buffer::alloc(sizeof(int32));
    buffer_serializer bs(data_out);
    bs.put_i32(0);
    is_last_obj = false;
  } else {
    // Object ID > 0: second object, put actual value.
    ctx->cluster_state_.Serialize(data_out);
  }

  is_last_obj = true;
  return 0;
}

auto CoordinatorStateMachine::save_logical_snp_obj(snapshot &s, ulong &obj_id, buffer & /*data*/, bool /*is_first_obj*/,
                                                   bool /*is_last_obj*/) -> void {
  spdlog::info("save snapshot {} term {} object ID", s.get_last_log_idx(), s.get_last_log_term(), obj_id);
  // Request next object.
  obj_id++;
auto CoordinatorStateMachine::save_logical_snp_obj(snapshot &snapshot, ulong &obj_id, buffer &data, bool is_first_obj,
                                                   bool is_last_obj) -> void {
  spdlog::debug("save logical snapshot object, obj_id: {}, is_first_obj: {}, is_last_obj: {}", obj_id, is_first_obj,
                is_last_obj);

  if (obj_id == 0) {
    ptr<buffer> snp_buf = snapshot.serialize();
    auto ss = snapshot::deserialize(*snp_buf);
    create_snapshot_internal(ss);
  } else {
    auto cluster_state = CoordinatorClusterState::Deserialize(data);

    auto ll = std::lock_guard{snapshots_lock_};
    auto entry = snapshots_.find(snapshot.get_last_log_idx());
    DMG_ASSERT(entry != snapshots_.end());
    entry->second->cluster_state_ = cluster_state;
  }
}

auto CoordinatorStateMachine::apply_snapshot(snapshot &s) -> bool {
  spdlog::info("apply snapshot {} term {}", s.get_last_log_idx(), s.get_last_log_term());
  {
    auto lock = std::lock_guard{last_snapshot_lock_};
    ptr<buffer> snp_buf = s.serialize();
    last_snapshot_ = snapshot::deserialize(*snp_buf);
  }
  auto ll = std::lock_guard{snapshots_lock_};
  spdlog::debug("apply snapshot, last_log_idx: {}", s.get_last_log_idx());

  auto entry = snapshots_.find(s.get_last_log_idx());
  if (entry == snapshots_.end()) return false;

  cluster_state_ = entry->second->cluster_state_;
  return true;
}

auto CoordinatorStateMachine::free_user_snp_ctx(void *&user_snp_ctx) -> void {}

auto CoordinatorStateMachine::last_snapshot() -> ptr<snapshot> {
  auto lock = std::lock_guard{last_snapshot_lock_};
  return last_snapshot_;
  auto ll = std::lock_guard{snapshots_lock_};
  spdlog::debug("last_snapshot");
  auto entry = snapshots_.rbegin();
  if (entry == snapshots_.rend()) return nullptr;

  ptr<SnapshotCtx> ctx = entry->second;
  return ctx->snapshot_;
}

auto CoordinatorStateMachine::last_commit_index() -> ulong { return last_committed_idx_; }

auto CoordinatorStateMachine::create_snapshot(snapshot &s, async_result<bool>::handler_type &when_done) -> void {
  spdlog::info("create snapshot {} term {}", s.get_last_log_idx(), s.get_last_log_term());
  // Clone snapshot from `s`.
  {
    auto lock = std::lock_guard{last_snapshot_lock_};
    ptr<buffer> snp_buf = s.serialize();
    last_snapshot_ = snapshot::deserialize(*snp_buf);
  }
  spdlog::debug("create_snapshot, last_log_idx: {}", s.get_last_log_idx());
  ptr<buffer> snp_buf = s.serialize();
  ptr<snapshot> ss = snapshot::deserialize(*snp_buf);
  create_snapshot_internal(ss);

  ptr<std::exception> except(nullptr);
  bool ret = true;
  when_done(ret, except);
}

auto CoordinatorStateMachine::create_snapshot_internal(ptr<snapshot> snapshot) -> void {
  auto ll = std::lock_guard{snapshots_lock_};
  spdlog::debug("create_snapshot_internal, last_log_idx: {}", snapshot->get_last_log_idx());

  auto ctx = cs_new<SnapshotCtx>(snapshot, cluster_state_);
  snapshots_[snapshot->get_last_log_idx()] = ctx;

  while (snapshots_.size() > MAX_SNAPSHOTS) {
    snapshots_.erase(snapshots_.begin());
  }
}

auto CoordinatorStateMachine::GetInstances() const -> std::vector<InstanceState> {
  return cluster_state_.GetInstances();
}

auto CoordinatorStateMachine::GetUUID() const -> utils::UUID { return cluster_state_.GetUUID(); }

} // namespace memgraph::coordination
#endif
@ -14,6 +14,7 @@
#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"
#include "replication_coordination_glue/common.hpp"
#include "rpc/client.hpp"
#include "rpc_errors.hpp"
#include "utils/result.hpp"
@ -23,13 +24,13 @@
namespace memgraph::coordination {

class CoordinatorInstance;
using HealthCheckCallback = std::function<void(CoordinatorInstance *, std::string_view)>;
using HealthCheckClientCallback = std::function<void(CoordinatorInstance *, std::string_view)>;
using ReplicationClientsInfo = std::vector<ReplClientInfo>;

class CoordinatorClient {
 public:
  explicit CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorClientConfig config,
                             HealthCheckCallback succ_cb, HealthCheckCallback fail_cb);
                             HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb);

  ~CoordinatorClient() = default;

@ -45,16 +46,17 @@ class CoordinatorClient {
  void ResumeFrequentCheck();

  auto InstanceName() const -> std::string;
  auto SocketAddress() const -> std::string;
  auto CoordinatorSocketAddress() const -> std::string;
  auto ReplicationSocketAddress() const -> std::string;

  [[nodiscard]] auto DemoteToReplica() const -> bool;

  auto SendPromoteReplicaToMainRpc(const utils::UUID &uuid, ReplicationClientsInfo replication_clients_info) const
  auto SendPromoteReplicaToMainRpc(utils::UUID const &uuid, ReplicationClientsInfo replication_clients_info) const
      -> bool;

  auto SendSwapMainUUIDRpc(const utils::UUID &uuid) const -> bool;
  auto SendSwapMainUUIDRpc(utils::UUID const &uuid) const -> bool;

  auto SendUnregisterReplicaRpc(std::string const &instance_name) const -> bool;
  auto SendUnregisterReplicaRpc(std::string_view instance_name) const -> bool;

  auto SendEnableWritingOnMainRpc() const -> bool;

@ -62,7 +64,8 @@ class CoordinatorClient {
  auto ReplicationClientInfo() const -> ReplClientInfo;

  auto SetCallbacks(HealthCheckCallback succ_cb, HealthCheckCallback fail_cb) -> void;
  auto SendGetInstanceTimestampsRpc() const
      -> utils::BasicResult<GetInstanceUUIDError, replication_coordination_glue::DatabaseHistories>;

  auto RpcClient() -> rpc::Client & { return rpc_client_; }

@ -82,8 +85,8 @@ class CoordinatorClient {

  CoordinatorClientConfig config_;
  CoordinatorInstance *coord_instance_;
  HealthCheckCallback succ_cb_;
  HealthCheckCallback fail_cb_;
  HealthCheckClientCallback succ_cb_;
  HealthCheckClientCallback fail_cb_;
};

} // namespace memgraph::coordination
@ -14,12 +14,16 @@
#ifdef MG_ENTERPRISE

#include "replication_coordination_glue/mode.hpp"
#include "utils/string.hpp"

#include <chrono>
#include <cstdint>
#include <optional>
#include <string>

#include <fmt/format.h>
#include "json/json.hpp"

namespace memgraph::coordination {

inline constexpr auto *kDefaultReplicationServerIp = "0.0.0.0";
@ -32,7 +36,11 @@ struct CoordinatorClientConfig {
  std::chrono::seconds instance_down_timeout_sec{5};
  std::chrono::seconds instance_get_uuid_frequency_sec{10};

  auto SocketAddress() const -> std::string { return ip_address + ":" + std::to_string(port); }
  auto CoordinatorSocketAddress() const -> std::string { return fmt::format("{}:{}", ip_address, port); }
  auto ReplicationSocketAddress() const -> std::string {
    return fmt::format("{}:{}", replication_client_info.replication_ip_address,
                       replication_client_info.replication_port);
  }

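  // Editor's note (illustrative, hypothetical values): with ip_address "127.0.0.1", port 10011 and a
  // replication endpoint on port 10001, CoordinatorSocketAddress() yields "127.0.0.1:10011" while
  // ReplicationSocketAddress() yields "127.0.0.1:10001"; registration rejects an instance whose
  // coordinator or replication endpoint duplicates an already registered one.
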
  struct ReplicationClientInfo {
    std::string instance_name;
@ -75,5 +83,11 @@ struct CoordinatorServerConfig {
  friend bool operator==(CoordinatorServerConfig const &, CoordinatorServerConfig const &) = default;
};

void to_json(nlohmann::json &j, CoordinatorClientConfig const &config);
void from_json(nlohmann::json const &j, CoordinatorClientConfig &config);

void to_json(nlohmann::json &j, ReplClientInfo const &config);
void from_json(nlohmann::json const &j, ReplClientInfo &config);

} // namespace memgraph::coordination
#endif
@ -83,5 +83,16 @@ class RaftCouldNotParseFlagsException final : public utils::BasicException {
  SPECIALIZE_GET_EXCEPTION_NAME(RaftCouldNotParseFlagsException)
};

class InvalidRaftLogActionException final : public utils::BasicException {
 public:
  explicit InvalidRaftLogActionException(std::string_view what) noexcept : BasicException(what) {}

  template <class... Args>
  explicit InvalidRaftLogActionException(fmt::format_string<Args...> fmt, Args &&...args) noexcept
      : InvalidRaftLogActionException(fmt::format(fmt, std::forward<Args>(args)...)) {}

  SPECIALIZE_GET_EXCEPTION_NAME(InvalidRaftLogActionException)
};

} // namespace memgraph::coordination
#endif
@ -41,6 +41,9 @@ class CoordinatorHandlers {

  static void GetInstanceUUIDHandler(replication::ReplicationHandler &replication_handler, slk::Reader *req_reader,
                                     slk::Builder *res_builder);

  static void GetDatabaseHistoriesHandler(replication::ReplicationHandler &replication_handler, slk::Reader *req_reader,
                                          slk::Builder *res_builder);
};

} // namespace memgraph::dbms
@ -18,6 +18,7 @@
#include "coordination/raft_state.hpp"
#include "coordination/register_main_replica_coordinator_status.hpp"
#include "coordination/replication_instance.hpp"
#include "utils/resource_lock.hpp"
#include "utils/rw_lock.hpp"
#include "utils/thread_pool.hpp"

@ -25,33 +26,54 @@

namespace memgraph::coordination {

struct NewMainRes {
  std::string most_up_to_date_instance;
  std::string latest_epoch;
  uint64_t latest_commit_timestamp;
};
using InstanceNameDbHistories = std::pair<std::string, replication_coordination_glue::DatabaseHistories>;

class CoordinatorInstance {
 public:
  CoordinatorInstance();

  [[nodiscard]] auto RegisterReplicationInstance(CoordinatorClientConfig config) -> RegisterInstanceCoordinatorStatus;
  [[nodiscard]] auto UnregisterReplicationInstance(std::string instance_name) -> UnregisterInstanceCoordinatorStatus;
  [[nodiscard]] auto RegisterReplicationInstance(CoordinatorClientConfig const &config)
      -> RegisterInstanceCoordinatorStatus;
  [[nodiscard]] auto UnregisterReplicationInstance(std::string_view instance_name)
      -> UnregisterInstanceCoordinatorStatus;

  [[nodiscard]] auto SetReplicationInstanceToMain(std::string instance_name) -> SetInstanceToMainCoordinatorStatus;
  [[nodiscard]] auto SetReplicationInstanceToMain(std::string_view instance_name) -> SetInstanceToMainCoordinatorStatus;

  auto ShowInstances() const -> std::vector<InstanceStatus>;

  auto TryFailover() -> void;

  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address) -> void;
  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;

  auto GetMainUUID() const -> utils::UUID;

  auto SetMainUUID(utils::UUID new_uuid) -> void;
  static auto ChooseMostUpToDateInstance(std::span<InstanceNameDbHistories> histories) -> NewMainRes;

 private:
  HealthCheckCallback main_succ_cb_, main_fail_cb_, replica_succ_cb_, replica_fail_cb_;
  HealthCheckClientCallback client_succ_cb_, client_fail_cb_;

  // NOTE: Must be std::list because we rely on pointer stability
  auto OnRaftCommitCallback(TRaftLog const &log_entry, RaftLogAction log_action) -> void;

  auto FindReplicationInstance(std::string_view replication_instance_name) -> ReplicationInstance &;

  void MainFailCallback(std::string_view);

  void MainSuccessCallback(std::string_view);

  void ReplicaSuccessCallback(std::string_view);

  void ReplicaFailCallback(std::string_view);

  auto IsMain(std::string_view instance_name) const -> bool;
  auto IsReplica(std::string_view instance_name) const -> bool;

  // NOTE: Must be std::list because we rely on pointer stability.
  // Leader and followers should both have same view on repl_instances_
  std::list<ReplicationInstance> repl_instances_;
  mutable utils::RWLock coord_instance_lock_{utils::RWLock::Priority::READ};

  utils::UUID main_uuid_;
  mutable utils::ResourceLock coord_instance_lock_{};

  RaftState raft_state_;
};
@ -15,6 +15,7 @@
#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"
#include "replication_coordination_glue/common.hpp"
#include "rpc/messages.hpp"
#include "slk/serialization.hpp"

@ -89,7 +90,7 @@ struct UnregisterReplicaReq {
  static void Load(UnregisterReplicaReq *self, memgraph::slk::Reader *reader);
  static void Save(UnregisterReplicaReq const &self, memgraph::slk::Builder *builder);

  explicit UnregisterReplicaReq(std::string instance_name) : instance_name(std::move(instance_name)) {}
  explicit UnregisterReplicaReq(std::string_view inst_name) : instance_name(inst_name) {}

  UnregisterReplicaReq() = default;

@ -161,6 +162,32 @@ struct GetInstanceUUIDRes {

using GetInstanceUUIDRpc = rpc::RequestResponse<GetInstanceUUIDReq, GetInstanceUUIDRes>;

struct GetDatabaseHistoriesReq {
  static const utils::TypeInfo kType;
  static const utils::TypeInfo &GetTypeInfo() { return kType; }

  static void Load(GetDatabaseHistoriesReq *self, memgraph::slk::Reader *reader);
  static void Save(const GetDatabaseHistoriesReq &self, memgraph::slk::Builder *builder);

  GetDatabaseHistoriesReq() = default;
};

struct GetDatabaseHistoriesRes {
  static const utils::TypeInfo kType;
  static const utils::TypeInfo &GetTypeInfo() { return kType; }

  static void Load(GetDatabaseHistoriesRes *self, memgraph::slk::Reader *reader);
  static void Save(const GetDatabaseHistoriesRes &self, memgraph::slk::Builder *builder);

  explicit GetDatabaseHistoriesRes(const replication_coordination_glue::DatabaseHistories &database_histories)
      : database_histories(database_histories) {}
  GetDatabaseHistoriesRes() = default;

  replication_coordination_glue::DatabaseHistories database_histories;
};

using GetDatabaseHistoriesRpc = rpc::RequestResponse<GetDatabaseHistoriesReq, GetDatabaseHistoriesRes>;

} // namespace memgraph::coordination

// SLK serialization declarations
@ -183,15 +210,21 @@ void Save(const memgraph::coordination::GetInstanceUUIDReq &self, memgraph::slk:
void Load(memgraph::coordination::GetInstanceUUIDReq *self, memgraph::slk::Reader *reader);
void Save(const memgraph::coordination::GetInstanceUUIDRes &self, memgraph::slk::Builder *builder);
void Load(memgraph::coordination::GetInstanceUUIDRes *self, memgraph::slk::Reader *reader);

// UnregisterReplicaRpc
void Save(memgraph::coordination::UnregisterReplicaRes const &self, memgraph::slk::Builder *builder);
void Load(memgraph::coordination::UnregisterReplicaRes *self, memgraph::slk::Reader *reader);
void Save(memgraph::coordination::UnregisterReplicaReq const &self, memgraph::slk::Builder *builder);
void Load(memgraph::coordination::UnregisterReplicaReq *self, memgraph::slk::Reader *reader);

// EnableWritingOnMainRpc
void Save(memgraph::coordination::EnableWritingOnMainRes const &self, memgraph::slk::Builder *builder);
void Load(memgraph::coordination::EnableWritingOnMainRes *self, memgraph::slk::Reader *reader);

// GetDatabaseHistoriesRpc
void Save(const memgraph::coordination::GetDatabaseHistoriesRes &self, memgraph::slk::Builder *builder);
void Load(memgraph::coordination::GetDatabaseHistoriesRes *self, memgraph::slk::Reader *reader);

} // namespace memgraph::slk

#endif
@ -14,6 +14,7 @@
#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"
#include "replication_coordination_glue/common.hpp"
#include "slk/serialization.hpp"
#include "slk/streams.hpp"

@ -34,5 +35,18 @@ inline void Load(ReplicationClientInfo *obj, Reader *reader) {
  Load(&obj->replication_ip_address, reader);
  Load(&obj->replication_port, reader);
}

inline void Save(const replication_coordination_glue::DatabaseHistory &obj, Builder *builder) {
  Save(obj.db_uuid, builder);
  Save(obj.history, builder);
  Save(obj.name, builder);
}

inline void Load(replication_coordination_glue::DatabaseHistory *obj, Reader *reader) {
  Load(&obj->db_uuid, reader);
  Load(&obj->history, reader);
  Load(&obj->name, reader);
}

} // namespace memgraph::slk
#endif
@ -33,14 +33,16 @@ class CoordinatorState {
  CoordinatorState(CoordinatorState &&) noexcept = delete;
  CoordinatorState &operator=(CoordinatorState &&) noexcept = delete;

  [[nodiscard]] auto RegisterReplicationInstance(CoordinatorClientConfig config) -> RegisterInstanceCoordinatorStatus;
  [[nodiscard]] auto UnregisterReplicationInstance(std::string instance_name) -> UnregisterInstanceCoordinatorStatus;
  [[nodiscard]] auto RegisterReplicationInstance(CoordinatorClientConfig const &config)
      -> RegisterInstanceCoordinatorStatus;
  [[nodiscard]] auto UnregisterReplicationInstance(std::string_view instance_name)
      -> UnregisterInstanceCoordinatorStatus;

  [[nodiscard]] auto SetReplicationInstanceToMain(std::string instance_name) -> SetInstanceToMainCoordinatorStatus;
  [[nodiscard]] auto SetReplicationInstanceToMain(std::string_view instance_name) -> SetInstanceToMainCoordinatorStatus;

  auto ShowInstances() const -> std::vector<InstanceStatus>;

  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address) -> void;
  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;

  // NOTE: The client code must check that the server exists before calling this method.
  auto GetCoordinatorServer() const -> CoordinatorServer &;
@ -26,7 +26,7 @@ struct InstanceStatus {
  std::string raft_socket_address;
  std::string coord_socket_address;
  std::string cluster_role;
  bool is_alive;
  std::string health;
};

} // namespace memgraph::coordination
@ -14,11 +14,17 @@
#ifdef MG_ENTERPRISE

#include <flags/replication.hpp>
#include "io/network/endpoint.hpp"
#include "nuraft/coordinator_state_machine.hpp"
#include "nuraft/coordinator_state_manager.hpp"

#include <libnuraft/nuraft.hxx>

namespace memgraph::coordination {

class CoordinatorInstance;
struct CoordinatorClientConfig;

using BecomeLeaderCb = std::function<void()>;
using BecomeFollowerCb = std::function<void()>;

@ -47,26 +53,38 @@ class RaftState {
  RaftState &operator=(RaftState &&other) noexcept = default;
  ~RaftState();

  static auto MakeRaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb) -> RaftState;
  static auto MakeRaftState(BecomeLeaderCb &&become_leader_cb, BecomeFollowerCb &&become_follower_cb) -> RaftState;

  auto InstanceName() const -> std::string;
  auto RaftSocketAddress() const -> std::string;

  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address) -> void;
  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;
  auto GetAllCoordinators() const -> std::vector<ptr<srv_config>>;

  auto RequestLeadership() -> bool;
  auto IsLeader() const -> bool;

  auto AppendRegisterReplicationInstance(std::string const &instance) -> ptr<raft_result>;
  auto FindCurrentMainInstanceName() const -> std::optional<std::string>;
  auto MainExists() const -> bool;
  auto IsMain(std::string_view instance_name) const -> bool;
  auto IsReplica(std::string_view instance_name) const -> bool;

  // TODO: (andi) I think variables below can be abstracted
  auto AppendRegisterReplicationInstanceLog(CoordinatorClientConfig const &config) -> bool;
  auto AppendUnregisterReplicationInstanceLog(std::string_view instance_name) -> bool;
  auto AppendSetInstanceAsMainLog(std::string_view instance_name) -> bool;
  auto AppendSetInstanceAsReplicaLog(std::string_view instance_name) -> bool;
  auto AppendUpdateUUIDLog(utils::UUID const &uuid) -> bool;

  auto GetInstances() const -> std::vector<InstanceState>;
  auto GetUUID() const -> utils::UUID;

 private:
  // TODO: (andi) I think variables below can be abstracted/clean them.
  io::network::Endpoint raft_endpoint_;
  uint32_t raft_server_id_;
  uint32_t raft_port_;
  std::string raft_address_;

  ptr<state_machine> state_machine_;
  ptr<state_mgr> state_manager_;
  ptr<CoordinatorStateMachine> state_machine_;
  ptr<CoordinatorStateManager> state_manager_;
  ptr<raft_server> raft_server_;
  ptr<logger> logger_;
  raft_launcher launcher_;
@ -19,12 +19,12 @@ namespace memgraph::coordination {

enum class RegisterInstanceCoordinatorStatus : uint8_t {
  NAME_EXISTS,
  ENDPOINT_EXISTS,
  COORD_ENDPOINT_EXISTS,
  REPL_ENDPOINT_EXISTS,
  NOT_COORDINATOR,
  RPC_FAILED,
  NOT_LEADER,
  RAFT_COULD_NOT_ACCEPT,
  RAFT_COULD_NOT_APPEND,
  RPC_FAILED,
  RAFT_LOG_ERROR,
  SUCCESS
};

@ -32,8 +32,9 @@ enum class UnregisterInstanceCoordinatorStatus : uint8_t {
  NO_INSTANCE_WITH_NAME,
  IS_MAIN,
  NOT_COORDINATOR,
  NOT_LEADER,
  RPC_FAILED,
  NOT_LEADER,
  RAFT_LOG_ERROR,
  SUCCESS,
};

@ -41,9 +42,11 @@ enum class SetInstanceToMainCoordinatorStatus : uint8_t {
  NO_INSTANCE_WITH_NAME,
  MAIN_ALREADY_EXISTS,
  NOT_COORDINATOR,
  SUCCESS,
  NOT_LEADER,
  RAFT_LOG_ERROR,
  COULD_NOT_PROMOTE_TO_MAIN,
  SWAP_UUID_FAILED
  SWAP_UUID_FAILED,
  SUCCESS,
};

} // namespace memgraph::coordination
@ -17,18 +17,24 @@
|
||||
#include "coordination/coordinator_exceptions.hpp"
|
||||
#include "replication_coordination_glue/role.hpp"
|
||||
|
||||
#include <libnuraft/nuraft.hxx>
|
||||
#include "utils/resource_lock.hpp"
|
||||
#include "utils/result.hpp"
|
||||
#include "utils/uuid.hpp"
|
||||
|
||||
#include <libnuraft/nuraft.hxx>
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
class CoordinatorInstance;
class ReplicationInstance;

using HealthCheckInstanceCallback = void (CoordinatorInstance::*)(std::string_view);

class ReplicationInstance {
 public:
  ReplicationInstance(CoordinatorInstance *peer, CoordinatorClientConfig config, HealthCheckCallback succ_cb,
                      HealthCheckCallback fail_cb);
  ReplicationInstance(CoordinatorInstance *peer, CoordinatorClientConfig config, HealthCheckClientCallback succ_cb,
                      HealthCheckClientCallback fail_cb, HealthCheckInstanceCallback succ_instance_cb,
                      HealthCheckInstanceCallback fail_instance_cb);

  ReplicationInstance(ReplicationInstance const &other) = delete;
  ReplicationInstance &operator=(ReplicationInstance const &other) = delete;
@ -45,14 +51,16 @@ class ReplicationInstance {
  auto IsAlive() const -> bool;

  auto InstanceName() const -> std::string;
  auto SocketAddress() const -> std::string;
  auto CoordinatorSocketAddress() const -> std::string;
  auto ReplicationSocketAddress() const -> std::string;

  auto IsReplica() const -> bool;
  auto IsMain() const -> bool;
  auto PromoteToMain(utils::UUID const &uuid, ReplicationClientsInfo repl_clients_info,
                     HealthCheckInstanceCallback main_succ_cb, HealthCheckInstanceCallback main_fail_cb) -> bool;

  auto PromoteToMain(utils::UUID uuid, ReplicationClientsInfo repl_clients_info, HealthCheckCallback main_succ_cb,
                     HealthCheckCallback main_fail_cb) -> bool;
  auto DemoteToReplica(HealthCheckCallback replica_succ_cb, HealthCheckCallback replica_fail_cb) -> bool;
  auto SendDemoteToReplicaRpc() -> bool;

  auto DemoteToReplica(HealthCheckInstanceCallback replica_succ_cb, HealthCheckInstanceCallback replica_fail_cb)
      -> bool;

  auto StartFrequentCheck() -> void;
  auto StopFrequentCheck() -> void;
@ -63,9 +71,8 @@ class ReplicationInstance {

  auto EnsureReplicaHasCorrectMainUUID(utils::UUID const &curr_main_uuid) -> bool;

  auto SendSwapAndUpdateUUID(const utils::UUID &new_main_uuid) -> bool;
  auto SendUnregisterReplicaRpc(std::string const &instance_name) -> bool;

  auto SendSwapAndUpdateUUID(utils::UUID const &new_main_uuid) -> bool;
  auto SendUnregisterReplicaRpc(std::string_view instance_name) -> bool;

  auto SendGetInstanceUUID() -> utils::BasicResult<coordination::GetInstanceUUIDError, std::optional<utils::UUID>>;
  auto GetClient() -> CoordinatorClient &;
@ -74,11 +81,13 @@ class ReplicationInstance {

  auto SetNewMainUUID(utils::UUID const &main_uuid) -> void;
  auto ResetMainUUID() -> void;
  auto GetMainUUID() const -> const std::optional<utils::UUID> &;
  auto GetMainUUID() const -> std::optional<utils::UUID> const &;

  auto GetSuccessCallback() -> HealthCheckInstanceCallback &;
  auto GetFailCallback() -> HealthCheckInstanceCallback &;

 private:
  CoordinatorClient client_;
  replication_coordination_glue::ReplicationRole replication_role_;
  std::chrono::system_clock::time_point last_response_time_{};
  bool is_alive_{false};
  std::chrono::system_clock::time_point last_check_of_uuid_{};
@ -90,8 +99,12 @@ class ReplicationInstance {
  // so we need to send swap uuid again
  std::optional<utils::UUID> main_uuid_;

  HealthCheckInstanceCallback succ_cb_;
  HealthCheckInstanceCallback fail_cb_;

  friend bool operator==(ReplicationInstance const &first, ReplicationInstance const &second) {
    return first.client_ == second.client_ && first.replication_role_ == second.replication_role_;
    return first.client_ == second.client_ && first.last_response_time_ == second.last_response_time_ &&
           first.is_alive_ == second.is_alive_ && first.main_uuid_ == second.main_uuid_;
  }
};
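Note that HealthCheckInstanceCallback introduced above is a pointer to a CoordinatorInstance member function, not a plain std::function. A minimal hedged reminder of how such a callback is invoked (Invoke is illustrative only, not part of the codebase):

// Illustrative sketch, not actual Memgraph code.
void Invoke(CoordinatorInstance &coordinator, HealthCheckInstanceCallback cb, std::string_view instance_name) {
  (coordinator.*cb)(instance_name);  // call the pointed-to member function on the coordinator object
}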
@ -11,4 +11,5 @@

namespace memgraph::coordination {
enum class GetInstanceUUIDError { NO_RESPONSE, RPC_EXCEPTION };
enum class GetInstanceTimestampsError { NO_RESPONSE, RPC_EXCEPTION };
}  // namespace memgraph::coordination
@ -0,0 +1,92 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"
#include "nuraft/raft_log_action.hpp"
#include "replication_coordination_glue/role.hpp"
#include "utils/resource_lock.hpp"
#include "utils/uuid.hpp"

#include <libnuraft/nuraft.hxx>
#include <range/v3/view.hpp>
#include "json/json.hpp"

#include <map>
#include <numeric>
#include <string>
#include <variant>

namespace memgraph::coordination {

using replication_coordination_glue::ReplicationRole;

struct InstanceState {
  CoordinatorClientConfig config;
  ReplicationRole status;

  friend auto operator==(InstanceState const &lhs, InstanceState const &rhs) -> bool {
    return lhs.config == rhs.config && lhs.status == rhs.status;
  }
};

void to_json(nlohmann::json &j, InstanceState const &instance_state);
void from_json(nlohmann::json const &j, InstanceState &instance_state);

using TRaftLog = std::variant<CoordinatorClientConfig, std::string, utils::UUID>;

using nuraft::buffer;
using nuraft::buffer_serializer;
using nuraft::ptr;

class CoordinatorClusterState {
 public:
  CoordinatorClusterState() = default;
  explicit CoordinatorClusterState(std::map<std::string, InstanceState, std::less<>> instances);

  CoordinatorClusterState(CoordinatorClusterState const &);
  CoordinatorClusterState &operator=(CoordinatorClusterState const &);

  CoordinatorClusterState(CoordinatorClusterState &&other) noexcept;
  CoordinatorClusterState &operator=(CoordinatorClusterState &&other) noexcept;
  ~CoordinatorClusterState() = default;

  auto FindCurrentMainInstanceName() const -> std::optional<std::string>;

  auto MainExists() const -> bool;

  auto IsMain(std::string_view instance_name) const -> bool;

  auto IsReplica(std::string_view instance_name) const -> bool;

  auto InsertInstance(std::string instance_name, InstanceState instance_state) -> void;

  auto DoAction(TRaftLog log_entry, RaftLogAction log_action) -> void;

  auto Serialize(ptr<buffer> &data) -> void;

  static auto Deserialize(buffer &data) -> CoordinatorClusterState;

  auto GetInstances() const -> std::vector<InstanceState>;

  auto GetUUID() const -> utils::UUID;

 private:
  std::map<std::string, InstanceState, std::less<>> instances_{};
  utils::UUID uuid_{};
  mutable utils::ResourceLock log_lock_{};
};

}  // namespace memgraph::coordination
#endif
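The to_json/from_json declarations above follow nlohmann::json's ADL customization-point convention, so an InstanceState can be converted to and from JSON implicitly. A hedged sketch of what such definitions typically look like (the key names "config" and "status" are assumptions for illustration, and CoordinatorClientConfig is assumed to be JSON-serializable as well; the real definitions may differ):

// Illustrative sketch only; actual key names and definitions may differ.
void to_json(nlohmann::json &j, InstanceState const &instance_state) {
  j = nlohmann::json{{"config", instance_state.config}, {"status", instance_state.status}};
}

void from_json(nlohmann::json const &j, InstanceState &instance_state) {
  j.at("config").get_to(instance_state.config);
  j.at("status").get_to(instance_state.status);
}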
@ -13,9 +13,15 @@

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"
#include "nuraft/coordinator_cluster_state.hpp"
#include "nuraft/raft_log_action.hpp"

#include <spdlog/spdlog.h>
#include <libnuraft/nuraft.hxx>

#include <variant>

namespace memgraph::coordination {

using nuraft::async_result;
@ -36,9 +42,19 @@ class CoordinatorStateMachine : public state_machine {
  CoordinatorStateMachine &operator=(CoordinatorStateMachine &&) = delete;
  ~CoordinatorStateMachine() override {}

  static auto EncodeRegisterReplicationInstance(const std::string &name) -> ptr<buffer>;
  auto FindCurrentMainInstanceName() const -> std::optional<std::string>;
  auto MainExists() const -> bool;
  auto IsMain(std::string_view instance_name) const -> bool;
  auto IsReplica(std::string_view instance_name) const -> bool;

  static auto DecodeRegisterReplicationInstance(buffer &data) -> std::string;
  static auto CreateLog(nlohmann::json &&log) -> ptr<buffer>;
  static auto SerializeRegisterInstance(CoordinatorClientConfig const &config) -> ptr<buffer>;
  static auto SerializeUnregisterInstance(std::string_view instance_name) -> ptr<buffer>;
  static auto SerializeSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer>;
  static auto SerializeSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer>;
  static auto SerializeUpdateUUID(utils::UUID const &uuid) -> ptr<buffer>;

  static auto DecodeLog(buffer &data) -> std::pair<TRaftLog, RaftLogAction>;

  auto pre_commit(ulong log_idx, buffer &data) -> ptr<buffer> override;

@ -64,11 +80,27 @@ class CoordinatorStateMachine : public state_machine {

  auto create_snapshot(snapshot &s, async_result<bool>::handler_type &when_done) -> void override;

  auto GetInstances() const -> std::vector<InstanceState>;
  auto GetUUID() const -> utils::UUID;

 private:
  struct SnapshotCtx {
    SnapshotCtx(ptr<snapshot> &snapshot, CoordinatorClusterState const &cluster_state)
        : snapshot_(snapshot), cluster_state_(cluster_state) {}

    ptr<snapshot> snapshot_;
    CoordinatorClusterState cluster_state_;
  };

  auto create_snapshot_internal(ptr<snapshot> snapshot) -> void;

  CoordinatorClusterState cluster_state_;
  std::atomic<uint64_t> last_committed_idx_{0};

  ptr<snapshot> last_snapshot_;
  std::map<uint64_t, ptr<SnapshotCtx>> snapshots_;
  std::mutex snapshots_lock_;

  ptr<snapshot> last_snapshot_;
  std::mutex last_snapshot_lock_;
};
42
src/coordination/include/nuraft/raft_log_action.hpp
Normal file
@ -0,0 +1,42 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_exceptions.hpp"

#include <cstdint>
#include <string>

#include "json/json.hpp"

namespace memgraph::coordination {

enum class RaftLogAction : uint8_t {
  REGISTER_REPLICATION_INSTANCE,
  UNREGISTER_REPLICATION_INSTANCE,
  SET_INSTANCE_AS_MAIN,
  SET_INSTANCE_AS_REPLICA,
  UPDATE_UUID
};

NLOHMANN_JSON_SERIALIZE_ENUM(RaftLogAction, {
    {RaftLogAction::REGISTER_REPLICATION_INSTANCE, "register"},
    {RaftLogAction::UNREGISTER_REPLICATION_INSTANCE, "unregister"},
    {RaftLogAction::SET_INSTANCE_AS_MAIN, "promote"},
    {RaftLogAction::SET_INSTANCE_AS_REPLICA, "demote"},
    {RaftLogAction::UPDATE_UUID, "update_uuid"},
})

}  // namespace memgraph::coordination
#endif
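The NLOHMANN_JSON_SERIALIZE_ENUM mapping above controls how a RaftLogAction appears whenever a Raft log entry is rendered as JSON. A short round-trip example (illustrative only; it simply exercises standard nlohmann::json behavior with the header above included):

// Illustrative only.
nlohmann::json j = RaftLogAction::SET_INSTANCE_AS_MAIN;  // serializes as the string "promote"
auto const action = j.get<RaftLogAction>();              // parses back to SET_INSTANCE_AS_MAIN
assert(action == RaftLogAction::SET_INSTANCE_AS_MAIN);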
@ -10,12 +10,11 @@
// licenses/APL.txt.

#ifdef MG_ENTERPRISE
#include <chrono>

#include "coordination/raft_state.hpp"

#include "coordination/coordinator_config.hpp"
#include "coordination/coordinator_exceptions.hpp"
#include "nuraft/coordinator_state_machine.hpp"
#include "nuraft/coordinator_state_manager.hpp"
#include "coordination/raft_state.hpp"
#include "utils/counter.hpp"

namespace memgraph::coordination {
@ -33,31 +32,35 @@ using raft_result = cmd_result<ptr<buffer>>;

RaftState::RaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb, uint32_t raft_server_id,
                     uint32_t raft_port, std::string raft_address)
    : raft_server_id_(raft_server_id),
      raft_port_(raft_port),
      raft_address_(std::move(raft_address)),
    : raft_endpoint_(raft_address, raft_port),
      raft_server_id_(raft_server_id),
      state_machine_(cs_new<CoordinatorStateMachine>()),
      state_manager_(
          cs_new<CoordinatorStateManager>(raft_server_id_, raft_address_ + ":" + std::to_string(raft_port_))),
      state_manager_(cs_new<CoordinatorStateManager>(raft_server_id_, raft_endpoint_.SocketAddress())),
      logger_(nullptr),
      become_leader_cb_(std::move(become_leader_cb)),
      become_follower_cb_(std::move(become_follower_cb)) {}

auto RaftState::InitRaftServer() -> void {
  asio_service::options asio_opts;
  asio_opts.thread_pool_size_ = 1;  // TODO: (andi) Improve this
  asio_opts.thread_pool_size_ = 1;

  raft_params params;
  params.heart_beat_interval_ = 100;
  params.election_timeout_lower_bound_ = 200;
  params.election_timeout_upper_bound_ = 400;
  // 5 logs are preserved before the last snapshot
  params.reserved_log_items_ = 5;
  // Create snapshot for every 5 log appends
  params.snapshot_distance_ = 5;
  params.client_req_timeout_ = 3000;
  params.return_method_ = raft_params::blocking;

  // If the leader doesn't receive any response from quorum nodes
  // in 200ms, it will step down.
  // This allows us to achieve strong consistency even if network partition
  // happens between the current leader and followers.
  // The value must be <= election_timeout_lower_bound_ so that cluster can never
  // have multiple leaders.
  params.leadership_expiry_ = 200;

  raft_server::init_options init_opts;
  init_opts.raft_callback_ = [this](cb_func::Type event_type, cb_func::Param *param) -> nuraft::CbReturnCode {
    if (event_type == cb_func::BecomeLeader) {
@ -72,11 +75,11 @@ auto RaftState::InitRaftServer() -> void {

  raft_launcher launcher;

  raft_server_ = launcher.init(state_machine_, state_manager_, logger_, static_cast<int>(raft_port_), asio_opts, params,
                               init_opts);
  raft_server_ =
      launcher.init(state_machine_, state_manager_, logger_, raft_endpoint_.port, asio_opts, params, init_opts);

  if (!raft_server_) {
    throw RaftServerStartException("Failed to launch raft server on {}:{}", raft_address_, raft_port_);
    throw RaftServerStartException("Failed to launch raft server on {}", raft_endpoint_.SocketAddress());
  }

  auto maybe_stop = utils::ResettableCounter<20>();
@ -87,38 +90,61 @@ auto RaftState::InitRaftServer() -> void {
    std::this_thread::sleep_for(std::chrono::milliseconds(250));
  } while (!maybe_stop());

  throw RaftServerStartException("Failed to initialize raft server on {}:{}", raft_address_, raft_port_);
  throw RaftServerStartException("Failed to initialize raft server on {}", raft_endpoint_.SocketAddress());
}

auto RaftState::MakeRaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb) -> RaftState {
  uint32_t raft_server_id{0};
  uint32_t raft_port{0};
  try {
    raft_server_id = FLAGS_raft_server_id;
    raft_port = FLAGS_raft_server_port;
  } catch (std::exception const &e) {
    throw RaftCouldNotParseFlagsException("Failed to parse flags: {}", e.what());
  }
auto RaftState::MakeRaftState(BecomeLeaderCb &&become_leader_cb, BecomeFollowerCb &&become_follower_cb) -> RaftState {
  uint32_t raft_server_id = FLAGS_raft_server_id;
  uint32_t raft_port = FLAGS_raft_server_port;

  auto raft_state =
      RaftState(std::move(become_leader_cb), std::move(become_follower_cb), raft_server_id, raft_port, "127.0.0.1");

  raft_state.InitRaftServer();
  return raft_state;
}

RaftState::~RaftState() { launcher_.shutdown(); }

auto RaftState::InstanceName() const -> std::string { return "coordinator_" + std::to_string(raft_server_id_); }
auto RaftState::InstanceName() const -> std::string {
  return fmt::format("coordinator_{}", std::to_string(raft_server_id_));
}

auto RaftState::RaftSocketAddress() const -> std::string { return raft_address_ + ":" + std::to_string(raft_port_); }
auto RaftState::RaftSocketAddress() const -> std::string { return raft_endpoint_.SocketAddress(); }

auto RaftState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address) -> void {
  auto const endpoint = raft_address + ":" + std::to_string(raft_port);
auto RaftState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address)
    -> void {
  auto const endpoint = fmt::format("{}:{}", raft_address, raft_port);
  srv_config const srv_config_to_add(static_cast<int>(raft_server_id), endpoint);
  if (!raft_server_->add_srv(srv_config_to_add)->get_accepted()) {
    throw RaftAddServerException("Failed to add server {} to the cluster", endpoint);

  auto cmd_result = raft_server_->add_srv(srv_config_to_add);

  if (cmd_result->get_result_code() == nuraft::cmd_result_code::OK) {
    spdlog::info("Request to add server {} to the cluster accepted", endpoint);
  } else {
    throw RaftAddServerException("Failed to accept request to add server {} to the cluster with error code {}",
                                 endpoint, cmd_result->get_result_code());
  }

  // Waiting for server to join
  constexpr int max_tries{10};
  auto maybe_stop = utils::ResettableCounter<max_tries>();
  constexpr int waiting_period{200};
  bool added{false};
  while (!maybe_stop()) {
    std::this_thread::sleep_for(std::chrono::milliseconds(waiting_period));
    const auto server_config = raft_server_->get_srv_config(static_cast<nuraft::int32>(raft_server_id));
    if (server_config) {
      spdlog::trace("Server with id {} added to cluster", raft_server_id);
      added = true;
      break;
    }
  }

  if (!added) {
    throw RaftAddServerException("Failed to add server {} to the cluster in {}ms", endpoint,
                                 max_tries * waiting_period);
  }
  spdlog::info("Request to add server {} to the cluster accepted", endpoint);
}
auto RaftState::GetAllCoordinators() const -> std::vector<ptr<srv_config>> {
@ -131,10 +157,123 @@ auto RaftState::IsLeader() const -> bool { return raft_server_->is_leader(); }

auto RaftState::RequestLeadership() -> bool { return raft_server_->is_leader() || raft_server_->request_leadership(); }

auto RaftState::AppendRegisterReplicationInstance(std::string const &instance) -> ptr<raft_result> {
  auto new_log = CoordinatorStateMachine::EncodeRegisterReplicationInstance(instance);
  return raft_server_->append_entries({new_log});
auto RaftState::AppendRegisterReplicationInstanceLog(CoordinatorClientConfig const &config) -> bool {
  auto new_log = CoordinatorStateMachine::SerializeRegisterInstance(config);
  auto const res = raft_server_->append_entries({new_log});

  if (!res->get_accepted()) {
    spdlog::error(
        "Failed to accept request for registering instance {}. Most likely the reason is that the instance is not "
        "the leader.",
        config.instance_name);
    return false;
  }

  spdlog::info("Request for registering instance {} accepted", config.instance_name);

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to register instance {} with error code {}", config.instance_name, res->get_result_code());
    return false;
  }

  return true;
}

auto RaftState::AppendUnregisterReplicationInstanceLog(std::string_view instance_name) -> bool {
  auto new_log = CoordinatorStateMachine::SerializeUnregisterInstance(instance_name);
  auto const res = raft_server_->append_entries({new_log});
  if (!res->get_accepted()) {
    spdlog::error(
        "Failed to accept request for unregistering instance {}. Most likely the reason is that the instance is not "
        "the leader.",
        instance_name);
    return false;
  }

  spdlog::info("Request for unregistering instance {} accepted", instance_name);

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to unregister instance {} with error code {}", instance_name, res->get_result_code());
    return false;
  }
  return true;
}

auto RaftState::AppendSetInstanceAsMainLog(std::string_view instance_name) -> bool {
  auto new_log = CoordinatorStateMachine::SerializeSetInstanceAsMain(instance_name);
  auto const res = raft_server_->append_entries({new_log});
  if (!res->get_accepted()) {
    spdlog::error(
        "Failed to accept request for promoting instance {}. Most likely the reason is that the instance is not "
        "the leader.",
        instance_name);
    return false;
  }

  spdlog::info("Request for promoting instance {} accepted", instance_name);

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to promote instance {} with error code {}", instance_name, res->get_result_code());
    return false;
  }
  return true;
}

auto RaftState::AppendSetInstanceAsReplicaLog(std::string_view instance_name) -> bool {
  auto new_log = CoordinatorStateMachine::SerializeSetInstanceAsReplica(instance_name);
  auto const res = raft_server_->append_entries({new_log});
  if (!res->get_accepted()) {
    spdlog::error(
        "Failed to accept request for demoting instance {}. Most likely the reason is that the instance is not "
        "the leader.",
        instance_name);
    return false;
  }
  spdlog::info("Request for demoting instance {} accepted", instance_name);

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to demote instance {} with error code {}", instance_name, res->get_result_code());
    return false;
  }

  return true;
}

auto RaftState::AppendUpdateUUIDLog(utils::UUID const &uuid) -> bool {
  auto new_log = CoordinatorStateMachine::SerializeUpdateUUID(uuid);
  auto const res = raft_server_->append_entries({new_log});
  if (!res->get_accepted()) {
    spdlog::error(
        "Failed to accept request for updating UUID. Most likely the reason is that the instance is not "
        "the leader.");
    return false;
  }
  spdlog::info("Request for updating UUID accepted");

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to update UUID with error code {}", res->get_result_code());
    return false;
  }

  return true;
}

auto RaftState::FindCurrentMainInstanceName() const -> std::optional<std::string> {
  return state_machine_->FindCurrentMainInstanceName();
}

auto RaftState::MainExists() const -> bool { return state_machine_->MainExists(); }

auto RaftState::IsMain(std::string_view instance_name) const -> bool { return state_machine_->IsMain(instance_name); }

auto RaftState::IsReplica(std::string_view instance_name) const -> bool {
  return state_machine_->IsReplica(instance_name);
}

auto RaftState::GetInstances() const -> std::vector<InstanceState> { return state_machine_->GetInstances(); }

auto RaftState::GetUUID() const -> utils::UUID { return state_machine_->GetUUID(); }

}  // namespace memgraph::coordination
#endif
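The five Append*Log methods above repeat the same accept/apply check around append_entries. A hedged sketch of how that shared pattern could be factored into a single helper (AppendLogEntry is illustrative only, not part of the Memgraph codebase):

// Illustrative sketch only.
auto RaftState::AppendLogEntry(ptr<buffer> new_log, std::string_view description) -> bool {
  auto const res = raft_server_->append_entries({new_log});
  if (!res->get_accepted()) {
    spdlog::error("Failed to accept request for {}. Most likely this instance is not the leader.", description);
    return false;
  }
  spdlog::info("Request for {} accepted", description);
  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to apply {} with error code {}", description, res->get_result_code());
    return false;
  }
  return true;
}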
@ -13,21 +13,20 @@

#include "coordination/replication_instance.hpp"

#include <utility>

#include "replication_coordination_glue/handler.hpp"
#include "utils/result.hpp"

namespace memgraph::coordination {

ReplicationInstance::ReplicationInstance(CoordinatorInstance *peer, CoordinatorClientConfig config,
                                         HealthCheckCallback succ_cb, HealthCheckCallback fail_cb)
                                         HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb,
                                         HealthCheckInstanceCallback succ_instance_cb,
                                         HealthCheckInstanceCallback fail_instance_cb)
    : client_(peer, std::move(config), std::move(succ_cb), std::move(fail_cb)),
      replication_role_(replication_coordination_glue::ReplicationRole::REPLICA) {
  if (!client_.DemoteToReplica()) {
    throw CoordinatorRegisterInstanceException("Failed to demote instance {} to replica", client_.InstanceName());
  }

  client_.StartFrequentCheck();
}
      succ_cb_(succ_instance_cb),
      fail_cb_(fail_instance_cb) {}

auto ReplicationInstance::OnSuccessPing() -> void {
  last_response_time_ = std::chrono::system_clock::now();
@ -46,37 +45,34 @@ auto ReplicationInstance::IsReadyForUUIDPing() -> bool {
}

auto ReplicationInstance::InstanceName() const -> std::string { return client_.InstanceName(); }
auto ReplicationInstance::SocketAddress() const -> std::string { return client_.SocketAddress(); }
auto ReplicationInstance::CoordinatorSocketAddress() const -> std::string { return client_.CoordinatorSocketAddress(); }
auto ReplicationInstance::ReplicationSocketAddress() const -> std::string { return client_.ReplicationSocketAddress(); }
auto ReplicationInstance::IsAlive() const -> bool { return is_alive_; }

auto ReplicationInstance::IsReplica() const -> bool {
  return replication_role_ == replication_coordination_glue::ReplicationRole::REPLICA;
}
auto ReplicationInstance::IsMain() const -> bool {
  return replication_role_ == replication_coordination_glue::ReplicationRole::MAIN;
}

auto ReplicationInstance::PromoteToMain(utils::UUID new_uuid, ReplicationClientsInfo repl_clients_info,
                                        HealthCheckCallback main_succ_cb, HealthCheckCallback main_fail_cb) -> bool {
auto ReplicationInstance::PromoteToMain(utils::UUID const &new_uuid, ReplicationClientsInfo repl_clients_info,
                                        HealthCheckInstanceCallback main_succ_cb,
                                        HealthCheckInstanceCallback main_fail_cb) -> bool {
  if (!client_.SendPromoteReplicaToMainRpc(new_uuid, std::move(repl_clients_info))) {
    return false;
  }

  replication_role_ = replication_coordination_glue::ReplicationRole::MAIN;
  main_uuid_ = new_uuid;
  client_.SetCallbacks(std::move(main_succ_cb), std::move(main_fail_cb));
  succ_cb_ = main_succ_cb;
  fail_cb_ = main_fail_cb;

  return true;
}

auto ReplicationInstance::DemoteToReplica(HealthCheckCallback replica_succ_cb, HealthCheckCallback replica_fail_cb)
    -> bool {
auto ReplicationInstance::SendDemoteToReplicaRpc() -> bool { return client_.DemoteToReplica(); }

auto ReplicationInstance::DemoteToReplica(HealthCheckInstanceCallback replica_succ_cb,
                                          HealthCheckInstanceCallback replica_fail_cb) -> bool {
  if (!client_.DemoteToReplica()) {
    return false;
  }

  replication_role_ = replication_coordination_glue::ReplicationRole::REPLICA;
  client_.SetCallbacks(std::move(replica_succ_cb), std::move(replica_fail_cb));
  succ_cb_ = replica_succ_cb;
  fail_cb_ = replica_fail_cb;

  return true;
}
@ -90,10 +86,12 @@ auto ReplicationInstance::ReplicationClientInfo() const -> CoordinatorClientConf
  return client_.ReplicationClientInfo();
}

auto ReplicationInstance::GetSuccessCallback() -> HealthCheckInstanceCallback & { return succ_cb_; }
auto ReplicationInstance::GetFailCallback() -> HealthCheckInstanceCallback & { return fail_cb_; }

auto ReplicationInstance::GetClient() -> CoordinatorClient & { return client_; }

auto ReplicationInstance::SetNewMainUUID(utils::UUID const &main_uuid) -> void { main_uuid_ = main_uuid; }
auto ReplicationInstance::ResetMainUUID() -> void { main_uuid_ = std::nullopt; }
auto ReplicationInstance::GetMainUUID() const -> std::optional<utils::UUID> const & { return main_uuid_; }

auto ReplicationInstance::EnsureReplicaHasCorrectMainUUID(utils::UUID const &curr_main_uuid) -> bool {
@ -106,6 +104,7 @@ auto ReplicationInstance::EnsureReplicaHasCorrectMainUUID(utils::UUID const &cur
  }
  UpdateReplicaLastResponseUUID();

  // NOLINTNEXTLINE
  if (res.GetValue().has_value() && res.GetValue().value() == curr_main_uuid) {
    return true;
  }
@ -113,7 +112,7 @@ auto ReplicationInstance::EnsureReplicaHasCorrectMainUUID(utils::UUID const &cur
  return SendSwapAndUpdateUUID(curr_main_uuid);
}

auto ReplicationInstance::SendSwapAndUpdateUUID(const utils::UUID &new_main_uuid) -> bool {
auto ReplicationInstance::SendSwapAndUpdateUUID(utils::UUID const &new_main_uuid) -> bool {
  if (!replication_coordination_glue::SendSwapMainUUIDRpc(client_.RpcClient(), new_main_uuid)) {
    return false;
  }
@ -121,7 +120,7 @@ auto ReplicationInstance::SendSwapAndUpdateUUID(const utils::UUID &new_main_uuid
  return true;
}

auto ReplicationInstance::SendUnregisterReplicaRpc(std::string const &instance_name) -> bool {
auto ReplicationInstance::SendUnregisterReplicaRpc(std::string_view instance_name) -> bool {
  return client_.SendUnregisterReplicaRpc(instance_name);
}
@ -20,28 +20,28 @@ namespace memgraph::dbms {
CoordinatorHandler::CoordinatorHandler(coordination::CoordinatorState &coordinator_state)
    : coordinator_state_(coordinator_state) {}

auto CoordinatorHandler::RegisterReplicationInstance(memgraph::coordination::CoordinatorClientConfig config)
auto CoordinatorHandler::RegisterReplicationInstance(coordination::CoordinatorClientConfig const &config)
    -> coordination::RegisterInstanceCoordinatorStatus {
  return coordinator_state_.RegisterReplicationInstance(config);
}

auto CoordinatorHandler::UnregisterReplicationInstance(std::string instance_name)
auto CoordinatorHandler::UnregisterReplicationInstance(std::string_view instance_name)
    -> coordination::UnregisterInstanceCoordinatorStatus {
  return coordinator_state_.UnregisterReplicationInstance(std::move(instance_name));
  return coordinator_state_.UnregisterReplicationInstance(instance_name);
}

auto CoordinatorHandler::SetReplicationInstanceToMain(std::string instance_name)
auto CoordinatorHandler::SetReplicationInstanceToMain(std::string_view instance_name)
    -> coordination::SetInstanceToMainCoordinatorStatus {
  return coordinator_state_.SetReplicationInstanceToMain(std::move(instance_name));
  return coordinator_state_.SetReplicationInstanceToMain(instance_name);
}

auto CoordinatorHandler::ShowInstances() const -> std::vector<coordination::InstanceStatus> {
  return coordinator_state_.ShowInstances();
}

auto CoordinatorHandler::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address)
    -> void {
  coordinator_state_.AddCoordinatorInstance(raft_server_id, raft_port, std::move(raft_address));
auto CoordinatorHandler::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port,
                                                std::string_view raft_address) -> void {
  coordinator_state_.AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
}

}  // namespace memgraph::dbms
@ -30,16 +30,17 @@ class CoordinatorHandler {

  // TODO: (andi) When moving coordinator state on same instances, rename from RegisterReplicationInstance to
  // RegisterInstance
  auto RegisterReplicationInstance(coordination::CoordinatorClientConfig config)
  auto RegisterReplicationInstance(coordination::CoordinatorClientConfig const &config)
      -> coordination::RegisterInstanceCoordinatorStatus;

  auto UnregisterReplicationInstance(std::string instance_name) -> coordination::UnregisterInstanceCoordinatorStatus;
  auto UnregisterReplicationInstance(std::string_view instance_name)
      -> coordination::UnregisterInstanceCoordinatorStatus;

  auto SetReplicationInstanceToMain(std::string instance_name) -> coordination::SetInstanceToMainCoordinatorStatus;
  auto SetReplicationInstanceToMain(std::string_view instance_name) -> coordination::SetInstanceToMainCoordinatorStatus;

  auto ShowInstances() const -> std::vector<coordination::InstanceStatus>;

  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address) -> void;
  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;

 private:
  coordination::CoordinatorState &coordinator_state_;
@ -110,9 +110,9 @@ class Database {
   * @param force_directory Use the configured directory, do not try to decipher the multi-db version
   * @return DatabaseInfo
   */
  DatabaseInfo GetInfo(bool force_directory, replication_coordination_glue::ReplicationRole replication_role) const {
  DatabaseInfo GetInfo(replication_coordination_glue::ReplicationRole replication_role) const {
    DatabaseInfo info;
    info.storage_info = storage_->GetInfo(force_directory, replication_role);
    info.storage_info = storage_->GetInfo(replication_role);
    info.triggers = trigger_store_.GetTriggerInfo().size();
    info.streams = streams_.GetStreamInfo().size();
    return info;
@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -185,6 +185,16 @@ DbmsHandler::DbmsHandler(storage::Config config, replication::ReplicationState &
  auto directories = std::set{std::string{kDefaultDB}};

  // Recover previous databases
  if (flags::AreExperimentsEnabled(flags::Experiments::SYSTEM_REPLICATION) && !recovery_on_startup) {
    // This will result in dropping databases on SystemRecoveryHandler
    // for MT case, and for single DB case we might not even set replication as commit timestamp is checked
    spdlog::warn(
        "Data recovery on startup not set, this will result in dropping database in case of multi-tenancy enabled.");
  }

  // TODO: Problem is if user doesn't set this up "database" name won't be recovered
  // but if storage-recover-on-startup is true storage will be recovered which is an issue
  spdlog::info("Data recovery on startup set to {}", recovery_on_startup);
  if (recovery_on_startup) {
    auto it = durability_->begin(std::string(kDBPrefix));
    auto end = durability_->end(std::string(kDBPrefix));
@ -410,9 +420,10 @@ void DbmsHandler::UpdateDurability(const storage::Config &config, std::optional<
  if (!durability_) return;
  // Save database in a list of active databases
  const auto &key = Durability::GenKey(config.salient.name);
  if (rel_dir == std::nullopt)
  if (rel_dir == std::nullopt) {
    rel_dir =
        std::filesystem::relative(config.durability.storage_directory, default_config_.durability.storage_directory);
  }
  const auto &val = Durability::GenVal(config.salient.uuid, *rel_dir);
  durability_->Put(key, val);
}
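For context, the fallback above derives rel_dir as the database's storage directory expressed relative to the default storage directory; a tiny illustration of std::filesystem::relative with made-up paths:

// Illustrative only; the paths are invented.
#include <filesystem>
auto const rel = std::filesystem::relative("/var/lib/memgraph/databases/db1", "/var/lib/memgraph");
// rel == "databases/db1"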
@ -155,6 +155,8 @@ class DbmsHandler {
  spdlog::debug("Trying to create db '{}' on replica which already exists.", config.name);

  auto db = Get_(config.name);
  spdlog::debug("Aligning database with name {} which has UUID {}, where config UUID is {}", config.name,
                std::string(db->uuid()), std::string(config.uuid));
  if (db->uuid() == config.uuid) {  // Same db
    return db;
  }
@ -163,18 +165,22 @@ class DbmsHandler {

  // TODO: Fix this hack
  if (config.name == kDefaultDB) {
    spdlog::debug("Last commit timestamp for DB {} is {}", kDefaultDB,
                  db->storage()->repl_storage_state_.last_commit_timestamp_);
    // This seems correct, if database made progress
    if (db->storage()->repl_storage_state_.last_commit_timestamp_ != storage::kTimestampInitialId) {
      spdlog::debug("Default storage is not clean, cannot update UUID...");
      return NewError::GENERIC;  // Update error
    }
    spdlog::debug("Update default db's UUID");
    spdlog::debug("Updated default db's UUID");
    // Default db cannot be deleted and remade, have to just update the UUID
    db->storage()->config_.salient.uuid = config.uuid;
    UpdateDurability(db->storage()->config_, ".");
    return db;
  }

  spdlog::debug("Drop database and recreate with the correct UUID");
  spdlog::debug("Dropping database {} with UUID: {} and recreating with the correct UUID: {}", config.name,
                std::string(db->uuid()), std::string(config.uuid));
  // Defer drop
  (void)Delete_(db->name());
  // Second attempt
@ -266,10 +272,6 @@ class DbmsHandler {
  bool IsMain() const { return repl_state_.IsMain(); }
  bool IsReplica() const { return repl_state_.IsReplica(); }

#ifdef MG_ENTERPRISE
  // coordination::CoordinatorState &CoordinatorState() { return coordinator_state_; }
#endif

  /**
   * @brief Return all active databases.
   *
@ -302,7 +304,7 @@ class DbmsHandler {
    auto db_acc_opt = db_gk.access();
    if (db_acc_opt) {
      auto &db_acc = *db_acc_opt;
      const auto &info = db_acc->GetInfo(false, replication_role);
      const auto &info = db_acc->GetInfo(replication_role);
      const auto &storage_info = info.storage_info;
      stats.num_vertex += storage_info.vertex_count;
      stats.num_edges += storage_info.edge_count;
@ -338,7 +340,7 @@ class DbmsHandler {
    auto db_acc_opt = db_gk.access();
    if (db_acc_opt) {
      auto &db_acc = *db_acc_opt;
      res.push_back(db_acc->GetInfo(false, replication_role));
      res.push_back(db_acc->GetInfo(replication_role));
    }
  }
  return res;
@ -19,7 +19,6 @@
#include "storage/v2/durability/durability.hpp"
#include "storage/v2/durability/snapshot.hpp"
#include "storage/v2/durability/version.hpp"
#include "storage/v2/fmt.hpp"
#include "storage/v2/indices/label_index_stats.hpp"
#include "storage/v2/inmemory/storage.hpp"
#include "storage/v2/inmemory/unique_constraints.hpp"
@ -119,9 +118,14 @@ void InMemoryReplicationHandlers::Register(dbms::DbmsHandler *dbms_handler, repl
      });
  server.rpc_server_.Register<replication_coordination_glue::SwapMainUUIDRpc>(
      [&data, dbms_handler](auto *req_reader, auto *res_builder) {
        spdlog::debug("Received SwapMainUUIDHandler");
        spdlog::debug("Received SwapMainUUIDRpc");
        InMemoryReplicationHandlers::SwapMainUUIDHandler(dbms_handler, data, req_reader, res_builder);
      });
  server.rpc_server_.Register<storage::replication::ForceResetStorageRpc>(
      [&data, dbms_handler](auto *req_reader, auto *res_builder) {
        spdlog::debug("Received ForceResetStorageRpc");
        InMemoryReplicationHandlers::ForceResetStorageHandler(dbms_handler, data.uuid_, req_reader, res_builder);
      });
}

void InMemoryReplicationHandlers::SwapMainUUIDHandler(dbms::DbmsHandler *dbms_handler,
@ -135,7 +139,7 @@ void InMemoryReplicationHandlers::SwapMainUUIDHandler(dbms::DbmsHa

  replication_coordination_glue::SwapMainUUIDReq req;
  slk::Load(&req, req_reader);
  spdlog::info(fmt::format("Set replica data UUID to main uuid {}", std::string(req.uuid)));
  spdlog::info("Set replica data UUID to main uuid {}", std::string(req.uuid));
  dbms_handler->ReplicationState().TryPersistRoleReplica(role_replica_data.config, req.uuid);
  role_replica_data.uuid_ = req.uuid;

@ -330,6 +334,78 @@ void InMemoryReplicationHandlers::SnapshotHandler(dbms::DbmsHandle
  spdlog::debug("Replication recovery from snapshot finished!");
}

void InMemoryReplicationHandlers::ForceResetStorageHandler(dbms::DbmsHandler *dbms_handler,
                                                           const std::optional<utils::UUID> &current_main_uuid,
                                                           slk::Reader *req_reader, slk::Builder *res_builder) {
  storage::replication::ForceResetStorageReq req;
  slk::Load(&req, req_reader);
  auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_uuid);
  if (!db_acc) {
    storage::replication::ForceResetStorageRes res{false, 0};
    slk::Save(res, res_builder);
    return;
  }
  if (!current_main_uuid.has_value() || req.main_uuid != current_main_uuid) [[unlikely]] {
    LogWrongMain(current_main_uuid, req.main_uuid, storage::replication::SnapshotReq::kType.name);
    storage::replication::ForceResetStorageRes res{false, 0};
    slk::Save(res, res_builder);
    return;
  }

  storage::replication::Decoder decoder(req_reader);

  auto *storage = static_cast<storage::InMemoryStorage *>(db_acc->get()->storage());

  auto storage_guard = std::unique_lock{storage->main_lock_};

  // Clear the database
  storage->vertices_.clear();
  storage->edges_.clear();
  storage->commit_log_.reset();
  storage->commit_log_.emplace();

  storage->constraints_.existence_constraints_ = std::make_unique<storage::ExistenceConstraints>();
  storage->constraints_.unique_constraints_ = std::make_unique<storage::InMemoryUniqueConstraints>();
  storage->indices_.label_index_ = std::make_unique<storage::InMemoryLabelIndex>();
  storage->indices_.label_property_index_ = std::make_unique<storage::InMemoryLabelPropertyIndex>();

  // Fine since we will force push when reading from WAL just random epoch with 0 timestamp, as it should be if it
  // acted as MAIN before
  storage->repl_storage_state_.epoch_.SetEpoch(std::string(utils::UUID{}));
  storage->repl_storage_state_.last_commit_timestamp_ = 0;

  storage->repl_storage_state_.history.clear();
  storage->vertex_id_ = 0;
  storage->edge_id_ = 0;
  storage->timestamp_ = storage::kTimestampInitialId;

  storage->CollectGarbage<true>(std::move(storage_guard), false);
  storage->vertices_.run_gc();
  storage->edges_.run_gc();

  storage::replication::ForceResetStorageRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()};
  slk::Save(res, res_builder);

  spdlog::trace("Deleting old snapshot files.");
  // Delete other durability files
  auto snapshot_files = storage::durability::GetSnapshotFiles(storage->recovery_.snapshot_directory_, storage->uuid_);
  for (const auto &[path, uuid, _] : snapshot_files) {
    spdlog::trace("Deleting snapshot file {}", path);
    storage->file_retainer_.DeleteFile(path);
  }

  spdlog::trace("Deleting old WAL files.");
  auto wal_files = storage::durability::GetWalFiles(storage->recovery_.wal_directory_, storage->uuid_);
  if (wal_files) {
    for (const auto &wal_file : *wal_files) {
      spdlog::trace("Deleting WAL file {}", wal_file.path);
      storage->file_retainer_.DeleteFile(wal_file.path);
    }

    storage->wal_file_.reset();
  }
}

void InMemoryReplicationHandlers::WalFilesHandler(dbms::DbmsHandler *dbms_handler,
                                                  const std::optional<utils::UUID> &current_main_uuid,
                                                  slk::Reader *req_reader, slk::Builder *res_builder) {
@ -764,6 +840,20 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
        transaction->DeleteLabelPropertyIndexStats(storage->NameToLabel(info.label));
        break;
      }
      case WalDeltaData::Type::EDGE_INDEX_CREATE: {
        spdlog::trace(" Create edge index on :{}", delta.operation_edge_type.edge_type);
        auto *transaction = get_transaction(timestamp, kUniqueAccess);
        if (transaction->CreateIndex(storage->NameToEdgeType(delta.operation_label.label)).HasError())
          throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
        break;
      }
      case WalDeltaData::Type::EDGE_INDEX_DROP: {
        spdlog::trace(" Drop edge index on :{}", delta.operation_edge_type.edge_type);
        auto *transaction = get_transaction(timestamp, kUniqueAccess);
        if (transaction->DropIndex(storage->NameToEdgeType(delta.operation_label.label)).HasError())
          throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
        break;
      }
      case WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE: {
        spdlog::trace(" Create existence constraint on :{} ({})", delta.operation_label_property.label,
                      delta.operation_label_property.property);
@ -48,6 +48,9 @@ class InMemoryReplicationHandlers {

  static void SwapMainUUIDHandler(dbms::DbmsHandler *dbms_handler, replication::RoleReplicaData &role_replica_data,
                                  slk::Reader *req_reader, slk::Builder *res_builder);
  static void ForceResetStorageHandler(dbms::DbmsHandler *dbms_handler,
                                       const std::optional<utils::UUID> &current_main_uuid, slk::Reader *req_reader,
                                       slk::Builder *res_builder);

  static void LoadWal(storage::InMemoryStorage *storage, storage::replication::Decoder *decoder);
@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -22,113 +22,15 @@
#include "utils/message.hpp"
#include "utils/string.hpp"

namespace {
constexpr std::string_view delimiter = ":";
}  // namespace

namespace memgraph::io::network {

Endpoint::IpFamily Endpoint::GetIpFamily(const std::string &address) {
  in_addr addr4;
  in6_addr addr6;
  int ipv4_result = inet_pton(AF_INET, address.c_str(), &addr4);
  int ipv6_result = inet_pton(AF_INET6, address.c_str(), &addr6);
  if (ipv4_result == 1) {
    return IpFamily::IP4;
  } else if (ipv6_result == 1) {
    return IpFamily::IP6;
  } else {
    return IpFamily::NONE;
  }
}

std::optional<std::pair<std::string, uint16_t>> Endpoint::ParseSocketOrIpAddress(
    const std::string &address, const std::optional<uint16_t> default_port) {
  /// expected address format:
  ///   - "ip_address:port_number"
  ///   - "ip_address"
  /// We parse the address first. If it's an IP address, a default port must
  // be given, or we return nullopt. If it's a socket address, we try to parse
  // it into an ip address and a port number; even if a default port is given,
  // it won't be used, as we expect that it is given in the address string.
  const std::string delimiter = ":";
  std::string ip_address;

  std::vector<std::string> parts = utils::Split(address, delimiter);
  if (parts.size() == 1) {
    if (default_port) {
      if (GetIpFamily(address) == IpFamily::NONE) {
        return std::nullopt;
      }
      return std::pair{address, *default_port};
    }
  } else if (parts.size() == 2) {
    ip_address = std::move(parts[0]);
    if (GetIpFamily(ip_address) == IpFamily::NONE) {
      return std::nullopt;
    }
    int64_t int_port{0};
    try {
      int_port = utils::ParseInt(parts[1]);
    } catch (utils::BasicException &e) {
      spdlog::error(utils::MessageWithLink("Invalid port number {}.", parts[1], "https://memgr.ph/ports"));
      return std::nullopt;
    }
    if (int_port < 0) {
      spdlog::error(utils::MessageWithLink("Invalid port number {}. The port number must be a positive integer.",
                                           int_port, "https://memgr.ph/ports"));
      return std::nullopt;
    }
    if (int_port > std::numeric_limits<uint16_t>::max()) {
      spdlog::error(utils::MessageWithLink("Invalid port number. The port number exceedes the maximum possible size.",
                                           "https://memgr.ph/ports"));
      return std::nullopt;
    }

    return std::pair{ip_address, static_cast<uint16_t>(int_port)};
  }

  return std::nullopt;
}

std::optional<std::pair<std::string, uint16_t>> Endpoint::ParseHostname(
    const std::string &address, const std::optional<uint16_t> default_port = {}) {
  const std::string delimiter = ":";
  std::string ip_address;
  std::vector<std::string> parts = utils::Split(address, delimiter);
  if (parts.size() == 1) {
    if (default_port) {
      if (!IsResolvableAddress(address, *default_port)) {
        return std::nullopt;
      }
      return std::pair{address, *default_port};
    }
  } else if (parts.size() == 2) {
    int64_t int_port{0};
    auto hostname = std::move(parts[0]);
    try {
      int_port = utils::ParseInt(parts[1]);
    } catch (utils::BasicException &e) {
      spdlog::error(utils::MessageWithLink("Invalid port number {}.", parts[1], "https://memgr.ph/ports"));
      return std::nullopt;
    }
    if (int_port < 0) {
      spdlog::error(utils::MessageWithLink("Invalid port number {}. The port number must be a positive integer.",
                                           int_port, "https://memgr.ph/ports"));
      return std::nullopt;
    }
    if (int_port > std::numeric_limits<uint16_t>::max()) {
      spdlog::error(utils::MessageWithLink("Invalid port number. The port number exceedes the maximum possible size.",
                                           "https://memgr.ph/ports"));
      return std::nullopt;
    }
    if (IsResolvableAddress(hostname, static_cast<uint16_t>(int_port))) {
      return std::pair{hostname, static_cast<u_int16_t>(int_port)};
    }
  }
  return std::nullopt;
}

std::string Endpoint::SocketAddress() const {
  auto ip_address = address.empty() ? "EMPTY" : address;
  return ip_address + ":" + std::to_string(port);
}
// NOLINTNEXTLINE
Endpoint::Endpoint(needs_resolving_t, std::string hostname, uint16_t port)
    : address(std::move(hostname)), port(port), family{GetIpFamily(address)} {}

Endpoint::Endpoint(std::string ip_address, uint16_t port) : address(std::move(ip_address)), port(port) {
  IpFamily ip_family = GetIpFamily(address);
@ -138,9 +40,23 @@ Endpoint::Endpoint(std::string ip_address, uint16_t port) : address(std::move(ip
  family = ip_family;
}

// NOLINTNEXTLINE
Endpoint::Endpoint(needs_resolving_t, std::string hostname, uint16_t port)
    : address(std::move(hostname)), port(port), family{GetIpFamily(address)} {}
std::string Endpoint::SocketAddress() const { return fmt::format("{}:{}", address, port); }

Endpoint::IpFamily Endpoint::GetIpFamily(std::string_view address) {
  // Ensure null-terminated
  auto const tmp = std::string(address);
  in_addr addr4;
  in6_addr addr6;
  int ipv4_result = inet_pton(AF_INET, tmp.c_str(), &addr4);
  int ipv6_result = inet_pton(AF_INET6, tmp.c_str(), &addr6);
  if (ipv4_result == 1) {
    return IpFamily::IP4;
  }
  if (ipv6_result == 1) {
    return IpFamily::IP6;
  }
  return IpFamily::NONE;
}

std::ostream &operator<<(std::ostream &os, const Endpoint &endpoint) {
  // no need to cover the IpFamily::NONE case, as you can't even construct an
@ -153,35 +69,73 @@ std::ostream &operator<<(std::ostream &os, const Endpoint &endpoint) {
  return os << endpoint.address << ":" << endpoint.port;
}

bool Endpoint::IsResolvableAddress(const std::string &address, uint16_t port) {
// NOTE: Intentional copy to ensure null-terminated string
bool Endpoint::IsResolvableAddress(std::string_view address, uint16_t port) {
  addrinfo hints{
      .ai_flags = AI_PASSIVE,
      .ai_family = AF_UNSPEC,     // IPv4 and IPv6
      .ai_socktype = SOCK_STREAM  // TCP socket
  };
  addrinfo *info = nullptr;
  auto status = getaddrinfo(address.c_str(), std::to_string(port).c_str(), &hints, &info);
  auto status = getaddrinfo(std::string(address).c_str(), std::to_string(port).c_str(), &hints, &info);
  if (info) freeaddrinfo(info);
  return status == 0;
}

std::optional<std::pair<std::string, uint16_t>> Endpoint::ParseSocketOrAddress(
    const std::string &address, const std::optional<uint16_t> default_port) {
  const std::string delimiter = ":";
  std::vector<std::string> parts = utils::Split(address, delimiter);
  if (parts.size() == 1) {
    if (GetIpFamily(address) == IpFamily::NONE) {
      return ParseHostname(address, default_port);
    }
    return ParseSocketOrIpAddress(address, default_port);
std::optional<ParsedAddress> Endpoint::ParseSocketOrAddress(std::string_view address,
                                                            std::optional<uint16_t> default_port) {
  auto const parts = utils::SplitView(address, delimiter);

  if (parts.size() > 2) {
    return std::nullopt;
  }
  if (parts.size() == 2) {
    if (GetIpFamily(parts[0]) == IpFamily::NONE) {
      return ParseHostname(address, default_port);

  auto const port = [default_port, &parts]() -> std::optional<uint16_t> {
    if (parts.size() == 2) {
      return static_cast<uint16_t>(utils::ParseInt(parts[1]));
    }
    return ParseSocketOrIpAddress(address, default_port);
    return default_port;
  }();

  if (!ValidatePort(port)) {
    return std::nullopt;
  }
  return std::nullopt;

  auto const addr = [address, &parts]() {
    if (parts.size() == 2) {
      return parts[0];
    }
    return address;
  }();

  if (GetIpFamily(addr) == IpFamily::NONE) {
    if (IsResolvableAddress(addr, *port)) {  // NOLINT
      return std::pair{addr, *port};         // NOLINT
    }
    return std::nullopt;
  }

  return std::pair{addr, *port};  // NOLINT
}

auto Endpoint::ValidatePort(std::optional<uint16_t> port) -> bool {
  if (!port) {
    return false;
  }

  if (port < 0) {
    spdlog::error(utils::MessageWithLink("Invalid port number {}. The port number must be a positive integer.", *port,
                                         "https://memgr.ph/ports"));
    return false;
  }

  if (port > std::numeric_limits<uint16_t>::max()) {
    spdlog::error(utils::MessageWithLink("Invalid port number. The port number exceedes the maximum possible size.",
                                         "https://memgr.ph/ports"));
    return false;
  }

  return true;
}

}  // namespace memgraph::io::network
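The rewritten ParseSocketOrAddress above accepts "ip", "ip:port", or "hostname:port" and routes the port through ValidatePort. A small hedged usage sketch with invented values:

// Illustrative usage only; the address and ports are made up.
auto const parsed = memgraph::io::network::Endpoint::ParseSocketOrAddress("127.0.0.1:7687");
if (parsed) {
  auto const [addr, port] = *parsed;  // addr == "127.0.0.1", port == 7687
}
auto const with_default = memgraph::io::network::Endpoint::ParseSocketOrAddress("localhost", 7688);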
@ -19,11 +19,8 @@

namespace memgraph::io::network {

/**
 * This class represents a network endpoint that is used in Socket.
 * It is used when connecting to an address and to get the current
 * connection address.
 */
using ParsedAddress = std::pair<std::string_view, uint16_t>;

struct Endpoint {
  static const struct needs_resolving_t {
  } needs_resolving;
@ -31,59 +28,35 @@ struct Endpoint {
  Endpoint() = default;
  Endpoint(std::string ip_address, uint16_t port);
  Endpoint(needs_resolving_t, std::string hostname, uint16_t port);

  Endpoint(Endpoint const &) = default;
  Endpoint(Endpoint &&) noexcept = default;

  Endpoint &operator=(Endpoint const &) = default;
  Endpoint &operator=(Endpoint &&) noexcept = default;

  ~Endpoint() = default;

  enum class IpFamily : std::uint8_t { NONE, IP4, IP6 };

  std::string SocketAddress() const;
  static std::optional<ParsedAddress> ParseSocketOrAddress(std::string_view address,
                                                           std::optional<uint16_t> default_port = {});

  bool operator==(const Endpoint &other) const = default;
  friend std::ostream &operator<<(std::ostream &os, const Endpoint &endpoint);
  std::string SocketAddress() const;

  std::string address;
  uint16_t port{0};
  IpFamily family{IpFamily::NONE};

  static std::optional<std::pair<std::string, uint16_t>> ParseSocketOrAddress(const std::string &address,
                                                                              std::optional<uint16_t> default_port);
  bool operator==(const Endpoint &other) const = default;
  friend std::ostream &operator<<(std::ostream &os, const Endpoint &endpoint);

  /**
   * Tries to parse the given string as either a socket address or ip address.
   * Expected address format:
   * - "ip_address:port_number"
   * - "ip_address"
   * We parse the address first. If it's an IP address, a default port must
   * be given, or we return nullopt. If it's a socket address, we try to parse
   * it into an ip address and a port number; even if a default port is given,
   * it won't be used, as we expect that it is given in the address string.
   */
  static std::optional<std::pair<std::string, uint16_t>> ParseSocketOrIpAddress(
      const std::string &address, std::optional<uint16_t> default_port = {});
 private:
  static IpFamily GetIpFamily(std::string_view address);

  /**
   * Tries to parse given string as either socket address or hostname.
   * Expected address format:
   * - "hostname:port_number"
   * - "hostname"
   * After we parse hostname and port we try to resolve the hostname into an ip_address.
   */
  static std::optional<std::pair<std::string, uint16_t>> ParseHostname(const std::string &address,
                                                                       std::optional<uint16_t> default_port);
  static bool IsResolvableAddress(std::string_view address, uint16_t port);

  static IpFamily GetIpFamily(const std::string &address);

  static bool IsResolvableAddress(const std::string &address, uint16_t port);

  /**
   * Tries to resolve hostname to its corresponding IP address.
   * Given a DNS hostname, this function performs resolution and returns
   * the IP address associated with the hostname.
   */
  static std::string ResolveHostnameIntoIpAddress(const std::string &address, uint16_t port);
  static auto ValidatePort(std::optional<uint16_t> port) -> bool;
};

} // namespace memgraph::io::network
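
As a quick orientation for the reworked declarations above, a minimal usage sketch follows; it is not part of this commit, the include path and the literal addresses are assumptions, and only ParseSocketOrAddress and ParsedAddress from the header above are relied on.

// Hedged sketch, not part of this diff.
#include "io/network/endpoint.hpp"  // assumed include path

void ParseExamples() {
  using memgraph::io::network::Endpoint;
  // A socket address carries its own port, so the default port is not consulted.
  auto full = Endpoint::ParseSocketOrAddress("127.0.0.1:7687");
  // A bare address needs the default port, otherwise nullopt is returned.
  auto bare = Endpoint::ParseSocketOrAddress("127.0.0.1", 7687);
  if (full && bare) {
    // Each value is a ParsedAddress, i.e. {std::string_view address, uint16_t port}.
  }
}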

@ -334,7 +334,8 @@ int main(int argc, char **argv) {
      .salient.items = {.properties_on_edges = FLAGS_storage_properties_on_edges,
                        .enable_schema_metadata = FLAGS_storage_enable_schema_metadata},
      .salient.storage_mode = memgraph::flags::ParseStorageMode()};

  spdlog::info("config recover on startup {}, flags {} {}", db_config.durability.recover_on_startup,
               FLAGS_storage_recover_on_startup, FLAGS_data_recovery_on_startup);
  memgraph::utils::Scheduler jemalloc_purge_scheduler;
  jemalloc_purge_scheduler.Run("Jemalloc purge", std::chrono::seconds(FLAGS_storage_gc_cycle_sec),
                               [] { memgraph::memory::PurgeUnusedMemory(); });

@ -122,11 +122,11 @@ static bool my_commit(extent_hooks_t *extent_hooks, void *addr, size_t size, siz

  [[maybe_unused]] auto blocker = memgraph::utils::MemoryTracker::OutOfMemoryExceptionBlocker{};
  if (GetQueriesMemoryControl().IsThreadTracked()) [[unlikely]] {
    bool ok = GetQueriesMemoryControl().TrackAllocOnCurrentThread(length);
    [[maybe_unused]] bool ok = GetQueriesMemoryControl().TrackAllocOnCurrentThread(length);
    DMG_ASSERT(ok);
  }

  auto ok = memgraph::utils::total_memory_tracker.Alloc(static_cast<int64_t>(length));
  [[maybe_unused]] auto ok = memgraph::utils::total_memory_tracker.Alloc(static_cast<int64_t>(length));
  DMG_ASSERT(ok);

  return false;

@ -416,7 +416,7 @@ memgraph::storage::PropertyValue StringToValue(const std::string &str, const std
std::string GetIdSpace(const std::string &type) {
  // The format of this field is as follows:
  // [START_|END_]ID[(<id_space>)]
  std::regex format(R"(^(START_|END_)?ID(\(([^\(\)]+)\))?$)", std::regex::extended);
  static std::regex format(R"(^(START_|END_)?ID(\(([^\(\)]+)\))?$)", std::regex::extended);
  std::smatch res;
  if (!std::regex_match(type, res, format))
    throw LoadException(

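For orientation only (not part of the diff): the header tokens this now statically constructed regex accepts are ID, START_ID and END_ID, each optionally carrying an id space in parentheses, e.g. ID, START_ID(person), END_ID(person); the id space name here is purely illustrative.
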
@ -371,6 +371,62 @@ class VerticesIterable final {
  }
};

class EdgesIterable final {
  std::variant<storage::EdgesIterable, std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
                                                          utils::Allocator<EdgeAccessor>> *>
      iterable_;

 public:
  class Iterator final {
    std::variant<storage::EdgesIterable::Iterator,
                 std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
                                    utils::Allocator<EdgeAccessor>>::iterator>
        it_;

   public:
    explicit Iterator(storage::EdgesIterable::Iterator it) : it_(std::move(it)) {}
    explicit Iterator(std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
                                         utils::Allocator<EdgeAccessor>>::iterator it)
        : it_(it) {}

    EdgeAccessor operator*() const {
      return std::visit([](auto &it_) { return EdgeAccessor(*it_); }, it_);
    }

    Iterator &operator++() {
      std::visit([](auto &it_) { ++it_; }, it_);
      return *this;
    }

    bool operator==(const Iterator &other) const { return it_ == other.it_; }

    bool operator!=(const Iterator &other) const { return !(other == *this); }
  };

  explicit EdgesIterable(storage::EdgesIterable iterable) : iterable_(std::move(iterable)) {}
  explicit EdgesIterable(std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
                                            utils::Allocator<EdgeAccessor>> *edges)
      : iterable_(edges) {}

  Iterator begin() {
    return std::visit(
        memgraph::utils::Overloaded{
            [](storage::EdgesIterable &iterable_) { return Iterator(iterable_.begin()); },
            [](std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
                                  utils::Allocator<EdgeAccessor>> *iterable_) { return Iterator(iterable_->begin()); }},
        iterable_);
  }

  Iterator end() {
    return std::visit(
        memgraph::utils::Overloaded{
            [](storage::EdgesIterable &iterable_) { return Iterator(iterable_.end()); },
            [](std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
                                  utils::Allocator<EdgeAccessor>> *iterable_) { return Iterator(iterable_->end()); }},
        iterable_);
  }
};

class DbAccessor final {
  storage::Storage::Accessor *accessor_;

@ -416,6 +472,10 @@ class DbAccessor final {
    return VerticesIterable(accessor_->Vertices(label, property, lower, upper, view));
  }

  EdgesIterable Edges(storage::View view, storage::EdgeTypeId edge_type) {
    return EdgesIterable(accessor_->Edges(edge_type, view));
  }

  VertexAccessor InsertVertex() { return VertexAccessor(accessor_->CreateVertex()); }

  storage::Result<EdgeAccessor> InsertEdge(VertexAccessor *from, VertexAccessor *to,
@ -572,6 +632,8 @@ class DbAccessor final {
    return accessor_->LabelPropertyIndexExists(label, prop);
  }

  bool EdgeTypeIndexExists(storage::EdgeTypeId edge_type) const { return accessor_->EdgeTypeIndexExists(edge_type); }

  std::optional<storage::LabelIndexStats> GetIndexStats(const storage::LabelId &label) const {
    return accessor_->GetIndexStats(label);
  }
@ -638,6 +700,10 @@ class DbAccessor final {
    return accessor_->CreateIndex(label, property);
  }

  utils::BasicResult<storage::StorageIndexDefinitionError, void> CreateIndex(storage::EdgeTypeId edge_type) {
    return accessor_->CreateIndex(edge_type);
  }

  utils::BasicResult<storage::StorageIndexDefinitionError, void> DropIndex(storage::LabelId label) {
    return accessor_->DropIndex(label);
  }
@ -647,6 +713,10 @@ class DbAccessor final {
    return accessor_->DropIndex(label, property);
  }

  utils::BasicResult<storage::StorageIndexDefinitionError, void> DropIndex(storage::EdgeTypeId edge_type) {
    return accessor_->DropIndex(edge_type);
  }

  utils::BasicResult<storage::StorageExistenceConstraintDefinitionError, void> CreateExistenceConstraint(
      storage::LabelId label, storage::PropertyId property) {
    return accessor_->CreateExistenceConstraint(label, property);

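Because EdgesIterable above exposes begin() and end(), the new DbAccessor::Edges entry point can be consumed with a plain range-for; the sketch below is illustrative only and not part of this commit (the function name and the counting logic are assumptions).

// Hedged sketch, not part of this diff.
int64_t CountEdgesOfType(memgraph::query::DbAccessor &dba, memgraph::storage::EdgeTypeId edge_type) {
  int64_t count = 0;
  for (auto edge : dba.Edges(memgraph::storage::View::OLD, edge_type)) {
    (void)edge;  // each element is an EdgeAccessor of the requested edge type
    ++count;
  }
  return count;
}
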
@ -242,6 +242,10 @@ void DumpLabelIndex(std::ostream *os, query::DbAccessor *dba, const storage::Lab
  *os << "CREATE INDEX ON :" << EscapeName(dba->LabelToName(label)) << ";";
}

void DumpEdgeTypeIndex(std::ostream *os, query::DbAccessor *dba, const storage::EdgeTypeId edge_type) {
  *os << "CREATE EDGE INDEX ON :" << EscapeName(dba->EdgeTypeToName(edge_type)) << ";";
}

void DumpLabelPropertyIndex(std::ostream *os, query::DbAccessor *dba, storage::LabelId label,
                            storage::PropertyId property) {
  *os << "CREATE INDEX ON :" << EscapeName(dba->LabelToName(label)) << "(" << EscapeName(dba->PropertyToName(property))
@ -297,7 +301,9 @@ PullPlanDump::PullPlanDump(DbAccessor *dba, dbms::DatabaseAccess db_acc)
          // Internal index cleanup
          CreateInternalIndexCleanupPullChunk(),
          // Dump all triggers
          CreateTriggersPullChunk()} {}
          CreateTriggersPullChunk(),
          // Dump all edge-type indices
          CreateEdgeTypeIndicesPullChunk()} {}

bool PullPlanDump::Pull(AnyStream *stream, std::optional<int> n) {
  // Iterate all functions that stream some results.
@ -352,6 +358,33 @@ PullPlanDump::PullChunk PullPlanDump::CreateLabelIndicesPullChunk() {
  };
}

PullPlanDump::PullChunk PullPlanDump::CreateEdgeTypeIndicesPullChunk() {
  // Dump all edge-type indices
  return [this, global_index = 0U](AnyStream *stream, std::optional<int> n) mutable -> std::optional<size_t> {
    // Delay the construction of indices vectors
    if (!indices_info_) {
      indices_info_.emplace(dba_->ListAllIndices());
    }
    const auto &edge_type = indices_info_->edge_type;

    size_t local_counter = 0;
    while (global_index < edge_type.size() && (!n || local_counter < *n)) {
      std::ostringstream os;
      DumpEdgeTypeIndex(&os, dba_, edge_type[global_index]);
      stream->Result({TypedValue(os.str())});

      ++global_index;
      ++local_counter;
    }

    if (global_index == edge_type.size()) {
      return local_counter;
    }

    return std::nullopt;
  };
}

PullPlanDump::PullChunk PullPlanDump::CreateLabelPropertyIndicesPullChunk() {
  return [this, global_index = 0U](AnyStream *stream, std::optional<int> n) mutable -> std::optional<size_t> {
    // Delay the construction of indices vectors

@ -63,5 +63,6 @@ struct PullPlanDump {
  PullChunk CreateDropInternalIndexPullChunk();
  PullChunk CreateInternalIndexCleanupPullChunk();
  PullChunk CreateTriggersPullChunk();
  PullChunk CreateEdgeTypeIndicesPullChunk();
};
}  // namespace memgraph::query

@ -186,6 +186,9 @@ constexpr utils::TypeInfo query::ProfileQuery::kType{utils::TypeId::AST_PROFILE_

constexpr utils::TypeInfo query::IndexQuery::kType{utils::TypeId::AST_INDEX_QUERY, "IndexQuery", &query::Query::kType};

constexpr utils::TypeInfo query::EdgeIndexQuery::kType{utils::TypeId::AST_EDGE_INDEX_QUERY, "EdgeIndexQuery",
                                                       &query::Query::kType};

constexpr utils::TypeInfo query::Create::kType{utils::TypeId::AST_CREATE, "Create", &query::Clause::kType};

constexpr utils::TypeInfo query::CallProcedure::kType{utils::TypeId::AST_CALL_PROCEDURE, "CallProcedure",
@ -21,6 +21,7 @@
#include "query/interpret/awesome_memgraph_functions.hpp"
#include "query/typed_value.hpp"
#include "storage/v2/property_value.hpp"
#include "utils/exceptions.hpp"
#include "utils/typeinfo.hpp"

namespace memgraph::query {
@ -2223,6 +2224,34 @@ class IndexQuery : public memgraph::query::Query {
  friend class AstStorage;
};

class EdgeIndexQuery : public memgraph::query::Query {
 public:
  static const utils::TypeInfo kType;
  const utils::TypeInfo &GetTypeInfo() const override { return kType; }

  enum class Action { CREATE, DROP };

  EdgeIndexQuery() = default;

  DEFVISITABLE(QueryVisitor<void>);

  memgraph::query::EdgeIndexQuery::Action action_;
  memgraph::query::EdgeTypeIx edge_type_;

  EdgeIndexQuery *Clone(AstStorage *storage) const override {
    EdgeIndexQuery *object = storage->Create<EdgeIndexQuery>();
    object->action_ = action_;
    object->edge_type_ = storage->GetEdgeTypeIx(edge_type_.name);
    return object;
  }

 protected:
  EdgeIndexQuery(Action action, EdgeTypeIx edge_type) : action_(action), edge_type_(edge_type) {}

 private:
  friend class AstStorage;
};

class Create : public memgraph::query::Clause {
 public:
  static const utils::TypeInfo kType;
@ -3035,8 +3064,6 @@ class ReplicationQuery : public memgraph::query::Query {

  enum class SyncMode { SYNC, ASYNC };

  enum class ReplicaState { READY, REPLICATING, RECOVERY, MAYBE_BEHIND, DIVERGED_FROM_MAIN };

  ReplicationQuery() = default;

  DEFVISITABLE(QueryVisitor<void>);
@ -3588,7 +3615,7 @@ class PatternComprehension : public memgraph::query::Expression {
  bool Accept(HierarchicalTreeVisitor &visitor) override {
    if (visitor.PreVisit(*this)) {
      if (variable_) {
        variable_->Accept(visitor);
        throw utils::NotYetImplemented("Variable in pattern comprehension.");
      }
      pattern_->Accept(visitor);
      if (filter_) {
@ -3617,7 +3644,8 @@ class PatternComprehension : public memgraph::query::Expression {
  int32_t symbol_pos_{-1};

  PatternComprehension *Clone(AstStorage *storage) const override {
    PatternComprehension *object = storage->Create<PatternComprehension>();
    auto *object = storage->Create<PatternComprehension>();
    object->variable_ = variable_ ? variable_->Clone(storage) : nullptr;
    object->pattern_ = pattern_ ? pattern_->Clone(storage) : nullptr;
    object->filter_ = filter_ ? filter_->Clone(storage) : nullptr;
    object->resultExpr_ = resultExpr_ ? resultExpr_->Clone(storage) : nullptr;
@ -3627,7 +3655,8 @@ class PatternComprehension : public memgraph::query::Expression {
  }

 protected:
  PatternComprehension(Identifier *variable, Pattern *pattern) : variable_(variable), pattern_(pattern) {}
  PatternComprehension(Identifier *variable, Pattern *pattern, Where *filter, Expression *resultExpr)
      : variable_(variable), pattern_(pattern), filter_(filter), resultExpr_(resultExpr) {}

 private:
  friend class AstStorage;

@ -82,6 +82,7 @@ class AuthQuery;
class ExplainQuery;
class ProfileQuery;
class IndexQuery;
class EdgeIndexQuery;
class DatabaseInfoQuery;
class SystemInfoQuery;
class ConstraintQuery;
@ -143,11 +144,11 @@ class ExpressionVisitor

template <class TResult>
class QueryVisitor
    : public utils::Visitor<TResult, CypherQuery, ExplainQuery, ProfileQuery, IndexQuery, AuthQuery, DatabaseInfoQuery,
                            SystemInfoQuery, ConstraintQuery, DumpQuery, ReplicationQuery, LockPathQuery,
                            FreeMemoryQuery, TriggerQuery, IsolationLevelQuery, CreateSnapshotQuery, StreamQuery,
                            SettingQuery, VersionQuery, ShowConfigQuery, TransactionQueueQuery, StorageModeQuery,
                            AnalyzeGraphQuery, MultiDatabaseQuery, ShowDatabasesQuery, EdgeImportModeQuery,
                            CoordinatorQuery> {};
    : public utils::Visitor<TResult, CypherQuery, ExplainQuery, ProfileQuery, IndexQuery, EdgeIndexQuery, AuthQuery,
                            DatabaseInfoQuery, SystemInfoQuery, ConstraintQuery, DumpQuery, ReplicationQuery,
                            LockPathQuery, FreeMemoryQuery, TriggerQuery, IsolationLevelQuery, CreateSnapshotQuery,
                            StreamQuery, SettingQuery, VersionQuery, ShowConfigQuery, TransactionQueueQuery,
                            StorageModeQuery, AnalyzeGraphQuery, MultiDatabaseQuery, ShowDatabasesQuery,
                            EdgeImportModeQuery, CoordinatorQuery> {};

}  // namespace memgraph::query

@ -265,6 +265,27 @@ antlrcpp::Any CypherMainVisitor::visitDropIndex(MemgraphCypher::DropIndexContext
  return index_query;
}

antlrcpp::Any CypherMainVisitor::visitEdgeIndexQuery(MemgraphCypher::EdgeIndexQueryContext *ctx) {
  MG_ASSERT(ctx->children.size() == 1, "EdgeIndexQuery should have exactly one child!");
  auto *index_query = std::any_cast<EdgeIndexQuery *>(ctx->children[0]->accept(this));
  query_ = index_query;
  return index_query;
}

antlrcpp::Any CypherMainVisitor::visitCreateEdgeIndex(MemgraphCypher::CreateEdgeIndexContext *ctx) {
  auto *index_query = storage_->Create<EdgeIndexQuery>();
  index_query->action_ = EdgeIndexQuery::Action::CREATE;
  index_query->edge_type_ = AddEdgeType(std::any_cast<std::string>(ctx->labelName()->accept(this)));
  return index_query;
}

antlrcpp::Any CypherMainVisitor::visitDropEdgeIndex(MemgraphCypher::DropEdgeIndexContext *ctx) {
  auto *index_query = storage_->Create<EdgeIndexQuery>();
  index_query->action_ = EdgeIndexQuery::Action::DROP;
  index_query->edge_type_ = AddEdgeType(std::any_cast<std::string>(ctx->labelName()->accept(this)));
  return index_query;
}

antlrcpp::Any CypherMainVisitor::visitAuthQuery(MemgraphCypher::AuthQueryContext *ctx) {
  MG_ASSERT(ctx->children.size() == 1, "AuthQuery should have exactly one child!");
  auto *auth_query = std::any_cast<AuthQuery *>(ctx->children[0]->accept(this));

@ -148,6 +148,11 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
   */
  antlrcpp::Any visitIndexQuery(MemgraphCypher::IndexQueryContext *ctx) override;

  /**
   * @return IndexQuery*
   */
  antlrcpp::Any visitEdgeIndexQuery(MemgraphCypher::EdgeIndexQueryContext *ctx) override;

  /**
   * @return ExplainQuery*
   */
@ -499,6 +504,16 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
   */
  antlrcpp::Any visitDropIndex(MemgraphCypher::DropIndexContext *ctx) override;

  /**
   * @return EdgeIndexQuery*
   */
  antlrcpp::Any visitCreateEdgeIndex(MemgraphCypher::CreateEdgeIndexContext *ctx) override;

  /**
   * @return DropEdgeIndex*
   */
  antlrcpp::Any visitDropEdgeIndex(MemgraphCypher::DropEdgeIndexContext *ctx) override;

  /**
   * @return AuthQuery*
   */

@ -133,6 +133,7 @@ symbolicName : UnescapedSymbolicName

query : cypherQuery
      | indexQuery
      | edgeIndexQuery
      | explainQuery
      | profileQuery
      | databaseInfoQuery
@ -527,3 +528,9 @@ showDatabase : SHOW DATABASE ;
showDatabases : SHOW DATABASES ;

edgeImportModeQuery : EDGE IMPORT MODE ( ACTIVE | INACTIVE ) ;

createEdgeIndex : CREATE EDGE INDEX ON ':' labelName ;

dropEdgeIndex : DROP EDGE INDEX ON ':' labelName ;

edgeIndexQuery : createEdgeIndex | dropEdgeIndex ;
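
In concrete terms, the grammar rules above admit statements of the following shape (the edge type name KNOWS is illustrative only, not taken from this commit):

CREATE EDGE INDEX ON :KNOWS;
DROP EDGE INDEX ON :KNOWS;
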
@ -27,6 +27,8 @@ class PrivilegeExtractor : public QueryVisitor<void>, public HierarchicalTreeVis
|
||||
|
||||
void Visit(IndexQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }
|
||||
|
||||
void Visit(EdgeIndexQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }
|
||||
|
||||
void Visit(AnalyzeGraphQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }
|
||||
|
||||
void Visit(AuthQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::AUTH); }
|
||||
|
@ -53,6 +53,8 @@ class Symbol {
|
||||
bool user_declared() const { return user_declared_; }
|
||||
int token_position() const { return token_position_; }
|
||||
|
||||
bool IsSymbolAnonym() const { return name_.substr(0U, 4U) == "anon"; }
|
||||
|
||||
std::string name_;
|
||||
int64_t position_;
|
||||
bool user_declared_{true};
|
||||
|
@ -721,6 +721,32 @@ bool SymbolGenerator::PostVisit(EdgeAtom &) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool SymbolGenerator::PreVisit(PatternComprehension &pc) {
|
||||
auto &scope = scopes_.back();
|
||||
|
||||
if (scope.in_set_property) {
|
||||
throw utils::NotYetImplemented("Pattern Comprehension cannot be used within SET clause.!");
|
||||
}
|
||||
|
||||
if (scope.in_with) {
|
||||
throw utils::NotYetImplemented("Pattern Comprehension cannot be used within WITH!");
|
||||
}
|
||||
|
||||
if (scope.in_reduce) {
|
||||
throw utils::NotYetImplemented("Pattern Comprehension cannot be used within REDUCE!");
|
||||
}
|
||||
|
||||
if (scope.num_if_operators) {
|
||||
throw utils::NotYetImplemented("IF operator cannot be used with Pattern Comprehension!");
|
||||
}
|
||||
|
||||
const auto &symbol = CreateAnonymousSymbol();
|
||||
pc.MapTo(symbol);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool SymbolGenerator::PostVisit(PatternComprehension & /*pc*/) { return true; }
|
||||
|
||||
void SymbolGenerator::VisitWithIdentifiers(Expression *expr, const std::vector<Identifier *> &identifiers) {
|
||||
auto &scope = scopes_.back();
|
||||
std::vector<std::pair<std::optional<Symbol>, Identifier *>> prev_symbols;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -97,6 +97,8 @@ class SymbolGenerator : public HierarchicalTreeVisitor {
|
||||
bool PostVisit(NodeAtom &) override;
|
||||
bool PreVisit(EdgeAtom &) override;
|
||||
bool PostVisit(EdgeAtom &) override;
|
||||
bool PreVisit(PatternComprehension &) override;
|
||||
bool PostVisit(PatternComprehension &) override;
|
||||
|
||||
private:
|
||||
// Scope stores the state of where we are when visiting the AST and a map of
|
||||
|
@ -297,16 +297,6 @@ inline auto convertToReplicationMode(const ReplicationQuery::SyncMode &sync_mode
|
||||
|
||||
class ReplQueryHandler {
|
||||
public:
|
||||
struct ReplicaInfo {
|
||||
std::string name;
|
||||
std::string socket_address;
|
||||
ReplicationQuery::SyncMode sync_mode;
|
||||
std::optional<double> timeout;
|
||||
uint64_t current_timestamp_of_replica;
|
||||
uint64_t current_number_of_timestamp_behind_master;
|
||||
ReplicationQuery::ReplicaState state;
|
||||
};
|
||||
|
||||
explicit ReplQueryHandler(query::ReplicationQueryHandler &replication_query_handler)
|
||||
: handler_{&replication_query_handler} {}
|
||||
|
||||
@ -365,7 +355,7 @@ class ReplQueryHandler {
|
||||
const auto replication_config =
|
||||
replication::ReplicationClientConfig{.name = name,
|
||||
.mode = repl_mode,
|
||||
.ip_address = ip,
|
||||
.ip_address = std::string(ip),
|
||||
.port = port,
|
||||
.replica_check_frequency = replica_check_frequency,
|
||||
.ssl = std::nullopt};
|
||||
@ -397,58 +387,16 @@ class ReplQueryHandler {
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<ReplicaInfo> ShowReplicas(const dbms::Database &db) const {
|
||||
if (handler_->IsReplica()) {
|
||||
// replica can't show registered replicas (it shouldn't have any)
|
||||
throw QueryRuntimeException("Replica can't show registered replicas (it shouldn't have any)!");
|
||||
std::vector<ReplicasInfo> ShowReplicas() const {
|
||||
auto info = handler_->ShowReplicas();
|
||||
if (info.HasError()) {
|
||||
switch (info.GetError()) {
|
||||
case ShowReplicaError::NOT_MAIN:
|
||||
throw QueryRuntimeException("Replica can't show registered replicas (it shouldn't have any)!");
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Combine results? Have a single place with clients???
|
||||
// Also authentication checks (replica + database visibility)
|
||||
const auto repl_infos = db.storage()->ReplicasInfo();
|
||||
std::vector<ReplicaInfo> replicas;
|
||||
replicas.reserve(repl_infos.size());
|
||||
|
||||
const auto from_info = [](const auto &repl_info) -> ReplicaInfo {
|
||||
ReplicaInfo replica;
|
||||
replica.name = repl_info.name;
|
||||
replica.socket_address = repl_info.endpoint.SocketAddress();
|
||||
switch (repl_info.mode) {
|
||||
case replication_coordination_glue::ReplicationMode::SYNC:
|
||||
replica.sync_mode = ReplicationQuery::SyncMode::SYNC;
|
||||
break;
|
||||
case replication_coordination_glue::ReplicationMode::ASYNC:
|
||||
replica.sync_mode = ReplicationQuery::SyncMode::ASYNC;
|
||||
break;
|
||||
}
|
||||
|
||||
replica.current_timestamp_of_replica = repl_info.timestamp_info.current_timestamp_of_replica;
|
||||
replica.current_number_of_timestamp_behind_master =
|
||||
repl_info.timestamp_info.current_number_of_timestamp_behind_master;
|
||||
|
||||
switch (repl_info.state) {
|
||||
case storage::replication::ReplicaState::READY:
|
||||
replica.state = ReplicationQuery::ReplicaState::READY;
|
||||
break;
|
||||
case storage::replication::ReplicaState::REPLICATING:
|
||||
replica.state = ReplicationQuery::ReplicaState::REPLICATING;
|
||||
break;
|
||||
case storage::replication::ReplicaState::RECOVERY:
|
||||
replica.state = ReplicationQuery::ReplicaState::RECOVERY;
|
||||
break;
|
||||
case storage::replication::ReplicaState::MAYBE_BEHIND:
|
||||
replica.state = ReplicationQuery::ReplicaState::MAYBE_BEHIND;
|
||||
break;
|
||||
case storage::replication::ReplicaState::DIVERGED_FROM_MAIN:
|
||||
replica.state = ReplicationQuery::ReplicaState::DIVERGED_FROM_MAIN;
|
||||
break;
|
||||
}
|
||||
|
||||
return replica;
|
||||
};
|
||||
|
||||
std::transform(repl_infos.begin(), repl_infos.end(), std::back_inserter(replicas), from_info);
|
||||
return replicas;
|
||||
return info.GetValue().entries_;
|
||||
}
|
||||
|
||||
private:
|
||||
@ -462,7 +410,7 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
|
||||
|
||||
: coordinator_handler_(coordinator_state) {}
|
||||
|
||||
void UnregisterInstance(std::string const &instance_name) override {
|
||||
void UnregisterInstance(std::string_view instance_name) override {
|
||||
auto status = coordinator_handler_.UnregisterReplicationInstance(instance_name);
|
||||
switch (status) {
|
||||
using enum memgraph::coordination::UnregisterInstanceCoordinatorStatus;
|
||||
@ -475,6 +423,8 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
|
||||
throw QueryRuntimeException("UNREGISTER INSTANCE query can only be run on a coordinator!");
|
||||
case NOT_LEADER:
|
||||
throw QueryRuntimeException("Couldn't unregister replica instance since coordinator is not a leader!");
|
||||
case RAFT_LOG_ERROR:
|
||||
throw QueryRuntimeException("Couldn't unregister replica instance since raft server couldn't append the log!");
|
||||
case RPC_FAILED:
|
||||
throw QueryRuntimeException(
|
||||
"Couldn't unregister replica instance because current main instance couldn't unregister replica!");
|
||||
@ -483,20 +433,18 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
|
||||
}
|
||||
}
|
||||
|
||||
void RegisterReplicationInstance(std::string const &coordinator_socket_address,
|
||||
std::string const &replication_socket_address,
|
||||
void RegisterReplicationInstance(std::string_view coordinator_socket_address,
|
||||
std::string_view replication_socket_address,
|
||||
std::chrono::seconds const &instance_check_frequency,
|
||||
std::chrono::seconds const &instance_down_timeout,
|
||||
std::chrono::seconds const &instance_get_uuid_frequency,
|
||||
std::string const &instance_name, CoordinatorQuery::SyncMode sync_mode) override {
|
||||
const auto maybe_replication_ip_port =
|
||||
io::network::Endpoint::ParseSocketOrAddress(replication_socket_address, std::nullopt);
|
||||
std::string_view instance_name, CoordinatorQuery::SyncMode sync_mode) override {
|
||||
const auto maybe_replication_ip_port = io::network::Endpoint::ParseSocketOrAddress(replication_socket_address);
|
||||
if (!maybe_replication_ip_port) {
|
||||
throw QueryRuntimeException("Invalid replication socket address!");
|
||||
}
|
||||
|
||||
const auto maybe_coordinator_ip_port =
|
||||
io::network::Endpoint::ParseSocketOrAddress(coordinator_socket_address, std::nullopt);
|
||||
const auto maybe_coordinator_ip_port = io::network::Endpoint::ParseSocketOrAddress(coordinator_socket_address);
|
||||
if (!maybe_replication_ip_port) {
|
||||
throw QueryRuntimeException("Invalid replication socket address!");
|
||||
}
|
||||
@ -504,14 +452,14 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
|
||||
const auto [replication_ip, replication_port] = *maybe_replication_ip_port;
|
||||
const auto [coordinator_server_ip, coordinator_server_port] = *maybe_coordinator_ip_port;
|
||||
const auto repl_config = coordination::CoordinatorClientConfig::ReplicationClientInfo{
|
||||
.instance_name = instance_name,
|
||||
.instance_name = std::string(instance_name),
|
||||
.replication_mode = convertFromCoordinatorToReplicationMode(sync_mode),
|
||||
.replication_ip_address = replication_ip,
|
||||
.replication_ip_address = std::string(replication_ip),
|
||||
.replication_port = replication_port};
|
||||
|
||||
auto coordinator_client_config =
|
||||
coordination::CoordinatorClientConfig{.instance_name = instance_name,
|
||||
.ip_address = coordinator_server_ip,
|
||||
coordination::CoordinatorClientConfig{.instance_name = std::string(instance_name),
|
||||
.ip_address = std::string(coordinator_server_ip),
|
||||
.port = coordinator_server_port,
|
||||
.instance_health_check_frequency_sec = instance_check_frequency,
|
||||
.instance_down_timeout_sec = instance_down_timeout,
|
||||
@ -524,18 +472,17 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
|
||||
using enum memgraph::coordination::RegisterInstanceCoordinatorStatus;
|
||||
case NAME_EXISTS:
|
||||
throw QueryRuntimeException("Couldn't register replica instance since instance with such name already exists!");
|
||||
case ENDPOINT_EXISTS:
|
||||
case COORD_ENDPOINT_EXISTS:
|
||||
throw QueryRuntimeException(
|
||||
"Couldn't register replica instance since instance with such endpoint already exists!");
|
||||
"Couldn't register replica instance since instance with such coordinator endpoint already exists!");
|
||||
case REPL_ENDPOINT_EXISTS:
|
||||
throw QueryRuntimeException(
|
||||
"Couldn't register replica instance since instance with such replication endpoint already exists!");
|
||||
case NOT_COORDINATOR:
|
||||
throw QueryRuntimeException("REGISTER INSTANCE query can only be run on a coordinator!");
|
||||
case NOT_LEADER:
|
||||
throw QueryRuntimeException("Couldn't register replica instance since coordinator is not a leader!");
|
||||
case RAFT_COULD_NOT_ACCEPT:
|
||||
throw QueryRuntimeException(
|
||||
"Couldn't register replica instance since raft server couldn't accept the log! Most likely the raft "
|
||||
"instance is not a leader!");
|
||||
case RAFT_COULD_NOT_APPEND:
|
||||
case RAFT_LOG_ERROR:
|
||||
throw QueryRuntimeException("Couldn't register replica instance since raft server couldn't append the log!");
|
||||
case RPC_FAILED:
|
||||
throw QueryRuntimeException(
|
||||
@ -546,19 +493,19 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
|
||||
}
|
||||
}
|
||||
|
||||
auto AddCoordinatorInstance(uint32_t raft_server_id, std::string const &raft_socket_address) -> void override {
|
||||
auto const maybe_ip_and_port = io::network::Endpoint::ParseSocketOrIpAddress(raft_socket_address);
|
||||
auto AddCoordinatorInstance(uint32_t raft_server_id, std::string_view raft_socket_address) -> void override {
|
||||
auto const maybe_ip_and_port = io::network::Endpoint::ParseSocketOrAddress(raft_socket_address);
|
||||
if (maybe_ip_and_port) {
|
||||
auto const [ip, port] = *maybe_ip_and_port;
|
||||
spdlog::info("Adding instance {} with raft socket address {}:{}.", raft_server_id, port, ip);
|
||||
spdlog::info("Adding instance {} with raft socket address {}:{}.", raft_server_id, ip, port);
|
||||
coordinator_handler_.AddCoordinatorInstance(raft_server_id, port, ip);
|
||||
} else {
|
||||
spdlog::error("Invalid raft socket address {}.", raft_socket_address);
|
||||
}
|
||||
}
|
||||
|
||||
void SetReplicationInstanceToMain(const std::string &instance_name) override {
|
||||
auto status = coordinator_handler_.SetReplicationInstanceToMain(instance_name);
|
||||
void SetReplicationInstanceToMain(std::string_view instance_name) override {
|
||||
auto const status = coordinator_handler_.SetReplicationInstanceToMain(instance_name);
|
||||
switch (status) {
|
||||
using enum memgraph::coordination::SetInstanceToMainCoordinatorStatus;
|
||||
case NO_INSTANCE_WITH_NAME:
|
||||
@ -567,6 +514,10 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
|
||||
throw QueryRuntimeException("Couldn't set instance to main since there is already a main instance in cluster!");
|
||||
case NOT_COORDINATOR:
|
||||
throw QueryRuntimeException("SET INSTANCE TO MAIN query can only be run on a coordinator!");
|
||||
case NOT_LEADER:
|
||||
throw QueryRuntimeException("Couldn't set instance to main since coordinator is not a leader!");
|
||||
case RAFT_LOG_ERROR:
|
||||
throw QueryRuntimeException("Couldn't promote instance since raft server couldn't append the log!");
|
||||
case COULD_NOT_PROMOTE_TO_MAIN:
|
||||
throw QueryRuntimeException(
|
||||
"Couldn't set replica instance to main! Check coordinator and replica for more logs");
|
||||
@ -1092,50 +1043,98 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
|
||||
}
|
||||
#endif
|
||||
|
||||
callback.header = {
|
||||
"name", "socket_address", "sync_mode", "current_timestamp_of_replica", "number_of_timestamp_behind_master",
|
||||
"state"};
|
||||
bool full_info = false;
|
||||
#ifdef MG_ENTERPRISE
|
||||
full_info = license::global_license_checker.IsEnterpriseValidFast();
|
||||
#endif
|
||||
|
||||
callback.header = {"name", "socket_address", "sync_mode", "system_info", "data_info"};
|
||||
|
||||
callback.fn = [handler = ReplQueryHandler{replication_query_handler}, replica_nfields = callback.header.size(),
|
||||
db_acc = current_db.db_acc_] {
|
||||
const auto &replicas = handler.ShowReplicas(*db_acc->get());
|
||||
full_info] {
|
||||
auto const sync_mode_to_tv = [](memgraph::replication_coordination_glue::ReplicationMode sync_mode) {
|
||||
using namespace std::string_view_literals;
|
||||
switch (sync_mode) {
|
||||
using enum memgraph::replication_coordination_glue::ReplicationMode;
|
||||
case SYNC:
|
||||
return TypedValue{"sync"sv};
|
||||
case ASYNC:
|
||||
return TypedValue{"async"sv};
|
||||
}
|
||||
};
|
||||
|
||||
auto const replica_sys_state_to_tv = [](memgraph::replication::ReplicationClient::State state) {
|
||||
using namespace std::string_view_literals;
|
||||
switch (state) {
|
||||
using enum memgraph::replication::ReplicationClient::State;
|
||||
case BEHIND:
|
||||
return TypedValue{"invalid"sv};
|
||||
case READY:
|
||||
return TypedValue{"ready"sv};
|
||||
case RECOVERY:
|
||||
return TypedValue{"recovery"sv};
|
||||
}
|
||||
};
|
||||
|
||||
auto const sys_info_to_tv = [&](ReplicaSystemInfoState orig) {
|
||||
auto info = std::map<std::string, TypedValue>{};
|
||||
info.emplace("ts", TypedValue{static_cast<int64_t>(orig.ts_)});
|
||||
// TODO: behind not implemented
|
||||
info.emplace("behind", TypedValue{/* static_cast<int64_t>(orig.behind_) */});
|
||||
info.emplace("status", replica_sys_state_to_tv(orig.state_));
|
||||
return TypedValue{std::move(info)};
|
||||
};
|
||||
|
||||
auto const replica_state_to_tv = [](memgraph::storage::replication::ReplicaState state) {
|
||||
using namespace std::string_view_literals;
|
||||
switch (state) {
|
||||
using enum memgraph::storage::replication::ReplicaState;
|
||||
case READY:
|
||||
return TypedValue{"ready"sv};
|
||||
case REPLICATING:
|
||||
return TypedValue{"replicating"sv};
|
||||
case RECOVERY:
|
||||
return TypedValue{"recovery"sv};
|
||||
case MAYBE_BEHIND:
|
||||
return TypedValue{"invalid"sv};
|
||||
case DIVERGED_FROM_MAIN:
|
||||
return TypedValue{"diverged"sv};
|
||||
}
|
||||
};
|
||||
|
||||
auto const info_to_tv = [&](ReplicaInfoState orig) {
|
||||
auto info = std::map<std::string, TypedValue>{};
|
||||
info.emplace("ts", TypedValue{static_cast<int64_t>(orig.ts_)});
|
||||
info.emplace("behind", TypedValue{static_cast<int64_t>(orig.behind_)});
|
||||
info.emplace("status", replica_state_to_tv(orig.state_));
|
||||
return TypedValue{std::move(info)};
|
||||
};
|
||||
|
||||
auto const data_info_to_tv = [&](std::map<std::string, ReplicaInfoState> orig) {
|
||||
auto data_info = std::map<std::string, TypedValue>{};
|
||||
for (auto &[name, info] : orig) {
|
||||
data_info.emplace(name, info_to_tv(info));
|
||||
}
|
||||
return TypedValue{std::move(data_info)};
|
||||
};
|
||||
|
||||
auto replicas = handler.ShowReplicas();
|
||||
auto typed_replicas = std::vector<std::vector<TypedValue>>{};
|
||||
typed_replicas.reserve(replicas.size());
|
||||
for (const auto &replica : replicas) {
|
||||
for (auto &replica : replicas) {
|
||||
std::vector<TypedValue> typed_replica;
|
||||
typed_replica.reserve(replica_nfields);
|
||||
|
||||
typed_replica.emplace_back(replica.name);
|
||||
typed_replica.emplace_back(replica.socket_address);
|
||||
|
||||
switch (replica.sync_mode) {
|
||||
case ReplicationQuery::SyncMode::SYNC:
|
||||
typed_replica.emplace_back("sync");
|
||||
break;
|
||||
case ReplicationQuery::SyncMode::ASYNC:
|
||||
typed_replica.emplace_back("async");
|
||||
break;
|
||||
}
|
||||
|
||||
typed_replica.emplace_back(static_cast<int64_t>(replica.current_timestamp_of_replica));
|
||||
typed_replica.emplace_back(static_cast<int64_t>(replica.current_number_of_timestamp_behind_master));
|
||||
|
||||
switch (replica.state) {
|
||||
case ReplicationQuery::ReplicaState::READY:
|
||||
typed_replica.emplace_back("ready");
|
||||
break;
|
||||
case ReplicationQuery::ReplicaState::REPLICATING:
|
||||
typed_replica.emplace_back("replicating");
|
||||
break;
|
||||
case ReplicationQuery::ReplicaState::RECOVERY:
|
||||
typed_replica.emplace_back("recovery");
|
||||
break;
|
||||
case ReplicationQuery::ReplicaState::MAYBE_BEHIND:
|
||||
typed_replica.emplace_back("invalid");
|
||||
break;
|
||||
case ReplicationQuery::ReplicaState::DIVERGED_FROM_MAIN:
|
||||
typed_replica.emplace_back("diverged");
|
||||
break;
|
||||
typed_replica.emplace_back(replica.name_);
|
||||
typed_replica.emplace_back(replica.socket_address_);
|
||||
typed_replica.emplace_back(sync_mode_to_tv(replica.sync_mode_));
|
||||
if (full_info) {
|
||||
typed_replica.emplace_back(sys_info_to_tv(replica.system_info_));
|
||||
} else {
|
||||
// Set to NULL
|
||||
typed_replica.emplace_back(TypedValue{});
|
||||
}
|
||||
typed_replica.emplace_back(data_info_to_tv(replica.data_info_));
|
||||
|
||||
typed_replicas.emplace_back(std::move(typed_replica));
|
||||
}
|
||||
@ -1213,7 +1212,7 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
|
||||
};
|
||||
|
||||
notifications->emplace_back(
|
||||
SeverityLevel::INFO, NotificationCode::REGISTER_COORDINATOR_SERVER,
|
||||
SeverityLevel::INFO, NotificationCode::REGISTER_REPLICATION_INSTANCE,
|
||||
fmt::format("Coordinator has registered coordinator server on {} for instance {}.",
|
||||
coordinator_socket_address_tv.ValueString(), coordinator_query->instance_name_));
|
||||
return callback;
|
||||
@ -1255,17 +1254,16 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
|
||||
throw QueryRuntimeException("Only coordinator can run SHOW INSTANCES.");
|
||||
}
|
||||
|
||||
callback.header = {"name", "raft_socket_address", "coordinator_socket_address", "alive", "role"};
|
||||
callback.header = {"name", "raft_socket_address", "coordinator_socket_address", "health", "role"};
|
||||
callback.fn = [handler = CoordQueryHandler{*coordinator_state},
|
||||
replica_nfields = callback.header.size()]() mutable {
|
||||
auto const instances = handler.ShowInstances();
|
||||
auto const converter = [](const auto &status) -> std::vector<TypedValue> {
|
||||
return {TypedValue{status.instance_name}, TypedValue{status.raft_socket_address},
|
||||
TypedValue{status.coord_socket_address}, TypedValue{status.is_alive},
|
||||
TypedValue{status.cluster_role}};
|
||||
TypedValue{status.coord_socket_address}, TypedValue{status.health}, TypedValue{status.cluster_role}};
|
||||
};
|
||||
|
||||
return utils::fmap(converter, instances);
|
||||
return utils::fmap(instances, converter);
|
||||
};
|
||||
return callback;
|
||||
}
|
||||
@ -2681,6 +2679,75 @@ PreparedQuery PrepareIndexQuery(ParsedQuery parsed_query, bool in_explicit_trans
|
||||
RWType::W};
|
||||
}
|
||||
|
||||
PreparedQuery PrepareEdgeIndexQuery(ParsedQuery parsed_query, bool in_explicit_transaction,
|
||||
std::vector<Notification> *notifications, CurrentDB ¤t_db) {
|
||||
if (in_explicit_transaction) {
|
||||
throw IndexInMulticommandTxException();
|
||||
}
|
||||
|
||||
auto *index_query = utils::Downcast<EdgeIndexQuery>(parsed_query.query);
|
||||
std::function<void(Notification &)> handler;
|
||||
|
||||
MG_ASSERT(current_db.db_acc_, "Index query expects a current DB");
|
||||
auto &db_acc = *current_db.db_acc_;
|
||||
|
||||
MG_ASSERT(current_db.db_transactional_accessor_, "Index query expects a current DB transaction");
|
||||
auto *dba = &*current_db.execution_db_accessor_;
|
||||
|
||||
auto invalidate_plan_cache = [plan_cache = db_acc->plan_cache()] {
|
||||
plan_cache->WithLock([&](auto &cache) { cache.reset(); });
|
||||
};
|
||||
|
||||
auto *storage = db_acc->storage();
|
||||
auto edge_type = storage->NameToEdgeType(index_query->edge_type_.name);
|
||||
|
||||
Notification index_notification(SeverityLevel::INFO);
|
||||
switch (index_query->action_) {
|
||||
case EdgeIndexQuery::Action::CREATE: {
|
||||
index_notification.code = NotificationCode::CREATE_INDEX;
|
||||
index_notification.title = fmt::format("Created index on edge-type {}.", index_query->edge_type_.name);
|
||||
|
||||
handler = [dba, edge_type, label_name = index_query->edge_type_.name,
|
||||
invalidate_plan_cache = std::move(invalidate_plan_cache)](Notification &index_notification) {
|
||||
auto maybe_index_error = dba->CreateIndex(edge_type);
|
||||
utils::OnScopeExit invalidator(invalidate_plan_cache);
|
||||
|
||||
if (maybe_index_error.HasError()) {
|
||||
index_notification.code = NotificationCode::EXISTENT_INDEX;
|
||||
index_notification.title = fmt::format("Index on edge-type {} already exists.", label_name);
|
||||
}
|
||||
};
|
||||
break;
|
||||
}
|
||||
case EdgeIndexQuery::Action::DROP: {
|
||||
index_notification.code = NotificationCode::DROP_INDEX;
|
||||
index_notification.title = fmt::format("Dropped index on edge-type {}.", index_query->edge_type_.name);
|
||||
handler = [dba, edge_type, label_name = index_query->edge_type_.name,
|
||||
invalidate_plan_cache = std::move(invalidate_plan_cache)](Notification &index_notification) {
|
||||
auto maybe_index_error = dba->DropIndex(edge_type);
|
||||
utils::OnScopeExit invalidator(invalidate_plan_cache);
|
||||
|
||||
if (maybe_index_error.HasError()) {
|
||||
index_notification.code = NotificationCode::NONEXISTENT_INDEX;
|
||||
index_notification.title = fmt::format("Index on edge-type {} doesn't exist.", label_name);
|
||||
}
|
||||
};
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return PreparedQuery{
|
||||
{},
|
||||
std::move(parsed_query.required_privileges),
|
||||
[handler = std::move(handler), notifications, index_notification = std::move(index_notification)](
|
||||
AnyStream * /*stream*/, std::optional<int> /*unused*/) mutable {
|
||||
handler(index_notification);
|
||||
notifications->push_back(index_notification);
|
||||
return QueryHandlerResult::COMMIT;
|
||||
},
|
||||
RWType::W};
|
||||
}
|
||||
|
||||
PreparedQuery PrepareAuthQuery(ParsedQuery parsed_query, bool in_explicit_transaction,
|
||||
InterpreterContext *interpreter_context, Interpreter &interpreter) {
|
||||
if (in_explicit_transaction) {
|
||||
@ -3485,6 +3552,7 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici
|
||||
auto *storage = database->storage();
|
||||
const std::string_view label_index_mark{"label"};
|
||||
const std::string_view label_property_index_mark{"label+property"};
|
||||
const std::string_view edge_type_index_mark{"edge-type"};
|
||||
auto info = dba->ListAllIndices();
|
||||
auto storage_acc = database->Access();
|
||||
std::vector<std::vector<TypedValue>> results;
|
||||
@ -3499,6 +3567,10 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici
|
||||
TypedValue(storage->PropertyToName(item.second)),
|
||||
TypedValue(static_cast<int>(storage_acc->ApproximateVertexCount(item.first, item.second)))});
|
||||
}
|
||||
for (const auto &item : info.edge_type) {
|
||||
results.push_back({TypedValue(edge_type_index_mark), TypedValue(storage->EdgeTypeToName(item)), TypedValue(),
|
||||
TypedValue(static_cast<int>(storage_acc->ApproximateEdgeCount(item)))});
|
||||
}
|
||||
std::sort(results.begin(), results.end(), [&label_index_mark](const auto &record_1, const auto &record_2) {
|
||||
const auto type_1 = record_1[0].ValueString();
|
||||
const auto type_2 = record_2[0].ValueString();
|
||||
@ -4285,13 +4357,14 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
|
||||
utils::Downcast<CypherQuery>(parsed_query.query) || utils::Downcast<ExplainQuery>(parsed_query.query) ||
|
||||
utils::Downcast<ProfileQuery>(parsed_query.query) || utils::Downcast<DumpQuery>(parsed_query.query) ||
|
||||
utils::Downcast<TriggerQuery>(parsed_query.query) || utils::Downcast<AnalyzeGraphQuery>(parsed_query.query) ||
|
||||
utils::Downcast<IndexQuery>(parsed_query.query) || utils::Downcast<DatabaseInfoQuery>(parsed_query.query) ||
|
||||
utils::Downcast<ConstraintQuery>(parsed_query.query);
|
||||
utils::Downcast<IndexQuery>(parsed_query.query) || utils::Downcast<EdgeIndexQuery>(parsed_query.query) ||
|
||||
utils::Downcast<DatabaseInfoQuery>(parsed_query.query) || utils::Downcast<ConstraintQuery>(parsed_query.query);
|
||||
|
||||
if (!in_explicit_transaction_ && requires_db_transaction) {
|
||||
// TODO: ATM only a single database, will change when we have multiple database transactions
|
||||
bool could_commit = utils::Downcast<CypherQuery>(parsed_query.query) != nullptr;
|
||||
bool unique = utils::Downcast<IndexQuery>(parsed_query.query) != nullptr ||
|
||||
utils::Downcast<EdgeIndexQuery>(parsed_query.query) != nullptr ||
|
||||
utils::Downcast<ConstraintQuery>(parsed_query.query) != nullptr ||
|
||||
upper_case_query.find(kSchemaAssert) != std::string::npos;
|
||||
SetupDatabaseTransaction(could_commit, unique);
|
||||
@ -4328,6 +4401,9 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
|
||||
} else if (utils::Downcast<IndexQuery>(parsed_query.query)) {
|
||||
prepared_query = PrepareIndexQuery(std::move(parsed_query), in_explicit_transaction_,
|
||||
&query_execution->notifications, current_db_);
|
||||
} else if (utils::Downcast<EdgeIndexQuery>(parsed_query.query)) {
|
||||
prepared_query = PrepareEdgeIndexQuery(std::move(parsed_query), in_explicit_transaction_,
|
||||
&query_execution->notifications, current_db_);
|
||||
} else if (utils::Downcast<AnalyzeGraphQuery>(parsed_query.query)) {
|
||||
prepared_query = PrepareAnalyzeGraphQuery(std::move(parsed_query), in_explicit_transaction_, current_db_);
|
||||
} else if (utils::Downcast<AuthQuery>(parsed_query.query)) {
|
||||
|
@ -84,16 +84,6 @@ class CoordinatorQueryHandler {
|
||||
CoordinatorQueryHandler(CoordinatorQueryHandler &&) = default;
|
||||
CoordinatorQueryHandler &operator=(CoordinatorQueryHandler &&) = default;
|
||||
|
||||
struct Replica {
|
||||
std::string name;
|
||||
std::string socket_address;
|
||||
ReplicationQuery::SyncMode sync_mode;
|
||||
std::optional<double> timeout;
|
||||
uint64_t current_timestamp_of_replica;
|
||||
uint64_t current_number_of_timestamp_behind_master;
|
||||
ReplicationQuery::ReplicaState state;
|
||||
};
|
||||
|
||||
struct MainReplicaStatus {
|
||||
std::string_view name;
|
||||
std::string_view socket_address;
|
||||
@ -105,25 +95,24 @@ class CoordinatorQueryHandler {
|
||||
};
|
||||
|
||||
/// @throw QueryRuntimeException if an error ocurred.
|
||||
virtual void RegisterReplicationInstance(std::string const &coordinator_socket_address,
|
||||
std::string const &replication_socket_address,
|
||||
virtual void RegisterReplicationInstance(std::string_view coordinator_socket_address,
|
||||
std::string_view replication_socket_address,
|
||||
std::chrono::seconds const &instance_health_check_frequency,
|
||||
std::chrono::seconds const &instance_down_timeout,
|
||||
std::chrono::seconds const &instance_get_uuid_frequency,
|
||||
std::string const &instance_name, CoordinatorQuery::SyncMode sync_mode) = 0;
|
||||
std::string_view instance_name, CoordinatorQuery::SyncMode sync_mode) = 0;
|
||||
|
||||
/// @throw QueryRuntimeException if an error ocurred.
|
||||
virtual void UnregisterInstance(std::string const &instance_name) = 0;
|
||||
virtual void UnregisterInstance(std::string_view instance_name) = 0;
|
||||
|
||||
/// @throw QueryRuntimeException if an error ocurred.
|
||||
virtual void SetReplicationInstanceToMain(const std::string &instance_name) = 0;
|
||||
virtual void SetReplicationInstanceToMain(std::string_view instance_name) = 0;
|
||||
|
||||
/// @throw QueryRuntimeException if an error ocurred.
|
||||
virtual std::vector<coordination::InstanceStatus> ShowInstances() const = 0;
|
||||
|
||||
/// @throw QueryRuntimeException if an error ocurred.
|
||||
virtual auto AddCoordinatorInstance(uint32_t raft_server_id, std::string const &coordinator_socket_address)
|
||||
-> void = 0;
|
||||
virtual auto AddCoordinatorInstance(uint32_t raft_server_id, std::string_view coordinator_socket_address) -> void = 0;
|
||||
};
|
||||
#endif
|
||||
|
||||
|
@ -67,8 +67,8 @@ constexpr std::string_view GetCodeString(const NotificationCode code) {
|
||||
case NotificationCode::REGISTER_REPLICA:
|
||||
return "RegisterReplica"sv;
|
||||
#ifdef MG_ENTERPRISE
|
||||
case NotificationCode::REGISTER_COORDINATOR_SERVER:
|
||||
return "RegisterCoordinatorServer"sv;
|
||||
case NotificationCode::REGISTER_REPLICATION_INSTANCE:
|
||||
return "RegisterReplicationInstance"sv;
|
||||
case NotificationCode::ADD_COORDINATOR_INSTANCE:
|
||||
return "AddCoordinatorInstance"sv;
|
||||
case NotificationCode::UNREGISTER_INSTANCE:
|
||||
|
@ -43,7 +43,7 @@ enum class NotificationCode : uint8_t {
|
||||
REPLICA_PORT_WARNING,
|
||||
REGISTER_REPLICA,
|
||||
#ifdef MG_ENTERPRISE
|
||||
REGISTER_COORDINATOR_SERVER, // TODO: (andi) What is this?
|
||||
REGISTER_REPLICATION_INSTANCE,
|
||||
ADD_COORDINATOR_INSTANCE,
|
||||
UNREGISTER_INSTANCE,
|
||||
#endif
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -114,6 +114,9 @@ class PlanHintsProvider final : public HierarchicalLogicalOperatorVisitor {
|
||||
bool PreVisit(ScanAllById & /*unused*/) override { return true; }
|
||||
bool PostVisit(ScanAllById & /*unused*/) override { return true; }
|
||||
|
||||
bool PreVisit(ScanAllByEdgeType & /*unused*/) override { return true; }
|
||||
bool PostVisit(ScanAllByEdgeType & /*unused*/) override { return true; }
|
||||
|
||||
bool PreVisit(ConstructNamedPath & /*unused*/) override { return true; }
|
||||
bool PostVisit(ConstructNamedPath & /*unused*/) override { return true; }
|
||||
|
||||
@ -206,6 +209,14 @@ class PlanHintsProvider final : public HierarchicalLogicalOperatorVisitor {
|
||||
|
||||
bool PostVisit(IndexedJoin & /*unused*/) override { return true; }
|
||||
|
||||
bool PreVisit(RollUpApply &op) override {
|
||||
op.input()->Accept(*this);
|
||||
op.list_collection_branch_->Accept(*this);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(RollUpApply & /*unused*/) override { return true; }
|
||||
|
||||
private:
|
||||
const SymbolTable &symbol_table_;
|
||||
std::vector<std::string> hints_;
|
||||
|
@ -105,6 +105,7 @@ extern const Event ScanAllByLabelPropertyRangeOperator;
|
||||
extern const Event ScanAllByLabelPropertyValueOperator;
|
||||
extern const Event ScanAllByLabelPropertyOperator;
|
||||
extern const Event ScanAllByIdOperator;
|
||||
extern const Event ScanAllByEdgeTypeOperator;
|
||||
extern const Event ExpandOperator;
|
||||
extern const Event ExpandVariableOperator;
|
||||
extern const Event ConstructNamedPathOperator;
|
||||
@ -517,6 +518,60 @@ class ScanAllCursor : public Cursor {
|
||||
const char *op_name_;
|
||||
};
|
||||
|
||||
template <typename TEdgesFun>
|
||||
class ScanAllByEdgeTypeCursor : public Cursor {
|
||||
public:
|
||||
explicit ScanAllByEdgeTypeCursor(const ScanAllByEdgeType &self, Symbol output_symbol, UniqueCursorPtr input_cursor,
|
||||
storage::View view, TEdgesFun get_edges, const char *op_name)
|
||||
: self_(self),
|
||||
output_symbol_(std::move(output_symbol)),
|
||||
input_cursor_(std::move(input_cursor)),
|
||||
view_(view),
|
||||
get_edges_(std::move(get_edges)),
|
||||
op_name_(op_name) {}
|
||||
|
||||
bool Pull(Frame &frame, ExecutionContext &context) override {
|
||||
OOMExceptionEnabler oom_exception;
|
||||
SCOPED_PROFILE_OP_BY_REF(self_);
|
||||
|
||||
AbortCheck(context);
|
||||
|
||||
while (!vertices_ || vertices_it_.value() == vertices_end_it_.value()) {
|
||||
if (!input_cursor_->Pull(frame, context)) return false;
|
||||
auto next_vertices = get_edges_(frame, context);
|
||||
if (!next_vertices) continue;
|
||||
|
||||
vertices_.emplace(std::move(next_vertices.value()));
|
||||
vertices_it_.emplace(vertices_.value().begin());
|
||||
vertices_end_it_.emplace(vertices_.value().end());
|
||||
}
|
||||
|
||||
frame[output_symbol_] = *vertices_it_.value();
|
||||
++vertices_it_.value();
|
||||
return true;
|
||||
}
|
||||
|
||||
void Shutdown() override { input_cursor_->Shutdown(); }
|
||||
|
||||
void Reset() override {
|
||||
input_cursor_->Reset();
|
||||
vertices_ = std::nullopt;
|
||||
vertices_it_ = std::nullopt;
|
||||
vertices_end_it_ = std::nullopt;
|
||||
}
|
||||
|
||||
private:
|
||||
const ScanAllByEdgeType &self_;
|
||||
const Symbol output_symbol_;
|
||||
const UniqueCursorPtr input_cursor_;
|
||||
storage::View view_;
|
||||
TEdgesFun get_edges_;
|
||||
std::optional<typename std::result_of<TEdgesFun(Frame &, ExecutionContext &)>::type::value_type> vertices_;
|
||||
std::optional<decltype(vertices_.value().begin())> vertices_it_;
|
||||
std::optional<decltype(vertices_.value().end())> vertices_end_it_;
|
||||
const char *op_name_;
|
||||
};
|
||||
|
||||
ScanAll::ScanAll(const std::shared_ptr<LogicalOperator> &input, Symbol output_symbol, storage::View view)
|
||||
: input_(input ? input : std::make_shared<Once>()), output_symbol_(std::move(output_symbol)), view_(view) {}
|
||||
|
||||
@ -556,6 +611,33 @@ UniqueCursorPtr ScanAllByLabel::MakeCursor(utils::MemoryResource *mem) const {
|
||||
view_, std::move(vertices), "ScanAllByLabel");
|
||||
}
|
||||
|
||||
ScanAllByEdgeType::ScanAllByEdgeType(const std::shared_ptr<LogicalOperator> &input, Symbol output_symbol,
|
||||
storage::EdgeTypeId edge_type, storage::View view)
|
||||
: input_(input ? input : std::make_shared<Once>()),
|
||||
output_symbol_(std::move(output_symbol)),
|
||||
view_(view),
|
||||
edge_type_(edge_type) {}
|
||||
|
||||
ACCEPT_WITH_INPUT(ScanAllByEdgeType)
|
||||
|
||||
UniqueCursorPtr ScanAllByEdgeType::MakeCursor(utils::MemoryResource *mem) const {
|
||||
memgraph::metrics::IncrementCounter(memgraph::metrics::ScanAllByEdgeTypeOperator);
|
||||
|
||||
auto edges = [this](Frame &, ExecutionContext &context) {
|
||||
auto *db = context.db_accessor;
|
||||
return std::make_optional(db->Edges(view_, edge_type_));
|
||||
};
|
||||
|
||||
return MakeUniqueCursorPtr<ScanAllByEdgeTypeCursor<decltype(edges)>>(
|
||||
mem, *this, output_symbol_, input_->MakeCursor(mem), view_, std::move(edges), "ScanAllByEdgeType");
|
||||
}
|
||||
|
||||
std::vector<Symbol> ScanAllByEdgeType::ModifiedSymbols(const SymbolTable &table) const {
|
||||
auto symbols = input_->ModifiedSymbols(table);
|
||||
symbols.emplace_back(output_symbol_);
|
||||
return symbols;
|
||||
}
|
||||
|
||||
// TODO(buda): Implement ScanAllByLabelProperty operator to iterate over
|
||||
// vertices that have the label and some value for the given property.
|
||||
|
||||
@ -5624,4 +5706,25 @@ UniqueCursorPtr HashJoin::MakeCursor(utils::MemoryResource *mem) const {
|
||||
return MakeUniqueCursorPtr<HashJoinCursor>(mem, *this, mem);
|
||||
}
|
||||
|
||||
RollUpApply::RollUpApply(const std::shared_ptr<LogicalOperator> &input,
|
||||
std::shared_ptr<LogicalOperator> &&second_branch)
|
||||
: input_(input), list_collection_branch_(second_branch) {}
|
||||
|
||||
std::vector<Symbol> RollUpApply::OutputSymbols(const SymbolTable & /*symbol_table*/) const {
|
||||
std::vector<Symbol> symbols;
|
||||
return symbols;
|
||||
}
|
||||
|
||||
std::vector<Symbol> RollUpApply::ModifiedSymbols(const SymbolTable &table) const { return OutputSymbols(table); }
|
||||
|
||||
bool RollUpApply::Accept(HierarchicalLogicalOperatorVisitor &visitor) {
|
||||
if (visitor.PreVisit(*this)) {
|
||||
if (!input_ || !list_collection_branch_) {
|
||||
throw utils::NotYetImplemented("One of the branches in pattern comprehension is null! Please contact support.");
|
||||
}
|
||||
input_->Accept(visitor) && list_collection_branch_->Accept(visitor);
|
||||
}
|
||||
return visitor.PostVisit(*this);
|
||||
}
|
||||
|
||||
} // namespace memgraph::query::plan
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -99,6 +99,7 @@ class ScanAllByLabelPropertyRange;
|
||||
class ScanAllByLabelPropertyValue;
|
||||
class ScanAllByLabelProperty;
|
||||
class ScanAllById;
|
||||
class ScanAllByEdgeType;
|
||||
class Expand;
|
||||
class ExpandVariable;
|
||||
class ConstructNamedPath;
|
||||
@ -130,14 +131,15 @@ class EvaluatePatternFilter;
|
||||
class Apply;
|
||||
class IndexedJoin;
|
||||
class HashJoin;
|
||||
class RollUpApply;
|
||||
|
||||
using LogicalOperatorCompositeVisitor =
|
||||
utils::CompositeVisitor<Once, CreateNode, CreateExpand, ScanAll, ScanAllByLabel, ScanAllByLabelPropertyRange,
|
||||
ScanAllByLabelPropertyValue, ScanAllByLabelProperty, ScanAllById, Expand, ExpandVariable,
|
||||
ConstructNamedPath, Filter, Produce, Delete, SetProperty, SetProperties, SetLabels,
|
||||
RemoveProperty, RemoveLabels, EdgeUniquenessFilter, Accumulate, Aggregate, Skip, Limit,
|
||||
OrderBy, Merge, Optional, Unwind, Distinct, Union, Cartesian, CallProcedure, LoadCsv,
|
||||
Foreach, EmptyResult, EvaluatePatternFilter, Apply, IndexedJoin, HashJoin>;
|
||||
ScanAllByLabelPropertyValue, ScanAllByLabelProperty, ScanAllById, ScanAllByEdgeType, Expand,
|
||||
ExpandVariable, ConstructNamedPath, Filter, Produce, Delete, SetProperty, SetProperties,
|
||||
SetLabels, RemoveProperty, RemoveLabels, EdgeUniquenessFilter, Accumulate, Aggregate, Skip,
|
||||
Limit, OrderBy, Merge, Optional, Unwind, Distinct, Union, Cartesian, CallProcedure, LoadCsv,
|
||||
Foreach, EmptyResult, EvaluatePatternFilter, Apply, IndexedJoin, HashJoin, RollUpApply>;
|
||||
|
||||
using LogicalOperatorLeafVisitor = utils::LeafVisitor<Once>;
|
||||
|
||||
@ -591,6 +593,42 @@ class ScanAllByLabel : public memgraph::query::plan::ScanAll {
|
||||
}
|
||||
};
|
||||
|
||||
class ScanAllByEdgeType : public memgraph::query::plan::LogicalOperator {
|
||||
public:
|
||||
static const utils::TypeInfo kType;
|
||||
const utils::TypeInfo &GetTypeInfo() const override { return kType; }
|
||||
|
||||
ScanAllByEdgeType() = default;
|
||||
ScanAllByEdgeType(const std::shared_ptr<LogicalOperator> &input, Symbol output_symbol, storage::EdgeTypeId edge_type,
|
||||
storage::View view = storage::View::OLD);
|
||||
bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override;
|
||||
UniqueCursorPtr MakeCursor(utils::MemoryResource *) const override;
|
||||
std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override;
|
||||
|
||||
bool HasSingleInput() const override { return true; }
|
||||
std::shared_ptr<LogicalOperator> input() const override { return input_; }
|
||||
void set_input(std::shared_ptr<LogicalOperator> input) override { input_ = input; }
|
||||
|
||||
std::string ToString() const override {
|
||||
return fmt::format("ScanAllByEdgeType ({} :{})", output_symbol_.name(), dba_->EdgeTypeToName(edge_type_));
|
||||
}
|
||||
|
||||
std::shared_ptr<memgraph::query::plan::LogicalOperator> input_;
|
||||
Symbol output_symbol_;
|
||||
storage::View view_;
|
||||
|
||||
storage::EdgeTypeId edge_type_;
|
||||
|
||||
std::unique_ptr<LogicalOperator> Clone(AstStorage *storage) const override {
|
||||
auto object = std::make_unique<ScanAllByEdgeType>();
|
||||
object->input_ = input_ ? input_->Clone(storage) : nullptr;
|
||||
object->output_symbol_ = output_symbol_;
|
||||
object->view_ = view_;
|
||||
object->edge_type_ = edge_type_;
|
||||
return object;
|
||||
}
|
||||
};
|
||||
|
||||
/// Behaves like @c ScanAll, but produces only vertices with given label and
|
||||
/// property value which is inside a range (inclusive or exclusive).
|
||||
///
|
||||
@ -2634,5 +2672,38 @@ class HashJoin : public memgraph::query::plan::LogicalOperator {
|
||||
}
|
||||
};
|
||||
|
||||
/// RollUpApply executes an expression that takes a pattern as input and
/// returns a list built from the matched pattern.
/// It is used for pattern expressions and pattern comprehensions in a query.
class RollUpApply : public memgraph::query::plan::LogicalOperator {
public:
static const utils::TypeInfo kType;
const utils::TypeInfo &GetTypeInfo() const override { return kType; }

RollUpApply() = default;
RollUpApply(const std::shared_ptr<LogicalOperator> &input, std::shared_ptr<LogicalOperator> &&second_branch);

bool HasSingleInput() const override { return false; }
std::shared_ptr<LogicalOperator> input() const override { return input_; }
void set_input(std::shared_ptr<LogicalOperator> input) override { input_ = input; }

bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override;
UniqueCursorPtr MakeCursor(utils::MemoryResource *) const override {
throw utils::NotYetImplemented("Execution of Pattern comprehension is currently unsupported.");
}
std::vector<Symbol> OutputSymbols(const SymbolTable &) const override;
std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override;

std::unique_ptr<LogicalOperator> Clone(AstStorage *storage) const override {
auto object = std::make_unique<RollUpApply>();
object->input_ = input_ ? input_->Clone(storage) : nullptr;
object->list_collection_branch_ = list_collection_branch_ ? list_collection_branch_->Clone(storage) : nullptr;
return object;
}

std::shared_ptr<memgraph::query::plan::LogicalOperator> input_;
std::shared_ptr<memgraph::query::plan::LogicalOperator> list_collection_branch_;
};
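As a rough, hypothetical sketch of how a planner can wire this operator (the names `outer_plan` and `list_branch` are illustrative placeholders, not part of this change):

// Illustrative sketch only: `outer_plan` and `list_branch` are assumed to be
// std::shared_ptr<LogicalOperator> plans built elsewhere; the second branch is
// moved in, matching the RollUpApply(input, &&second_branch) constructor above.
auto roll_up = std::make_unique<RollUpApply>(outer_plan, std::move(list_branch));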
|
||||
|
||||
} // namespace plan
|
||||
} // namespace memgraph::query
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -49,6 +49,8 @@ constexpr utils::TypeInfo query::plan::ScanAllByLabelProperty::kType{
|
||||
|
||||
constexpr utils::TypeInfo query::plan::ScanAllById::kType{utils::TypeId::SCAN_ALL_BY_ID, "ScanAllById",
|
||||
&query::plan::ScanAll::kType};
|
||||
constexpr utils::TypeInfo query::plan::ScanAllByEdgeType::kType{utils::TypeId::SCAN_ALL_BY_EDGE_TYPE,
|
||||
"ScanAllByEdgeType", &query::plan::ScanAll::kType};
|
||||
|
||||
constexpr utils::TypeInfo query::plan::ExpandCommon::kType{utils::TypeId::EXPAND_COMMON, "ExpandCommon", nullptr};
|
||||
|
||||
@ -154,4 +156,7 @@ constexpr utils::TypeInfo query::plan::IndexedJoin::kType{utils::TypeId::INDEXED
|
||||
|
||||
constexpr utils::TypeInfo query::plan::HashJoin::kType{utils::TypeId::HASH_JOIN, "HashJoin",
|
||||
&query::plan::LogicalOperator::kType};
|
||||
|
||||
constexpr utils::TypeInfo query::plan::RollUpApply::kType{utils::TypeId::ROLLUP_APPLY, "RollUpApply",
|
||||
&query::plan::LogicalOperator::kType};
|
||||
} // namespace memgraph
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include "query/plan/operator.hpp"
|
||||
#include "query/plan/preprocess.hpp"
|
||||
#include "query/plan/pretty_print.hpp"
|
||||
#include "query/plan/rewrite/edge_type_index_lookup.hpp"
|
||||
#include "query/plan/rewrite/index_lookup.hpp"
|
||||
#include "query/plan/rewrite/join.hpp"
|
||||
#include "query/plan/rule_based_planner.hpp"
|
||||
@ -54,8 +55,11 @@ class PostProcessor final {
|
||||
std::unique_ptr<LogicalOperator> Rewrite(std::unique_ptr<LogicalOperator> plan, TPlanningContext *context) {
|
||||
auto index_lookup_plan =
|
||||
RewriteWithIndexLookup(std::move(plan), context->symbol_table, context->ast_storage, context->db, index_hints_);
|
||||
return RewriteWithJoinRewriter(std::move(index_lookup_plan), context->symbol_table, context->ast_storage,
|
||||
context->db);
|
||||
auto join_plan =
|
||||
RewriteWithJoinRewriter(std::move(index_lookup_plan), context->symbol_table, context->ast_storage, context->db);
|
||||
auto edge_index_plan = RewriteWithEdgeTypeIndexRewriter(std::move(join_plan), context->symbol_table,
|
||||
context->ast_storage, context->db);
|
||||
return edge_index_plan;
|
||||
}
|
||||
|
||||
template <class TVertexCounts>
|
||||
|
@ -632,20 +632,20 @@ void AddMatching(const Match &match, SymbolTable &symbol_table, AstStorage &stor
|
||||
|
||||
// If there are any pattern filters, we add those as well
|
||||
for (auto &filter : matching.filters) {
|
||||
PatternFilterVisitor visitor(symbol_table, storage);
|
||||
PatternVisitor visitor(symbol_table, storage);
|
||||
|
||||
filter.expression->Accept(visitor);
|
||||
filter.matchings = visitor.getMatchings();
|
||||
filter.matchings = visitor.getFilterMatchings();
|
||||
}
|
||||
}
|
||||
|
||||
PatternFilterVisitor::PatternFilterVisitor(SymbolTable &symbol_table, AstStorage &storage)
|
||||
PatternVisitor::PatternVisitor(SymbolTable &symbol_table, AstStorage &storage)
|
||||
: symbol_table_(symbol_table), storage_(storage) {}
|
||||
PatternFilterVisitor::PatternFilterVisitor(const PatternFilterVisitor &) = default;
|
||||
PatternFilterVisitor::PatternFilterVisitor(PatternFilterVisitor &&) noexcept = default;
|
||||
PatternFilterVisitor::~PatternFilterVisitor() = default;
|
||||
PatternVisitor::PatternVisitor(const PatternVisitor &) = default;
|
||||
PatternVisitor::PatternVisitor(PatternVisitor &&) noexcept = default;
|
||||
PatternVisitor::~PatternVisitor() = default;
|
||||
|
||||
void PatternFilterVisitor::Visit(Exists &op) {
|
||||
void PatternVisitor::Visit(Exists &op) {
|
||||
std::vector<Pattern *> patterns;
|
||||
patterns.push_back(op.pattern_);
|
||||
|
||||
@ -655,10 +655,14 @@ void PatternFilterVisitor::Visit(Exists &op) {
|
||||
filter_matching.type = PatternFilterType::EXISTS;
|
||||
filter_matching.symbol = std::make_optional<Symbol>(symbol_table_.at(op));
|
||||
|
||||
matchings_.push_back(std::move(filter_matching));
|
||||
filter_matchings_.push_back(std::move(filter_matching));
|
||||
}
|
||||
|
||||
std::vector<FilterMatching> PatternFilterVisitor::getMatchings() { return matchings_; }
|
||||
std::vector<FilterMatching> PatternVisitor::getFilterMatchings() { return filter_matchings_; }
|
||||
|
||||
std::vector<PatternComprehensionMatching> PatternVisitor::getPatternComprehensionMatchings() {
|
||||
return pattern_comprehension_matchings_;
|
||||
}
|
||||
|
||||
static void ParseForeach(query::Foreach &foreach, SingleQueryPart &query_part, AstStorage &storage,
|
||||
SymbolTable &symbol_table) {
|
||||
@ -672,6 +676,30 @@ static void ParseForeach(query::Foreach &foreach, SingleQueryPart &query_part, A
|
||||
}
|
||||
}
|
||||
|
||||
static void ParseReturn(query::Return &ret, AstStorage &storage, SymbolTable &symbol_table,
|
||||
std::unordered_map<std::string, PatternComprehensionMatching> &matchings) {
|
||||
PatternVisitor visitor(symbol_table, storage);
|
||||
|
||||
for (auto *expr : ret.body_.named_expressions) {
|
||||
expr->Accept(visitor);
|
||||
auto pattern_comprehension_matchings = visitor.getPatternComprehensionMatchings();
|
||||
for (auto &matching : pattern_comprehension_matchings) {
|
||||
matchings.emplace(expr->name_, matching);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void PatternVisitor::Visit(NamedExpression &op) { op.expression_->Accept(*this); }
|
||||
|
||||
void PatternVisitor::Visit(PatternComprehension &op) {
|
||||
PatternComprehensionMatching matching;
|
||||
AddMatching({op.pattern_}, op.filter_, symbol_table_, storage_, matching);
|
||||
matching.result_expr = storage_.Create<NamedExpression>(symbol_table_.at(op).name(), op.resultExpr_);
|
||||
matching.result_expr->MapTo(symbol_table_.at(op));
|
||||
|
||||
pattern_comprehension_matchings_.push_back(std::move(matching));
|
||||
}
|
||||
|
||||
// Converts a Query to multiple QueryParts. In the process new Ast nodes may be
|
||||
// created, e.g. filter expressions.
|
||||
std::vector<SingleQueryPart> CollectSingleQueryParts(SymbolTable &symbol_table, AstStorage &storage,
|
||||
@ -703,7 +731,8 @@ std::vector<SingleQueryPart> CollectSingleQueryParts(SymbolTable &symbol_table,
|
||||
// This query part is done, continue with a new one.
|
||||
query_parts.emplace_back(SingleQueryPart{});
|
||||
query_part = &query_parts.back();
|
||||
} else if (utils::IsSubtype(*clause, Return::kType)) {
|
||||
} else if (auto *ret = utils::Downcast<Return>(clause)) {
|
||||
ParseReturn(*ret, storage, symbol_table, query_part->pattern_comprehension_matchings);
|
||||
return query_parts;
|
||||
}
|
||||
}
|
||||
|
@ -153,19 +153,20 @@ struct Expansion {
|
||||
ExpansionGroupId expansion_group_id = ExpansionGroupId();
|
||||
};
|
||||
|
||||
struct PatternComprehensionMatching;
|
||||
struct FilterMatching;
|
||||
|
||||
enum class PatternFilterType { EXISTS };
|
||||
|
||||
/// Collects matchings from filters that include patterns
|
||||
class PatternFilterVisitor : public ExpressionVisitor<void> {
|
||||
/// Collects matchings that include patterns
|
||||
class PatternVisitor : public ExpressionVisitor<void> {
|
||||
public:
|
||||
explicit PatternFilterVisitor(SymbolTable &symbol_table, AstStorage &storage);
|
||||
PatternFilterVisitor(const PatternFilterVisitor &);
|
||||
PatternFilterVisitor &operator=(const PatternFilterVisitor &) = delete;
|
||||
PatternFilterVisitor(PatternFilterVisitor &&) noexcept;
|
||||
PatternFilterVisitor &operator=(PatternFilterVisitor &&) noexcept = delete;
|
||||
~PatternFilterVisitor() override;
|
||||
explicit PatternVisitor(SymbolTable &symbol_table, AstStorage &storage);
|
||||
PatternVisitor(const PatternVisitor &);
|
||||
PatternVisitor &operator=(const PatternVisitor &) = delete;
|
||||
PatternVisitor(PatternVisitor &&) noexcept;
|
||||
PatternVisitor &operator=(PatternVisitor &&) noexcept = delete;
|
||||
~PatternVisitor() override;
|
||||
|
||||
using ExpressionVisitor<void>::Visit;
|
||||
|
||||
@ -233,18 +234,22 @@ class PatternFilterVisitor : public ExpressionVisitor<void> {
|
||||
void Visit(PropertyLookup &op) override{};
|
||||
void Visit(AllPropertiesLookup &op) override{};
|
||||
void Visit(ParameterLookup &op) override{};
|
||||
void Visit(NamedExpression &op) override{};
|
||||
void Visit(RegexMatch &op) override{};
|
||||
void Visit(PatternComprehension &op) override{};
|
||||
void Visit(NamedExpression &op) override;
|
||||
void Visit(PatternComprehension &op) override;
|
||||
|
||||
std::vector<FilterMatching> getMatchings();
|
||||
std::vector<FilterMatching> getFilterMatchings();
|
||||
std::vector<PatternComprehensionMatching> getPatternComprehensionMatchings();
|
||||
|
||||
SymbolTable &symbol_table_;
|
||||
AstStorage &storage_;
|
||||
|
||||
private:
|
||||
/// Collection of matchings in the filter expression being analyzed.
|
||||
std::vector<FilterMatching> matchings_;
|
||||
std::vector<FilterMatching> filter_matchings_;
|
||||
|
||||
/// Collection of matchings in the pattern comprehension being analyzed.
|
||||
std::vector<PatternComprehensionMatching> pattern_comprehension_matchings_;
|
||||
};
|
||||
|
||||
/// Stores the symbols and expression used to filter a property.
|
||||
@ -495,6 +500,11 @@ inline auto Filters::IdFilters(const Symbol &symbol) const -> std::vector<Filter
|
||||
return filters;
|
||||
}
|
||||
|
||||
struct PatternComprehensionMatching : Matching {
|
||||
/// Pattern comprehension result named expression
|
||||
NamedExpression *result_expr = nullptr;
|
||||
};
|
||||
|
||||
/// @brief Represents a read (+ write) part of a query. Parts are split on
|
||||
/// `WITH` clauses.
|
||||
///
|
||||
@ -537,6 +547,14 @@ struct SingleQueryPart {
|
||||
/// in the `remaining_clauses` but rather in the `Foreach` itself and are guaranteed
|
||||
/// to be processed in the same order by the semantics of the `RuleBasedPlanner`.
|
||||
std::vector<Matching> merge_matching{};
|
||||
|
||||
/// @brief @c NamedExpression name to @c PatternComprehensionMatching for each pattern comprehension.
///
/// Storing the normalized pattern of a @c PatternComprehension does not preclude storing the
/// @c PatternComprehension clause itself inside `remaining_clauses`. The reason is that we
/// need to have access to other parts of the clause, such as pattern, filter clauses.
std::unordered_map<std::string, PatternComprehensionMatching> pattern_comprehension_matchings{};
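For illustration, mirroring `ParseReturn` later in this change, each entry is keyed by the name of the RETURN item whose expression contains the pattern comprehension (identifier names below are placeholders):

// Hypothetical sketch: one entry per named RETURN expression containing a comprehension.
query_part.pattern_comprehension_matchings.emplace(named_expr->name_, std::move(matching));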
|
||||
|
||||
/// @brief All the remaining clauses (without @c Match).
|
||||
std::vector<Clause *> remaining_clauses{};
|
||||
/// The subqueries vector are all the subqueries in this query part ordered in a list by
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -76,6 +76,13 @@ bool PlanPrinter::PreVisit(ScanAllById &op) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PlanPrinter::PreVisit(query::plan::ScanAllByEdgeType &op) {
|
||||
op.dba_ = dba_;
|
||||
WithPrintLn([&op](auto &out) { out << "* " << op.ToString(); });
|
||||
op.dba_ = nullptr;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PlanPrinter::PreVisit(query::plan::Expand &op) {
|
||||
op.dba_ = dba_;
|
||||
WithPrintLn([&op](auto &out) { out << "* " << op.ToString(); });
|
||||
@ -143,6 +150,13 @@ bool PlanPrinter::PreVisit(query::plan::Union &op) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PlanPrinter::PreVisit(query::plan::RollUpApply &op) {
|
||||
WithPrintLn([&op](auto &out) { out << "* " << op.ToString(); });
|
||||
Branch(*op.list_collection_branch_);
|
||||
op.input_->Accept(*this);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PlanPrinter::PreVisit(query::plan::CallProcedure &op) {
|
||||
WithPrintLn([&op](auto &out) { out << "* " << op.ToString(); });
|
||||
return true;
|
||||
@ -457,6 +471,19 @@ bool PlanToJsonVisitor::PreVisit(ScanAllById &op) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PlanToJsonVisitor::PreVisit(ScanAllByEdgeType &op) {
|
||||
json self;
|
||||
self["name"] = "ScanAllByEdgeType";
|
||||
self["edge_type"] = ToJson(op.edge_type_, *dba_);
|
||||
self["output_symbol"] = ToJson(op.output_symbol_);
|
||||
|
||||
op.input_->Accept(*this);
|
||||
self["input"] = PopOutput();
|
||||
|
||||
output_ = std::move(self);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PlanToJsonVisitor::PreVisit(CreateNode &op) {
|
||||
json self;
|
||||
self["name"] = "CreateNode";
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -67,6 +67,7 @@ class PlanPrinter : public virtual HierarchicalLogicalOperatorVisitor {
|
||||
bool PreVisit(ScanAllByLabelPropertyRange &) override;
|
||||
bool PreVisit(ScanAllByLabelProperty &) override;
|
||||
bool PreVisit(ScanAllById &) override;
|
||||
bool PreVisit(ScanAllByEdgeType &) override;
|
||||
|
||||
bool PreVisit(Expand &) override;
|
||||
bool PreVisit(ExpandVariable &) override;
|
||||
@ -91,6 +92,7 @@ class PlanPrinter : public virtual HierarchicalLogicalOperatorVisitor {
|
||||
bool PreVisit(OrderBy &) override;
|
||||
bool PreVisit(Distinct &) override;
|
||||
bool PreVisit(Union &) override;
|
||||
bool PreVisit(RollUpApply &) override;
|
||||
|
||||
bool PreVisit(Unwind &) override;
|
||||
bool PreVisit(CallProcedure &) override;
|
||||
@ -203,6 +205,7 @@ class PlanToJsonVisitor : public virtual HierarchicalLogicalOperatorVisitor {
|
||||
bool PreVisit(ScanAllByLabelPropertyValue &) override;
|
||||
bool PreVisit(ScanAllByLabelProperty &) override;
|
||||
bool PreVisit(ScanAllById &) override;
|
||||
bool PreVisit(ScanAllByEdgeType &) override;
|
||||
|
||||
bool PreVisit(EmptyResult &) override;
|
||||
bool PreVisit(Produce &) override;
|
||||
534
src/query/plan/rewrite/edge_type_index_lookup.hpp
Normal file
@ -0,0 +1,534 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
/// @file
/// This file provides a plan rewriter which replaces `ScanAll` and `Expand`
/// operations with `ScanAllByEdgeType` if possible. The public entrypoint is
/// `RewriteWithEdgeTypeIndexRewriter`.
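A minimal usage sketch, assuming a symbol table, AST storage and a database accessor are available from the planning context (this mirrors the call added to PostProcessor::Rewrite elsewhere in this change); judging from the rewriter's conditions below, it targets plans for queries of the shape MATCH ()-[r:TYPE]->() RETURN r when an edge-type index exists:

// Hypothetical call site; `plan`, `symbol_table`, `ast_storage` and `db` are assumed to exist in the caller.
plan = RewriteWithEdgeTypeIndexRewriter(std::move(plan), &symbol_table, &ast_storage, db);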
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <algorithm>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
#include <vector>
|
||||
|
||||
#include <gflags/gflags.h>
|
||||
|
||||
#include "query/plan/operator.hpp"
|
||||
#include "query/plan/preprocess.hpp"
|
||||
#include "query/plan/rewrite/index_lookup.hpp"
|
||||
#include "utils/algorithm.hpp"
|
||||
|
||||
namespace memgraph::query::plan {
|
||||
|
||||
namespace impl {
|
||||
|
||||
template <class TDbAccessor>
|
||||
class EdgeTypeIndexRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
public:
|
||||
EdgeTypeIndexRewriter(SymbolTable *symbol_table, AstStorage *ast_storage, TDbAccessor *db)
|
||||
: symbol_table_(symbol_table), ast_storage_(ast_storage), db_(db) {}
|
||||
|
||||
using HierarchicalLogicalOperatorVisitor::PostVisit;
|
||||
using HierarchicalLogicalOperatorVisitor::PreVisit;
|
||||
using HierarchicalLogicalOperatorVisitor::Visit;
|
||||
|
||||
bool Visit(Once &) override { return true; }
|
||||
|
||||
bool PreVisit(Filter &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(Filter & /*op*/) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAll &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
|
||||
if (op.input()->GetTypeInfo() == Once::kType) {
|
||||
const bool is_node_anon = op.output_symbol_.IsSymbolAnonym();
|
||||
once_under_scanall_ = is_node_anon;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(ScanAll &op) override {
|
||||
prev_ops_.pop_back();
|
||||
|
||||
if (EdgeTypeIndexingPossible()) {
|
||||
SetOnParent(op.input());
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Expand &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
|
||||
if (op.input()->GetTypeInfo() == ScanAll::kType) {
|
||||
const bool only_one_edge_type = (op.common_.edge_types.size() == 1U);
|
||||
const bool expansion_is_named = !(op.common_.edge_symbol.IsSymbolAnonym());
|
||||
const bool expdanded_node_not_named = op.common_.node_symbol.IsSymbolAnonym();
|
||||
|
||||
edge_type_index_exist = only_one_edge_type ? db_->EdgeTypeIndexExists(op.common_.edge_types.front()) : false;
|
||||
|
||||
scanall_under_expand_ = only_one_edge_type && expansion_is_named && expdanded_node_not_named;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(Expand &op) override {
|
||||
prev_ops_.pop_back();
|
||||
|
||||
if (EdgeTypeIndexingPossible()) {
|
||||
auto indexed_scan = GenEdgeTypeScan(op);
|
||||
SetOnParent(std::move(indexed_scan));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ExpandVariable &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(ExpandVariable &expand) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Merge &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
op.input()->Accept(*this);
|
||||
RewriteBranch(&op.merge_match_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(Merge &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Optional &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
op.input()->Accept(*this);
|
||||
RewriteBranch(&op.optional_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(Optional &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Cartesian &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(Cartesian &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(IndexedJoin &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
RewriteBranch(&op.main_branch_);
|
||||
RewriteBranch(&op.sub_branch_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(IndexedJoin &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(HashJoin &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(HashJoin &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Union &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
RewriteBranch(&op.left_op_);
|
||||
RewriteBranch(&op.right_op_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(Union &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(CreateNode &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(CreateNode &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(CreateExpand &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(CreateExpand &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAllByLabel &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ScanAllByLabel &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAllByLabelPropertyRange &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ScanAllByLabelPropertyRange &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAllByLabelPropertyValue &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ScanAllByLabelPropertyValue &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAllByLabelProperty &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ScanAllByLabelProperty &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAllById &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ScanAllById &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAllByEdgeType &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ScanAllByEdgeType &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ConstructNamedPath &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ConstructNamedPath &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Produce &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
|
||||
if (op.input()->GetTypeInfo() == Expand::kType) {
|
||||
expand_under_produce_ = true;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Produce &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(EmptyResult &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(EmptyResult &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Delete &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Delete &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(SetProperty &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(SetProperty &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(SetProperties &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(SetProperties &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(SetLabels &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(SetLabels &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(RemoveProperty &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(RemoveProperty &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(RemoveLabels &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(RemoveLabels &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(EdgeUniquenessFilter &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(EdgeUniquenessFilter &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Accumulate &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Accumulate &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Aggregate &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Aggregate &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Skip &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Skip &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Limit &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Limit &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(OrderBy &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(OrderBy &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Unwind &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Unwind &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Distinct &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Distinct &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(CallProcedure &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(CallProcedure &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Foreach &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
op.input()->Accept(*this);
|
||||
RewriteBranch(&op.update_clauses_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(Foreach &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(EvaluatePatternFilter &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(EvaluatePatternFilter & /*op*/) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Apply &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
op.input()->Accept(*this);
|
||||
RewriteBranch(&op.subquery_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(Apply & /*op*/) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(LoadCsv &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(LoadCsv & /*op*/) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
std::shared_ptr<LogicalOperator> new_root_;
|
||||
|
||||
private:
|
||||
SymbolTable *symbol_table_;
|
||||
AstStorage *ast_storage_;
|
||||
TDbAccessor *db_;
|
||||
// Collected filters, pending for examination if they can be used for advanced
|
||||
// lookup operations (by index, node ID, ...).
|
||||
Filters filters_;
|
||||
// Expressions which no longer need a plain Filter operator.
|
||||
std::unordered_set<Expression *> filter_exprs_for_removal_;
|
||||
std::vector<LogicalOperator *> prev_ops_;
|
||||
std::unordered_set<Symbol> cartesian_symbols_;
|
||||
|
||||
bool EdgeTypeIndexingPossible() const {
|
||||
return expand_under_produce_ && scanall_under_expand_ && once_under_scanall_ && edge_type_index_exist;
|
||||
}
|
||||
bool expand_under_produce_ = false;
|
||||
bool scanall_under_expand_ = false;
|
||||
bool once_under_scanall_ = false;
|
||||
bool edge_type_index_exist = false;
|
||||
|
||||
bool DefaultPreVisit() override {
|
||||
throw utils::NotYetImplemented("Operator not yet covered by EdgeTypeIndexRewriter");
|
||||
}
|
||||
|
||||
std::unique_ptr<ScanAllByEdgeType> GenEdgeTypeScan(const Expand &expand) {
|
||||
const auto &input = expand.input();
|
||||
const auto &output_symbol = expand.common_.edge_symbol;
|
||||
const auto &view = expand.view_;
|
||||
|
||||
// Extract edge_type from symbol
|
||||
auto edge_type = expand.common_.edge_types.front();
|
||||
return std::make_unique<ScanAllByEdgeType>(input, output_symbol, edge_type, view);
|
||||
}
|
||||
|
||||
void SetOnParent(const std::shared_ptr<LogicalOperator> &input) {
|
||||
MG_ASSERT(input);
|
||||
if (prev_ops_.empty()) {
|
||||
MG_ASSERT(!new_root_);
|
||||
new_root_ = input;
|
||||
return;
|
||||
}
|
||||
prev_ops_.back()->set_input(input);
|
||||
}
|
||||
|
||||
void RewriteBranch(std::shared_ptr<LogicalOperator> *branch) {
|
||||
EdgeTypeIndexRewriter<TDbAccessor> rewriter(symbol_table_, ast_storage_, db_);
|
||||
(*branch)->Accept(rewriter);
|
||||
if (rewriter.new_root_) {
|
||||
*branch = rewriter.new_root_;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace impl
|
||||
|
||||
template <class TDbAccessor>
|
||||
std::unique_ptr<LogicalOperator> RewriteWithEdgeTypeIndexRewriter(std::unique_ptr<LogicalOperator> root_op,
|
||||
SymbolTable *symbol_table, AstStorage *ast_storage,
|
||||
TDbAccessor *db) {
|
||||
impl::EdgeTypeIndexRewriter<TDbAccessor> rewriter(symbol_table, ast_storage, db);
|
||||
root_op->Accept(rewriter);
|
||||
return root_op;
|
||||
}
|
||||
|
||||
} // namespace memgraph::query::plan
|
@ -595,6 +595,18 @@ class IndexLookupRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(RollUpApply &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
op.input()->Accept(*this);
|
||||
RewriteBranch(&op.list_collection_branch_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(RollUpApply &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
std::shared_ptr<LogicalOperator> new_root_;
|
||||
|
||||
private:
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -455,6 +455,18 @@ class JoinRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(RollUpApply &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
op.input()->Accept(*this);
|
||||
RewriteBranch(&op.list_collection_branch_);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PostVisit(RollUpApply &) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
|
||||
std::shared_ptr<LogicalOperator> new_root_;
|
||||
|
||||
private:
|
||||
|
@ -14,9 +14,12 @@
|
||||
#include <algorithm>
|
||||
#include <functional>
|
||||
#include <limits>
|
||||
#include <memory>
|
||||
#include <stack>
|
||||
#include <unordered_set>
|
||||
|
||||
#include "query/frontend/ast/ast.hpp"
|
||||
#include "query/plan/operator.hpp"
|
||||
#include "query/plan/preprocess.hpp"
|
||||
#include "utils/algorithm.hpp"
|
||||
#include "utils/exceptions.hpp"
|
||||
@ -40,7 +43,8 @@ namespace {
|
||||
class ReturnBodyContext : public HierarchicalTreeVisitor {
|
||||
public:
|
||||
ReturnBodyContext(const ReturnBody &body, SymbolTable &symbol_table, const std::unordered_set<Symbol> &bound_symbols,
|
||||
AstStorage &storage, Where *where = nullptr)
|
||||
AstStorage &storage, std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops,
|
||||
Where *where = nullptr)
|
||||
: body_(body), symbol_table_(symbol_table), bound_symbols_(bound_symbols), storage_(storage), where_(where) {
|
||||
// Collect symbols from named expressions.
|
||||
output_symbols_.reserve(body_.named_expressions.size());
|
||||
@ -53,6 +57,14 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
|
||||
output_symbols_.emplace_back(symbol_table_.at(*named_expr));
|
||||
named_expr->Accept(*this);
|
||||
named_expressions_.emplace_back(named_expr);
|
||||
if (pattern_comprehension_) {
|
||||
if (auto it = pc_ops.find(named_expr->name_); it != pc_ops.end()) {
|
||||
pattern_comprehension_op_ = std::move(it->second);
|
||||
pc_ops.erase(it);
|
||||
} else {
|
||||
throw utils::NotYetImplemented("Operation on top of pattern comprehension");
|
||||
}
|
||||
}
|
||||
}
|
||||
// Collect symbols used in group by expressions.
|
||||
if (!aggregations_.empty()) {
|
||||
@ -386,8 +398,20 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(PatternComprehension & /*unused*/) override {
|
||||
throw utils::NotYetImplemented("Planner can not handle pattern comprehension.");
|
||||
bool PreVisit(PatternComprehension & /*unused*/) override {
|
||||
pattern_compression_aggregations_start_index_ = has_aggregation_.size();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostVisit(PatternComprehension &pattern_comprehension) override {
|
||||
bool has_aggr = false;
|
||||
for (auto i = has_aggregation_.size(); i > pattern_compression_aggregations_start_index_; --i) {
|
||||
has_aggr |= has_aggregation_.back();
|
||||
has_aggregation_.pop_back();
|
||||
}
|
||||
has_aggregation_.emplace_back(has_aggr);
|
||||
pattern_comprehension_ = &pattern_comprehension;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Creates NamedExpression with an Identifier for each user declared symbol.
|
||||
@ -444,6 +468,10 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
|
||||
// named_expressions.
|
||||
const auto &output_symbols() const { return output_symbols_; }
|
||||
|
||||
const auto *pattern_comprehension() const { return pattern_comprehension_; }
|
||||
|
||||
std::shared_ptr<LogicalOperator> pattern_comprehension_op() const { return pattern_comprehension_op_; }
|
||||
|
||||
private:
|
||||
const ReturnBody &body_;
|
||||
SymbolTable &symbol_table_;
|
||||
@ -465,10 +493,13 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
|
||||
// group by it.
|
||||
std::list<bool> has_aggregation_;
|
||||
std::vector<NamedExpression *> named_expressions_;
|
||||
PatternComprehension *pattern_comprehension_ = nullptr;
|
||||
std::shared_ptr<LogicalOperator> pattern_comprehension_op_;
|
||||
size_t pattern_compression_aggregations_start_index_ = 0;
|
||||
};
|
||||
|
||||
std::unique_ptr<LogicalOperator> GenReturnBody(std::unique_ptr<LogicalOperator> input_op, bool advance_command,
|
||||
const ReturnBodyContext &body, bool accumulate = false) {
|
||||
const ReturnBodyContext &body, bool accumulate) {
|
||||
std::vector<Symbol> used_symbols(body.used_symbols().begin(), body.used_symbols().end());
|
||||
auto last_op = std::move(input_op);
|
||||
if (accumulate) {
|
||||
@ -482,6 +513,11 @@ std::unique_ptr<LogicalOperator> GenReturnBody(std::unique_ptr<LogicalOperator>
|
||||
std::vector<Symbol> remember(body.group_by_used_symbols().begin(), body.group_by_used_symbols().end());
|
||||
last_op = std::make_unique<Aggregate>(std::move(last_op), body.aggregations(), body.group_by(), remember);
|
||||
}
|
||||
|
||||
if (body.pattern_comprehension()) {
|
||||
last_op = std::make_unique<RollUpApply>(std::move(last_op), body.pattern_comprehension_op());
|
||||
}
|
||||
|
||||
last_op = std::make_unique<Produce>(std::move(last_op), body.named_expressions());
|
||||
// Distinct in ReturnBody only makes Produce values unique, so plan after it.
|
||||
if (body.distinct()) {
|
||||
@ -506,6 +542,7 @@ std::unique_ptr<LogicalOperator> GenReturnBody(std::unique_ptr<LogicalOperator>
|
||||
last_op = std::make_unique<Filter>(std::move(last_op), std::vector<std::shared_ptr<LogicalOperator>>{},
|
||||
body.where()->expression_);
|
||||
}
|
||||
|
||||
return last_op;
|
||||
}
|
||||
|
||||
@ -543,8 +580,9 @@ Expression *ExtractFilters(const std::unordered_set<Symbol> &bound_symbols, Filt
|
||||
return filter_expr;
|
||||
}
|
||||
|
||||
std::unordered_set<Symbol> GetSubqueryBoundSymbols(const std::vector<SingleQueryPart> &single_query_parts,
|
||||
SymbolTable &symbol_table, AstStorage &storage) {
|
||||
std::unordered_set<Symbol> GetSubqueryBoundSymbols(
|
||||
const std::vector<SingleQueryPart> &single_query_parts, SymbolTable &symbol_table, AstStorage &storage,
|
||||
std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops) {
|
||||
const auto &query = single_query_parts[0];
|
||||
|
||||
if (!query.matching.expansions.empty() || query.remaining_clauses.empty()) {
|
||||
@ -552,7 +590,7 @@ std::unordered_set<Symbol> GetSubqueryBoundSymbols(const std::vector<SingleQuery
|
||||
}
|
||||
|
||||
if (std::unordered_set<Symbol> bound_symbols; auto *with = utils::Downcast<query::With>(query.remaining_clauses[0])) {
|
||||
auto input_op = impl::GenWith(*with, nullptr, symbol_table, false, bound_symbols, storage);
|
||||
auto input_op = impl::GenWith(*with, nullptr, symbol_table, false, bound_symbols, storage, pc_ops);
|
||||
return bound_symbols;
|
||||
}
|
||||
|
||||
@ -583,7 +621,8 @@ std::unique_ptr<LogicalOperator> GenNamedPaths(std::unique_ptr<LogicalOperator>
|
||||
|
||||
std::unique_ptr<LogicalOperator> GenReturn(Return &ret, std::unique_ptr<LogicalOperator> input_op,
|
||||
SymbolTable &symbol_table, bool is_write,
|
||||
const std::unordered_set<Symbol> &bound_symbols, AstStorage &storage) {
|
||||
const std::unordered_set<Symbol> &bound_symbols, AstStorage &storage,
|
||||
std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops) {
|
||||
// Similar to WITH clause, but we want to accumulate when the query writes to
|
||||
// the database. This way we handle the case when we want to return
|
||||
// expressions with the latest updated results. For example, `MATCH (n) -- ()
|
||||
@ -592,13 +631,14 @@ std::unique_ptr<LogicalOperator> GenReturn(Return &ret, std::unique_ptr<LogicalO
|
||||
// final result of 'k' increments.
|
||||
bool accumulate = is_write;
|
||||
bool advance_command = false;
|
||||
ReturnBodyContext body(ret.body_, symbol_table, bound_symbols, storage);
|
||||
ReturnBodyContext body(ret.body_, symbol_table, bound_symbols, storage, pc_ops);
|
||||
return GenReturnBody(std::move(input_op), advance_command, body, accumulate);
|
||||
}
|
||||
|
||||
std::unique_ptr<LogicalOperator> GenWith(With &with, std::unique_ptr<LogicalOperator> input_op,
|
||||
SymbolTable &symbol_table, bool is_write,
|
||||
std::unordered_set<Symbol> &bound_symbols, AstStorage &storage) {
|
||||
std::unordered_set<Symbol> &bound_symbols, AstStorage &storage,
|
||||
std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops) {
|
||||
// WITH clause is Accumulate/Aggregate (advance_command) + Produce and
|
||||
// optional Filter. In case of update and aggregation, we want to accumulate
|
||||
// first, so that when aggregating, we get the latest results. Similar to
|
||||
@ -606,7 +646,7 @@ std::unique_ptr<LogicalOperator> GenWith(With &with, std::unique_ptr<LogicalOper
|
||||
bool accumulate = is_write;
|
||||
// No need to advance the command if we only performed reads.
|
||||
bool advance_command = is_write;
|
||||
ReturnBodyContext body(with.body_, symbol_table, bound_symbols, storage, with.where_);
|
||||
ReturnBodyContext body(with.body_, symbol_table, bound_symbols, storage, pc_ops, with.where_);
|
||||
auto last_op = GenReturnBody(std::move(input_op), advance_command, body, accumulate);
|
||||
// Reset bound symbols, so that only those in WITH are exposed.
|
||||
bound_symbols.clear();
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include "query/frontend/ast/ast_visitor.hpp"
|
||||
#include "query/plan/operator.hpp"
|
||||
#include "query/plan/preprocess.hpp"
|
||||
#include "utils/exceptions.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
#include "utils/typeinfo.hpp"
|
||||
|
||||
@ -87,8 +88,9 @@ bool HasBoundFilterSymbols(const std::unordered_set<Symbol> &bound_symbols, cons
|
||||
|
||||
// Returns the set of symbols for the subquery that are actually referenced from the outer scope and
|
||||
// used in the subquery.
|
||||
std::unordered_set<Symbol> GetSubqueryBoundSymbols(const std::vector<SingleQueryPart> &single_query_parts,
|
||||
SymbolTable &symbol_table, AstStorage &storage);
|
||||
std::unordered_set<Symbol> GetSubqueryBoundSymbols(
|
||||
const std::vector<SingleQueryPart> &single_query_parts, SymbolTable &symbol_table, AstStorage &storage,
|
||||
std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops);
|
||||
|
||||
Symbol GetSymbol(NodeAtom *atom, const SymbolTable &symbol_table);
|
||||
Symbol GetSymbol(EdgeAtom *atom, const SymbolTable &symbol_table);
|
||||
@ -142,11 +144,13 @@ std::unique_ptr<LogicalOperator> GenNamedPaths(std::unique_ptr<LogicalOperator>
|
||||
|
||||
std::unique_ptr<LogicalOperator> GenReturn(Return &ret, std::unique_ptr<LogicalOperator> input_op,
|
||||
SymbolTable &symbol_table, bool is_write,
|
||||
const std::unordered_set<Symbol> &bound_symbols, AstStorage &storage);
|
||||
const std::unordered_set<Symbol> &bound_symbols, AstStorage &storage,
|
||||
std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops);
|
||||
|
||||
std::unique_ptr<LogicalOperator> GenWith(With &with, std::unique_ptr<LogicalOperator> input_op,
|
||||
SymbolTable &symbol_table, bool is_write,
|
||||
std::unordered_set<Symbol> &bound_symbols, AstStorage &storage);
|
||||
std::unordered_set<Symbol> &bound_symbols, AstStorage &storage,
|
||||
std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops);
|
||||
|
||||
std::unique_ptr<LogicalOperator> GenUnion(const CypherUnion &cypher_union, std::shared_ptr<LogicalOperator> left_op,
|
||||
std::shared_ptr<LogicalOperator> right_op, SymbolTable &symbol_table);
|
||||
@ -190,11 +194,24 @@ class RuleBasedPlanner {
|
||||
uint64_t merge_id = 0;
|
||||
uint64_t subquery_id = 0;
|
||||
|
||||
std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pattern_comprehension_ops;
|
||||
|
||||
if (single_query_part.pattern_comprehension_matchings.size() > 1) {
|
||||
throw utils::NotYetImplemented("Multiple pattern comprehensions.");
|
||||
}
|
||||
for (const auto &matching : single_query_part.pattern_comprehension_matchings) {
|
||||
std::unique_ptr<LogicalOperator> new_input;
|
||||
MatchContext match_ctx{matching.second, *context.symbol_table, context.bound_symbols};
|
||||
new_input = PlanMatching(match_ctx, std::move(new_input));
|
||||
new_input = std::make_unique<Produce>(std::move(new_input), std::vector{matching.second.result_expr});
|
||||
pattern_comprehension_ops.emplace(matching.first, std::move(new_input));
|
||||
}
|
||||
|
||||
for (const auto &clause : single_query_part.remaining_clauses) {
|
||||
MG_ASSERT(!utils::IsSubtype(*clause, Match::kType), "Unexpected Match in remaining clauses");
|
||||
if (auto *ret = utils::Downcast<Return>(clause)) {
|
||||
input_op = impl::GenReturn(*ret, std::move(input_op), *context.symbol_table, context.is_write_query,
|
||||
context.bound_symbols, *context.ast_storage);
|
||||
context.bound_symbols, *context.ast_storage, pattern_comprehension_ops);
|
||||
} else if (auto *merge = utils::Downcast<query::Merge>(clause)) {
|
||||
input_op = GenMerge(*merge, std::move(input_op), single_query_part.merge_matching[merge_id++]);
|
||||
// Treat MERGE clause as write, because we do not know if it will
|
||||
@ -202,7 +219,7 @@ class RuleBasedPlanner {
|
||||
context.is_write_query = true;
|
||||
} else if (auto *with = utils::Downcast<query::With>(clause)) {
|
||||
input_op = impl::GenWith(*with, std::move(input_op), *context.symbol_table, context.is_write_query,
|
||||
context.bound_symbols, *context.ast_storage);
|
||||
context.bound_symbols, *context.ast_storage, pattern_comprehension_ops);
|
||||
// WITH clause advances the command, so reset the flag.
|
||||
context.is_write_query = false;
|
||||
} else if (auto op = HandleWriteClause(clause, input_op, *context.symbol_table, context.bound_symbols)) {
|
||||
@ -241,7 +258,7 @@ class RuleBasedPlanner {
|
||||
single_query_part, merge_id);
|
||||
} else if (auto *call_sub = utils::Downcast<query::CallSubquery>(clause)) {
|
||||
input_op = HandleSubquery(std::move(input_op), single_query_part.subqueries[subquery_id++],
|
||||
*context.symbol_table, *context_->ast_storage);
|
||||
*context.symbol_table, *context_->ast_storage, pattern_comprehension_ops);
|
||||
} else {
|
||||
throw utils::NotYetImplemented("clause '{}' conversion to operator(s)", clause->GetTypeInfo().name);
|
||||
}
|
||||
@ -860,15 +877,15 @@ class RuleBasedPlanner {
|
||||
symbol);
|
||||
}
|
||||
|
||||
std::unique_ptr<LogicalOperator> HandleSubquery(std::unique_ptr<LogicalOperator> last_op,
|
||||
std::shared_ptr<QueryParts> subquery, SymbolTable &symbol_table,
|
||||
AstStorage &storage) {
|
||||
std::unique_ptr<LogicalOperator> HandleSubquery(
|
||||
std::unique_ptr<LogicalOperator> last_op, std::shared_ptr<QueryParts> subquery, SymbolTable &symbol_table,
|
||||
AstStorage &storage, std::unordered_map<std::string, std::shared_ptr<LogicalOperator>> pc_ops) {
|
||||
std::unordered_set<Symbol> outer_scope_bound_symbols;
|
||||
outer_scope_bound_symbols.insert(std::make_move_iterator(context_->bound_symbols.begin()),
|
||||
std::make_move_iterator(context_->bound_symbols.end()));
|
||||
|
||||
context_->bound_symbols =
|
||||
impl::GetSubqueryBoundSymbols(subquery->query_parts[0].single_query_parts, symbol_table, storage);
|
||||
impl::GetSubqueryBoundSymbols(subquery->query_parts[0].single_query_parts, symbol_table, storage, pc_ops);
|
||||
|
||||
auto subquery_op = Plan(*subquery);
|
||||
|
||||
|
@ -78,6 +78,8 @@ class VertexCountCache {
|
||||
return db_->LabelPropertyIndexExists(label, property);
|
||||
}
|
||||
|
||||
bool EdgeTypeIndexExists(storage::EdgeTypeId edge_type) { return db_->EdgeTypeIndexExists(edge_type); }
|
||||
|
||||
std::optional<storage::LabelIndexStats> GetIndexStats(const storage::LabelId &label) const {
|
||||
return db_->GetIndexStats(label);
|
||||
}
|
||||
|
@ -3798,7 +3798,7 @@ void PrintFuncSignature(const mgp_func &func, std::ostream &stream) {
|
||||
|
||||
bool IsValidIdentifierName(const char *name) {
|
||||
if (!name) return false;
|
||||
std::regex regex("[_[:alpha:]][_[:alnum:]]*");
|
||||
static std::regex regex("[_[:alpha:]][_[:alnum:]]*");
|
||||
return std::regex_match(name, regex);
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -11,6 +11,8 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "replication/replication_client.hpp"
|
||||
#include "replication_coordination_glue/mode.hpp"
|
||||
#include "replication_coordination_glue/role.hpp"
|
||||
#include "utils/result.hpp"
|
||||
#include "utils/uuid.hpp"
|
||||
@ -31,6 +33,7 @@ enum class RegisterReplicaError : uint8_t {
|
||||
COULD_NOT_BE_PERSISTED,
|
||||
ERROR_ACCEPTING_MAIN
|
||||
};
|
||||
|
||||
enum class UnregisterReplicaResult : uint8_t {
|
||||
NOT_MAIN,
|
||||
COULD_NOT_BE_PERSISTED,
|
||||
@ -38,6 +41,47 @@ enum class UnregisterReplicaResult : uint8_t {
|
||||
SUCCESS,
|
||||
};
|
||||
|
||||
enum class ShowReplicaError : uint8_t {
|
||||
NOT_MAIN,
|
||||
};
|
||||
|
||||
struct ReplicaSystemInfoState {
|
||||
uint64_t ts_;
|
||||
uint64_t behind_;
|
||||
replication::ReplicationClient::State state_;
|
||||
};
|
||||
|
||||
struct ReplicaInfoState {
|
||||
ReplicaInfoState(uint64_t ts, uint64_t behind, storage::replication::ReplicaState state)
|
||||
: ts_(ts), behind_(behind), state_(state) {}
|
||||
|
||||
uint64_t ts_;
|
||||
uint64_t behind_;
|
||||
storage::replication::ReplicaState state_;
|
||||
};
|
||||
|
||||
struct ReplicasInfo {
|
||||
ReplicasInfo(std::string name, std::string socket_address, replication_coordination_glue::ReplicationMode sync_mode,
|
||||
ReplicaSystemInfoState system_info, std::map<std::string, ReplicaInfoState> data_info)
|
||||
: name_(std::move(name)),
|
||||
socket_address_(std::move(socket_address)),
|
||||
sync_mode_(sync_mode),
|
||||
system_info_(std::move(system_info)),
|
||||
data_info_(std::move(data_info)) {}
|
||||
|
||||
std::string name_;
|
||||
std::string socket_address_;
|
||||
memgraph::replication_coordination_glue::ReplicationMode sync_mode_;
|
||||
ReplicaSystemInfoState system_info_;
|
||||
std::map<std::string, ReplicaInfoState> data_info_;
|
||||
};
|
||||
|
||||
struct ReplicasInfos {
|
||||
explicit ReplicasInfos(std::vector<ReplicasInfo> entries) : entries_(std::move(entries)) {}
|
||||
|
||||
std::vector<ReplicasInfo> entries_;
|
||||
};
|
||||
|
||||
/// A handler type that keep in sync current ReplicationState and the MAIN/REPLICA-ness of Storage
|
||||
struct ReplicationQueryHandler {
|
||||
virtual ~ReplicationQueryHandler() = default;
|
||||
@ -66,6 +110,8 @@ struct ReplicationQueryHandler {
|
||||
virtual auto GetRole() const -> memgraph::replication_coordination_glue::ReplicationRole = 0;
|
||||
virtual bool IsMain() const = 0;
|
||||
virtual bool IsReplica() const = 0;
|
||||
|
||||
virtual auto ShowReplicas() const -> utils::BasicResult<ShowReplicaError, ReplicasInfos> = 0;
|
||||
};
|
||||
|
||||
} // namespace memgraph::query
|
||||
|
@ -14,7 +14,9 @@
|
||||
#include "replication/config.hpp"
|
||||
#include "replication_coordination_glue/messages.hpp"
|
||||
#include "rpc/client.hpp"
|
||||
#include "utils/rw_lock.hpp"
|
||||
#include "utils/scheduler.hpp"
|
||||
#include "utils/spin_lock.hpp"
|
||||
#include "utils/synchronized.hpp"
|
||||
#include "utils/thread_pool.hpp"
|
||||
|
||||
@ -114,8 +116,9 @@ struct ReplicationClient {
|
||||
enum class State {
|
||||
BEHIND,
|
||||
READY,
|
||||
RECOVERY,
|
||||
};
|
||||
utils::Synchronized<State> state_{State::BEHIND};
|
||||
utils::Synchronized<State, utils::WritePrioritizedRWLock> state_{State::BEHIND};
|
||||
|
||||
replication_coordination_glue::ReplicationMode mode_{replication_coordination_glue::ReplicationMode::SYNC};
|
||||
// This thread pool is used for background tasks so we don't
|
||||
|
@ -7,6 +7,7 @@ target_sources(mg-repl_coord_glue
|
||||
mode.hpp
|
||||
role.hpp
|
||||
handler.hpp
|
||||
common.hpp
|
||||
|
||||
PRIVATE
|
||||
messages.cpp
|
||||
32
src/replication_coordination_glue/common.hpp
Normal file
@ -0,0 +1,32 @@
|
||||
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#include "rpc/client.hpp"
#include "utils/uuid.hpp"

#include <deque>
#include "messages.hpp"
#include "rpc/messages.hpp"
#include "utils/uuid.hpp"

namespace memgraph::replication_coordination_glue {

struct DatabaseHistory {
  memgraph::utils::UUID db_uuid;
  std::vector<std::pair<std::string, uint64_t>> history;
  std::string name;
};

using DatabaseHistories = std::vector<DatabaseHistory>;

} // namespace memgraph::replication_coordination_glue
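
A brief, illustrative sketch of how a DatabaseHistories value is shaped; GetDatabasesHistories() in the handler below fills it with one (epoch id, last commit timestamp) list per database. The concrete values here are hypothetical:

// Assumes the declarations above; in the real code the pairs come from repl_storage_state.history.
memgraph::replication_coordination_glue::DatabaseHistory entry{
    .db_uuid = memgraph::utils::UUID{},
    .history = {{"epoch-1", 100}, {"epoch-2", 250}},
    .name = "memgraph"};
memgraph::replication_coordination_glue::DatabaseHistories histories{entry};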
@@ -12,7 +12,19 @@
#pragma once

#include <cstdint>
#include <map>
#include <stdexcept>
#include <string>

#include "json/json.hpp"

namespace memgraph::replication_coordination_glue {

enum class ReplicationMode : std::uint8_t { SYNC, ASYNC };

NLOHMANN_JSON_SERIALIZE_ENUM(ReplicationMode, {
                                 {ReplicationMode::SYNC, "sync"},
                                 {ReplicationMode::ASYNC, "async"},
                             })

} // namespace memgraph::replication_coordination_glue
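
The NLOHMANN_JSON_SERIALIZE_ENUM mapping makes the enum round-trip through its string names. A minimal sketch of the behaviour this buys, using standard nlohmann::json calls; the include path for the enum header is assumed, everything else follows directly from the macro above:

#include <cassert>
#include <string>

#include "json/json.hpp"                          // nlohmann::json, as used in this file
#include "replication_coordination_glue/mode.hpp"  // assumed include path for the header above

int main() {
  using memgraph::replication_coordination_glue::ReplicationMode;
  nlohmann::json j = ReplicationMode::ASYNC;                   // serializes to the string "async"
  assert(j.get<std::string>() == "async");
  auto mode = nlohmann::json("sync").get<ReplicationMode>();   // parses back to SYNC
  assert(mode == ReplicationMode::SYNC);
  return 0;
}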
@@ -12,8 +12,14 @@
#pragma once

#include <cstdint>

#include "json/json.hpp"

namespace memgraph::replication_coordination_glue {

// TODO: figure out a way of ensuring that usage of this type is never uninitialized/defaulted incorrectly to MAIN
enum class ReplicationRole : uint8_t { MAIN, REPLICA };

NLOHMANN_JSON_SERIALIZE_ENUM(ReplicationRole, {{ReplicationRole::MAIN, "main"}, {ReplicationRole::REPLICA, "replica"}})

} // namespace memgraph::replication_coordination_glue
@@ -14,6 +14,7 @@
#include "dbms/dbms_handler.hpp"
#include "flags/experimental.hpp"
#include "replication/include/replication/state.hpp"
#include "replication_coordination_glue/common.hpp"
#include "replication_handler/system_replication.hpp"
#include "replication_handler/system_rpc.hpp"
#include "utils/result.hpp"
@@ -39,10 +40,12 @@ void SystemRestore(replication::ReplicationClient &client, system::System &syste
                   const utils::UUID &main_uuid, auth::SynchedAuth &auth) {
  // Check if system is up to date
  if (client.state_.WithLock(
          [](auto &state) { return state == memgraph::replication::ReplicationClient::State::READY; }))
          [](auto &state) { return state != memgraph::replication::ReplicationClient::State::BEHIND; }))
    return;

  // Try to recover...
  client.state_.WithLock(
      [](auto &state) { return state != memgraph::replication::ReplicationClient::State::RECOVERY; });
  {
    using enum memgraph::flags::Experiments;
    bool full_system_replication =
@@ -139,11 +142,16 @@ struct ReplicationHandler : public memgraph::query::ReplicationQueryHandler {
  bool IsMain() const override;
  bool IsReplica() const override;

  auto ShowReplicas() const
      -> utils::BasicResult<memgraph::query::ShowReplicaError, memgraph::query::ReplicasInfos> override;

  auto GetReplState() const -> const memgraph::replication::ReplicationState &;
  auto GetReplState() -> memgraph::replication::ReplicationState &;

  auto GetReplicaUUID() -> std::optional<utils::UUID>;

  auto GetDatabasesHistories() -> replication_coordination_glue::DatabaseHistories;

 private:
  template <bool SendSwapUUID>
  auto RegisterReplica_(const memgraph::replication::ReplicationClientConfig &config)
@@ -202,8 +210,13 @@ struct ReplicationHandler : public memgraph::query::ReplicationQueryHandler {
    auto client = std::make_unique<storage::ReplicationStorageClient>(*instance_client_ptr, main_uuid);
    client->Start(storage, std::move(db_acc));
    bool const success = std::invoke([state = client->State()]() {
      // We force sync replicas in other situations
      if (state == storage::replication::ReplicaState::DIVERGED_FROM_MAIN) {
#ifdef MG_ENTERPRISE
        return FLAGS_coordinator_server_port != 0;
#else
        return false;
#endif
      }
      return true;
    });
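
The `bool const success = std::invoke(...)` above is a plain immediately-invoked lambda used to initialise a const value from branching logic. A self-contained illustration of the same idiom in standard C++, deliberately using no Memgraph types:

#include <functional>

int main() {
  int const replica_state = 2;  // stand-in for the captured replica state
  bool const success = std::invoke([replica_state]() {
    if (replica_state == 0) return false;  // e.g. a diverged replica
    return true;                           // every other state is acceptable
  });
  return success ? 0 : 1;
}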
@@ -10,26 +10,29 @@
// licenses/APL.txt.

#include "replication_handler/replication_handler.hpp"
#include "dbms/constants.hpp"
#include "dbms/dbms_handler.hpp"
#include "replication/replication_client.hpp"
#include "replication_handler/system_replication.hpp"
#include "utils/functional.hpp"

namespace memgraph::replication {

namespace {
#ifdef MG_ENTERPRISE
void RecoverReplication(memgraph::replication::ReplicationState &repl_state, memgraph::system::System &system,
                        memgraph::dbms::DbmsHandler &dbms_handler, memgraph::auth::SynchedAuth &auth) {
void RecoverReplication(replication::ReplicationState &repl_state, system::System &system,
                        dbms::DbmsHandler &dbms_handler, auth::SynchedAuth &auth) {
  /*
   * REPLICATION RECOVERY AND STARTUP
   */

  // Startup replication state (if recovered at startup)
  auto replica = [&dbms_handler, &auth, &system](memgraph::replication::RoleReplicaData &data) {
    return memgraph::replication::StartRpcServer(dbms_handler, data, auth, system);
  auto replica = [&dbms_handler, &auth, &system](replication::RoleReplicaData &data) {
    return replication::StartRpcServer(dbms_handler, data, auth, system);
  };

  // Replication recovery and frequent check start
  auto main = [&system, &dbms_handler, &auth](memgraph::replication::RoleMainData &mainData) {
  auto main = [&system, &dbms_handler, &auth](replication::RoleMainData &mainData) {
    for (auto &client : mainData.registered_replicas_) {
      if (client.try_set_uuid &&
          replication_coordination_glue::SendSwapMainUUIDRpc(client.rpc_client_, mainData.uuid_)) {
@@ -38,7 +41,7 @@ void RecoverReplication(memgraph::replication::ReplicationState &repl_state, mem
      SystemRestore(client, system, dbms_handler, mainData.uuid_, auth);
    }
    // DBMS here
    dbms_handler.ForEach([&mainData](memgraph::dbms::DatabaseAccess db_acc) {
    dbms_handler.ForEach([&mainData](dbms::DatabaseAccess db_acc) {
      dbms::DbmsHandler::RecoverStorageReplication(std::move(db_acc), mainData);
    });

@@ -48,7 +51,7 @@ void RecoverReplication(memgraph::replication::ReplicationState &repl_state, mem

    // Warning
    if (dbms_handler.default_config().durability.snapshot_wal_mode ==
        memgraph::storage::Config::Durability::SnapshotWalMode::DISABLED) {
        storage::Config::Durability::SnapshotWalMode::DISABLED) {
      spdlog::warn(
          "The instance has the MAIN replication role, but durability logs and snapshots are disabled. Please "
          "consider "
@@ -59,19 +62,18 @@ void RecoverReplication(memgraph::replication::ReplicationState &repl_state, mem
    return true;
  };

  auto result = std::visit(memgraph::utils::Overloaded{replica, main}, repl_state.ReplicationData());
  auto result = std::visit(utils::Overloaded{replica, main}, repl_state.ReplicationData());
  MG_ASSERT(result, "Replica recovery failure!");
}
#else
void RecoverReplication(memgraph::replication::ReplicationState &repl_state,
                        memgraph::dbms::DbmsHandler &dbms_handler) {
void RecoverReplication(replication::ReplicationState &repl_state, dbms::DbmsHandler &dbms_handler) {
  // Startup replication state (if recovered at startup)
  auto replica = [&dbms_handler](memgraph::replication::RoleReplicaData &data) {
    return memgraph::replication::StartRpcServer(dbms_handler, data);
  auto replica = [&dbms_handler](replication::RoleReplicaData &data) {
    return replication::StartRpcServer(dbms_handler, data);
  };

  // Replication recovery and frequent check start
  auto main = [&dbms_handler](memgraph::replication::RoleMainData &mainData) {
  auto main = [&dbms_handler](replication::RoleMainData &mainData) {
    dbms::DbmsHandler::RecoverStorageReplication(dbms_handler.Get(), mainData);

    for (auto &client : mainData.registered_replicas_) {
@@ -79,12 +81,12 @@ void RecoverReplication(memgraph::replication::ReplicationState &repl_state,
          replication_coordination_glue::SendSwapMainUUIDRpc(client.rpc_client_, mainData.uuid_)) {
        client.try_set_uuid = false;
      }
      memgraph::replication::StartReplicaClient(client, dbms_handler, mainData.uuid_);
      replication::StartReplicaClient(client, dbms_handler, mainData.uuid_);
    }

    // Warning
    if (dbms_handler.default_config().durability.snapshot_wal_mode ==
        memgraph::storage::Config::Durability::SnapshotWalMode::DISABLED) {
        storage::Config::Durability::SnapshotWalMode::DISABLED) {
      spdlog::warn(
          "The instance has the MAIN replication role, but durability logs and snapshots are disabled. Please "
          "consider "
@@ -95,7 +97,7 @@ void RecoverReplication(memgraph::replication::ReplicationState &repl_state,
    return true;
  };

  auto result = std::visit(memgraph::utils::Overloaded{replica, main}, repl_state.ReplicationData());
  auto result = std::visit(utils::Overloaded{replica, main}, repl_state.ReplicationData());
  MG_ASSERT(result, "Replica recovery failure!");
}
#endif
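
Both RecoverReplication variants dispatch on the current role with std::visit over an Overloaded set of lambdas. A self-contained illustration of that dispatch pattern; memgraph::utils::Overloaded is assumed to be the usual deduction-guide helper, reproduced here locally so the example compiles on its own:

#include <variant>

struct RoleMainData {};     // stand-ins for the real role data types
struct RoleReplicaData {};

template <class... Ts>
struct Overloaded : Ts... {
  using Ts::operator()...;
};
template <class... Ts>
Overloaded(Ts...) -> Overloaded<Ts...>;

int main() {
  std::variant<RoleMainData, RoleReplicaData> role_data = RoleReplicaData{};
  // One callable per alternative; std::visit picks the matching one.
  bool const ok = std::visit(Overloaded{[](RoleMainData &) { return true; },
                                        [](RoleReplicaData &) { return true; }},
                             role_data);
  return ok ? 0 : 1;
}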
@@ -133,20 +135,19 @@ void StartReplicaClient(replication::ReplicationClient &client, dbms::DbmsHandle
  spdlog::trace("Replication client started at: {}:{}", endpoint.address, endpoint.port);
  client.StartFrequentCheck([&, license = license::global_license_checker.IsEnterpriseValidFast(), main_uuid](
                                bool reconnect, replication::ReplicationClient &client) mutable {
    if (client.try_set_uuid &&
        memgraph::replication_coordination_glue::SendSwapMainUUIDRpc(client.rpc_client_, main_uuid)) {
    if (client.try_set_uuid && replication_coordination_glue::SendSwapMainUUIDRpc(client.rpc_client_, main_uuid)) {
      client.try_set_uuid = false;
    }
    // Working connection
    // Check if system needs restoration
    if (reconnect) {
      client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; });
      client.state_.WithLock([](auto &state) { state = replication::ReplicationClient::State::BEHIND; });
    }
    // Check if license has changed
    const auto new_license = license::global_license_checker.IsEnterpriseValidFast();
    if (new_license != license) {
      license = new_license;
      client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; });
      client.state_.WithLock([](auto &state) { state = replication::ReplicationClient::State::BEHIND; });
    }
#ifdef MG_ENTERPRISE
    SystemRestore<true>(client, system, dbms_handler, main_uuid, auth);
@@ -154,10 +155,10 @@ void StartReplicaClient(replication::ReplicationClient &client, dbms::DbmsHandle
    // Check if any database has been left behind
    dbms_handler.ForEach([&name = client.name_, reconnect](dbms::DatabaseAccess db_acc) {
      // Specific database <-> replica client
      db_acc->storage()->repl_storage_state_.WithClient(name, [&](storage::ReplicationStorageClient *client) {
        if (reconnect || client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) {
      db_acc->storage()->repl_storage_state_.WithClient(name, [&](storage::ReplicationStorageClient &client) {
        if (reconnect || client.State() == storage::replication::ReplicaState::MAYBE_BEHIND) {
          // Database <-> replica might be behind, check and recover
          client->TryCheckReplicaStateAsync(db_acc->storage(), db_acc);
          client.TryCheckReplicaStateAsync(db_acc->storage(), db_acc);
        }
      });
    });
@@ -165,9 +166,8 @@
}

#ifdef MG_ENTERPRISE
ReplicationHandler::ReplicationHandler(memgraph::replication::ReplicationState &repl_state,
                                       memgraph::dbms::DbmsHandler &dbms_handler, memgraph::system::System &system,
                                       memgraph::auth::SynchedAuth &auth)
ReplicationHandler::ReplicationHandler(replication::ReplicationState &repl_state, dbms::DbmsHandler &dbms_handler,
                                       system::System &system, auth::SynchedAuth &auth)
    : repl_state_{repl_state}, dbms_handler_{dbms_handler}, system_{system}, auth_{auth} {
  RecoverReplication(repl_state_, system_, dbms_handler_, auth_);
}
@@ -179,20 +179,20 @@ ReplicationHandler::ReplicationHandler(replication::ReplicationState &repl_state
#endif

bool ReplicationHandler::SetReplicationRoleMain() {
  auto const main_handler = [](memgraph::replication::RoleMainData &) {
  auto const main_handler = [](replication::RoleMainData &) {
    // If we are already MAIN, we don't want to change anything
    return false;
  };

  auto const replica_handler = [this](memgraph::replication::RoleReplicaData const &) {
  auto const replica_handler = [this](replication::RoleReplicaData const &) {
    return DoReplicaToMainPromotion(utils::UUID{});
  };

  // TODO: under lock
  return std::visit(memgraph::utils::Overloaded{main_handler, replica_handler}, repl_state_.ReplicationData());
  return std::visit(utils::Overloaded{main_handler, replica_handler}, repl_state_.ReplicationData());
}

bool ReplicationHandler::SetReplicationRoleReplica(const memgraph::replication::ReplicationServerConfig &config,
bool ReplicationHandler::SetReplicationRoleReplica(const replication::ReplicationServerConfig &config,
                                                   const std::optional<utils::UUID> &main_uuid) {
  return SetReplicationRoleReplica_<true>(config, main_uuid);
}
@@ -238,18 +238,16 @@ auto ReplicationHandler::RegisterReplica(const memgraph::replication::Replicatio
  return RegisterReplica_<false>(config);
}

auto ReplicationHandler::UnregisterReplica(std::string_view name) -> memgraph::query::UnregisterReplicaResult {
  auto const replica_handler =
      [](memgraph::replication::RoleReplicaData const &) -> memgraph::query::UnregisterReplicaResult {
    return memgraph::query::UnregisterReplicaResult::NOT_MAIN;
auto ReplicationHandler::UnregisterReplica(std::string_view name) -> query::UnregisterReplicaResult {
  auto const replica_handler = [](replication::RoleReplicaData const &) -> query::UnregisterReplicaResult {
    return query::UnregisterReplicaResult::NOT_MAIN;
  };
  auto const main_handler =
      [this, name](memgraph::replication::RoleMainData &mainData) -> memgraph::query::UnregisterReplicaResult {
  auto const main_handler = [this, name](replication::RoleMainData &mainData) -> query::UnregisterReplicaResult {
    if (!repl_state_.TryPersistUnregisterReplica(name)) {
      return memgraph::query::UnregisterReplicaResult::COULD_NOT_BE_PERSISTED;
      return query::UnregisterReplicaResult::COULD_NOT_BE_PERSISTED;
    }
    // Remove database specific clients
    dbms_handler_.ForEach([name](memgraph::dbms::DatabaseAccess db_acc) {
    dbms_handler_.ForEach([name](dbms::DatabaseAccess db_acc) {
      db_acc->storage()->repl_storage_state_.replication_clients_.WithLock([&name](auto &clients) {
        std::erase_if(clients, [name](const auto &client) { return client->Name() == name; });
      });
@@ -257,28 +255,91 @@ auto ReplicationHandler::UnregisterReplica(std::string_view name) -> memgraph::q
    // Remove instance level clients
    auto const n_unregistered =
        std::erase_if(mainData.registered_replicas_, [name](auto const &client) { return client.name_ == name; });
    return n_unregistered != 0 ? memgraph::query::UnregisterReplicaResult::SUCCESS
                               : memgraph::query::UnregisterReplicaResult::CAN_NOT_UNREGISTER;
    return n_unregistered != 0 ? query::UnregisterReplicaResult::SUCCESS
                               : query::UnregisterReplicaResult::CAN_NOT_UNREGISTER;
  };

  return std::visit(memgraph::utils::Overloaded{main_handler, replica_handler}, repl_state_.ReplicationData());
  return std::visit(utils::Overloaded{main_handler, replica_handler}, repl_state_.ReplicationData());
}

auto ReplicationHandler::GetRole() const -> memgraph::replication_coordination_glue::ReplicationRole {
auto ReplicationHandler::GetRole() const -> replication_coordination_glue::ReplicationRole {
  return repl_state_.GetRole();
}

auto ReplicationHandler::GetDatabasesHistories() -> replication_coordination_glue::DatabaseHistories {
  replication_coordination_glue::DatabaseHistories results;
  dbms_handler_.ForEach([&results](memgraph::dbms::DatabaseAccess db_acc) {
    auto &repl_storage_state = db_acc->storage()->repl_storage_state_;

    std::vector<std::pair<std::string, uint64_t>> history = utils::fmap(repl_storage_state.history);

    history.emplace_back(std::string(repl_storage_state.epoch_.id()), repl_storage_state.last_commit_timestamp_.load());
    replication_coordination_glue::DatabaseHistory repl{
        .db_uuid = utils::UUID{db_acc->storage()->uuid()}, .history = history, .name = std::string(db_acc->name())};
    results.emplace_back(repl);
  });

  return results;
}

auto ReplicationHandler::GetReplicaUUID() -> std::optional<utils::UUID> {
  MG_ASSERT(repl_state_.IsReplica());
  MG_ASSERT(repl_state_.IsReplica(), "Instance is not replica");
  return std::get<RoleReplicaData>(repl_state_.ReplicationData()).uuid_;
}

auto ReplicationHandler::GetReplState() const -> const memgraph::replication::ReplicationState & { return repl_state_; }

auto ReplicationHandler::GetReplState() -> memgraph::replication::ReplicationState & { return repl_state_; }
auto ReplicationHandler::GetReplState() -> replication::ReplicationState & { return repl_state_; }

bool ReplicationHandler::IsMain() const { return repl_state_.IsMain(); }

bool ReplicationHandler::IsReplica() const { return repl_state_.IsReplica(); }
auto ReplicationHandler::ShowReplicas() const -> utils::BasicResult<query::ShowReplicaError, query::ReplicasInfos> {
  using res_t = utils::BasicResult<query::ShowReplicaError, query::ReplicasInfos>;
  auto main = [this](RoleMainData const &main) -> res_t {
    auto entries = std::vector<query::ReplicasInfo>{};
    entries.reserve(main.registered_replicas_.size());

    const bool full_info = license::global_license_checker.IsEnterpriseValidFast();

    for (auto const &replica : main.registered_replicas_) {
      // STEP 1: data_info
      auto data_info = std::map<std::string, query::ReplicaInfoState>{};
      this->dbms_handler_.ForEach([&](dbms::DatabaseAccess db_acc) {
        auto *storage = db_acc->storage();
        // ATM we only support IN_MEMORY_TRANSACTIONAL
        if (storage->storage_mode_ != storage::StorageMode::IN_MEMORY_TRANSACTIONAL) return;
        if (!full_info && storage->name() == dbms::kDefaultDB) return;
        auto ok =
            storage->repl_storage_state_.WithClient(replica.name_, [&](storage::ReplicationStorageClient &client) {
              auto ts_info = client.GetTimestampInfo(storage);
              auto state = client.State();

              data_info.emplace(storage->name(),
                                query::ReplicaInfoState{ts_info.current_timestamp_of_replica,
                                                        ts_info.current_number_of_timestamp_behind_main, state});
            });
        DMG_ASSERT(ok);
      });

      // STEP 2: system_info
#ifdef MG_ENTERPRISE
      // Already locked on system transaction via the interpreter
      const auto ts = system_.LastCommittedSystemTimestamp();
      // NOTE: no system behind at the moment
      query::ReplicaSystemInfoState system_info{ts, 0 /* behind ts not implemented */, *replica.state_.ReadLock()};
#else
      query::ReplicaSystemInfoState system_info{};
#endif
      // STEP 3: add entry
      entries.emplace_back(replica.name_, replica.rpc_client_.Endpoint().SocketAddress(), replica.mode_, system_info,
                           std::move(data_info));
    }
    return query::ReplicasInfos{std::move(entries)};
  };
  auto replica = [](RoleReplicaData const &) -> res_t { return query::ShowReplicaError::NOT_MAIN; };

  return std::visit(utils::Overloaded{main, replica}, repl_state_.ReplicationData());
}

} // namespace memgraph::replication
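
For completeness, a hedged sketch (not part of the diff) of how a caller could unpack the ShowReplicas() result; it assumes utils::BasicResult exposes HasError()/GetValue() as it does elsewhere in the codebase, and `handler` stands for a hypothetical ReplicationHandler instance:

auto maybe_replicas = handler.ShowReplicas();
if (maybe_replicas.HasError()) {
  // The only error defined above is ShowReplicaError::NOT_MAIN.
  spdlog::warn("SHOW REPLICAS is only available on a MAIN instance.");
} else {
  for (auto const &entry : maybe_replicas.GetValue().entries_) {
    spdlog::info("replica {} at {}", entry.name_, entry.socket_address_);
  }
}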
@@ -21,8 +21,10 @@ add_library(mg-storage-v2 STATIC
    storage.cpp
    indices/indices.cpp
    all_vertices_iterable.cpp
    edges_iterable.cpp
    vertices_iterable.cpp
    inmemory/storage.cpp
    inmemory/edge_type_index.cpp
    inmemory/label_index.cpp
    inmemory/label_property_index.cpp
    inmemory/unique_constraints.cpp
@@ -30,6 +32,7 @@ add_library(mg-storage-v2 STATIC
    disk/edge_import_mode_cache.cpp
    disk/storage.cpp
    disk/rocksdb_storage.cpp
    disk/edge_type_index.cpp
    disk/label_index.cpp
    disk/label_property_index.cpp
    disk/unique_constraints.cpp
@@ -123,6 +123,26 @@ inline bool operator==(const PreviousPtr::Pointer &a, const PreviousPtr::Pointer

inline bool operator!=(const PreviousPtr::Pointer &a, const PreviousPtr::Pointer &b) { return !(a == b); }

struct opt_str {
  opt_str(std::optional<std::string> const &other) : str_{other ? new_cstr(*other) : nullptr} {}

  ~opt_str() { delete[] str_; }

  auto as_opt_str() const -> std::optional<std::string> {
    if (!str_) return std::nullopt;
    return std::optional<std::string>{std::in_place, str_};
  }

 private:
  static auto new_cstr(std::string const &str) -> char const * {
    auto *mem = new char[str.length() + 1];
    strcpy(mem, str.c_str());
    return mem;
  }

  char const *str_ = nullptr;
};

struct Delta {
  enum class Action : std::uint8_t {
    /// Use for Vertex and Edge
@@ -160,7 +180,7 @@ struct Delta {
  // Because this object was created in past txs, we create the timestamp ourselves here instead of taking it from the
  // current tx. The timestamp comes from the RocksDB timestamp stored in the key.
  Delta(DeleteDeserializedObjectTag /*tag*/, uint64_t ts, std::optional<std::string> old_disk_key)
      : timestamp(new std::atomic<uint64_t>(ts)), command_id(0), old_disk_key{.value = std::move(old_disk_key)} {}
      : timestamp(new std::atomic<uint64_t>(ts)), command_id(0), old_disk_key{.value = old_disk_key} {}

  Delta(DeleteObjectTag /*tag*/, std::atomic<uint64_t> *timestamp, uint64_t command_id)
      : timestamp(timestamp), command_id(command_id), action(Action::DELETE_OBJECT) {}
@@ -222,7 +242,7 @@ struct Delta {
      case Action::REMOVE_OUT_EDGE:
        break;
      case Action::DELETE_DESERIALIZED_OBJECT:
        old_disk_key.value.reset();
        std::destroy_at(&old_disk_key.value);
        delete timestamp;
        timestamp = nullptr;
        break;
@@ -242,7 +262,7 @@ struct Delta {
  Action action;
  struct {
    Action action = Action::DELETE_DESERIALIZED_OBJECT;
    std::optional<std::string> value;
    opt_str value;
  } old_disk_key;
  struct {
    Action action;
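
The opt_str helper above replaces std::optional<std::string> inside the Delta union member, so the stored disk key is just a heap-allocated char array behind a raw pointer. A tiny sketch of its round-trip behaviour (types exactly as declared above; the key string is illustrative):

// A sketch under the declarations above; "vertex:42" is an illustrative key.
memgraph::storage::opt_str key{std::optional<std::string>{"vertex:42"}};
std::optional<std::string> restored = key.as_opt_str();   // contains "vertex:42"

memgraph::storage::opt_str empty{std::nullopt};
bool const is_empty = !empty.as_opt_str().has_value();    // true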
src/storage/v2/disk/edge_type_index.cpp (new file, 49 lines)
@@ -0,0 +1,49 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#include "edge_type_index.hpp"

#include "utils/exceptions.hpp"

namespace memgraph::storage {

bool DiskEdgeTypeIndex::DropIndex(EdgeTypeId /*edge_type*/) {
  spdlog::warn("Edge-type index related operations are not yet supported using on-disk storage mode.");
  return true;
}

bool DiskEdgeTypeIndex::IndexExists(EdgeTypeId /*edge_type*/) const {
  spdlog::warn("Edge-type index related operations are not yet supported using on-disk storage mode.");
  return false;
}

std::vector<EdgeTypeId> DiskEdgeTypeIndex::ListIndices() const {
  spdlog::warn("Edge-type index related operations are not yet supported using on-disk storage mode.");
  return {};
}

uint64_t DiskEdgeTypeIndex::ApproximateEdgeCount(EdgeTypeId /*edge_type*/) const {
  spdlog::warn("Edge-type index related operations are not yet supported using on-disk storage mode.");
  return 0U;
}

void DiskEdgeTypeIndex::UpdateOnEdgeCreation(Vertex * /*from*/, Vertex * /*to*/, EdgeRef /*edge_ref*/,
                                             EdgeTypeId /*edge_type*/, const Transaction & /*tx*/) {
  spdlog::warn("Edge-type index related operations are not yet supported using on-disk storage mode.");
}

void DiskEdgeTypeIndex::UpdateOnEdgeModification(Vertex * /*old_from*/, Vertex * /*old_to*/, Vertex * /*new_from*/,
                                                 Vertex * /*new_to*/, EdgeRef /*edge_ref*/, EdgeTypeId /*edge_type*/,
                                                 const Transaction & /*tx*/) {
  spdlog::warn("Edge-type index related operations are not yet supported using on-disk storage mode.");
}

} // namespace memgraph::storage
src/storage/v2/disk/edge_type_index.hpp (new file, 35 lines)
@@ -0,0 +1,35 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#include "storage/v2/indices/edge_type_index.hpp"

namespace memgraph::storage {

class DiskEdgeTypeIndex : public storage::EdgeTypeIndex {
 public:
  bool DropIndex(EdgeTypeId edge_type) override;

  bool IndexExists(EdgeTypeId edge_type) const override;

  std::vector<EdgeTypeId> ListIndices() const override;

  uint64_t ApproximateEdgeCount(EdgeTypeId edge_type) const override;

  void UpdateOnEdgeCreation(Vertex *from, Vertex *to, EdgeRef edge_ref, EdgeTypeId edge_type,
                            const Transaction &tx) override;

  void UpdateOnEdgeModification(Vertex *old_from, Vertex *old_to, Vertex *new_from, Vertex *new_to, EdgeRef edge_ref,
                                EdgeTypeId edge_type, const Transaction &tx) override;
};

} // namespace memgraph::storage
@@ -41,6 +41,7 @@
#include "storage/v2/edge_accessor.hpp"
#include "storage/v2/edge_import_mode.hpp"
#include "storage/v2/edge_ref.hpp"
#include "storage/v2/edges_iterable.hpp"
#include "storage/v2/id_types.hpp"
#include "storage/v2/modified_edge.hpp"
#include "storage/v2/mvcc.hpp"
@@ -807,11 +808,21 @@ void DiskStorage::LoadVerticesFromDiskLabelPropertyIndexForIntervalSearch(
  }
}

EdgesIterable DiskStorage::DiskAccessor::Edges(EdgeTypeId /*edge_type*/, View /*view*/) {
  throw utils::NotYetImplemented(
      "Edge-type index related operations are not yet supported using on-disk storage mode.");
}

uint64_t DiskStorage::DiskAccessor::ApproximateVertexCount() const {
  auto *disk_storage = static_cast<DiskStorage *>(storage_);
  return disk_storage->vertex_count_.load(std::memory_order_acquire);
}

uint64_t DiskStorage::DiskAccessor::ApproximateEdgeCount(EdgeTypeId /*edge_type*/) const {
  spdlog::info("Edge-type index related operations are not yet supported using on-disk storage mode.");
  return 0U;
}

uint64_t DiskStorage::GetDiskSpaceUsage() const {
  uint64_t main_disk_storage_size = utils::GetDirDiskUsage(config_.disk.main_storage_directory);
  uint64_t index_disk_storage_size = utils::GetDirDiskUsage(config_.disk.label_index_directory) +
@@ -825,7 +836,7 @@ uint64_t DiskStorage::GetDiskSpaceUsage() const {
         durability_disk_storage_size;
}

StorageInfo DiskStorage::GetBaseInfo(bool /* unused */) {
StorageInfo DiskStorage::GetBaseInfo() {
  StorageInfo info{};
  info.vertex_count = vertex_count_;
  info.edge_count = edge_count_.load(std::memory_order_acquire);
@@ -838,9 +849,8 @@ StorageInfo DiskStorage::GetBaseInfo(bool /* unused */) {
  return info;
}

StorageInfo DiskStorage::GetInfo(bool force_dir,
                                 memgraph::replication_coordination_glue::ReplicationRole replication_role) {
  StorageInfo info = GetBaseInfo(force_dir);
StorageInfo DiskStorage::GetInfo(memgraph::replication_coordination_glue::ReplicationRole replication_role) {
  StorageInfo info = GetBaseInfo();
  {
    auto access = Access(replication_role);
    const auto &lbl = access->ListAllIndices();
@@ -1630,6 +1640,9 @@ utils::BasicResult<StorageManipulationError, void> DiskStorage::DiskAccessor::Co
          return StorageManipulationError{PersistenceError{}};
        }
      } break;
      case MetadataDelta::Action::EDGE_INDEX_CREATE: {
        throw utils::NotYetImplemented("Edge-type indexing is not yet implemented on on-disk storage mode.");
      }
      case MetadataDelta::Action::LABEL_INDEX_DROP: {
        if (!disk_storage->durable_metadata_.PersistLabelIndexDeletion(md_delta.label)) {
          return StorageManipulationError{PersistenceError{}};
@@ -1642,6 +1655,9 @@ utils::BasicResult<StorageManipulationError, void> DiskStorage::DiskAccessor::Co
          return StorageManipulationError{PersistenceError{}};
        }
      } break;
      case MetadataDelta::Action::EDGE_INDEX_DROP: {
        throw utils::NotYetImplemented("Edge-type indexing is not yet implemented on on-disk storage mode.");
      }
      case MetadataDelta::Action::LABEL_INDEX_STATS_SET: {
        throw utils::NotYetImplemented("SetIndexStats(stats) is not implemented for DiskStorage.");
      } break;
@@ -1918,6 +1934,11 @@ utils::BasicResult<StorageIndexDefinitionError, void> DiskStorage::DiskAccessor:
  return {};
}

utils::BasicResult<StorageIndexDefinitionError, void> DiskStorage::DiskAccessor::CreateIndex(EdgeTypeId /*edge_type*/) {
  throw utils::NotYetImplemented(
      "Edge-type index related operations are not yet supported using on-disk storage mode.");
}

utils::BasicResult<StorageIndexDefinitionError, void> DiskStorage::DiskAccessor::DropIndex(LabelId label) {
  MG_ASSERT(unique_guard_.owns_lock(), "Create index requires a unique access to the storage!");
  auto *on_disk = static_cast<DiskStorage *>(storage_);
@@ -1946,6 +1967,11 @@ utils::BasicResult<StorageIndexDefinitionError, void> DiskStorage::DiskAccessor:
  return {};
}

utils::BasicResult<StorageIndexDefinitionError, void> DiskStorage::DiskAccessor::DropIndex(EdgeTypeId /*edge_type*/) {
  throw utils::NotYetImplemented(
      "Edge-type index related operations are not yet supported using on-disk storage mode.");
}

utils::BasicResult<StorageExistenceConstraintDefinitionError, void>
DiskStorage::DiskAccessor::CreateExistenceConstraint(LabelId label, PropertyId property) {
  MG_ASSERT(unique_guard_.owns_lock(), "Create existence constraint requires a unique access to the storage!");
@@ -2054,6 +2080,12 @@ std::unique_ptr<Storage::Accessor> DiskStorage::UniqueAccess(
  return std::unique_ptr<DiskAccessor>(
      new DiskAccessor{Storage::Accessor::unique_access, this, isolation_level, storage_mode_});
}

bool DiskStorage::DiskAccessor::EdgeTypeIndexExists(EdgeTypeId /*edge_type*/) const {
  spdlog::info("Edge-type index related operations are not yet supported using on-disk storage mode.");
  return false;
}

IndicesInfo DiskStorage::DiskAccessor::ListAllIndices() const {
  auto *on_disk = static_cast<DiskStorage *>(storage_);
  auto *disk_label_index = static_cast<DiskLabelIndex *>(on_disk->indices_.label_index_.get());
@@ -72,6 +72,8 @@ class DiskStorage final : public Storage {
                                  const std::optional<utils::Bound<PropertyValue>> &lower_bound,
                                  const std::optional<utils::Bound<PropertyValue>> &upper_bound, View view) override;

    EdgesIterable Edges(EdgeTypeId edge_type, View view) override;

    uint64_t ApproximateVertexCount() const override;

    uint64_t ApproximateVertexCount(LabelId /*label*/) const override { return 10; }
@@ -89,6 +91,8 @@ class DiskStorage final : public Storage {
      return 10;
    }

    uint64_t ApproximateEdgeCount(EdgeTypeId edge_type) const override;

    std::optional<storage::LabelIndexStats> GetIndexStats(const storage::LabelId & /*label*/) const override {
      return {};
    }
@@ -140,6 +144,8 @@ class DiskStorage final : public Storage {
      return disk_storage->indices_.label_property_index_->IndexExists(label, property);
    }

    bool EdgeTypeIndexExists(EdgeTypeId edge_type) const override;

    IndicesInfo ListAllIndices() const override;

    ConstraintsInfo ListAllConstraints() const override;
@@ -158,10 +164,14 @@ class DiskStorage final : public Storage {

    utils::BasicResult<StorageIndexDefinitionError, void> CreateIndex(LabelId label, PropertyId property) override;

    utils::BasicResult<StorageIndexDefinitionError, void> CreateIndex(EdgeTypeId edge_type) override;

    utils::BasicResult<StorageIndexDefinitionError, void> DropIndex(LabelId label) override;

    utils::BasicResult<StorageIndexDefinitionError, void> DropIndex(LabelId label, PropertyId property) override;

    utils::BasicResult<StorageIndexDefinitionError, void> DropIndex(EdgeTypeId edge_type) override;

    utils::BasicResult<StorageExistenceConstraintDefinitionError, void> CreateExistenceConstraint(
        LabelId label, PropertyId property) override;

@@ -307,11 +317,10 @@ class DiskStorage final : public Storage {
  std::vector<std::pair<std::string, std::string>> SerializeVerticesForLabelPropertyIndex(LabelId label,
                                                                                          PropertyId property);

  StorageInfo GetBaseInfo(bool force_directory) override;
  StorageInfo GetInfo(bool force_directory,
                      memgraph::replication_coordination_glue::ReplicationRole replication_role) override;
  StorageInfo GetBaseInfo() override;
  StorageInfo GetInfo(memgraph::replication_coordination_glue::ReplicationRole replication_role) override;

  void FreeMemory(std::unique_lock<utils::ResourceLock> /*lock*/) override {}
  void FreeMemory(std::unique_lock<utils::ResourceLock> /*lock*/, bool /*periodic*/) override {}

  void PrepareForNewEpoch() override { throw utils::BasicException("Disk storage mode does not support replication."); }

@@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -31,6 +31,7 @@
#include "storage/v2/durability/paths.hpp"
#include "storage/v2/durability/snapshot.hpp"
#include "storage/v2/durability/wal.hpp"
#include "storage/v2/inmemory/edge_type_index.hpp"
#include "storage/v2/inmemory/label_index.hpp"
#include "storage/v2/inmemory/label_property_index.hpp"
#include "storage/v2/inmemory/unique_constraints.hpp"
@@ -118,6 +119,8 @@ std::optional<std::vector<WalDurabilityInfo>> GetWalFiles(const std::filesystem:
    if (!item.is_regular_file()) continue;
    try {
      auto info = ReadWalInfo(item.path());
      spdlog::trace("Getting wal file with following info: uuid: {}, epoch id: {}, from timestamp {}, to_timestamp {} ",
                    info.uuid, info.epoch_id, info.from_timestamp, info.to_timestamp);
      if ((uuid.empty() || info.uuid == uuid) && (!current_seq_num || info.seq_num < *current_seq_num)) {
        wal_files.emplace_back(info.seq_num, info.from_timestamp, info.to_timestamp, std::move(info.uuid),
                               std::move(info.epoch_id), item.path());
@@ -197,9 +200,18 @@ void RecoverIndicesAndStats(const RecoveredIndicesAndConstraints::IndicesMetadat
  }
  spdlog::info("Label+property indices statistics are recreated.");

  spdlog::info("Indices are recreated.");
  // Recover edge-type indices.
  spdlog::info("Recreating {} edge-type indices from metadata.", indices_metadata.edge.size());
  auto *mem_edge_type_index = static_cast<InMemoryEdgeTypeIndex *>(indices->edge_type_index_.get());
  for (const auto &item : indices_metadata.edge) {
    if (!mem_edge_type_index->CreateIndex(item, vertices->access())) {
      throw RecoveryFailure("The edge-type index must be created here!");
    }
    spdlog::info("Index on :{} is recreated from metadata", name_id_mapper->IdToName(item.AsUint()));
  }
  spdlog::info("Edge-type indices are recreated.");

  spdlog::info("Recreating constraints from metadata.");
  spdlog::info("Indices are recreated.");
}

void RecoverExistenceConstraints(const RecoveredIndicesAndConstraints::ConstraintsMetadata &constraints_metadata,
@@ -410,22 +422,17 @@ std::optional<RecoveryInfo> Recovery::RecoverData(std::string *uuid, Replication
  std::optional<uint64_t> previous_seq_num;
  auto last_loaded_timestamp = snapshot_timestamp;
  spdlog::info("Trying to load WAL files.");

  if (last_loaded_timestamp) {
    epoch_history->emplace_back(repl_storage_state.epoch_.id(), *last_loaded_timestamp);
  }

  for (auto &wal_file : wal_files) {
    if (previous_seq_num && (wal_file.seq_num - *previous_seq_num) > 1) {
      LOG_FATAL("You are missing a WAL file with the sequence number {}!", *previous_seq_num + 1);
    }
    previous_seq_num = wal_file.seq_num;

    if (wal_file.epoch_id != repl_storage_state.epoch_.id()) {
      // This way we skip WALs finalized only because of role change.
      // We can also set the last timestamp to 0 if last loaded timestamp
      // is nullopt as this can only happen if the WAL file with seq = 0
      // does not contain any deltas and we didn't find any snapshots.
      if (last_loaded_timestamp) {
        epoch_history->emplace_back(wal_file.epoch_id, *last_loaded_timestamp);
      }
      repl_storage_state.epoch_.SetEpoch(std::move(wal_file.epoch_id));
    }
    try {
      auto info = LoadWal(wal_file.path, &indices_constraints, last_loaded_timestamp, vertices, edges, name_id_mapper,
                          edge_count, config.salient.items);
@@ -434,13 +441,24 @@ std::optional<RecoveryInfo> Recovery::RecoverData(std::string *uuid, Replication
      recovery_info.next_timestamp = std::max(recovery_info.next_timestamp, info.next_timestamp);

      recovery_info.last_commit_timestamp = info.last_commit_timestamp;

      if (recovery_info.next_timestamp != 0) {
        last_loaded_timestamp.emplace(recovery_info.next_timestamp - 1);
      }

      auto last_loaded_timestamp_value = last_loaded_timestamp.value_or(0);
      if (epoch_history->empty() || epoch_history->back().first != wal_file.epoch_id) {
        // no history or new epoch, add it
        epoch_history->emplace_back(wal_file.epoch_id, last_loaded_timestamp_value);
        repl_storage_state.epoch_.SetEpoch(wal_file.epoch_id);
      } else if (epoch_history->back().second < last_loaded_timestamp_value) {
        // existing epoch, update with newer timestamp
        epoch_history->back().second = last_loaded_timestamp_value;
      }

    } catch (const RecoveryFailure &e) {
      LOG_FATAL("Couldn't recover WAL deltas from {} because of: {}", wal_file.path, e.what());
    }

    if (recovery_info.next_timestamp != 0) {
      last_loaded_timestamp.emplace(recovery_info.next_timestamp - 1);
    }
  }
  // The sequence number needs to be recovered even though `LoadWal` didn't
  // load any deltas from that file.
@@ -456,7 +474,12 @@ std::optional<RecoveryInfo> Recovery::RecoverData(std::string *uuid, Replication

  memgraph::metrics::Measure(memgraph::metrics::SnapshotRecoveryLatency_us,
                             std::chrono::duration_cast<std::chrono::microseconds>(timer.Elapsed()).count());
  spdlog::trace("Set epoch id: {} with commit timestamp {}", std::string(repl_storage_state.epoch_.id()),
                repl_storage_state.last_commit_timestamp_);

  std::for_each(repl_storage_state.history.begin(), repl_storage_state.history.end(), [](auto &history) {
    spdlog::trace("epoch id: {} with commit timestamp {}", std::string(history.first), history.second);
  });
  return recovery_info;
}
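
The WAL-recovery change above replaces the up-front epoch_history append with per-file bookkeeping: append an entry when the epoch id changes, otherwise only bump the stored timestamp when a newer one has been loaded. A toy, self-contained illustration of exactly that rule (standard C++ only, no Memgraph types):

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<std::string, std::uint64_t>> epoch_history;
  auto record = [&](std::string const &epoch_id, std::uint64_t last_loaded_ts) {
    if (epoch_history.empty() || epoch_history.back().first != epoch_id) {
      epoch_history.emplace_back(epoch_id, last_loaded_ts);   // no history or new epoch: add it
    } else if (epoch_history.back().second < last_loaded_ts) {
      epoch_history.back().second = last_loaded_ts;           // same epoch: keep the newer timestamp
    }
  };
  record("epoch-a", 10);
  record("epoch-a", 15);  // updates the epoch-a entry from 10 to 15
  record("epoch-b", 20);  // appends a new entry for epoch-b
  return epoch_history.size() == 2 ? 0 : 1;
}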