Compare commits
32 Commits
master
...
query-exec
Author | SHA1 | Date | |
---|---|---|---|
|
2ff58aa783 | ||
|
e143d68c24 | ||
|
04f93ab46c | ||
|
9ec55ac099 | ||
|
c85e510faa | ||
|
75d8a216c3 | ||
|
358c34d444 | ||
|
f517f8f368 | ||
|
7f335052be | ||
|
53458b4962 | ||
|
2416d0fd15 | ||
|
065c88612d | ||
|
ed3a8a9328 | ||
|
34f2a1e10b | ||
|
ce9bba8e83 | ||
|
671f51f21d | ||
|
60de9e5a25 | ||
|
90fa6d9226 | ||
|
4d930fb73b | ||
|
d30a7c70de | ||
|
c0483576db | ||
|
6536d3b21d | ||
|
ec3ba6a408 | ||
|
1145ea87ad | ||
|
8fdc199d2b | ||
|
112c4528d3 | ||
|
11299981df | ||
|
2c0fc680de | ||
|
2b30ba3fef | ||
|
d0babcddc5 | ||
|
8ba1f160d4 | ||
|
b6a55e534b |
21
.github/workflows/diff.yaml
vendored
21
.github/workflows/diff.yaml
vendored
@ -4,10 +4,6 @@ concurrency:
|
||||
cancel-in-progress: true
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
workflow_dispatch:
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- "docs/**"
|
||||
@ -257,17 +253,6 @@ jobs:
|
||||
--organization-name $MEMGRAPH_ORGANIZATION_NAME \
|
||||
test-memgraph drivers
|
||||
|
||||
- name: Run HA driver tests
|
||||
run: |
|
||||
./release/package/mgbuild.sh \
|
||||
--toolchain $TOOLCHAIN \
|
||||
--os $OS \
|
||||
--arch $ARCH \
|
||||
--threads $THREADS \
|
||||
--enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
|
||||
--organization-name $MEMGRAPH_ORGANIZATION_NAME \
|
||||
test-memgraph drivers-high-availability
|
||||
|
||||
- name: Run integration tests
|
||||
run: |
|
||||
./release/package/mgbuild.sh \
|
||||
@ -289,7 +274,7 @@ jobs:
|
||||
--enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
|
||||
--organization-name $MEMGRAPH_ORGANIZATION_NAME \
|
||||
test-memgraph cppcheck-and-clang-format
|
||||
|
||||
|
||||
- name: Save cppcheck and clang-format errors
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
@ -486,8 +471,8 @@ jobs:
|
||||
THREADS: 24
|
||||
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
|
||||
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
|
||||
OS: debian-12
|
||||
TOOLCHAIN: v5
|
||||
OS: debian-10
|
||||
TOOLCHAIN: v4
|
||||
ARCH: amd
|
||||
BUILD_TYPE: RelWithDebInfo
|
||||
|
||||
|
@ -337,8 +337,6 @@ if (ASAN)
|
||||
endif()
|
||||
|
||||
if (TSAN)
|
||||
message(WARNING "Disabling jemalloc as it doesn't work well with ASAN")
|
||||
set(ENABLE_JEMALLOC OFF)
|
||||
# ThreadSanitizer generally requires all code to be compiled with -fsanitize=thread.
|
||||
# If some code (e.g. dynamic libraries) is not compiled with the flag, it can
|
||||
# lead to false positive race reports, false negative race reports and/or
|
||||
@ -354,7 +352,7 @@ if (TSAN)
|
||||
# By default ThreadSanitizer uses addr2line utility to symbolize reports.
|
||||
# llvm-symbolizer is faster, consumes less memory and produces much better
|
||||
# reports. To use it set runtime flag:
|
||||
# TSAN_OPTIONS="extern-symbolizer-path=~/llvm-symbolizer"
|
||||
# TSAN_OPTIONS="extern-symbolizer-path=~/llvm-symbolizer"
|
||||
# For more runtime flags see: https://github.com/google/sanitizers/wiki/ThreadSanitizerFlags
|
||||
endif()
|
||||
|
||||
|
@ -5,20 +5,17 @@ IFS=' '
|
||||
# NOTE: docker_image_name could be local image build based on release/package images.
|
||||
# NOTE: each line has to be under quotes, docker_container_type, script_name and docker_image_name separate with a space.
|
||||
# "docker_container_type script_name docker_image_name"
|
||||
# docker_container_type OPTIONS:
|
||||
# * mgrun -> running plain/empty operating system for the purposes of testing native memgraph package
|
||||
# * mgbuild -> running the builder container to build memgraph inside it -> it's possible create builder images using release/package/run.sh
|
||||
OPERATING_SYSTEMS=(
|
||||
# "mgrun amzn-2 amazonlinux:2"
|
||||
# "mgrun centos-7 centos:7"
|
||||
# "mgrun centos-9 dokken/centos-stream-9"
|
||||
# "mgrun debian-10 debian:10"
|
||||
# "mgrun debian-11 debian:11"
|
||||
# "mgrun fedora-36 fedora:36"
|
||||
# "mgrun ubuntu-18.04 ubuntu:18.04"
|
||||
# "mgrun ubuntu-20.04 ubuntu:20.04"
|
||||
# "mgrun ubuntu-22.04 ubuntu:22.04"
|
||||
# "mgbuild debian-12 memgraph/memgraph-builder:v5_debian-12"
|
||||
"mgrun amzn-2 amazonlinux:2"
|
||||
"mgrun centos-7 centos:7"
|
||||
"mgrun centos-9 dokken/centos-stream-9"
|
||||
"mgrun debian-10 debian:10"
|
||||
"mgrun debian-11 debian:11"
|
||||
"mgrun fedora-36 fedora:36"
|
||||
"mgrun ubuntu-18.04 ubuntu:18.04"
|
||||
"mgrun ubuntu-20.04 ubuntu:20.04"
|
||||
"mgrun ubuntu-22.04 ubuntu:22.04"
|
||||
# "mgbuild centos-7 package-mgbuild_centos-7"
|
||||
)
|
||||
|
||||
if [ ! "$(docker info)" ]; then
|
||||
@ -36,24 +33,14 @@ print_help () {
|
||||
# NOTE: This is an idempotent operation!
|
||||
# TODO(gitbuda): Consider making docker_run always delete + start a new container or add a new function.
|
||||
docker_run () {
|
||||
cnt_type="$1"
|
||||
if [[ "$cnt_type" != "mgbuild" && "$cnt_type" != "mgrun" ]]; then
|
||||
echo "ERROR: Wrong docker_container_type -> valid options are mgbuild, mgrun"
|
||||
exit 1
|
||||
fi
|
||||
cnt_name="$2"
|
||||
cnt_image="$3"
|
||||
cnt_name="$1"
|
||||
cnt_image="$2"
|
||||
if [ ! "$(docker ps -q -f name=$cnt_name)" ]; then
|
||||
if [ "$(docker ps -aq -f status=exited -f name=$cnt_name)" ]; then
|
||||
echo "Cleanup of the old exited container..."
|
||||
docker rm $cnt_name
|
||||
fi
|
||||
if [[ "$cnt_type" == "mgbuild" ]]; then
|
||||
docker run -d --volume "$SCRIPT_DIR/../../:/memgraph" --network host --name "$cnt_name" "$cnt_image"
|
||||
fi
|
||||
if [[ "$cnt_type" == "mgrun" ]]; then
|
||||
docker run -d --volume "$SCRIPT_DIR/../../:/memgraph" --network host --name "$cnt_name" "$cnt_image" sleep infinity
|
||||
fi
|
||||
docker run -d --volume "$SCRIPT_DIR/../../:/memgraph" --network host --name "$cnt_name" "$cnt_image" sleep infinity
|
||||
fi
|
||||
echo "The $cnt_image container is active under $cnt_name name!"
|
||||
}
|
||||
@ -68,9 +55,9 @@ docker_stop_and_rm () {
|
||||
cnt_name="$1"
|
||||
if [ "$(docker ps -q -f name=$cnt_name)" ]; then
|
||||
docker stop "$1"
|
||||
fi
|
||||
if [ "$(docker ps -aq -f status=exited -f name=$cnt_name)" ]; then
|
||||
docker rm "$1"
|
||||
if [ "$(docker ps -aq -f status=exited -f name=$cnt_name)" ]; then
|
||||
docker rm "$1"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
@ -84,7 +71,7 @@ start_all () {
|
||||
docker_name="${docker_container_type}_$script_name"
|
||||
echo ""
|
||||
echo "~~~~ OPERATING ON $docker_image as $docker_name..."
|
||||
docker_run "$docker_container_type" "$docker_name" "$docker_image"
|
||||
docker_run "$docker_name" "$docker_image"
|
||||
docker_exec "$docker_name" "/memgraph/environment/os/$script_name.sh install NEW_DEPS"
|
||||
echo "---- DONE EVERYHING FOR $docker_image as $docker_name..."
|
||||
echo ""
|
||||
|
@ -326,21 +326,6 @@ inline mgp_vertex *graph_get_vertex_by_id(mgp_graph *g, mgp_vertex_id id, mgp_me
|
||||
return MgInvoke<mgp_vertex *>(mgp_graph_get_vertex_by_id, g, id, memory);
|
||||
}
|
||||
|
||||
inline bool graph_has_text_index(mgp_graph *graph, const char *index_name) {
|
||||
return MgInvoke<int>(mgp_graph_has_text_index, graph, index_name);
|
||||
}
|
||||
|
||||
inline mgp_map *graph_search_text_index(mgp_graph *graph, const char *index_name, const char *search_query,
|
||||
text_search_mode search_mode, mgp_memory *memory) {
|
||||
return MgInvoke<mgp_map *>(mgp_graph_search_text_index, graph, index_name, search_query, search_mode, memory);
|
||||
}
|
||||
|
||||
inline mgp_map *graph_aggregate_over_text_index(mgp_graph *graph, const char *index_name, const char *search_query,
|
||||
const char *aggregation_query, mgp_memory *memory) {
|
||||
return MgInvoke<mgp_map *>(mgp_graph_aggregate_over_text_index, graph, index_name, search_query, aggregation_query,
|
||||
memory);
|
||||
}
|
||||
|
||||
inline mgp_vertices_iterator *graph_iter_vertices(mgp_graph *g, mgp_memory *memory) {
|
||||
return MgInvoke<mgp_vertices_iterator *>(mgp_graph_iter_vertices, g, memory);
|
||||
}
|
||||
@ -866,4 +851,23 @@ inline void func_result_set_value(mgp_func_result *res, mgp_value *value, mgp_me
|
||||
MgInvokeVoid(mgp_func_result_set_value, res, value, memory);
|
||||
}
|
||||
|
||||
inline mgp_execution_result *execute_query(mgp_graph *graph, const char *query, mgp_map *params, mgp_memory *memory) {
|
||||
return MgInvoke<mgp_execution_result *>(mgp_execute_query, graph, memory, query, params);
|
||||
}
|
||||
|
||||
inline mgp_execution_headers *fetch_execution_headers(mgp_execution_result *exec_result) {
|
||||
return MgInvoke<mgp_execution_headers *>(mgp_fetch_execution_headers, exec_result);
|
||||
}
|
||||
|
||||
inline size_t execution_headers_size(mgp_execution_headers *headers) {
|
||||
return MgInvoke<size_t>(mgp_execution_headers_size, headers);
|
||||
}
|
||||
|
||||
inline const char *execution_headers_at(mgp_execution_headers *headers, size_t index) {
|
||||
return MgInvoke<const char *>(mgp_execution_headers_at, headers, index);
|
||||
}
|
||||
|
||||
inline mgp_map *pull_one(mgp_execution_result *result, mgp_graph *graph, mgp_memory *memory) {
|
||||
return MgInvoke<mgp_map *>(mgp_pull_one, result, graph, memory);
|
||||
}
|
||||
} // namespace mgp
|
||||
|
@ -891,36 +891,6 @@ enum mgp_error mgp_edge_iter_properties(struct mgp_edge *e, struct mgp_memory *m
|
||||
enum mgp_error mgp_graph_get_vertex_by_id(struct mgp_graph *g, struct mgp_vertex_id id, struct mgp_memory *memory,
|
||||
struct mgp_vertex **result);
|
||||
|
||||
/// Result is non-zero if the index with the given name exists.
|
||||
/// The current implementation always returns without errors.
|
||||
enum mgp_error mgp_graph_has_text_index(struct mgp_graph *graph, const char *index_name, int *result);
|
||||
|
||||
/// Available modes of searching text indices.
|
||||
MGP_ENUM_CLASS text_search_mode{
|
||||
SPECIFIED_PROPERTIES,
|
||||
REGEX,
|
||||
ALL_PROPERTIES,
|
||||
};
|
||||
|
||||
/// Search the named text index for the given query. The result is a map with the "search_results" and "error_msg" keys.
|
||||
/// The "search_results" key contains the vertices whose text-indexed properties match the given query.
|
||||
/// In case of a Tantivy error, the "search_results" key is absent, and "error_msg" contains the error message.
|
||||
/// Return mgp_error::MGP_ERROR_UNABLE_TO_ALLOCATE if there’s an allocation error while constructing the results map.
|
||||
/// Return mgp_error::MGP_ERROR_KEY_ALREADY_EXISTS if the same key is being created in the results map more than once.
|
||||
enum mgp_error mgp_graph_search_text_index(struct mgp_graph *graph, const char *index_name, const char *search_query,
|
||||
enum text_search_mode search_mode, struct mgp_memory *memory,
|
||||
struct mgp_map **result);
|
||||
|
||||
/// Aggregate over the results of a search over the named text index. The result is a map with the "aggregation_results"
|
||||
/// and "error_msg" keys.
|
||||
/// The "aggregation_results" key contains the vertices whose text-indexed properties match the given query.
|
||||
/// In case of a Tantivy error, the "aggregation_results" key is absent, and "error_msg" contains the error message.
|
||||
/// Return mgp_error::MGP_ERROR_UNABLE_TO_ALLOCATE if there’s an allocation error while constructing the results map.
|
||||
/// Return mgp_error::MGP_ERROR_KEY_ALREADY_EXISTS if the same key is being created in the results map more than once.
|
||||
enum mgp_error mgp_graph_aggregate_over_text_index(struct mgp_graph *graph, const char *index_name,
|
||||
const char *search_query, const char *aggregation_query,
|
||||
struct mgp_memory *memory, struct mgp_map **result);
|
||||
|
||||
/// Creates label index for given label.
|
||||
/// mgp_error::MGP_ERROR_NO_ERROR is always returned.
|
||||
/// if label index already exists, result will be 0, otherwise 1.
|
||||
@ -1830,6 +1800,24 @@ enum mgp_error mgp_func_result_set_error_msg(struct mgp_func_result *result, con
|
||||
/// mgp_func_result.
|
||||
enum mgp_error mgp_func_result_set_value(struct mgp_func_result *result, struct mgp_value *value,
|
||||
struct mgp_memory *memory);
|
||||
|
||||
struct mgp_execution_headers;
|
||||
|
||||
enum mgp_error mgp_execution_headers_at(struct mgp_execution_headers *headers, size_t index, const char **result);
|
||||
|
||||
enum mgp_error mgp_execution_headers_size(struct mgp_execution_headers *headers, size_t *result);
|
||||
|
||||
struct mgp_execution_result;
|
||||
|
||||
enum mgp_error mgp_execute_query(struct mgp_graph *graph, struct mgp_memory *memory, const char *query,
|
||||
struct mgp_map *params, struct mgp_execution_result **result);
|
||||
|
||||
enum mgp_error mgp_fetch_execution_headers(struct mgp_execution_result *exec_result,
|
||||
struct mgp_execution_headers **headers);
|
||||
|
||||
enum mgp_error mgp_pull_one(struct mgp_execution_result *exec_result, struct mgp_graph *graph,
|
||||
struct mgp_memory *memory, struct mgp_map **result);
|
||||
|
||||
/// @}
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
242
include/mgp.hpp
242
include/mgp.hpp
@ -16,6 +16,7 @@
|
||||
#include <functional>
|
||||
#include <map>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <set>
|
||||
#include <shared_mutex>
|
||||
#include <string>
|
||||
@ -32,15 +33,6 @@
|
||||
|
||||
namespace mgp {
|
||||
|
||||
class TextSearchException : public std::exception {
|
||||
public:
|
||||
explicit TextSearchException(std::string message) : message_(std::move(message)) {}
|
||||
const char *what() const noexcept override { return message_.c_str(); }
|
||||
|
||||
private:
|
||||
std::string message_;
|
||||
};
|
||||
|
||||
class IndexException : public std::exception {
|
||||
public:
|
||||
explicit IndexException(std::string message) : message_(std::move(message)) {}
|
||||
@ -102,6 +94,10 @@ class Relationship;
|
||||
struct MapItem;
|
||||
class Duration;
|
||||
class Value;
|
||||
class QueryExecution;
|
||||
class ExecutionResult;
|
||||
class ExecutionHeaders;
|
||||
class ExecutionRow;
|
||||
|
||||
struct StealType {};
|
||||
inline constexpr StealType steal{};
|
||||
@ -604,6 +600,7 @@ class Map {
|
||||
friend class Record;
|
||||
friend class Result;
|
||||
friend class Parameter;
|
||||
friend class QueryExecution;
|
||||
|
||||
public:
|
||||
/// @brief Creates a Map from the copy of the given @ref mgp_map.
|
||||
@ -1559,6 +1556,95 @@ class Return {
|
||||
mgp_type *GetMGPType() const;
|
||||
};
|
||||
|
||||
class ExecutionHeaders {
|
||||
public:
|
||||
ExecutionHeaders(mgp_execution_headers *headers);
|
||||
size_t Size() const;
|
||||
std::string At(size_t index) const;
|
||||
|
||||
std::string_view operator[](size_t index) const;
|
||||
|
||||
class Iterator {
|
||||
private:
|
||||
friend class ExecutionHeaders;
|
||||
|
||||
public:
|
||||
using value_type = ExecutionHeaders;
|
||||
using difference_type = std::ptrdiff_t;
|
||||
using pointer = const ExecutionHeaders *;
|
||||
using reference = const ExecutionHeaders &;
|
||||
using iterator_category = std::forward_iterator_tag;
|
||||
|
||||
bool operator==(const Iterator &other) const;
|
||||
|
||||
bool operator!=(const Iterator &other) const;
|
||||
|
||||
Iterator &operator++();
|
||||
|
||||
std::string_view operator*() const;
|
||||
|
||||
private:
|
||||
Iterator(const ExecutionHeaders *iterable, size_t index);
|
||||
|
||||
const ExecutionHeaders *iterable_;
|
||||
size_t index_;
|
||||
};
|
||||
|
||||
Iterator begin();
|
||||
Iterator end();
|
||||
|
||||
Iterator cbegin();
|
||||
Iterator cend();
|
||||
|
||||
private:
|
||||
mgp_execution_headers *headers_;
|
||||
};
|
||||
|
||||
class QueryExecution {
|
||||
public:
|
||||
QueryExecution(mgp_graph *graph);
|
||||
ExecutionResult ExecuteQuery(std::string_view query, Map params = Map()) const;
|
||||
|
||||
private:
|
||||
mgp_graph *graph_;
|
||||
};
|
||||
|
||||
class ExecutionRow {
|
||||
private:
|
||||
Map row_;
|
||||
|
||||
public:
|
||||
ExecutionRow(mgp_map *row);
|
||||
|
||||
/// @brief Returns the size of the map.
|
||||
size_t Size() const;
|
||||
|
||||
/// @brief Returns whether the map is empty.
|
||||
bool Empty() const;
|
||||
|
||||
/// @brief Returns the value at the given `key`.
|
||||
Value operator[](std::string_view key) const;
|
||||
|
||||
/// @brief Returns the value at the given `key`.
|
||||
Value At(std::string_view key) const;
|
||||
|
||||
/// @brief Returns true if the given `key` exists.
|
||||
bool KeyExists(std::string_view key) const;
|
||||
|
||||
mgp::Map Values() const;
|
||||
};
|
||||
|
||||
class ExecutionResult {
|
||||
public:
|
||||
ExecutionResult(mgp_execution_result *result, mgp_graph *graph);
|
||||
ExecutionHeaders Headers() const;
|
||||
std::optional<ExecutionRow> PullOne() const;
|
||||
|
||||
private:
|
||||
mgp_execution_result *result_;
|
||||
mgp_graph *graph_;
|
||||
};
|
||||
|
||||
enum class ProcedureType : uint8_t {
|
||||
Read,
|
||||
Write,
|
||||
@ -4295,6 +4381,76 @@ inline mgp_type *Return::GetMGPType() const {
|
||||
return util::ToMGPType(type_);
|
||||
}
|
||||
|
||||
inline ExecutionHeaders::ExecutionHeaders(mgp_execution_headers *headers) : headers_(headers) {}
|
||||
|
||||
inline size_t ExecutionHeaders::Size() const { return mgp::execution_headers_size(headers_); }
|
||||
|
||||
inline std::string ExecutionHeaders::At(size_t index) const {
|
||||
return std::string(mgp::execution_headers_at(headers_, index));
|
||||
}
|
||||
|
||||
inline QueryExecution::QueryExecution(mgp_graph *graph) : graph_(graph) {}
|
||||
|
||||
inline ExecutionResult QueryExecution::ExecuteQuery(std::string_view query, mgp::Map params) const {
|
||||
return ExecutionResult(mgp::MemHandlerCallback(execute_query, graph_, query.data(), params.ptr_), graph_);
|
||||
}
|
||||
|
||||
inline ExecutionResult::ExecutionResult(mgp_execution_result *result, mgp_graph *graph)
|
||||
: result_(result), graph_(graph) {}
|
||||
|
||||
inline ExecutionHeaders ExecutionResult::Headers() const { return mgp::fetch_execution_headers(result_); };
|
||||
|
||||
inline std::optional<ExecutionRow> ExecutionResult::PullOne() const {
|
||||
auto *value = mgp::MemHandlerCallback(pull_one, result_, graph_);
|
||||
if (!value) {
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
return ExecutionRow(value);
|
||||
}
|
||||
|
||||
inline bool ExecutionHeaders::Iterator::operator==(const Iterator &other) const {
|
||||
return iterable_ == other.iterable_ && index_ == other.index_;
|
||||
}
|
||||
|
||||
inline bool ExecutionHeaders::Iterator::operator!=(const Iterator &other) const { return !(*this == other); }
|
||||
|
||||
inline ExecutionHeaders::Iterator &ExecutionHeaders::Iterator::operator++() {
|
||||
index_++;
|
||||
return *this;
|
||||
}
|
||||
|
||||
inline std::string_view ExecutionHeaders::Iterator::operator*() const { return (*iterable_)[index_]; }
|
||||
|
||||
inline ExecutionHeaders::Iterator::Iterator(const ExecutionHeaders *iterable, size_t index)
|
||||
: iterable_(iterable), index_(index) {}
|
||||
|
||||
inline std::string_view ExecutionHeaders::operator[](size_t index) const {
|
||||
return std::string_view(mgp::execution_headers_at(headers_, index));
|
||||
}
|
||||
|
||||
inline ExecutionHeaders::Iterator ExecutionHeaders::begin() { return Iterator(this, 0); }
|
||||
|
||||
inline ExecutionHeaders::Iterator ExecutionHeaders::end() { return Iterator(this, Size()); }
|
||||
|
||||
inline ExecutionHeaders::Iterator ExecutionHeaders::cbegin() { return Iterator(this, 0); }
|
||||
|
||||
inline ExecutionHeaders::Iterator ExecutionHeaders::cend() { return Iterator(this, Size()); }
|
||||
|
||||
inline ExecutionRow::ExecutionRow(mgp_map *row) : row_(row) {}
|
||||
|
||||
inline size_t ExecutionRow::Size() const { return row_.Size(); }
|
||||
|
||||
inline bool ExecutionRow::Empty() const { return row_.Empty(); }
|
||||
|
||||
inline Value ExecutionRow::operator[](std::string_view key) const { return row_[key]; }
|
||||
|
||||
inline Value ExecutionRow::At(std::string_view key) const { return row_.At(key); }
|
||||
|
||||
inline bool ExecutionRow::KeyExists(std::string_view key) const { return row_.KeyExists(key); }
|
||||
|
||||
inline mgp::Map ExecutionRow::Values() const { return mgp::Map(row_); }
|
||||
|
||||
// do not enter
|
||||
namespace detail {
|
||||
inline void AddParamsReturnsToProc(mgp_proc *proc, std::vector<Parameter> ¶meters,
|
||||
@ -4315,12 +4471,12 @@ inline void AddParamsReturnsToProc(mgp_proc *proc, std::vector<Parameter> ¶m
|
||||
}
|
||||
} // namespace detail
|
||||
|
||||
inline bool CreateLabelIndex(mgp_graph *memgraph_graph, const std::string_view label) {
|
||||
return create_label_index(memgraph_graph, label.data());
|
||||
inline bool CreateLabelIndex(mgp_graph *memgaph_graph, const std::string_view label) {
|
||||
return create_label_index(memgaph_graph, label.data());
|
||||
}
|
||||
|
||||
inline bool DropLabelIndex(mgp_graph *memgraph_graph, const std::string_view label) {
|
||||
return drop_label_index(memgraph_graph, label.data());
|
||||
inline bool DropLabelIndex(mgp_graph *memgaph_graph, const std::string_view label) {
|
||||
return drop_label_index(memgaph_graph, label.data());
|
||||
}
|
||||
|
||||
inline List ListAllLabelIndices(mgp_graph *memgraph_graph) {
|
||||
@ -4331,14 +4487,14 @@ inline List ListAllLabelIndices(mgp_graph *memgraph_graph) {
|
||||
return List(label_indices);
|
||||
}
|
||||
|
||||
inline bool CreateLabelPropertyIndex(mgp_graph *memgraph_graph, const std::string_view label,
|
||||
inline bool CreateLabelPropertyIndex(mgp_graph *memgaph_graph, const std::string_view label,
|
||||
const std::string_view property) {
|
||||
return create_label_property_index(memgraph_graph, label.data(), property.data());
|
||||
return create_label_property_index(memgaph_graph, label.data(), property.data());
|
||||
}
|
||||
|
||||
inline bool DropLabelPropertyIndex(mgp_graph *memgraph_graph, const std::string_view label,
|
||||
inline bool DropLabelPropertyIndex(mgp_graph *memgaph_graph, const std::string_view label,
|
||||
const std::string_view property) {
|
||||
return drop_label_property_index(memgraph_graph, label.data(), property.data());
|
||||
return drop_label_property_index(memgaph_graph, label.data(), property.data());
|
||||
}
|
||||
|
||||
inline List ListAllLabelPropertyIndices(mgp_graph *memgraph_graph) {
|
||||
@ -4349,58 +4505,6 @@ inline List ListAllLabelPropertyIndices(mgp_graph *memgraph_graph) {
|
||||
return List(label_property_indices);
|
||||
}
|
||||
|
||||
namespace {
|
||||
constexpr std::string_view kErrorMsgKey = "error_msg";
|
||||
constexpr std::string_view kSearchResultsKey = "search_results";
|
||||
constexpr std::string_view kAggregationResultsKey = "aggregation_results";
|
||||
} // namespace
|
||||
|
||||
inline List SearchTextIndex(mgp_graph *memgraph_graph, std::string_view index_name, std::string_view search_query,
|
||||
text_search_mode search_mode) {
|
||||
auto results_or_error = Map(mgp::MemHandlerCallback(graph_search_text_index, memgraph_graph, index_name.data(),
|
||||
search_query.data(), search_mode));
|
||||
if (results_or_error.KeyExists(kErrorMsgKey)) {
|
||||
if (!results_or_error.At(kErrorMsgKey).IsString()) {
|
||||
throw TextSearchException{"The error message is not a string!"};
|
||||
}
|
||||
throw TextSearchException(results_or_error.At(kErrorMsgKey).ValueString().data());
|
||||
}
|
||||
|
||||
if (!results_or_error.KeyExists(kSearchResultsKey)) {
|
||||
throw TextSearchException{"Incomplete text index search results!"};
|
||||
}
|
||||
|
||||
if (!results_or_error.At(kSearchResultsKey).IsList()) {
|
||||
throw TextSearchException{"Text index search results have wrong type!"};
|
||||
}
|
||||
|
||||
return results_or_error.At(kSearchResultsKey).ValueList();
|
||||
}
|
||||
|
||||
inline std::string_view AggregateOverTextIndex(mgp_graph *memgraph_graph, std::string_view index_name,
|
||||
std::string_view search_query, std::string_view aggregation_query) {
|
||||
auto results_or_error =
|
||||
Map(mgp::MemHandlerCallback(graph_aggregate_over_text_index, memgraph_graph, index_name.data(),
|
||||
search_query.data(), aggregation_query.data()));
|
||||
|
||||
if (results_or_error.KeyExists(kErrorMsgKey)) {
|
||||
if (!results_or_error.At(kErrorMsgKey).IsString()) {
|
||||
throw TextSearchException{"The error message is not a string!"};
|
||||
}
|
||||
throw TextSearchException(results_or_error.At(kErrorMsgKey).ValueString().data());
|
||||
}
|
||||
|
||||
if (!results_or_error.KeyExists(kAggregationResultsKey)) {
|
||||
throw TextSearchException{"Incomplete text index aggregation results!"};
|
||||
}
|
||||
|
||||
if (!results_or_error.At(kAggregationResultsKey).IsString()) {
|
||||
throw TextSearchException{"Text index aggregation results have wrong type!"};
|
||||
}
|
||||
|
||||
return results_or_error.At(kAggregationResultsKey).ValueString();
|
||||
}
|
||||
|
||||
inline bool CreateExistenceConstraint(mgp_graph *memgraph_graph, const std::string_view label,
|
||||
const std::string_view property) {
|
||||
return create_existence_constraint(memgraph_graph, label.data(), property.data());
|
||||
|
@ -295,34 +295,6 @@ set_path_external_library(jemalloc STATIC
|
||||
|
||||
import_header_library(rangev3 ${CMAKE_CURRENT_SOURCE_DIR}/rangev3/include)
|
||||
|
||||
ExternalProject_Add(mgcxx-proj
|
||||
PREFIX mgcxx-proj
|
||||
GIT_REPOSITORY https://github.com/memgraph/mgcxx
|
||||
GIT_TAG "v0.0.4"
|
||||
CMAKE_ARGS
|
||||
"-DCMAKE_INSTALL_PREFIX=<INSTALL_DIR>"
|
||||
"-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}"
|
||||
"-DENABLE_TESTS=OFF"
|
||||
"-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
|
||||
"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
|
||||
INSTALL_DIR "${PROJECT_BINARY_DIR}/mgcxx"
|
||||
)
|
||||
ExternalProject_Get_Property(mgcxx-proj install_dir)
|
||||
set(MGCXX_ROOT ${install_dir})
|
||||
|
||||
add_library(tantivy_text_search STATIC IMPORTED GLOBAL)
|
||||
add_dependencies(tantivy_text_search mgcxx-proj)
|
||||
set_property(TARGET tantivy_text_search PROPERTY IMPORTED_LOCATION ${MGCXX_ROOT}/lib/libtantivy_text_search.a)
|
||||
|
||||
add_library(mgcxx_text_search STATIC IMPORTED GLOBAL)
|
||||
add_dependencies(mgcxx_text_search mgcxx-proj)
|
||||
set_property(TARGET mgcxx_text_search PROPERTY IMPORTED_LOCATION ${MGCXX_ROOT}/lib/libmgcxx_text_search.a)
|
||||
# We need to create the include directory first in order to be able to add it
|
||||
# as an include directory. The header files in the include directory will be
|
||||
# generated later during the build process.
|
||||
file(MAKE_DIRECTORY ${MGCXX_ROOT}/include)
|
||||
set_property(TARGET mgcxx_text_search PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${MGCXX_ROOT}/include)
|
||||
|
||||
# Setup NuRaft
|
||||
import_external_library(nuraft STATIC
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/nuraft/lib/libnuraft.a
|
||||
|
@ -182,7 +182,7 @@ benchmark_tag="v1.6.0"
|
||||
repo_clone_try_double "${primary_urls[gbenchmark]}" "${secondary_urls[gbenchmark]}" "benchmark" "$benchmark_tag" true
|
||||
|
||||
# google test
|
||||
googletest_tag="v1.14.0"
|
||||
googletest_tag="release-1.8.0"
|
||||
repo_clone_try_double "${primary_urls[gtest]}" "${secondary_urls[gtest]}" "googletest" "$googletest_tag" true
|
||||
|
||||
# libbcrypt
|
||||
|
@ -6,8 +6,6 @@ project(memgraph_query_modules)
|
||||
|
||||
disallow_in_source_build()
|
||||
|
||||
find_package(fmt REQUIRED)
|
||||
|
||||
# Everything that is installed here, should be under the "query_modules" component.
|
||||
set(CMAKE_INSTALL_DEFAULT_COMPONENT_NAME "query_modules")
|
||||
string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type)
|
||||
@ -60,22 +58,6 @@ install(PROGRAMS $<TARGET_FILE:schema>
|
||||
# Also install the source of the example, so user can read it.
|
||||
install(FILES schema.cpp DESTINATION lib/memgraph/query_modules/src)
|
||||
|
||||
add_library(text SHARED text_search_module.cpp)
|
||||
target_include_directories(text PRIVATE ${CMAKE_SOURCE_DIR}/include)
|
||||
target_compile_options(text PRIVATE -Wall)
|
||||
target_link_libraries(text PRIVATE -static-libgcc -static-libstdc++ fmt::fmt)
|
||||
# Strip C++ example in release build.
|
||||
if (lower_build_type STREQUAL "release")
|
||||
add_custom_command(TARGET text POST_BUILD
|
||||
COMMAND strip -s $<TARGET_FILE:text>
|
||||
COMMENT "Stripping symbols and sections from the C++ text_search module")
|
||||
endif()
|
||||
install(PROGRAMS $<TARGET_FILE:text>
|
||||
DESTINATION lib/memgraph/query_modules
|
||||
RENAME text.so)
|
||||
# Also install the source of the example, so user can read it.
|
||||
install(FILES text_search_module.cpp DESTINATION lib/memgraph/query_modules/src)
|
||||
|
||||
# Install the Python example and modules
|
||||
install(FILES example.py DESTINATION lib/memgraph/query_modules RENAME py_example.py)
|
||||
install(FILES graph_analyzer.py DESTINATION lib/memgraph/query_modules)
|
||||
|
@ -1,149 +0,0 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
|
||||
#include <fmt/format.h>
|
||||
|
||||
#include <mgp.hpp>
|
||||
|
||||
namespace TextSearch {
|
||||
constexpr std::string_view kProcedureSearch = "search";
|
||||
constexpr std::string_view kProcedureRegexSearch = "regex_search";
|
||||
constexpr std::string_view kProcedureSearchAllProperties = "search_all";
|
||||
constexpr std::string_view kProcedureAggregate = "aggregate";
|
||||
constexpr std::string_view kParameterIndexName = "index_name";
|
||||
constexpr std::string_view kParameterSearchQuery = "search_query";
|
||||
constexpr std::string_view kParameterAggregationQuery = "aggregation_query";
|
||||
constexpr std::string_view kReturnNode = "node";
|
||||
constexpr std::string_view kReturnAggregation = "aggregation";
|
||||
const std::string kSearchAllPrefix = "all";
|
||||
|
||||
void Search(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory);
|
||||
void RegexSearch(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory);
|
||||
void SearchAllProperties(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory);
|
||||
void Aggregate(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory);
|
||||
} // namespace TextSearch
|
||||
|
||||
void TextSearch::Search(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory) {
|
||||
mgp::MemoryDispatcherGuard guard{memory};
|
||||
const auto record_factory = mgp::RecordFactory(result);
|
||||
auto arguments = mgp::List(args);
|
||||
|
||||
try {
|
||||
const auto *index_name = arguments[0].ValueString().data();
|
||||
const auto *search_query = arguments[1].ValueString().data();
|
||||
for (const auto &node :
|
||||
mgp::SearchTextIndex(memgraph_graph, index_name, search_query, text_search_mode::SPECIFIED_PROPERTIES)) {
|
||||
auto record = record_factory.NewRecord();
|
||||
record.Insert(TextSearch::kReturnNode.data(), node.ValueNode());
|
||||
}
|
||||
} catch (const std::exception &e) {
|
||||
record_factory.SetErrorMessage(e.what());
|
||||
}
|
||||
}
|
||||
|
||||
/// text_search.regex_search: runs `search_query` as a regular expression
/// against the text index `index_name`, emitting one record per matching node
/// under the "node" field. Errors are surfaced via the record factory.
void TextSearch::RegexSearch(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory) {
  mgp::MemoryDispatcherGuard guard{memory};
  const auto record_factory = mgp::RecordFactory(result);
  auto arguments = mgp::List(args);

  try {
    // Keep the string_views intact; .data() would only be safe if the views were
    // null-terminated, which ValueString() does not guarantee.
    const auto index_name = arguments[0].ValueString();
    const auto search_query = arguments[1].ValueString();
    for (const auto &node : mgp::SearchTextIndex(memgraph_graph, index_name, search_query, text_search_mode::REGEX)) {
      auto record = record_factory.NewRecord();
      record.Insert(TextSearch::kReturnNode.data(), node.ValueNode());
    }
  } catch (const std::exception &e) {
    record_factory.SetErrorMessage(e.what());
  }
}
|
||||
|
||||
/// text_search.search_all: searches across ALL properties of the documents in
/// index `index_name` by prefixing the user query with `kSearchAllPrefix`
/// ("all:<query>"). Emits one record per matching node under the "node" field.
void TextSearch::SearchAllProperties(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result,
                                     mgp_memory *memory) {
  mgp::MemoryDispatcherGuard guard{memory};
  const auto record_factory = mgp::RecordFactory(result);
  auto arguments = mgp::List(args);

  try {
    const auto index_name = arguments[0].ValueString();
    // BUG FIX: the original called .data() on the temporary std::string returned
    // by fmt::format(); that temporary is destroyed at the end of the full
    // expression, leaving a dangling pointer that was then dereferenced inside
    // SearchTextIndex. Keeping the formatted query in a named local keeps the
    // buffer alive for the duration of the search.
    const auto search_query = fmt::format("{}:{}", kSearchAllPrefix, arguments[1].ValueString());
    for (const auto &node :
         mgp::SearchTextIndex(memgraph_graph, index_name, search_query, text_search_mode::ALL_PROPERTIES)) {
      auto record = record_factory.NewRecord();
      record.Insert(TextSearch::kReturnNode.data(), node.ValueNode());
    }
  } catch (const std::exception &e) {
    record_factory.SetErrorMessage(e.what());
  }
}
|
||||
|
||||
/// text_search.aggregate: runs `search_query` against index `index_name` and
/// applies `aggregation_query` to the matches, emitting the aggregation result
/// as a single string record under the "aggregation" field.
void TextSearch::Aggregate(mgp_list *args, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory) {
  mgp::MemoryDispatcherGuard guard{memory};
  const auto record_factory = mgp::RecordFactory(result);
  auto arguments = mgp::List(args);

  try {
    // Forward the string_views directly instead of .data(): the views are not
    // guaranteed to be null-terminated, so the raw pointers could over-read.
    const auto index_name = arguments[0].ValueString();
    const auto search_query = arguments[1].ValueString();
    const auto aggregation_query = arguments[2].ValueString();
    // aggregation_result is a named local, so .data() below stays valid while
    // the record is populated.
    const auto aggregation_result =
        mgp::AggregateOverTextIndex(memgraph_graph, index_name, search_query, aggregation_query);
    auto record = record_factory.NewRecord();
    record.Insert(TextSearch::kReturnAggregation.data(), aggregation_result.data());
  } catch (const std::exception &e) {
    record_factory.SetErrorMessage(e.what());
  }
}
|
||||
|
||||
// Module entry point: registers the four read-only text-search procedures with
// the Memgraph query engine. Returns 0 on success, 1 if registration throws.
// Exceptions must not propagate across the C API boundary, hence the catch-all
// on std::exception with a diagnostic to stderr.
extern "C" int mgp_init_module(struct mgp_module *module, struct mgp_memory *memory) {
  try {
    mgp::MemoryDispatcherGuard guard{memory};

    // search(index_name, search_query) -> node
    AddProcedure(TextSearch::Search, TextSearch::kProcedureSearch, mgp::ProcedureType::Read,
                 {
                     mgp::Parameter(TextSearch::kParameterIndexName, mgp::Type::String),
                     mgp::Parameter(TextSearch::kParameterSearchQuery, mgp::Type::String),
                 },
                 {mgp::Return(TextSearch::kReturnNode, mgp::Type::Node)}, module, memory);

    // regex_search(index_name, search_query) -> node
    AddProcedure(TextSearch::RegexSearch, TextSearch::kProcedureRegexSearch, mgp::ProcedureType::Read,
                 {
                     mgp::Parameter(TextSearch::kParameterIndexName, mgp::Type::String),
                     mgp::Parameter(TextSearch::kParameterSearchQuery, mgp::Type::String),
                 },
                 {mgp::Return(TextSearch::kReturnNode, mgp::Type::Node)}, module, memory);

    // search_all(index_name, search_query) -> node
    AddProcedure(TextSearch::SearchAllProperties, TextSearch::kProcedureSearchAllProperties, mgp::ProcedureType::Read,
                 {
                     mgp::Parameter(TextSearch::kParameterIndexName, mgp::Type::String),
                     mgp::Parameter(TextSearch::kParameterSearchQuery, mgp::Type::String),
                 },
                 {mgp::Return(TextSearch::kReturnNode, mgp::Type::Node)}, module, memory);

    // aggregate(index_name, search_query, aggregation_query) -> aggregation (string)
    AddProcedure(TextSearch::Aggregate, TextSearch::kProcedureAggregate, mgp::ProcedureType::Read,
                 {
                     mgp::Parameter(TextSearch::kParameterIndexName, mgp::Type::String),
                     mgp::Parameter(TextSearch::kParameterSearchQuery, mgp::Type::String),
                     mgp::Parameter(TextSearch::kParameterAggregationQuery, mgp::Type::String),
                 },
                 {mgp::Return(TextSearch::kReturnAggregation, mgp::Type::String)}, module, memory);
  } catch (const std::exception &e) {
    std::cerr << "Error while initializing query module: " << e.what() << std::endl;
    return 1;
  }

  return 0;
}
|
||||
|
||||
// Module teardown hook: no module-level resources to release, so always succeed.
extern "C" int mgp_shutdown_module() { return 0; }
|
@ -48,9 +48,9 @@ SUPPORTED_ARCHS=(
|
||||
)
|
||||
SUPPORTED_TESTS=(
|
||||
clang-tidy cppcheck-and-clang-format code-analysis
|
||||
code-coverage drivers drivers-high-availability durability e2e gql-behave
|
||||
code-coverage drivers durability e2e gql-behave
|
||||
integration leftover-CTest macro-benchmark
|
||||
mgbench stress-plain stress-ssl
|
||||
mgbench stress-plain stress-ssl
|
||||
unit unit-coverage upload-to-bench-graph
|
||||
|
||||
)
|
||||
@ -116,7 +116,7 @@ print_help () {
|
||||
|
||||
echo -e "\nToolchain v5 supported OSs:"
|
||||
echo -e " \"${SUPPORTED_OS_V5[*]}\""
|
||||
|
||||
|
||||
echo -e "\nExample usage:"
|
||||
echo -e " $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd run"
|
||||
echo -e " $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd --build-type RelWithDebInfo build-memgraph --community"
|
||||
@ -211,7 +211,6 @@ check_support() {
|
||||
build_memgraph () {
|
||||
local build_container="mgbuild_${toolchain_version}_${os}"
|
||||
local ACTIVATE_TOOLCHAIN="source /opt/toolchain-${toolchain_version}/activate"
|
||||
local ACTIVATE_CARGO="source $MGBUILD_HOME_DIR/.cargo/env"
|
||||
local container_build_dir="$MGBUILD_ROOT_DIR/build"
|
||||
local container_output_dir="$container_build_dir/output"
|
||||
local arm_flag=""
|
||||
@ -296,7 +295,7 @@ build_memgraph () {
|
||||
docker cp "$PROJECT_ROOT/." "$build_container:$MGBUILD_ROOT_DIR/"
|
||||
fi
|
||||
# Change ownership of copied files so the mg user inside container can access them
|
||||
docker exec -u root $build_container bash -c "chown -R mg:mg $MGBUILD_ROOT_DIR"
|
||||
docker exec -u root $build_container bash -c "chown -R mg:mg $MGBUILD_ROOT_DIR"
|
||||
|
||||
echo "Installing dependencies using '/memgraph/environment/os/$os.sh' script..."
|
||||
docker exec -u root "$build_container" bash -c "$MGBUILD_ROOT_DIR/environment/os/$os.sh check TOOLCHAIN_RUN_DEPS || /environment/os/$os.sh install TOOLCHAIN_RUN_DEPS"
|
||||
@ -317,20 +316,21 @@ build_memgraph () {
|
||||
|
||||
# Define cmake command
|
||||
local cmake_cmd="cmake $build_type_flag $arm_flag $community_flag $telemetry_id_override_flag $coverage_flag $asan_flag $ubsan_flag .."
|
||||
docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && $ACTIVATE_CARGO && $cmake_cmd"
|
||||
docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && $cmake_cmd"
|
||||
|
||||
# ' is used instead of " because we need to run make within the allowed
|
||||
# container resources.
|
||||
# Default value for $threads is 0 instead of $(nproc) because macos
|
||||
# Default value for $threads is 0 instead of $(nproc) because macos
|
||||
# doesn't support the nproc command.
|
||||
# 0 is set for default value and checked here because mgbuild containers
|
||||
# support nproc
|
||||
# shellcheck disable=SC2016
|
||||
if [[ "$threads" == 0 ]]; then
|
||||
docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && $ACTIVATE_CARGO "'&& make -j$(nproc)'
|
||||
docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && $ACTIVATE_CARGO "'&& make -j$(nproc) -B mgconsole'
|
||||
docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN "'&& make -j$(nproc)'
|
||||
docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN "'&& make -j$(nproc) -B mgconsole'
|
||||
else
|
||||
docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && $ACTIVATE_CARGO "'&& make -j$threads'
|
||||
docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && $ACTIVATE_CARGO "'&& make -j$threads -B mgconsole'
|
||||
docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN "'&& make -j$threads'
|
||||
docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN "'&& make -j$threads -B mgconsole'
|
||||
fi
|
||||
}
|
||||
|
||||
@ -362,7 +362,7 @@ copy_memgraph() {
|
||||
local container_output_path="$MGBUILD_ROOT_DIR/build/memgraph"
|
||||
local host_output_path="$PROJECT_ROOT/build/memgraph"
|
||||
mkdir -p "$PROJECT_ROOT/build"
|
||||
docker cp -L $build_container:$container_output_path $host_output_path
|
||||
docker cp -L $build_container:$container_output_path $host_output_path
|
||||
echo "Binary saved to $host_output_path"
|
||||
;;
|
||||
--build-logs)
|
||||
@ -370,7 +370,7 @@ copy_memgraph() {
|
||||
local container_output_path="$MGBUILD_ROOT_DIR/build/logs"
|
||||
local host_output_path="$PROJECT_ROOT/build/logs"
|
||||
mkdir -p "$PROJECT_ROOT/build"
|
||||
docker cp -L $build_container:$container_output_path $host_output_path
|
||||
docker cp -L $build_container:$container_output_path $host_output_path
|
||||
echo "Build logs saved to $host_output_path"
|
||||
;;
|
||||
--package)
|
||||
@ -396,7 +396,6 @@ copy_memgraph() {
|
||||
test_memgraph() {
|
||||
local ACTIVATE_TOOLCHAIN="source /opt/toolchain-${toolchain_version}/activate"
|
||||
local ACTIVATE_VENV="./setup.sh /opt/toolchain-${toolchain_version}/activate"
|
||||
local ACTIVATE_CARGO="source $MGBUILD_HOME_DIR/.cargo/env"
|
||||
local EXPORT_LICENSE="export MEMGRAPH_ENTERPRISE_LICENSE=$enterprise_license"
|
||||
local EXPORT_ORG_NAME="export MEMGRAPH_ORGANIZATION_NAME=$organization_name"
|
||||
local BUILD_DIR="$MGBUILD_ROOT_DIR/build"
|
||||
@ -417,9 +416,6 @@ test_memgraph() {
|
||||
drivers)
|
||||
docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR "'&& ./tests/drivers/run.sh'
|
||||
;;
|
||||
drivers-high-availability)
|
||||
docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR "'&& ./tests/drivers/run_cluster.sh'
|
||||
;;
|
||||
integration)
|
||||
docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR "'&& tests/integration/run.sh'
|
||||
;;
|
||||
@ -485,7 +481,7 @@ test_memgraph() {
|
||||
# docker network connect --alias $kafka_hostname $build_container_network $kafka_container > /dev/null 2>&1 || echo "Kafka container already inside correct network or something went wrong ..."
|
||||
# docker network connect --alias $pulsar_hostname $build_container_network $pulsar_container > /dev/null 2>&1 || echo "Kafka container already inside correct network or something went wrong ..."
|
||||
docker exec -u mg $build_container bash -c "pip install --user networkx && pip3 install --user networkx"
|
||||
docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && $ACTIVATE_CARGO && cd $MGBUILD_ROOT_DIR/tests && $ACTIVATE_VENV && source ve3/bin/activate_e2e && cd $MGBUILD_ROOT_DIR/tests/e2e "'&& ./run.sh'
|
||||
docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests && $ACTIVATE_VENV && source ve3/bin/activate_e2e && cd $MGBUILD_ROOT_DIR/tests/e2e "'&& ./run.sh'
|
||||
;;
|
||||
*)
|
||||
echo "Error: Unknown test '$1'"
|
||||
@ -666,4 +662,4 @@ case $command in
|
||||
echo "Error: Unknown command '$command'"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
esac
|
||||
|
@ -45,7 +45,7 @@ set(mg_single_node_v2_sources
|
||||
add_executable(memgraph ${mg_single_node_v2_sources})
|
||||
target_include_directories(memgraph PUBLIC ${CMAKE_SOURCE_DIR}/include)
|
||||
target_link_libraries(memgraph stdc++fs Threads::Threads
|
||||
mg-telemetry mgcxx_text_search tantivy_text_search mg-communication mg-communication-metrics mg-memory mg-utils mg-license mg-settings mg-glue mg-flags mg::system mg::replication_handler)
|
||||
mg-telemetry mg-communication mg-communication-metrics mg-memory mg-utils mg-license mg-settings mg-glue mg-flags mg::system mg::replication_handler)
|
||||
|
||||
# NOTE: `include/mg_procedure.syms` describes a pattern match for symbols which
|
||||
# should be dynamically exported, so that `dlopen` can correctly link th
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -88,12 +88,6 @@ class Session {
|
||||
|
||||
virtual void Configure(const std::map<std::string, memgraph::communication::bolt::Value> &run_time_info) = 0;
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
virtual auto Route(std::map<std::string, Value> const &routing,
|
||||
std::vector<memgraph::communication::bolt::Value> const &bookmarks,
|
||||
std::map<std::string, Value> const &extra) -> std::map<std::string, Value> = 0;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Put results of the processed query in the `encoder`.
|
||||
*
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -79,9 +79,9 @@ State RunHandlerV4(Signature signature, TSession &session, State state, Marker m
|
||||
}
|
||||
case Signature::Route: {
|
||||
if constexpr (bolt_minor >= 3) {
|
||||
return HandleRoute<TSession>(session, marker);
|
||||
if (signature == Signature::Route) return HandleRoute<TSession>(session, marker);
|
||||
} else {
|
||||
spdlog::trace("Supported only in bolt versions >= 4.3");
|
||||
spdlog::trace("Supported only in bolt v4.3");
|
||||
return State::Close;
|
||||
}
|
||||
}
|
||||
|
@ -478,6 +478,9 @@ State HandleGoodbye() {
|
||||
|
||||
template <typename TSession>
|
||||
State HandleRoute(TSession &session, const Marker marker) {
|
||||
// Route message is not implemented since it is Neo4j specific, therefore we will receive it and inform user that
|
||||
// there is no implementation. Before that, we have to read out the fields from the buffer to leave it in a clean
|
||||
// state.
|
||||
if (marker != Marker::TinyStruct3) {
|
||||
spdlog::trace("Expected TinyStruct3 marker, but received 0x{:02x}!", utils::UnderlyingCast(marker));
|
||||
return State::Close;
|
||||
@ -493,27 +496,11 @@ State HandleRoute(TSession &session, const Marker marker) {
|
||||
spdlog::trace("Couldn't read bookmarks field!");
|
||||
return State::Close;
|
||||
}
|
||||
|
||||
// TODO: (andi) Fix Bolt versions
|
||||
Value db;
|
||||
if (!session.decoder_.ReadValue(&db)) {
|
||||
spdlog::trace("Couldn't read db field!");
|
||||
return State::Close;
|
||||
}
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
try {
|
||||
auto res = session.Route(routing.ValueMap(), bookmarks.ValueList(), {});
|
||||
if (!session.encoder_.MessageSuccess(std::move(res))) {
|
||||
spdlog::trace("Couldn't send result of routing!");
|
||||
return State::Close;
|
||||
}
|
||||
return State::Idle;
|
||||
} catch (const std::exception &e) {
|
||||
return HandleFailure(session, e);
|
||||
}
|
||||
|
||||
#else
|
||||
session.encoder_buffer_.Clear();
|
||||
bool fail_sent =
|
||||
session.encoder_.MessageFailure({{"code", "66"}, {"message", "Route message is not supported in Memgraph!"}});
|
||||
@ -522,7 +509,6 @@ State HandleRoute(TSession &session, const Marker marker) {
|
||||
return State::Close;
|
||||
}
|
||||
return State::Error;
|
||||
#endif
|
||||
}
|
||||
|
||||
template <typename TSession>
|
||||
|
@ -6,7 +6,7 @@ target_sources(mg-coordination
|
||||
include/coordination/coordinator_state.hpp
|
||||
include/coordination/coordinator_rpc.hpp
|
||||
include/coordination/coordinator_server.hpp
|
||||
include/coordination/coordinator_communication_config.hpp
|
||||
include/coordination/coordinator_config.hpp
|
||||
include/coordination/coordinator_exceptions.hpp
|
||||
include/coordination/coordinator_slk.hpp
|
||||
include/coordination/coordinator_instance.hpp
|
||||
@ -23,7 +23,7 @@ target_sources(mg-coordination
|
||||
include/nuraft/coordinator_state_manager.hpp
|
||||
|
||||
PRIVATE
|
||||
coordinator_communication_config.cpp
|
||||
coordinator_config.cpp
|
||||
coordinator_client.cpp
|
||||
coordinator_state.cpp
|
||||
coordinator_rpc.cpp
|
||||
|
@ -14,7 +14,7 @@
|
||||
|
||||
#include "coordination/coordinator_client.hpp"
|
||||
|
||||
#include "coordination/coordinator_communication_config.hpp"
|
||||
#include "coordination/coordinator_config.hpp"
|
||||
#include "coordination/coordinator_rpc.hpp"
|
||||
#include "replication_coordination_glue/common.hpp"
|
||||
#include "replication_coordination_glue/messages.hpp"
|
||||
@ -23,17 +23,18 @@
|
||||
namespace memgraph::coordination {
|
||||
|
||||
namespace {
|
||||
auto CreateClientContext(memgraph::coordination::CoordinatorToReplicaConfig const &config)
|
||||
auto CreateClientContext(memgraph::coordination::CoordinatorClientConfig const &config)
|
||||
-> communication::ClientContext {
|
||||
return (config.ssl) ? communication::ClientContext{config.ssl->key_file, config.ssl->cert_file}
|
||||
: communication::ClientContext{};
|
||||
}
|
||||
} // namespace
|
||||
|
||||
CoordinatorClient::CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorToReplicaConfig config,
|
||||
CoordinatorClient::CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorClientConfig config,
|
||||
HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb)
|
||||
: rpc_context_{CreateClientContext(config)},
|
||||
rpc_client_{config.mgt_server, &rpc_context_},
|
||||
rpc_client_{io::network::Endpoint(io::network::Endpoint::needs_resolving, config.ip_address, config.port),
|
||||
&rpc_context_},
|
||||
config_{std::move(config)},
|
||||
coord_instance_{coord_instance},
|
||||
succ_cb_{std::move(succ_cb)},
|
||||
@ -85,9 +86,7 @@ void CoordinatorClient::StopFrequentCheck() { instance_checker_.Stop(); }
|
||||
void CoordinatorClient::PauseFrequentCheck() { instance_checker_.Pause(); }
|
||||
void CoordinatorClient::ResumeFrequentCheck() { instance_checker_.Resume(); }
|
||||
|
||||
auto CoordinatorClient::ReplicationClientInfo() const -> coordination::ReplicationClientInfo {
|
||||
return config_.replication_client_info;
|
||||
}
|
||||
auto CoordinatorClient::ReplicationClientInfo() const -> ReplClientInfo { return config_.replication_client_info; }
|
||||
|
||||
auto CoordinatorClient::SendPromoteReplicaToMainRpc(const utils::UUID &uuid,
|
||||
ReplicationClientsInfo replication_clients_info) const -> bool {
|
||||
|
@ -18,178 +18,101 @@
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
void to_json(nlohmann::json &j, ReplicationInstanceState const &instance_state) {
|
||||
j = nlohmann::json{
|
||||
{"config", instance_state.config}, {"status", instance_state.status}, {"uuid", instance_state.instance_uuid}};
|
||||
void to_json(nlohmann::json &j, InstanceState const &instance_state) {
|
||||
j = nlohmann::json{{"config", instance_state.config}, {"status", instance_state.status}};
|
||||
}
|
||||
|
||||
void from_json(nlohmann::json const &j, ReplicationInstanceState &instance_state) {
|
||||
void from_json(nlohmann::json const &j, InstanceState &instance_state) {
|
||||
j.at("config").get_to(instance_state.config);
|
||||
j.at("status").get_to(instance_state.status);
|
||||
j.at("uuid").get_to(instance_state.instance_uuid);
|
||||
}
|
||||
|
||||
CoordinatorClusterState::CoordinatorClusterState(std::map<std::string, ReplicationInstanceState, std::less<>> instances,
|
||||
utils::UUID const ¤t_main_uuid, bool is_lock_opened)
|
||||
: repl_instances_{std::move(instances)}, current_main_uuid_(current_main_uuid), is_lock_opened_(is_lock_opened) {}
|
||||
CoordinatorClusterState::CoordinatorClusterState(std::map<std::string, InstanceState, std::less<>> instances)
|
||||
: instances_{std::move(instances)} {}
|
||||
|
||||
CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState const &other)
|
||||
: repl_instances_{other.repl_instances_},
|
||||
current_main_uuid_(other.current_main_uuid_),
|
||||
is_lock_opened_(other.is_lock_opened_) {}
|
||||
CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState const &other) : instances_{other.instances_} {}
|
||||
|
||||
CoordinatorClusterState &CoordinatorClusterState::operator=(CoordinatorClusterState const &other) {
|
||||
if (this == &other) {
|
||||
return *this;
|
||||
}
|
||||
repl_instances_ = other.repl_instances_;
|
||||
current_main_uuid_ = other.current_main_uuid_;
|
||||
is_lock_opened_ = other.is_lock_opened_;
|
||||
instances_ = other.instances_;
|
||||
return *this;
|
||||
}
|
||||
|
||||
CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState &&other) noexcept
|
||||
: repl_instances_{std::move(other.repl_instances_)},
|
||||
current_main_uuid_(other.current_main_uuid_),
|
||||
is_lock_opened_(other.is_lock_opened_) {}
|
||||
: instances_{std::move(other.instances_)} {}
|
||||
|
||||
CoordinatorClusterState &CoordinatorClusterState::operator=(CoordinatorClusterState &&other) noexcept {
|
||||
if (this == &other) {
|
||||
return *this;
|
||||
}
|
||||
repl_instances_ = std::move(other.repl_instances_);
|
||||
current_main_uuid_ = other.current_main_uuid_;
|
||||
is_lock_opened_ = other.is_lock_opened_;
|
||||
instances_ = std::move(other.instances_);
|
||||
return *this;
|
||||
}
|
||||
|
||||
auto CoordinatorClusterState::MainExists() const -> bool {
|
||||
auto lock = std::shared_lock{log_lock_};
|
||||
return std::ranges::any_of(repl_instances_,
|
||||
return std::ranges::any_of(instances_,
|
||||
[](auto const &entry) { return entry.second.status == ReplicationRole::MAIN; });
|
||||
}
|
||||
|
||||
auto CoordinatorClusterState::HasMainState(std::string_view instance_name) const -> bool {
|
||||
auto CoordinatorClusterState::IsMain(std::string_view instance_name) const -> bool {
|
||||
auto lock = std::shared_lock{log_lock_};
|
||||
auto const it = repl_instances_.find(instance_name);
|
||||
return it != repl_instances_.end() && it->second.status == ReplicationRole::MAIN;
|
||||
auto const it = instances_.find(instance_name);
|
||||
return it != instances_.end() && it->second.status == ReplicationRole::MAIN;
|
||||
}
|
||||
|
||||
auto CoordinatorClusterState::HasReplicaState(std::string_view instance_name) const -> bool {
|
||||
auto CoordinatorClusterState::IsReplica(std::string_view instance_name) const -> bool {
|
||||
auto lock = std::shared_lock{log_lock_};
|
||||
auto const it = repl_instances_.find(instance_name);
|
||||
return it != repl_instances_.end() && it->second.status == ReplicationRole::REPLICA;
|
||||
auto const it = instances_.find(instance_name);
|
||||
return it != instances_.end() && it->second.status == ReplicationRole::REPLICA;
|
||||
}
|
||||
|
||||
auto CoordinatorClusterState::IsCurrentMain(std::string_view instance_name) const -> bool {
|
||||
auto lock = std::shared_lock{log_lock_};
|
||||
auto const it = repl_instances_.find(instance_name);
|
||||
return it != repl_instances_.end() && it->second.status == ReplicationRole::MAIN &&
|
||||
it->second.instance_uuid == current_main_uuid_;
|
||||
auto CoordinatorClusterState::InsertInstance(std::string instance_name, InstanceState instance_state) -> void {
|
||||
auto lock = std::lock_guard{log_lock_};
|
||||
instances_.insert_or_assign(std::move(instance_name), std::move(instance_state));
|
||||
}
|
||||
|
||||
auto CoordinatorClusterState::DoAction(TRaftLog log_entry, RaftLogAction log_action) -> void {
|
||||
auto lock = std::lock_guard{log_lock_};
|
||||
switch (log_action) {
|
||||
// end of OPEN_LOCK_REGISTER_REPLICATION_INSTANCE
|
||||
case RaftLogAction::REGISTER_REPLICATION_INSTANCE: {
|
||||
auto const &config = std::get<CoordinatorToReplicaConfig>(log_entry);
|
||||
spdlog::trace("DoAction: register replication instance {}", config.instance_name);
|
||||
// Setting instance uuid to random, if registration fails, we are still in random state
|
||||
repl_instances_.emplace(config.instance_name,
|
||||
ReplicationInstanceState{config, ReplicationRole::REPLICA, utils::UUID{}});
|
||||
is_lock_opened_ = false;
|
||||
auto const &config = std::get<CoordinatorClientConfig>(log_entry);
|
||||
instances_[config.instance_name] = InstanceState{config, ReplicationRole::REPLICA};
|
||||
break;
|
||||
}
|
||||
// end of OPEN_LOCK_UNREGISTER_REPLICATION_INSTANCE
|
||||
case RaftLogAction::UNREGISTER_REPLICATION_INSTANCE: {
|
||||
auto const instance_name = std::get<std::string>(log_entry);
|
||||
spdlog::trace("DoAction: unregister replication instance {}", instance_name);
|
||||
repl_instances_.erase(instance_name);
|
||||
is_lock_opened_ = false;
|
||||
instances_.erase(instance_name);
|
||||
break;
|
||||
}
|
||||
// end of OPEN_LOCK_SET_INSTANCE_AS_MAIN and OPEN_LOCK_FAILOVER
|
||||
case RaftLogAction::SET_INSTANCE_AS_MAIN: {
|
||||
auto const instance_uuid_change = std::get<InstanceUUIDUpdate>(log_entry);
|
||||
auto it = repl_instances_.find(instance_uuid_change.instance_name);
|
||||
MG_ASSERT(it != repl_instances_.end(), "Instance does not exist as part of raft state!");
|
||||
auto const instance_name = std::get<std::string>(log_entry);
|
||||
auto it = instances_.find(instance_name);
|
||||
MG_ASSERT(it != instances_.end(), "Instance does not exist as part of raft state!");
|
||||
it->second.status = ReplicationRole::MAIN;
|
||||
it->second.instance_uuid = instance_uuid_change.uuid;
|
||||
is_lock_opened_ = false;
|
||||
spdlog::trace("DoAction: set replication instance {} as main with uuid {}", instance_uuid_change.instance_name,
|
||||
std::string{instance_uuid_change.uuid});
|
||||
break;
|
||||
}
|
||||
// end of OPEN_LOCK_SET_INSTANCE_AS_REPLICA
|
||||
case RaftLogAction::SET_INSTANCE_AS_REPLICA: {
|
||||
auto const instance_name = std::get<std::string>(log_entry);
|
||||
auto it = repl_instances_.find(instance_name);
|
||||
MG_ASSERT(it != repl_instances_.end(), "Instance does not exist as part of raft state!");
|
||||
auto it = instances_.find(instance_name);
|
||||
MG_ASSERT(it != instances_.end(), "Instance does not exist as part of raft state!");
|
||||
it->second.status = ReplicationRole::REPLICA;
|
||||
is_lock_opened_ = false;
|
||||
spdlog::trace("DoAction: set replication instance {} as replica", instance_name);
|
||||
break;
|
||||
}
|
||||
case RaftLogAction::UPDATE_UUID_OF_NEW_MAIN: {
|
||||
current_main_uuid_ = std::get<utils::UUID>(log_entry);
|
||||
spdlog::trace("DoAction: update uuid of new main {}", std::string{current_main_uuid_});
|
||||
case RaftLogAction::UPDATE_UUID: {
|
||||
uuid_ = std::get<utils::UUID>(log_entry);
|
||||
break;
|
||||
}
|
||||
case RaftLogAction::UPDATE_UUID_FOR_INSTANCE: {
|
||||
auto const instance_uuid_change = std::get<InstanceUUIDUpdate>(log_entry);
|
||||
auto it = repl_instances_.find(instance_uuid_change.instance_name);
|
||||
MG_ASSERT(it != repl_instances_.end(), "Instance doesn't exist as part of RAFT state");
|
||||
it->second.instance_uuid = instance_uuid_change.uuid;
|
||||
spdlog::trace("DoAction: update uuid for instance {} to {}", instance_uuid_change.instance_name,
|
||||
std::string{instance_uuid_change.uuid});
|
||||
break;
|
||||
}
|
||||
case RaftLogAction::ADD_COORDINATOR_INSTANCE: {
|
||||
auto const &config = std::get<CoordinatorToCoordinatorConfig>(log_entry);
|
||||
coordinators_.emplace_back(CoordinatorInstanceState{config});
|
||||
spdlog::trace("DoAction: add coordinator instance {}", config.coordinator_server_id);
|
||||
break;
|
||||
}
|
||||
case RaftLogAction::OPEN_LOCK_REGISTER_REPLICATION_INSTANCE: {
|
||||
is_lock_opened_ = true;
|
||||
spdlog::trace("DoAction: open lock register");
|
||||
break;
|
||||
// TODO(antoniofilipovic) save what we are doing to be able to undo....
|
||||
}
|
||||
case RaftLogAction::OPEN_LOCK_UNREGISTER_REPLICATION_INSTANCE: {
|
||||
is_lock_opened_ = true;
|
||||
spdlog::trace("DoAction: open lock unregister");
|
||||
break;
|
||||
// TODO(antoniofilipovic) save what we are doing
|
||||
}
|
||||
case RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_MAIN: {
|
||||
is_lock_opened_ = true;
|
||||
spdlog::trace("DoAction: open lock set instance as main");
|
||||
break;
|
||||
// TODO(antoniofilipovic) save what we are doing
|
||||
}
|
||||
case RaftLogAction::OPEN_LOCK_FAILOVER: {
|
||||
is_lock_opened_ = true;
|
||||
spdlog::trace("DoAction: open lock failover");
|
||||
break;
|
||||
// TODO(antoniofilipovic) save what we are doing
|
||||
}
|
||||
case RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_REPLICA: {
|
||||
is_lock_opened_ = true;
|
||||
spdlog::trace("DoAction: open lock set instance as replica");
|
||||
break;
|
||||
// TODO(antoniofilipovic) save what we need to undo
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
auto CoordinatorClusterState::Serialize(ptr<buffer> &data) -> void {
|
||||
auto lock = std::shared_lock{log_lock_};
|
||||
nlohmann::json j = {{"repl_instances", repl_instances_},
|
||||
{"is_lock_opened", is_lock_opened_},
|
||||
{"current_main_uuid", current_main_uuid_}};
|
||||
auto const log = j.dump();
|
||||
|
||||
auto const log = nlohmann::json(instances_).dump();
|
||||
|
||||
data = buffer::alloc(sizeof(uint32_t) + log.size());
|
||||
buffer_serializer bs(data);
|
||||
bs.put_str(log);
|
||||
@ -198,34 +121,26 @@ auto CoordinatorClusterState::Serialize(ptr<buffer> &data) -> void {
|
||||
auto CoordinatorClusterState::Deserialize(buffer &data) -> CoordinatorClusterState {
|
||||
buffer_serializer bs(data);
|
||||
auto const j = nlohmann::json::parse(bs.get_str());
|
||||
auto instances = j["repl_instances"].get<std::map<std::string, ReplicationInstanceState, std::less<>>>();
|
||||
auto current_main_uuid = j["current_main_uuid"].get<utils::UUID>();
|
||||
bool is_lock_opened = j["is_lock_opened"].get<int>();
|
||||
return CoordinatorClusterState{std::move(instances), current_main_uuid, is_lock_opened};
|
||||
auto instances = j.get<std::map<std::string, InstanceState, std::less<>>>();
|
||||
|
||||
return CoordinatorClusterState{std::move(instances)};
|
||||
}
|
||||
|
||||
auto CoordinatorClusterState::GetReplicationInstances() const -> std::vector<ReplicationInstanceState> {
|
||||
auto CoordinatorClusterState::GetInstances() const -> std::vector<InstanceState> {
|
||||
auto lock = std::shared_lock{log_lock_};
|
||||
return repl_instances_ | ranges::views::values | ranges::to<std::vector<ReplicationInstanceState>>;
|
||||
return instances_ | ranges::views::values | ranges::to<std::vector<InstanceState>>;
|
||||
}
|
||||
|
||||
auto CoordinatorClusterState::GetCurrentMainUUID() const -> utils::UUID { return current_main_uuid_; }
|
||||
auto CoordinatorClusterState::GetUUID() const -> utils::UUID { return uuid_; }
|
||||
|
||||
auto CoordinatorClusterState::GetInstanceUUID(std::string_view instance_name) const -> utils::UUID {
|
||||
auto CoordinatorClusterState::FindCurrentMainInstanceName() const -> std::optional<std::string> {
|
||||
auto lock = std::shared_lock{log_lock_};
|
||||
auto const it = repl_instances_.find(instance_name);
|
||||
MG_ASSERT(it != repl_instances_.end(), "Instance with that name doesn't exist.");
|
||||
return it->second.instance_uuid;
|
||||
}
|
||||
|
||||
auto CoordinatorClusterState::GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState> {
|
||||
auto lock = std::shared_lock{log_lock_};
|
||||
return coordinators_;
|
||||
}
|
||||
|
||||
auto CoordinatorClusterState::IsLockOpened() const -> bool {
|
||||
auto lock = std::shared_lock{log_lock_};
|
||||
return is_lock_opened_;
|
||||
auto const it =
|
||||
std::ranges::find_if(instances_, [](auto const &entry) { return entry.second.status == ReplicationRole::MAIN; });
|
||||
if (it == instances_.end()) {
|
||||
return {};
|
||||
}
|
||||
return it->first;
|
||||
}
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
|
@ -11,62 +11,43 @@
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "coordination/coordinator_communication_config.hpp"
|
||||
#include "coordination/coordinator_config.hpp"
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
void to_json(nlohmann::json &j, CoordinatorToCoordinatorConfig const &config) {
|
||||
j = nlohmann::json{{"coordinator_server_id", config.coordinator_server_id},
|
||||
{"coordinator_server", config.coordinator_server},
|
||||
{"bolt_server", config.bolt_server}};
|
||||
}
|
||||
|
||||
void from_json(nlohmann::json const &j, CoordinatorToCoordinatorConfig &config) {
|
||||
config.coordinator_server_id = j.at("coordinator_server_id").get<uint32_t>();
|
||||
config.coordinator_server = j.at("coordinator_server").get<io::network::Endpoint>();
|
||||
config.bolt_server = j.at("bolt_server").get<io::network::Endpoint>();
|
||||
}
|
||||
|
||||
void to_json(nlohmann::json &j, ReplicationClientInfo const &config) {
|
||||
void to_json(nlohmann::json &j, ReplClientInfo const &config) {
|
||||
j = nlohmann::json{{"instance_name", config.instance_name},
|
||||
{"replication_mode", config.replication_mode},
|
||||
{"replication_server", config.replication_server}};
|
||||
{"replication_ip_address", config.replication_ip_address},
|
||||
{"replication_port", config.replication_port}};
|
||||
}
|
||||
|
||||
void from_json(nlohmann::json const &j, ReplicationClientInfo &config) {
|
||||
void from_json(nlohmann::json const &j, ReplClientInfo &config) {
|
||||
config.instance_name = j.at("instance_name").get<std::string>();
|
||||
config.replication_mode = j.at("replication_mode").get<replication_coordination_glue::ReplicationMode>();
|
||||
config.replication_server = j.at("replication_server").get<io::network::Endpoint>();
|
||||
config.replication_ip_address = j.at("replication_ip_address").get<std::string>();
|
||||
config.replication_port = j.at("replication_port").get<uint16_t>();
|
||||
}
|
||||
|
||||
void to_json(nlohmann::json &j, CoordinatorToReplicaConfig const &config) {
|
||||
void to_json(nlohmann::json &j, CoordinatorClientConfig const &config) {
|
||||
j = nlohmann::json{{"instance_name", config.instance_name},
|
||||
{"mgt_server", config.mgt_server},
|
||||
{"bolt_server", config.bolt_server},
|
||||
{"ip_address", config.ip_address},
|
||||
{"port", config.port},
|
||||
{"instance_health_check_frequency_sec", config.instance_health_check_frequency_sec.count()},
|
||||
{"instance_down_timeout_sec", config.instance_down_timeout_sec.count()},
|
||||
{"instance_get_uuid_frequency_sec", config.instance_get_uuid_frequency_sec.count()},
|
||||
{"replication_client_info", config.replication_client_info}};
|
||||
}
|
||||
|
||||
void from_json(nlohmann::json const &j, CoordinatorToReplicaConfig &config) {
|
||||
void from_json(nlohmann::json const &j, CoordinatorClientConfig &config) {
|
||||
config.instance_name = j.at("instance_name").get<std::string>();
|
||||
config.mgt_server = j.at("mgt_server").get<io::network::Endpoint>();
|
||||
config.bolt_server = j.at("bolt_server").get<io::network::Endpoint>();
|
||||
config.ip_address = j.at("ip_address").get<std::string>();
|
||||
config.port = j.at("port").get<uint16_t>();
|
||||
config.instance_health_check_frequency_sec =
|
||||
std::chrono::seconds{j.at("instance_health_check_frequency_sec").get<int>()};
|
||||
config.instance_down_timeout_sec = std::chrono::seconds{j.at("instance_down_timeout_sec").get<int>()};
|
||||
config.instance_get_uuid_frequency_sec = std::chrono::seconds{j.at("instance_get_uuid_frequency_sec").get<int>()};
|
||||
config.replication_client_info = j.at("replication_client_info").get<ReplicationClientInfo>();
|
||||
}
|
||||
|
||||
void from_json(nlohmann::json const &j, InstanceUUIDUpdate &instance_uuid_change) {
|
||||
instance_uuid_change.uuid = j.at("uuid").get<utils::UUID>();
|
||||
instance_uuid_change.instance_name = j.at("instance_name").get<std::string>();
|
||||
}
|
||||
|
||||
void to_json(nlohmann::json &j, InstanceUUIDUpdate const &instance_uuid_change) {
|
||||
j = nlohmann::json{{"instance_name", instance_uuid_change.instance_name}, {"uuid", instance_uuid_change.uuid}};
|
||||
config.replication_client_info = j.at("replication_client_info").get<ReplClientInfo>();
|
||||
}
|
||||
|
||||
} // namespace memgraph::coordination
|
@ -95,8 +95,8 @@ void CoordinatorHandlers::DemoteMainToReplicaHandler(replication::ReplicationHan
|
||||
slk::Load(&req, req_reader);
|
||||
|
||||
const replication::ReplicationServerConfig clients_config{
|
||||
.ip_address = req.replication_client_info.replication_server.address,
|
||||
.port = req.replication_client_info.replication_server.port};
|
||||
.ip_address = req.replication_client_info.replication_ip_address,
|
||||
.port = req.replication_client_info.replication_port};
|
||||
|
||||
if (!replication_handler.SetReplicationRoleReplica(clients_config, std::nullopt)) {
|
||||
spdlog::error("Demoting main to replica failed!");
|
||||
@ -136,8 +136,8 @@ void CoordinatorHandlers::PromoteReplicaToMainHandler(replication::ReplicationHa
|
||||
return replication::ReplicationClientConfig{
|
||||
.name = repl_info_config.instance_name,
|
||||
.mode = repl_info_config.replication_mode,
|
||||
.ip_address = repl_info_config.replication_server.address,
|
||||
.port = repl_info_config.replication_server.port,
|
||||
.ip_address = repl_info_config.replication_ip_address,
|
||||
.port = repl_info_config.replication_port,
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include "coordination/coordinator_instance.hpp"
|
||||
|
||||
#include "coordination/coordinator_exceptions.hpp"
|
||||
#include "coordination/fmt.hpp"
|
||||
#include "dbms/constants.hpp"
|
||||
#include "nuraft/coordinator_state_machine.hpp"
|
||||
#include "nuraft/coordinator_state_manager.hpp"
|
||||
@ -30,11 +31,10 @@ using nuraft::ptr;
|
||||
using nuraft::srv_config;
|
||||
|
||||
CoordinatorInstance::CoordinatorInstance()
|
||||
: thread_pool_{1},
|
||||
raft_state_(RaftState::MakeRaftState(
|
||||
: raft_state_(RaftState::MakeRaftState(
|
||||
[this]() {
|
||||
spdlog::info("Leader changed, starting all replication instances!");
|
||||
auto const instances = raft_state_.GetReplicationInstances();
|
||||
auto const instances = raft_state_.GetInstances();
|
||||
auto replicas = instances | ranges::views::filter([](auto const &instance) {
|
||||
return instance.status == ReplicationRole::REPLICA;
|
||||
});
|
||||
@ -56,34 +56,23 @@ CoordinatorInstance::CoordinatorInstance()
|
||||
&CoordinatorInstance::MainFailCallback);
|
||||
});
|
||||
|
||||
std::ranges::for_each(repl_instances_, [](auto &instance) { instance.StartFrequentCheck(); });
|
||||
std::ranges::for_each(repl_instances_, [this](auto &instance) {
|
||||
instance.SetNewMainUUID(raft_state_.GetUUID());
|
||||
instance.StartFrequentCheck();
|
||||
});
|
||||
},
|
||||
[this]() {
|
||||
thread_pool_.AddTask([this]() {
|
||||
spdlog::info("Leader changed, trying to stop all replication instances frequent checks!");
|
||||
// We need to stop checks before taking a lock because deadlock can happen if instances waits
|
||||
// to take a lock in frequent check, and this thread already has a lock and waits for instance to
|
||||
// be done with frequent check
|
||||
for (auto &repl_instance : repl_instances_) {
|
||||
repl_instance.StopFrequentCheck();
|
||||
}
|
||||
auto lock = std::unique_lock{coord_instance_lock_};
|
||||
repl_instances_.clear();
|
||||
spdlog::info("Stopped all replication instance frequent checks.");
|
||||
});
|
||||
spdlog::info("Leader changed, stopping all replication instances!");
|
||||
repl_instances_.clear();
|
||||
})) {
|
||||
client_succ_cb_ = [](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
|
||||
auto lock = std::unique_lock{self->coord_instance_lock_};
|
||||
// when coordinator is becoming follower it will want to stop all threads doing frequent checks
|
||||
// Thread can get stuck here waiting for lock so we need to frequently check if we are in shutdown state
|
||||
|
||||
auto lock = std::lock_guard{self->coord_instance_lock_};
|
||||
auto &repl_instance = self->FindReplicationInstance(repl_instance_name);
|
||||
std::invoke(repl_instance.GetSuccessCallback(), self, repl_instance_name);
|
||||
};
|
||||
|
||||
client_fail_cb_ = [](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
|
||||
auto lock = std::unique_lock{self->coord_instance_lock_};
|
||||
|
||||
auto lock = std::lock_guard{self->coord_instance_lock_};
|
||||
auto &repl_instance = self->FindReplicationInstance(repl_instance_name);
|
||||
std::invoke(repl_instance.GetFailCallback(), self, repl_instance_name);
|
||||
};
|
||||
@ -112,7 +101,7 @@ auto CoordinatorInstance::ShowInstances() const -> std::vector<InstanceStatus> {
|
||||
if (raft_state_.IsLeader()) {
|
||||
auto const stringify_repl_role = [this](ReplicationInstance const &instance) -> std::string {
|
||||
if (!instance.IsAlive()) return "unknown";
|
||||
if (raft_state_.IsCurrentMain(instance.InstanceName())) return "main";
|
||||
if (raft_state_.IsMain(instance.InstanceName())) return "main";
|
||||
return "replica";
|
||||
};
|
||||
|
||||
@ -133,36 +122,26 @@ auto CoordinatorInstance::ShowInstances() const -> std::vector<InstanceStatus> {
|
||||
std::ranges::transform(repl_instances_, std::back_inserter(instances_status), process_repl_instance_as_leader);
|
||||
}
|
||||
} else {
|
||||
auto const stringify_inst_status = [raft_state_ptr = &raft_state_](
|
||||
utils::UUID const &main_uuid,
|
||||
ReplicationInstanceState const &instance) -> std::string {
|
||||
if (raft_state_ptr->IsCurrentMain(instance.config.instance_name)) {
|
||||
return "main";
|
||||
}
|
||||
if (raft_state_ptr->HasMainState(instance.config.instance_name)) {
|
||||
return "unknown";
|
||||
}
|
||||
return "replica";
|
||||
auto const stringify_inst_status = [](ReplicationRole status) -> std::string {
|
||||
return status == ReplicationRole::MAIN ? "main" : "replica";
|
||||
};
|
||||
|
||||
// TODO: (andi) Add capability that followers can also return socket addresses
|
||||
auto process_repl_instance_as_follower =
|
||||
[this, &stringify_inst_status](ReplicationInstanceState const &instance) -> InstanceStatus {
|
||||
auto process_repl_instance_as_follower = [&stringify_inst_status](auto const &instance) -> InstanceStatus {
|
||||
return {.instance_name = instance.config.instance_name,
|
||||
.cluster_role = stringify_inst_status(raft_state_.GetCurrentMainUUID(), instance),
|
||||
.cluster_role = stringify_inst_status(instance.status),
|
||||
.health = "unknown"};
|
||||
};
|
||||
|
||||
std::ranges::transform(raft_state_.GetReplicationInstances(), std::back_inserter(instances_status),
|
||||
std::ranges::transform(raft_state_.GetInstances(), std::back_inserter(instances_status),
|
||||
process_repl_instance_as_follower);
|
||||
}
|
||||
|
||||
return instances_status;
|
||||
}
|
||||
|
||||
auto CoordinatorInstance::TryFailover() -> void {
|
||||
auto const is_replica = [this](ReplicationInstance const &instance) {
|
||||
return HasReplicaState(instance.InstanceName());
|
||||
};
|
||||
auto const is_replica = [this](ReplicationInstance const &instance) { return IsReplica(instance.InstanceName()); };
|
||||
|
||||
auto alive_replicas =
|
||||
repl_instances_ | ranges::views::filter(is_replica) | ranges::views::filter(&ReplicationInstance::IsAlive);
|
||||
@ -172,6 +151,11 @@ auto CoordinatorInstance::TryFailover() -> void {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!raft_state_.RequestLeadership()) {
|
||||
spdlog::error("Failover failed since the instance is not the leader!");
|
||||
return;
|
||||
}
|
||||
|
||||
auto const get_ts = [](ReplicationInstance &replica) { return replica.GetClient().SendGetInstanceTimestampsRpc(); };
|
||||
|
||||
auto maybe_instance_db_histories = alive_replicas | ranges::views::transform(get_ts) | ranges::to<std::vector>();
|
||||
@ -199,10 +183,6 @@ auto CoordinatorInstance::TryFailover() -> void {
|
||||
|
||||
auto *new_main = &FindReplicationInstance(most_up_to_date_instance);
|
||||
|
||||
if (!raft_state_.AppendOpenLockFailover(most_up_to_date_instance)) {
|
||||
spdlog::error("Aborting failover as instance is not anymore leader.");
|
||||
return;
|
||||
}
|
||||
new_main->PauseFrequentCheck();
|
||||
utils::OnScopeExit scope_exit{[&new_main] { new_main->ResumeFrequentCheck(); }};
|
||||
|
||||
@ -212,18 +192,16 @@ auto CoordinatorInstance::TryFailover() -> void {
|
||||
|
||||
auto const new_main_uuid = utils::UUID{};
|
||||
|
||||
auto const failed_to_swap = [this, &new_main_uuid](ReplicationInstance &instance) {
|
||||
return !instance.SendSwapAndUpdateUUID(new_main_uuid) ||
|
||||
!raft_state_.AppendUpdateUUIDForInstanceLog(instance.InstanceName(), new_main_uuid);
|
||||
auto const failed_to_swap = [&new_main_uuid](ReplicationInstance &instance) {
|
||||
return !instance.SendSwapAndUpdateUUID(new_main_uuid);
|
||||
};
|
||||
|
||||
// If for some replicas swap fails, for others on successful ping we will revert back on next change
|
||||
// or we will do failover first again and then it will be consistent again
|
||||
if (std::ranges::any_of(alive_replicas | ranges::views::filter(is_not_new_main), failed_to_swap)) {
|
||||
spdlog::error("Aborting failover. Failed to swap uuid for all alive instances.");
|
||||
spdlog::error("Failed to swap uuid for all instances");
|
||||
return;
|
||||
}
|
||||
|
||||
auto repl_clients_info = repl_instances_ | ranges::views::filter(is_not_new_main) |
|
||||
ranges::views::transform(&ReplicationInstance::ReplicationClientInfo) |
|
||||
ranges::to<ReplicationClientsInfo>();
|
||||
@ -234,36 +212,27 @@ auto CoordinatorInstance::TryFailover() -> void {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!raft_state_.AppendUpdateUUIDForNewMainLog(new_main_uuid)) {
|
||||
if (!raft_state_.AppendUpdateUUIDLog(new_main_uuid)) {
|
||||
return;
|
||||
}
|
||||
|
||||
auto const new_main_instance_name = new_main->InstanceName();
|
||||
|
||||
if (!raft_state_.AppendSetInstanceAsMainLog(new_main_instance_name, new_main_uuid)) {
|
||||
if (!raft_state_.AppendSetInstanceAsMainLog(new_main_instance_name)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!new_main->EnableWritingOnMain()) {
|
||||
spdlog::error("Failover successful but couldn't enable writing on instance.");
|
||||
}
|
||||
|
||||
spdlog::info("Failover successful! Instance {} promoted to main.", new_main->InstanceName());
|
||||
}
|
||||
|
||||
auto CoordinatorInstance::SetReplicationInstanceToMain(std::string_view instance_name)
|
||||
-> SetInstanceToMainCoordinatorStatus {
|
||||
auto lock = std::lock_guard{coord_instance_lock_};
|
||||
if (raft_state_.IsLockOpened()) {
|
||||
return SetInstanceToMainCoordinatorStatus::LOCK_OPENED;
|
||||
}
|
||||
|
||||
if (raft_state_.MainExists()) {
|
||||
return SetInstanceToMainCoordinatorStatus::MAIN_ALREADY_EXISTS;
|
||||
}
|
||||
|
||||
// TODO(antoniofilipovic) Check if request leadership can cause problems due to changing of leadership while other
|
||||
// doing failover
|
||||
if (!raft_state_.RequestLeadership()) {
|
||||
return SetInstanceToMainCoordinatorStatus::NOT_LEADER;
|
||||
}
|
||||
@ -280,10 +249,6 @@ auto CoordinatorInstance::SetReplicationInstanceToMain(std::string_view instance
|
||||
return SetInstanceToMainCoordinatorStatus::NO_INSTANCE_WITH_NAME;
|
||||
}
|
||||
|
||||
if (!raft_state_.AppendOpenLockSetInstanceToMain(instance_name)) {
|
||||
return SetInstanceToMainCoordinatorStatus::OPEN_LOCK;
|
||||
}
|
||||
|
||||
new_main->PauseFrequentCheck();
|
||||
utils::OnScopeExit scope_exit{[&new_main] { new_main->ResumeFrequentCheck(); }};
|
||||
|
||||
@ -293,13 +258,12 @@ auto CoordinatorInstance::SetReplicationInstanceToMain(std::string_view instance
|
||||
|
||||
auto const new_main_uuid = utils::UUID{};
|
||||
|
||||
auto const failed_to_swap = [this, &new_main_uuid](ReplicationInstance &instance) {
|
||||
return !instance.SendSwapAndUpdateUUID(new_main_uuid) ||
|
||||
!raft_state_.AppendUpdateUUIDForInstanceLog(instance.InstanceName(), new_main_uuid);
|
||||
auto const failed_to_swap = [&new_main_uuid](ReplicationInstance &instance) {
|
||||
return !instance.SendSwapAndUpdateUUID(new_main_uuid);
|
||||
};
|
||||
|
||||
if (std::ranges::any_of(repl_instances_ | ranges::views::filter(is_not_new_main), failed_to_swap)) {
|
||||
spdlog::error("Failed to swap uuid for all currently alive instances.");
|
||||
spdlog::error("Failed to swap uuid for all instances");
|
||||
return SetInstanceToMainCoordinatorStatus::SWAP_UUID_FAILED;
|
||||
}
|
||||
|
||||
@ -311,28 +275,22 @@ auto CoordinatorInstance::SetReplicationInstanceToMain(std::string_view instance
|
||||
&CoordinatorInstance::MainFailCallback)) {
|
||||
return SetInstanceToMainCoordinatorStatus::COULD_NOT_PROMOTE_TO_MAIN;
|
||||
}
|
||||
if (!raft_state_.AppendUpdateUUIDForNewMainLog(new_main_uuid)) {
|
||||
|
||||
if (!raft_state_.AppendUpdateUUIDLog(new_main_uuid)) {
|
||||
return SetInstanceToMainCoordinatorStatus::RAFT_LOG_ERROR;
|
||||
}
|
||||
|
||||
if (!raft_state_.AppendSetInstanceAsMainLog(instance_name, new_main_uuid)) {
|
||||
if (!raft_state_.AppendSetInstanceAsMainLog(instance_name)) {
|
||||
return SetInstanceToMainCoordinatorStatus::RAFT_LOG_ERROR;
|
||||
}
|
||||
|
||||
spdlog::info("Instance {} promoted to main on leader", instance_name);
|
||||
|
||||
if (!new_main->EnableWritingOnMain()) {
|
||||
return SetInstanceToMainCoordinatorStatus::ENABLE_WRITING_FAILED;
|
||||
}
|
||||
return SetInstanceToMainCoordinatorStatus::SUCCESS;
|
||||
}
|
||||
|
||||
auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorToReplicaConfig const &config)
|
||||
auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorClientConfig const &config)
|
||||
-> RegisterInstanceCoordinatorStatus {
|
||||
auto lock = std::lock_guard{coord_instance_lock_};
|
||||
if (raft_state_.IsLockOpened()) {
|
||||
return RegisterInstanceCoordinatorStatus::LOCK_OPENED;
|
||||
}
|
||||
|
||||
if (std::ranges::any_of(repl_instances_, [instance_name = config.instance_name](ReplicationInstance const &instance) {
|
||||
return instance.InstanceName() == instance_name;
|
||||
@ -352,14 +310,11 @@ auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorToReplicaConfig
|
||||
return RegisterInstanceCoordinatorStatus::REPL_ENDPOINT_EXISTS;
|
||||
}
|
||||
|
||||
// TODO(antoniofilipovic) Check if this is an issue
|
||||
if (!raft_state_.RequestLeadership()) {
|
||||
return RegisterInstanceCoordinatorStatus::NOT_LEADER;
|
||||
}
|
||||
|
||||
if (!raft_state_.AppendOpenLockRegister(config)) {
|
||||
return RegisterInstanceCoordinatorStatus::OPEN_LOCK;
|
||||
}
|
||||
auto const undo_action_ = [this]() { repl_instances_.pop_back(); };
|
||||
|
||||
auto *new_instance = &repl_instances_.emplace_back(this, config, client_succ_cb_, client_fail_cb_,
|
||||
&CoordinatorInstance::ReplicaSuccessCallback,
|
||||
@ -367,12 +322,15 @@ auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorToReplicaConfig
|
||||
|
||||
if (!new_instance->SendDemoteToReplicaRpc()) {
|
||||
spdlog::error("Failed to send demote to replica rpc for instance {}", config.instance_name);
|
||||
undo_action_();
|
||||
return RegisterInstanceCoordinatorStatus::RPC_FAILED;
|
||||
}
|
||||
|
||||
if (!raft_state_.AppendRegisterReplicationInstanceLog(config)) {
|
||||
undo_action_();
|
||||
return RegisterInstanceCoordinatorStatus::RAFT_LOG_ERROR;
|
||||
}
|
||||
|
||||
new_instance->StartFrequentCheck();
|
||||
|
||||
spdlog::info("Instance {} registered", config.instance_name);
|
||||
@ -383,11 +341,6 @@ auto CoordinatorInstance::UnregisterReplicationInstance(std::string_view instanc
|
||||
-> UnregisterInstanceCoordinatorStatus {
|
||||
auto lock = std::lock_guard{coord_instance_lock_};
|
||||
|
||||
if (raft_state_.IsLockOpened()) {
|
||||
return UnregisterInstanceCoordinatorStatus::LOCK_OPENED;
|
||||
}
|
||||
|
||||
// TODO(antoniofilipovic) Check if this is an issue
|
||||
if (!raft_state_.RequestLeadership()) {
|
||||
return UnregisterInstanceCoordinatorStatus::NOT_LEADER;
|
||||
}
|
||||
@ -401,23 +354,19 @@ auto CoordinatorInstance::UnregisterReplicationInstance(std::string_view instanc
|
||||
return UnregisterInstanceCoordinatorStatus::NO_INSTANCE_WITH_NAME;
|
||||
}
|
||||
|
||||
auto const is_current_main = [this](ReplicationInstance const &instance) {
|
||||
return raft_state_.IsCurrentMain(instance.InstanceName()) && instance.IsAlive();
|
||||
auto const is_main = [this](ReplicationInstance const &instance) {
|
||||
return IsMain(instance.InstanceName()) && instance.GetMainUUID() == raft_state_.GetUUID() && instance.IsAlive();
|
||||
};
|
||||
|
||||
if (is_current_main(*inst_to_remove)) {
|
||||
if (is_main(*inst_to_remove)) {
|
||||
return UnregisterInstanceCoordinatorStatus::IS_MAIN;
|
||||
}
|
||||
|
||||
if (!raft_state_.AppendOpenLockUnregister(instance_name)) {
|
||||
return UnregisterInstanceCoordinatorStatus::OPEN_LOCK;
|
||||
}
|
||||
|
||||
inst_to_remove->StopFrequentCheck();
|
||||
|
||||
auto curr_main = std::ranges::find_if(repl_instances_, is_current_main);
|
||||
auto curr_main = std::ranges::find_if(repl_instances_, is_main);
|
||||
|
||||
if (curr_main != repl_instances_.end()) {
|
||||
if (curr_main != repl_instances_.end() && curr_main->IsAlive()) {
|
||||
if (!curr_main->SendUnregisterReplicaRpc(instance_name)) {
|
||||
inst_to_remove->StartFrequentCheck();
|
||||
return UnregisterInstanceCoordinatorStatus::RPC_FAILED;
|
||||
@ -433,25 +382,20 @@ auto CoordinatorInstance::UnregisterReplicationInstance(std::string_view instanc
|
||||
return UnregisterInstanceCoordinatorStatus::SUCCESS;
|
||||
}
|
||||
|
||||
auto CoordinatorInstance::AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void {
|
||||
raft_state_.AddCoordinatorInstance(config);
|
||||
// NOTE: We ignore error we added coordinator instance to networking stuff but not in raft log.
|
||||
if (!raft_state_.AppendAddCoordinatorInstanceLog(config)) {
|
||||
spdlog::error("Failed to append add coordinator instance log");
|
||||
}
|
||||
auto CoordinatorInstance::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port,
|
||||
std::string_view raft_address) -> void {
|
||||
raft_state_.AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
|
||||
}
|
||||
|
||||
void CoordinatorInstance::MainFailCallback(std::string_view repl_instance_name) {
|
||||
spdlog::trace("Instance {} performing main fail callback", repl_instance_name);
|
||||
if (raft_state_.IsLockOpened()) {
|
||||
spdlog::error("Returning from main fail callback as the last action didn't successfully finish");
|
||||
}
|
||||
|
||||
auto &repl_instance = FindReplicationInstance(repl_instance_name);
|
||||
repl_instance.OnFailPing();
|
||||
const auto &repl_instance_uuid = repl_instance.GetMainUUID();
|
||||
MG_ASSERT(repl_instance_uuid.has_value(), "Replication instance must have uuid set");
|
||||
|
||||
// NOLINTNEXTLINE
|
||||
if (!repl_instance.IsAlive() && raft_state_.IsCurrentMain(repl_instance_name)) {
|
||||
if (!repl_instance.IsAlive() && raft_state_.GetUUID() == repl_instance_uuid.value()) {
|
||||
spdlog::info("Cluster without main instance, trying automatic failover");
|
||||
TryFailover();
|
||||
}
|
||||
@ -459,12 +403,6 @@ void CoordinatorInstance::MainFailCallback(std::string_view repl_instance_name)
|
||||
|
||||
void CoordinatorInstance::MainSuccessCallback(std::string_view repl_instance_name) {
|
||||
spdlog::trace("Instance {} performing main successful callback", repl_instance_name);
|
||||
|
||||
if (raft_state_.IsLockOpened()) {
|
||||
spdlog::error("Stopping main successful callback as the last action didn't successfully finish");
|
||||
return;
|
||||
}
|
||||
|
||||
auto &repl_instance = FindReplicationInstance(repl_instance_name);
|
||||
|
||||
if (repl_instance.IsAlive()) {
|
||||
@ -472,8 +410,11 @@ void CoordinatorInstance::MainSuccessCallback(std::string_view repl_instance_nam
|
||||
return;
|
||||
}
|
||||
|
||||
const auto &repl_instance_uuid = repl_instance.GetMainUUID();
|
||||
MG_ASSERT(repl_instance_uuid.has_value(), "Instance must have uuid set.");
|
||||
|
||||
// NOLINTNEXTLINE
|
||||
if (raft_state_.IsCurrentMain(repl_instance.InstanceName())) {
|
||||
if (raft_state_.GetUUID() == repl_instance_uuid.value()) {
|
||||
if (!repl_instance.EnableWritingOnMain()) {
|
||||
spdlog::error("Failed to enable writing on main instance {}", repl_instance_name);
|
||||
return;
|
||||
@ -483,8 +424,9 @@ void CoordinatorInstance::MainSuccessCallback(std::string_view repl_instance_nam
|
||||
return;
|
||||
}
|
||||
|
||||
if (!raft_state_.AppendOpenLockSetInstanceToReplica(repl_instance.InstanceName())) {
|
||||
spdlog::error("Failed to open lock for demoting OLD MAIN {} to REPLICA", repl_instance_name);
|
||||
if (!raft_state_.RequestLeadership()) {
|
||||
spdlog::error("Demoting main instance {} to replica failed since the instance is not the leader!",
|
||||
repl_instance_name);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -497,38 +439,29 @@ void CoordinatorInstance::MainSuccessCallback(std::string_view repl_instance_nam
|
||||
return;
|
||||
}
|
||||
|
||||
if (!repl_instance.SendSwapAndUpdateUUID(raft_state_.GetCurrentMainUUID())) {
|
||||
if (!repl_instance.SendSwapAndUpdateUUID(raft_state_.GetUUID())) {
|
||||
spdlog::error("Failed to swap uuid for demoted main instance {}", repl_instance_name);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!raft_state_.AppendUpdateUUIDForInstanceLog(repl_instance_name, raft_state_.GetCurrentMainUUID())) {
|
||||
spdlog::error("Failed to update log of changing instance uuid {} to {}", repl_instance_name,
|
||||
std::string{raft_state_.GetCurrentMainUUID()});
|
||||
return;
|
||||
}
|
||||
|
||||
if (!raft_state_.AppendSetInstanceAsReplicaLog(repl_instance_name)) {
|
||||
spdlog::error("Failed to append log that OLD MAIN was demoted to REPLICA {}", repl_instance_name);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
void CoordinatorInstance::ReplicaSuccessCallback(std::string_view repl_instance_name) {
|
||||
spdlog::trace("Instance {} performing replica successful callback", repl_instance_name);
|
||||
|
||||
if (raft_state_.IsLockOpened()) {
|
||||
spdlog::error("Stopping main successful callback as the last action didn't successfully finish");
|
||||
return;
|
||||
}
|
||||
|
||||
auto &repl_instance = FindReplicationInstance(repl_instance_name);
|
||||
|
||||
if (!IsReplica(repl_instance_name)) {
|
||||
spdlog::error("Aborting replica callback since instance {} is not replica anymore", repl_instance_name);
|
||||
return;
|
||||
}
|
||||
// We need to get replicas UUID from time to time to ensure replica is listening to correct main
|
||||
// and that it didn't go down for less time than we could notice
|
||||
// We need to get id of main replica is listening to
|
||||
// and swap if necessary
|
||||
if (!repl_instance.EnsureReplicaHasCorrectMainUUID(raft_state_.GetCurrentMainUUID())) {
|
||||
if (!repl_instance.EnsureReplicaHasCorrectMainUUID(raft_state_.GetUUID())) {
|
||||
spdlog::error("Failed to swap uuid for replica instance {} which is alive", repl_instance.InstanceName());
|
||||
return;
|
||||
}
|
||||
@ -538,14 +471,13 @@ void CoordinatorInstance::ReplicaSuccessCallback(std::string_view repl_instance_
|
||||
|
||||
void CoordinatorInstance::ReplicaFailCallback(std::string_view repl_instance_name) {
|
||||
spdlog::trace("Instance {} performing replica failure callback", repl_instance_name);
|
||||
auto &repl_instance = FindReplicationInstance(repl_instance_name);
|
||||
|
||||
if (raft_state_.IsLockOpened()) {
|
||||
spdlog::error("Stopping main successful callback as the last action didn't successfully finish.");
|
||||
if (!IsReplica(repl_instance_name)) {
|
||||
spdlog::error("Aborting replica fail callback since instance {} is not replica anymore", repl_instance_name);
|
||||
return;
|
||||
}
|
||||
|
||||
auto &repl_instance = FindReplicationInstance(repl_instance_name);
|
||||
|
||||
repl_instance.OnFailPing();
|
||||
}
|
||||
|
||||
@ -617,63 +549,12 @@ auto CoordinatorInstance::ChooseMostUpToDateInstance(std::span<InstanceNameDbHis
|
||||
return std::move(*new_main_res);
|
||||
}
|
||||
|
||||
auto CoordinatorInstance::HasMainState(std::string_view instance_name) const -> bool {
|
||||
return raft_state_.HasMainState(instance_name);
|
||||
auto CoordinatorInstance::IsMain(std::string_view instance_name) const -> bool {
|
||||
return raft_state_.IsMain(instance_name);
|
||||
}
|
||||
|
||||
auto CoordinatorInstance::HasReplicaState(std::string_view instance_name) const -> bool {
|
||||
return raft_state_.HasReplicaState(instance_name);
|
||||
}
|
||||
|
||||
auto CoordinatorInstance::GetRoutingTable(std::map<std::string, std::string> const &routing) -> RoutingTable {
|
||||
auto res = RoutingTable{};
|
||||
|
||||
auto const repl_instance_to_bolt = [](ReplicationInstanceState const &instance) {
|
||||
return instance.config.BoltSocketAddress();
|
||||
};
|
||||
|
||||
// TODO: (andi) This is wrong check, Fico will correct in #1819.
|
||||
auto const is_instance_main = [&](ReplicationInstanceState const &instance) {
|
||||
return instance.status == ReplicationRole::MAIN;
|
||||
};
|
||||
|
||||
auto const is_instance_replica = [&](ReplicationInstanceState const &instance) {
|
||||
return instance.status == ReplicationRole::REPLICA;
|
||||
};
|
||||
|
||||
auto const &raft_log_repl_instances = raft_state_.GetReplicationInstances();
|
||||
|
||||
auto bolt_mains = raft_log_repl_instances | ranges::views::filter(is_instance_main) |
|
||||
ranges::views::transform(repl_instance_to_bolt) | ranges::to<std::vector>();
|
||||
MG_ASSERT(bolt_mains.size() <= 1, "There can be at most one main instance active!");
|
||||
|
||||
if (!std::ranges::empty(bolt_mains)) {
|
||||
res.emplace_back(std::move(bolt_mains), "WRITE");
|
||||
}
|
||||
|
||||
auto bolt_replicas = raft_log_repl_instances | ranges::views::filter(is_instance_replica) |
|
||||
ranges::views::transform(repl_instance_to_bolt) | ranges::to<std::vector>();
|
||||
if (!std::ranges::empty(bolt_replicas)) {
|
||||
res.emplace_back(std::move(bolt_replicas), "READ");
|
||||
}
|
||||
|
||||
auto const coord_instance_to_bolt = [](CoordinatorInstanceState const &instance) {
|
||||
return instance.config.bolt_server.SocketAddress();
|
||||
};
|
||||
|
||||
auto const &raft_log_coord_instances = raft_state_.GetCoordinatorInstances();
|
||||
auto bolt_coords =
|
||||
raft_log_coord_instances | ranges::views::transform(coord_instance_to_bolt) | ranges::to<std::vector>();
|
||||
|
||||
auto const &local_bolt_coord = routing.find("address");
|
||||
if (local_bolt_coord == routing.end()) {
|
||||
throw InvalidRoutingTableException("No bolt address found in routing table for the current coordinator!");
|
||||
}
|
||||
|
||||
bolt_coords.push_back(local_bolt_coord->second);
|
||||
res.emplace_back(std::move(bolt_coords), "ROUTE");
|
||||
|
||||
return res;
|
||||
auto CoordinatorInstance::IsReplica(std::string_view instance_name) const -> bool {
|
||||
return raft_state_.IsReplica(instance_name);
|
||||
}
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
|
@ -18,7 +18,8 @@ namespace memgraph::coordination {
|
||||
|
||||
namespace {
|
||||
|
||||
auto CreateServerContext(const memgraph::coordination::ManagementServerConfig &config) -> communication::ServerContext {
|
||||
auto CreateServerContext(const memgraph::coordination::CoordinatorServerConfig &config)
|
||||
-> communication::ServerContext {
|
||||
return (config.ssl) ? communication::ServerContext{config.ssl->key_file, config.ssl->cert_file, config.ssl->ca_file,
|
||||
config.ssl->verify_peer}
|
||||
: communication::ServerContext{};
|
||||
@ -31,7 +32,7 @@ constexpr auto kCoordinatorServerThreads = 1;
|
||||
|
||||
} // namespace
|
||||
|
||||
CoordinatorServer::CoordinatorServer(const ManagementServerConfig &config)
|
||||
CoordinatorServer::CoordinatorServer(const CoordinatorServerConfig &config)
|
||||
: rpc_server_context_{CreateServerContext(config)},
|
||||
rpc_server_{io::network::Endpoint{config.ip_address, config.port}, &rpc_server_context_,
|
||||
kCoordinatorServerThreads} {
|
||||
|
@ -13,7 +13,7 @@
|
||||
|
||||
#include "coordination/coordinator_state.hpp"
|
||||
|
||||
#include "coordination/coordinator_communication_config.hpp"
|
||||
#include "coordination/coordinator_config.hpp"
|
||||
#include "coordination/register_main_replica_coordinator_status.hpp"
|
||||
#include "flags/replication.hpp"
|
||||
#include "spdlog/spdlog.h"
|
||||
@ -25,15 +25,15 @@
|
||||
namespace memgraph::coordination {
|
||||
|
||||
CoordinatorState::CoordinatorState() {
|
||||
MG_ASSERT(!(FLAGS_coordinator_id && FLAGS_management_port),
|
||||
MG_ASSERT(!(FLAGS_raft_server_id && FLAGS_coordinator_server_port),
|
||||
"Instance cannot be a coordinator and have registered coordinator server.");
|
||||
|
||||
spdlog::info("Executing coordinator constructor");
|
||||
if (FLAGS_management_port) {
|
||||
if (FLAGS_coordinator_server_port) {
|
||||
spdlog::info("Coordinator server port set");
|
||||
auto const config = ManagementServerConfig{
|
||||
auto const config = CoordinatorServerConfig{
|
||||
.ip_address = kDefaultReplicationServerIp,
|
||||
.port = static_cast<uint16_t>(FLAGS_management_port),
|
||||
.port = static_cast<uint16_t>(FLAGS_coordinator_server_port),
|
||||
};
|
||||
spdlog::info("Executing coordinator constructor main replica");
|
||||
|
||||
@ -41,7 +41,7 @@ CoordinatorState::CoordinatorState() {
|
||||
}
|
||||
}
|
||||
|
||||
auto CoordinatorState::RegisterReplicationInstance(CoordinatorToReplicaConfig const &config)
|
||||
auto CoordinatorState::RegisterReplicationInstance(CoordinatorClientConfig const &config)
|
||||
-> RegisterInstanceCoordinatorStatus {
|
||||
MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
|
||||
"Coordinator cannot register replica since variant holds wrong alternative");
|
||||
@ -98,16 +98,11 @@ auto CoordinatorState::GetCoordinatorServer() const -> CoordinatorServer & {
|
||||
return *std::get<CoordinatorMainReplicaData>(data_).coordinator_server_;
|
||||
}
|
||||
|
||||
auto CoordinatorState::AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void {
|
||||
auto CoordinatorState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port,
|
||||
std::string_view raft_address) -> void {
|
||||
MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
|
||||
"Coordinator cannot register replica since variant holds wrong alternative");
|
||||
return std::get<CoordinatorInstance>(data_).AddCoordinatorInstance(config);
|
||||
}
|
||||
|
||||
auto CoordinatorState::GetRoutingTable(std::map<std::string, std::string> const &routing) -> RoutingTable {
|
||||
MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
|
||||
"Coordinator cannot get routing table since variant holds wrong alternative");
|
||||
return std::get<CoordinatorInstance>(data_).GetRoutingTable(routing);
|
||||
return std::get<CoordinatorInstance>(data_).AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
|
||||
}
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
|
@ -20,14 +20,18 @@ constexpr int MAX_SNAPSHOTS = 3;
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
auto CoordinatorStateMachine::MainExists() const -> bool { return cluster_state_.MainExists(); }
|
||||
|
||||
auto CoordinatorStateMachine::HasMainState(std::string_view instance_name) const -> bool {
|
||||
return cluster_state_.HasMainState(instance_name);
|
||||
auto CoordinatorStateMachine::FindCurrentMainInstanceName() const -> std::optional<std::string> {
|
||||
return cluster_state_.FindCurrentMainInstanceName();
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::HasReplicaState(std::string_view instance_name) const -> bool {
|
||||
return cluster_state_.HasReplicaState(instance_name);
|
||||
auto CoordinatorStateMachine::MainExists() const -> bool { return cluster_state_.MainExists(); }
|
||||
|
||||
auto CoordinatorStateMachine::IsMain(std::string_view instance_name) const -> bool {
|
||||
return cluster_state_.IsMain(instance_name);
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::IsReplica(std::string_view instance_name) const -> bool {
|
||||
return cluster_state_.IsReplica(instance_name);
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::CreateLog(nlohmann::json &&log) -> ptr<buffer> {
|
||||
@ -38,24 +42,7 @@ auto CoordinatorStateMachine::CreateLog(nlohmann::json &&log) -> ptr<buffer> {
|
||||
return log_buf;
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeOpenLockRegister(CoordinatorToReplicaConfig const &config) -> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::OPEN_LOCK_REGISTER_REPLICATION_INSTANCE}, {"info", config}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeOpenLockUnregister(std::string_view instance_name) -> ptr<buffer> {
|
||||
return CreateLog(
|
||||
{{"action", RaftLogAction::OPEN_LOCK_UNREGISTER_REPLICATION_INSTANCE}, {"info", std::string{instance_name}}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeOpenLockFailover(std::string_view instance_name) -> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::OPEN_LOCK_FAILOVER}, {"info", std::string(instance_name)}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeOpenLockSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_MAIN}, {"info", std::string(instance_name)}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeRegisterInstance(CoordinatorToReplicaConfig const &config) -> ptr<buffer> {
|
||||
auto CoordinatorStateMachine::SerializeRegisterInstance(CoordinatorClientConfig const &config) -> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::REGISTER_REPLICATION_INSTANCE}, {"info", config}});
|
||||
}
|
||||
|
||||
@ -63,65 +50,35 @@ auto CoordinatorStateMachine::SerializeUnregisterInstance(std::string_view insta
|
||||
return CreateLog({{"action", RaftLogAction::UNREGISTER_REPLICATION_INSTANCE}, {"info", instance_name}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeSetInstanceAsMain(InstanceUUIDUpdate const &instance_uuid_change)
|
||||
-> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::SET_INSTANCE_AS_MAIN}, {"info", instance_uuid_change}});
|
||||
auto CoordinatorStateMachine::SerializeSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::SET_INSTANCE_AS_MAIN}, {"info", instance_name}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::SET_INSTANCE_AS_REPLICA}, {"info", instance_name}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeUpdateUUIDForNewMain(utils::UUID const &uuid) -> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::UPDATE_UUID_OF_NEW_MAIN}, {"info", uuid}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeUpdateUUIDForInstance(InstanceUUIDUpdate const &instance_uuid_change)
|
||||
-> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::UPDATE_UUID_FOR_INSTANCE}, {"info", instance_uuid_change}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeAddCoordinatorInstance(CoordinatorToCoordinatorConfig const &config)
|
||||
-> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::ADD_COORDINATOR_INSTANCE}, {"info", config}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeOpenLockSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_REPLICA}, {"info", instance_name}});
|
||||
auto CoordinatorStateMachine::SerializeUpdateUUID(utils::UUID const &uuid) -> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::UPDATE_UUID}, {"info", uuid}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::DecodeLog(buffer &data) -> std::pair<TRaftLog, RaftLogAction> {
|
||||
buffer_serializer bs(data);
|
||||
auto const json = nlohmann::json::parse(bs.get_str());
|
||||
|
||||
auto const action = json["action"].get<RaftLogAction>();
|
||||
auto const &info = json["info"];
|
||||
|
||||
switch (action) {
|
||||
case RaftLogAction::OPEN_LOCK_REGISTER_REPLICATION_INSTANCE: {
|
||||
return {info.get<CoordinatorToReplicaConfig>(), action};
|
||||
}
|
||||
case RaftLogAction::OPEN_LOCK_UNREGISTER_REPLICATION_INSTANCE:
|
||||
[[fallthrough]];
|
||||
case RaftLogAction::OPEN_LOCK_FAILOVER:
|
||||
[[fallthrough]];
|
||||
case RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_MAIN:
|
||||
[[fallthrough]];
|
||||
case RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_REPLICA: {
|
||||
return {info.get<std::string>(), action};
|
||||
}
|
||||
case RaftLogAction::REGISTER_REPLICATION_INSTANCE:
|
||||
return {info.get<CoordinatorToReplicaConfig>(), action};
|
||||
case RaftLogAction::UPDATE_UUID_OF_NEW_MAIN:
|
||||
return {info.get<CoordinatorClientConfig>(), action};
|
||||
case RaftLogAction::UPDATE_UUID:
|
||||
return {info.get<utils::UUID>(), action};
|
||||
case RaftLogAction::UPDATE_UUID_FOR_INSTANCE:
|
||||
case RaftLogAction::SET_INSTANCE_AS_MAIN:
|
||||
return {info.get<InstanceUUIDUpdate>(), action};
|
||||
case RaftLogAction::UNREGISTER_REPLICATION_INSTANCE:
|
||||
case RaftLogAction::SET_INSTANCE_AS_MAIN:
|
||||
[[fallthrough]];
|
||||
case RaftLogAction::SET_INSTANCE_AS_REPLICA:
|
||||
return {info.get<std::string>(), action};
|
||||
case RaftLogAction::ADD_COORDINATOR_INSTANCE:
|
||||
return {info.get<CoordinatorToCoordinatorConfig>(), action};
|
||||
}
|
||||
throw std::runtime_error("Unknown action");
|
||||
}
|
||||
@ -176,7 +133,6 @@ auto CoordinatorStateMachine::read_logical_snp_obj(snapshot &snapshot, void *& /
|
||||
} else {
|
||||
// Object ID > 0: second object, put actual value.
|
||||
ctx->cluster_state_.Serialize(data_out);
|
||||
is_last_obj = true;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -199,7 +155,6 @@ auto CoordinatorStateMachine::save_logical_snp_obj(snapshot &snapshot, ulong &ob
|
||||
DMG_ASSERT(entry != snapshots_.end());
|
||||
entry->second->cluster_state_ = cluster_state;
|
||||
}
|
||||
obj_id++;
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::apply_snapshot(snapshot &s) -> bool {
|
||||
@ -250,24 +205,11 @@ auto CoordinatorStateMachine::create_snapshot_internal(ptr<snapshot> snapshot) -
|
||||
}
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::GetReplicationInstances() const -> std::vector<ReplicationInstanceState> {
|
||||
return cluster_state_.GetReplicationInstances();
|
||||
auto CoordinatorStateMachine::GetInstances() const -> std::vector<InstanceState> {
|
||||
return cluster_state_.GetInstances();
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::GetCurrentMainUUID() const -> utils::UUID { return cluster_state_.GetCurrentMainUUID(); }
|
||||
|
||||
auto CoordinatorStateMachine::IsCurrentMain(std::string_view instance_name) const -> bool {
|
||||
return cluster_state_.IsCurrentMain(instance_name);
|
||||
}
|
||||
auto CoordinatorStateMachine::GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState> {
|
||||
return cluster_state_.GetCoordinatorInstances();
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::GetInstanceUUID(std::string_view instance_name) const -> utils::UUID {
|
||||
return cluster_state_.GetInstanceUUID(instance_name);
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::IsLockOpened() const -> bool { return cluster_state_.IsLockOpened(); }
|
||||
auto CoordinatorStateMachine::GetUUID() const -> utils::UUID { return cluster_state_.GetUUID(); }
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
#endif
|
||||
|
@ -33,7 +33,6 @@ CoordinatorStateManager::CoordinatorStateManager(int srv_id, std::string const &
|
||||
auto CoordinatorStateManager::load_config() -> ptr<cluster_config> {
|
||||
// Just return in-memory data in this example.
|
||||
// May require reading from disk here, if it has been written to disk.
|
||||
spdlog::trace("Loading cluster config");
|
||||
return cluster_config_;
|
||||
}
|
||||
|
||||
@ -42,11 +41,6 @@ auto CoordinatorStateManager::save_config(cluster_config const &config) -> void
|
||||
// Need to write to disk here, if want to make it durable.
|
||||
ptr<buffer> buf = config.serialize();
|
||||
cluster_config_ = cluster_config::deserialize(*buf);
|
||||
spdlog::info("Saving cluster config.");
|
||||
auto servers = cluster_config_->get_servers();
|
||||
for (auto const &server : servers) {
|
||||
spdlog::trace("Server id: {}, endpoint: {}", server->get_id(), server->get_endpoint());
|
||||
}
|
||||
}
|
||||
|
||||
auto CoordinatorStateManager::save_state(srv_state const &state) -> void {
|
||||
|
@ -13,7 +13,7 @@
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "coordination/coordinator_communication_config.hpp"
|
||||
#include "coordination/coordinator_config.hpp"
|
||||
#include "replication_coordination_glue/common.hpp"
|
||||
#include "rpc/client.hpp"
|
||||
#include "rpc_errors.hpp"
|
||||
@ -25,11 +25,11 @@ namespace memgraph::coordination {
|
||||
|
||||
class CoordinatorInstance;
|
||||
using HealthCheckClientCallback = std::function<void(CoordinatorInstance *, std::string_view)>;
|
||||
using ReplicationClientsInfo = std::vector<ReplicationClientInfo>;
|
||||
using ReplicationClientsInfo = std::vector<ReplClientInfo>;
|
||||
|
||||
class CoordinatorClient {
|
||||
public:
|
||||
explicit CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorToReplicaConfig config,
|
||||
explicit CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorClientConfig config,
|
||||
HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb);
|
||||
|
||||
~CoordinatorClient() = default;
|
||||
@ -62,7 +62,7 @@ class CoordinatorClient {
|
||||
|
||||
auto SendGetInstanceUUIDRpc() const -> memgraph::utils::BasicResult<GetInstanceUUIDError, std::optional<utils::UUID>>;
|
||||
|
||||
auto ReplicationClientInfo() const -> ReplicationClientInfo;
|
||||
auto ReplicationClientInfo() const -> ReplClientInfo;
|
||||
|
||||
auto SendGetInstanceTimestampsRpc() const
|
||||
-> utils::BasicResult<GetInstanceUUIDError, replication_coordination_glue::DatabaseHistories>;
|
||||
@ -83,7 +83,7 @@ class CoordinatorClient {
|
||||
communication::ClientContext rpc_context_;
|
||||
mutable rpc::Client rpc_client_;
|
||||
|
||||
CoordinatorToReplicaConfig config_;
|
||||
CoordinatorClientConfig config_;
|
||||
CoordinatorInstance *coord_instance_;
|
||||
HealthCheckClientCallback succ_cb_;
|
||||
HealthCheckClientCallback fail_cb_;
|
||||
|
@ -1,110 +0,0 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "io/network/endpoint.hpp"
|
||||
#include "replication_coordination_glue/mode.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
|
||||
#include <fmt/format.h>
|
||||
#include "json/json.hpp"
|
||||
#include "utils/uuid.hpp"
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
inline constexpr auto *kDefaultReplicationServerIp = "0.0.0.0";
|
||||
|
||||
struct ReplicationClientInfo {
|
||||
std::string instance_name{};
|
||||
replication_coordination_glue::ReplicationMode replication_mode{};
|
||||
io::network::Endpoint replication_server;
|
||||
|
||||
friend bool operator==(ReplicationClientInfo const &, ReplicationClientInfo const &) = default;
|
||||
};
|
||||
|
||||
struct CoordinatorToReplicaConfig {
|
||||
auto BoltSocketAddress() const -> std::string { return bolt_server.SocketAddress(); }
|
||||
auto CoordinatorSocketAddress() const -> std::string { return mgt_server.SocketAddress(); }
|
||||
auto ReplicationSocketAddress() const -> std::string {
|
||||
return replication_client_info.replication_server.SocketAddress();
|
||||
}
|
||||
|
||||
std::string instance_name{};
|
||||
io::network::Endpoint mgt_server;
|
||||
io::network::Endpoint bolt_server;
|
||||
ReplicationClientInfo replication_client_info;
|
||||
|
||||
std::chrono::seconds instance_health_check_frequency_sec{1};
|
||||
std::chrono::seconds instance_down_timeout_sec{5};
|
||||
std::chrono::seconds instance_get_uuid_frequency_sec{10};
|
||||
|
||||
struct SSL {
|
||||
std::string key_file;
|
||||
std::string cert_file;
|
||||
friend bool operator==(const SSL &, const SSL &) = default;
|
||||
};
|
||||
|
||||
std::optional<SSL> ssl;
|
||||
|
||||
friend bool operator==(CoordinatorToReplicaConfig const &, CoordinatorToReplicaConfig const &) = default;
|
||||
};
|
||||
|
||||
struct CoordinatorToCoordinatorConfig {
|
||||
uint32_t coordinator_server_id{0};
|
||||
io::network::Endpoint bolt_server;
|
||||
io::network::Endpoint coordinator_server;
|
||||
|
||||
friend bool operator==(CoordinatorToCoordinatorConfig const &, CoordinatorToCoordinatorConfig const &) = default;
|
||||
};
|
||||
|
||||
struct ManagementServerConfig {
|
||||
std::string ip_address;
|
||||
uint16_t port{};
|
||||
struct SSL {
|
||||
std::string key_file;
|
||||
std::string cert_file;
|
||||
std::string ca_file;
|
||||
bool verify_peer{};
|
||||
friend bool operator==(SSL const &, SSL const &) = default;
|
||||
};
|
||||
|
||||
std::optional<SSL> ssl;
|
||||
|
||||
friend bool operator==(ManagementServerConfig const &, ManagementServerConfig const &) = default;
|
||||
};
|
||||
|
||||
struct InstanceUUIDUpdate {
|
||||
std::string instance_name;
|
||||
memgraph::utils::UUID uuid;
|
||||
};
|
||||
|
||||
void to_json(nlohmann::json &j, CoordinatorToReplicaConfig const &config);
|
||||
void from_json(nlohmann::json const &j, CoordinatorToReplicaConfig &config);
|
||||
|
||||
void to_json(nlohmann::json &j, CoordinatorToCoordinatorConfig const &config);
|
||||
void from_json(nlohmann::json const &j, CoordinatorToCoordinatorConfig &config);
|
||||
|
||||
void to_json(nlohmann::json &j, ReplicationClientInfo const &config);
|
||||
void from_json(nlohmann::json const &j, ReplicationClientInfo &config);
|
||||
|
||||
void to_json(nlohmann::json &j, InstanceUUIDUpdate const &config);
|
||||
void from_json(nlohmann::json const &j, InstanceUUIDUpdate &config);
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
#endif
|
93
src/coordination/include/coordination/coordinator_config.hpp
Normal file
93
src/coordination/include/coordination/coordinator_config.hpp
Normal file
@ -0,0 +1,93 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "replication_coordination_glue/mode.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
|
||||
#include <fmt/format.h>
|
||||
#include "json/json.hpp"
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
inline constexpr auto *kDefaultReplicationServerIp = "0.0.0.0";
|
||||
|
||||
struct CoordinatorClientConfig {
|
||||
std::string instance_name;
|
||||
std::string ip_address;
|
||||
uint16_t port{};
|
||||
std::chrono::seconds instance_health_check_frequency_sec{1};
|
||||
std::chrono::seconds instance_down_timeout_sec{5};
|
||||
std::chrono::seconds instance_get_uuid_frequency_sec{10};
|
||||
|
||||
auto CoordinatorSocketAddress() const -> std::string { return fmt::format("{}:{}", ip_address, port); }
|
||||
auto ReplicationSocketAddress() const -> std::string {
|
||||
return fmt::format("{}:{}", replication_client_info.replication_ip_address,
|
||||
replication_client_info.replication_port);
|
||||
}
|
||||
|
||||
struct ReplicationClientInfo {
|
||||
std::string instance_name;
|
||||
replication_coordination_glue::ReplicationMode replication_mode{};
|
||||
std::string replication_ip_address;
|
||||
uint16_t replication_port{};
|
||||
|
||||
friend bool operator==(ReplicationClientInfo const &, ReplicationClientInfo const &) = default;
|
||||
};
|
||||
|
||||
ReplicationClientInfo replication_client_info;
|
||||
|
||||
struct SSL {
|
||||
std::string key_file;
|
||||
std::string cert_file;
|
||||
|
||||
friend bool operator==(const SSL &, const SSL &) = default;
|
||||
};
|
||||
|
||||
std::optional<SSL> ssl;
|
||||
|
||||
friend bool operator==(CoordinatorClientConfig const &, CoordinatorClientConfig const &) = default;
|
||||
};
|
||||
|
||||
using ReplClientInfo = CoordinatorClientConfig::ReplicationClientInfo;
|
||||
|
||||
struct CoordinatorServerConfig {
|
||||
std::string ip_address;
|
||||
uint16_t port{};
|
||||
struct SSL {
|
||||
std::string key_file;
|
||||
std::string cert_file;
|
||||
std::string ca_file;
|
||||
bool verify_peer{};
|
||||
friend bool operator==(SSL const &, SSL const &) = default;
|
||||
};
|
||||
|
||||
std::optional<SSL> ssl;
|
||||
|
||||
friend bool operator==(CoordinatorServerConfig const &, CoordinatorServerConfig const &) = default;
|
||||
};
|
||||
|
||||
void to_json(nlohmann::json &j, CoordinatorClientConfig const &config);
|
||||
void from_json(nlohmann::json const &j, CoordinatorClientConfig &config);
|
||||
|
||||
void to_json(nlohmann::json &j, ReplClientInfo const &config);
|
||||
void from_json(nlohmann::json const &j, ReplClientInfo &config);
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
#endif
|
@ -94,16 +94,5 @@ class InvalidRaftLogActionException final : public utils::BasicException {
|
||||
SPECIALIZE_GET_EXCEPTION_NAME(InvalidRaftLogActionException)
|
||||
};
|
||||
|
||||
class InvalidRoutingTableException final : public utils::BasicException {
|
||||
public:
|
||||
explicit InvalidRoutingTableException(std::string_view what) noexcept : BasicException(what) {}
|
||||
|
||||
template <class... Args>
|
||||
explicit InvalidRoutingTableException(fmt::format_string<Args...> fmt, Args &&...args) noexcept
|
||||
: InvalidRoutingTableException(fmt::format(fmt, std::forward<Args>(args)...)) {}
|
||||
|
||||
SPECIALIZE_GET_EXCEPTION_NAME(InvalidRoutingTableException)
|
||||
};
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
#endif
|
||||
|
@ -26,8 +26,6 @@
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
using RoutingTable = std::vector<std::pair<std::vector<std::string>, std::string>>;
|
||||
|
||||
struct NewMainRes {
|
||||
std::string most_up_to_date_instance;
|
||||
std::string latest_epoch;
|
||||
@ -38,14 +36,8 @@ using InstanceNameDbHistories = std::pair<std::string, replication_coordination_
|
||||
class CoordinatorInstance {
|
||||
public:
|
||||
CoordinatorInstance();
|
||||
CoordinatorInstance(CoordinatorInstance const &) = delete;
|
||||
CoordinatorInstance &operator=(CoordinatorInstance const &) = delete;
|
||||
CoordinatorInstance(CoordinatorInstance &&) noexcept = delete;
|
||||
CoordinatorInstance &operator=(CoordinatorInstance &&) noexcept = delete;
|
||||
|
||||
~CoordinatorInstance() = default;
|
||||
|
||||
[[nodiscard]] auto RegisterReplicationInstance(CoordinatorToReplicaConfig const &config)
|
||||
[[nodiscard]] auto RegisterReplicationInstance(CoordinatorClientConfig const &config)
|
||||
-> RegisterInstanceCoordinatorStatus;
|
||||
[[nodiscard]] auto UnregisterReplicationInstance(std::string_view instance_name)
|
||||
-> UnregisterInstanceCoordinatorStatus;
|
||||
@ -56,17 +48,15 @@ class CoordinatorInstance {
|
||||
|
||||
auto TryFailover() -> void;
|
||||
|
||||
auto AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void;
|
||||
|
||||
auto GetRoutingTable(std::map<std::string, std::string> const &routing) -> RoutingTable;
|
||||
auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;
|
||||
|
||||
static auto ChooseMostUpToDateInstance(std::span<InstanceNameDbHistories> histories) -> NewMainRes;
|
||||
|
||||
auto HasMainState(std::string_view instance_name) const -> bool;
|
||||
|
||||
auto HasReplicaState(std::string_view instance_name) const -> bool;
|
||||
|
||||
private:
|
||||
HealthCheckClientCallback client_succ_cb_, client_fail_cb_;
|
||||
|
||||
auto OnRaftCommitCallback(TRaftLog const &log_entry, RaftLogAction log_action) -> void;
|
||||
|
||||
auto FindReplicationInstance(std::string_view replication_instance_name) -> ReplicationInstance &;
|
||||
|
||||
void MainFailCallback(std::string_view);
|
||||
@ -77,14 +67,14 @@ class CoordinatorInstance {
|
||||
|
||||
void ReplicaFailCallback(std::string_view);
|
||||
|
||||
HealthCheckClientCallback client_succ_cb_, client_fail_cb_;
|
||||
auto IsMain(std::string_view instance_name) const -> bool;
|
||||
auto IsReplica(std::string_view instance_name) const -> bool;
|
||||
|
||||
// NOTE: Must be std::list because we rely on pointer stability.
|
||||
// Leader and followers should both have same view on repl_instances_
|
||||
std::list<ReplicationInstance> repl_instances_;
|
||||
mutable utils::ResourceLock coord_instance_lock_{};
|
||||
|
||||
// Thread pool needs to be constructed before raft state as raft state can call thread pool
|
||||
utils::ThreadPool thread_pool_;
|
||||
|
||||
RaftState raft_state_;
|
||||
};
|
||||
|
||||
|
@ -14,7 +14,7 @@
|
||||
#include "utils/uuid.hpp"
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "coordination/coordinator_communication_config.hpp"
|
||||
#include "coordination/coordinator_config.hpp"
|
||||
#include "replication_coordination_glue/common.hpp"
|
||||
#include "rpc/messages.hpp"
|
||||
#include "slk/serialization.hpp"
|
||||
@ -28,13 +28,14 @@ struct PromoteReplicaToMainReq {
|
||||
static void Load(PromoteReplicaToMainReq *self, memgraph::slk::Reader *reader);
|
||||
static void Save(const PromoteReplicaToMainReq &self, memgraph::slk::Builder *builder);
|
||||
|
||||
explicit PromoteReplicaToMainReq(const utils::UUID &uuid, std::vector<ReplicationClientInfo> replication_clients_info)
|
||||
explicit PromoteReplicaToMainReq(const utils::UUID &uuid,
|
||||
std::vector<CoordinatorClientConfig::ReplicationClientInfo> replication_clients_info)
|
||||
: main_uuid_(uuid), replication_clients_info(std::move(replication_clients_info)) {}
|
||||
PromoteReplicaToMainReq() = default;
|
||||
|
||||
// get uuid here
|
||||
utils::UUID main_uuid_;
|
||||
std::vector<ReplicationClientInfo> replication_clients_info;
|
||||
std::vector<CoordinatorClientConfig::ReplicationClientInfo> replication_clients_info;
|
||||
};
|
||||
|
||||
struct PromoteReplicaToMainRes {
|
||||
@ -59,12 +60,12 @@ struct DemoteMainToReplicaReq {
|
||||
static void Load(DemoteMainToReplicaReq *self, memgraph::slk::Reader *reader);
|
||||
static void Save(const DemoteMainToReplicaReq &self, memgraph::slk::Builder *builder);
|
||||
|
||||
explicit DemoteMainToReplicaReq(ReplicationClientInfo replication_client_info)
|
||||
explicit DemoteMainToReplicaReq(CoordinatorClientConfig::ReplicationClientInfo replication_client_info)
|
||||
: replication_client_info(std::move(replication_client_info)) {}
|
||||
|
||||
DemoteMainToReplicaReq() = default;
|
||||
|
||||
ReplicationClientInfo replication_client_info;
|
||||
CoordinatorClientConfig::ReplicationClientInfo replication_client_info;
|
||||
};
|
||||
|
||||
struct DemoteMainToReplicaRes {
|
||||
|
@ -13,14 +13,14 @@
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "coordination/coordinator_communication_config.hpp"
|
||||
#include "coordination/coordinator_config.hpp"
|
||||
#include "rpc/server.hpp"
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
class CoordinatorServer {
|
||||
public:
|
||||
explicit CoordinatorServer(const ManagementServerConfig &config);
|
||||
explicit CoordinatorServer(const CoordinatorServerConfig &config);
|
||||
CoordinatorServer(const CoordinatorServer &) = delete;
|
||||
CoordinatorServer(CoordinatorServer &&) = delete;
|
||||
CoordinatorServer &operator=(const CoordinatorServer &) = delete;
|
||||
|
@ -13,37 +13,27 @@
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "coordination/coordinator_communication_config.hpp"
|
||||
#include "coordination/coordinator_config.hpp"
|
||||
#include "replication_coordination_glue/common.hpp"
|
||||
#include "slk/serialization.hpp"
|
||||
#include "slk/streams.hpp"
|
||||
|
||||
namespace memgraph::slk {
|
||||
|
||||
using ReplicationClientInfo = coordination::ReplicationClientInfo;
|
||||
using ReplicationClientInfo = coordination::CoordinatorClientConfig::ReplicationClientInfo;
|
||||
|
||||
inline void Save(io::network::Endpoint const &obj, Builder *builder) {
|
||||
Save(obj.address, builder);
|
||||
Save(obj.port, builder);
|
||||
Save(obj.family, builder);
|
||||
}
|
||||
|
||||
inline void Load(io::network::Endpoint *obj, Reader *reader) {
|
||||
Load(&obj->address, reader);
|
||||
Load(&obj->port, reader);
|
||||
Load(&obj->family, reader);
|
||||
}
|
||||
|
||||
inline void Save(ReplicationClientInfo const &obj, Builder *builder) {
|
||||
inline void Save(const ReplicationClientInfo &obj, Builder *builder) {
|
||||
Save(obj.instance_name, builder);
|
||||
Save(obj.replication_mode, builder);
|
||||
Save(obj.replication_server, builder);
|
||||
Save(obj.replication_ip_address, builder);
|
||||
Save(obj.replication_port, builder);
|
||||
}
|
||||
|
||||
inline void Load(ReplicationClientInfo *obj, Reader *reader) {
|
||||
Load(&obj->instance_name, reader);
|
||||
Load(&obj->replication_mode, reader);
|
||||
Load(&obj->replication_server, reader);
|
||||
Load(&obj->replication_ip_address, reader);
|
||||
Load(&obj->replication_port, reader);
|
||||
}
|
||||
|
||||
inline void Save(const replication_coordination_glue::DatabaseHistory &obj, Builder *builder) {
|
||||
|
@ -33,7 +33,7 @@ class CoordinatorState {
|
||||
CoordinatorState(CoordinatorState &&) noexcept = delete;
|
||||
CoordinatorState &operator=(CoordinatorState &&) noexcept = delete;
|
||||
|
||||
[[nodiscard]] auto RegisterReplicationInstance(CoordinatorToReplicaConfig const &config)
|
||||
[[nodiscard]] auto RegisterReplicationInstance(CoordinatorClientConfig const &config)
|
||||
-> RegisterInstanceCoordinatorStatus;
|
||||
[[nodiscard]] auto UnregisterReplicationInstance(std::string_view instance_name)
|
||||
-> UnregisterInstanceCoordinatorStatus;
|
||||
@ -42,13 +42,11 @@ class CoordinatorState {
|
||||
|
||||
auto ShowInstances() const -> std::vector<InstanceStatus>;
|
||||
|
||||
auto AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void;
|
||||
auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;
|
||||
|
||||
// NOTE: The client code must check that the server exists before calling this method.
|
||||
auto GetCoordinatorServer() const -> CoordinatorServer &;
|
||||
|
||||
auto GetRoutingTable(std::map<std::string, std::string> const &routing) -> RoutingTable;
|
||||
|
||||
private:
|
||||
struct CoordinatorMainReplicaData {
|
||||
std::unique_ptr<CoordinatorServer> coordinator_server_;
|
||||
|
@ -23,7 +23,7 @@
|
||||
namespace memgraph::coordination {
|
||||
|
||||
class CoordinatorInstance;
|
||||
struct CoordinatorToReplicaConfig;
|
||||
struct CoordinatorClientConfig;
|
||||
|
||||
using BecomeLeaderCb = std::function<void()>;
|
||||
using BecomeFollowerCb = std::function<void()>;
|
||||
@ -40,7 +40,7 @@ using raft_result = nuraft::cmd_result<ptr<buffer>>;
|
||||
|
||||
class RaftState {
|
||||
private:
|
||||
explicit RaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb, uint32_t coordinator_id,
|
||||
explicit RaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb, uint32_t raft_server_id,
|
||||
uint32_t raft_port, std::string raft_address);
|
||||
|
||||
auto InitRaftServer() -> void;
|
||||
@ -58,43 +58,30 @@ class RaftState {
|
||||
auto InstanceName() const -> std::string;
|
||||
auto RaftSocketAddress() const -> std::string;
|
||||
|
||||
auto AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void;
|
||||
auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;
|
||||
auto GetAllCoordinators() const -> std::vector<ptr<srv_config>>;
|
||||
|
||||
auto RequestLeadership() -> bool;
|
||||
auto IsLeader() const -> bool;
|
||||
|
||||
auto AppendRegisterReplicationInstanceLog(CoordinatorToReplicaConfig const &config) -> bool;
|
||||
auto AppendUnregisterReplicationInstanceLog(std::string_view instance_name) -> bool;
|
||||
auto AppendSetInstanceAsMainLog(std::string_view instance_name, utils::UUID const &uuid) -> bool;
|
||||
auto AppendSetInstanceAsReplicaLog(std::string_view instance_name) -> bool;
|
||||
auto AppendUpdateUUIDForNewMainLog(utils::UUID const &uuid) -> bool;
|
||||
auto AppendUpdateUUIDForInstanceLog(std::string_view instance_name, utils::UUID const &uuid) -> bool;
|
||||
auto AppendOpenLockRegister(CoordinatorToReplicaConfig const &) -> bool;
|
||||
auto AppendOpenLockUnregister(std::string_view) -> bool;
|
||||
auto AppendOpenLockFailover(std::string_view instance_name) -> bool;
|
||||
auto AppendOpenLockSetInstanceToMain(std::string_view instance_name) -> bool;
|
||||
auto AppendOpenLockSetInstanceToReplica(std::string_view instance_name) -> bool;
|
||||
auto AppendAddCoordinatorInstanceLog(CoordinatorToCoordinatorConfig const &config) -> bool;
|
||||
|
||||
auto GetReplicationInstances() const -> std::vector<ReplicationInstanceState>;
|
||||
// TODO: (andi) Do we need then GetAllCoordinators?
|
||||
auto GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState>;
|
||||
|
||||
auto FindCurrentMainInstanceName() const -> std::optional<std::string>;
|
||||
auto MainExists() const -> bool;
|
||||
auto HasMainState(std::string_view instance_name) const -> bool;
|
||||
auto HasReplicaState(std::string_view instance_name) const -> bool;
|
||||
auto IsCurrentMain(std::string_view instance_name) const -> bool;
|
||||
auto IsMain(std::string_view instance_name) const -> bool;
|
||||
auto IsReplica(std::string_view instance_name) const -> bool;
|
||||
|
||||
auto GetCurrentMainUUID() const -> utils::UUID;
|
||||
auto GetInstanceUUID(std::string_view) const -> utils::UUID;
|
||||
auto AppendRegisterReplicationInstanceLog(CoordinatorClientConfig const &config) -> bool;
|
||||
auto AppendUnregisterReplicationInstanceLog(std::string_view instance_name) -> bool;
|
||||
auto AppendSetInstanceAsMainLog(std::string_view instance_name) -> bool;
|
||||
auto AppendSetInstanceAsReplicaLog(std::string_view instance_name) -> bool;
|
||||
auto AppendUpdateUUIDLog(utils::UUID const &uuid) -> bool;
|
||||
|
||||
auto IsLockOpened() const -> bool;
|
||||
auto GetInstances() const -> std::vector<InstanceState>;
|
||||
auto GetUUID() const -> utils::UUID;
|
||||
|
||||
private:
|
||||
// TODO: (andi) I think variables below can be abstracted/clean them.
|
||||
io::network::Endpoint raft_endpoint_;
|
||||
uint32_t coordinator_id_;
|
||||
uint32_t raft_server_id_;
|
||||
|
||||
ptr<CoordinatorStateMachine> state_machine_;
|
||||
ptr<CoordinatorStateManager> state_manager_;
|
||||
|
@ -25,9 +25,7 @@ enum class RegisterInstanceCoordinatorStatus : uint8_t {
|
||||
NOT_LEADER,
|
||||
RPC_FAILED,
|
||||
RAFT_LOG_ERROR,
|
||||
SUCCESS,
|
||||
LOCK_OPENED,
|
||||
OPEN_LOCK
|
||||
SUCCESS
|
||||
};
|
||||
|
||||
enum class UnregisterInstanceCoordinatorStatus : uint8_t {
|
||||
@ -38,8 +36,6 @@ enum class UnregisterInstanceCoordinatorStatus : uint8_t {
|
||||
NOT_LEADER,
|
||||
RAFT_LOG_ERROR,
|
||||
SUCCESS,
|
||||
LOCK_OPENED,
|
||||
OPEN_LOCK
|
||||
};
|
||||
|
||||
enum class SetInstanceToMainCoordinatorStatus : uint8_t {
|
||||
@ -51,9 +47,6 @@ enum class SetInstanceToMainCoordinatorStatus : uint8_t {
|
||||
COULD_NOT_PROMOTE_TO_MAIN,
|
||||
SWAP_UUID_FAILED,
|
||||
SUCCESS,
|
||||
LOCK_OPENED,
|
||||
OPEN_LOCK,
|
||||
ENABLE_WRITING_FAILED
|
||||
};
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
|
@ -32,7 +32,7 @@ using HealthCheckInstanceCallback = void (CoordinatorInstance::*)(std::string_vi
|
||||
|
||||
class ReplicationInstance {
|
||||
public:
|
||||
ReplicationInstance(CoordinatorInstance *peer, CoordinatorToReplicaConfig config, HealthCheckClientCallback succ_cb,
|
||||
ReplicationInstance(CoordinatorInstance *peer, CoordinatorClientConfig config, HealthCheckClientCallback succ_cb,
|
||||
HealthCheckClientCallback fail_cb, HealthCheckInstanceCallback succ_instance_cb,
|
||||
HealthCheckInstanceCallback fail_instance_cb);
|
||||
|
||||
@ -67,7 +67,7 @@ class ReplicationInstance {
|
||||
auto PauseFrequentCheck() -> void;
|
||||
auto ResumeFrequentCheck() -> void;
|
||||
|
||||
auto ReplicationClientInfo() const -> ReplicationClientInfo;
|
||||
auto ReplicationClientInfo() const -> ReplClientInfo;
|
||||
|
||||
auto EnsureReplicaHasCorrectMainUUID(utils::UUID const &curr_main_uuid) -> bool;
|
||||
|
||||
@ -79,6 +79,10 @@ class ReplicationInstance {
|
||||
|
||||
auto EnableWritingOnMain() -> bool;
|
||||
|
||||
auto SetNewMainUUID(utils::UUID const &main_uuid) -> void;
|
||||
auto ResetMainUUID() -> void;
|
||||
auto GetMainUUID() const -> std::optional<utils::UUID> const &;
|
||||
|
||||
auto GetSuccessCallback() -> HealthCheckInstanceCallback &;
|
||||
auto GetFailCallback() -> HealthCheckInstanceCallback &;
|
||||
|
||||
@ -88,12 +92,19 @@ class ReplicationInstance {
|
||||
bool is_alive_{false};
|
||||
std::chrono::system_clock::time_point last_check_of_uuid_{};
|
||||
|
||||
// for replica this is main uuid of current main
|
||||
// for "main" main this same as in CoordinatorData
|
||||
// it is set to nullopt when replica is down
|
||||
// TLDR; when replica is down and comes back up we reset uuid of main replica is listening to
|
||||
// so we need to send swap uuid again
|
||||
std::optional<utils::UUID> main_uuid_;
|
||||
|
||||
HealthCheckInstanceCallback succ_cb_;
|
||||
HealthCheckInstanceCallback fail_cb_;
|
||||
|
||||
friend bool operator==(ReplicationInstance const &first, ReplicationInstance const &second) {
|
||||
return first.client_ == second.client_ && first.last_response_time_ == second.last_response_time_ &&
|
||||
first.is_alive_ == second.is_alive_;
|
||||
first.is_alive_ == second.is_alive_ && first.main_uuid_ == second.main_uuid_;
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -13,7 +13,7 @@
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "coordination/coordinator_communication_config.hpp"
|
||||
#include "coordination/coordinator_config.hpp"
|
||||
#include "nuraft/raft_log_action.hpp"
|
||||
#include "replication_coordination_glue/role.hpp"
|
||||
#include "utils/resource_lock.hpp"
|
||||
@ -32,37 +32,19 @@ namespace memgraph::coordination {
|
||||
|
||||
using replication_coordination_glue::ReplicationRole;
|
||||
|
||||
struct ReplicationInstanceState {
|
||||
CoordinatorToReplicaConfig config;
|
||||
struct InstanceState {
|
||||
CoordinatorClientConfig config;
|
||||
ReplicationRole status;
|
||||
|
||||
// for replica this is main uuid of current main
|
||||
// for "main" main this same as current_main_id_
|
||||
// when replica is down and comes back up we reset uuid of main replica is listening to
|
||||
// so we need to send swap uuid again
|
||||
// For MAIN we don't enable writing until cluster is in healthy state
|
||||
utils::UUID instance_uuid;
|
||||
|
||||
friend auto operator==(ReplicationInstanceState const &lhs, ReplicationInstanceState const &rhs) -> bool {
|
||||
return lhs.config == rhs.config && lhs.status == rhs.status && lhs.instance_uuid == rhs.instance_uuid;
|
||||
friend auto operator==(InstanceState const &lhs, InstanceState const &rhs) -> bool {
|
||||
return lhs.config == rhs.config && lhs.status == rhs.status;
|
||||
}
|
||||
};
|
||||
|
||||
// NOTE: Currently instance of coordinator doesn't change from the registration. Hence, just wrap
|
||||
// CoordinatorToCoordinatorConfig.
|
||||
struct CoordinatorInstanceState {
|
||||
CoordinatorToCoordinatorConfig config;
|
||||
void to_json(nlohmann::json &j, InstanceState const &instance_state);
|
||||
void from_json(nlohmann::json const &j, InstanceState &instance_state);
|
||||
|
||||
friend auto operator==(CoordinatorInstanceState const &lhs, CoordinatorInstanceState const &rhs) -> bool {
|
||||
return lhs.config == rhs.config;
|
||||
}
|
||||
};
|
||||
|
||||
void to_json(nlohmann::json &j, ReplicationInstanceState const &instance_state);
|
||||
void from_json(nlohmann::json const &j, ReplicationInstanceState &instance_state);
|
||||
|
||||
using TRaftLog = std::variant<CoordinatorToReplicaConfig, std::string, utils::UUID, CoordinatorToCoordinatorConfig,
|
||||
InstanceUUIDUpdate>;
|
||||
using TRaftLog = std::variant<CoordinatorClientConfig, std::string, utils::UUID>;
|
||||
|
||||
using nuraft::buffer;
|
||||
using nuraft::buffer_serializer;
|
||||
@ -71,8 +53,7 @@ using nuraft::ptr;
|
||||
class CoordinatorClusterState {
|
||||
public:
|
||||
CoordinatorClusterState() = default;
|
||||
explicit CoordinatorClusterState(std::map<std::string, ReplicationInstanceState, std::less<>> instances,
|
||||
utils::UUID const ¤t_main_uuid, bool is_lock_opened);
|
||||
explicit CoordinatorClusterState(std::map<std::string, InstanceState, std::less<>> instances);
|
||||
|
||||
CoordinatorClusterState(CoordinatorClusterState const &);
|
||||
CoordinatorClusterState &operator=(CoordinatorClusterState const &);
|
||||
@ -81,13 +62,15 @@ class CoordinatorClusterState {
|
||||
CoordinatorClusterState &operator=(CoordinatorClusterState &&other) noexcept;
|
||||
~CoordinatorClusterState() = default;
|
||||
|
||||
auto FindCurrentMainInstanceName() const -> std::optional<std::string>;
|
||||
|
||||
auto MainExists() const -> bool;
|
||||
|
||||
auto HasMainState(std::string_view instance_name) const -> bool;
|
||||
auto IsMain(std::string_view instance_name) const -> bool;
|
||||
|
||||
auto HasReplicaState(std::string_view instance_name) const -> bool;
|
||||
auto IsReplica(std::string_view instance_name) const -> bool;
|
||||
|
||||
auto IsCurrentMain(std::string_view instance_name) const -> bool;
|
||||
auto InsertInstance(std::string instance_name, InstanceState instance_state) -> void;
|
||||
|
||||
auto DoAction(TRaftLog log_entry, RaftLogAction log_action) -> void;
|
||||
|
||||
@ -95,22 +78,14 @@ class CoordinatorClusterState {
|
||||
|
||||
static auto Deserialize(buffer &data) -> CoordinatorClusterState;
|
||||
|
||||
auto GetReplicationInstances() const -> std::vector<ReplicationInstanceState>;
|
||||
auto GetInstances() const -> std::vector<InstanceState>;
|
||||
|
||||
auto GetCurrentMainUUID() const -> utils::UUID;
|
||||
|
||||
auto GetInstanceUUID(std::string_view) const -> utils::UUID;
|
||||
|
||||
auto IsLockOpened() const -> bool;
|
||||
|
||||
auto GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState>;
|
||||
auto GetUUID() const -> utils::UUID;
|
||||
|
||||
private:
|
||||
std::vector<CoordinatorInstanceState> coordinators_{};
|
||||
std::map<std::string, ReplicationInstanceState, std::less<>> repl_instances_{};
|
||||
utils::UUID current_main_uuid_{};
|
||||
std::map<std::string, InstanceState, std::less<>> instances_{};
|
||||
utils::UUID uuid_{};
|
||||
mutable utils::ResourceLock log_lock_{};
|
||||
bool is_lock_opened_{false};
|
||||
};
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
|
@ -13,7 +13,7 @@
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "coordination/coordinator_communication_config.hpp"
|
||||
#include "coordination/coordinator_config.hpp"
|
||||
#include "nuraft/coordinator_cluster_state.hpp"
|
||||
#include "nuraft/raft_log_action.hpp"
|
||||
|
||||
@ -40,21 +40,19 @@ class CoordinatorStateMachine : public state_machine {
|
||||
CoordinatorStateMachine &operator=(CoordinatorStateMachine const &) = delete;
|
||||
CoordinatorStateMachine(CoordinatorStateMachine &&) = delete;
|
||||
CoordinatorStateMachine &operator=(CoordinatorStateMachine &&) = delete;
|
||||
~CoordinatorStateMachine() override = default;
|
||||
~CoordinatorStateMachine() override {}
|
||||
|
||||
auto FindCurrentMainInstanceName() const -> std::optional<std::string>;
|
||||
auto MainExists() const -> bool;
|
||||
auto IsMain(std::string_view instance_name) const -> bool;
|
||||
auto IsReplica(std::string_view instance_name) const -> bool;
|
||||
|
||||
static auto CreateLog(nlohmann::json &&log) -> ptr<buffer>;
|
||||
static auto SerializeOpenLockRegister(CoordinatorToReplicaConfig const &config) -> ptr<buffer>;
|
||||
static auto SerializeOpenLockUnregister(std::string_view instance_name) -> ptr<buffer>;
|
||||
static auto SerializeOpenLockSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer>;
|
||||
static auto SerializeOpenLockFailover(std::string_view instance_name) -> ptr<buffer>;
|
||||
static auto SerializeRegisterInstance(CoordinatorToReplicaConfig const &config) -> ptr<buffer>;
|
||||
static auto SerializeRegisterInstance(CoordinatorClientConfig const &config) -> ptr<buffer>;
|
||||
static auto SerializeUnregisterInstance(std::string_view instance_name) -> ptr<buffer>;
|
||||
static auto SerializeSetInstanceAsMain(InstanceUUIDUpdate const &instance_uuid_change) -> ptr<buffer>;
|
||||
static auto SerializeSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer>;
|
||||
static auto SerializeSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer>;
|
||||
static auto SerializeUpdateUUIDForNewMain(utils::UUID const &uuid) -> ptr<buffer>;
|
||||
static auto SerializeUpdateUUIDForInstance(InstanceUUIDUpdate const &instance_uuid_change) -> ptr<buffer>;
|
||||
static auto SerializeAddCoordinatorInstance(CoordinatorToCoordinatorConfig const &config) -> ptr<buffer>;
|
||||
static auto SerializeOpenLockSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer>;
|
||||
static auto SerializeUpdateUUID(utils::UUID const &uuid) -> ptr<buffer>;
|
||||
|
||||
static auto DecodeLog(buffer &data) -> std::pair<TRaftLog, RaftLogAction>;
|
||||
|
||||
@ -82,19 +80,8 @@ class CoordinatorStateMachine : public state_machine {
|
||||
|
||||
auto create_snapshot(snapshot &s, async_result<bool>::handler_type &when_done) -> void override;
|
||||
|
||||
auto GetReplicationInstances() const -> std::vector<ReplicationInstanceState>;
|
||||
|
||||
auto GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState>;
|
||||
|
||||
// Getters
|
||||
auto MainExists() const -> bool;
|
||||
auto HasMainState(std::string_view instance_name) const -> bool;
|
||||
auto HasReplicaState(std::string_view instance_name) const -> bool;
|
||||
auto IsCurrentMain(std::string_view instance_name) const -> bool;
|
||||
|
||||
auto GetCurrentMainUUID() const -> utils::UUID;
|
||||
auto GetInstanceUUID(std::string_view instance_name) const -> utils::UUID;
|
||||
auto IsLockOpened() const -> bool;
|
||||
auto GetInstances() const -> std::vector<InstanceState>;
|
||||
auto GetUUID() const -> utils::UUID;
|
||||
|
||||
private:
|
||||
struct SnapshotCtx {
|
||||
|
@ -23,34 +23,20 @@
|
||||
namespace memgraph::coordination {
|
||||
|
||||
enum class RaftLogAction : uint8_t {
|
||||
OPEN_LOCK_REGISTER_REPLICATION_INSTANCE,
|
||||
OPEN_LOCK_UNREGISTER_REPLICATION_INSTANCE,
|
||||
OPEN_LOCK_FAILOVER,
|
||||
OPEN_LOCK_SET_INSTANCE_AS_MAIN,
|
||||
OPEN_LOCK_SET_INSTANCE_AS_REPLICA,
|
||||
REGISTER_REPLICATION_INSTANCE,
|
||||
UNREGISTER_REPLICATION_INSTANCE,
|
||||
SET_INSTANCE_AS_MAIN,
|
||||
SET_INSTANCE_AS_REPLICA,
|
||||
UPDATE_UUID_OF_NEW_MAIN,
|
||||
ADD_COORDINATOR_INSTANCE,
|
||||
UPDATE_UUID_FOR_INSTANCE,
|
||||
UPDATE_UUID
|
||||
};
|
||||
|
||||
NLOHMANN_JSON_SERIALIZE_ENUM(RaftLogAction,
|
||||
{{RaftLogAction::REGISTER_REPLICATION_INSTANCE, "register"},
|
||||
{RaftLogAction::UNREGISTER_REPLICATION_INSTANCE, "unregister"},
|
||||
{RaftLogAction::SET_INSTANCE_AS_MAIN, "promote"},
|
||||
{RaftLogAction::SET_INSTANCE_AS_REPLICA, "demote"},
|
||||
{RaftLogAction::UPDATE_UUID_OF_NEW_MAIN, "update_uuid_of_new_main"},
|
||||
{RaftLogAction::ADD_COORDINATOR_INSTANCE, "add_coordinator_instance"},
|
||||
{RaftLogAction::UPDATE_UUID_FOR_INSTANCE, "update_uuid_for_instance"},
|
||||
{RaftLogAction::OPEN_LOCK_REGISTER_REPLICATION_INSTANCE, "open_lock_register_instance"},
|
||||
{RaftLogAction::OPEN_LOCK_UNREGISTER_REPLICATION_INSTANCE,
|
||||
"open_lock_unregister_instance"},
|
||||
{RaftLogAction::OPEN_LOCK_FAILOVER, "open_lock_failover"},
|
||||
{RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_MAIN, "open_lock_set_instance_as_main"},
|
||||
{RaftLogAction::OPEN_LOCK_SET_INSTANCE_AS_REPLICA, "open_lock_set_instance_as_replica"}})
|
||||
NLOHMANN_JSON_SERIALIZE_ENUM(RaftLogAction, {
|
||||
{RaftLogAction::REGISTER_REPLICATION_INSTANCE, "register"},
|
||||
{RaftLogAction::UNREGISTER_REPLICATION_INSTANCE, "unregister"},
|
||||
{RaftLogAction::SET_INSTANCE_AS_MAIN, "promote"},
|
||||
{RaftLogAction::SET_INSTANCE_AS_REPLICA, "demote"},
|
||||
{RaftLogAction::UPDATE_UUID, "update_uuid"},
|
||||
})
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
#endif
|
||||
|
@ -12,7 +12,8 @@
|
||||
#ifdef MG_ENTERPRISE
|
||||
#include <chrono>
|
||||
|
||||
#include "coordination/coordinator_communication_config.hpp"
|
||||
#include <spdlog/spdlog.h>
|
||||
#include "coordination/coordinator_config.hpp"
|
||||
#include "coordination/coordinator_exceptions.hpp"
|
||||
#include "coordination/raft_state.hpp"
|
||||
#include "utils/counter.hpp"
|
||||
@ -30,12 +31,12 @@ using nuraft::raft_server;
|
||||
using nuraft::srv_config;
|
||||
using raft_result = cmd_result<ptr<buffer>>;
|
||||
|
||||
RaftState::RaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb, uint32_t coordinator_id,
|
||||
RaftState::RaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb, uint32_t raft_server_id,
|
||||
uint32_t raft_port, std::string raft_address)
|
||||
: raft_endpoint_(raft_address, raft_port),
|
||||
coordinator_id_(coordinator_id),
|
||||
raft_server_id_(raft_server_id),
|
||||
state_machine_(cs_new<CoordinatorStateMachine>()),
|
||||
state_manager_(cs_new<CoordinatorStateManager>(coordinator_id_, raft_endpoint_.SocketAddress())),
|
||||
state_manager_(cs_new<CoordinatorStateManager>(raft_server_id_, raft_endpoint_.SocketAddress())),
|
||||
logger_(nullptr),
|
||||
become_leader_cb_(std::move(become_leader_cb)),
|
||||
become_follower_cb_(std::move(become_follower_cb)) {}
|
||||
@ -62,18 +63,13 @@ auto RaftState::InitRaftServer() -> void {
|
||||
params.leadership_expiry_ = 200;
|
||||
|
||||
raft_server::init_options init_opts;
|
||||
|
||||
init_opts.raft_callback_ = [this](cb_func::Type event_type, cb_func::Param *param) -> nuraft::CbReturnCode {
|
||||
if (event_type == cb_func::BecomeLeader) {
|
||||
spdlog::info("Node {} became leader", param->leaderId);
|
||||
become_leader_cb_();
|
||||
} else if (event_type == cb_func::BecomeFollower) {
|
||||
// TODO(antoniofilipovic) Check what happens when becoming follower while doing failover
|
||||
// There is no way to stop becoming a follower:
|
||||
// https://github.com/eBay/NuRaft/blob/188947bcc73ce38ab1c3cf9d01015ca8a29decd9/src/raft_server.cxx#L1334-L1335
|
||||
spdlog::trace("Got request to become follower");
|
||||
spdlog::info("Node {} became follower", param->myId);
|
||||
become_follower_cb_();
|
||||
spdlog::trace("Node {} became follower", param->myId);
|
||||
}
|
||||
return CbReturnCode::Ok;
|
||||
};
|
||||
@ -86,6 +82,7 @@ auto RaftState::InitRaftServer() -> void {
|
||||
if (!raft_server_) {
|
||||
throw RaftServerStartException("Failed to launch raft server on {}", raft_endpoint_.SocketAddress());
|
||||
}
|
||||
|
||||
auto maybe_stop = utils::ResettableCounter<20>();
|
||||
do {
|
||||
if (raft_server_->is_initialized()) {
|
||||
@ -98,11 +95,11 @@ auto RaftState::InitRaftServer() -> void {
|
||||
}
|
||||
|
||||
auto RaftState::MakeRaftState(BecomeLeaderCb &&become_leader_cb, BecomeFollowerCb &&become_follower_cb) -> RaftState {
|
||||
uint32_t coordinator_id = FLAGS_coordinator_id;
|
||||
uint32_t raft_port = FLAGS_coordinator_port;
|
||||
uint32_t raft_server_id = FLAGS_raft_server_id;
|
||||
uint32_t raft_port = FLAGS_raft_server_port;
|
||||
|
||||
auto raft_state =
|
||||
RaftState(std::move(become_leader_cb), std::move(become_follower_cb), coordinator_id, raft_port, "127.0.0.1");
|
||||
RaftState(std::move(become_leader_cb), std::move(become_follower_cb), raft_server_id, raft_port, "127.0.0.1");
|
||||
|
||||
raft_state.InitRaftServer();
|
||||
return raft_state;
|
||||
@ -111,14 +108,15 @@ auto RaftState::MakeRaftState(BecomeLeaderCb &&become_leader_cb, BecomeFollowerC
|
||||
RaftState::~RaftState() { launcher_.shutdown(); }
|
||||
|
||||
auto RaftState::InstanceName() const -> std::string {
|
||||
return fmt::format("coordinator_{}", std::to_string(coordinator_id_));
|
||||
return fmt::format("coordinator_{}", std::to_string(raft_server_id_));
|
||||
}
|
||||
|
||||
auto RaftState::RaftSocketAddress() const -> std::string { return raft_endpoint_.SocketAddress(); }
|
||||
|
||||
auto RaftState::AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void {
|
||||
auto const endpoint = config.coordinator_server.SocketAddress();
|
||||
srv_config const srv_config_to_add(static_cast<int>(config.coordinator_server_id), endpoint);
|
||||
auto RaftState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address)
|
||||
-> void {
|
||||
auto const endpoint = fmt::format("{}:{}", raft_address, raft_port);
|
||||
srv_config const srv_config_to_add(static_cast<int>(raft_server_id), endpoint);
|
||||
|
||||
auto cmd_result = raft_server_->add_srv(srv_config_to_add);
|
||||
|
||||
@ -136,9 +134,9 @@ auto RaftState::AddCoordinatorInstance(coordination::CoordinatorToCoordinatorCon
|
||||
bool added{false};
|
||||
while (!maybe_stop()) {
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(waiting_period));
|
||||
const auto server_config = raft_server_->get_srv_config(static_cast<nuraft::int32>(config.coordinator_server_id));
|
||||
const auto server_config = raft_server_->get_srv_config(static_cast<nuraft::int32>(raft_server_id));
|
||||
if (server_config) {
|
||||
spdlog::trace("Server with id {} added to cluster", config.coordinator_server_id);
|
||||
spdlog::trace("Server with id {} added to cluster", raft_server_id);
|
||||
added = true;
|
||||
break;
|
||||
}
|
||||
@ -160,79 +158,7 @@ auto RaftState::IsLeader() const -> bool { return raft_server_->is_leader(); }
|
||||
|
||||
auto RaftState::RequestLeadership() -> bool { return raft_server_->is_leader() || raft_server_->request_leadership(); }
|
||||
|
||||
auto RaftState::AppendOpenLockRegister(CoordinatorToReplicaConfig const &config) -> bool {
|
||||
auto new_log = CoordinatorStateMachine::SerializeOpenLockRegister(config);
|
||||
auto const res = raft_server_->append_entries({new_log});
|
||||
|
||||
if (!res->get_accepted()) {
|
||||
spdlog::error("Failed to accept request to open lock to register instance {}", config.instance_name);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (res->get_result_code() != nuraft::cmd_result_code::OK) {
|
||||
spdlog::error("Failed to open lock for registering instance {} with error code {}", config.instance_name,
|
||||
int(res->get_result_code()));
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
auto RaftState::AppendOpenLockUnregister(std::string_view instance_name) -> bool {
|
||||
auto new_log = CoordinatorStateMachine::SerializeOpenLockUnregister(instance_name);
|
||||
auto const res = raft_server_->append_entries({new_log});
|
||||
|
||||
if (!res->get_accepted()) {
|
||||
spdlog::error("Failed to accept request to open lock to unregister instance {}.", instance_name);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (res->get_result_code() != nuraft::cmd_result_code::OK) {
|
||||
spdlog::error("Failed to open lock for unregistering instance {} with error code {}", instance_name,
|
||||
int(res->get_result_code()));
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
auto RaftState::AppendOpenLockFailover(std::string_view instance_name) -> bool {
|
||||
auto new_log = CoordinatorStateMachine::SerializeOpenLockFailover(instance_name);
|
||||
auto const res = raft_server_->append_entries({new_log});
|
||||
|
||||
if (!res->get_accepted()) {
|
||||
spdlog::error("Failed to accept request to open lock for failover {}", instance_name);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (res->get_result_code() != nuraft::cmd_result_code::OK) {
|
||||
spdlog::error("Failed to open lock for failover to instance {} with error code {}", instance_name,
|
||||
int(res->get_result_code()));
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
auto RaftState::AppendOpenLockSetInstanceToMain(std::string_view instance_name) -> bool {
|
||||
auto new_log = CoordinatorStateMachine::SerializeOpenLockSetInstanceAsMain(instance_name);
|
||||
auto const res = raft_server_->append_entries({new_log});
|
||||
|
||||
if (!res->get_accepted()) {
|
||||
spdlog::error("Failed to accept request to open lock and set instance {} to MAIN", instance_name);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (res->get_result_code() != nuraft::cmd_result_code::OK) {
|
||||
spdlog::error("Failed to open lock to set instance {} to MAIN with error code {}", instance_name,
|
||||
int(res->get_result_code()));
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
auto RaftState::AppendRegisterReplicationInstanceLog(CoordinatorToReplicaConfig const &config) -> bool {
|
||||
auto RaftState::AppendRegisterReplicationInstanceLog(CoordinatorClientConfig const &config) -> bool {
|
||||
auto new_log = CoordinatorStateMachine::SerializeRegisterInstance(config);
|
||||
auto const res = raft_server_->append_entries({new_log});
|
||||
|
||||
@ -276,9 +202,8 @@ auto RaftState::AppendUnregisterReplicationInstanceLog(std::string_view instance
|
||||
return true;
|
||||
}
|
||||
|
||||
auto RaftState::AppendSetInstanceAsMainLog(std::string_view instance_name, utils::UUID const &uuid) -> bool {
|
||||
auto new_log = CoordinatorStateMachine::SerializeSetInstanceAsMain(
|
||||
InstanceUUIDUpdate{.instance_name = std::string{instance_name}, .uuid = uuid});
|
||||
auto RaftState::AppendSetInstanceAsMainLog(std::string_view instance_name) -> bool {
|
||||
auto new_log = CoordinatorStateMachine::SerializeSetInstanceAsMain(instance_name);
|
||||
auto const res = raft_server_->append_entries({new_log});
|
||||
if (!res->get_accepted()) {
|
||||
spdlog::error(
|
||||
@ -317,28 +242,8 @@ auto RaftState::AppendSetInstanceAsReplicaLog(std::string_view instance_name) ->
|
||||
return true;
|
||||
}
|
||||
|
||||
auto RaftState::AppendOpenLockSetInstanceToReplica(std::string_view instance_name) -> bool {
|
||||
auto new_log = CoordinatorStateMachine::SerializeOpenLockSetInstanceAsReplica(instance_name);
|
||||
auto const res = raft_server_->append_entries({new_log});
|
||||
if (!res->get_accepted()) {
|
||||
spdlog::error(
|
||||
"Failed to accept request for demoting instance {}. Most likely the reason is that the instance is not "
|
||||
"the leader.",
|
||||
instance_name);
|
||||
return false;
|
||||
}
|
||||
spdlog::info("Request for demoting instance {} accepted", instance_name);
|
||||
|
||||
if (res->get_result_code() != nuraft::cmd_result_code::OK) {
|
||||
spdlog::error("Failed to promote instance {} with error code {}", instance_name, int(res->get_result_code()));
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
auto RaftState::AppendUpdateUUIDForNewMainLog(utils::UUID const &uuid) -> bool {
|
||||
auto new_log = CoordinatorStateMachine::SerializeUpdateUUIDForNewMain(uuid);
|
||||
auto RaftState::AppendUpdateUUIDLog(utils::UUID const &uuid) -> bool {
|
||||
auto new_log = CoordinatorStateMachine::SerializeUpdateUUID(uuid);
|
||||
auto const res = raft_server_->append_entries({new_log});
|
||||
if (!res->get_accepted()) {
|
||||
spdlog::error(
|
||||
@ -346,7 +251,7 @@ auto RaftState::AppendUpdateUUIDForNewMainLog(utils::UUID const &uuid) -> bool {
|
||||
"the leader.");
|
||||
return false;
|
||||
}
|
||||
spdlog::trace("Request for updating UUID accepted");
|
||||
spdlog::info("Request for updating UUID accepted");
|
||||
|
||||
if (res->get_result_code() != nuraft::cmd_result_code::OK) {
|
||||
spdlog::error("Failed to update UUID with error code {}", int(res->get_result_code()));
|
||||
@ -356,75 +261,21 @@ auto RaftState::AppendUpdateUUIDForNewMainLog(utils::UUID const &uuid) -> bool {
|
||||
return true;
|
||||
}
|
||||
|
||||
auto RaftState::AppendAddCoordinatorInstanceLog(CoordinatorToCoordinatorConfig const &config) -> bool {
|
||||
auto new_log = CoordinatorStateMachine::SerializeAddCoordinatorInstance(config);
|
||||
auto const res = raft_server_->append_entries({new_log});
|
||||
if (!res->get_accepted()) {
|
||||
spdlog::error(
|
||||
"Failed to accept request for adding coordinator instance {}. Most likely the reason is that the instance is "
|
||||
"not the leader.",
|
||||
config.coordinator_server_id);
|
||||
return false;
|
||||
}
|
||||
|
||||
spdlog::info("Request for adding coordinator instance {} accepted", config.coordinator_server_id);
|
||||
|
||||
if (res->get_result_code() != nuraft::cmd_result_code::OK) {
|
||||
spdlog::error("Failed to add coordinator instance {} with error code {}", config.coordinator_server_id,
|
||||
static_cast<int>(res->get_result_code()));
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
auto RaftState::AppendUpdateUUIDForInstanceLog(std::string_view instance_name, const utils::UUID &uuid) -> bool {
|
||||
auto new_log = CoordinatorStateMachine::SerializeUpdateUUIDForInstance(
|
||||
{.instance_name = std::string{instance_name}, .uuid = uuid});
|
||||
auto const res = raft_server_->append_entries({new_log});
|
||||
if (!res->get_accepted()) {
|
||||
spdlog::error("Failed to accept request for updating UUID of instance.");
|
||||
return false;
|
||||
}
|
||||
spdlog::trace("Request for updating UUID of instance accepted");
|
||||
|
||||
if (res->get_result_code() != nuraft::cmd_result_code::OK) {
|
||||
spdlog::error("Failed to update UUID of instance with error code {}", int(res->get_result_code()));
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
auto RaftState::FindCurrentMainInstanceName() const -> std::optional<std::string> {
|
||||
return state_machine_->FindCurrentMainInstanceName();
|
||||
}
|
||||
|
||||
auto RaftState::MainExists() const -> bool { return state_machine_->MainExists(); }
|
||||
|
||||
auto RaftState::HasMainState(std::string_view instance_name) const -> bool {
|
||||
return state_machine_->HasMainState(instance_name);
|
||||
auto RaftState::IsMain(std::string_view instance_name) const -> bool { return state_machine_->IsMain(instance_name); }
|
||||
|
||||
auto RaftState::IsReplica(std::string_view instance_name) const -> bool {
|
||||
return state_machine_->IsReplica(instance_name);
|
||||
}
|
||||
|
||||
auto RaftState::HasReplicaState(std::string_view instance_name) const -> bool {
|
||||
return state_machine_->HasReplicaState(instance_name);
|
||||
}
|
||||
auto RaftState::GetInstances() const -> std::vector<InstanceState> { return state_machine_->GetInstances(); }
|
||||
|
||||
auto RaftState::GetReplicationInstances() const -> std::vector<ReplicationInstanceState> {
|
||||
return state_machine_->GetReplicationInstances();
|
||||
}
|
||||
|
||||
auto RaftState::GetCurrentMainUUID() const -> utils::UUID { return state_machine_->GetCurrentMainUUID(); }
|
||||
|
||||
auto RaftState::IsCurrentMain(std::string_view instance_name) const -> bool {
|
||||
return state_machine_->IsCurrentMain(instance_name);
|
||||
}
|
||||
|
||||
auto RaftState::IsLockOpened() const -> bool { return state_machine_->IsLockOpened(); }
|
||||
|
||||
auto RaftState::GetInstanceUUID(std::string_view instance_name) const -> utils::UUID {
|
||||
return state_machine_->GetInstanceUUID(instance_name);
|
||||
}
|
||||
|
||||
auto RaftState::GetCoordinatorInstances() const -> std::vector<CoordinatorInstanceState> {
|
||||
return state_machine_->GetCoordinatorInstances();
|
||||
}
|
||||
auto RaftState::GetUUID() const -> utils::UUID { return state_machine_->GetUUID(); }
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
#endif
|
||||
|
@ -20,7 +20,7 @@
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
ReplicationInstance::ReplicationInstance(CoordinatorInstance *peer, CoordinatorToReplicaConfig config,
|
||||
ReplicationInstance::ReplicationInstance(CoordinatorInstance *peer, CoordinatorClientConfig config,
|
||||
HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb,
|
||||
HealthCheckInstanceCallback succ_instance_cb,
|
||||
HealthCheckInstanceCallback fail_instance_cb)
|
||||
@ -56,6 +56,7 @@ auto ReplicationInstance::PromoteToMain(utils::UUID const &new_uuid, Replication
|
||||
return false;
|
||||
}
|
||||
|
||||
main_uuid_ = new_uuid;
|
||||
succ_cb_ = main_succ_cb;
|
||||
fail_cb_ = main_fail_cb;
|
||||
|
||||
@ -81,7 +82,7 @@ auto ReplicationInstance::StopFrequentCheck() -> void { client_.StopFrequentChec
|
||||
auto ReplicationInstance::PauseFrequentCheck() -> void { client_.PauseFrequentCheck(); }
|
||||
auto ReplicationInstance::ResumeFrequentCheck() -> void { client_.ResumeFrequentCheck(); }
|
||||
|
||||
auto ReplicationInstance::ReplicationClientInfo() const -> coordination::ReplicationClientInfo {
|
||||
auto ReplicationInstance::ReplicationClientInfo() const -> CoordinatorClientConfig::ReplicationClientInfo {
|
||||
return client_.ReplicationClientInfo();
|
||||
}
|
||||
|
||||
@ -90,6 +91,9 @@ auto ReplicationInstance::GetFailCallback() -> HealthCheckInstanceCallback & { r
|
||||
|
||||
auto ReplicationInstance::GetClient() -> CoordinatorClient & { return client_; }
|
||||
|
||||
auto ReplicationInstance::SetNewMainUUID(utils::UUID const &main_uuid) -> void { main_uuid_ = main_uuid; }
|
||||
auto ReplicationInstance::GetMainUUID() const -> std::optional<utils::UUID> const & { return main_uuid_; }
|
||||
|
||||
auto ReplicationInstance::EnsureReplicaHasCorrectMainUUID(utils::UUID const &curr_main_uuid) -> bool {
|
||||
if (!IsReadyForUUIDPing()) {
|
||||
return true;
|
||||
@ -112,6 +116,7 @@ auto ReplicationInstance::SendSwapAndUpdateUUID(utils::UUID const &new_main_uuid
|
||||
if (!replication_coordination_glue::SendSwapMainUUIDRpc(client_.RpcClient(), new_main_uuid)) {
|
||||
return false;
|
||||
}
|
||||
SetNewMainUUID(new_main_uuid);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -20,7 +20,7 @@ namespace memgraph::dbms {
|
||||
CoordinatorHandler::CoordinatorHandler(coordination::CoordinatorState &coordinator_state)
|
||||
: coordinator_state_(coordinator_state) {}
|
||||
|
||||
auto CoordinatorHandler::RegisterReplicationInstance(coordination::CoordinatorToReplicaConfig const &config)
|
||||
auto CoordinatorHandler::RegisterReplicationInstance(coordination::CoordinatorClientConfig const &config)
|
||||
-> coordination::RegisterInstanceCoordinatorStatus {
|
||||
return coordinator_state_.RegisterReplicationInstance(config);
|
||||
}
|
||||
@ -39,8 +39,9 @@ auto CoordinatorHandler::ShowInstances() const -> std::vector<coordination::Inst
|
||||
return coordinator_state_.ShowInstances();
|
||||
}
|
||||
|
||||
auto CoordinatorHandler::AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void {
|
||||
coordinator_state_.AddCoordinatorInstance(config);
|
||||
auto CoordinatorHandler::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port,
|
||||
std::string_view raft_address) -> void {
|
||||
coordinator_state_.AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
|
||||
}
|
||||
|
||||
} // namespace memgraph::dbms
|
||||
|
@ -13,7 +13,7 @@
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "coordination/coordinator_communication_config.hpp"
|
||||
#include "coordination/coordinator_config.hpp"
|
||||
#include "coordination/coordinator_state.hpp"
|
||||
#include "coordination/instance_status.hpp"
|
||||
#include "coordination/register_main_replica_coordinator_status.hpp"
|
||||
@ -30,7 +30,7 @@ class CoordinatorHandler {
|
||||
|
||||
// TODO: (andi) When moving coordinator state on same instances, rename from RegisterReplicationInstance to
|
||||
// RegisterInstance
|
||||
auto RegisterReplicationInstance(coordination::CoordinatorToReplicaConfig const &config)
|
||||
auto RegisterReplicationInstance(coordination::CoordinatorClientConfig const &config)
|
||||
-> coordination::RegisterInstanceCoordinatorStatus;
|
||||
|
||||
auto UnregisterReplicationInstance(std::string_view instance_name)
|
||||
@ -40,7 +40,7 @@ class CoordinatorHandler {
|
||||
|
||||
auto ShowInstances() const -> std::vector<coordination::InstanceStatus>;
|
||||
|
||||
auto AddCoordinatorInstance(coordination::CoordinatorToCoordinatorConfig const &config) -> void;
|
||||
auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;
|
||||
|
||||
private:
|
||||
coordination::CoordinatorState &coordinator_state_;
|
||||
|
@ -311,7 +311,7 @@ class DbmsHandler {
|
||||
stats.triggers += info.triggers;
|
||||
stats.streams += info.streams;
|
||||
++stats.num_databases;
|
||||
stats.indices += storage_info.label_indices + storage_info.label_property_indices + storage_info.text_indices;
|
||||
stats.indices += storage_info.label_indices + storage_info.label_property_indices;
|
||||
stats.constraints += storage_info.existence_constraints + storage_info.unique_constraints;
|
||||
++stats.storage_modes[(int)storage_info.storage_mode];
|
||||
++stats.isolation_levels[(int)storage_info.isolation_level];
|
||||
|
@ -615,7 +615,6 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
auto vertex = transaction->FindVertex(delta.vertex_add_remove_label.gid, View::NEW);
|
||||
if (!vertex)
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
// NOTE: Text search doesn’t have replication in scope yet (Phases 1 and 2)
|
||||
auto ret = vertex->AddLabel(transaction->NameToLabel(delta.vertex_add_remove_label.label));
|
||||
if (ret.HasError() || !ret.GetValue())
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
@ -628,7 +627,6 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
auto vertex = transaction->FindVertex(delta.vertex_add_remove_label.gid, View::NEW);
|
||||
if (!vertex)
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
// NOTE: Text search doesn’t have replication in scope yet (Phases 1 and 2)
|
||||
auto ret = vertex->RemoveLabel(transaction->NameToLabel(delta.vertex_add_remove_label.label));
|
||||
if (ret.HasError() || !ret.GetValue())
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
@ -642,7 +640,6 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
auto vertex = transaction->FindVertex(delta.vertex_edge_set_property.gid, View::NEW);
|
||||
if (!vertex)
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
// NOTE: Phase 1 of the text search feature doesn't have replication in scope
|
||||
auto ret = vertex->SetProperty(transaction->NameToProperty(delta.vertex_edge_set_property.property),
|
||||
delta.vertex_edge_set_property.value);
|
||||
if (ret.HasError())
|
||||
@ -856,14 +853,6 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::TEXT_INDEX_CREATE: {
|
||||
// NOTE: Text search doesn’t have replication in scope yet (Phases 1 and 2)
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::TEXT_INDEX_DROP: {
|
||||
// NOTE: Text search doesn’t have replication in scope yet (Phases 1 and 2)
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE: {
|
||||
spdlog::trace(" Create existence constraint on :{} ({})", delta.operation_label_property.label,
|
||||
delta.operation_label_property.property);
|
||||
|
@ -18,15 +18,14 @@
|
||||
|
||||
// Bolt server flags.
|
||||
// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_string(
|
||||
experimental_enabled, "",
|
||||
"Experimental features to be used, comma-separated. Options [system-replication, text-search, high-availability]");
|
||||
DEFINE_string(experimental_enabled, "",
|
||||
"Experimental features to be used, comma seperated. Options [system-replication, high-availability]");
|
||||
|
||||
using namespace std::string_view_literals;
|
||||
|
||||
namespace memgraph::flags {
|
||||
|
||||
auto const mapping = std::map{std::pair{"system-replication"sv, Experiments::SYSTEM_REPLICATION},
|
||||
std::pair{"text-search"sv, Experiments::TEXT_SEARCH},
|
||||
std::pair{"high-availability"sv, Experiments::HIGH_AVAILABILITY}};
|
||||
|
||||
auto ExperimentsInstance() -> Experiments & {
|
||||
@ -46,7 +45,7 @@ bool AreExperimentsEnabled(Experiments experiments) {
|
||||
void InitializeExperimental() {
|
||||
namespace rv = ranges::views;
|
||||
|
||||
auto const canonicalize_string = [](auto &&rng) {
|
||||
auto const connonicalize_string = [](auto &&rng) {
|
||||
auto const is_space = [](auto c) { return c == ' '; };
|
||||
auto const to_lower = [](unsigned char c) { return std::tolower(c); };
|
||||
|
||||
@ -57,7 +56,7 @@ void InitializeExperimental() {
|
||||
auto const mapping_end = mapping.cend();
|
||||
using underlying_type = std::underlying_type_t<Experiments>;
|
||||
auto to_set = underlying_type{};
|
||||
for (auto &&experiment : FLAGS_experimental_enabled | rv::split(',') | rv::transform(canonicalize_string)) {
|
||||
for (auto &&experiment : FLAGS_experimental_enabled | rv::split(',') | rv::transform(connonicalize_string)) {
|
||||
if (auto it = mapping.find(experiment); it != mapping_end) {
|
||||
to_set |= static_cast<underlying_type>(it->second);
|
||||
}
|
||||
|
@ -23,8 +23,7 @@ namespace memgraph::flags {
|
||||
// old experiments can be reused once code cleanup has happened
|
||||
enum class Experiments : uint8_t {
|
||||
SYSTEM_REPLICATION = 1 << 0,
|
||||
TEXT_SEARCH = 1 << 1,
|
||||
HIGH_AVAILABILITY = 1 << 2,
|
||||
HIGH_AVAILABILITY = 1 << 1,
|
||||
};
|
||||
|
||||
bool AreExperimentsEnabled(Experiments experiments);
|
||||
|
@ -131,10 +131,6 @@ DEFINE_uint64(storage_recovery_thread_count,
|
||||
DEFINE_bool(storage_enable_schema_metadata, false,
|
||||
"Controls whether metadata should be collected about the resident labels and edge types.");
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_bool(storage_delta_on_identical_property_update, true,
|
||||
"Controls whether updating a property with the same value should create a delta object.");
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_bool(telemetry_enabled, false,
|
||||
"Set to true to enable telemetry. We collect information about the "
|
||||
|
@ -84,8 +84,6 @@ DECLARE_bool(storage_parallel_schema_recovery);
|
||||
DECLARE_uint64(storage_recovery_thread_count);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(storage_enable_schema_metadata);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(storage_delta_on_identical_property_update);
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(telemetry_enabled);
|
||||
|
@ -13,11 +13,11 @@
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_uint32(management_port, 0, "Port on which coordinator servers will be started.");
|
||||
DEFINE_uint32(coordinator_server_port, 0, "Port on which coordinator servers will be started.");
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_uint32(coordinator_port, 0, "Port on which raft servers will be started.");
|
||||
DEFINE_uint32(raft_server_port, 0, "Port on which raft servers will be started.");
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_uint32(coordinator_id, 0, "Unique ID of the raft server.");
|
||||
DEFINE_uint32(raft_server_id, 0, "Unique ID of the raft server.");
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_uint32(instance_down_timeout_sec, 5, "Time duration after which an instance is considered down.");
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
|
@ -15,11 +15,11 @@
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_uint32(management_port);
|
||||
DECLARE_uint32(coordinator_server_port);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_uint32(coordinator_port);
|
||||
DECLARE_uint32(raft_server_port);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_uint32(coordinator_id);
|
||||
DECLARE_uint32(raft_server_id);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_uint32(instance_down_timeout_sec);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -73,11 +73,11 @@ constexpr auto kLogToStderrGFlagsKey = "also_log_to_stderr";
|
||||
constexpr auto kCartesianProductEnabledSettingKey = "cartesian-product-enabled";
|
||||
constexpr auto kCartesianProductEnabledGFlagsKey = "cartesian-product-enabled";
|
||||
|
||||
// NOLINTBEGIN(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
// Local cache-like thing
|
||||
std::atomic<double> execution_timeout_sec_;
|
||||
std::atomic<bool> cartesian_product_enabled_{true};
|
||||
// NOLINTEND(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
std::atomic<double> execution_timeout_sec_; // Local cache-like thing
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
std::atomic<bool> cartesian_product_enabled_{true}; // Local cache-like thing
|
||||
|
||||
auto ToLLEnum(std::string_view val) {
|
||||
const auto ll_enum = memgraph::flags::LogLevelToEnum(val);
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -249,40 +249,6 @@ std::pair<std::vector<std::string>, std::optional<int>> SessionHL::Interpret(
|
||||
}
|
||||
}
|
||||
|
||||
using memgraph::communication::bolt::Value;
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
auto SessionHL::Route(std::map<std::string, Value> const &routing,
|
||||
std::vector<memgraph::communication::bolt::Value> const & /*bookmarks*/,
|
||||
std::map<std::string, Value> const & /*extra*/) -> std::map<std::string, Value> {
|
||||
auto routing_map = ranges::views::transform(
|
||||
routing, [](auto const &pair) { return std::pair(pair.first, pair.second.ValueString()); }) |
|
||||
ranges::to<std::map<std::string, std::string>>();
|
||||
|
||||
auto routing_table_res = interpreter_.Route(routing_map);
|
||||
|
||||
auto create_server = [](auto const &server_info) -> Value {
|
||||
auto const &[addresses, role] = server_info;
|
||||
std::map<std::string, Value> server_map;
|
||||
auto bolt_addresses = ranges::views::transform(addresses, [](auto const &addr) { return Value{addr}; }) |
|
||||
ranges::to<std::vector<Value>>();
|
||||
|
||||
server_map["addresses"] = std::move(bolt_addresses);
|
||||
server_map["role"] = memgraph::communication::bolt::Value{role};
|
||||
return Value{std::move(server_map)};
|
||||
};
|
||||
|
||||
std::map<std::string, Value> communication_res;
|
||||
communication_res["ttl"] = Value{routing_table_res.ttl};
|
||||
communication_res["db"] = Value{};
|
||||
|
||||
auto servers = ranges::views::transform(routing_table_res.servers, create_server) | ranges::to<std::vector<Value>>();
|
||||
communication_res["servers"] = memgraph::communication::bolt::Value{std::move(servers)};
|
||||
|
||||
return {{"rt", memgraph::communication::bolt::Value{std::move(communication_res)}}};
|
||||
}
|
||||
#endif
|
||||
|
||||
void SessionHL::RollbackTransaction() {
|
||||
try {
|
||||
interpreter_.RollbackTransaction();
|
||||
|
@ -55,13 +55,6 @@ class SessionHL final : public memgraph::communication::bolt::Session<memgraph::
|
||||
const std::string &query, const std::map<std::string, memgraph::communication::bolt::Value> ¶ms,
|
||||
const std::map<std::string, memgraph::communication::bolt::Value> &extra) override;
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
auto Route(std::map<std::string, memgraph::communication::bolt::Value> const &routing,
|
||||
std::vector<memgraph::communication::bolt::Value> const &bookmarks,
|
||||
std::map<std::string, memgraph::communication::bolt::Value> const &extra)
|
||||
-> std::map<std::string, memgraph::communication::bolt::Value> override;
|
||||
#endif
|
||||
|
||||
std::map<std::string, memgraph::communication::bolt::Value> Pull(TEncoder *encoder, std::optional<int> n,
|
||||
std::optional<int> qid) override;
|
||||
|
||||
|
@ -82,7 +82,8 @@ bool Endpoint::IsResolvableAddress(std::string_view address, uint16_t port) {
|
||||
return status == 0;
|
||||
}
|
||||
|
||||
std::optional<Endpoint> Endpoint::ParseSocketOrAddress(std::string_view address, std::optional<uint16_t> default_port) {
|
||||
std::optional<ParsedAddress> Endpoint::ParseSocketOrAddress(std::string_view address,
|
||||
std::optional<uint16_t> default_port) {
|
||||
auto const parts = utils::SplitView(address, delimiter);
|
||||
|
||||
if (parts.size() > 2) {
|
||||
@ -108,13 +109,13 @@ std::optional<Endpoint> Endpoint::ParseSocketOrAddress(std::string_view address,
|
||||
}();
|
||||
|
||||
if (GetIpFamily(addr) == IpFamily::NONE) {
|
||||
if (IsResolvableAddress(addr, *port)) { // NOLINT
|
||||
return Endpoint{std::string(addr), *port}; // NOLINT
|
||||
if (IsResolvableAddress(addr, *port)) { // NOLINT
|
||||
return std::pair{addr, *port}; // NOLINT
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
return Endpoint{std::string(addr), *port}; // NOLINT
|
||||
return std::pair{addr, *port}; // NOLINT
|
||||
}
|
||||
|
||||
auto Endpoint::ValidatePort(std::optional<uint16_t> port) -> bool {
|
||||
@ -137,14 +138,4 @@ auto Endpoint::ValidatePort(std::optional<uint16_t> port) -> bool {
|
||||
return true;
|
||||
}
|
||||
|
||||
void to_json(nlohmann::json &j, Endpoint const &config) {
|
||||
j = nlohmann::json{{"address", config.address}, {"port", config.port}, {"family", config.family}};
|
||||
}
|
||||
|
||||
void from_json(nlohmann::json const &j, Endpoint &config) {
|
||||
config.address = j.at("address").get<std::string>();
|
||||
config.port = j.at("port").get<uint16_t>();
|
||||
config.family = j.at("family").get<Endpoint::IpFamily>();
|
||||
}
|
||||
|
||||
} // namespace memgraph::io::network
|
||||
|
@ -17,10 +17,10 @@
|
||||
#include <optional>
|
||||
#include <string>
|
||||
|
||||
#include "json/json.hpp"
|
||||
|
||||
namespace memgraph::io::network {
|
||||
|
||||
using ParsedAddress = std::pair<std::string_view, uint16_t>;
|
||||
|
||||
struct Endpoint {
|
||||
static const struct needs_resolving_t {
|
||||
} needs_resolving;
|
||||
@ -39,8 +39,8 @@ struct Endpoint {
|
||||
|
||||
enum class IpFamily : std::uint8_t { NONE, IP4, IP6 };
|
||||
|
||||
static std::optional<Endpoint> ParseSocketOrAddress(std::string_view address,
|
||||
std::optional<uint16_t> default_port = {});
|
||||
static std::optional<ParsedAddress> ParseSocketOrAddress(std::string_view address,
|
||||
std::optional<uint16_t> default_port = {});
|
||||
|
||||
std::string SocketAddress() const;
|
||||
|
||||
@ -59,7 +59,4 @@ struct Endpoint {
|
||||
static auto ValidatePort(std::optional<uint16_t> port) -> bool;
|
||||
};
|
||||
|
||||
void to_json(nlohmann::json &j, Endpoint const &config);
|
||||
void from_json(nlohmann::json const &j, Endpoint &config);
|
||||
|
||||
} // namespace memgraph::io::network
|
||||
|
@ -332,8 +332,7 @@ int main(int argc, char **argv) {
|
||||
.durability_directory = FLAGS_data_directory + "/rocksdb_durability",
|
||||
.wal_directory = FLAGS_data_directory + "/rocksdb_wal"},
|
||||
.salient.items = {.properties_on_edges = FLAGS_storage_properties_on_edges,
|
||||
.enable_schema_metadata = FLAGS_storage_enable_schema_metadata,
|
||||
.delta_on_identical_property_update = FLAGS_storage_delta_on_identical_property_update},
|
||||
.enable_schema_metadata = FLAGS_storage_enable_schema_metadata},
|
||||
.salient.storage_mode = memgraph::flags::ParseStorageMode()};
|
||||
spdlog::info("config recover on startup {}, flags {} {}", db_config.durability.recover_on_startup,
|
||||
FLAGS_storage_recover_on_startup, FLAGS_data_recovery_on_startup);
|
||||
@ -429,7 +428,7 @@ int main(int argc, char **argv) {
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
// MAIN or REPLICA instance
|
||||
if (FLAGS_management_port) {
|
||||
if (FLAGS_coordinator_server_port) {
|
||||
memgraph::dbms::CoordinatorHandlers::Register(coordinator_state.GetCoordinatorServer(), replication_handler);
|
||||
MG_ASSERT(coordinator_state.GetCoordinatorServer().Start(), "Failed to start coordinator server!");
|
||||
}
|
||||
@ -437,12 +436,12 @@ int main(int argc, char **argv) {
|
||||
|
||||
auto db_acc = dbms_handler.Get();
|
||||
|
||||
memgraph::query::InterpreterContext interpreter_context_(interp_config, &dbms_handler, &repl_state, system,
|
||||
auto *interpreter_context_ =
|
||||
memgraph::query::InterpreterContext::getInstance(interp_config, &dbms_handler, &repl_state, system,
|
||||
#ifdef MG_ENTERPRISE
|
||||
&coordinator_state,
|
||||
&coordinator_state,
|
||||
#endif
|
||||
auth_handler.get(), auth_checker.get(),
|
||||
&replication_handler);
|
||||
auth_handler.get(), auth_checker.get(), &replication_handler);
|
||||
MG_ASSERT(db_acc, "Failed to access the main database");
|
||||
|
||||
memgraph::query::procedure::gModuleRegistry.SetModulesDirectory(memgraph::flags::ParseQueryModulesDirectory(),
|
||||
@ -455,9 +454,9 @@ int main(int argc, char **argv) {
|
||||
spdlog::info("Running init file...");
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (memgraph::license::global_license_checker.IsEnterpriseValidFast()) {
|
||||
InitFromCypherlFile(interpreter_context_, db_acc, FLAGS_init_file, &audit_log);
|
||||
InitFromCypherlFile(*interpreter_context_, db_acc, FLAGS_init_file, &audit_log);
|
||||
} else {
|
||||
InitFromCypherlFile(interpreter_context_, db_acc, FLAGS_init_file);
|
||||
InitFromCypherlFile(*interpreter_context_, db_acc, FLAGS_init_file);
|
||||
}
|
||||
#else
|
||||
InitFromCypherlFile(interpreter_context_, db_acc, FLAGS_init_file);
|
||||
@ -465,20 +464,20 @@ int main(int argc, char **argv) {
|
||||
}
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
dbms_handler.RestoreTriggers(&interpreter_context_);
|
||||
dbms_handler.RestoreStreams(&interpreter_context_);
|
||||
dbms_handler.RestoreTriggers(interpreter_context_);
|
||||
dbms_handler.RestoreStreams(interpreter_context_);
|
||||
#else
|
||||
{
|
||||
// Triggers can execute query procedures, so we need to reload the modules first and then
|
||||
// the triggers
|
||||
auto storage_accessor = db_acc->Access();
|
||||
auto dba = memgraph::query::DbAccessor{storage_accessor.get()};
|
||||
db_acc->trigger_store()->RestoreTriggers(&interpreter_context_.ast_cache, &dba, interpreter_context_.config.query,
|
||||
db_acc->trigger_store()->RestoreTriggers(interpreter_context_.ast_cache, &dba, interpreter_context_.config.query,
|
||||
interpreter_context_.auth_checker);
|
||||
}
|
||||
|
||||
// As the Stream transformations are using modules, they have to be restored after the query modules are loaded.
|
||||
db_acc->streams()->RestoreStreams(db_acc, &interpreter_context_);
|
||||
db_acc->streams()->RestoreStreams(db_acc, interpreter_context_);
|
||||
#endif
|
||||
|
||||
ServerContext context;
|
||||
@ -494,9 +493,9 @@ int main(int argc, char **argv) {
|
||||
auto server_endpoint = memgraph::communication::v2::ServerEndpoint{
|
||||
boost::asio::ip::address::from_string(FLAGS_bolt_address), static_cast<uint16_t>(FLAGS_bolt_port)};
|
||||
#ifdef MG_ENTERPRISE
|
||||
Context session_context{&interpreter_context_, &auth_, &audit_log};
|
||||
Context session_context{interpreter_context_, &auth_, &audit_log};
|
||||
#else
|
||||
Context session_context{&interpreter_context_, &auth_};
|
||||
Context session_context{interpreter_context_, &auth_};
|
||||
#endif
|
||||
memgraph::glue::ServerT server(server_endpoint, &session_context, &context, FLAGS_bolt_session_inactivity_timeout,
|
||||
service_name, FLAGS_bolt_num_workers);
|
||||
@ -541,14 +540,14 @@ int main(int argc, char **argv) {
|
||||
#ifdef MG_ENTERPRISE
|
||||
&metrics_server,
|
||||
#endif
|
||||
&websocket_server, &server, &interpreter_context_] {
|
||||
&websocket_server, &server, interpreter_context_] {
|
||||
// Server needs to be shutdown first and then the database. This prevents
|
||||
// a race condition when a transaction is accepted during server shutdown.
|
||||
server.Shutdown();
|
||||
// After the server is notified to stop accepting and processing
|
||||
// connections we tell the execution engine to stop processing all pending
|
||||
// queries.
|
||||
interpreter_context_.Shutdown();
|
||||
interpreter_context_->Shutdown();
|
||||
websocket_server.Shutdown();
|
||||
#ifdef MG_ENTERPRISE
|
||||
metrics_server.Shutdown();
|
||||
@ -576,12 +575,12 @@ int main(int argc, char **argv) {
|
||||
MG_ASSERT(db_acc, "Failed to gain access to the main database");
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (memgraph::license::global_license_checker.IsEnterpriseValidFast()) {
|
||||
InitFromCypherlFile(interpreter_context_, db_acc, FLAGS_init_data_file, &audit_log);
|
||||
InitFromCypherlFile(*interpreter_context_, db_acc, FLAGS_init_data_file, &audit_log);
|
||||
} else {
|
||||
InitFromCypherlFile(interpreter_context_, db_acc, FLAGS_init_data_file);
|
||||
InitFromCypherlFile(*interpreter_context_, db_acc, FLAGS_init_data_file);
|
||||
}
|
||||
#else
|
||||
InitFromCypherlFile(interpreter_context_, db_acc, FLAGS_init_data_file);
|
||||
InitFromCypherlFile(*interpreter_context_, db_acc, FLAGS_init_data_file);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -72,6 +72,12 @@ inline std::vector<storage::LabelId> NamesToLabels(const std::vector<std::string
|
||||
return labels;
|
||||
}
|
||||
|
||||
struct UserExecutionContextInfo {
|
||||
enum class UserMode { NONE, USER, ROLE };
|
||||
UserMode mode;
|
||||
std::string name;
|
||||
};
|
||||
|
||||
struct ExecutionContext {
|
||||
DbAccessor *db_accessor{nullptr};
|
||||
SymbolTable symbol_table;
|
||||
@ -86,6 +92,7 @@ struct ExecutionContext {
|
||||
TriggerContextCollector *trigger_context_collector{nullptr};
|
||||
FrameChangeCollector *frame_change_collector{nullptr};
|
||||
std::shared_ptr<utils::AsyncTimer> timer;
|
||||
std::shared_ptr<QueryUserOrRole> user_or_role;
|
||||
#ifdef MG_ENTERPRISE
|
||||
std::unique_ptr<FineGrainedAuthChecker> auth_checker{nullptr};
|
||||
#endif
|
||||
|
@ -634,24 +634,6 @@ class DbAccessor final {
|
||||
|
||||
bool EdgeTypeIndexExists(storage::EdgeTypeId edge_type) const { return accessor_->EdgeTypeIndexExists(edge_type); }
|
||||
|
||||
bool TextIndexExists(const std::string &index_name) const { return accessor_->TextIndexExists(index_name); }
|
||||
|
||||
void TextIndexAddVertex(const VertexAccessor &vertex) { accessor_->TextIndexAddVertex(vertex.impl_); }
|
||||
|
||||
void TextIndexUpdateVertex(const VertexAccessor &vertex, const std::vector<storage::LabelId> &removed_labels = {}) {
|
||||
accessor_->TextIndexUpdateVertex(vertex.impl_, removed_labels);
|
||||
}
|
||||
|
||||
std::vector<storage::Gid> TextIndexSearch(const std::string &index_name, const std::string &search_query,
|
||||
text_search_mode search_mode) const {
|
||||
return accessor_->TextIndexSearch(index_name, search_query, search_mode);
|
||||
}
|
||||
|
||||
std::string TextIndexAggregate(const std::string &index_name, const std::string &search_query,
|
||||
const std::string &aggregation_query) const {
|
||||
return accessor_->TextIndexAggregate(index_name, search_query, aggregation_query);
|
||||
}
|
||||
|
||||
std::optional<storage::LabelIndexStats> GetIndexStats(const storage::LabelId &label) const {
|
||||
return accessor_->GetIndexStats(label);
|
||||
}
|
||||
@ -735,12 +717,6 @@ class DbAccessor final {
|
||||
return accessor_->DropIndex(edge_type);
|
||||
}
|
||||
|
||||
void CreateTextIndex(const std::string &index_name, storage::LabelId label) {
|
||||
accessor_->CreateTextIndex(index_name, label, this);
|
||||
}
|
||||
|
||||
void DropTextIndex(const std::string &index_name) { accessor_->DropTextIndex(index_name); }
|
||||
|
||||
utils::BasicResult<storage::StorageExistenceConstraintDefinitionError, void> CreateExistenceConstraint(
|
||||
storage::LabelId label, storage::PropertyId property) {
|
||||
return accessor_->CreateExistenceConstraint(label, property);
|
||||
|
@ -252,10 +252,6 @@ void DumpLabelPropertyIndex(std::ostream *os, query::DbAccessor *dba, storage::L
|
||||
<< ");";
|
||||
}
|
||||
|
||||
void DumpTextIndex(std::ostream *os, query::DbAccessor *dba, const std::string &index_name, storage::LabelId label) {
|
||||
*os << "CREATE TEXT INDEX " << EscapeName(index_name) << " ON :" << EscapeName(dba->LabelToName(label)) << ";";
|
||||
}
|
||||
|
||||
void DumpExistenceConstraint(std::ostream *os, query::DbAccessor *dba, storage::LabelId label,
|
||||
storage::PropertyId property) {
|
||||
*os << "CREATE CONSTRAINT ON (u:" << EscapeName(dba->LabelToName(label)) << ") ASSERT EXISTS (u."
|
||||
@ -290,8 +286,6 @@ PullPlanDump::PullPlanDump(DbAccessor *dba, dbms::DatabaseAccess db_acc)
|
||||
CreateLabelIndicesPullChunk(),
|
||||
// Dump all label property indices
|
||||
CreateLabelPropertyIndicesPullChunk(),
|
||||
// Dump all text indices
|
||||
CreateTextIndicesPullChunk(),
|
||||
// Dump all existence constraints
|
||||
CreateExistenceConstraintsPullChunk(),
|
||||
// Dump all unique constraints
|
||||
@ -418,34 +412,6 @@ PullPlanDump::PullChunk PullPlanDump::CreateLabelPropertyIndicesPullChunk() {
|
||||
};
|
||||
}
|
||||
|
||||
PullPlanDump::PullChunk PullPlanDump::CreateTextIndicesPullChunk() {
|
||||
// Dump all text indices
|
||||
return [this, global_index = 0U](AnyStream *stream, std::optional<int> n) mutable -> std::optional<size_t> {
|
||||
// Delay the construction of indices vectors
|
||||
if (!indices_info_) {
|
||||
indices_info_.emplace(dba_->ListAllIndices());
|
||||
}
|
||||
const auto &text = indices_info_->text_indices;
|
||||
|
||||
size_t local_counter = 0;
|
||||
while (global_index < text.size() && (!n || local_counter < *n)) {
|
||||
std::ostringstream os;
|
||||
const auto &text_index = text[global_index];
|
||||
DumpTextIndex(&os, dba_, text_index.first, text_index.second);
|
||||
stream->Result({TypedValue(os.str())});
|
||||
|
||||
++global_index;
|
||||
++local_counter;
|
||||
}
|
||||
|
||||
if (global_index == text.size()) {
|
||||
return local_counter;
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
};
|
||||
}
|
||||
|
||||
PullPlanDump::PullChunk PullPlanDump::CreateExistenceConstraintsPullChunk() {
|
||||
return [this, global_index = 0U](AnyStream *stream, std::optional<int> n) mutable -> std::optional<size_t> {
|
||||
// Delay the construction of constraint vectors
|
||||
|
@ -55,7 +55,6 @@ struct PullPlanDump {
|
||||
|
||||
PullChunk CreateLabelIndicesPullChunk();
|
||||
PullChunk CreateLabelPropertyIndicesPullChunk();
|
||||
PullChunk CreateTextIndicesPullChunk();
|
||||
PullChunk CreateExistenceConstraintsPullChunk();
|
||||
PullChunk CreateUniqueConstraintsPullChunk();
|
||||
PullChunk CreateInternalIndexPullChunk();
|
||||
|
@ -433,17 +433,4 @@ class MultiDatabaseQueryInMulticommandTxException : public QueryException {
|
||||
SPECIALIZE_GET_EXCEPTION_NAME(MultiDatabaseQueryInMulticommandTxException)
|
||||
};
|
||||
|
||||
class TextSearchException : public QueryException {
|
||||
using QueryException::QueryException;
|
||||
SPECIALIZE_GET_EXCEPTION_NAME(TextSearchException)
|
||||
};
|
||||
|
||||
class TextSearchDisabledException : public TextSearchException {
|
||||
public:
|
||||
TextSearchDisabledException()
|
||||
: TextSearchException(
|
||||
"To use text indices and text search, start Memgraph with the experimental text search feature enabled.") {}
|
||||
SPECIALIZE_GET_EXCEPTION_NAME(TextSearchDisabledException)
|
||||
};
|
||||
|
||||
} // namespace memgraph::query
|
||||
|
@ -189,9 +189,6 @@ constexpr utils::TypeInfo query::IndexQuery::kType{utils::TypeId::AST_INDEX_QUER
|
||||
constexpr utils::TypeInfo query::EdgeIndexQuery::kType{utils::TypeId::AST_EDGE_INDEX_QUERY, "EdgeIndexQuery",
|
||||
&query::Query::kType};
|
||||
|
||||
constexpr utils::TypeInfo query::TextIndexQuery::kType{utils::TypeId::AST_TEXT_INDEX_QUERY, "TextIndexQuery",
|
||||
&query::Query::kType};
|
||||
|
||||
constexpr utils::TypeInfo query::Create::kType{utils::TypeId::AST_CREATE, "Create", &query::Clause::kType};
|
||||
|
||||
constexpr utils::TypeInfo query::CallProcedure::kType{utils::TypeId::AST_CALL_PROCEDURE, "CallProcedure",
|
||||
|
@ -2273,37 +2273,6 @@ class EdgeIndexQuery : public memgraph::query::Query {
|
||||
friend class AstStorage;
|
||||
};
|
||||
|
||||
class TextIndexQuery : public memgraph::query::Query {
|
||||
public:
|
||||
static const utils::TypeInfo kType;
|
||||
const utils::TypeInfo &GetTypeInfo() const override { return kType; }
|
||||
|
||||
enum class Action { CREATE, DROP };
|
||||
|
||||
TextIndexQuery() = default;
|
||||
|
||||
DEFVISITABLE(QueryVisitor<void>);
|
||||
|
||||
memgraph::query::TextIndexQuery::Action action_;
|
||||
memgraph::query::LabelIx label_;
|
||||
std::string index_name_;
|
||||
|
||||
TextIndexQuery *Clone(AstStorage *storage) const override {
|
||||
TextIndexQuery *object = storage->Create<TextIndexQuery>();
|
||||
object->action_ = action_;
|
||||
object->label_ = storage->GetLabelIx(label_.name);
|
||||
object->index_name_ = index_name_;
|
||||
return object;
|
||||
}
|
||||
|
||||
protected:
|
||||
TextIndexQuery(Action action, LabelIx label, std::string index_name)
|
||||
: action_(action), label_(std::move(label)), index_name_(index_name) {}
|
||||
|
||||
private:
|
||||
friend class AstStorage;
|
||||
};
|
||||
|
||||
class Create : public memgraph::query::Clause {
|
||||
public:
|
||||
static const utils::TypeInfo kType;
|
||||
|
@ -83,7 +83,6 @@ class ExplainQuery;
|
||||
class ProfileQuery;
|
||||
class IndexQuery;
|
||||
class EdgeIndexQuery;
|
||||
class TextIndexQuery;
|
||||
class DatabaseInfoQuery;
|
||||
class SystemInfoQuery;
|
||||
class ConstraintQuery;
|
||||
@ -145,11 +144,11 @@ class ExpressionVisitor
|
||||
|
||||
template <class TResult>
|
||||
class QueryVisitor
|
||||
: public utils::Visitor<TResult, CypherQuery, ExplainQuery, ProfileQuery, IndexQuery, EdgeIndexQuery,
|
||||
TextIndexQuery, AuthQuery, DatabaseInfoQuery, SystemInfoQuery, ConstraintQuery, DumpQuery,
|
||||
ReplicationQuery, LockPathQuery, FreeMemoryQuery, TriggerQuery, IsolationLevelQuery,
|
||||
CreateSnapshotQuery, StreamQuery, SettingQuery, VersionQuery, ShowConfigQuery,
|
||||
TransactionQueueQuery, StorageModeQuery, AnalyzeGraphQuery, MultiDatabaseQuery,
|
||||
ShowDatabasesQuery, EdgeImportModeQuery, CoordinatorQuery> {};
|
||||
: public utils::Visitor<TResult, CypherQuery, ExplainQuery, ProfileQuery, IndexQuery, EdgeIndexQuery, AuthQuery,
|
||||
DatabaseInfoQuery, SystemInfoQuery, ConstraintQuery, DumpQuery, ReplicationQuery,
|
||||
LockPathQuery, FreeMemoryQuery, TriggerQuery, IsolationLevelQuery, CreateSnapshotQuery,
|
||||
StreamQuery, SettingQuery, VersionQuery, ShowConfigQuery, TransactionQueueQuery,
|
||||
StorageModeQuery, AnalyzeGraphQuery, MultiDatabaseQuery, ShowDatabasesQuery,
|
||||
EdgeImportModeQuery, CoordinatorQuery> {};
|
||||
|
||||
} // namespace memgraph::query
|
||||
|
@ -243,13 +243,6 @@ antlrcpp::Any CypherMainVisitor::visitIndexQuery(MemgraphCypher::IndexQueryConte
|
||||
return index_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitTextIndexQuery(MemgraphCypher::TextIndexQueryContext *ctx) {
|
||||
MG_ASSERT(ctx->children.size() == 1, "TextIndexQuery should have exactly one child!");
|
||||
auto *text_index_query = std::any_cast<TextIndexQuery *>(ctx->children[0]->accept(this));
|
||||
query_ = text_index_query;
|
||||
return text_index_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitCreateIndex(MemgraphCypher::CreateIndexContext *ctx) {
|
||||
auto *index_query = storage_->Create<IndexQuery>();
|
||||
index_query->action_ = IndexQuery::Action::CREATE;
|
||||
@ -293,21 +286,6 @@ antlrcpp::Any CypherMainVisitor::visitDropEdgeIndex(MemgraphCypher::DropEdgeInde
|
||||
return index_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitCreateTextIndex(MemgraphCypher::CreateTextIndexContext *ctx) {
|
||||
auto *index_query = storage_->Create<TextIndexQuery>();
|
||||
index_query->index_name_ = std::any_cast<std::string>(ctx->indexName()->accept(this));
|
||||
index_query->action_ = TextIndexQuery::Action::CREATE;
|
||||
index_query->label_ = AddLabel(std::any_cast<std::string>(ctx->labelName()->accept(this)));
|
||||
return index_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitDropTextIndex(MemgraphCypher::DropTextIndexContext *ctx) {
|
||||
auto *index_query = storage_->Create<TextIndexQuery>();
|
||||
index_query->index_name_ = std::any_cast<std::string>(ctx->indexName()->accept(this));
|
||||
index_query->action_ = TextIndexQuery::Action::DROP;
|
||||
return index_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitAuthQuery(MemgraphCypher::AuthQueryContext *ctx) {
|
||||
MG_ASSERT(ctx->children.size() == 1, "AuthQuery should have exactly one child!");
|
||||
auto *auth_query = std::any_cast<AuthQuery *>(ctx->children[0]->accept(this));
|
||||
|
@ -153,11 +153,6 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
|
||||
*/
|
||||
antlrcpp::Any visitEdgeIndexQuery(MemgraphCypher::EdgeIndexQueryContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return TextIndexQuery*
|
||||
*/
|
||||
antlrcpp::Any visitTextIndexQuery(MemgraphCypher::TextIndexQueryContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return ExplainQuery*
|
||||
*/
|
||||
@ -505,7 +500,7 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
|
||||
antlrcpp::Any visitCreateIndex(MemgraphCypher::CreateIndexContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return IndexQuery*
|
||||
* @return DropIndex*
|
||||
*/
|
||||
antlrcpp::Any visitDropIndex(MemgraphCypher::DropIndexContext *ctx) override;
|
||||
|
||||
@ -519,16 +514,6 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
|
||||
*/
|
||||
antlrcpp::Any visitDropEdgeIndex(MemgraphCypher::DropEdgeIndexContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return TextIndexQuery*
|
||||
*/
|
||||
antlrcpp::Any visitCreateTextIndex(MemgraphCypher::CreateTextIndexContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return TextIndexQuery*
|
||||
*/
|
||||
antlrcpp::Any visitDropTextIndex(MemgraphCypher::DropTextIndexContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
|
@ -25,7 +25,6 @@ statement : query ;
|
||||
|
||||
query : cypherQuery
|
||||
| indexQuery
|
||||
| textIndexQuery
|
||||
| explainQuery
|
||||
| profileQuery
|
||||
| databaseInfoQuery
|
||||
@ -66,8 +65,6 @@ cypherQuery : singleQuery ( cypherUnion )* ( queryMemoryLimit )? ;
|
||||
|
||||
indexQuery : createIndex | dropIndex;
|
||||
|
||||
textIndexQuery : createTextIndex | dropTextIndex;
|
||||
|
||||
singleQuery : clause ( clause )* ;
|
||||
|
||||
cypherUnion : ( UNION ALL singleQuery )
|
||||
@ -345,12 +342,6 @@ createIndex : CREATE INDEX ON ':' labelName ( '(' propertyKeyName ')' )? ;
|
||||
|
||||
dropIndex : DROP INDEX ON ':' labelName ( '(' propertyKeyName ')' )? ;
|
||||
|
||||
indexName : symbolicName ;
|
||||
|
||||
createTextIndex : CREATE TEXT INDEX indexName ON ':' labelName ;
|
||||
|
||||
dropTextIndex : DROP TEXT INDEX indexName ;
|
||||
|
||||
doubleLiteral : FloatingLiteral ;
|
||||
|
||||
cypherKeyword : ALL
|
||||
|
@ -131,7 +131,6 @@ SHOW : S H O W ;
|
||||
SINGLE : S I N G L E ;
|
||||
STARTS : S T A R T S ;
|
||||
STORAGE : S T O R A G E ;
|
||||
TEXT : T E X T ;
|
||||
THEN : T H E N ;
|
||||
TRUE : T R U E ;
|
||||
UNION : U N I O N ;
|
||||
|
@ -134,7 +134,6 @@ symbolicName : UnescapedSymbolicName
|
||||
query : cypherQuery
|
||||
| indexQuery
|
||||
| edgeIndexQuery
|
||||
| textIndexQuery
|
||||
| explainQuery
|
||||
| profileQuery
|
||||
| databaseInfoQuery
|
||||
|
@ -29,8 +29,6 @@ class PrivilegeExtractor : public QueryVisitor<void>, public HierarchicalTreeVis
|
||||
|
||||
void Visit(EdgeIndexQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }
|
||||
|
||||
void Visit(TextIndexQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }
|
||||
|
||||
void Visit(AnalyzeGraphQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }
|
||||
|
||||
void Visit(AuthQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::AUTH); }
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -39,7 +39,6 @@
|
||||
#include "dbms/dbms_handler.hpp"
|
||||
#include "dbms/global.hpp"
|
||||
#include "dbms/inmemory/storage_helper.hpp"
|
||||
#include "flags/experimental.hpp"
|
||||
#include "flags/replication.hpp"
|
||||
#include "flags/run_time_configurable.hpp"
|
||||
#include "glue/communication.hpp"
|
||||
@ -328,14 +327,15 @@ class ReplQueryHandler {
|
||||
|
||||
const auto repl_mode = convertToReplicationMode(sync_mode);
|
||||
|
||||
auto maybe_endpoint =
|
||||
const auto maybe_ip_and_port =
|
||||
io::network::Endpoint::ParseSocketOrAddress(socket_address, memgraph::replication::kDefaultReplicationPort);
|
||||
if (maybe_endpoint) {
|
||||
if (maybe_ip_and_port) {
|
||||
const auto [ip, port] = *maybe_ip_and_port;
|
||||
const auto replication_config =
|
||||
replication::ReplicationClientConfig{.name = name,
|
||||
.mode = repl_mode,
|
||||
.ip_address = std::move(maybe_endpoint->address),
|
||||
.port = maybe_endpoint->port,
|
||||
.ip_address = std::string(ip),
|
||||
.port = port,
|
||||
.replica_check_frequency = replica_check_frequency,
|
||||
.ssl = std::nullopt};
|
||||
|
||||
@ -407,51 +407,44 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
|
||||
case RPC_FAILED:
|
||||
throw QueryRuntimeException(
|
||||
"Couldn't unregister replica instance because current main instance couldn't unregister replica!");
|
||||
case LOCK_OPENED:
|
||||
throw QueryRuntimeException("Couldn't unregister replica because the last action didn't finish successfully!");
|
||||
case OPEN_LOCK:
|
||||
throw QueryRuntimeException(
|
||||
"Couldn't register instance as cluster didn't accept entering unregistration state!");
|
||||
case SUCCESS:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void RegisterReplicationInstance(std::string_view bolt_server, std::string_view management_server,
|
||||
std::string_view replication_server,
|
||||
void RegisterReplicationInstance(std::string_view coordinator_socket_address,
|
||||
std::string_view replication_socket_address,
|
||||
std::chrono::seconds const &instance_check_frequency,
|
||||
std::chrono::seconds const &instance_down_timeout,
|
||||
std::chrono::seconds const &instance_get_uuid_frequency,
|
||||
std::string_view instance_name, CoordinatorQuery::SyncMode sync_mode) override {
|
||||
auto const maybe_bolt_server = io::network::Endpoint::ParseSocketOrAddress(bolt_server);
|
||||
if (!maybe_bolt_server) {
|
||||
throw QueryRuntimeException("Invalid bolt socket address!");
|
||||
}
|
||||
|
||||
auto const maybe_management_server = io::network::Endpoint::ParseSocketOrAddress(management_server);
|
||||
if (!maybe_management_server) {
|
||||
throw QueryRuntimeException("Invalid management socket address!");
|
||||
}
|
||||
|
||||
auto const maybe_replication_server = io::network::Endpoint::ParseSocketOrAddress(replication_server);
|
||||
if (!maybe_replication_server) {
|
||||
const auto maybe_replication_ip_port = io::network::Endpoint::ParseSocketOrAddress(replication_socket_address);
|
||||
if (!maybe_replication_ip_port) {
|
||||
throw QueryRuntimeException("Invalid replication socket address!");
|
||||
}
|
||||
|
||||
auto const repl_config =
|
||||
coordination::ReplicationClientInfo{.instance_name = std::string(instance_name),
|
||||
.replication_mode = convertFromCoordinatorToReplicationMode(sync_mode),
|
||||
.replication_server = *maybe_replication_server};
|
||||
const auto maybe_coordinator_ip_port = io::network::Endpoint::ParseSocketOrAddress(coordinator_socket_address);
|
||||
if (!maybe_replication_ip_port) {
|
||||
throw QueryRuntimeException("Invalid replication socket address!");
|
||||
}
|
||||
|
||||
const auto [replication_ip, replication_port] = *maybe_replication_ip_port;
|
||||
const auto [coordinator_server_ip, coordinator_server_port] = *maybe_coordinator_ip_port;
|
||||
const auto repl_config = coordination::CoordinatorClientConfig::ReplicationClientInfo{
|
||||
.instance_name = std::string(instance_name),
|
||||
.replication_mode = convertFromCoordinatorToReplicationMode(sync_mode),
|
||||
.replication_ip_address = std::string(replication_ip),
|
||||
.replication_port = replication_port};
|
||||
|
||||
auto coordinator_client_config =
|
||||
coordination::CoordinatorToReplicaConfig{.instance_name = std::string(instance_name),
|
||||
.mgt_server = *maybe_management_server,
|
||||
.bolt_server = *maybe_bolt_server,
|
||||
.replication_client_info = repl_config,
|
||||
.instance_health_check_frequency_sec = instance_check_frequency,
|
||||
.instance_down_timeout_sec = instance_down_timeout,
|
||||
.instance_get_uuid_frequency_sec = instance_get_uuid_frequency,
|
||||
.ssl = std::nullopt};
|
||||
coordination::CoordinatorClientConfig{.instance_name = std::string(instance_name),
|
||||
.ip_address = std::string(coordinator_server_ip),
|
||||
.port = coordinator_server_port,
|
||||
.instance_health_check_frequency_sec = instance_check_frequency,
|
||||
.instance_down_timeout_sec = instance_down_timeout,
|
||||
.instance_get_uuid_frequency_sec = instance_get_uuid_frequency,
|
||||
.replication_client_info = repl_config,
|
||||
.ssl = std::nullopt};
|
||||
|
||||
auto status = coordinator_handler_.RegisterReplicationInstance(coordinator_client_config);
|
||||
switch (status) {
|
||||
@ -474,36 +467,20 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
|
||||
throw QueryRuntimeException(
|
||||
"Couldn't register replica instance because setting instance to replica failed! Check logs on replica to "
|
||||
"find out more info!");
|
||||
case LOCK_OPENED:
|
||||
throw QueryRuntimeException(
|
||||
"Couldn't register replica instance because because the last action didn't finish successfully!");
|
||||
case OPEN_LOCK:
|
||||
throw QueryRuntimeException(
|
||||
"Couldn't register replica instance because cluster didn't accept registration query!");
|
||||
case SUCCESS:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
auto AddCoordinatorInstance(uint32_t coordinator_id, std::string_view bolt_server,
|
||||
std::string_view coordinator_server) -> void override {
|
||||
auto const maybe_coordinator_server = io::network::Endpoint::ParseSocketOrAddress(coordinator_server);
|
||||
if (!maybe_coordinator_server) {
|
||||
throw QueryRuntimeException("Invalid coordinator socket address!");
|
||||
auto AddCoordinatorInstance(uint32_t raft_server_id, std::string_view raft_socket_address) -> void override {
|
||||
auto const maybe_ip_and_port = io::network::Endpoint::ParseSocketOrAddress(raft_socket_address);
|
||||
if (maybe_ip_and_port) {
|
||||
auto const [ip, port] = *maybe_ip_and_port;
|
||||
spdlog::info("Adding instance {} with raft socket address {}:{}.", raft_server_id, ip, port);
|
||||
coordinator_handler_.AddCoordinatorInstance(raft_server_id, port, ip);
|
||||
} else {
|
||||
spdlog::error("Invalid raft socket address {}.", raft_socket_address);
|
||||
}
|
||||
|
||||
auto const maybe_bolt_server = io::network::Endpoint::ParseSocketOrAddress(bolt_server);
|
||||
if (!maybe_bolt_server) {
|
||||
throw QueryRuntimeException("Invalid bolt socket address!");
|
||||
}
|
||||
|
||||
auto const coord_coord_config =
|
||||
coordination::CoordinatorToCoordinatorConfig{.coordinator_server_id = coordinator_id,
|
||||
.bolt_server = *maybe_bolt_server,
|
||||
.coordinator_server = *maybe_coordinator_server};
|
||||
|
||||
coordinator_handler_.AddCoordinatorInstance(coord_coord_config);
|
||||
spdlog::info("Added instance on coordinator server {}", maybe_coordinator_server->SocketAddress());
|
||||
}
|
||||
|
||||
void SetReplicationInstanceToMain(std::string_view instance_name) override {
|
||||
@ -525,14 +502,6 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
|
||||
"Couldn't set replica instance to main! Check coordinator and replica for more logs");
|
||||
case SWAP_UUID_FAILED:
|
||||
throw QueryRuntimeException("Couldn't set replica instance to main. Replicas didn't swap uuid of new main.");
|
||||
case OPEN_LOCK:
|
||||
throw QueryRuntimeException(
|
||||
"Couldn't set replica instance to main as cluster didn't accept setting instance state.");
|
||||
case LOCK_OPENED:
|
||||
throw QueryRuntimeException(
|
||||
"Couldn't register replica instance because because the last action didn't finish successfully!");
|
||||
case ENABLE_WRITING_FAILED:
|
||||
throw QueryRuntimeException("Instance promoted to MAIN, but couldn't enable writing to instance.");
|
||||
case SUCCESS:
|
||||
break;
|
||||
}
|
||||
@ -548,7 +517,7 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
|
||||
#endif
|
||||
|
||||
/// returns false if the replication role can't be set
|
||||
/// @throw QueryRuntimeException if an error occurred.
|
||||
/// @throw QueryRuntimeException if an error ocurred.
|
||||
|
||||
Callback HandleAuthQuery(AuthQuery *auth_query, InterpreterContext *interpreter_context, const Parameters ¶meters,
|
||||
Interpreter &interpreter) {
|
||||
@ -961,10 +930,10 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
|
||||
switch (repl_query->action_) {
|
||||
case ReplicationQuery::Action::SET_REPLICATION_ROLE: {
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (FLAGS_coordinator_id) {
|
||||
if (FLAGS_raft_server_id) {
|
||||
throw QueryRuntimeException("Coordinator can't set roles!");
|
||||
}
|
||||
if (FLAGS_management_port) {
|
||||
if (FLAGS_coordinator_server_port) {
|
||||
throw QueryRuntimeException("Can't set role manually on instance with coordinator server port.");
|
||||
}
|
||||
#endif
|
||||
@ -991,7 +960,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
|
||||
}
|
||||
case ReplicationQuery::Action::SHOW_REPLICATION_ROLE: {
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (FLAGS_coordinator_id) {
|
||||
if (FLAGS_raft_server_id) {
|
||||
throw QueryRuntimeException("Coordinator doesn't have a replication role!");
|
||||
}
|
||||
#endif
|
||||
@ -1012,7 +981,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
|
||||
}
|
||||
case ReplicationQuery::Action::REGISTER_REPLICA: {
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (FLAGS_management_port) {
|
||||
if (FLAGS_coordinator_server_port) {
|
||||
throw QueryRuntimeException("Can't register replica manually on instance with coordinator server port.");
|
||||
}
|
||||
#endif
|
||||
@ -1033,7 +1002,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
|
||||
|
||||
case ReplicationQuery::Action::DROP_REPLICA: {
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (FLAGS_management_port) {
|
||||
if (FLAGS_coordinator_server_port) {
|
||||
throw QueryRuntimeException("Can't drop replica manually on instance with coordinator server port.");
|
||||
}
|
||||
#endif
|
||||
@ -1048,7 +1017,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
|
||||
}
|
||||
case ReplicationQuery::Action::SHOW_REPLICAS: {
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (FLAGS_coordinator_id) {
|
||||
if (FLAGS_raft_server_id) {
|
||||
throw QueryRuntimeException("Coordinator cannot call SHOW REPLICAS! Use SHOW INSTANCES instead.");
|
||||
}
|
||||
#endif
|
||||
@ -1195,7 +1164,7 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
|
||||
Callback callback;
|
||||
switch (coordinator_query->action_) {
|
||||
case CoordinatorQuery::Action::ADD_COORDINATOR_INSTANCE: {
|
||||
if (!FLAGS_coordinator_id) {
|
||||
if (!FLAGS_raft_server_id) {
|
||||
throw QueryRuntimeException("Only coordinator can add coordinator instance!");
|
||||
}
|
||||
|
||||
@ -1227,9 +1196,8 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
|
||||
auto coord_server_id = coordinator_query->coordinator_server_id_->Accept(evaluator).ValueInt();
|
||||
|
||||
callback.fn = [handler = CoordQueryHandler{*coordinator_state}, coord_server_id,
|
||||
bolt_server = bolt_server_it->second,
|
||||
coordinator_server = coordinator_server_it->second]() mutable {
|
||||
handler.AddCoordinatorInstance(coord_server_id, bolt_server, coordinator_server);
|
||||
handler.AddCoordinatorInstance(coord_server_id, coordinator_server);
|
||||
return std::vector<std::vector<TypedValue>>();
|
||||
};
|
||||
|
||||
@ -1239,7 +1207,7 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
|
||||
return callback;
|
||||
}
|
||||
case CoordinatorQuery::Action::REGISTER_INSTANCE: {
|
||||
if (!FLAGS_coordinator_id) {
|
||||
if (!FLAGS_raft_server_id) {
|
||||
throw QueryRuntimeException("Only coordinator can register coordinator server!");
|
||||
}
|
||||
// TODO: MemoryResource for EvaluationContext, it should probably be passed as
|
||||
@ -1274,15 +1242,15 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
|
||||
|
||||
callback.fn = [handler = CoordQueryHandler{*coordinator_state},
|
||||
instance_health_check_frequency_sec = config.instance_health_check_frequency_sec,
|
||||
bolt_server = bolt_server_it->second, management_server = management_server_it->second,
|
||||
replication_server = replication_server_it->second,
|
||||
management_server = management_server_it->second,
|
||||
replication_server = replication_server_it->second, bolt_server = bolt_server_it->second,
|
||||
instance_name = coordinator_query->instance_name_,
|
||||
instance_down_timeout_sec = config.instance_down_timeout_sec,
|
||||
instance_get_uuid_frequency_sec = config.instance_get_uuid_frequency_sec,
|
||||
sync_mode = coordinator_query->sync_mode_]() mutable {
|
||||
handler.RegisterReplicationInstance(bolt_server, management_server, replication_server,
|
||||
instance_health_check_frequency_sec, instance_down_timeout_sec,
|
||||
instance_get_uuid_frequency_sec, instance_name, sync_mode);
|
||||
handler.RegisterReplicationInstance(management_server, replication_server, instance_health_check_frequency_sec,
|
||||
instance_down_timeout_sec, instance_get_uuid_frequency_sec, instance_name,
|
||||
sync_mode);
|
||||
return std::vector<std::vector<TypedValue>>();
|
||||
};
|
||||
|
||||
@ -1292,7 +1260,7 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
|
||||
return callback;
|
||||
}
|
||||
case CoordinatorQuery::Action::UNREGISTER_INSTANCE:
|
||||
if (!FLAGS_coordinator_id) {
|
||||
if (!FLAGS_raft_server_id) {
|
||||
throw QueryRuntimeException("Only coordinator can register coordinator server!");
|
||||
}
|
||||
callback.fn = [handler = CoordQueryHandler{*coordinator_state},
|
||||
@ -1307,7 +1275,7 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
|
||||
return callback;
|
||||
|
||||
case CoordinatorQuery::Action::SET_INSTANCE_TO_MAIN: {
|
||||
if (!FLAGS_coordinator_id) {
|
||||
if (!FLAGS_raft_server_id) {
|
||||
throw QueryRuntimeException("Only coordinator can register coordinator server!");
|
||||
}
|
||||
// TODO: MemoryResource for EvaluationContext, it should probably be passed as
|
||||
@ -1324,7 +1292,7 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
|
||||
return callback;
|
||||
}
|
||||
case CoordinatorQuery::Action::SHOW_INSTANCES: {
|
||||
if (!FLAGS_coordinator_id) {
|
||||
if (!FLAGS_raft_server_id) {
|
||||
throw QueryRuntimeException("Only coordinator can run SHOW INSTANCES.");
|
||||
}
|
||||
|
||||
@ -1786,10 +1754,12 @@ PullPlan::PullPlan(const std::shared_ptr<PlanWrapper> plan, const Parameters &pa
|
||||
ctx_.evaluation_context.parameters = parameters;
|
||||
ctx_.evaluation_context.properties = NamesToProperties(plan->ast_storage().properties_, dba);
|
||||
ctx_.evaluation_context.labels = NamesToLabels(plan->ast_storage().labels_, dba);
|
||||
ctx_.user_or_role = user_or_role;
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (license::global_license_checker.IsEnterpriseValidFast() && user_or_role && *user_or_role && dba) {
|
||||
// Create only if an explicit user is defined
|
||||
auto auth_checker = interpreter_context->auth_checker->GetFineGrainedAuthChecker(std::move(user_or_role), dba);
|
||||
auto auth_checker = interpreter_context->auth_checker->GetFineGrainedAuthChecker(user_or_role, dba);
|
||||
|
||||
// if the user has global privileges to read, edit and write anything, we don't need to perform authorization
|
||||
// otherwise, we do assign the auth checker to check for label access control
|
||||
@ -2741,75 +2711,6 @@ PreparedQuery PrepareEdgeIndexQuery(ParsedQuery parsed_query, bool in_explicit_t
|
||||
RWType::W};
|
||||
}
|
||||
|
||||
PreparedQuery PrepareTextIndexQuery(ParsedQuery parsed_query, bool in_explicit_transaction,
|
||||
std::vector<Notification> *notifications, CurrentDB ¤t_db) {
|
||||
if (in_explicit_transaction) {
|
||||
throw IndexInMulticommandTxException();
|
||||
}
|
||||
|
||||
auto *text_index_query = utils::Downcast<TextIndexQuery>(parsed_query.query);
|
||||
std::function<void(Notification &)> handler;
|
||||
|
||||
// TODO: we will need transaction for replication
|
||||
MG_ASSERT(current_db.db_acc_, "Text index query expects a current DB");
|
||||
auto &db_acc = *current_db.db_acc_;
|
||||
|
||||
MG_ASSERT(current_db.db_transactional_accessor_, "Text index query expects a current DB transaction");
|
||||
auto *dba = &*current_db.execution_db_accessor_;
|
||||
|
||||
// Creating an index influences computed plan costs.
|
||||
auto invalidate_plan_cache = [plan_cache = db_acc->plan_cache()] {
|
||||
plan_cache->WithLock([&](auto &cache) { cache.reset(); });
|
||||
};
|
||||
|
||||
auto *storage = db_acc->storage();
|
||||
auto label = storage->NameToLabel(text_index_query->label_.name);
|
||||
auto &index_name = text_index_query->index_name_;
|
||||
|
||||
Notification index_notification(SeverityLevel::INFO);
|
||||
switch (text_index_query->action_) {
|
||||
case TextIndexQuery::Action::CREATE: {
|
||||
index_notification.code = NotificationCode::CREATE_INDEX;
|
||||
index_notification.title = fmt::format("Created text index on label {}.", text_index_query->label_.name);
|
||||
// TODO: not just storage + invalidate_plan_cache. Need a DB transaction (for replication)
|
||||
handler = [dba, label, index_name,
|
||||
invalidate_plan_cache = std::move(invalidate_plan_cache)](Notification &index_notification) {
|
||||
if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
throw TextSearchDisabledException();
|
||||
}
|
||||
dba->CreateTextIndex(index_name, label);
|
||||
utils::OnScopeExit invalidator(invalidate_plan_cache);
|
||||
};
|
||||
break;
|
||||
}
|
||||
case TextIndexQuery::Action::DROP: {
|
||||
index_notification.code = NotificationCode::DROP_INDEX;
|
||||
index_notification.title = fmt::format("Dropped text index on label {}.", text_index_query->label_.name);
|
||||
// TODO: not just storage + invalidate_plan_cache. Need a DB transaction (for replication)
|
||||
handler = [dba, index_name,
|
||||
invalidate_plan_cache = std::move(invalidate_plan_cache)](Notification &index_notification) {
|
||||
if (!flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
throw TextSearchDisabledException();
|
||||
}
|
||||
dba->DropTextIndex(index_name);
|
||||
utils::OnScopeExit invalidator(invalidate_plan_cache);
|
||||
};
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return PreparedQuery{
|
||||
{},
|
||||
std::move(parsed_query.required_privileges),
|
||||
[handler = std::move(handler), notifications, index_notification = std::move(index_notification)](
|
||||
AnyStream * /*stream*/, std::optional<int> /*unused*/) mutable {
|
||||
handler(index_notification);
|
||||
notifications->push_back(index_notification);
|
||||
return QueryHandlerResult::COMMIT; // TODO: Will need to become COMMIT when we fix replication
|
||||
},
|
||||
RWType::W};
|
||||
}
|
||||
|
||||
PreparedQuery PrepareAuthQuery(ParsedQuery parsed_query, bool in_explicit_transaction,
|
||||
InterpreterContext *interpreter_context, Interpreter &interpreter) {
|
||||
if (in_explicit_transaction) {
|
||||
@ -3600,7 +3501,7 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici
|
||||
}
|
||||
|
||||
MG_ASSERT(current_db.db_acc_, "Database info query expects a current DB");
|
||||
MG_ASSERT(current_db.db_transactional_accessor_, "Database info query expects a current DB transaction");
|
||||
MG_ASSERT(current_db.db_transactional_accessor_, "Database ifo query expects a current DB transaction");
|
||||
auto *dba = &*current_db.execution_db_accessor_;
|
||||
|
||||
auto *info_query = utils::Downcast<DatabaseInfoQuery>(parsed_query.query);
|
||||
@ -3615,11 +3516,10 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici
|
||||
const std::string_view label_index_mark{"label"};
|
||||
const std::string_view label_property_index_mark{"label+property"};
|
||||
const std::string_view edge_type_index_mark{"edge-type"};
|
||||
const std::string_view text_index_mark{"text"};
|
||||
auto info = dba->ListAllIndices();
|
||||
auto storage_acc = database->Access();
|
||||
std::vector<std::vector<TypedValue>> results;
|
||||
results.reserve(info.label.size() + info.label_property.size() + info.text_indices.size());
|
||||
results.reserve(info.label.size() + info.label_property.size());
|
||||
for (const auto &item : info.label) {
|
||||
results.push_back({TypedValue(label_index_mark), TypedValue(storage->LabelToName(item)), TypedValue(),
|
||||
TypedValue(static_cast<int>(storage_acc->ApproximateVertexCount(item)))});
|
||||
@ -3634,10 +3534,6 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici
|
||||
results.push_back({TypedValue(edge_type_index_mark), TypedValue(storage->EdgeTypeToName(item)), TypedValue(),
|
||||
TypedValue(static_cast<int>(storage_acc->ApproximateEdgeCount(item)))});
|
||||
}
|
||||
for (const auto &[index_name, label] : info.text_indices) {
|
||||
results.push_back({TypedValue(fmt::format("{} (name: {})", text_index_mark, index_name)),
|
||||
TypedValue(storage->LabelToName(label)), TypedValue(), TypedValue()});
|
||||
}
|
||||
std::sort(results.begin(), results.end(), [&label_index_mark](const auto &record_1, const auto &record_2) {
|
||||
const auto type_1 = record_1[0].ValueString();
|
||||
const auto type_2 = record_2[0].ValueString();
|
||||
@ -4297,28 +4193,6 @@ void Interpreter::RollbackTransaction() {
|
||||
ResetInterpreter();
|
||||
}
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
auto Interpreter::Route(std::map<std::string, std::string> const &routing) -> RouteResult {
|
||||
// TODO: (andi) Test
|
||||
if (!FLAGS_coordinator_id) {
|
||||
auto const &address = routing.find("address");
|
||||
if (address == routing.end()) {
|
||||
throw QueryException("Routing table must contain address field.");
|
||||
}
|
||||
|
||||
auto result = RouteResult{};
|
||||
if (interpreter_context_->repl_state->IsMain()) {
|
||||
result.servers.emplace_back(std::vector<std::string>{address->second}, "WRITE");
|
||||
} else {
|
||||
result.servers.emplace_back(std::vector<std::string>{address->second}, "READ");
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
return RouteResult{.servers = interpreter_context_->coordinator_state_->GetRoutingTable(routing)};
|
||||
}
|
||||
#endif
|
||||
|
||||
#if MG_ENTERPRISE
|
||||
// Before Prepare or during Prepare, but single-threaded.
|
||||
// TODO: Is there any cleanup?
|
||||
@ -4421,22 +4295,20 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
|
||||
utils::Downcast<ProfileQuery>(parsed_query.query) || utils::Downcast<DumpQuery>(parsed_query.query) ||
|
||||
utils::Downcast<TriggerQuery>(parsed_query.query) || utils::Downcast<AnalyzeGraphQuery>(parsed_query.query) ||
|
||||
utils::Downcast<IndexQuery>(parsed_query.query) || utils::Downcast<EdgeIndexQuery>(parsed_query.query) ||
|
||||
utils::Downcast<TextIndexQuery>(parsed_query.query) || utils::Downcast<DatabaseInfoQuery>(parsed_query.query) ||
|
||||
utils::Downcast<ConstraintQuery>(parsed_query.query);
|
||||
utils::Downcast<DatabaseInfoQuery>(parsed_query.query) || utils::Downcast<ConstraintQuery>(parsed_query.query);
|
||||
|
||||
if (!in_explicit_transaction_ && requires_db_transaction) {
|
||||
// TODO: ATM only a single database, will change when we have multiple database transactions
|
||||
bool could_commit = utils::Downcast<CypherQuery>(parsed_query.query) != nullptr;
|
||||
bool unique = utils::Downcast<IndexQuery>(parsed_query.query) != nullptr ||
|
||||
utils::Downcast<EdgeIndexQuery>(parsed_query.query) != nullptr ||
|
||||
utils::Downcast<TextIndexQuery>(parsed_query.query) != nullptr ||
|
||||
utils::Downcast<ConstraintQuery>(parsed_query.query) != nullptr ||
|
||||
upper_case_query.find(kSchemaAssert) != std::string::npos;
|
||||
SetupDatabaseTransaction(could_commit, unique);
|
||||
}
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (FLAGS_coordinator_id && !utils::Downcast<CoordinatorQuery>(parsed_query.query) &&
|
||||
if (FLAGS_raft_server_id && !utils::Downcast<CoordinatorQuery>(parsed_query.query) &&
|
||||
!utils::Downcast<SettingQuery>(parsed_query.query)) {
|
||||
throw QueryRuntimeException("Coordinator can run only coordinator queries!");
|
||||
}
|
||||
@ -4467,9 +4339,6 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
|
||||
} else if (utils::Downcast<EdgeIndexQuery>(parsed_query.query)) {
|
||||
prepared_query = PrepareEdgeIndexQuery(std::move(parsed_query), in_explicit_transaction_,
|
||||
&query_execution->notifications, current_db_);
|
||||
} else if (utils::Downcast<TextIndexQuery>(parsed_query.query)) {
|
||||
prepared_query = PrepareTextIndexQuery(std::move(parsed_query), in_explicit_transaction_,
|
||||
&query_execution->notifications, current_db_);
|
||||
} else if (utils::Downcast<AnalyzeGraphQuery>(parsed_query.query)) {
|
||||
prepared_query = PrepareAnalyzeGraphQuery(std::move(parsed_query), in_explicit_transaction_, current_db_);
|
||||
} else if (utils::Downcast<AuthQuery>(parsed_query.query)) {
|
||||
@ -4567,7 +4436,7 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
|
||||
throw QueryException("Write query forbidden on the replica!");
|
||||
}
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (FLAGS_management_port && !interpreter_context_->repl_state->IsMainWriteable()) {
|
||||
if (FLAGS_coordinator_server_port && !interpreter_context_->repl_state->IsMainWriteable()) {
|
||||
query_execution = nullptr;
|
||||
throw QueryException(
|
||||
"Write query forbidden on the main! Coordinator needs to enable writing on main by sending RPC message.");
|
||||
|
@ -143,8 +143,8 @@ class CoordinatorQueryHandler {
|
||||
};
|
||||
|
||||
/// @throw QueryRuntimeException if an error ocurred.
|
||||
virtual void RegisterReplicationInstance(std::string_view bolt_server, std::string_view management_server,
|
||||
std::string_view replication_server,
|
||||
virtual void RegisterReplicationInstance(std::string_view coordinator_socket_address,
|
||||
std::string_view replication_socket_address,
|
||||
std::chrono::seconds const &instance_health_check_frequency,
|
||||
std::chrono::seconds const &instance_down_timeout,
|
||||
std::chrono::seconds const &instance_get_uuid_frequency,
|
||||
@ -160,8 +160,7 @@ class CoordinatorQueryHandler {
|
||||
virtual std::vector<coordination::InstanceStatus> ShowInstances() const = 0;
|
||||
|
||||
/// @throw QueryRuntimeException if an error ocurred.
|
||||
virtual auto AddCoordinatorInstance(uint32_t coordinator_id, std::string_view bolt_server,
|
||||
std::string_view coordinator_server) -> void = 0;
|
||||
virtual auto AddCoordinatorInstance(uint32_t raft_server_id, std::string_view coordinator_socket_address) -> void = 0;
|
||||
};
|
||||
#endif
|
||||
|
||||
@ -248,14 +247,6 @@ class Interpreter final {
|
||||
std::optional<std::string> db;
|
||||
};
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
struct RouteResult {
|
||||
int ttl{300};
|
||||
std::string db{}; // Currently not used since we don't have any specific replication groups etc.
|
||||
coordination::RoutingTable servers{};
|
||||
};
|
||||
#endif
|
||||
|
||||
std::shared_ptr<QueryUserOrRole> user_or_role_{};
|
||||
bool in_explicit_transaction_{false};
|
||||
CurrentDB current_db_;
|
||||
@ -281,10 +272,6 @@ class Interpreter final {
|
||||
const std::map<std::string, storage::PropertyValue> ¶ms,
|
||||
QueryExtras const &extras);
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
auto Route(std::map<std::string, std::string> const &routing) -> RouteResult;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Execute the last prepared query and stream *all* of the results into the
|
||||
* given stream.
|
||||
|
@ -15,6 +15,8 @@
|
||||
#include "system/include/system/system.hpp"
|
||||
namespace memgraph::query {
|
||||
|
||||
InterpreterContext *InterpreterContext::instance = nullptr;
|
||||
|
||||
InterpreterContext::InterpreterContext(InterpreterConfig interpreter_config, dbms::DbmsHandler *dbms_handler,
|
||||
replication::ReplicationState *rs, memgraph::system::System &system,
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include "storage/v2/transaction.hpp"
|
||||
#include "system/state.hpp"
|
||||
#include "system/system.hpp"
|
||||
#include "utils/exceptions.hpp"
|
||||
#include "utils/gatekeeper.hpp"
|
||||
#include "utils/skip_list.hpp"
|
||||
#include "utils/spin_lock.hpp"
|
||||
@ -54,13 +55,30 @@ struct QueryUserOrRole;
|
||||
*
|
||||
*/
|
||||
struct InterpreterContext {
|
||||
InterpreterContext(InterpreterConfig interpreter_config, dbms::DbmsHandler *dbms_handler,
|
||||
replication::ReplicationState *rs, memgraph::system::System &system,
|
||||
static InterpreterContext *instance;
|
||||
|
||||
static InterpreterContext *getInstance() {
|
||||
MG_ASSERT(instance != nullptr, "Interpreter context has not been initialized!");
|
||||
return instance;
|
||||
}
|
||||
|
||||
static InterpreterContext *getInstance(InterpreterConfig interpreter_config, dbms::DbmsHandler *dbms_handler,
|
||||
replication::ReplicationState *rs, memgraph::system::System &system,
|
||||
#ifdef MG_ENTERPRISE
|
||||
memgraph::coordination::CoordinatorState *coordinator_state,
|
||||
memgraph::coordination::CoordinatorState *coordinator_state,
|
||||
#endif
|
||||
AuthQueryHandler *ah = nullptr, AuthChecker *ac = nullptr,
|
||||
ReplicationQueryHandler *replication_handler = nullptr);
|
||||
AuthQueryHandler *ah = nullptr, AuthChecker *ac = nullptr,
|
||||
ReplicationQueryHandler *replication_handler = nullptr) {
|
||||
if (instance == nullptr) {
|
||||
instance = new InterpreterContext(interpreter_config, dbms_handler, rs, system,
|
||||
#ifdef MG_ENTERPRISE
|
||||
coordinator_state,
|
||||
#endif
|
||||
ah, ac, replication_handler);
|
||||
}
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
memgraph::dbms::DbmsHandler *dbms_handler;
|
||||
|
||||
@ -98,6 +116,14 @@ struct InterpreterContext {
|
||||
std::vector<std::vector<TypedValue>> TerminateTransactions(
|
||||
std::vector<std::string> maybe_kill_transaction_ids, QueryUserOrRole *user_or_role,
|
||||
std::function<bool(QueryUserOrRole *, std::string const &)> privilege_checker);
|
||||
};
|
||||
|
||||
private:
|
||||
InterpreterContext(InterpreterConfig interpreter_config, dbms::DbmsHandler *dbms_handler,
|
||||
replication::ReplicationState *rs, memgraph::system::System &system,
|
||||
#ifdef MG_ENTERPRISE
|
||||
memgraph::coordination::CoordinatorState *coordinator_state,
|
||||
#endif
|
||||
AuthQueryHandler *ah = nullptr, AuthChecker *ac = nullptr,
|
||||
ReplicationQueryHandler *replication_handler = nullptr);
|
||||
};
|
||||
} // namespace memgraph::query
|
||||
|
@ -32,7 +32,6 @@
|
||||
#include "spdlog/spdlog.h"
|
||||
|
||||
#include "csv/parsing.hpp"
|
||||
#include "flags/experimental.hpp"
|
||||
#include "license/license.hpp"
|
||||
#include "query/auth_checker.hpp"
|
||||
#include "query/context.hpp"
|
||||
@ -267,10 +266,6 @@ VertexAccessor &CreateLocalVertex(const NodeCreationInfo &node_info, Frame *fram
|
||||
}
|
||||
MultiPropsInitChecked(&new_node, properties);
|
||||
|
||||
if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
context.db_accessor->TextIndexAddVertex(new_node);
|
||||
}
|
||||
|
||||
(*frame)[node_info.symbol] = new_node;
|
||||
return (*frame)[node_info.symbol].ValueVertex();
|
||||
}
|
||||
@ -334,7 +329,7 @@ CreateExpand::CreateExpand(NodeCreationInfo node_info, EdgeCreationInfo edge_inf
|
||||
ACCEPT_WITH_INPUT(CreateExpand)
|
||||
|
||||
UniqueCursorPtr CreateExpand::MakeCursor(utils::MemoryResource *mem) const {
|
||||
memgraph::metrics::IncrementCounter(memgraph::metrics::CreateExpandOperator);
|
||||
memgraph::metrics::IncrementCounter(memgraph::metrics::CreateNodeOperator);
|
||||
|
||||
return MakeUniqueCursorPtr<CreateExpandCursor>(mem, *this, mem);
|
||||
}
|
||||
@ -2996,9 +2991,6 @@ bool SetProperty::SetPropertyCursor::Pull(Frame &frame, ExecutionContext &contex
|
||||
context.trigger_context_collector->RegisterSetObjectProperty(lhs.ValueVertex(), self_.property_,
|
||||
TypedValue{std::move(old_value)}, TypedValue{rhs});
|
||||
}
|
||||
if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
context.db_accessor->TextIndexUpdateVertex(lhs.ValueVertex());
|
||||
}
|
||||
break;
|
||||
}
|
||||
case TypedValue::Type::Edge: {
|
||||
@ -3155,9 +3147,6 @@ void SetPropertiesOnRecord(TRecordAccessor *record, const TypedValue &rhs, SetPr
|
||||
case TypedValue::Type::Vertex: {
|
||||
PropertiesMap new_properties = get_props(rhs.ValueVertex());
|
||||
update_props(new_properties);
|
||||
if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
context->db_accessor->TextIndexUpdateVertex(rhs.ValueVertex());
|
||||
}
|
||||
break;
|
||||
}
|
||||
case TypedValue::Type::Map: {
|
||||
@ -3215,9 +3204,6 @@ bool SetProperties::SetPropertiesCursor::Pull(Frame &frame, ExecutionContext &co
|
||||
}
|
||||
#endif
|
||||
SetPropertiesOnRecord(&lhs.ValueVertex(), rhs, self_.op_, &context, cached_name_id_);
|
||||
if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
context.db_accessor->TextIndexUpdateVertex(lhs.ValueVertex());
|
||||
}
|
||||
break;
|
||||
case TypedValue::Type::Edge:
|
||||
#ifdef MG_ENTERPRISE
|
||||
@ -3309,10 +3295,6 @@ bool SetLabels::SetLabelsCursor::Pull(Frame &frame, ExecutionContext &context) {
|
||||
}
|
||||
}
|
||||
|
||||
if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
context.db_accessor->TextIndexUpdateVertex(vertex);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -3384,9 +3366,6 @@ bool RemoveProperty::RemovePropertyCursor::Pull(Frame &frame, ExecutionContext &
|
||||
}
|
||||
#endif
|
||||
remove_prop(&lhs.ValueVertex());
|
||||
if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
context.db_accessor->TextIndexUpdateVertex(lhs.ValueVertex());
|
||||
}
|
||||
break;
|
||||
case TypedValue::Type::Edge:
|
||||
#ifdef MG_ENTERPRISE
|
||||
@ -3479,10 +3458,6 @@ bool RemoveLabels::RemoveLabelsCursor::Pull(Frame &frame, ExecutionContext &cont
|
||||
}
|
||||
}
|
||||
|
||||
if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
context.db_accessor->TextIndexUpdateVertex(vertex, EvaluateLabels(self_.labels_, evaluator, context.db_accessor));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -23,18 +23,18 @@
|
||||
#include <utility>
|
||||
#include <variant>
|
||||
|
||||
#include "flags/experimental.hpp"
|
||||
#include "flags/run_time_configurable.hpp"
|
||||
#include "glue/auth.hpp"
|
||||
#include "license/license.hpp"
|
||||
#include "mg_procedure.h"
|
||||
#include "module.hpp"
|
||||
#include "query/db_accessor.hpp"
|
||||
#include "query/frontend/ast/ast.hpp"
|
||||
#include "query/interpreter.hpp"
|
||||
#include "query/interpreter_context.hpp"
|
||||
#include "query/procedure/cypher_types.hpp"
|
||||
#include "query/procedure/fmt.hpp"
|
||||
#include "query/procedure/mg_procedure_helpers.hpp"
|
||||
#include "query/stream/common.hpp"
|
||||
#include "storage/v2/indices/text_index.hpp"
|
||||
#include "storage/v2/property_value.hpp"
|
||||
#include "storage/v2/storage_mode.hpp"
|
||||
#include "storage/v2/view.hpp"
|
||||
@ -1846,11 +1846,6 @@ mgp_error mgp_vertex_set_property(struct mgp_vertex *v, const char *property_nam
|
||||
const auto result = std::visit(
|
||||
[prop_key, property_value](auto &impl) { return impl.SetProperty(prop_key, ToPropertyValue(*property_value)); },
|
||||
v->impl);
|
||||
if (memgraph::flags::AreExperimentsEnabled(memgraph::flags::Experiments::TEXT_SEARCH) && !result.HasError()) {
|
||||
auto v_impl = v->getImpl();
|
||||
v->graph->getImpl()->TextIndexUpdateVertex(v_impl);
|
||||
}
|
||||
|
||||
if (result.HasError()) {
|
||||
switch (result.GetError()) {
|
||||
case memgraph::storage::Error::DELETED_OBJECT:
|
||||
@ -1907,11 +1902,6 @@ mgp_error mgp_vertex_set_properties(struct mgp_vertex *v, struct mgp_map *proper
|
||||
}
|
||||
|
||||
const auto result = v->getImpl().UpdateProperties(props);
|
||||
if (memgraph::flags::AreExperimentsEnabled(memgraph::flags::Experiments::TEXT_SEARCH) && !result.HasError()) {
|
||||
auto v_impl = v->getImpl();
|
||||
v->graph->getImpl()->TextIndexUpdateVertex(v_impl);
|
||||
}
|
||||
|
||||
if (result.HasError()) {
|
||||
switch (result.GetError()) {
|
||||
case memgraph::storage::Error::DELETED_OBJECT:
|
||||
@ -1969,10 +1959,6 @@ mgp_error mgp_vertex_add_label(struct mgp_vertex *v, mgp_label label) {
|
||||
}
|
||||
|
||||
const auto result = std::visit([label_id](auto &impl) { return impl.AddLabel(label_id); }, v->impl);
|
||||
if (memgraph::flags::AreExperimentsEnabled(memgraph::flags::Experiments::TEXT_SEARCH) && !result.HasError()) {
|
||||
auto v_impl = v->getImpl();
|
||||
v->graph->getImpl()->TextIndexUpdateVertex(v_impl);
|
||||
}
|
||||
|
||||
if (result.HasError()) {
|
||||
switch (result.GetError()) {
|
||||
@ -2015,10 +2001,6 @@ mgp_error mgp_vertex_remove_label(struct mgp_vertex *v, mgp_label label) {
|
||||
throw ImmutableObjectException{"Cannot remove a label from an immutable vertex!"};
|
||||
}
|
||||
const auto result = std::visit([label_id](auto &impl) { return impl.RemoveLabel(label_id); }, v->impl);
|
||||
if (memgraph::flags::AreExperimentsEnabled(memgraph::flags::Experiments::TEXT_SEARCH) && !result.HasError()) {
|
||||
auto v_impl = v->getImpl();
|
||||
v->graph->getImpl()->TextIndexUpdateVertex(v_impl, {label_id});
|
||||
}
|
||||
|
||||
if (result.HasError()) {
|
||||
switch (result.GetError()) {
|
||||
@ -2611,7 +2593,7 @@ mgp_error mgp_edge_iter_properties(mgp_edge *e, mgp_memory *memory, mgp_properti
|
||||
mgp_error mgp_graph_get_vertex_by_id(mgp_graph *graph, mgp_vertex_id id, mgp_memory *memory, mgp_vertex **result) {
|
||||
return WrapExceptions(
|
||||
[graph, id, memory]() -> mgp_vertex * {
|
||||
auto maybe_vertex = std::visit(
|
||||
std::optional<memgraph::query::VertexAccessor> maybe_vertex = std::visit(
|
||||
[graph, id](auto *impl) {
|
||||
return impl->FindVertex(memgraph::storage::Gid::FromInt(id.as_int), graph->view);
|
||||
},
|
||||
@ -2988,10 +2970,6 @@ mgp_error mgp_graph_create_vertex(struct mgp_graph *graph, mgp_memory *memory, m
|
||||
}
|
||||
auto *vertex = std::visit(
|
||||
[=](auto *impl) { return NewRawMgpObject<mgp_vertex>(memory, impl->InsertVertex(), graph); }, graph->impl);
|
||||
if (memgraph::flags::AreExperimentsEnabled(memgraph::flags::Experiments::TEXT_SEARCH)) {
|
||||
auto v_impl = vertex->getImpl();
|
||||
vertex->graph->getImpl()->TextIndexAddVertex(v_impl);
|
||||
}
|
||||
|
||||
auto &ctx = graph->ctx;
|
||||
ctx->execution_stats[memgraph::query::ExecutionStats::Key::CREATED_NODES] += 1;
|
||||
@ -3349,140 +3327,6 @@ mgp_error mgp_graph_delete_edge(struct mgp_graph *graph, mgp_edge *edge) {
|
||||
});
|
||||
}
|
||||
|
||||
mgp_error mgp_graph_has_text_index(mgp_graph *graph, const char *index_name, int *result) {
|
||||
return WrapExceptions([graph, index_name, result]() {
|
||||
std::visit(memgraph::utils::Overloaded{
|
||||
[&](memgraph::query::DbAccessor *impl) { *result = impl->TextIndexExists(index_name); },
|
||||
[&](memgraph::query::SubgraphDbAccessor *impl) {
|
||||
*result = impl->GetAccessor()->TextIndexExists(index_name);
|
||||
}},
|
||||
graph->impl);
|
||||
});
|
||||
}
|
||||
|
||||
mgp_vertex *GetVertexByGid(mgp_graph *graph, memgraph::storage::Gid id, mgp_memory *memory) {
|
||||
auto get_vertex_by_gid = memgraph::utils::Overloaded{
|
||||
[graph, id, memory](memgraph::query::DbAccessor *impl) -> mgp_vertex * {
|
||||
auto maybe_vertex = impl->FindVertex(id, graph->view);
|
||||
if (!maybe_vertex) return nullptr;
|
||||
return NewRawMgpObject<mgp_vertex>(memory, *maybe_vertex, graph);
|
||||
},
|
||||
[graph, id, memory](memgraph::query::SubgraphDbAccessor *impl) -> mgp_vertex * {
|
||||
auto maybe_vertex = impl->FindVertex(id, graph->view);
|
||||
if (!maybe_vertex) return nullptr;
|
||||
return NewRawMgpObject<mgp_vertex>(
|
||||
memory, memgraph::query::SubgraphVertexAccessor(*maybe_vertex, impl->getGraph()), graph);
|
||||
}};
|
||||
return std::visit(get_vertex_by_gid, graph->impl);
|
||||
}
|
||||
|
||||
void WrapTextSearch(mgp_graph *graph, mgp_memory *memory, mgp_map **result,
|
||||
const std::vector<memgraph::storage::Gid> &vertex_ids = {},
|
||||
const std::optional<std::string> &error_msg = std::nullopt) {
|
||||
if (const auto err = mgp_map_make_empty(memory, result); err != mgp_error::MGP_ERROR_NO_ERROR) {
|
||||
throw std::logic_error("Retrieving text search results failed during creation of a mgp_map");
|
||||
}
|
||||
|
||||
mgp_value *error_value;
|
||||
if (error_msg.has_value()) {
|
||||
if (const auto err = mgp_value_make_string(error_msg.value().data(), memory, &error_value);
|
||||
err != mgp_error::MGP_ERROR_NO_ERROR) {
|
||||
throw std::logic_error("Retrieving text search results failed during creation of a string mgp_value");
|
||||
}
|
||||
}
|
||||
|
||||
mgp_list *search_results{};
|
||||
if (const auto err = mgp_list_make_empty(vertex_ids.size(), memory, &search_results);
|
||||
err != mgp_error::MGP_ERROR_NO_ERROR) {
|
||||
throw std::logic_error("Retrieving text search results failed during creation of a mgp_list");
|
||||
}
|
||||
|
||||
for (const auto &vertex_id : vertex_ids) {
|
||||
mgp_value *vertex;
|
||||
if (const auto err = mgp_value_make_vertex(GetVertexByGid(graph, vertex_id, memory), &vertex);
|
||||
err != mgp_error::MGP_ERROR_NO_ERROR) {
|
||||
throw std::logic_error("Retrieving text search results failed during creation of a vertex mgp_value");
|
||||
}
|
||||
if (const auto err = mgp_list_append(search_results, vertex); err != mgp_error::MGP_ERROR_NO_ERROR) {
|
||||
throw std::logic_error(
|
||||
"Retrieving text search results failed during insertion of the mgp_value into the result list");
|
||||
}
|
||||
}
|
||||
|
||||
mgp_value *search_results_value;
|
||||
if (const auto err = mgp_value_make_list(search_results, &search_results_value);
|
||||
err != mgp_error::MGP_ERROR_NO_ERROR) {
|
||||
throw std::logic_error("Retrieving text search results failed during creation of a list mgp_value");
|
||||
}
|
||||
|
||||
if (error_msg.has_value()) {
|
||||
if (const auto err = mgp_map_insert(*result, "error_msg", error_value); err != mgp_error::MGP_ERROR_NO_ERROR) {
|
||||
throw std::logic_error("Retrieving text index search error failed during insertion into mgp_map");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (const auto err = mgp_map_insert(*result, "search_results", search_results_value);
|
||||
err != mgp_error::MGP_ERROR_NO_ERROR) {
|
||||
throw std::logic_error("Retrieving text index search results failed during insertion into mgp_map");
|
||||
}
|
||||
}
|
||||
|
||||
void WrapTextIndexAggregation(mgp_memory *memory, mgp_map **result, const std::string &aggregation_result,
|
||||
const std::optional<std::string> &error_msg = std::nullopt) {
|
||||
if (const auto err = mgp_map_make_empty(memory, result); err != mgp_error::MGP_ERROR_NO_ERROR) {
|
||||
throw std::logic_error("Retrieving text search results failed during creation of a mgp_map");
|
||||
}
|
||||
|
||||
mgp_value *aggregation_result_or_error_value;
|
||||
if (const auto err = mgp_value_make_string(error_msg.value_or(aggregation_result).data(), memory,
|
||||
&aggregation_result_or_error_value);
|
||||
err != mgp_error::MGP_ERROR_NO_ERROR) {
|
||||
throw std::logic_error("Retrieving text search results failed during creation of a string mgp_value");
|
||||
}
|
||||
|
||||
if (error_msg.has_value()) {
|
||||
if (const auto err = mgp_map_insert(*result, "error_msg", aggregation_result_or_error_value);
|
||||
err != mgp_error::MGP_ERROR_NO_ERROR) {
|
||||
throw std::logic_error("Retrieving text index aggregation error failed during insertion into mgp_map");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (const auto err = mgp_map_insert(*result, "aggregation_results", aggregation_result_or_error_value);
|
||||
err != mgp_error::MGP_ERROR_NO_ERROR) {
|
||||
throw std::logic_error("Retrieving text index aggregation results failed during insertion into mgp_map");
|
||||
}
|
||||
}
|
||||
|
||||
mgp_error mgp_graph_search_text_index(mgp_graph *graph, const char *index_name, const char *search_query,
|
||||
text_search_mode search_mode, mgp_memory *memory, mgp_map **result) {
|
||||
return WrapExceptions([graph, memory, index_name, search_query, search_mode, result]() {
|
||||
std::vector<memgraph::storage::Gid> found_vertices_ids;
|
||||
std::optional<std::string> error_msg = std::nullopt;
|
||||
try {
|
||||
found_vertices_ids = graph->getImpl()->TextIndexSearch(index_name, search_query, search_mode);
|
||||
} catch (memgraph::query::QueryException &e) {
|
||||
error_msg = e.what();
|
||||
}
|
||||
WrapTextSearch(graph, memory, result, found_vertices_ids, error_msg);
|
||||
});
|
||||
}
|
||||
|
||||
mgp_error mgp_graph_aggregate_over_text_index(mgp_graph *graph, const char *index_name, const char *search_query,
|
||||
const char *aggregation_query, mgp_memory *memory, mgp_map **result) {
|
||||
return WrapExceptions([graph, memory, index_name, search_query, aggregation_query, result]() {
|
||||
std::string search_results;
|
||||
std::optional<std::string> error_msg = std::nullopt;
|
||||
try {
|
||||
search_results = graph->getImpl()->TextIndexAggregate(index_name, search_query, aggregation_query);
|
||||
} catch (memgraph::query::QueryException &e) {
|
||||
error_msg = e.what();
|
||||
}
|
||||
WrapTextIndexAggregation(memory, result, search_results, error_msg);
|
||||
});
|
||||
}
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
namespace {
|
||||
void NextPermitted(mgp_vertices_iterator &it) {
|
||||
@ -4180,3 +4024,131 @@ mgp_error mgp_untrack_current_thread_allocations(mgp_graph *graph) {
|
||||
std::visit([](auto *db_accessor) -> void { db_accessor->UntrackCurrentThreadAllocations(); }, graph->impl);
|
||||
});
|
||||
}
|
||||
|
||||
mgp_execution_headers::mgp_execution_headers(memgraph::utils::pmr::vector<memgraph::utils::pmr::string> &&storage)
|
||||
: headers(std::move(storage)){};
|
||||
|
||||
mgp_error mgp_execution_headers_size(mgp_execution_headers *headers, size_t *result) {
|
||||
static_assert(noexcept(headers->headers.size()));
|
||||
*result = headers->headers.size();
|
||||
return mgp_error::MGP_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
mgp_error mgp_execution_headers_at(mgp_execution_headers *headers, size_t index, const char **result) {
|
||||
return WrapExceptions(
|
||||
[headers, index] {
|
||||
if (index >= Call<size_t>(mgp_execution_headers_size, headers)) {
|
||||
throw std::out_of_range("Header cannot be retrieved, because index exceeds headers' size!");
|
||||
}
|
||||
return headers->headers[index].data();
|
||||
},
|
||||
result);
|
||||
}
|
||||
|
||||
mgp_execution_rows::mgp_execution_rows(
|
||||
memgraph::utils::pmr::vector<memgraph::utils::pmr::vector<memgraph::query::TypedValue>> &&tv_rows)
|
||||
: rows(std::move(tv_rows)) {}
|
||||
|
||||
struct MgProcedureResultStream final {
|
||||
explicit MgProcedureResultStream(mgp_memory *memory) : rows(memory->impl), memory(memory) {}
|
||||
using Row = std::vector<memgraph::query::TypedValue>;
|
||||
using Rows = std::vector<Row>;
|
||||
using PmrRow = memgraph::utils::pmr::vector<memgraph::query::TypedValue>;
|
||||
using PmrRows = memgraph::utils::pmr::vector<PmrRow>;
|
||||
|
||||
PmrRows rows;
|
||||
mgp_memory *memory;
|
||||
|
||||
void Result(const Row &row) {
|
||||
PmrRow pmr_row(memory->impl);
|
||||
for (auto &val : row) {
|
||||
pmr_row.emplace_back(std::move(val));
|
||||
}
|
||||
|
||||
rows.emplace_back(std::move(pmr_row));
|
||||
}
|
||||
};
|
||||
|
||||
std::map<std::string, memgraph::storage::PropertyValue> CreateQueryParams(mgp_map *params) {
|
||||
std::map<std::string, memgraph::storage::PropertyValue> query_params;
|
||||
for (auto &[k, v] : params->items) {
|
||||
query_params.emplace(k, ToPropertyValue(v));
|
||||
}
|
||||
|
||||
return query_params;
|
||||
}
|
||||
|
||||
struct mgp_execution_result::pImplMgpExecutionResult {
|
||||
std::unique_ptr<memgraph::query::Interpreter> interpreter;
|
||||
std::unique_ptr<mgp_execution_headers> headers;
|
||||
};
|
||||
|
||||
mgp_execution_result::mgp_execution_result() : pImpl(std::make_unique<pImplMgpExecutionResult>()) {
|
||||
auto *instance = memgraph::query::InterpreterContext::getInstance();
|
||||
pImpl->interpreter = std::make_unique<memgraph::query::Interpreter>(instance, instance->dbms_handler->Get());
|
||||
}
|
||||
|
||||
mgp_execution_result::~mgp_execution_result() {
|
||||
auto *instance = memgraph::query::InterpreterContext::getInstance();
|
||||
instance->interpreters.WithLock([this](auto &interpreters) { interpreters.erase(pImpl->interpreter.get()); });
|
||||
// interpreter will delete itself because it's a smart pointer
|
||||
}
|
||||
|
||||
mgp_error mgp_execute_query(mgp_graph *graph, mgp_memory *memory, const char *query, mgp_map *params,
|
||||
mgp_execution_result **result) {
|
||||
return WrapExceptions(
|
||||
[query, params, graph, memory]() {
|
||||
auto query_string = std::string(query);
|
||||
auto *instance = memgraph::query::InterpreterContext::getInstance();
|
||||
|
||||
mgp_execution_result *result = NewRawMgpObject<mgp_execution_result>(memory->impl);
|
||||
result->pImpl->interpreter->SetUser(graph->ctx->user_or_role);
|
||||
|
||||
instance->interpreters.WithLock(
|
||||
[result](auto &interpreters) { interpreters.insert(result->pImpl->interpreter.get()); });
|
||||
|
||||
const auto query_params = CreateQueryParams(params);
|
||||
|
||||
auto prepare_query_result = result->pImpl->interpreter->Prepare(query_string, query_params, {});
|
||||
|
||||
memgraph::utils::pmr::vector<memgraph::utils::pmr::string> headers(memory->impl);
|
||||
for (auto header : prepare_query_result.headers) {
|
||||
headers.emplace_back(header);
|
||||
}
|
||||
result->pImpl->headers = std::make_unique<mgp_execution_headers>(std::move(headers));
|
||||
|
||||
return result;
|
||||
},
|
||||
result);
|
||||
}
|
||||
|
||||
mgp_error mgp_fetch_execution_headers(mgp_execution_result *exec_result, mgp_execution_headers **result) {
|
||||
return WrapExceptions([exec_result]() { return exec_result->pImpl->headers.get(); }, result);
|
||||
}
|
||||
|
||||
mgp_error mgp_pull_one(mgp_execution_result *exec_result, mgp_graph *graph, mgp_memory *memory, mgp_map **result) {
|
||||
return WrapExceptions(
|
||||
[exec_result, graph, memory]() -> mgp_map * {
|
||||
MgProcedureResultStream stream(memory);
|
||||
|
||||
try {
|
||||
exec_result->pImpl->interpreter->Pull(&stream, 1, {});
|
||||
} catch (const std::exception &e) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (stream.rows.empty()) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
const size_t headers_size = exec_result->pImpl->headers->headers.size();
|
||||
memgraph::utils::pmr::map<memgraph::utils::pmr::string, mgp_value> items(memory->impl);
|
||||
for (size_t idx = 0; idx < headers_size; idx++) {
|
||||
items.emplace(exec_result->pImpl->headers->headers[idx],
|
||||
mgp_value{std::move(stream.rows[0][idx]), graph, memory->impl});
|
||||
}
|
||||
|
||||
return NewRawMgpObject<mgp_map>(memory->impl, std::move(items));
|
||||
},
|
||||
result);
|
||||
}
|
||||
|
@ -16,6 +16,7 @@
|
||||
|
||||
#include "mg_procedure.h"
|
||||
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <ostream>
|
||||
|
||||
@ -24,6 +25,7 @@
|
||||
#include "query/context.hpp"
|
||||
#include "query/db_accessor.hpp"
|
||||
#include "query/frontend/ast/ast.hpp"
|
||||
|
||||
#include "query/procedure/cypher_type_ptr.hpp"
|
||||
#include "query/typed_value.hpp"
|
||||
#include "storage/v2/view.hpp"
|
||||
@ -33,6 +35,7 @@
|
||||
#include "utils/pmr/vector.hpp"
|
||||
#include "utils/temporal.hpp"
|
||||
#include "utils/variant_helpers.hpp"
|
||||
|
||||
/// Wraps memory resource used in custom procedures.
|
||||
///
|
||||
/// This should have been `using mgp_memory = memgraph::utils::MemoryResource`, but that's
|
||||
@ -562,13 +565,6 @@ struct mgp_graph {
|
||||
memgraph::query::ExecutionContext *ctx;
|
||||
memgraph::storage::StorageMode storage_mode;
|
||||
|
||||
memgraph::query::DbAccessor *getImpl() const {
|
||||
return std::visit(
|
||||
memgraph::utils::Overloaded{[](memgraph::query::DbAccessor *impl) { return impl; },
|
||||
[](memgraph::query::SubgraphDbAccessor *impl) { return impl->GetAccessor(); }},
|
||||
this->impl);
|
||||
}
|
||||
|
||||
static mgp_graph WritableGraph(memgraph::query::DbAccessor &acc, memgraph::storage::View view,
|
||||
memgraph::query::ExecutionContext &ctx) {
|
||||
return mgp_graph{&acc, view, &ctx, acc.GetStorageMode()};
|
||||
@ -1000,3 +996,29 @@ struct mgp_messages {
|
||||
bool ContainsDeleted(const mgp_value *val);
|
||||
|
||||
memgraph::query::TypedValue ToTypedValue(const mgp_value &val, memgraph::utils::MemoryResource *memory);
|
||||
|
||||
struct mgp_execution_headers {
|
||||
using allocator_type = memgraph::utils::Allocator<mgp_execution_headers>;
|
||||
using storage_type = memgraph::utils::pmr::vector<memgraph::utils::pmr::string>;
|
||||
explicit mgp_execution_headers(storage_type &&storage);
|
||||
|
||||
~mgp_execution_headers() = default;
|
||||
|
||||
storage_type headers;
|
||||
};
|
||||
|
||||
struct mgp_execution_rows {
|
||||
explicit mgp_execution_rows(
|
||||
memgraph::utils::pmr::vector<memgraph::utils::pmr::vector<memgraph::query::TypedValue>> &&tv_rows);
|
||||
~mgp_execution_rows() = default;
|
||||
|
||||
memgraph::utils::pmr::vector<memgraph::utils::pmr::vector<memgraph::query::TypedValue>> rows;
|
||||
};
|
||||
|
||||
struct mgp_execution_result {
|
||||
explicit mgp_execution_result();
|
||||
~mgp_execution_result();
|
||||
|
||||
struct pImplMgpExecutionResult;
|
||||
std::unique_ptr<pImplMgpExecutionResult> pImpl;
|
||||
};
|
||||
|
@ -56,7 +56,7 @@ ReplicationState::ReplicationState(std::optional<std::filesystem::path> durabili
|
||||
}
|
||||
auto replication_data = std::move(fetched_replication_data).GetValue();
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (FLAGS_management_port && std::holds_alternative<RoleReplicaData>(replication_data)) {
|
||||
if (FLAGS_coordinator_server_port && std::holds_alternative<RoleReplicaData>(replication_data)) {
|
||||
spdlog::trace("Restarted replication uuid for replica");
|
||||
std::get<RoleReplicaData>(replication_data).uuid_.reset();
|
||||
}
|
||||
@ -254,8 +254,7 @@ bool ReplicationState::SetReplicationRoleMain(const utils::UUID &main_uuid) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// By default, writing on MAIN is disabled until cluster is in healthy state
|
||||
replication_data_ = RoleMainData{ReplicationEpoch{new_epoch}, /*is_writing enabled*/ false, main_uuid};
|
||||
replication_data_ = RoleMainData{ReplicationEpoch{new_epoch}, true, main_uuid};
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -213,7 +213,7 @@ struct ReplicationHandler : public memgraph::query::ReplicationQueryHandler {
|
||||
// We force sync replicas in other situation
|
||||
if (state == storage::replication::ReplicaState::DIVERGED_FROM_MAIN) {
|
||||
#ifdef MG_ENTERPRISE
|
||||
return FLAGS_management_port != 0;
|
||||
return FLAGS_coordinator_server_port != 0;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
|
@ -20,7 +20,6 @@ add_library(mg-storage-v2 STATIC
|
||||
vertex_info_cache.cpp
|
||||
storage.cpp
|
||||
indices/indices.cpp
|
||||
indices/text_index.cpp
|
||||
all_vertices_iterable.cpp
|
||||
edges_iterable.cpp
|
||||
vertices_iterable.cpp
|
||||
@ -46,5 +45,4 @@ add_library(mg-storage-v2 STATIC
|
||||
inmemory/replication/recovery.cpp
|
||||
)
|
||||
|
||||
target_include_directories(mg-storage-v2 PUBLIC ${CMAKE_SOURCE_DIR}/include)
|
||||
target_link_libraries(mg-storage-v2 mg::replication Threads::Threads mg-utils mg-flags gflags absl::flat_hash_map mg-rpc mg-slk mg-events mg-memory mgcxx_text_search tantivy_text_search)
|
||||
target_link_libraries(mg-storage-v2 mg::replication Threads::Threads mg-utils gflags absl::flat_hash_map mg-rpc mg-slk mg-events mg-memory)
|
||||
|
@ -37,7 +37,6 @@ struct SalientConfig {
|
||||
struct Items {
|
||||
bool properties_on_edges{true};
|
||||
bool enable_schema_metadata{false};
|
||||
bool delta_on_identical_property_update{true};
|
||||
friend bool operator==(const Items &lrh, const Items &rhs) = default;
|
||||
} items;
|
||||
|
||||
@ -132,7 +131,7 @@ struct Config {
|
||||
inline auto ReplicationStateRootPath(memgraph::storage::Config const &config) -> std::optional<std::filesystem::path> {
|
||||
if (!config.durability.restore_replication_state_on_startup
|
||||
#ifdef MG_ENTERPRISE
|
||||
&& !FLAGS_management_port
|
||||
&& !FLAGS_coordinator_server_port
|
||||
#endif
|
||||
) {
|
||||
spdlog::warn(
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -26,7 +26,6 @@ constexpr const char *kVertexCountDescr = "vertex_count";
|
||||
constexpr const char *kEdgeDountDescr = "edge_count";
|
||||
constexpr const char *kLabelIndexStr = "label_index";
|
||||
constexpr const char *kLabelPropertyIndexStr = "label_property_index";
|
||||
constexpr const char *kTextIndexStr = "text_index";
|
||||
constexpr const char *kExistenceConstraintsStr = "existence_constraints";
|
||||
constexpr const char *kUniqueConstraintsStr = "unique_constraints";
|
||||
} // namespace
|
||||
@ -42,7 +41,7 @@ DurableMetadata::DurableMetadata(const Config &config)
|
||||
DurableMetadata::DurableMetadata(DurableMetadata &&other) noexcept
|
||||
: durability_kvstore_(std::move(other.durability_kvstore_)), config_(std::move(other.config_)) {}
|
||||
|
||||
void DurableMetadata::UpdateMetaData(uint64_t timestamp, uint64_t vertex_count, uint64_t edge_count) {
|
||||
void DurableMetadata::SaveBeforeClosingDB(uint64_t timestamp, uint64_t vertex_count, uint64_t edge_count) {
|
||||
durability_kvstore_.Put(kLastTransactionStartTimeStamp, std::to_string(timestamp));
|
||||
durability_kvstore_.Put(kVertexCountDescr, std::to_string(vertex_count));
|
||||
durability_kvstore_.Put(kEdgeDountDescr, std::to_string(edge_count));
|
||||
@ -145,31 +144,6 @@ bool DurableMetadata::PersistLabelPropertyIndexAndExistenceConstraintDeletion(La
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DurableMetadata::PersistTextIndexCreation(const std::string &index_name, LabelId label) {
|
||||
const std::string index_name_label_pair = index_name + "," + label.ToString();
|
||||
if (auto text_index_store = durability_kvstore_.Get(kTextIndexStr); text_index_store.has_value()) {
|
||||
std::string &value = text_index_store.value();
|
||||
value += "|";
|
||||
value += index_name_label_pair;
|
||||
return durability_kvstore_.Put(kTextIndexStr, value);
|
||||
}
|
||||
return durability_kvstore_.Put(kTextIndexStr, index_name_label_pair);
|
||||
}
|
||||
|
||||
bool DurableMetadata::PersistTextIndexDeletion(const std::string &index_name, LabelId label) {
|
||||
const std::string index_name_label_pair = index_name + "," + label.ToString();
|
||||
if (auto text_index_store = durability_kvstore_.Get(kTextIndexStr); text_index_store.has_value()) {
|
||||
const std::string &value = text_index_store.value();
|
||||
std::vector<std::string> text_indices = utils::Split(value, "|");
|
||||
std::erase(text_indices, index_name_label_pair);
|
||||
if (text_indices.empty()) {
|
||||
return durability_kvstore_.Delete(kTextIndexStr);
|
||||
}
|
||||
return durability_kvstore_.Put(kTextIndexStr, utils::Join(text_indices, "|"));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DurableMetadata::PersistUniqueConstraintCreation(LabelId label, const std::set<PropertyId> &properties) {
|
||||
const std::string entry = utils::GetKeyForUniqueConstraintsDurability(label, properties);
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -41,7 +41,7 @@ class DurableMetadata {
|
||||
std::optional<std::vector<std::string>> LoadExistenceConstraintInfoIfExists() const;
|
||||
std::optional<std::vector<std::string>> LoadUniqueConstraintInfoIfExists() const;
|
||||
|
||||
void UpdateMetaData(uint64_t timestamp, uint64_t vertex_count, uint64_t edge_count);
|
||||
void SaveBeforeClosingDB(uint64_t timestamp, uint64_t vertex_count, uint64_t edge_count);
|
||||
|
||||
bool PersistLabelIndexCreation(LabelId label);
|
||||
|
||||
@ -53,10 +53,6 @@ class DurableMetadata {
|
||||
bool PersistLabelPropertyIndexAndExistenceConstraintDeletion(LabelId label, PropertyId property,
|
||||
const std::string &key);
|
||||
|
||||
bool PersistTextIndexCreation(const std::string &index_name, LabelId label);
|
||||
|
||||
bool PersistTextIndexDeletion(const std::string &index_name, LabelId label);
|
||||
|
||||
bool PersistUniqueConstraintCreation(LabelId label, const std::set<PropertyId> &properties);
|
||||
|
||||
bool PersistUniqueConstraintDeletion(LabelId label, const std::set<PropertyId> &properties);
|
||||
|
@ -29,8 +29,6 @@
|
||||
#include <rocksdb/utilities/transaction.h>
|
||||
#include <rocksdb/utilities/transaction_db.h>
|
||||
|
||||
#include "flags/experimental.hpp"
|
||||
#include "flags/run_time_configurable.hpp"
|
||||
#include "kvstore/kvstore.hpp"
|
||||
#include "spdlog/spdlog.h"
|
||||
#include "storage/v2/constraints/unique_constraints.hpp"
|
||||
@ -274,8 +272,8 @@ DiskStorage::DiskStorage(Config config)
|
||||
}
|
||||
|
||||
DiskStorage::~DiskStorage() {
|
||||
durable_metadata_.UpdateMetaData(timestamp_, vertex_count_.load(std::memory_order_acquire),
|
||||
edge_count_.load(std::memory_order_acquire));
|
||||
durable_metadata_.SaveBeforeClosingDB(timestamp_, vertex_count_.load(std::memory_order_acquire),
|
||||
edge_count_.load(std::memory_order_acquire));
|
||||
logging::AssertRocksDBStatus(kvstore_->db_->DestroyColumnFamilyHandle(kvstore_->vertex_chandle));
|
||||
logging::AssertRocksDBStatus(kvstore_->db_->DestroyColumnFamilyHandle(kvstore_->edge_chandle));
|
||||
logging::AssertRocksDBStatus(kvstore_->db_->DestroyColumnFamilyHandle(kvstore_->out_edges_chandle));
|
||||
@ -858,7 +856,6 @@ StorageInfo DiskStorage::GetInfo(memgraph::replication_coordination_glue::Replic
|
||||
const auto &lbl = access->ListAllIndices();
|
||||
info.label_indices = lbl.label.size();
|
||||
info.label_property_indices = lbl.label_property.size();
|
||||
info.text_indices = lbl.text_indices.size();
|
||||
const auto &con = access->ListAllConstraints();
|
||||
info.existence_constraints = con.existence.size();
|
||||
info.unique_constraints = con.unique.size();
|
||||
@ -1673,18 +1670,6 @@ utils::BasicResult<StorageManipulationError, void> DiskStorage::DiskAccessor::Co
|
||||
case MetadataDelta::Action::LABEL_PROPERTY_INDEX_STATS_CLEAR: {
|
||||
throw utils::NotYetImplemented("ClearIndexStats(stats) is not implemented for DiskStorage.");
|
||||
} break;
|
||||
case MetadataDelta::Action::TEXT_INDEX_CREATE: {
|
||||
const auto &info = md_delta.text_index;
|
||||
if (!disk_storage->durable_metadata_.PersistTextIndexCreation(info.index_name, info.label)) {
|
||||
return StorageManipulationError{PersistenceError{}};
|
||||
}
|
||||
} break;
|
||||
case MetadataDelta::Action::TEXT_INDEX_DROP: {
|
||||
const auto &info = md_delta.text_index;
|
||||
if (!disk_storage->durable_metadata_.PersistTextIndexDeletion(info.index_name, info.label)) {
|
||||
return StorageManipulationError{PersistenceError{}};
|
||||
}
|
||||
} break;
|
||||
case MetadataDelta::Action::EXISTENCE_CONSTRAINT_CREATE: {
|
||||
const auto &info = md_delta.label_property;
|
||||
if (!disk_storage->durable_metadata_.PersistLabelPropertyIndexAndExistenceConstraintCreation(
|
||||
@ -1783,11 +1768,7 @@ utils::BasicResult<StorageManipulationError, void> DiskStorage::DiskAccessor::Co
|
||||
return StorageManipulationError{SerializationError{}};
|
||||
}
|
||||
spdlog::trace("rocksdb: Commit successful");
|
||||
if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
disk_storage->indices_.text_index_.Commit();
|
||||
}
|
||||
disk_storage->durable_metadata_.UpdateMetaData(disk_storage->timestamp_, disk_storage->vertex_count_,
|
||||
disk_storage->edge_count_);
|
||||
|
||||
is_transaction_active_ = false;
|
||||
|
||||
return {};
|
||||
@ -1905,9 +1886,6 @@ void DiskStorage::DiskAccessor::Abort() {
|
||||
// query_plan_accumulate_aggregate.cpp
|
||||
transaction_.disk_transaction_->Rollback();
|
||||
transaction_.disk_transaction_->ClearSnapshot();
|
||||
if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
storage_->indices_.text_index_.Rollback();
|
||||
}
|
||||
delete transaction_.disk_transaction_;
|
||||
transaction_.disk_transaction_ = nullptr;
|
||||
is_transaction_active_ = false;
|
||||
@ -2114,11 +2092,7 @@ IndicesInfo DiskStorage::DiskAccessor::ListAllIndices() const {
|
||||
auto *disk_label_index = static_cast<DiskLabelIndex *>(on_disk->indices_.label_index_.get());
|
||||
auto *disk_label_property_index =
|
||||
static_cast<DiskLabelPropertyIndex *>(on_disk->indices_.label_property_index_.get());
|
||||
auto &text_index = storage_->indices_.text_index_;
|
||||
return {disk_label_index->ListIndices(),
|
||||
disk_label_property_index->ListIndices(),
|
||||
{/* edge type indices */},
|
||||
text_index.ListIndices()};
|
||||
return {disk_label_index->ListIndices(), disk_label_property_index->ListIndices()};
|
||||
}
|
||||
ConstraintsInfo DiskStorage::DiskAccessor::ListAllConstraints() const {
|
||||
auto *disk_storage = static_cast<DiskStorage *>(storage_);
|
||||
|
@ -301,8 +301,6 @@ class DiskStorage final : public Storage {
|
||||
|
||||
EdgeImportMode GetEdgeImportMode() const;
|
||||
|
||||
DurableMetadata *GetDurableMetadata() { return &durable_metadata_; }
|
||||
|
||||
private:
|
||||
void LoadPersistingMetadataInfo();
|
||||
|
||||
|
@ -151,8 +151,7 @@ void RecoverConstraints(const RecoveredIndicesAndConstraints::ConstraintsMetadat
|
||||
|
||||
void RecoverIndicesAndStats(const RecoveredIndicesAndConstraints::IndicesMetadata &indices_metadata, Indices *indices,
|
||||
utils::SkipList<Vertex> *vertices, NameIdMapper *name_id_mapper,
|
||||
const std::optional<ParallelizedSchemaCreationInfo> ¶llel_exec_info,
|
||||
const std::optional<std::filesystem::path> &storage_dir) {
|
||||
const std::optional<ParallelizedSchemaCreationInfo> ¶llel_exec_info) {
|
||||
spdlog::info("Recreating indices from metadata.");
|
||||
|
||||
// Recover label indices.
|
||||
@ -212,26 +211,6 @@ void RecoverIndicesAndStats(const RecoveredIndicesAndConstraints::IndicesMetadat
|
||||
}
|
||||
spdlog::info("Edge-type indices are recreated.");
|
||||
|
||||
if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
// Recover text indices.
|
||||
spdlog::info("Recreating {} text indices from metadata.", indices_metadata.text_indices.size());
|
||||
auto &mem_text_index = indices->text_index_;
|
||||
for (const auto &[index_name, label] : indices_metadata.text_indices) {
|
||||
try {
|
||||
if (!storage_dir.has_value()) {
|
||||
throw RecoveryFailure("There must exist a storage directory in order to recover text indices!");
|
||||
}
|
||||
|
||||
mem_text_index.RecoverIndex(storage_dir.value(), index_name, label, vertices->access(), name_id_mapper);
|
||||
} catch (...) {
|
||||
throw RecoveryFailure("The text index must be created here!");
|
||||
}
|
||||
spdlog::info("Text index {} on :{} is recreated from metadata", index_name,
|
||||
name_id_mapper->IdToName(label.AsUint()));
|
||||
}
|
||||
spdlog::info("Text indices are recreated.");
|
||||
}
|
||||
|
||||
spdlog::info("Indices are recreated.");
|
||||
}
|
||||
|
||||
@ -352,13 +331,8 @@ std::optional<RecoveryInfo> Recovery::RecoverData(std::string *uuid, Replication
|
||||
repl_storage_state.epoch_.SetEpoch(std::move(recovered_snapshot->snapshot_info.epoch_id));
|
||||
|
||||
if (!utils::DirExists(wal_directory_)) {
|
||||
std::optional<std::filesystem::path> storage_dir = std::nullopt;
|
||||
if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
storage_dir = config.durability.storage_directory;
|
||||
}
|
||||
|
||||
RecoverIndicesAndStats(indices_constraints.indices, indices, vertices, name_id_mapper,
|
||||
GetParallelExecInfoIndices(recovery_info, config), storage_dir);
|
||||
GetParallelExecInfoIndices(recovery_info, config));
|
||||
RecoverConstraints(indices_constraints.constraints, constraints, vertices, name_id_mapper,
|
||||
GetParallelExecInfo(recovery_info, config));
|
||||
return recovered_snapshot->recovery_info;
|
||||
@ -493,13 +467,8 @@ std::optional<RecoveryInfo> Recovery::RecoverData(std::string *uuid, Replication
|
||||
spdlog::info("All necessary WAL files are loaded successfully.");
|
||||
}
|
||||
|
||||
std::optional<std::filesystem::path> storage_dir = std::nullopt;
|
||||
if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
storage_dir = config.durability.storage_directory;
|
||||
}
|
||||
|
||||
RecoverIndicesAndStats(indices_constraints.indices, indices, vertices, name_id_mapper,
|
||||
GetParallelExecInfoIndices(recovery_info, config), storage_dir);
|
||||
GetParallelExecInfoIndices(recovery_info, config));
|
||||
RecoverConstraints(indices_constraints.constraints, constraints, vertices, name_id_mapper,
|
||||
GetParallelExecInfo(recovery_info, config));
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -102,8 +102,7 @@ std::optional<std::vector<WalDurabilityInfo>> GetWalFiles(const std::filesystem:
|
||||
/// @throw RecoveryFailure
|
||||
void RecoverIndicesAndStats(const RecoveredIndicesAndConstraints::IndicesMetadata &indices_metadata, Indices *indices,
|
||||
utils::SkipList<Vertex> *vertices, NameIdMapper *name_id_mapper,
|
||||
const std::optional<ParallelizedSchemaCreationInfo> ¶llel_exec_info = std::nullopt,
|
||||
const std::optional<std::filesystem::path> &storage_dir = std::nullopt);
|
||||
const std::optional<ParallelizedSchemaCreationInfo> ¶llel_exec_info = std::nullopt);
|
||||
|
||||
// Helper function used to recover all discovered constraints. The
|
||||
// constraints must be recovered after the data recovery is done
|
||||
|
@ -64,8 +64,6 @@ enum class Marker : uint8_t {
|
||||
DELTA_LABEL_PROPERTY_INDEX_STATS_CLEAR = 0x64,
|
||||
DELTA_EDGE_TYPE_INDEX_CREATE = 0x65,
|
||||
DELTA_EDGE_TYPE_INDEX_DROP = 0x66,
|
||||
DELTA_TEXT_INDEX_CREATE = 0x67,
|
||||
DELTA_TEXT_INDEX_DROP = 0x68,
|
||||
|
||||
VALUE_FALSE = 0x00,
|
||||
VALUE_TRUE = 0xff,
|
||||
@ -112,8 +110,6 @@ static const Marker kMarkersAll[] = {
|
||||
Marker::DELTA_LABEL_PROPERTY_INDEX_DROP,
|
||||
Marker::DELTA_EDGE_TYPE_INDEX_CREATE,
|
||||
Marker::DELTA_EDGE_TYPE_INDEX_DROP,
|
||||
Marker::DELTA_TEXT_INDEX_CREATE,
|
||||
Marker::DELTA_TEXT_INDEX_DROP,
|
||||
Marker::DELTA_EXISTENCE_CONSTRAINT_CREATE,
|
||||
Marker::DELTA_EXISTENCE_CONSTRAINT_DROP,
|
||||
Marker::DELTA_UNIQUE_CONSTRAINT_CREATE,
|
||||
|
@ -44,7 +44,6 @@ struct RecoveredIndicesAndConstraints {
|
||||
std::vector<std::pair<LabelId, LabelIndexStats>> label_stats;
|
||||
std::vector<std::pair<LabelId, std::pair<PropertyId, LabelPropertyIndexStats>>> label_property_stats;
|
||||
std::vector<EdgeTypeId> edge;
|
||||
std::vector<std::pair<std::string, LabelId>> text_indices;
|
||||
} indices;
|
||||
|
||||
struct ConstraintsMetadata {
|
||||
|
@ -353,8 +353,6 @@ std::optional<PropertyValue> Decoder::ReadPropertyValue() {
|
||||
case Marker::DELTA_LABEL_PROPERTY_INDEX_DROP:
|
||||
case Marker::DELTA_EDGE_TYPE_INDEX_CREATE:
|
||||
case Marker::DELTA_EDGE_TYPE_INDEX_DROP:
|
||||
case Marker::DELTA_TEXT_INDEX_CREATE:
|
||||
case Marker::DELTA_TEXT_INDEX_DROP:
|
||||
case Marker::DELTA_EXISTENCE_CONSTRAINT_CREATE:
|
||||
case Marker::DELTA_EXISTENCE_CONSTRAINT_DROP:
|
||||
case Marker::DELTA_UNIQUE_CONSTRAINT_CREATE:
|
||||
@ -461,8 +459,6 @@ bool Decoder::SkipPropertyValue() {
|
||||
case Marker::DELTA_LABEL_PROPERTY_INDEX_DROP:
|
||||
case Marker::DELTA_EDGE_TYPE_INDEX_CREATE:
|
||||
case Marker::DELTA_EDGE_TYPE_INDEX_DROP:
|
||||
case Marker::DELTA_TEXT_INDEX_CREATE:
|
||||
case Marker::DELTA_TEXT_INDEX_DROP:
|
||||
case Marker::DELTA_EXISTENCE_CONSTRAINT_CREATE:
|
||||
case Marker::DELTA_EXISTENCE_CONSTRAINT_DROP:
|
||||
case Marker::DELTA_UNIQUE_CONSTRAINT_CREATE:
|
||||
|
@ -13,8 +13,6 @@
|
||||
|
||||
#include <thread>
|
||||
|
||||
#include "flags/experimental.hpp"
|
||||
#include "flags/run_time_configurable.hpp"
|
||||
#include "spdlog/spdlog.h"
|
||||
#include "storage/v2/durability/exceptions.hpp"
|
||||
#include "storage/v2/durability/paths.hpp"
|
||||
@ -2006,24 +2004,6 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis
|
||||
spdlog::info("Metadata of edge-type indices are recovered.");
|
||||
}
|
||||
|
||||
// Recover text indices.
|
||||
if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
auto size = snapshot.ReadUint();
|
||||
if (!size) throw RecoveryFailure("Couldn't recover the number of text indices!");
|
||||
spdlog::info("Recovering metadata of {} text indices.", *size);
|
||||
for (uint64_t i = 0; i < *size; ++i) {
|
||||
auto index_name = snapshot.ReadString();
|
||||
if (!index_name.has_value()) throw RecoveryFailure("Couldn't read text index name!");
|
||||
auto label = snapshot.ReadUint();
|
||||
if (!label) throw RecoveryFailure("Couldn't read text index label!");
|
||||
AddRecoveredIndexConstraint(&indices_constraints.indices.text_indices,
|
||||
{index_name.value(), get_label_from_id(*label)}, "The text index already exists!");
|
||||
SPDLOG_TRACE("Recovered metadata of text index {} for :{}", index_name.value(),
|
||||
name_id_mapper->IdToName(snapshot_id_map.at(*label)));
|
||||
}
|
||||
spdlog::info("Metadata of text indices are recovered.");
|
||||
}
|
||||
|
||||
spdlog::info("Metadata of indices are recovered.");
|
||||
}
|
||||
|
||||
@ -2513,16 +2493,6 @@ void CreateSnapshot(Storage *storage, Transaction *transaction, const std::files
|
||||
write_mapping(item);
|
||||
}
|
||||
}
|
||||
|
||||
// Write text indices.
|
||||
if (flags::AreExperimentsEnabled(flags::Experiments::TEXT_SEARCH)) {
|
||||
auto text_indices = storage->indices_.text_index_.ListIndices();
|
||||
snapshot.WriteUint(text_indices.size());
|
||||
for (const auto &[index_name, label] : text_indices) {
|
||||
snapshot.WriteString(index_name);
|
||||
write_mapping(label);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Write constraints.
|
||||
|
@ -25,8 +25,6 @@ enum class StorageMetadataOperation {
|
||||
LABEL_PROPERTY_INDEX_STATS_CLEAR,
|
||||
EDGE_TYPE_INDEX_CREATE,
|
||||
EDGE_TYPE_INDEX_DROP,
|
||||
TEXT_INDEX_CREATE,
|
||||
TEXT_INDEX_DROP,
|
||||
EXISTENCE_CONSTRAINT_CREATE,
|
||||
EXISTENCE_CONSTRAINT_DROP,
|
||||
UNIQUE_CONSTRAINT_CREATE,
|
||||
|
@ -99,10 +99,6 @@ Marker OperationToMarker(StorageMetadataOperation operation) {
|
||||
return Marker::DELTA_EDGE_TYPE_INDEX_CREATE;
|
||||
case StorageMetadataOperation::EDGE_TYPE_INDEX_DROP:
|
||||
return Marker::DELTA_EDGE_TYPE_INDEX_DROP;
|
||||
case StorageMetadataOperation::TEXT_INDEX_CREATE:
|
||||
return Marker::DELTA_TEXT_INDEX_CREATE;
|
||||
case StorageMetadataOperation::TEXT_INDEX_DROP:
|
||||
return Marker::DELTA_TEXT_INDEX_DROP;
|
||||
case StorageMetadataOperation::EXISTENCE_CONSTRAINT_CREATE:
|
||||
return Marker::DELTA_EXISTENCE_CONSTRAINT_CREATE;
|
||||
case StorageMetadataOperation::EXISTENCE_CONSTRAINT_DROP:
|
||||
@ -176,10 +172,6 @@ WalDeltaData::Type MarkerToWalDeltaDataType(Marker marker) {
|
||||
return WalDeltaData::Type::LABEL_PROPERTY_INDEX_CREATE;
|
||||
case Marker::DELTA_LABEL_PROPERTY_INDEX_DROP:
|
||||
return WalDeltaData::Type::LABEL_PROPERTY_INDEX_DROP;
|
||||
case Marker::DELTA_TEXT_INDEX_CREATE:
|
||||
return WalDeltaData::Type::TEXT_INDEX_CREATE;
|
||||
case Marker::DELTA_TEXT_INDEX_DROP:
|
||||
return WalDeltaData::Type::TEXT_INDEX_DROP;
|
||||
case Marker::DELTA_LABEL_PROPERTY_INDEX_STATS_SET:
|
||||
return WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_SET;
|
||||
case Marker::DELTA_LABEL_PROPERTY_INDEX_STATS_CLEAR:
|
||||
@ -390,21 +382,6 @@ WalDeltaData ReadSkipWalDeltaData(BaseDecoder *decoder) {
|
||||
if (!decoder->SkipString()) throw RecoveryFailure("Invalid WAL data!");
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::TEXT_INDEX_CREATE:
|
||||
case WalDeltaData::Type::TEXT_INDEX_DROP: {
|
||||
if constexpr (read_data) {
|
||||
auto index_name = decoder->ReadString();
|
||||
if (!index_name) throw RecoveryFailure("Invalid WAL data!");
|
||||
delta.operation_text.index_name = std::move(*index_name);
|
||||
auto label = decoder->ReadString();
|
||||
if (!label) throw RecoveryFailure("Invalid WAL data!");
|
||||
delta.operation_text.label = std::move(*label);
|
||||
} else {
|
||||
if (!decoder->SkipString() || !decoder->SkipString()) throw RecoveryFailure("Invalid WAL data!");
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@ -552,12 +529,6 @@ bool operator==(const WalDeltaData &a, const WalDeltaData &b) {
|
||||
|
||||
case WalDeltaData::Type::LABEL_PROPERTY_INDEX_CREATE:
|
||||
case WalDeltaData::Type::LABEL_PROPERTY_INDEX_DROP:
|
||||
case WalDeltaData::Type::TEXT_INDEX_CREATE:
|
||||
return a.operation_text.index_name == b.operation_text.index_name &&
|
||||
a.operation_text.label == b.operation_text.label;
|
||||
case WalDeltaData::Type::TEXT_INDEX_DROP:
|
||||
return a.operation_text.index_name == b.operation_text.index_name &&
|
||||
a.operation_text.label == b.operation_text.label;
|
||||
case WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE:
|
||||
case WalDeltaData::Type::EXISTENCE_CONSTRAINT_DROP:
|
||||
return a.operation_label_property.label == b.operation_label_property.label &&
|
||||
@ -704,8 +675,7 @@ void EncodeTransactionEnd(BaseEncoder *encoder, uint64_t timestamp) {
|
||||
}
|
||||
|
||||
void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, StorageMetadataOperation operation,
|
||||
const std::optional<std::string> text_index_name, LabelId label,
|
||||
const std::set<PropertyId> &properties, const LabelIndexStats &stats,
|
||||
LabelId label, const std::set<PropertyId> &properties, const LabelIndexStats &stats,
|
||||
const LabelPropertyIndexStats &property_stats, uint64_t timestamp) {
|
||||
encoder->WriteMarker(Marker::SECTION_DELTA);
|
||||
encoder->WriteUint(timestamp);
|
||||
@ -761,14 +731,6 @@ void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, Storage
|
||||
case StorageMetadataOperation::EDGE_TYPE_INDEX_DROP: {
|
||||
MG_ASSERT(false, "Invalid function call!");
|
||||
}
|
||||
case StorageMetadataOperation::TEXT_INDEX_CREATE:
|
||||
case StorageMetadataOperation::TEXT_INDEX_DROP: {
|
||||
MG_ASSERT(text_index_name.has_value(), "Text indices must be named!");
|
||||
encoder->WriteMarker(OperationToMarker(operation));
|
||||
encoder->WriteString(text_index_name.value());
|
||||
encoder->WriteString(name_id_mapper->IdToName(label.AsUint()));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -790,8 +752,6 @@ void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, Storage
|
||||
case StorageMetadataOperation::LABEL_INDEX_STATS_SET:
|
||||
case StorageMetadataOperation::LABEL_PROPERTY_INDEX_CREATE:
|
||||
case StorageMetadataOperation::LABEL_PROPERTY_INDEX_DROP:
|
||||
case StorageMetadataOperation::TEXT_INDEX_CREATE:
|
||||
case StorageMetadataOperation::TEXT_INDEX_DROP:
|
||||
case StorageMetadataOperation::EXISTENCE_CONSTRAINT_CREATE:
|
||||
case StorageMetadataOperation::EXISTENCE_CONSTRAINT_DROP:
|
||||
case StorageMetadataOperation::LABEL_PROPERTY_INDEX_STATS_SET:
|
||||
@ -1040,20 +1000,6 @@ RecoveryInfo LoadWal(const std::filesystem::path &path, RecoveredIndicesAndConst
|
||||
"The label index stats doesn't exist!");
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::TEXT_INDEX_CREATE: {
|
||||
auto index_name = delta.operation_text.index_name;
|
||||
auto label = LabelId::FromUint(name_id_mapper->NameToId(delta.operation_text.label));
|
||||
AddRecoveredIndexConstraint(&indices_constraints->indices.text_indices, {index_name, label},
|
||||
"The text index already exists!");
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::TEXT_INDEX_DROP: {
|
||||
auto index_name = delta.operation_text.index_name;
|
||||
auto label = LabelId::FromUint(name_id_mapper->NameToId(delta.operation_text.label));
|
||||
RemoveRecoveredIndexConstraint(&indices_constraints->indices.text_indices, {index_name, label},
|
||||
"The text index doesn't exist!");
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE: {
|
||||
auto label_id = LabelId::FromUint(name_id_mapper->NameToId(delta.operation_label_property.label));
|
||||
auto property_id = PropertyId::FromUint(name_id_mapper->NameToId(delta.operation_label_property.property));
|
||||
@ -1202,11 +1148,10 @@ void WalFile::AppendTransactionEnd(uint64_t timestamp) {
|
||||
UpdateStats(timestamp);
|
||||
}
|
||||
|
||||
void WalFile::AppendOperation(StorageMetadataOperation operation, const std::optional<std::string> text_index_name,
|
||||
LabelId label, const std::set<PropertyId> &properties, const LabelIndexStats &stats,
|
||||
const LabelPropertyIndexStats &property_stats, uint64_t timestamp) {
|
||||
EncodeOperation(&wal_, name_id_mapper_, operation, text_index_name, label, properties, stats, property_stats,
|
||||
timestamp);
|
||||
void WalFile::AppendOperation(StorageMetadataOperation operation, LabelId label, const std::set<PropertyId> &properties,
|
||||
const LabelIndexStats &stats, const LabelPropertyIndexStats &property_stats,
|
||||
uint64_t timestamp) {
|
||||
EncodeOperation(&wal_, name_id_mapper_, operation, label, properties, stats, property_stats, timestamp);
|
||||
UpdateStats(timestamp);
|
||||
}
|
||||
|
||||
|
@ -69,8 +69,6 @@ struct WalDeltaData {
|
||||
LABEL_PROPERTY_INDEX_STATS_CLEAR,
|
||||
EDGE_INDEX_CREATE,
|
||||
EDGE_INDEX_DROP,
|
||||
TEXT_INDEX_CREATE,
|
||||
TEXT_INDEX_DROP,
|
||||
EXISTENCE_CONSTRAINT_CREATE,
|
||||
EXISTENCE_CONSTRAINT_DROP,
|
||||
UNIQUE_CONSTRAINT_CREATE,
|
||||
@ -129,11 +127,6 @@ struct WalDeltaData {
|
||||
std::string property;
|
||||
std::string stats;
|
||||
} operation_label_property_stats;
|
||||
|
||||
struct {
|
||||
std::string index_name;
|
||||
std::string label;
|
||||
} operation_text;
|
||||
};
|
||||
|
||||
bool operator==(const WalDeltaData &a, const WalDeltaData &b);
|
||||
@ -170,8 +163,6 @@ constexpr bool IsWalDeltaDataTypeTransactionEndVersion15(const WalDeltaData::Typ
|
||||
case WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_CLEAR:
|
||||
case WalDeltaData::Type::EDGE_INDEX_CREATE:
|
||||
case WalDeltaData::Type::EDGE_INDEX_DROP:
|
||||
case WalDeltaData::Type::TEXT_INDEX_CREATE:
|
||||
case WalDeltaData::Type::TEXT_INDEX_DROP:
|
||||
case WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE:
|
||||
case WalDeltaData::Type::EXISTENCE_CONSTRAINT_DROP:
|
||||
case WalDeltaData::Type::UNIQUE_CONSTRAINT_CREATE:
|
||||
@ -222,8 +213,7 @@ void EncodeTransactionEnd(BaseEncoder *encoder, uint64_t timestamp);
|
||||
|
||||
/// Function used to encode non-transactional operation.
|
||||
void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, StorageMetadataOperation operation,
|
||||
const std::optional<std::string> text_index_name, LabelId label,
|
||||
const std::set<PropertyId> &properties, const LabelIndexStats &stats,
|
||||
LabelId label, const std::set<PropertyId> &properties, const LabelIndexStats &stats,
|
||||
const LabelPropertyIndexStats &property_stats, uint64_t timestamp);
|
||||
|
||||
void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, StorageMetadataOperation operation,
|
||||
@ -258,9 +248,8 @@ class WalFile {
|
||||
|
||||
void AppendTransactionEnd(uint64_t timestamp);
|
||||
|
||||
void AppendOperation(StorageMetadataOperation operation, const std::optional<std::string> text_index_name,
|
||||
LabelId label, const std::set<PropertyId> &properties, const LabelIndexStats &stats,
|
||||
const LabelPropertyIndexStats &property_stats, uint64_t timestamp);
|
||||
void AppendOperation(StorageMetadataOperation operation, LabelId label, const std::set<PropertyId> &properties,
|
||||
const LabelIndexStats &stats, const LabelPropertyIndexStats &property_stats, uint64_t timestamp);
|
||||
|
||||
void AppendOperation(StorageMetadataOperation operation, EdgeTypeId edge_type, uint64_t timestamp);
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user