Merge branch 'project-pineapples' into T1151-MG-distributed-shard-split-logic

commit bd21b405d4

.github/workflows/diff.yaml (vendored)
@@ -271,3 +271,30 @@ jobs:
source ve3/bin/activate
cd e2e
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:../../libs/mgclient/lib python runner.py --workloads-root-directory ./distributed_queries

- name: Run query performance tests
run: |
cd tests/manual
./query_performance_runner.py

- name: Get branch name (merge)
if: github.event_name != 'pull_request'
shell: bash
run: echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/} | tr / -)" >> $GITHUB_ENV

- name: Get branch name (pull request)
if: github.event_name == 'pull_request'
shell: bash
run: echo "BRANCH_NAME=$(echo ${GITHUB_HEAD_REF} | tr / -)" >> $GITHUB_ENV

- name: Upload macro benchmark results
run: |
cd tools/bench-graph-client
virtualenv -p python3 ve3
source ve3/bin/activate
pip install -r requirements.txt
./main.py --benchmark-name "query_performance" \
--benchmark-results-path "../../build/tests/manual/query_performance_benchmark/summary.json" \
--github-run-id "${{ github.run_id }}" \
--github-run-number "${{ github.run_number }}" \
--head-branch-name "${{ env.BRANCH_NAME }}"

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -23,17 +23,17 @@ namespace memgraph::coordinator {
using Time = memgraph::io::Time;

/// Hybrid-logical clock
struct Hlc {
uint64_t logical_id = 0;
struct Hlc final {
uint64_t logical_id{0};
Time coordinator_wall_clock = Time::min();

auto operator<=>(const Hlc &other) const { return logical_id <=> other.logical_id; }
auto operator<=>(const Hlc &other) const noexcept { return logical_id <=> other.logical_id; }

bool operator==(const Hlc &other) const = default;
bool operator<(const Hlc &other) const = default;
bool operator==(const uint64_t other) const { return logical_id == other; }
bool operator<(const uint64_t other) const { return logical_id < other; }
bool operator>=(const uint64_t other) const { return logical_id >= other; }
bool operator==(const Hlc &other) const noexcept = default;
bool operator<(const Hlc &other) const noexcept = default;
bool operator==(const uint64_t other) const noexcept { return logical_id == other; }
bool operator<(const uint64_t other) const noexcept { return logical_id < other; }
bool operator>=(const uint64_t other) const noexcept { return logical_id >= other; }

friend std::ostream &operator<<(std::ostream &in, const Hlc &hlc) {
auto wall_clock = std::chrono::system_clock::to_time_t(hlc.coordinator_wall_clock);

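A minimal standalone sketch of the ordering the Hlc hunk above establishes: only logical_id participates in comparisons, while the wall-clock member is carried along for logging. HlcSketch is a hypothetical stand-in for illustration, not the project's type.

#include <compare>
#include <cstdint>

struct HlcSketch {
  uint64_t logical_id{0};
  // Ordering is total on logical_id alone, mirroring the operator<=> above.
  constexpr auto operator<=>(const HlcSketch &other) const noexcept { return logical_id <=> other.logical_id; }
  constexpr bool operator==(const HlcSketch &other) const noexcept = default;
};

static_assert(HlcSketch{.logical_id = 1} < HlcSketch{.logical_id = 2});
static_assert(HlcSketch{.logical_id = 3} == HlcSketch{.logical_id = 3});
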
@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -51,7 +51,7 @@ constexpr char kId[] = "ID";

namespace MG_INJECTED_NAMESPACE_NAME {
namespace detail {
using antlropencypher::MemgraphCypher;
using antlropencypher::v2::MemgraphCypher;

template <typename TVisitor>
std::optional<std::pair<Expression *, size_t>> VisitMemoryLimit(MemgraphCypher::MemoryLimitContext *memory_limit_ctx,
@@ -211,13 +211,13 @@ inline std::string_view ToString(const PulsarConfigKey key) {
}
} // namespace detail

using antlropencypher::MemgraphCypher;
using antlropencypher::v2::MemgraphCypher;

struct ParsingContext {
bool is_query_cached = false;
};

class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
class CypherMainVisitor : public antlropencypher::v2::MemgraphCypherBaseVisitor {
public:
explicit CypherMainVisitor(ParsingContext context, AstStorage *storage) : context_(context), storage_(storage) {}

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -47,12 +47,12 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
TypedValue Visit(NamedExpression &named_expression) override {
const auto &symbol = symbol_table_->at(named_expression);
auto value = named_expression.expression_->Accept(*this);
frame_->at(symbol) = value;
frame_->At(symbol) = value;
return value;
}

TypedValue Visit(Identifier &ident) override {
return TypedValue(frame_->at(symbol_table_->at(ident)), ctx_->memory);
return TypedValue(frame_->At(symbol_table_->at(ident)), ctx_->memory);
}

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
@@ -470,7 +470,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
}

TypedValue Visit(Aggregation &aggregation) override {
return TypedValue(frame_->at(symbol_table_->at(aggregation)), ctx_->memory);
return TypedValue(frame_->At(symbol_table_->at(aggregation)), ctx_->memory);
}

TypedValue Visit(Coalesce &coalesce) override {
@@ -528,8 +528,8 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
const auto &accumulator_symbol = symbol_table_->at(*reduce.accumulator_);
auto accumulator = reduce.initializer_->Accept(*this);
for (const auto &element : list) {
frame_->at(accumulator_symbol) = accumulator;
frame_->at(element_symbol) = element;
frame_->At(accumulator_symbol) = accumulator;
frame_->At(element_symbol) = element;
accumulator = reduce.expression_->Accept(*this);
}
return accumulator;
@@ -551,7 +551,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
if (element.IsNull()) {
result.emplace_back();
} else {
frame_->at(element_symbol) = element;
frame_->At(element_symbol) = element;
result.emplace_back(extract.expression_->Accept(*this));
}
}
@@ -571,7 +571,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
bool has_null_elements = false;
bool has_value = false;
for (const auto &element : list) {
frame_->at(symbol) = element;
frame_->At(symbol) = element;
auto result = all.where_->expression_->Accept(*this);
if (!result.IsNull() && result.type() != TypedValue::Type::Bool) {
throw ExpressionRuntimeException("Predicate of ALL must evaluate to boolean, got {}.", result.type());
@@ -608,7 +608,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
bool has_value = false;
bool predicate_satisfied = false;
for (const auto &element : list) {
frame_->at(symbol) = element;
frame_->At(symbol) = element;
auto result = single.where_->expression_->Accept(*this);
if (!result.IsNull() && result.type() != TypedValue::Type::Bool) {
throw ExpressionRuntimeException("Predicate of SINGLE must evaluate to boolean, got {}.", result.type());
@@ -645,7 +645,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
const auto &symbol = symbol_table_->at(*any.identifier_);
bool has_value = false;
for (const auto &element : list) {
frame_->at(symbol) = element;
frame_->At(symbol) = element;
auto result = any.where_->expression_->Accept(*this);
if (!result.IsNull() && result.type() != TypedValue::Type::Bool) {
throw ExpressionRuntimeException("Predicate of ANY must evaluate to boolean, got {}.", result.type());
@@ -677,7 +677,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
const auto &symbol = symbol_table_->at(*none.identifier_);
bool has_value = false;
for (const auto &element : list) {
frame_->at(symbol) = element;
frame_->At(symbol) = element;
auto result = none.where_->expression_->Accept(*this);
if (!result.IsNull() && result.type() != TypedValue::Type::Bool) {
throw ExpressionRuntimeException("Predicate of NONE must evaluate to boolean, got {}.", result.type());

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -23,29 +23,33 @@ namespace memgraph::expr {
class Frame {
public:
/// Create a Frame of given size backed by a utils::NewDeleteResource()
explicit Frame(int64_t size) : elems_(size, utils::NewDeleteResource()) { MG_ASSERT(size >= 0); }
explicit Frame(size_t size) : elems_(size, utils::NewDeleteResource()) { MG_ASSERT(size >= 0); }

Frame(int64_t size, utils::MemoryResource *memory) : elems_(size, memory) { MG_ASSERT(size >= 0); }
Frame(size_t size, utils::MemoryResource *memory) : elems_(size, memory) { MG_ASSERT(size >= 0); }

TypedValue &operator[](const Symbol &symbol) { return elems_[symbol.position()]; }
const TypedValue &operator[](const Symbol &symbol) const { return elems_[symbol.position()]; }

TypedValue &at(const Symbol &symbol) { return elems_.at(symbol.position()); }
const TypedValue &at(const Symbol &symbol) const { return elems_.at(symbol.position()); }
TypedValue &At(const Symbol &symbol) { return elems_.at(symbol.position()); }
const TypedValue &At(const Symbol &symbol) const { return elems_.at(symbol.position()); }

auto &elems() { return elems_; }
uint64_t Id() const { return id_; }
void SetId(const uint64_t id) { id_ = id; }

const utils::pmr::vector<TypedValue> &Elems() const { return elems_; }

utils::MemoryResource *GetMemoryResource() const { return elems_.get_allocator().GetMemoryResource(); }

private:
uint64_t id_{0U};
utils::pmr::vector<TypedValue> elems_;
};

class FrameWithValidity final : public Frame {
public:
explicit FrameWithValidity(int64_t size) : Frame(size), is_valid_(false) {}
explicit FrameWithValidity(size_t size) : Frame(size), is_valid_(false) {}

FrameWithValidity(int64_t size, utils::MemoryResource *memory) : Frame(size, memory), is_valid_(false) {}
FrameWithValidity(size_t size, utils::MemoryResource *memory) : Frame(size, memory), is_valid_(false) {}

bool IsValid() const noexcept { return is_valid_; }
void MakeValid() noexcept { is_valid_ = true; }

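The hunk above renames the bounds-checked accessor from at to At while operator[] stays unchecked. A standalone sketch of that split, using a plain std::vector as an illustrative stand-in for the frame storage:

#include <cstddef>
#include <vector>

int Checked(const std::vector<int> &elems, std::size_t pos) {
  return elems.at(pos);  // bounds-checked, throws std::out_of_range (mirrors Frame::At)
}

int Unchecked(const std::vector<int> &elems, std::size_t pos) {
  return elems[pos];  // no bounds check (mirrors Frame::operator[])
}
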
@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -120,7 +120,7 @@ class Shared {
MG_ASSERT(!consumed_, "Promise filled after it was already consumed!");
MG_ASSERT(!filled_, "Promise filled twice!");

item_ = item;
item_ = std::move(item);
filled_ = true;
} // lock released before condition variable notification

@@ -235,7 +235,7 @@ class Promise {
// Fill the expected item into the Future.
void Fill(T item) {
MG_ASSERT(!filled_or_moved_, "Promise::Fill called on a promise that is already filled or moved!");
shared_->Fill(item);
shared_->Fill(std::move(item));
filled_or_moved_ = true;
}

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -30,10 +30,10 @@ class LocalTransport {
explicit LocalTransport(std::shared_ptr<LocalTransportHandle> local_transport_handle)
: local_transport_handle_(std::move(local_transport_handle)) {}

template <Message RequestT, Message ResponseT>
ResponseFuture<ResponseT> Request(Address to_address, Address from_address, RequestT request,
template <Message ResponseT, Message RequestT>
ResponseFuture<ResponseT> Request(Address to_address, Address from_address, RValueRef<RequestT> request,
std::function<void()> fill_notifier, Duration timeout) {
return local_transport_handle_->template SubmitRequest<RequestT, ResponseT>(
return local_transport_handle_->template SubmitRequest<ResponseT, RequestT>(
to_address, from_address, std::move(request), timeout, fill_notifier);
}

@@ -43,8 +43,8 @@ class LocalTransport {
}

template <Message M>
void Send(Address to_address, Address from_address, RequestId request_id, M &&message) {
return local_transport_handle_->template Send<M>(to_address, from_address, request_id, std::forward<M>(message));
void Send(Address to_address, Address from_address, RequestId request_id, RValueRef<M> message) {
return local_transport_handle_->template Send<M>(to_address, from_address, request_id, std::move(message));
}

Time Now() const { return local_transport_handle_->Now(); }

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -104,10 +104,10 @@ class LocalTransportHandle {
}

template <Message M>
void Send(Address to_address, Address from_address, RequestId request_id, M &&message) {
void Send(Address to_address, Address from_address, RequestId request_id, RValueRef<M> message) {
auto type_info = TypeInfoFor(message);

std::any message_any(std::forward<M>(message));
std::any message_any(std::move(message));
OpaqueMessage opaque_message{.to_address = to_address,
.from_address = from_address,
.request_id = request_id,
@@ -138,14 +138,14 @@ class LocalTransportHandle {
cv_.notify_all();
}

template <Message RequestT, Message ResponseT>
ResponseFuture<ResponseT> SubmitRequest(Address to_address, Address from_address, RequestT &&request,
template <Message ResponseT, Message RequestT>
ResponseFuture<ResponseT> SubmitRequest(Address to_address, Address from_address, RValueRef<RequestT> request,
Duration timeout, std::function<void()> fill_notifier) {
auto [future, promise] = memgraph::io::FuturePromisePairWithNotifications<ResponseResult<ResponseT>>(
// set null notifier for when the Future::Wait is called
nullptr,
// set notifier for when Promise::Fill is called
std::forward<std::function<void()>>(fill_notifier));
std::move(fill_notifier));

const bool port_matches = to_address.last_known_port == from_address.last_known_port;
const bool ip_matches = to_address.last_known_ip == from_address.last_known_ip;
@@ -168,7 +168,7 @@ class LocalTransportHandle {
promises_.emplace(std::move(promise_key), std::move(dop));
} // lock dropped

Send(to_address, from_address, request_id, std::forward<RequestT>(request));
Send<RequestT>(to_address, from_address, request_id, std::move(request));

return std::move(future);
}

@@ -13,6 +13,7 @@

#include <boost/core/demangle.hpp>

#include "io/time.hpp"
#include "io/transport.hpp"
#include "utils/type_info_ref.hpp"

@@ -38,6 +39,7 @@ struct OpaqueMessage {
uint64_t request_id;
std::any message;
utils::TypeInfoRef type_info;
Time deliverable_at;

/// Recursively tries to match a specific type from the outer
/// variant's parameter pack against the type of the std::any,

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -19,6 +19,7 @@
#include <map>
#include <set>
#include <thread>
#include <type_traits>
#include <vector>

#include <boost/core/demangle.hpp>
@@ -246,7 +247,7 @@ to a CAS operation.
template <typename WriteOperation, typename ReadOperation, typename ReplicatedState, typename WriteResponseValue,
typename ReadResponseValue>
concept Rsm = requires(ReplicatedState state, WriteOperation w, ReadOperation r) {
{ state.Read(r) } -> std::same_as<ReadResponseValue>;
{ state.Read(std::move(r)) } -> std::same_as<ReadResponseValue>;
{ state.Apply(w) } -> std::same_as<WriteResponseValue>;
};

@@ -402,7 +403,7 @@ class Raft {
const PendingClientRequest client_request = std::move(leader.pending_client_requests.at(apply_index));
leader.pending_client_requests.erase(apply_index);

const WriteResponse<WriteResponseValue> resp{
WriteResponse<WriteResponseValue> resp{
.success = true,
.write_return = std::move(write_return),
.raft_index = apply_index,
@@ -554,7 +555,7 @@ class Raft {
for (const auto &peer : peers_) {
// request_id not necessary to set because it's not a Future-backed Request.
static constexpr auto request_id = 0;
io_.template Send<VoteRequest>(peer, request_id, request);
io_.template Send(peer, request_id, VoteRequest{request});
outstanding_votes.insert(peer);
}

@@ -624,13 +625,12 @@ class Raft {
MG_ASSERT(std::max(req.term, state_.term) == req.term);
}

const VoteResponse res{
.term = std::max(req.term, state_.term),
.committed_log_size = state_.committed_log_size,
.vote_granted = new_leader,
};

io_.Send(from_address, request_id, res);
io_.Send(from_address, request_id,
VoteResponse{
.term = std::max(req.term, state_.term),
.committed_log_size = state_.committed_log_size,
.vote_granted = new_leader,
});

if (new_leader) {
// become a follower
@@ -718,6 +718,10 @@ class Raft {
.log_size = state_.log.size(),
};

static_assert(std::is_trivially_copyable_v<AppendResponse>,
"This function copies this message, therefore it is important to be trivially copyable. Otherwise it "
"should be moved");

if constexpr (std::is_same<ALL, Leader>()) {
MG_ASSERT(req.term != state_.term, "Multiple leaders are acting under the term ", req.term);
}
@@ -736,7 +740,7 @@ class Raft {
// become follower of this leader, reply with our log status
state_.term = req.term;

io_.Send(from_address, request_id, res);
io_.Send(from_address, request_id, AppendResponse{res});

Log("becoming Follower of Leader ", from_address.last_known_port, " at term ", req.term);
return Follower{
@@ -747,7 +751,7 @@ class Raft {

if (req.term < state_.term) {
// nack this request from an old leader
io_.Send(from_address, request_id, res);
io_.Send(from_address, request_id, AppendResponse{res});

return std::nullopt;
}
@@ -808,7 +812,7 @@ class Raft {

Log("returning log_size of ", res.log_size);

io_.Send(from_address, request_id, res);
io_.Send(from_address, request_id, AppendResponse{res});

return std::nullopt;
}
@@ -859,17 +863,17 @@ class Raft {
auto type_info = TypeInfoFor(req);
std::string demangled_name = boost::core::demangle(type_info.get().name());
Log("handling ReadOperation<" + demangled_name + ">");
ReadOperation read_operation = req.operation;
ReadOperation &read_operation = req.operation;

ReadResponseValue read_return = replicated_state_.Read(read_operation);
ReadResponseValue read_return = replicated_state_.Read(std::move(read_operation));

const ReadResponse<ReadResponseValue> resp{
ReadResponse<ReadResponseValue> resp{
.success = true,
.read_return = std::move(read_return),
.retry_leader = std::nullopt,
};

io_.Send(from_address, request_id, resp);
io_.Send(from_address, request_id, std::move(resp));

return std::nullopt;
}
@@ -878,11 +882,11 @@ class Raft {
std::optional<Role> Handle(Candidate & /* variable */, ReadRequest<ReadOperation> && /* variable */,
RequestId request_id, Address from_address) {
Log("received ReadOperation - not redirecting because no Leader is known");
const ReadResponse<ReadResponseValue> res{
ReadResponse<ReadResponseValue> res{
.success = false,
};

io_.Send(from_address, request_id, res);
io_.Send(from_address, request_id, std::move(res));

Cron();

@@ -894,12 +898,12 @@ class Raft {
Address from_address) {
Log("redirecting client to known Leader with port ", follower.leader_address.last_known_port);

const ReadResponse<ReadResponseValue> res{
ReadResponse<ReadResponseValue> res{
.success = false,
.retry_leader = follower.leader_address,
};

io_.Send(from_address, request_id, res);
io_.Send(from_address, request_id, std::move(res));

return std::nullopt;
}
@@ -913,12 +917,12 @@ class Raft {
Address from_address) {
Log("redirecting client to known Leader with port ", follower.leader_address.last_known_port);

const WriteResponse<WriteResponseValue> res{
WriteResponse<WriteResponseValue> res{
.success = false,
.retry_leader = follower.leader_address,
};

io_.Send(from_address, request_id, res);
io_.Send(from_address, request_id, std::move(res));

return std::nullopt;
}
@@ -927,11 +931,11 @@ class Raft {
RequestId request_id, Address from_address) {
Log("received WriteRequest - not redirecting because no Leader is known");

const WriteResponse<WriteResponseValue> res{
WriteResponse<WriteResponseValue> res{
.success = false,
};

io_.Send(from_address, request_id, res);
io_.Send(from_address, request_id, std::move(res));

Cron();

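A standalone sketch of the send pattern the Raft hunks above converge on: responses are built as plain (non-const) local values and handed to the transport as rvalues, so the payload is moved rather than copied. The Send function and response type here are illustrative stand-ins, not the project's API.

#include <string>
#include <utility>

template <typename M>
void Send(M && /*message*/) {}  // stand-in for io_.Send(...) taking an rvalue

struct ReadResponseSketch {
  bool success = false;
  std::string read_return;
};

void Reply() {
  // Not declared const: a const local would silently turn std::move into a copy.
  ReadResponseSketch resp{.success = true, .read_return = "value"};
  Send(std::move(resp));
}
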
@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -95,7 +95,7 @@ class RsmClient {
BasicResult<TimedOut, WriteResponseT> SendWriteRequest(WriteRequestT req) {
Notifier notifier;
const ReadinessToken readiness_token{0};
SendAsyncWriteRequest(req, notifier, readiness_token);
SendAsyncWriteRequest(std::move(req), notifier, readiness_token);
auto poll_result = AwaitAsyncWriteRequest(readiness_token);
while (!poll_result) {
poll_result = AwaitAsyncWriteRequest(readiness_token);
@@ -106,7 +106,7 @@ class RsmClient {
BasicResult<TimedOut, ReadResponseT> SendReadRequest(ReadRequestT req) {
Notifier notifier;
const ReadinessToken readiness_token{0};
SendAsyncReadRequest(req, notifier, readiness_token);
SendAsyncReadRequest(std::move(req), notifier, readiness_token);
auto poll_result = AwaitAsyncReadRequest(readiness_token);
while (!poll_result) {
poll_result = AwaitAsyncReadRequest(readiness_token);
@@ -115,15 +115,15 @@ class RsmClient {
}

/// AsyncRead methods
void SendAsyncReadRequest(const ReadRequestT &req, Notifier notifier, ReadinessToken readiness_token) {
void SendAsyncReadRequest(ReadRequestT &&req, Notifier notifier, ReadinessToken readiness_token) {
ReadRequest<ReadRequestT> read_req = {.operation = req};

AsyncRequest<ReadRequestT, ReadResponse<ReadResponseT>> async_request{
.start_time = io_.Now(),
.request = std::move(req),
.notifier = notifier,
.future = io_.template RequestWithNotification<ReadRequest<ReadRequestT>, ReadResponse<ReadResponseT>>(
leader_, read_req, notifier, readiness_token),
.future = io_.template RequestWithNotification<ReadResponse<ReadResponseT>, ReadRequest<ReadRequestT>>(
leader_, std::move(read_req), notifier, readiness_token),
};

async_reads_.emplace(readiness_token.GetId(), std::move(async_request));
@@ -134,8 +134,8 @@ class RsmClient {

ReadRequest<ReadRequestT> read_req = {.operation = async_request.request};

async_request.future = io_.template RequestWithNotification<ReadRequest<ReadRequestT>, ReadResponse<ReadResponseT>>(
leader_, read_req, async_request.notifier, readiness_token);
async_request.future = io_.template RequestWithNotification<ReadResponse<ReadResponseT>, ReadRequest<ReadRequestT>>(
leader_, std::move(read_req), async_request.notifier, readiness_token);
}

std::optional<BasicResult<TimedOut, ReadResponseT>> PollAsyncReadRequest(const ReadinessToken &readiness_token) {
@@ -184,15 +184,15 @@ class RsmClient {
}

/// AsyncWrite methods
void SendAsyncWriteRequest(const WriteRequestT &req, Notifier notifier, ReadinessToken readiness_token) {
void SendAsyncWriteRequest(WriteRequestT &&req, Notifier notifier, ReadinessToken readiness_token) {
WriteRequest<WriteRequestT> write_req = {.operation = req};

AsyncRequest<WriteRequestT, WriteResponse<WriteResponseT>> async_request{
.start_time = io_.Now(),
.request = std::move(req),
.notifier = notifier,
.future = io_.template RequestWithNotification<WriteRequest<WriteRequestT>, WriteResponse<WriteResponseT>>(
leader_, write_req, notifier, readiness_token),
.future = io_.template RequestWithNotification<WriteResponse<WriteResponseT>, WriteRequest<WriteRequestT>>(
leader_, std::move(write_req), notifier, readiness_token),
};

async_writes_.emplace(readiness_token.GetId(), std::move(async_request));
@@ -204,8 +204,8 @@ class RsmClient {
WriteRequest<WriteRequestT> write_req = {.operation = async_request.request};

async_request.future =
io_.template RequestWithNotification<WriteRequest<WriteRequestT>, WriteResponse<WriteResponseT>>(
leader_, write_req, async_request.notifier, readiness_token);
io_.template RequestWithNotification<WriteResponse<WriteResponseT>, WriteRequest<WriteRequestT>>(
leader_, std::move(write_req), async_request.notifier, readiness_token);
}

std::optional<BasicResult<TimedOut, WriteResponseT>> PollAsyncWriteRequest(const ReadinessToken &readiness_token) {

@@ -41,7 +41,7 @@ class Simulator {
Io<SimulatorTransport> Register(Address address) {
std::uniform_int_distribution<uint64_t> seed_distrib;
uint64_t seed = seed_distrib(rng_);
return Io{SimulatorTransport{simulator_handle_, address, seed}, address};
return Io{SimulatorTransport(simulator_handle_, address, seed), address};
}

void IncrementServerCountAndWaitForQuiescentState(Address address) {
@@ -50,8 +50,12 @@ class Simulator {

SimulatorStats Stats() { return simulator_handle_->Stats(); }

std::shared_ptr<SimulatorHandle> GetSimulatorHandle() const { return simulator_handle_; }

std::function<bool()> GetSimulatorTickClosure() {
std::function<bool()> tick_closure = [handle_copy = simulator_handle_] { return handle_copy->MaybeTickSimulator(); };
std::function<bool()> tick_closure = [handle_copy = simulator_handle_] {
return handle_copy->MaybeTickSimulator();
};
return tick_closure;
}
};

@@ -26,5 +26,6 @@ struct SimulatorConfig {
uint64_t rng_seed = 0;
Time start_time = Time::min();
Time abort_time = Time::max();
Duration message_delay = std::chrono::microseconds(100);
};
}; // namespace memgraph::io::simulator

@@ -175,8 +175,8 @@ bool SimulatorHandle::MaybeTickSimulator() {
spdlog::trace("simulator adding message to can_receive_ from {} to {}", opaque_message.from_address.last_known_port,
opaque_message.to_address.last_known_port);
const auto &[om_vec, inserted] =
can_receive_.try_emplace(to_address.ToPartialAddress(), std::vector<OpaqueMessage>());
om_vec->second.emplace_back(std::move(opaque_message));
can_receive_.try_emplace(to_address.ToPartialAddress(), std::deque<OpaqueMessage>());
om_vec->second.emplace_front(std::move(opaque_message));
}

return true;

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -46,7 +46,7 @@ class SimulatorHandle {
std::map<PromiseKey, DeadlineAndOpaquePromise> promises_;

// messages that are sent to servers that may later receive them
std::map<PartialAddress, std::vector<OpaqueMessage>> can_receive_;
std::map<PartialAddress, std::deque<OpaqueMessage>> can_receive_;

Time cluster_wide_time_microseconds_;
bool should_shut_down_ = false;
@@ -105,19 +105,19 @@ class SimulatorHandle {

bool ShouldShutDown() const;

template <Message Request, Message Response>
ResponseFuture<Response> SubmitRequest(Address to_address, Address from_address, Request &&request, Duration timeout,
std::function<bool()> &&maybe_tick_simulator,
std::function<void()> &&fill_notifier) {
template <Message ResponseT, Message RequestT>
ResponseFuture<ResponseT> SubmitRequest(Address to_address, Address from_address, RValueRef<RequestT> request,
Duration timeout, std::function<bool()> &&maybe_tick_simulator,
std::function<void()> &&fill_notifier) {
auto type_info = TypeInfoFor(request);
std::string demangled_name = boost::core::demangle(type_info.get().name());
spdlog::trace("simulator sending request {} to {}", demangled_name, to_address);

auto [future, promise] = memgraph::io::FuturePromisePairWithNotifications<ResponseResult<Response>>(
auto [future, promise] = memgraph::io::FuturePromisePairWithNotifications<ResponseResult<ResponseT>>(
// set notifier for when the Future::Wait is called
std::forward<std::function<bool()>>(maybe_tick_simulator),
std::move(maybe_tick_simulator),
// set notifier for when Promise::Fill is called
std::forward<std::function<void()>>(fill_notifier));
std::move(fill_notifier));

{
std::unique_lock<std::mutex> lock(mu_);
@@ -126,12 +126,13 @@ class SimulatorHandle {

const Time deadline = cluster_wide_time_microseconds_ + timeout;

std::any message(request);
std::any message(std::move(request));
OpaqueMessage om{.to_address = to_address,
.from_address = from_address,
.request_id = request_id,
.message = std::move(message),
.type_info = type_info};
.type_info = type_info,
.deliverable_at = cluster_wide_time_microseconds_ + config_.message_delay};
in_flight_.emplace_back(std::make_pair(to_address, std::move(om)));

PromiseKey promise_key{.requester_address = from_address, .request_id = request_id};
@@ -165,8 +166,12 @@ class SimulatorHandle {

while (!should_shut_down_ && (cluster_wide_time_microseconds_ < deadline)) {
if (can_receive_.contains(partial_address)) {
std::vector<OpaqueMessage> &can_rx = can_receive_.at(partial_address);
if (!can_rx.empty()) {
std::deque<OpaqueMessage> &can_rx = can_receive_.at(partial_address);

bool contains_items = !can_rx.empty();
bool can_receive = contains_items && can_rx.back().deliverable_at <= cluster_wide_time_microseconds_;

if (can_receive) {
OpaqueMessage message = std::move(can_rx.back());
can_rx.pop_back();

@@ -177,6 +182,12 @@ class SimulatorHandle {

return std::move(m_opt).value();
}
if (contains_items) {
auto count = can_rx.back().deliverable_at.time_since_epoch().count();
auto now_count = cluster_wide_time_microseconds_.time_since_epoch().count();
spdlog::trace("can't receive message yet due to artificial latency. deliverable_at: {}, now: {}", count,
now_count);
}
}

if (!should_shut_down_) {
@@ -194,7 +205,7 @@ class SimulatorHandle {
}

template <Message M>
void Send(Address to_address, Address from_address, RequestId request_id, M message) {
void Send(Address to_address, Address from_address, RequestId request_id, RValueRef<M> message) {
spdlog::trace("sending message from {} to {}", from_address.last_known_port, to_address.last_known_port);
auto type_info = TypeInfoFor(message);
{
@@ -204,7 +215,8 @@ class SimulatorHandle {
.from_address = from_address,
.request_id = request_id,
.message = std::move(message_any),
.type_info = type_info};
.type_info = type_info,
.deliverable_at = cluster_wide_time_microseconds_ + config_.message_delay};
in_flight_.emplace_back(std::make_pair(std::move(to_address), std::move(om)));

stats_.total_messages++;

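A standalone sketch of the artificial-latency gate introduced above: each message carries a deliverable_at timestamp, new messages are pushed to the front of a per-receiver deque, and the receiver only pops from the back once the simulated clock has passed that time. The names and types here are illustrative, not the project's.

#include <chrono>
#include <deque>
#include <optional>

using Clock = std::chrono::steady_clock;

struct QueuedMessage {
  Clock::time_point deliverable_at;
  int payload;
};

std::optional<int> TryReceive(std::deque<QueuedMessage> &can_rx, Clock::time_point now) {
  // Oldest message sits at the back; deliver it only once its delay has elapsed.
  if (!can_rx.empty() && can_rx.back().deliverable_at <= now) {
    int payload = can_rx.back().payload;
    can_rx.pop_back();
    return payload;
  }
  return std::nullopt;  // nothing deliverable yet: the message is still "in flight"
}
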
@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -26,19 +26,21 @@ using memgraph::io::Time;

class SimulatorTransport {
std::shared_ptr<SimulatorHandle> simulator_handle_;
const Address address_;
Address address_;
std::mt19937 rng_;

public:
SimulatorTransport(std::shared_ptr<SimulatorHandle> simulator_handle, Address address, uint64_t seed)
: simulator_handle_(simulator_handle), address_(address), rng_(std::mt19937{seed}) {}

template <Message RequestT, Message ResponseT>
ResponseFuture<ResponseT> Request(Address to_address, Address from_address, RequestT request,
template <Message ResponseT, Message RequestT>
ResponseFuture<ResponseT> Request(Address to_address, Address from_address, RValueRef<RequestT> request,
std::function<void()> notification, Duration timeout) {
std::function<bool()> tick_simulator = [handle_copy = simulator_handle_] { return handle_copy->MaybeTickSimulator(); };
std::function<bool()> tick_simulator = [handle_copy = simulator_handle_] {
return handle_copy->MaybeTickSimulator();
};

return simulator_handle_->template SubmitRequest<RequestT, ResponseT>(
return simulator_handle_->template SubmitRequest<ResponseT, RequestT>(
to_address, from_address, std::move(request), timeout, std::move(tick_simulator), std::move(notification));
}

@@ -48,8 +50,8 @@ class SimulatorTransport {
}

template <Message M>
void Send(Address to_address, Address from_address, uint64_t request_id, M message) {
return simulator_handle_->template Send<M>(to_address, from_address, request_id, message);
void Send(Address to_address, Address from_address, uint64_t request_id, RValueRef<M> message) {
return simulator_handle_->template Send<M>(to_address, from_address, request_id, std::move(message));
}

Time Now() const { return simulator_handle_->Now(); }

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -22,6 +22,7 @@
#include "io/message_histogram_collector.hpp"
#include "io/notifier.hpp"
#include "io/time.hpp"
#include "utils/concepts.hpp"
#include "utils/result.hpp"

namespace memgraph::io {
@@ -32,7 +33,15 @@ using memgraph::utils::BasicResult;
// reasonable constraints around message types over time,
// as we adapt things to use Thrift-generated message types.
template <typename T>
concept Message = std::same_as<T, std::decay_t<T>>;
concept Message = std::movable<T> && std::copyable<T>;

template <utils::Object T>
struct RValueRefEnforcer {
using Type = T &&;
};

template <typename T>
using RValueRef = typename RValueRefEnforcer<T>::Type;

using RequestId = uint64_t;

@@ -82,44 +91,44 @@ class Io {
Duration GetDefaultTimeout() { return default_timeout_; }

/// Issue a request with an explicit timeout in microseconds provided. This tends to be used by clients.
template <Message RequestT, Message ResponseT>
ResponseFuture<ResponseT> RequestWithTimeout(Address address, RequestT request, Duration timeout) {
template <Message ResponseT, Message RequestT>
ResponseFuture<ResponseT> RequestWithTimeout(Address address, RValueRef<RequestT> request, Duration timeout) {
const Address from_address = address_;
std::function<void()> fill_notifier = nullptr;
return implementation_.template Request<RequestT, ResponseT>(address, from_address, request, fill_notifier,
timeout);
return implementation_.template Request<ResponseT, RequestT>(address, from_address, std::move(request),
fill_notifier, timeout);
}

/// Issue a request that times out after the default timeout. This tends
/// to be used by clients.
template <Message RequestT, Message ResponseT>
ResponseFuture<ResponseT> Request(Address to_address, RequestT request) {
template <Message ResponseT, Message RequestT>
ResponseFuture<ResponseT> Request(Address to_address, RValueRef<RequestT> request) {
const Duration timeout = default_timeout_;
const Address from_address = address_;
std::function<void()> fill_notifier = nullptr;
return implementation_.template Request<RequestT, ResponseT>(to_address, from_address, std::move(request),
return implementation_.template Request<ResponseT, RequestT>(to_address, from_address, std::move(request),
fill_notifier, timeout);
}

/// Issue a request that will notify a Notifier when it is filled or times out.
template <Message RequestT, Message ResponseT>
ResponseFuture<ResponseT> RequestWithNotification(Address to_address, RequestT request, Notifier notifier,
template <Message ResponseT, Message RequestT>
ResponseFuture<ResponseT> RequestWithNotification(Address to_address, RValueRef<RequestT> request, Notifier notifier,
ReadinessToken readiness_token) {
const Duration timeout = default_timeout_;
const Address from_address = address_;
std::function<void()> fill_notifier = [notifier, readiness_token]() { notifier.Notify(readiness_token); };
return implementation_.template Request<RequestT, ResponseT>(to_address, from_address, std::move(request),
return implementation_.template Request<ResponseT, RequestT>(to_address, from_address, std::move(request),
fill_notifier, timeout);
}

/// Issue a request that will notify a Notifier when it is filled or times out.
template <Message RequestT, Message ResponseT>
ResponseFuture<ResponseT> RequestWithNotificationAndTimeout(Address to_address, RequestT request, Notifier notifier,
template <Message ResponseT, Message RequestT>
ResponseFuture<ResponseT> RequestWithNotificationAndTimeout(Address to_address, RequestT &&request, Notifier notifier,
ReadinessToken readiness_token, Duration timeout) {
const Address from_address = address_;
std::function<void()> fill_notifier = [notifier, readiness_token]() { notifier.Notify(readiness_token); };
return implementation_.template Request<RequestT, ResponseT>(to_address, from_address, std::move(request),
fill_notifier, timeout);
return implementation_.template Request<ResponseT>(to_address, from_address, std::forward<RequestT>(request),
fill_notifier, timeout);
}

/// Wait for an explicit number of microseconds for a request of one of the
@@ -141,9 +150,9 @@ class Io {
/// responses are not necessarily expected, and for servers to respond to requests.
/// If you need reliable delivery, this must be built on-top. TCP is not enough for most use cases.
template <Message M>
void Send(Address to_address, RequestId request_id, M message) {
void Send(Address to_address, RequestId request_id, M &&message) {
Address from_address = address_;
return implementation_.template Send<M>(to_address, from_address, request_id, std::move(message));
return implementation_.template Send<M>(to_address, from_address, request_id, std::forward<M>(message));
}

/// The current system time. This time source should be preferred over any other,

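A standalone sketch of what the RValueRef alias introduced above enforces: because the parameter type is a non-deduced T&&, callers must hand over an rvalue (typically via std::move), and passing an lvalue fails to compile instead of silently copying. Names here carry a Sketch suffix to mark them as illustrative stand-ins, not the project's definitions.

#include <string>
#include <utility>

template <typename T>
struct RValueRefEnforcerSketch {
  using Type = T &&;
};

template <typename T>
using RValueRefSketch = typename RValueRefEnforcerSketch<T>::Type;

template <typename M>
void Consume(RValueRefSketch<M> message) {
  M local = std::move(message);  // take ownership of the payload
  (void)local;
}

void Caller() {
  std::string msg = "hello";
  Consume<std::string>(std::move(msg));  // OK: rvalue binds to std::string&&
  // Consume<std::string>(msg);          // would not compile: lvalue cannot bind to std::string&&
}
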
@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -640,6 +640,8 @@ int main(int argc, char **argv) {
memgraph::machine_manager::MachineManager<memgraph::io::local_transport::LocalTransport> mm{io, config, coordinator};
std::jthread mm_thread([&mm] { mm.Run(); });

auto rr_factory = std::make_unique<memgraph::query::v2::LocalRequestRouterFactory>(io);

memgraph::query::v2::InterpreterContext interpreter_context{
(memgraph::storage::v3::Shard *)(nullptr),
{.query = {.allow_load_csv = FLAGS_allow_load_csv},
@@ -650,7 +652,7 @@ int main(int argc, char **argv) {
.stream_transaction_conflict_retries = FLAGS_stream_transaction_conflict_retries,
.stream_transaction_retry_interval = std::chrono::milliseconds(FLAGS_stream_transaction_retry_interval)},
FLAGS_data_directory,
std::move(io),
std::move(rr_factory),
mm.CoordinatorAddress()};

SessionData session_data{&interpreter_context};

@@ -23,7 +23,7 @@ add_custom_command(
COMMAND ${CMAKE_COMMAND} -E make_directory ${opencypher_generated}
COMMAND
java -jar ${CMAKE_SOURCE_DIR}/libs/antlr-4.10.1-complete.jar
-Dlanguage=Cpp -visitor -package antlropencypher
-Dlanguage=Cpp -visitor -package antlropencypher::v2
-o ${opencypher_generated}
${opencypher_lexer_grammar} ${opencypher_parser_grammar}
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}"

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -14,10 +14,10 @@
#include <string>

#include "antlr4-runtime.h"
#include "utils/exceptions.hpp"
#include "parser/opencypher/generated/MemgraphCypher.h"
#include "parser/opencypher/generated/MemgraphCypherLexer.h"
#include "utils/concepts.hpp"
#include "utils/exceptions.hpp"

namespace memgraph::frontend::opencypher {

@@ -32,11 +32,9 @@ class SyntaxException : public utils::BasicException {
 * This thing must me a class since parser.cypher() returns pointer and there is
 * no way for us to get ownership over the object.
 */
enum class ParserOpTag : uint8_t {
CYPHER, EXPRESSION
};
enum class ParserOpTag : uint8_t { CYPHER, EXPRESSION };

template<ParserOpTag Tag = ParserOpTag::CYPHER>
template <ParserOpTag Tag = ParserOpTag::CYPHER>
class Parser {
public:
/**
@@ -46,10 +44,9 @@ class Parser {
Parser(const std::string query) : query_(std::move(query)) {
parser_.removeErrorListeners();
parser_.addErrorListener(&error_listener_);
if constexpr(Tag == ParserOpTag::CYPHER) {
if constexpr (Tag == ParserOpTag::CYPHER) {
tree_ = parser_.cypher();
}
else {
} else {
tree_ = parser_.expression();
}
if (parser_.getNumberOfSyntaxErrors()) {
@@ -75,11 +72,11 @@ class Parser {
FirstMessageErrorListener error_listener_;
std::string query_;
antlr4::ANTLRInputStream input_{query_};
antlropencypher::MemgraphCypherLexer lexer_{&input_};
antlropencypher::v2::MemgraphCypherLexer lexer_{&input_};
antlr4::CommonTokenStream tokens_{&lexer_};

// generate ast
antlropencypher::MemgraphCypher parser_{&tokens_};
antlropencypher::v2::MemgraphCypher parser_{&tokens_};
antlr4::tree::ParseTree *tree_ = nullptr;
};
} // namespace memgraph::frontend::opencypher

@@ -48,18 +48,20 @@ add_dependencies(mg-query generate_lcp_query)
target_include_directories(mg-query PUBLIC ${CMAKE_SOURCE_DIR}/include)
target_link_libraries(mg-query dl cppitertools Boost::headers)
target_link_libraries(mg-query mg-integrations-pulsar mg-integrations-kafka mg-storage-v2 mg-license mg-utils mg-kvstore mg-memory)

if(NOT "${MG_PYTHON_PATH}" STREQUAL "")
set(Python3_ROOT_DIR "${MG_PYTHON_PATH}")
endif()

if("${MG_PYTHON_VERSION}" STREQUAL "")
find_package(Python3 3.5 REQUIRED COMPONENTS Development)
else()
find_package(Python3 "${MG_PYTHON_VERSION}" EXACT REQUIRED COMPONENTS Development)
endif()

target_link_libraries(mg-query Python3::Python)

# Generate Antlr openCypher parser

set(opencypher_frontend ${CMAKE_CURRENT_SOURCE_DIR}/frontend/opencypher)
set(opencypher_generated ${opencypher_frontend}/generated)
set(opencypher_lexer_grammar ${opencypher_frontend}/grammar/MemgraphCypherLexer.g4)
@@ -82,15 +84,15 @@ add_custom_command(
OUTPUT ${antlr_opencypher_generated_src} ${antlr_opencypher_generated_include}
COMMAND ${CMAKE_COMMAND} -E make_directory ${opencypher_generated}
COMMAND
java -jar ${CMAKE_SOURCE_DIR}/libs/antlr-4.10.1-complete.jar
-Dlanguage=Cpp -visitor -package antlropencypher
-o ${opencypher_generated}
${opencypher_lexer_grammar} ${opencypher_parser_grammar}
java -jar ${CMAKE_SOURCE_DIR}/libs/antlr-4.10.1-complete.jar
-Dlanguage=Cpp -visitor -package antlropencypher
-o ${opencypher_generated}
${opencypher_lexer_grammar} ${opencypher_parser_grammar}
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}"
DEPENDS
${opencypher_lexer_grammar} ${opencypher_parser_grammar}
${opencypher_frontend}/grammar/CypherLexer.g4
${opencypher_frontend}/grammar/Cypher.g4)
${opencypher_lexer_grammar} ${opencypher_parser_grammar}
${opencypher_frontend}/grammar/CypherLexer.g4
${opencypher_frontend}/grammar/Cypher.g4)

add_custom_target(generate_opencypher_parser
DEPENDS ${antlr_opencypher_generated_src} ${antlr_opencypher_generated_include})

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -14,9 +14,9 @@
#include "query/v2/request_router.hpp"

// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_HIDDEN_bool(query_cost_planner, true, "Use the cost-estimating query planner.");
DEFINE_HIDDEN_bool(query_v2_cost_planner, true, "Use the cost-estimating query planner.");
// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_VALIDATED_int32(query_plan_cache_ttl, 60, "Time to live for cached query plans, in seconds.",
DEFINE_VALIDATED_int32(query_v2_plan_cache_ttl, 60, "Time to live for cached query plans, in seconds.",
FLAG_IN_RANGE(0, std::numeric_limits<int32_t>::max()));

namespace memgraph::query::v2 {
@@ -123,7 +123,7 @@ std::unique_ptr<LogicalPlan> MakeLogicalPlan(AstStorage ast_storage, CypherQuery
auto vertex_counts = plan::MakeVertexCountCache(request_router);
auto symbol_table = expr::MakeSymbolTable(query, predefined_identifiers);
auto planning_context = plan::MakePlanningContext(&ast_storage, &symbol_table, query, &vertex_counts);
auto [root, cost] = plan::MakeLogicalPlan(&planning_context, parameters, FLAGS_query_cost_planner);
auto [root, cost] = plan::MakeLogicalPlan(&planning_context, parameters, FLAGS_query_v2_cost_planner);
return std::make_unique<SingleNodeLogicalPlan>(std::move(root), cost, std::move(ast_storage),
std::move(symbol_table));
}

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -22,9 +22,9 @@
#include "utils/timer.hpp"

// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
DECLARE_bool(query_cost_planner);
DECLARE_bool(query_v2_cost_planner);
// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
DECLARE_int32(query_plan_cache_ttl);
DECLARE_int32(query_v2_plan_cache_ttl);

namespace memgraph::query::v2 {

@@ -58,7 +58,7 @@ class CachedPlan {

bool IsExpired() const {
// NOLINTNEXTLINE (modernize-use-nullptr)
return cache_timer_.Elapsed() > std::chrono::seconds(FLAGS_query_plan_cache_ttl);
return cache_timer_.Elapsed() > std::chrono::seconds(FLAGS_query_v2_plan_cache_ttl);
};

private:

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -64,7 +64,11 @@
#include "utils/tsc.hpp"
#include "utils/variant_helpers.hpp"

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_bool(use_multi_frame, false, "Whether to use MultiFrame or not");

namespace EventCounter {

extern Event ReadQuery;
extern Event WriteQuery;
extern Event ReadWriteQuery;
@@ -74,6 +78,7 @@ extern const Event LabelPropertyIndexCreated;

extern const Event StreamsCreated;
extern const Event TriggersCreated;

} // namespace EventCounter

namespace memgraph::query::v2 {
@@ -688,7 +693,7 @@ PullPlan::PullPlan(const std::shared_ptr<CachedPlan> plan, const Parameters &par
: plan_(plan),
cursor_(plan->plan().MakeCursor(execution_memory)),
frame_(plan->symbol_table().max_position(), execution_memory),
multi_frame_(plan->symbol_table().max_position(), kNumberOfFramesInMultiframe, execution_memory),
multi_frame_(plan->symbol_table().max_position(), FLAGS_default_multi_frame_size, execution_memory),
memory_limit_(memory_limit) {
ctx_.db_accessor = dba;
ctx_.symbol_table = plan->symbol_table();
@@ -704,7 +709,6 @@ PullPlan::PullPlan(const std::shared_ptr<CachedPlan> plan, const Parameters &par
ctx_.request_router = request_router;
ctx_.edge_ids_alloc = &interpreter_context->edge_ids_alloc;
}

std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::PullMultiple(AnyStream *stream, std::optional<int> n,
const std::vector<Symbol> &output_symbols,
std::map<std::string, TypedValue> *summary) {
@@ -732,10 +736,7 @@ std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::PullMultiple(AnyStrea
}

// Returns true if a result was pulled.
const auto pull_result = [&]() -> bool {
cursor_->PullMultiple(multi_frame_, ctx_);
return multi_frame_.HasValidFrame();
};
const auto pull_result = [&]() -> bool { return cursor_->PullMultiple(multi_frame_, ctx_); };

const auto stream_values = [&output_symbols, &stream](const Frame &frame) {
// TODO: The streamed values should also probably use the above memory.
@@ -755,13 +756,14 @@ std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::PullMultiple(AnyStrea
int i = 0;
if (has_unsent_results_ && !output_symbols.empty()) {
// stream unsent results from previous pull

auto iterator_for_valid_frame_only = multi_frame_.GetValidFramesReader();
for (const auto &frame : iterator_for_valid_frame_only) {
for (auto &frame : multi_frame_.GetValidFramesConsumer()) {
stream_values(frame);
frame.MakeInvalid();
++i;
if (i == n) {
break;
}
}
multi_frame_.MakeAllFramesInvalid();
}

for (; !n || i < n;) {
@@ -770,13 +772,17 @@ std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::PullMultiple(AnyStrea
}

if (!output_symbols.empty()) {
auto iterator_for_valid_frame_only = multi_frame_.GetValidFramesReader();
for (const auto &frame : iterator_for_valid_frame_only) {
for (auto &frame : multi_frame_.GetValidFramesConsumer()) {
stream_values(frame);
frame.MakeInvalid();
++i;
if (i == n) {
break;
}
}
} else {
multi_frame_.MakeAllFramesInvalid();
}
multi_frame_.MakeAllFramesInvalid();
}

// If we finished because we streamed the requested n results,
@@ -811,8 +817,7 @@ std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::PullMultiple(AnyStrea
std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::Pull(AnyStream *stream, std::optional<int> n,
const std::vector<Symbol> &output_symbols,
std::map<std::string, TypedValue> *summary) {
auto should_pull_multiple = false; // TODO on the long term, we will only use PullMultiple
if (should_pull_multiple) {
if (FLAGS_use_multi_frame) {
return PullMultiple(stream, n, output_symbols, summary);
}
// Set up temporary memory for a single Pull. Initial memory comes from the
@@ -906,34 +911,24 @@ using RWType = plan::ReadWriteTypeChecker::RWType;

InterpreterContext::InterpreterContext(storage::v3::Shard *db, const InterpreterConfig config,
const std::filesystem::path & /*data_directory*/,
io::Io<io::local_transport::LocalTransport> io,
std::unique_ptr<RequestRouterFactory> request_router_factory,
coordinator::Address coordinator_addr)
: db(db), config(config), io{std::move(io)}, coordinator_address{coordinator_addr} {}
: db(db),
config(config),
coordinator_address{coordinator_addr},
request_router_factory_{std::move(request_router_factory)} {}

Interpreter::Interpreter(InterpreterContext *interpreter_context) : interpreter_context_(interpreter_context) {
MG_ASSERT(interpreter_context_, "Interpreter context must not be NULL");

// TODO(tyler) make this deterministic so that it can be tested.
auto random_uuid = boost::uuids::uuid{boost::uuids::random_generator()()};
auto query_io = interpreter_context_->io.ForkLocal(random_uuid);
request_router_ =
interpreter_context_->request_router_factory_->CreateRequestRouter(interpreter_context_->coordinator_address);

request_router_ = std::make_unique<RequestRouter<io::local_transport::LocalTransport>>(
coordinator::CoordinatorClient<io::local_transport::LocalTransport>(
query_io, interpreter_context_->coordinator_address, std::vector{interpreter_context_->coordinator_address}),
std::move(query_io));
// Get edge ids
coordinator::CoordinatorWriteRequests requests{coordinator::AllocateEdgeIdBatchRequest{.batch_size = 1000000}};
io::rsm::WriteRequest<coordinator::CoordinatorWriteRequests> ww;
ww.operation = requests;
auto resp = interpreter_context_->io
.Request<io::rsm::WriteRequest<coordinator::CoordinatorWriteRequests>,
io::rsm::WriteResponse<coordinator::CoordinatorWriteResponses>>(
interpreter_context_->coordinator_address, ww)
.Wait();
if (resp.HasValue()) {
const auto alloc_edge_id_reps =
std::get<coordinator::AllocateEdgeIdBatchResponse>(resp.GetValue().message.write_return);
interpreter_context_->edge_ids_alloc = {alloc_edge_id_reps.low, alloc_edge_id_reps.high};
const auto edge_ids_alloc_min_max_pair =
request_router_->AllocateInitialEdgeIds(interpreter_context_->coordinator_address);
|
||||
if (edge_ids_alloc_min_max_pair) {
|
||||
interpreter_context_->edge_ids_alloc = {edge_ids_alloc_min_max_pair->first, edge_ids_alloc_min_max_pair->second};
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -16,7 +16,6 @@
|
||||
|
||||
#include "coordinator/coordinator.hpp"
|
||||
#include "coordinator/coordinator_client.hpp"
|
||||
#include "io/local_transport/local_transport.hpp"
|
||||
#include "io/transport.hpp"
|
||||
#include "query/v2/auth_checker.hpp"
|
||||
#include "query/v2/bindings/cypher_main_visitor.hpp"
|
||||
@ -172,7 +171,8 @@ struct PreparedQuery {
|
||||
struct InterpreterContext {
|
||||
explicit InterpreterContext(storage::v3::Shard *db, InterpreterConfig config,
|
||||
const std::filesystem::path &data_directory,
|
||||
io::Io<io::local_transport::LocalTransport> io, coordinator::Address coordinator_addr);
|
||||
std::unique_ptr<RequestRouterFactory> request_router_factory,
|
||||
coordinator::Address coordinator_addr);
|
||||
|
||||
storage::v3::Shard *db;
|
||||
|
||||
@ -188,26 +188,24 @@ struct InterpreterContext {
|
||||
const InterpreterConfig config;
|
||||
IdAllocator edge_ids_alloc;
|
||||
|
||||
// TODO (antaljanosbenjamin) Figure out an abstraction for io::Io to make it possible to construct an interpreter
|
||||
// context with a simulator transport without templatizing it.
|
||||
io::Io<io::local_transport::LocalTransport> io;
|
||||
coordinator::Address coordinator_address;
|
||||
std::unique_ptr<RequestRouterFactory> request_router_factory_;
|
||||
|
||||
storage::v3::LabelId NameToLabelId(std::string_view label_name) {
|
||||
return storage::v3::LabelId::FromUint(query_id_mapper.NameToId(label_name));
|
||||
return storage::v3::LabelId::FromUint(query_id_mapper_.NameToId(label_name));
|
||||
}
|
||||
|
||||
storage::v3::PropertyId NameToPropertyId(std::string_view property_name) {
|
||||
return storage::v3::PropertyId::FromUint(query_id_mapper.NameToId(property_name));
|
||||
return storage::v3::PropertyId::FromUint(query_id_mapper_.NameToId(property_name));
|
||||
}
|
||||
|
||||
storage::v3::EdgeTypeId NameToEdgeTypeId(std::string_view edge_type_name) {
|
||||
return storage::v3::EdgeTypeId::FromUint(query_id_mapper.NameToId(edge_type_name));
|
||||
return storage::v3::EdgeTypeId::FromUint(query_id_mapper_.NameToId(edge_type_name));
|
||||
}
|
||||
|
||||
private:
|
||||
// TODO Replace with local map of labels, properties and edge type ids
|
||||
storage::v3::NameIdMapper query_id_mapper;
|
||||
storage::v3::NameIdMapper query_id_mapper_;
|
||||
};
|
||||
|
||||
/// Function that is used to tell all active interpreters that they should stop
|
||||
@ -297,12 +295,15 @@ class Interpreter final {
|
||||
void Abort();
|
||||
|
||||
const RequestRouterInterface *GetRequestRouter() const { return request_router_.get(); }
|
||||
void InstallSimulatorTicker(std::function<bool()> &&tick_simulator) {
|
||||
request_router_->InstallSimulatorTicker(tick_simulator);
|
||||
}
|
||||
|
||||
private:
|
||||
struct QueryExecution {
|
||||
std::optional<PreparedQuery> prepared_query;
|
||||
utils::MonotonicBufferResource execution_memory{kExecutionMemoryBlockSize};
|
||||
utils::ResourceWithOutOfMemoryException execution_memory_with_exception{&execution_memory};
|
||||
std::optional<PreparedQuery> prepared_query;
|
||||
|
||||
std::map<std::string, TypedValue> summary;
|
||||
std::vector<Notification> notifications;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -17,6 +17,9 @@
|
||||
#include "query/v2/bindings/frame.hpp"
|
||||
#include "utils/pmr/vector.hpp"
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_uint64(default_multi_frame_size, 100, "Default size of MultiFrame");
|
||||
|
||||
namespace memgraph::query::v2 {
|
||||
|
||||
static_assert(std::forward_iterator<ValidFramesReader::Iterator>);
|
||||
@ -45,20 +48,26 @@ void MultiFrame::MakeAllFramesInvalid() noexcept {
|
||||
}
|
||||
|
||||
bool MultiFrame::HasValidFrame() const noexcept {
  return std::any_of(frames_.begin(), frames_.end(), [](auto &frame) { return frame.IsValid(); });
  return std::any_of(frames_.begin(), frames_.end(), [](const auto &frame) { return frame.IsValid(); });
}

bool MultiFrame::HasInvalidFrame() const noexcept {
  return std::any_of(frames_.rbegin(), frames_.rend(), [](const auto &frame) { return !frame.IsValid(); });
}

// NOLINTNEXTLINE (bugprone-exception-escape)
void MultiFrame::DefragmentValidFrames() noexcept {
  /*
  from: https://en.cppreference.com/w/cpp/algorithm/remove
  "Removing is done by shifting (by means of copy assignment (until C++11) / move assignment (since C++11)) the
  elements in the range in such a way that the elements that are not to be removed appear in the beginning of the
  range. Relative order of the elements that remain is preserved and the physical size of the container is unchanged."
  */

  // NOLINTNEXTLINE (bugprone-unused-return-value)
  std::remove_if(frames_.begin(), frames_.end(), [](auto &frame) { return !frame.IsValid(); });
  static constexpr auto kIsValid = [](const FrameWithValidity &frame) { return frame.IsValid(); };
  static constexpr auto kIsInvalid = [](const FrameWithValidity &frame) { return !frame.IsValid(); };
  auto first_invalid_frame = std::find_if(frames_.begin(), frames_.end(), kIsInvalid);
  auto following_first_valid = std::find_if(first_invalid_frame, frames_.end(), kIsValid);
  while (first_invalid_frame != frames_.end() && following_first_valid != frames_.end()) {
    std::swap(*first_invalid_frame, *following_first_valid);
    first_invalid_frame++;
    first_invalid_frame = std::find_if(first_invalid_frame, frames_.end(), kIsInvalid);
    following_first_valid++;
    following_first_valid = std::find_if(following_first_valid, frames_.end(), kIsValid);
  }
}
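The loop above is effectively a stable partition: every valid frame is brought to the front, in its original relative order, while invalidated slots sink to the back. A minimal, self-contained illustration of the same two-pointer walk, using an invented stand-in struct instead of the project's FrameWithValidity:

#include <algorithm>
#include <cassert>
#include <vector>

// Stand-in for FrameWithValidity: `valid` mirrors Frame::IsValid().
struct FakeFrame {
  int payload;
  bool valid;
};

// Same two-pointer walk as MultiFrame::DefragmentValidFrames above.
void Defragment(std::vector<FakeFrame> &frames) {
  auto is_valid = [](const FakeFrame &f) { return f.valid; };
  auto is_invalid = [](const FakeFrame &f) { return !f.valid; };
  auto first_invalid = std::find_if(frames.begin(), frames.end(), is_invalid);
  auto next_valid = std::find_if(first_invalid, frames.end(), is_valid);
  while (first_invalid != frames.end() && next_valid != frames.end()) {
    std::swap(*first_invalid, *next_valid);
    first_invalid = std::find_if(++first_invalid, frames.end(), is_invalid);
    next_valid = std::find_if(++next_valid, frames.end(), is_valid);
  }
}

int main() {
  std::vector<FakeFrame> frames{{1, false}, {2, true}, {3, false}, {4, true}, {5, true}};
  Defragment(frames);
  // Valid frames (payloads 2, 4, 5) are now at the front, in their original relative order.
  assert(frames[0].payload == 2 && frames[1].payload == 4 && frames[2].payload == 5);
  assert(!frames[3].valid && !frames[4].valid);
}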
|
||||
|
||||
ValidFramesReader MultiFrame::GetValidFramesReader() { return ValidFramesReader{*this}; }
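For reference, these accessors are meant to be range-iterated; the consumer variant hands out mutable frames so the caller can invalidate each one as it is drained, which is exactly how PullPlan::PullMultiple in this change streams results. A rough usage fragment, assuming a MultiFrame named multi_frame and a stream_values callback as in the interpreter code above:

// Read-only pass over the currently valid frames.
for (const auto &frame : multi_frame.GetValidFramesReader()) {
  // inspect `frame` without changing its validity
}

// Consuming pass: stream each valid frame, then free its slot for the next pull.
for (auto &frame : multi_frame.GetValidFramesConsumer()) {
  stream_values(frame);
  frame.MakeInvalid();
}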
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -13,10 +13,14 @@
|
||||
|
||||
#include <iterator>
|
||||
|
||||
#include <gflags/gflags.h>
|
||||
|
||||
#include "query/v2/bindings/frame.hpp"
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_uint64(default_multi_frame_size);
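For context, this header only declares the flag; the single DEFINE_uint64 lives in multiframe.cpp, which is the usual gflags split. A minimal sketch of the same pattern with an invented flag name:

// example.cpp -- the one translation unit that defines the flag.
#include <cstdint>
#include <gflags/gflags.h>
DEFINE_uint64(example_batch_size, 100, "Illustrative flag, not part of this diff");

// example.hpp -- every other translation unit only declares it and reads FLAGS_example_batch_size.
DECLARE_uint64(example_batch_size);

uint64_t BatchSize() { return FLAGS_example_batch_size; }  // overridable via --example_batch_size=1000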
|
||||
|
||||
namespace memgraph::query::v2 {
|
||||
constexpr uint64_t kNumberOfFramesInMultiframe = 1000; // TODO have it configurable
|
||||
|
||||
class ValidFramesConsumer;
|
||||
class ValidFramesModifier;
|
||||
@ -33,6 +37,7 @@ class MultiFrame {
|
||||
MultiFrame(size_t size_of_frame, size_t number_of_frames, utils::MemoryResource *execution_memory);
|
||||
~MultiFrame() = default;
|
||||
|
||||
// Assigning and moving the MultiFrame is not allowed if any accessor from the above ones are alive.
|
||||
MultiFrame(const MultiFrame &other);
|
||||
MultiFrame(MultiFrame &&other) noexcept;
|
||||
MultiFrame &operator=(const MultiFrame &other) = delete;
|
||||
@ -81,6 +86,7 @@ class MultiFrame {
|
||||
void MakeAllFramesInvalid() noexcept;
|
||||
|
||||
bool HasValidFrame() const noexcept;
|
||||
bool HasInvalidFrame() const noexcept;
|
||||
|
||||
inline utils::MemoryResource *GetMemoryResource() { return frames_[0].GetMemoryResource(); }
|
||||
|
||||
@ -96,9 +102,9 @@ class ValidFramesReader {
|
||||
|
||||
~ValidFramesReader() = default;
|
||||
ValidFramesReader(const ValidFramesReader &other) = delete;
|
||||
ValidFramesReader(ValidFramesReader &&other) noexcept = delete;
|
||||
ValidFramesReader(ValidFramesReader &&other) noexcept = default;
|
||||
ValidFramesReader &operator=(const ValidFramesReader &other) = delete;
|
||||
ValidFramesReader &operator=(ValidFramesReader &&other) noexcept = delete;
|
||||
ValidFramesReader &operator=(ValidFramesReader &&other) noexcept = default;
|
||||
|
||||
struct Iterator {
|
||||
using iterator_category = std::forward_iterator_tag;
|
||||
@ -146,9 +152,9 @@ class ValidFramesModifier {
|
||||
|
||||
~ValidFramesModifier() = default;
|
||||
ValidFramesModifier(const ValidFramesModifier &other) = delete;
|
||||
ValidFramesModifier(ValidFramesModifier &&other) noexcept = delete;
|
||||
ValidFramesModifier(ValidFramesModifier &&other) noexcept = default;
|
||||
ValidFramesModifier &operator=(const ValidFramesModifier &other) = delete;
|
||||
ValidFramesModifier &operator=(ValidFramesModifier &&other) noexcept = delete;
|
||||
ValidFramesModifier &operator=(ValidFramesModifier &&other) noexcept = default;
|
||||
|
||||
struct Iterator {
|
||||
using iterator_category = std::forward_iterator_tag;
|
||||
@ -201,9 +207,9 @@ class ValidFramesConsumer {
|
||||
|
||||
~ValidFramesConsumer() noexcept;
|
||||
ValidFramesConsumer(const ValidFramesConsumer &other) = delete;
|
||||
ValidFramesConsumer(ValidFramesConsumer &&other) noexcept = delete;
|
||||
ValidFramesConsumer(ValidFramesConsumer &&other) noexcept = default;
|
||||
ValidFramesConsumer &operator=(const ValidFramesConsumer &other) = delete;
|
||||
ValidFramesConsumer &operator=(ValidFramesConsumer &&other) noexcept = delete;
|
||||
ValidFramesConsumer &operator=(ValidFramesConsumer &&other) noexcept = default;
|
||||
|
||||
struct Iterator {
|
||||
using iterator_category = std::forward_iterator_tag;
|
||||
@ -255,9 +261,9 @@ class InvalidFramesPopulator {
|
||||
~InvalidFramesPopulator() = default;
|
||||
|
||||
InvalidFramesPopulator(const InvalidFramesPopulator &other) = delete;
|
||||
InvalidFramesPopulator(InvalidFramesPopulator &&other) noexcept = delete;
|
||||
InvalidFramesPopulator(InvalidFramesPopulator &&other) noexcept = default;
|
||||
InvalidFramesPopulator &operator=(const InvalidFramesPopulator &other) = delete;
|
||||
InvalidFramesPopulator &operator=(InvalidFramesPopulator &&other) noexcept = delete;
|
||||
InvalidFramesPopulator &operator=(InvalidFramesPopulator &&other) noexcept = default;
|
||||
|
||||
struct Iterator {
|
||||
using iterator_category = std::forward_iterator_tag;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -149,8 +149,6 @@ class CostEstimator : public HierarchicalLogicalOperatorVisitor {
|
||||
return true;
|
||||
}
|
||||
|
||||
// TODO: Cost estimate ScanAllById?
|
||||
|
||||
// For the given op first increments the cardinality and then cost.
|
||||
#define POST_VISIT_CARD_FIRST(NAME) \
|
||||
bool PostVisit(NAME &) override { \
|
||||
|
File diff suppressed because it is too large
@ -72,7 +72,23 @@ class Cursor {
|
||||
/// @throws QueryRuntimeException if something went wrong with execution
|
||||
virtual bool Pull(Frame &, ExecutionContext &) = 0;
|
||||
|
||||
  virtual void PullMultiple(MultiFrame &, ExecutionContext &) { LOG_FATAL("PullMultipleIsNotImplemented"); }
  /// Run an iteration of a @c LogicalOperator with a MultiFrame.
  ///
  /// Since operators may be chained, the iteration may pull results from
  /// multiple operators.
  ///
  /// @param MultiFrame May be read from or written to while performing the
  /// iteration.
  /// @param ExecutionContext Used to get the position of symbols in frame and
  /// other information.
  /// @return True if the operator populated at least one Frame of the MultiFrame;
  /// a return value of true therefore guarantees that the MultiFrame contains at
  /// least one valid Frame.
  ///
  /// @throws QueryRuntimeException if something went wrong with execution
  virtual bool PullMultiple(MultiFrame &, ExecutionContext &) {
    MG_ASSERT(false, "PullMultipleIsNotImplemented");
    return false;
  }

  virtual void PushDown(const MultiFrame &) { MG_ASSERT(false, "PushDownIsNotImplemented"); }
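To make the contract above concrete, here is a hedged sketch of how a derived cursor could override PullMultiple. It is simplified (no memory resources, signal checks or profiling) and assumes a GetValidFramesModifier() accessor matching the ValidFramesModifier class declared in multiframe.hpp:

class ExampleCursor : public Cursor {
 public:
  bool PullMultiple(MultiFrame &multi_frame, ExecutionContext &context) override {
    // Let the input operator populate the MultiFrame first.
    if (!input_cursor_->PullMultiple(multi_frame, context)) {
      return false;  // nothing was produced upstream
    }
    // Post-process every valid frame in place.
    for (auto &frame : multi_frame.GetValidFramesModifier()) {
      // ... evaluate expressions and write output symbols into `frame` ...
    }
    // True only if at least one valid Frame is left in the MultiFrame.
    return multi_frame.HasValidFrame();
  }

 private:
  UniqueCursorPtr input_cursor_;  // as in the other cursors in this file
};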
|
||||
|
||||
/// Resets the Cursor to its initial state.
|
||||
virtual void Reset() = 0;
|
||||
@ -113,7 +129,7 @@ class ScanAllByLabel;
|
||||
class ScanAllByLabelPropertyRange;
|
||||
class ScanAllByLabelPropertyValue;
|
||||
class ScanAllByLabelProperty;
|
||||
class ScanAllById;
|
||||
class ScanByPrimaryKey;
|
||||
class Expand;
|
||||
class ExpandVariable;
|
||||
class ConstructNamedPath;
|
||||
@ -144,7 +160,7 @@ class Foreach;
|
||||
using LogicalOperatorCompositeVisitor = utils::CompositeVisitor<
|
||||
Once, CreateNode, CreateExpand, ScanAll, ScanAllByLabel,
|
||||
ScanAllByLabelPropertyRange, ScanAllByLabelPropertyValue,
|
||||
ScanAllByLabelProperty, ScanAllById,
|
||||
ScanAllByLabelProperty, ScanByPrimaryKey,
|
||||
Expand, ExpandVariable, ConstructNamedPath, Filter, Produce, Delete,
|
||||
SetProperty, SetProperties, SetLabels, RemoveProperty, RemoveLabels,
|
||||
EdgeUniquenessFilter, Accumulate, Aggregate, Skip, Limit, OrderBy, Merge,
|
||||
@ -335,12 +351,14 @@ and false on every following Pull.")
|
||||
class OnceCursor : public Cursor {
|
||||
public:
|
||||
OnceCursor() {}
|
||||
void PullMultiple(MultiFrame &, ExecutionContext &) override;
|
||||
bool PullMultiple(MultiFrame &, ExecutionContext &) override;
|
||||
void PushDown(const MultiFrame&) override;
|
||||
bool Pull(Frame &, ExecutionContext &) override;
|
||||
void Shutdown() override;
|
||||
void Reset() override;
|
||||
|
||||
private:
|
||||
std::optional<MultiFrame> pushed_down_multi_frame_;
|
||||
bool did_pull_{false};
|
||||
};
|
||||
cpp<#)
|
||||
@ -845,19 +863,21 @@ given label and property.
|
||||
(:serialize (:slk))
|
||||
(:clone))
|
||||
|
||||
|
||||
|
||||
(lcp:define-class scan-all-by-id (scan-all)
|
||||
((expression "Expression *" :scope :public
|
||||
(lcp:define-class scan-by-primary-key (scan-all)
|
||||
((label "::storage::v3::LabelId" :scope :public)
|
||||
(primary-key "std::vector<Expression*>" :scope :public)
|
||||
(expression "Expression *" :scope :public
|
||||
:slk-save #'slk-save-ast-pointer
|
||||
:slk-load (slk-load-ast-pointer "Expression")))
|
||||
(:documentation
|
||||
"ScanAll producing a single node with ID equal to evaluated expression")
|
||||
"ScanAll producing a single node with specified by the label and primary key")
|
||||
(:public
|
||||
#>cpp
|
||||
ScanAllById() {}
|
||||
ScanAllById(const std::shared_ptr<LogicalOperator> &input,
|
||||
Symbol output_symbol, Expression *expression,
|
||||
ScanByPrimaryKey() {}
|
||||
ScanByPrimaryKey(const std::shared_ptr<LogicalOperator> &input,
|
||||
Symbol output_symbol,
|
||||
storage::v3::LabelId label,
|
||||
std::vector<query::v2::Expression*> primary_key,
|
||||
storage::v3::View view = storage::v3::View::OLD);
|
||||
|
||||
bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override;
|
||||
@ -1160,6 +1180,7 @@ a boolean value.")
|
||||
public:
|
||||
FilterCursor(const Filter &, utils::MemoryResource *);
|
||||
bool Pull(Frame &, ExecutionContext &) override;
|
||||
bool PullMultiple(MultiFrame &, ExecutionContext &) override;
|
||||
void Shutdown() override;
|
||||
void Reset() override;
|
||||
|
||||
@ -1211,7 +1232,7 @@ RETURN clause) the Produce's pull succeeds exactly once.")
|
||||
public:
|
||||
ProduceCursor(const Produce &, utils::MemoryResource *);
|
||||
bool Pull(Frame &, ExecutionContext &) override;
|
||||
void PullMultiple(MultiFrame &, ExecutionContext &) override;
|
||||
bool PullMultiple(MultiFrame &, ExecutionContext &) override;
|
||||
void Shutdown() override;
|
||||
void Reset() override;
|
||||
|
||||
@ -1259,6 +1280,7 @@ Has a flag for using DETACH DELETE when deleting vertices.")
|
||||
public:
|
||||
DeleteCursor(const Delete &, utils::MemoryResource *);
|
||||
bool Pull(Frame &, ExecutionContext &) override;
|
||||
bool PullMultiple(MultiFrame &, ExecutionContext &) override;
|
||||
void Shutdown() override;
|
||||
void Reset() override;
|
||||
|
||||
@ -1552,6 +1574,7 @@ edge lists).")
|
||||
EdgeUniquenessFilterCursor(const EdgeUniquenessFilter &,
|
||||
utils::MemoryResource *);
|
||||
bool Pull(Frame &, ExecutionContext &) override;
|
||||
bool PullMultiple(MultiFrame &, ExecutionContext &) override;
|
||||
void Shutdown() override;
|
||||
void Reset() override;
|
||||
|
||||
@ -1948,27 +1971,6 @@ and returns true, once.")
|
||||
input_ = input;
|
||||
}
|
||||
cpp<#)
|
||||
(:private
|
||||
#>cpp
|
||||
class OptionalCursor : public Cursor {
|
||||
public:
|
||||
OptionalCursor(const Optional &, utils::MemoryResource *);
|
||||
bool Pull(Frame &, ExecutionContext &) override;
|
||||
void Shutdown() override;
|
||||
void Reset() override;
|
||||
|
||||
private:
|
||||
const Optional &self_;
|
||||
const UniqueCursorPtr input_cursor_;
|
||||
const UniqueCursorPtr optional_cursor_;
|
||||
// indicates if the next Pull from this cursor should
|
||||
// perform a Pull from the input_cursor_
|
||||
// this is true when:
|
||||
// - first pulling from this Cursor
|
||||
// - previous Pull from this cursor exhausted the optional_cursor_
|
||||
bool pull_input_{true};
|
||||
};
|
||||
cpp<#)
|
||||
(:serialize (:slk))
|
||||
(:clone))
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -86,10 +86,10 @@ bool PlanPrinter::PreVisit(query::v2::plan::ScanAllByLabelProperty &op) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PlanPrinter::PreVisit(ScanAllById &op) {
|
||||
bool PlanPrinter::PreVisit(query::v2::plan::ScanByPrimaryKey &op) {
|
||||
WithPrintLn([&](auto &out) {
|
||||
out << "* ScanAllById"
|
||||
<< " (" << op.output_symbol_.name() << ")";
|
||||
out << "* ScanByPrimaryKey"
|
||||
<< " (" << op.output_symbol_.name() << " :" << request_router_->LabelToName(op.label_) << ")";
|
||||
});
|
||||
return true;
|
||||
}
|
||||
@ -487,12 +487,15 @@ bool PlanToJsonVisitor::PreVisit(ScanAllByLabelProperty &op) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PlanToJsonVisitor::PreVisit(ScanAllById &op) {
|
||||
bool PlanToJsonVisitor::PreVisit(ScanByPrimaryKey &op) {
|
||||
json self;
|
||||
self["name"] = "ScanAllById";
|
||||
self["name"] = "ScanByPrimaryKey";
|
||||
self["label"] = ToJson(op.label_, *request_router_);
|
||||
self["output_symbol"] = ToJson(op.output_symbol_);
|
||||
|
||||
op.input_->Accept(*this);
|
||||
self["input"] = PopOutput();
|
||||
|
||||
output_ = std::move(self);
|
||||
return false;
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -67,7 +67,7 @@ class PlanPrinter : public virtual HierarchicalLogicalOperatorVisitor {
|
||||
bool PreVisit(ScanAllByLabelPropertyValue &) override;
|
||||
bool PreVisit(ScanAllByLabelPropertyRange &) override;
|
||||
bool PreVisit(ScanAllByLabelProperty &) override;
|
||||
bool PreVisit(ScanAllById &) override;
|
||||
bool PreVisit(ScanByPrimaryKey & /*unused*/) override;
|
||||
|
||||
bool PreVisit(Expand &) override;
|
||||
bool PreVisit(ExpandVariable &) override;
|
||||
@ -194,7 +194,7 @@ class PlanToJsonVisitor : public virtual HierarchicalLogicalOperatorVisitor {
|
||||
bool PreVisit(ScanAllByLabelPropertyRange &) override;
|
||||
bool PreVisit(ScanAllByLabelPropertyValue &) override;
|
||||
bool PreVisit(ScanAllByLabelProperty &) override;
|
||||
bool PreVisit(ScanAllById &) override;
|
||||
bool PreVisit(ScanByPrimaryKey & /*unused*/) override;
|
||||
|
||||
bool PreVisit(Produce &) override;
|
||||
bool PreVisit(Accumulate &) override;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -11,10 +11,12 @@
|
||||
|
||||
#include "query/v2/plan/read_write_type_checker.hpp"
|
||||
|
||||
#define PRE_VISIT(TOp, RWType, continue_visiting) \
|
||||
bool ReadWriteTypeChecker::PreVisit(TOp &op) { \
|
||||
UpdateType(RWType); \
|
||||
return continue_visiting; \
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
|
||||
#define PRE_VISIT(TOp, RWType, continue_visiting) \
|
||||
/*NOLINTNEXTLINE(bugprone-macro-parentheses)*/ \
|
||||
bool ReadWriteTypeChecker::PreVisit(TOp & /*op*/) { \
|
||||
UpdateType(RWType); \
|
||||
return continue_visiting; \
|
||||
}
|
||||
|
||||
namespace memgraph::query::v2::plan {
|
||||
@ -35,7 +37,7 @@ PRE_VISIT(ScanAllByLabel, RWType::R, true)
|
||||
PRE_VISIT(ScanAllByLabelPropertyRange, RWType::R, true)
|
||||
PRE_VISIT(ScanAllByLabelPropertyValue, RWType::R, true)
|
||||
PRE_VISIT(ScanAllByLabelProperty, RWType::R, true)
|
||||
PRE_VISIT(ScanAllById, RWType::R, true)
|
||||
PRE_VISIT(ScanByPrimaryKey, RWType::R, true)
|
||||
|
||||
PRE_VISIT(Expand, RWType::R, true)
|
||||
PRE_VISIT(ExpandVariable, RWType::R, true)
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -59,7 +59,7 @@ class ReadWriteTypeChecker : public virtual HierarchicalLogicalOperatorVisitor {
|
||||
bool PreVisit(ScanAllByLabelPropertyValue &) override;
|
||||
bool PreVisit(ScanAllByLabelPropertyRange &) override;
|
||||
bool PreVisit(ScanAllByLabelProperty &) override;
|
||||
bool PreVisit(ScanAllById &) override;
|
||||
bool PreVisit(ScanByPrimaryKey & /*unused*/) override;
|
||||
|
||||
bool PreVisit(Expand &) override;
|
||||
bool PreVisit(ExpandVariable &) override;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -13,7 +13,8 @@
|
||||
|
||||
#include "utils/flag_validation.hpp"
|
||||
|
||||
DEFINE_VALIDATED_HIDDEN_int64(query_vertex_count_to_expand_existing, 10,
|
||||
// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_VALIDATED_HIDDEN_int64(query_v2_vertex_count_to_expand_existing, 10,
|
||||
"Maximum count of indexed vertices which provoke "
|
||||
"indexed lookup and then expand to existing, instead of "
|
||||
"a regular expand. Default is 10, to turn off use -1.",
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -25,10 +25,13 @@
|
||||
|
||||
#include <gflags/gflags.h>
|
||||
|
||||
#include "query/v2/frontend/ast/ast.hpp"
|
||||
#include "query/v2/plan/operator.hpp"
|
||||
#include "query/v2/plan/preprocess.hpp"
|
||||
#include "storage/v3/id_types.hpp"
|
||||
|
||||
DECLARE_int64(query_vertex_count_to_expand_existing);
|
||||
// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_int64(query_v2_vertex_count_to_expand_existing);
|
||||
|
||||
namespace memgraph::query::v2::plan {
|
||||
|
||||
@ -98,7 +101,7 @@ class IndexLookupRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
return true;
|
||||
}
|
||||
ScanAll dst_scan(expand.input(), expand.common_.node_symbol, expand.view_);
|
||||
auto indexed_scan = GenScanByIndex(dst_scan, FLAGS_query_vertex_count_to_expand_existing);
|
||||
auto indexed_scan = GenScanByIndex(dst_scan, FLAGS_query_v2_vertex_count_to_expand_existing);
|
||||
if (indexed_scan) {
|
||||
expand.set_input(std::move(indexed_scan));
|
||||
expand.common_.existing_node = true;
|
||||
@ -127,7 +130,7 @@ class IndexLookupRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
// unconditionally creating an indexed scan.
|
||||
indexed_scan = GenScanByIndex(dst_scan);
|
||||
} else {
|
||||
indexed_scan = GenScanByIndex(dst_scan, FLAGS_query_vertex_count_to_expand_existing);
|
||||
indexed_scan = GenScanByIndex(dst_scan, FLAGS_query_v2_vertex_count_to_expand_existing);
|
||||
}
|
||||
if (indexed_scan) {
|
||||
expand.set_input(std::move(indexed_scan));
|
||||
@ -271,11 +274,12 @@ class IndexLookupRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(ScanAllById &op) override {
|
||||
bool PreVisit(ScanByPrimaryKey &op) override {
|
||||
prev_ops_.push_back(&op);
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ScanAllById &) override {
|
||||
|
||||
bool PostVisit(ScanByPrimaryKey & /*unused*/) override {
|
||||
prev_ops_.pop_back();
|
||||
return true;
|
||||
}
|
||||
@ -487,6 +491,12 @@ class IndexLookupRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
|
||||
storage::v3::PropertyId GetProperty(PropertyIx prop) { return db_->NameToProperty(prop.name); }
|
||||
|
||||
void EraseLabelFilters(const memgraph::query::v2::Symbol &node_symbol, memgraph::query::v2::LabelIx prim_label) {
|
||||
std::vector<query::v2::Expression *> removed_expressions;
|
||||
filters_.EraseLabelFilter(node_symbol, prim_label, &removed_expressions);
|
||||
filter_exprs_for_removal_.insert(removed_expressions.begin(), removed_expressions.end());
|
||||
}
|
||||
|
||||
std::optional<LabelIx> FindBestLabelIndex(const std::unordered_set<LabelIx> &labels) {
|
||||
MG_ASSERT(!labels.empty(), "Trying to find the best label without any labels.");
|
||||
std::optional<LabelIx> best_label;
|
||||
@ -559,31 +569,84 @@ class IndexLookupRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
const auto &view = scan.view_;
|
||||
const auto &modified_symbols = scan.ModifiedSymbols(*symbol_table_);
|
||||
std::unordered_set<Symbol> bound_symbols(modified_symbols.begin(), modified_symbols.end());
|
||||
auto are_bound = [&bound_symbols](const auto &used_symbols) {
|
||||
for (const auto &used_symbol : used_symbols) {
|
||||
if (!utils::Contains(bound_symbols, used_symbol)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
};
|
||||
// First, try to see if we can find a vertex by ID.
|
||||
if (!max_vertex_count || *max_vertex_count >= 1) {
|
||||
for (const auto &filter : filters_.IdFilters(node_symbol)) {
|
||||
if (filter.id_filter->is_symbol_in_value_ || !are_bound(filter.used_symbols)) continue;
|
||||
auto *value = filter.id_filter->value_;
|
||||
filter_exprs_for_removal_.insert(filter.expression);
|
||||
filters_.EraseFilter(filter);
|
||||
return std::make_unique<ScanAllById>(input, node_symbol, value, view);
|
||||
}
|
||||
}
|
||||
// Now try to see if we can use label+property index. If not, try to use
|
||||
// just the label index.
|
||||
|
||||
// Try to see if we can use label + primary-key or label + property index.
|
||||
// If not, try to use just the label index.
|
||||
const auto labels = filters_.FilteredLabels(node_symbol);
|
||||
if (labels.empty()) {
|
||||
// Without labels, we cannot generate any indexed ScanAll.
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// First, try to see if we can find a vertex based on the possibly
|
||||
// supplied primary key.
|
||||
auto property_filters = filters_.PropertyFilters(node_symbol);
|
||||
query::v2::LabelIx prim_label;
|
||||
std::vector<std::pair<query::v2::Expression *, query::v2::plan::FilterInfo>> primary_key;
|
||||
|
||||
auto extract_primary_key = [this](storage::v3::LabelId label,
|
||||
std::vector<query::v2::plan::FilterInfo> property_filters)
|
||||
-> std::vector<std::pair<query::v2::Expression *, query::v2::plan::FilterInfo>> {
|
||||
std::vector<std::pair<query::v2::Expression *, query::v2::plan::FilterInfo>> pk_temp;
|
||||
std::vector<std::pair<query::v2::Expression *, query::v2::plan::FilterInfo>> pk;
|
||||
std::vector<memgraph::storage::v3::SchemaProperty> schema = db_->GetSchemaForLabel(label);
|
||||
|
||||
std::vector<storage::v3::PropertyId> schema_properties;
|
||||
schema_properties.reserve(schema.size());
|
||||
|
||||
std::transform(schema.begin(), schema.end(), std::back_inserter(schema_properties),
|
||||
[](const auto &schema_elem) { return schema_elem.property_id; });
|
||||
|
||||
for (const auto &property_filter : property_filters) {
|
||||
if (property_filter.property_filter->type_ != PropertyFilter::Type::EQUAL) {
|
||||
continue;
|
||||
}
|
||||
const auto &property_id = db_->NameToProperty(property_filter.property_filter->property_.name);
|
||||
if (std::find(schema_properties.begin(), schema_properties.end(), property_id) != schema_properties.end()) {
|
||||
pk_temp.emplace_back(std::make_pair(property_filter.expression, property_filter));
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure pk is in the same order as schema_properties.
|
||||
for (const auto &schema_prop : schema_properties) {
|
||||
for (auto &pk_temp_prop : pk_temp) {
|
||||
const auto &property_id = db_->NameToProperty(pk_temp_prop.second.property_filter->property_.name);
|
||||
if (schema_prop == property_id) {
|
||||
pk.push_back(pk_temp_prop);
|
||||
}
|
||||
}
|
||||
}
|
||||
MG_ASSERT(pk.size() == pk_temp.size(),
|
||||
"The two vectors should represent the same primary key with a possibly different order of contained "
|
||||
"elements.");
|
||||
|
||||
return pk.size() == schema_properties.size()
|
||||
? pk
|
||||
: std::vector<std::pair<query::v2::Expression *, query::v2::plan::FilterInfo>>{};
|
||||
};
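In short, the lambda keeps only equality filters whose properties belong to the label's schema, reorders them to the schema's property order, and yields a non-empty result only when the whole primary key is covered. A stripped-down, self-contained illustration of that matching step over plain property ids (types invented for the example):

#include <algorithm>
#include <cassert>
#include <optional>
#include <vector>

using PropertyId = int;

// Keep only filtered properties that are part of the schema, in schema order;
// return std::nullopt unless the whole primary key is covered.
std::optional<std::vector<PropertyId>> ExtractPrimaryKey(const std::vector<PropertyId> &schema,
                                                         const std::vector<PropertyId> &equality_filters) {
  std::vector<PropertyId> pk;
  for (const auto schema_prop : schema) {
    if (std::find(equality_filters.begin(), equality_filters.end(), schema_prop) != equality_filters.end()) {
      pk.push_back(schema_prop);
    }
  }
  if (pk.size() != schema.size()) return std::nullopt;
  return pk;
}

int main() {
  // Schema (primary key) is {10, 20}; the query filters on 20, 10 and 30.
  assert(ExtractPrimaryKey({10, 20}, {20, 10, 30}).value() == (std::vector<PropertyId>{10, 20}));
  // A missing primary-key property means no ScanByPrimaryKey is planned.
  assert(!ExtractPrimaryKey({10, 20}, {20}).has_value());
}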
|
||||
|
||||
if (!property_filters.empty()) {
|
||||
for (const auto &label : labels) {
|
||||
if (db_->PrimaryLabelExists(GetLabel(label))) {
|
||||
prim_label = label;
|
||||
primary_key = extract_primary_key(GetLabel(prim_label), property_filters);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!primary_key.empty()) {
|
||||
// Mark the expressions so they won't be used for an additional, unnecessary filter.
|
||||
for (const auto &primary_property : primary_key) {
|
||||
filter_exprs_for_removal_.insert(primary_property.first);
|
||||
filters_.EraseFilter(primary_property.second);
|
||||
}
|
||||
EraseLabelFilters(node_symbol, prim_label);
|
||||
std::vector<query::v2::Expression *> pk_expressions;
|
||||
std::transform(primary_key.begin(), primary_key.end(), std::back_inserter(pk_expressions),
|
||||
[](const auto &exp) { return exp.second.property_filter->value_; });
|
||||
return std::make_unique<ScanByPrimaryKey>(input, node_symbol, GetLabel(prim_label), pk_expressions);
|
||||
}
|
||||
}
|
||||
|
||||
auto found_index = FindBestLabelPropertyIndex(node_symbol, bound_symbols);
|
||||
if (found_index &&
|
||||
// Use label+property index if we satisfy max_vertex_count.
|
||||
@ -597,9 +660,7 @@ class IndexLookupRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
filter_exprs_for_removal_.insert(found_index->filter.expression);
|
||||
}
|
||||
filters_.EraseFilter(found_index->filter);
|
||||
std::vector<Expression *> removed_expressions;
|
||||
filters_.EraseLabelFilter(node_symbol, found_index->label, &removed_expressions);
|
||||
filter_exprs_for_removal_.insert(removed_expressions.begin(), removed_expressions.end());
|
||||
EraseLabelFilters(node_symbol, found_index->label);
|
||||
if (prop_filter.lower_bound_ || prop_filter.upper_bound_) {
|
||||
return std::make_unique<ScanAllByLabelPropertyRange>(
|
||||
input, node_symbol, GetLabel(found_index->label), GetProperty(prop_filter.property_),
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -17,7 +17,8 @@
|
||||
#include "utils/flag_validation.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
|
||||
DEFINE_VALIDATED_HIDDEN_uint64(query_max_plans, 1000U, "Maximum number of generated plans for a query.",
|
||||
// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_VALIDATED_HIDDEN_uint64(query_v2_max_plans, 1000U, "Maximum number of generated plans for a query.",
|
||||
FLAG_IN_RANGE(1, std::numeric_limits<std::uint64_t>::max()));
|
||||
|
||||
namespace memgraph::query::v2::plan::impl {
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -18,7 +18,8 @@
|
||||
|
||||
#include "query/v2/plan/rule_based_planner.hpp"
|
||||
|
||||
DECLARE_uint64(query_max_plans);
|
||||
// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_uint64(query_v2_max_plans);
|
||||
|
||||
namespace memgraph::query::v2::plan {
|
||||
|
||||
@ -310,7 +311,7 @@ class VariableStartPlanner {
|
||||
for (const auto &query_part : query_parts) {
|
||||
alternative_query_parts.emplace_back(impl::VaryQueryPartMatching(query_part, symbol_table));
|
||||
}
|
||||
return iter::slice(MakeCartesianProduct(std::move(alternative_query_parts)), 0UL, FLAGS_query_max_plans);
|
||||
return iter::slice(MakeCartesianProduct(std::move(alternative_query_parts)), 0UL, FLAGS_query_v2_max_plans);
|
||||
}
|
||||
|
||||
public:
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -12,14 +12,17 @@
|
||||
/// @file
|
||||
#pragma once
|
||||
|
||||
#include <iterator>
|
||||
#include <optional>
|
||||
|
||||
#include "query/v2/bindings/typed_value.hpp"
|
||||
#include "query/v2/plan/preprocess.hpp"
|
||||
#include "query/v2/request_router.hpp"
|
||||
#include "storage/v3/conversions.hpp"
|
||||
#include "storage/v3/id_types.hpp"
|
||||
#include "storage/v3/property_value.hpp"
|
||||
#include "utils/bound.hpp"
|
||||
#include "utils/exceptions.hpp"
|
||||
#include "utils/fnv.hpp"
|
||||
|
||||
namespace memgraph::query::v2::plan {
|
||||
@ -52,11 +55,16 @@ class VertexCountCache {
|
||||
return 1;
|
||||
}
|
||||
|
||||
// For now return true if label is primary label
|
||||
bool LabelIndexExists(storage::v3::LabelId label) { return request_router_->IsPrimaryLabel(label); }
|
||||
bool LabelIndexExists(storage::v3::LabelId label) { return PrimaryLabelExists(label); }
|
||||
|
||||
bool PrimaryLabelExists(storage::v3::LabelId label) { return request_router_->IsPrimaryLabel(label); }
|
||||
|
||||
bool LabelPropertyIndexExists(storage::v3::LabelId /*label*/, storage::v3::PropertyId /*property*/) { return false; }
|
||||
|
||||
const std::vector<memgraph::storage::v3::SchemaProperty> &GetSchemaForLabel(storage::v3::LabelId label) {
|
||||
return request_router_->GetSchemaForLabel(label);
|
||||
}
|
||||
|
||||
RequestRouterInterface *request_router_;
|
||||
};
|
||||
|
||||
|
@ -24,14 +24,18 @@
|
||||
#include <stdexcept>
|
||||
#include <thread>
|
||||
#include <unordered_map>
|
||||
#include <variant>
|
||||
#include <vector>
|
||||
|
||||
#include <boost/uuid/uuid.hpp>
|
||||
|
||||
#include "coordinator/coordinator.hpp"
|
||||
#include "coordinator/coordinator_client.hpp"
|
||||
#include "coordinator/coordinator_rsm.hpp"
|
||||
#include "coordinator/shard_map.hpp"
|
||||
#include "io/address.hpp"
|
||||
#include "io/errors.hpp"
|
||||
#include "io/local_transport/local_transport.hpp"
|
||||
#include "io/notifier.hpp"
|
||||
#include "io/rsm/raft.hpp"
|
||||
#include "io/rsm/rsm_client.hpp"
|
||||
@ -113,7 +117,11 @@ class RequestRouterInterface {
|
||||
virtual std::optional<storage::v3::EdgeTypeId> MaybeNameToEdgeType(const std::string &name) const = 0;
|
||||
virtual std::optional<storage::v3::LabelId> MaybeNameToLabel(const std::string &name) const = 0;
|
||||
virtual bool IsPrimaryLabel(storage::v3::LabelId label) const = 0;
|
||||
virtual bool IsPrimaryKey(storage::v3::LabelId primary_label, storage::v3::PropertyId property) const = 0;
|
||||
virtual bool IsPrimaryProperty(storage::v3::LabelId primary_label, storage::v3::PropertyId property) const = 0;
|
||||
|
||||
virtual std::optional<std::pair<uint64_t, uint64_t>> AllocateInitialEdgeIds(io::Address coordinator_address) = 0;
|
||||
virtual void InstallSimulatorTicker(std::function<bool()> tick_simulator) = 0;
|
||||
virtual const std::vector<coordinator::SchemaProperty> &GetSchemaForLabel(storage::v3::LabelId label) const = 0;
|
||||
};
|
||||
|
||||
// TODO(kostasrim)rename this class template
|
||||
@ -138,7 +146,7 @@ class RequestRouter : public RequestRouterInterface {
|
||||
|
||||
~RequestRouter() override {}
|
||||
|
||||
void InstallSimulatorTicker(std::function<bool()> tick_simulator) {
|
||||
void InstallSimulatorTicker(std::function<bool()> tick_simulator) override {
|
||||
notifier_.InstallSimulatorTicker(tick_simulator);
|
||||
}
|
||||
|
||||
@ -223,7 +231,7 @@ class RequestRouter : public RequestRouterInterface {
|
||||
return edge_types_.IdToName(id.AsUint());
|
||||
}
|
||||
|
||||
bool IsPrimaryKey(storage::v3::LabelId primary_label, storage::v3::PropertyId property) const override {
|
||||
bool IsPrimaryProperty(storage::v3::LabelId primary_label, storage::v3::PropertyId property) const override {
|
||||
const auto schema_it = shards_map_.schemas.find(primary_label);
|
||||
MG_ASSERT(schema_it != shards_map_.schemas.end(), "Invalid primary label id: {}", primary_label.AsUint());
|
||||
|
||||
@ -232,12 +240,17 @@ class RequestRouter : public RequestRouterInterface {
|
||||
}) != schema_it->second.end();
|
||||
}
|
||||
|
||||
const std::vector<coordinator::SchemaProperty> &GetSchemaForLabel(storage::v3::LabelId label) const override {
|
||||
return shards_map_.schemas.at(label);
|
||||
}
|
||||
|
||||
bool IsPrimaryLabel(storage::v3::LabelId label) const override { return shards_map_.label_spaces.contains(label); }
|
||||
|
||||
// TODO(kostasrim) Simplify return result
|
||||
std::vector<VertexAccessor> ScanVertices(std::optional<std::string> label) override {
|
||||
// create requests
|
||||
std::vector<ShardRequestState<msgs::ScanVerticesRequest>> requests_to_be_sent = RequestsForScanVertices(label);
|
||||
auto requests_to_be_sent = RequestsForScanVertices(label);
|
||||
|
||||
spdlog::trace("created {} ScanVertices requests", requests_to_be_sent.size());
|
||||
|
||||
// begin all requests in parallel
|
||||
@ -310,8 +323,8 @@ class RequestRouter : public RequestRouterInterface {
|
||||
io::ReadinessToken readiness_token{i};
|
||||
auto &storage_client = GetStorageClientForShard(request.shard);
|
||||
msgs::WriteRequests req = request.request;
|
||||
storage_client.SendAsyncWriteRequest(req, notifier_, readiness_token);
|
||||
running_requests.emplace(readiness_token.GetId(), request);
|
||||
storage_client.SendAsyncWriteRequest(std::move(req), notifier_, readiness_token);
|
||||
running_requests.emplace(readiness_token.GetId(), std::move(request));
|
||||
}
|
||||
|
||||
// drive requests to completion
|
||||
@ -326,7 +339,8 @@ class RequestRouter : public RequestRouterInterface {
|
||||
// must be fetched again with an ExpandOne(Edges.dst)
|
||||
|
||||
// create requests
|
||||
std::vector<ShardRequestState<msgs::ExpandOneRequest>> requests_to_be_sent = RequestsForExpandOne(request);
|
||||
std::vector<ShardRequestState<msgs::ExpandOneRequest>> requests_to_be_sent =
|
||||
RequestsForExpandOne(std::move(request));
|
||||
|
||||
// begin all requests in parallel
|
||||
RunningRequests<msgs::ExpandOneRequest> running_requests = {};
|
||||
@ -336,8 +350,8 @@ class RequestRouter : public RequestRouterInterface {
|
||||
io::ReadinessToken readiness_token{i};
|
||||
auto &storage_client = GetStorageClientForShard(request.shard);
|
||||
msgs::ReadRequests req = request.request;
|
||||
storage_client.SendAsyncReadRequest(req, notifier_, readiness_token);
|
||||
running_requests.emplace(readiness_token.GetId(), request);
|
||||
storage_client.SendAsyncReadRequest(std::move(req), notifier_, readiness_token);
|
||||
running_requests.emplace(readiness_token.GetId(), std::move(request));
|
||||
}
|
||||
|
||||
// drive requests to completion
|
||||
@ -360,6 +374,7 @@ class RequestRouter : public RequestRouterInterface {
|
||||
}
|
||||
|
||||
std::vector<msgs::GetPropertiesResultRow> GetProperties(msgs::GetPropertiesRequest requests) override {
|
||||
requests.transaction_id = transaction_id_;
|
||||
// create requests
|
||||
std::vector<ShardRequestState<msgs::GetPropertiesRequest>> requests_to_be_sent =
|
||||
RequestsForGetProperties(std::move(requests));
|
||||
@ -372,8 +387,8 @@ class RequestRouter : public RequestRouterInterface {
|
||||
io::ReadinessToken readiness_token{i};
|
||||
auto &storage_client = GetStorageClientForShard(request.shard);
|
||||
msgs::ReadRequests req = request.request;
|
||||
storage_client.SendAsyncReadRequest(req, notifier_, readiness_token);
|
||||
running_requests.emplace(readiness_token.GetId(), request);
|
||||
storage_client.SendAsyncReadRequest(std::move(req), notifier_, readiness_token);
|
||||
running_requests.emplace(readiness_token.GetId(), std::move(request));
|
||||
}
|
||||
|
||||
// drive requests to completion
|
||||
@ -489,6 +504,7 @@ class RequestRouter : public RequestRouterInterface {
|
||||
|
||||
msgs::ScanVerticesRequest request;
|
||||
request.transaction_id = transaction_id_;
|
||||
request.props_to_return.emplace();
|
||||
request.start_id.second = storage::conversions::ConvertValueVector(key);
|
||||
|
||||
ShardRequestState<msgs::ScanVerticesRequest> shard_request_state{
|
||||
@ -503,7 +519,7 @@ class RequestRouter : public RequestRouterInterface {
|
||||
return requests;
|
||||
}
|
||||
|
||||
std::vector<ShardRequestState<msgs::ExpandOneRequest>> RequestsForExpandOne(const msgs::ExpandOneRequest &request) {
|
||||
std::vector<ShardRequestState<msgs::ExpandOneRequest>> RequestsForExpandOne(msgs::ExpandOneRequest &&request) {
|
||||
std::map<ShardMetadata, msgs::ExpandOneRequest> per_shard_request_table;
|
||||
msgs::ExpandOneRequest top_level_rqst_template = request;
|
||||
top_level_rqst_template.transaction_id = transaction_id_;
|
||||
@ -515,7 +531,7 @@ class RequestRouter : public RequestRouterInterface {
|
||||
if (!per_shard_request_table.contains(shard)) {
|
||||
per_shard_request_table.insert(std::pair(shard, top_level_rqst_template));
|
||||
}
|
||||
per_shard_request_table[shard].src_vertices.push_back(vertex);
|
||||
per_shard_request_table[shard].src_vertices.push_back(std::move(vertex));
|
||||
}
|
||||
|
||||
std::vector<ShardRequestState<msgs::ExpandOneRequest>> requests = {};
|
||||
@ -708,6 +724,23 @@ class RequestRouter : public RequestRouterInterface {
|
||||
edge_types_.StoreMapping(std::move(id_to_name));
|
||||
}
|
||||
|
||||
std::optional<std::pair<uint64_t, uint64_t>> AllocateInitialEdgeIds(io::Address coordinator_address) override {
|
||||
coordinator::CoordinatorWriteRequests requests{coordinator::AllocateEdgeIdBatchRequest{.batch_size = 1000000}};
|
||||
|
||||
io::rsm::WriteRequest<coordinator::CoordinatorWriteRequests> ww;
|
||||
ww.operation = std::move(requests);
|
||||
auto resp = io_.template Request<io::rsm::WriteResponse<coordinator::CoordinatorWriteResponses>,
|
||||
io::rsm::WriteRequest<coordinator::CoordinatorWriteRequests>>(coordinator_address,
|
||||
std::move(ww))
|
||||
.Wait();
|
||||
if (resp.HasValue()) {
|
||||
const auto alloc_edge_id_reps =
|
||||
std::get<coordinator::AllocateEdgeIdBatchResponse>(resp.GetValue().message.write_return);
|
||||
return std::make_pair(alloc_edge_id_reps.low, alloc_edge_id_reps.high);
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
ShardMap shards_map_;
|
||||
storage::v3::NameIdMapper properties_;
|
||||
storage::v3::NameIdMapper edge_types_;
|
||||
@ -719,4 +752,66 @@ class RequestRouter : public RequestRouterInterface {
|
||||
io::Notifier notifier_ = {};
|
||||
// TODO(kostasrim) Add batch prefetching
|
||||
};
|
||||
|
||||
class RequestRouterFactory {
|
||||
public:
|
||||
RequestRouterFactory() = default;
|
||||
RequestRouterFactory(const RequestRouterFactory &) = delete;
|
||||
RequestRouterFactory &operator=(const RequestRouterFactory &) = delete;
|
||||
RequestRouterFactory(RequestRouterFactory &&) = delete;
|
||||
RequestRouterFactory &operator=(RequestRouterFactory &&) = delete;
|
||||
|
||||
virtual ~RequestRouterFactory() = default;
|
||||
|
||||
virtual std::unique_ptr<RequestRouterInterface> CreateRequestRouter(
|
||||
const coordinator::Address &coordinator_address) const = 0;
|
||||
};
|
||||
|
||||
class LocalRequestRouterFactory : public RequestRouterFactory {
|
||||
using LocalTransportIo = io::Io<io::local_transport::LocalTransport>;
|
||||
LocalTransportIo &io_;
|
||||
|
||||
public:
|
||||
explicit LocalRequestRouterFactory(LocalTransportIo &io) : io_(io) {}
|
||||
|
||||
std::unique_ptr<RequestRouterInterface> CreateRequestRouter(
|
||||
const coordinator::Address &coordinator_address) const override {
|
||||
using TransportType = io::local_transport::LocalTransport;
|
||||
|
||||
auto query_io = io_.ForkLocal(boost::uuids::uuid{boost::uuids::random_generator()()});
|
||||
auto local_transport_io = io_.ForkLocal(boost::uuids::uuid{boost::uuids::random_generator()()});
|
||||
|
||||
return std::make_unique<RequestRouter<TransportType>>(
|
||||
coordinator::CoordinatorClient<TransportType>(query_io, coordinator_address, {coordinator_address}),
|
||||
std::move(local_transport_io));
|
||||
}
|
||||
};
|
||||
|
||||
class SimulatedRequestRouterFactory : public RequestRouterFactory {
|
||||
io::simulator::Simulator *simulator_;
|
||||
|
||||
public:
|
||||
explicit SimulatedRequestRouterFactory(io::simulator::Simulator &simulator) : simulator_(&simulator) {}
|
||||
|
||||
std::unique_ptr<RequestRouterInterface> CreateRequestRouter(
|
||||
const coordinator::Address &coordinator_address) const override {
|
||||
using TransportType = io::simulator::SimulatorTransport;
|
||||
auto actual_transport_handle = simulator_->GetSimulatorHandle();
|
||||
|
||||
boost::uuids::uuid random_uuid;
|
||||
io::Address unique_local_addr_query;
|
||||
|
||||
// The simulated RR should not introduce stochastic behavior.
|
||||
random_uuid = boost::uuids::uuid{3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
|
||||
unique_local_addr_query = {.unique_id = boost::uuids::uuid{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}};
|
||||
|
||||
auto io = simulator_->Register(unique_local_addr_query);
|
||||
auto query_io = io.ForkLocal(random_uuid);
|
||||
|
||||
return std::make_unique<RequestRouter<TransportType>>(
|
||||
coordinator::CoordinatorClient<TransportType>(query_io, coordinator_address, {coordinator_address}),
|
||||
std::move(io));
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace memgraph::query::v2
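Combined with the InterpreterContext change earlier in this diff, these factories are what let production code and the simulator plug in different transports without the interpreter caring. A rough usage sketch, assuming io, coordinator_address and an IdAllocator named edge_ids_alloc are already set up elsewhere:

// Production path: local transport.
memgraph::query::v2::LocalRequestRouterFactory factory{io};
std::unique_ptr<memgraph::query::v2::RequestRouterInterface> request_router =
    factory.CreateRequestRouter(coordinator_address);

// Seed the edge id allocator the same way Interpreter's constructor does above.
if (auto range = request_router->AllocateInitialEdgeIds(coordinator_address)) {
  edge_ids_alloc = {range->first, range->second};
}

// Simulation tests construct a SimulatedRequestRouterFactory instead and hand it to
// InterpreterContext, so the same interpreter code runs over the simulator transport.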
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -12,6 +12,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
#include <iostream>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
@ -25,6 +26,7 @@
|
||||
#include "storage/v3/id_types.hpp"
|
||||
#include "storage/v3/property_value.hpp"
|
||||
#include "storage/v3/result.hpp"
|
||||
#include "utils/fnv.hpp"
|
||||
|
||||
namespace memgraph::msgs {
|
||||
|
||||
@ -36,6 +38,7 @@ struct Value;
|
||||
struct Label {
|
||||
LabelId id;
|
||||
friend bool operator==(const Label &lhs, const Label &rhs) { return lhs.id == rhs.id; }
|
||||
friend bool operator==(const Label &lhs, const LabelId &rhs) { return lhs.id == rhs; }
|
||||
};
|
||||
|
||||
// TODO(kostasrim) update this with CompoundKey, same for the rest of the file.
|
||||
@ -569,6 +572,16 @@ struct CommitResponse {
|
||||
std::optional<ShardError> error;
|
||||
};
|
||||
|
||||
struct SplitInfo {
|
||||
PrimaryKey split_key;
|
||||
uint64_t shard_version;
|
||||
};
|
||||
|
||||
struct PerformSplitDataInfo {
|
||||
PrimaryKey split_key;
|
||||
uint64_t shard_version;
|
||||
};
|
||||
|
||||
using ReadRequests = std::variant<ExpandOneRequest, GetPropertiesRequest, ScanVerticesRequest>;
|
||||
using ReadResponses = std::variant<ExpandOneResponse, GetPropertiesResponse, ScanVerticesResponse>;
|
||||
|
||||
@ -578,3 +591,48 @@ using WriteResponses = std::variant<CreateVerticesResponse, DeleteVerticesRespon
|
||||
CreateExpandResponse, DeleteEdgesResponse, UpdateEdgesResponse, CommitResponse>;
|
||||
|
||||
} // namespace memgraph::msgs
|
||||
|
||||
namespace std {
|
||||
|
||||
template <>
|
||||
struct hash<memgraph::msgs::Value>;
|
||||
|
||||
template <>
|
||||
struct hash<memgraph::msgs::VertexId> {
|
||||
size_t operator()(const memgraph::msgs::VertexId &id) const {
|
||||
using LabelId = memgraph::storage::v3::LabelId;
|
||||
using Value = memgraph::msgs::Value;
|
||||
return memgraph::utils::HashCombine<LabelId, std::vector<Value>, std::hash<LabelId>,
|
||||
memgraph::utils::FnvCollection<std::vector<Value>, Value>>{}(id.first.id,
|
||||
id.second);
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct hash<memgraph::msgs::Value> {
|
||||
size_t operator()(const memgraph::msgs::Value &value) const {
|
||||
using Type = memgraph::msgs::Value::Type;
|
||||
switch (value.type) {
|
||||
case Type::Null:
|
||||
return std::hash<size_t>{}(0U);
|
||||
case Type::Bool:
|
||||
return std::hash<bool>{}(value.bool_v);
|
||||
case Type::Int64:
|
||||
return std::hash<int64_t>{}(value.int_v);
|
||||
case Type::Double:
|
||||
return std::hash<double>{}(value.double_v);
|
||||
case Type::String:
|
||||
return std::hash<std::string>{}(value.string_v);
|
||||
case Type::List:
|
||||
LOG_FATAL("Add hash for lists");
|
||||
case Type::Map:
|
||||
LOG_FATAL("Add hash for maps");
|
||||
case Type::Vertex:
|
||||
LOG_FATAL("Add hash for vertices");
|
||||
case Type::Edge:
|
||||
LOG_FATAL("Add hash for edges");
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace std
|
||||
|
@ -18,6 +18,7 @@ set(storage_v3_src_files
|
||||
bindings/typed_value.cpp
|
||||
expr.cpp
|
||||
vertex.cpp
|
||||
splitter.cpp
|
||||
request_helper.cpp)
|
||||
|
||||
# ######################
|
||||
|
@ -30,6 +30,10 @@ struct Config {
|
||||
io::Duration reclamation_interval{};
|
||||
} gc;
|
||||
|
||||
struct Split {
|
||||
uint64_t max_shard_vertex_size{500'000};
|
||||
} split;
|
||||
|
||||
struct Items {
|
||||
bool properties_on_edges{true};
|
||||
} items;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -13,12 +13,14 @@
|
||||
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
|
||||
#include "storage/v3/edge_ref.hpp"
|
||||
#include "storage/v3/id_types.hpp"
|
||||
#include "storage/v3/property_value.hpp"
|
||||
#include "storage/v3/vertex.hpp"
|
||||
#include "storage/v3/vertex_id.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
#include "utils/synchronized.hpp"
|
||||
|
||||
namespace memgraph::storage::v3 {
|
||||
|
||||
@ -27,6 +29,11 @@ struct Edge;
|
||||
struct Delta;
|
||||
struct CommitInfo;
|
||||
|
||||
inline uint64_t GetNextDeltaId() {
  static utils::Synchronized<uint64_t, utils::SpinLock> delta_id{0};
  return delta_id.WithLock([](auto &id) { return id++; });
}
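The delta_id parameter threaded through the constructors below is expected to come from this generator. A hedged sketch of the call-site shape, inside the memgraph::storage::v3 namespace, where label, commit_info and command_id are assumed to come from the surrounding transaction machinery:

// Allocate a globally increasing delta id, then stamp it on a new delta.
const uint64_t delta_id = GetNextDeltaId();
Delta delta{Delta::AddLabelTag{}, label, commit_info, delta_id, command_id};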
|
||||
|
||||
// This class stores one of three pointers (`Delta`, `Vertex` and `Edge`)
|
||||
// without using additional memory for storing the type. The type is stored in
|
||||
// the pointer itself in the lower bits. All of those structures contain large
|
||||
@ -158,46 +165,54 @@ struct Delta {
|
||||
struct RemoveInEdgeTag {};
|
||||
struct RemoveOutEdgeTag {};
|
||||
|
||||
Delta(DeleteObjectTag /*unused*/, CommitInfo *commit_info, uint64_t command_id)
|
||||
: action(Action::DELETE_OBJECT), commit_info(commit_info), command_id(command_id) {}
|
||||
Delta(DeleteObjectTag /*unused*/, CommitInfo *commit_info, uint64_t delta_id, uint64_t command_id)
|
||||
: action(Action::DELETE_OBJECT), id(delta_id), commit_info(commit_info), command_id(command_id) {}
|
||||
|
||||
Delta(RecreateObjectTag /*unused*/, CommitInfo *commit_info, uint64_t command_id)
|
||||
: action(Action::RECREATE_OBJECT), commit_info(commit_info), command_id(command_id) {}
|
||||
Delta(RecreateObjectTag /*unused*/, CommitInfo *commit_info, uint64_t delta_id, uint64_t command_id)
|
||||
: action(Action::RECREATE_OBJECT), id(delta_id), commit_info(commit_info), command_id(command_id) {}
|
||||
|
||||
Delta(AddLabelTag /*unused*/, LabelId label, CommitInfo *commit_info, uint64_t command_id)
|
||||
: action(Action::ADD_LABEL), commit_info(commit_info), command_id(command_id), label(label) {}
|
||||
Delta(AddLabelTag /*unused*/, LabelId label, CommitInfo *commit_info, uint64_t delta_id, uint64_t command_id)
|
||||
: action(Action::ADD_LABEL), id(delta_id), commit_info(commit_info), command_id(command_id), label(label) {}
|
||||
|
||||
Delta(RemoveLabelTag /*unused*/, LabelId label, CommitInfo *commit_info, uint64_t command_id)
|
||||
: action(Action::REMOVE_LABEL), commit_info(commit_info), command_id(command_id), label(label) {}
|
||||
Delta(RemoveLabelTag /*unused*/, LabelId label, CommitInfo *commit_info, uint64_t delta_id, uint64_t command_id)
|
||||
: action(Action::REMOVE_LABEL), id(delta_id), commit_info(commit_info), command_id(command_id), label(label) {}
|
||||
|
||||
Delta(SetPropertyTag /*unused*/, PropertyId key, const PropertyValue &value, CommitInfo *commit_info,
|
||||
uint64_t command_id)
|
||||
: action(Action::SET_PROPERTY), commit_info(commit_info), command_id(command_id), property({key, value}) {}
|
||||
uint64_t delta_id, uint64_t command_id)
|
||||
: action(Action::SET_PROPERTY),
|
||||
id(delta_id),
|
||||
commit_info(commit_info),
|
||||
command_id(command_id),
|
||||
property({key, value}) {}
|
||||
|
||||
Delta(AddInEdgeTag /*unused*/, EdgeTypeId edge_type, VertexId vertex_id, EdgeRef edge, CommitInfo *commit_info,
|
||||
uint64_t command_id)
|
||||
uint64_t delta_id, uint64_t command_id)
|
||||
: action(Action::ADD_IN_EDGE),
|
||||
id(delta_id),
|
||||
commit_info(commit_info),
|
||||
command_id(command_id),
|
||||
vertex_edge({edge_type, std::move(vertex_id), edge}) {}
|
||||
|
||||
Delta(AddOutEdgeTag /*unused*/, EdgeTypeId edge_type, VertexId vertex_id, EdgeRef edge, CommitInfo *commit_info,
|
||||
uint64_t command_id)
|
||||
uint64_t delta_id, uint64_t command_id)
|
||||
: action(Action::ADD_OUT_EDGE),
|
||||
id(delta_id),
|
||||
commit_info(commit_info),
|
||||
command_id(command_id),
|
||||
vertex_edge({edge_type, std::move(vertex_id), edge}) {}
|
||||
|
||||
Delta(RemoveInEdgeTag /*unused*/, EdgeTypeId edge_type, VertexId vertex_id, EdgeRef edge, CommitInfo *commit_info,
|
||||
uint64_t command_id)
|
||||
uint64_t delta_id, uint64_t command_id)
|
||||
: action(Action::REMOVE_IN_EDGE),
|
||||
id(delta_id),
|
||||
commit_info(commit_info),
|
||||
command_id(command_id),
|
||||
vertex_edge({edge_type, std::move(vertex_id), edge}) {}
|
||||
|
||||
Delta(RemoveOutEdgeTag /*unused*/, EdgeTypeId edge_type, VertexId vertex_id, EdgeRef edge, CommitInfo *commit_info,
|
||||
uint64_t command_id)
|
||||
uint64_t delta_id, uint64_t command_id)
|
||||
: action(Action::REMOVE_OUT_EDGE),
|
||||
id(delta_id),
|
||||
commit_info(commit_info),
|
||||
command_id(command_id),
|
||||
vertex_edge({edge_type, std::move(vertex_id), edge}) {}
|
||||
@ -226,8 +241,10 @@ struct Delta {
|
||||
}
|
||||
}
|
||||
|
||||
Action action;
|
||||
friend bool operator==(const Delta &lhs, const Delta &rhs) noexcept { return lhs.id == rhs.id; }
|
||||
|
||||
Action action;
|
||||
uint64_t id;
|
||||
// TODO: optimize with in-place copy
|
||||
CommitInfo *commit_info;
|
||||
uint64_t command_id;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -35,7 +35,7 @@ msgs::Value ConstructValueVertex(const VertexAccessor &acc, View view) {
|
||||
memgraph::msgs::Label value_label{.id = prim_label};
|
||||
|
||||
auto prim_key = conversions::ConvertValueVector(acc.PrimaryKey(view).GetValue());
|
||||
memgraph::msgs::VertexId vertex_id = std::make_pair(value_label, prim_key);
|
||||
memgraph::msgs::VertexId vertex_id = std::make_pair(value_label, std::move(prim_key));
|
||||
|
||||
// Get the labels
|
||||
auto vertex_labels = acc.Labels(view).GetValue();
|
||||
@ -45,7 +45,7 @@ msgs::Value ConstructValueVertex(const VertexAccessor &acc, View view) {
|
||||
std::transform(vertex_labels.begin(), vertex_labels.end(), std::back_inserter(value_labels),
|
||||
[](const auto &label) { return msgs::Label{.id = label}; });
|
||||
|
||||
return msgs::Value({.id = vertex_id, .labels = value_labels});
|
||||
return msgs::Value({.id = std::move(vertex_id), .labels = std::move(value_labels)});
|
||||
}
|
||||
|
||||
msgs::Value ConstructValueEdge(const EdgeAccessor &acc, View view) {
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -325,7 +325,7 @@ void LabelIndex::RemoveObsoleteEntries(const uint64_t clean_up_before_timestamp)
|
||||
}
|
||||
}
|
||||
|
||||
LabelIndex::Iterable::Iterator::Iterator(Iterable *self, LabelIndexContainer::iterator index_iterator)
|
||||
LabelIndex::Iterable::Iterator::Iterator(Iterable *self, IndexContainer::iterator index_iterator)
|
||||
: self_(self),
|
||||
index_iterator_(index_iterator),
|
||||
current_vertex_accessor_(nullptr, nullptr, nullptr, self_->config_, *self_->vertex_validator_),
|
||||
@ -353,7 +353,7 @@ void LabelIndex::Iterable::Iterator::AdvanceUntilValid() {
|
||||
}
|
||||
}
|
||||
|
||||
LabelIndex::Iterable::Iterable(LabelIndexContainer &index_container, LabelId label, View view, Transaction *transaction,
|
||||
LabelIndex::Iterable::Iterable(IndexContainer &index_container, LabelId label, View view, Transaction *transaction,
|
||||
Indices *indices, Config::Items config, const VertexValidator &vertex_validator)
|
||||
: index_container_(&index_container),
|
||||
label_(label),
|
||||
@ -465,7 +465,7 @@ void LabelPropertyIndex::RemoveObsoleteEntries(const uint64_t clean_up_before_ti
|
||||
}
|
||||
}
|
||||
|
||||
LabelPropertyIndex::Iterable::Iterator::Iterator(Iterable *self, LabelPropertyIndexContainer::iterator index_iterator)
|
||||
LabelPropertyIndex::Iterable::Iterator::Iterator(Iterable *self, IndexContainer::iterator index_iterator)
|
||||
: self_(self),
|
||||
index_iterator_(index_iterator),
|
||||
current_vertex_accessor_(nullptr, nullptr, nullptr, self_->config_, *self_->vertex_validator_),
|
||||
@ -526,7 +526,7 @@ const PropertyValue kSmallestMap = PropertyValue(std::map<std::string, PropertyV
|
||||
const PropertyValue kSmallestTemporalData =
|
||||
PropertyValue(TemporalData{static_cast<TemporalType>(0), std::numeric_limits<int64_t>::min()});
|
||||
|
||||
LabelPropertyIndex::Iterable::Iterable(LabelPropertyIndexContainer &index_container, LabelId label, PropertyId property,
|
||||
LabelPropertyIndex::Iterable::Iterable(IndexContainer &index_container, LabelId label, PropertyId property,
|
||||
const std::optional<utils::Bound<PropertyValue>> &lower_bound,
|
||||
const std::optional<utils::Bound<PropertyValue>> &upper_bound, View view,
|
||||
Transaction *transaction, Indices *indices, Config::Items config,
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -18,6 +18,8 @@
|
||||
#include <utility>
|
||||
|
||||
#include "storage/v3/config.hpp"
|
||||
#include "storage/v3/id_types.hpp"
|
||||
#include "storage/v3/key_store.hpp"
|
||||
#include "storage/v3/property_value.hpp"
|
||||
#include "storage/v3/transaction.hpp"
|
||||
#include "storage/v3/vertex_accessor.hpp"
|
||||
@ -40,12 +42,18 @@ class LabelIndex {
|
||||
bool operator==(const Entry &rhs) const { return vertex == rhs.vertex && timestamp == rhs.timestamp; }
|
||||
};
|
||||
|
||||
using IndexType = LabelId;
|
||||
|
||||
public:
|
||||
using LabelIndexContainer = std::set<Entry>;
|
||||
using IndexContainer = std::set<Entry>;
|
||||
|
||||
LabelIndex(Indices *indices, Config::Items config, const VertexValidator &vertex_validator)
|
||||
: indices_(indices), config_(config), vertex_validator_{&vertex_validator} {}
|
||||
|
||||
LabelIndex(Indices *indices, Config::Items config, const VertexValidator &vertex_validator,
|
||||
std::map<LabelId, IndexContainer> &data)
|
||||
: index_{std::move(data)}, indices_(indices), config_(config), vertex_validator_{&vertex_validator} {}
|
||||
|
||||
/// @throw std::bad_alloc
|
||||
void UpdateOnAddLabel(LabelId label, Vertex *vertex, const Transaction &tx);
|
||||
|
||||
@ -63,12 +71,12 @@ class LabelIndex {
|
||||
|
||||
class Iterable {
|
||||
public:
|
||||
Iterable(LabelIndexContainer &index_container, LabelId label, View view, Transaction *transaction, Indices *indices,
|
||||
Iterable(IndexContainer &index_container, LabelId label, View view, Transaction *transaction, Indices *indices,
|
||||
Config::Items config, const VertexValidator &vertex_validator);
|
||||
|
||||
class Iterator {
|
||||
public:
|
||||
Iterator(Iterable *self, LabelIndexContainer::iterator index_iterator);
|
||||
Iterator(Iterable *self, IndexContainer::iterator index_iterator);
|
||||
|
||||
VertexAccessor operator*() const { return current_vertex_accessor_; }
|
||||
|
||||
@ -81,7 +89,7 @@ class LabelIndex {
|
||||
void AdvanceUntilValid();
|
||||
|
||||
Iterable *self_;
|
||||
LabelIndexContainer::iterator index_iterator_;
|
||||
IndexContainer::iterator index_iterator_;
|
||||
VertexAccessor current_vertex_accessor_;
|
||||
Vertex *current_vertex_;
|
||||
};
|
||||
@ -90,7 +98,7 @@ class LabelIndex {
|
||||
Iterator end() { return {this, index_container_->end()}; }
|
||||
|
||||
private:
|
||||
LabelIndexContainer *index_container_;
|
||||
IndexContainer *index_container_;
|
||||
LabelId label_;
|
||||
View view_;
|
||||
Transaction *transaction_;
|
||||
@ -114,8 +122,29 @@ class LabelIndex {
|
||||
|
||||
void Clear() { index_.clear(); }
|
||||
|
||||
std::map<IndexType, IndexContainer> SplitIndexEntries(const PrimaryKey &split_key) {
|
||||
std::map<IndexType, IndexContainer> cloned_indices;
|
||||
for (auto &[index_type_val, index] : index_) {
|
||||
auto entry_it = index.begin();
|
||||
auto &cloned_indices_container = cloned_indices[index_type_val];
|
||||
while (entry_it != index.end()) {
|
||||
// We need to save the next iterator since the current one will be
|
||||
// invalidated after extract
|
||||
auto next_entry_it = std::next(entry_it);
|
||||
if (entry_it->vertex->first > split_key) {
|
||||
[[maybe_unused]] const auto &[inserted_entry_it, inserted, node] =
|
||||
cloned_indices_container.insert(index.extract(entry_it));
|
||||
MG_ASSERT(inserted, "Failed to extract index entry!");
|
||||
}
|
||||
entry_it = next_entry_it;
|
||||
}
|
||||
}
|
||||
|
||||
return cloned_indices;
|
||||
}
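SplitIndexEntries moves entries to the new shard with std::set node handles: extract removes the node without copying the element, and the next iterator is saved beforehand because extract invalidates the current one. A self-contained sketch of that technique on a plain std::set:

#include <iterator>
#include <set>

// Move every element greater than the threshold into a second set without
// copying, saving the next iterator before extract invalidates the current one.
std::set<int> SplitByThresholdSketch(std::set<int> &source, int threshold) {
  std::set<int> moved;
  for (auto it = source.begin(); it != source.end();) {
    auto next_it = std::next(it);
    if (*it > threshold) {
      moved.insert(source.extract(it));  // node handle move, no element copy
    }
    it = next_it;
  }
  return moved;
}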
|
||||
|
||||
private:
|
||||
std::map<LabelId, LabelIndexContainer> index_;
|
||||
std::map<LabelId, IndexContainer> index_;
|
||||
Indices *indices_;
|
||||
Config::Items config_;
|
||||
const VertexValidator *vertex_validator_;
|
||||
@ -133,9 +162,10 @@ class LabelPropertyIndex {
|
||||
bool operator<(const PropertyValue &rhs) const;
|
||||
bool operator==(const PropertyValue &rhs) const;
|
||||
};
|
||||
using IndexType = std::pair<LabelId, PropertyId>;
|
||||
|
||||
public:
|
||||
using LabelPropertyIndexContainer = std::set<Entry>;
|
||||
using IndexContainer = std::set<Entry>;
|
||||
|
||||
LabelPropertyIndex(Indices *indices, Config::Items config, const VertexValidator &vertex_validator)
|
||||
: indices_(indices), config_(config), vertex_validator_{&vertex_validator} {}
|
||||
@ -159,14 +189,14 @@ class LabelPropertyIndex {
|
||||
|
||||
class Iterable {
|
||||
public:
|
||||
Iterable(LabelPropertyIndexContainer &index_container, LabelId label, PropertyId property,
|
||||
Iterable(IndexContainer &index_container, LabelId label, PropertyId property,
|
||||
const std::optional<utils::Bound<PropertyValue>> &lower_bound,
|
||||
const std::optional<utils::Bound<PropertyValue>> &upper_bound, View view, Transaction *transaction,
|
||||
Indices *indices, Config::Items config, const VertexValidator &vertex_validator);
|
||||
|
||||
class Iterator {
|
||||
public:
|
||||
Iterator(Iterable *self, LabelPropertyIndexContainer::iterator index_iterator);
|
||||
Iterator(Iterable *self, IndexContainer::iterator index_iterator);
|
||||
|
||||
VertexAccessor operator*() const { return current_vertex_accessor_; }
|
||||
|
||||
@ -179,7 +209,7 @@ class LabelPropertyIndex {
|
||||
void AdvanceUntilValid();
|
||||
|
||||
Iterable *self_;
|
||||
LabelPropertyIndexContainer::iterator index_iterator_;
|
||||
IndexContainer::iterator index_iterator_;
|
||||
VertexAccessor current_vertex_accessor_;
|
||||
Vertex *current_vertex_;
|
||||
};
|
||||
@ -188,7 +218,7 @@ class LabelPropertyIndex {
|
||||
Iterator end();
|
||||
|
||||
private:
|
||||
LabelPropertyIndexContainer *index_container_;
|
||||
IndexContainer *index_container_;
|
||||
LabelId label_;
|
||||
PropertyId property_;
|
||||
std::optional<utils::Bound<PropertyValue>> lower_bound_;
|
||||
@ -229,8 +259,29 @@ class LabelPropertyIndex {
|
||||
|
||||
void Clear() { index_.clear(); }
|
||||
|
||||
std::map<IndexType, IndexContainer> SplitIndexEntries(const PrimaryKey &split_key) {
|
||||
std::map<IndexType, IndexContainer> cloned_indices;
|
||||
for (auto &[index_type_val, index] : index_) {
|
||||
auto entry_it = index.begin();
|
||||
auto &cloned_index_container = cloned_indices[index_type_val];
|
||||
while (entry_it != index.end()) {
|
||||
// We need to save the next iterator since the current one will be
|
||||
// invalidated after extract
|
||||
auto next_entry_it = std::next(entry_it);
|
||||
if (entry_it->vertex->first > split_key) {
|
||||
[[maybe_unused]] const auto &[inserted_entry_it, inserted, node] =
|
||||
cloned_index_container.insert(index.extract(entry_it));
|
||||
MG_ASSERT(inserted, "Failed to extract index entry!");
|
||||
}
|
||||
entry_it = next_entry_it;
|
||||
}
|
||||
}
|
||||
|
||||
return cloned_indices;
|
||||
}
|
||||
|
||||
private:
|
||||
std::map<std::pair<LabelId, PropertyId>, LabelPropertyIndexContainer> index_;
|
||||
std::map<std::pair<LabelId, PropertyId>, IndexContainer> index_;
|
||||
Indices *indices_;
|
||||
Config::Items config_;
|
||||
const VertexValidator *vertex_validator_;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -108,7 +108,7 @@ inline bool PrepareForWrite(Transaction *transaction, TObj *object) {
|
||||
/// a `DELETE_OBJECT` delta).
|
||||
/// @throw std::bad_alloc
|
||||
inline Delta *CreateDeleteObjectDelta(Transaction *transaction) {
|
||||
return &transaction->deltas.emplace_back(Delta::DeleteObjectTag(), transaction->commit_info.get(),
|
||||
return &transaction->deltas.emplace_back(Delta::DeleteObjectTag(), transaction->commit_info.get(), GetNextDeltaId(),
|
||||
transaction->command_id);
|
||||
}
|
||||
|
||||
@ -119,7 +119,7 @@ template <typename TObj, class... Args>
|
||||
requires utils::SameAsAnyOf<TObj, Edge, Vertex>
|
||||
inline void CreateAndLinkDelta(Transaction *transaction, TObj *object, Args &&...args) {
|
||||
auto delta = &transaction->deltas.emplace_back(std::forward<Args>(args)..., transaction->commit_info.get(),
|
||||
transaction->command_id);
|
||||
GetNextDeltaId(), transaction->command_id);
|
||||
auto *delta_holder = GetDeltaHolder(object);
|
||||
|
||||
// The operations are written in such order so that both `next` and `prev`
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -46,7 +46,6 @@ struct VertexIdCmpr {
|
||||
std::optional<std::map<PropertyId, Value>> PrimaryKeysFromAccessor(const VertexAccessor &acc, View view,
|
||||
const Schemas::Schema &schema) {
|
||||
std::map<PropertyId, Value> ret;
|
||||
auto props = acc.Properties(view);
|
||||
auto maybe_pk = acc.PrimaryKey(view);
|
||||
if (maybe_pk.HasError()) {
|
||||
spdlog::debug("Encountered an error while trying to get vertex primary key.");
|
||||
@ -58,7 +57,7 @@ std::optional<std::map<PropertyId, Value>> PrimaryKeysFromAccessor(const VertexA
|
||||
ret.emplace(schema.second[i].property_id, FromPropertyValueToValue(std::move(pk[i])));
|
||||
}
|
||||
|
||||
return ret;
|
||||
return {std::move(ret)};
|
||||
}
|
||||
|
||||
ShardResult<std::vector<msgs::Label>> FillUpSourceVertexSecondaryLabels(const std::optional<VertexAccessor> &v_acc,
|
||||
@ -99,7 +98,7 @@ ShardResult<std::map<PropertyId, Value>> FillUpSourceVertexProperties(const std:
|
||||
}
|
||||
auto pks = PrimaryKeysFromAccessor(*v_acc, view, schema);
|
||||
if (pks) {
|
||||
src_vertex_properties.merge(*pks);
|
||||
src_vertex_properties.merge(std::move(*pks));
|
||||
}
|
||||
|
||||
} else if (req.src_vertex_properties.value().empty()) {
|
||||
@ -321,12 +320,15 @@ EdgeFiller InitializeEdgeFillerFunction(const msgs::ExpandOneRequest &req) {
|
||||
value_properties.insert(std::make_pair(prop_key, FromPropertyValueToValue(std::move(prop_val))));
|
||||
}
|
||||
using EdgeWithAllProperties = msgs::ExpandOneResultRow::EdgeWithAllProperties;
|
||||
EdgeWithAllProperties edges{ToMsgsVertexId(edge.From()), msgs::EdgeType{edge.EdgeType()}, edge.Gid().AsUint(),
|
||||
std::move(value_properties)};
|
||||
|
||||
if (is_in_edge) {
|
||||
result_row.in_edges_with_all_properties.push_back(std::move(edges));
|
||||
result_row.in_edges_with_all_properties.push_back(
|
||||
EdgeWithAllProperties{ToMsgsVertexId(edge.From()), msgs::EdgeType{edge.EdgeType()}, edge.Gid().AsUint(),
|
||||
std::move(value_properties)});
|
||||
} else {
|
||||
result_row.out_edges_with_all_properties.push_back(std::move(edges));
|
||||
result_row.out_edges_with_all_properties.push_back(
|
||||
EdgeWithAllProperties{ToMsgsVertexId(edge.To()), msgs::EdgeType{edge.EdgeType()}, edge.Gid().AsUint(),
|
||||
std::move(value_properties)});
|
||||
}
|
||||
return {};
|
||||
};
|
||||
@ -346,12 +348,15 @@ EdgeFiller InitializeEdgeFillerFunction(const msgs::ExpandOneRequest &req) {
|
||||
value_properties.emplace_back(FromPropertyValueToValue(std::move(property_result.GetValue())));
|
||||
}
|
||||
using EdgeWithSpecificProperties = msgs::ExpandOneResultRow::EdgeWithSpecificProperties;
|
||||
EdgeWithSpecificProperties edges{ToMsgsVertexId(edge.From()), msgs::EdgeType{edge.EdgeType()},
|
||||
edge.Gid().AsUint(), std::move(value_properties)};
|
||||
|
||||
if (is_in_edge) {
|
||||
result_row.in_edges_with_specific_properties.push_back(std::move(edges));
|
||||
result_row.in_edges_with_specific_properties.push_back(
|
||||
EdgeWithSpecificProperties{ToMsgsVertexId(edge.From()), msgs::EdgeType{edge.EdgeType()},
|
||||
edge.Gid().AsUint(), std::move(value_properties)});
|
||||
} else {
|
||||
result_row.out_edges_with_specific_properties.push_back(std::move(edges));
|
||||
result_row.out_edges_with_specific_properties.push_back(
|
||||
EdgeWithSpecificProperties{ToMsgsVertexId(edge.To()), msgs::EdgeType{edge.EdgeType()}, edge.Gid().AsUint(),
|
||||
std::move(value_properties)});
|
||||
}
|
||||
return {};
|
||||
};
|
||||
@ -378,13 +383,10 @@ bool FilterOnEdge(DbAccessor &dba, const storage::v3::VertexAccessor &v_acc, con
|
||||
}
|
||||
|
||||
ShardResult<msgs::ExpandOneResultRow> GetExpandOneResult(
|
||||
Shard::Accessor &acc, msgs::VertexId src_vertex, const msgs::ExpandOneRequest &req,
|
||||
VertexAccessor v_acc, msgs::VertexId src_vertex, const msgs::ExpandOneRequest &req,
|
||||
const EdgeUniquenessFunction &maybe_filter_based_on_edge_uniqueness, const EdgeFiller &edge_filler,
|
||||
const Schemas::Schema &schema) {
|
||||
/// Fill up source vertex
|
||||
const auto primary_key = ConvertPropertyVector(src_vertex.second);
|
||||
auto v_acc = acc.FindVertex(primary_key, View::NEW);
|
||||
|
||||
msgs::Vertex source_vertex = {.id = src_vertex};
|
||||
auto maybe_secondary_labels = FillUpSourceVertexSecondaryLabels(v_acc, req);
|
||||
if (maybe_secondary_labels.HasError()) {
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -233,7 +233,7 @@ ShardResult<std::map<PropertyId, Value>> CollectAllPropertiesImpl(const TAccesso
|
||||
[](std::pair<const PropertyId, PropertyValue> &pair) {
|
||||
return std::make_pair(pair.first, conversions::FromPropertyValueToValue(std::move(pair.second)));
|
||||
});
|
||||
return ret;
|
||||
return {std::move(ret)};
|
||||
}
|
||||
} // namespace impl
|
||||
|
||||
@ -247,7 +247,7 @@ EdgeUniquenessFunction InitializeEdgeUniquenessFunction(bool only_unique_neighbo
|
||||
EdgeFiller InitializeEdgeFillerFunction(const msgs::ExpandOneRequest &req);
|
||||
|
||||
ShardResult<msgs::ExpandOneResultRow> GetExpandOneResult(
|
||||
Shard::Accessor &acc, msgs::VertexId src_vertex, const msgs::ExpandOneRequest &req,
|
||||
VertexAccessor v_acc, msgs::VertexId src_vertex, const msgs::ExpandOneRequest &req,
|
||||
const EdgeUniquenessFunction &maybe_filter_based_on_edge_uniqueness, const EdgeFiller &edge_filler,
|
||||
const Schemas::Schema &schema);
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -18,14 +18,14 @@
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <variant>
|
||||
|
||||
#include <bits/ranges_algo.h>
|
||||
#include <gflags/gflags.h>
|
||||
#include <spdlog/spdlog.h>
|
||||
|
||||
#include "io/network/endpoint.hpp"
|
||||
#include "io/time.hpp"
|
||||
#include "storage/v3/delta.hpp"
|
||||
#include "storage/v3/edge.hpp"
|
||||
#include "storage/v3/edge_accessor.hpp"
|
||||
#include "storage/v3/id_types.hpp"
|
||||
#include "storage/v3/indices.hpp"
|
||||
@ -332,16 +332,64 @@ Shard::Shard(const LabelId primary_label, const PrimaryKey min_primary_key,
|
||||
vertex_validator_{schema_validator_, primary_label},
|
||||
indices_{config.items, vertex_validator_},
|
||||
isolation_level_{config.transaction.isolation_level},
|
||||
config_{config},
|
||||
uuid_{utils::GenerateUUID()},
|
||||
epoch_id_{utils::GenerateUUID()},
|
||||
global_locker_{file_retainer_.AddLocker()} {
|
||||
config_{config} {
|
||||
CreateSchema(primary_label_, schema);
|
||||
StoreMapping(std::move(id_to_name));
|
||||
}
|
||||
|
||||
Shard::Shard(LabelId primary_label, PrimaryKey min_primary_key, std::optional<PrimaryKey> max_primary_key,
|
||||
std::vector<SchemaProperty> schema, VertexContainer &&vertices, EdgeContainer &&edges,
|
||||
std::map<uint64_t, std::unique_ptr<Transaction>> &&start_logical_id_to_transaction, const Config &config,
|
||||
const std::unordered_map<uint64_t, std::string> &id_to_name, const uint64_t shard_version)
|
||||
: primary_label_{primary_label},
|
||||
min_primary_key_{min_primary_key},
|
||||
max_primary_key_{max_primary_key},
|
||||
vertices_(std::move(vertices)),
|
||||
edges_(std::move(edges)),
|
||||
shard_version_(shard_version),
|
||||
schema_validator_{schemas_, name_id_mapper_},
|
||||
vertex_validator_{schema_validator_, primary_label},
|
||||
indices_{config.items, vertex_validator_},
|
||||
isolation_level_{config.transaction.isolation_level},
|
||||
config_{config},
|
||||
start_logical_id_to_transaction_(std::move(start_logical_id_to_transaction)) {
|
||||
CreateSchema(primary_label_, schema);
|
||||
StoreMapping(id_to_name);
|
||||
}
|
||||
|
||||
Shard::Shard(LabelId primary_label, PrimaryKey min_primary_key, std::optional<PrimaryKey> max_primary_key,
|
||||
std::vector<SchemaProperty> schema, VertexContainer &&vertices,
|
||||
std::map<uint64_t, std::unique_ptr<Transaction>> &&start_logical_id_to_transaction, const Config &config,
|
||||
const std::unordered_map<uint64_t, std::string> &id_to_name, const uint64_t shard_version)
|
||||
: primary_label_{primary_label},
|
||||
min_primary_key_{min_primary_key},
|
||||
max_primary_key_{max_primary_key},
|
||||
vertices_(std::move(vertices)),
|
||||
shard_version_(shard_version),
|
||||
schema_validator_{schemas_, name_id_mapper_},
|
||||
vertex_validator_{schema_validator_, primary_label},
|
||||
indices_{config.items, vertex_validator_},
|
||||
isolation_level_{config.transaction.isolation_level},
|
||||
config_{config},
|
||||
start_logical_id_to_transaction_(std::move(start_logical_id_to_transaction)) {
|
||||
CreateSchema(primary_label_, schema);
|
||||
StoreMapping(id_to_name);
|
||||
}
|
||||
|
||||
Shard::~Shard() {}
|
||||
|
||||
std::unique_ptr<Shard> Shard::FromSplitData(SplitData &&split_data) {
|
||||
if (split_data.config.items.properties_on_edges) [[likely]] {
|
||||
return std::make_unique<Shard>(split_data.primary_label, split_data.min_primary_key, split_data.max_primary_key,
|
||||
split_data.schema, std::move(split_data.vertices), std::move(*split_data.edges),
|
||||
std::move(split_data.transactions), split_data.config, split_data.id_to_name,
|
||||
split_data.shard_version);
|
||||
}
|
||||
return std::make_unique<Shard>(split_data.primary_label, split_data.min_primary_key, split_data.max_primary_key,
|
||||
split_data.schema, std::move(split_data.vertices), std::move(split_data.transactions),
|
||||
split_data.config, split_data.id_to_name, split_data.shard_version);
|
||||
}
|
||||
|
||||
Shard::Accessor::Accessor(Shard &shard, Transaction &transaction)
|
||||
: shard_(&shard), transaction_(&transaction), config_(shard_->config_.items) {}
|
||||
|
||||
@ -436,7 +484,7 @@ ShardResult<std::optional<std::pair<VertexAccessor, std::vector<EdgeAccessor>>>>
|
||||
}
|
||||
|
||||
std::vector<EdgeAccessor> deleted_edges;
|
||||
const VertexId vertex_id{shard_->primary_label_, *vertex->PrimaryKey(View::OLD)}; // TODO Replace
|
||||
const VertexId vertex_id{shard_->primary_label_, *vertex->PrimaryKey(View::OLD)};
|
||||
for (const auto &item : in_edges) {
|
||||
auto [edge_type, from_vertex, edge] = item;
|
||||
EdgeAccessor e(edge, edge_type, from_vertex, vertex_id, transaction_, &shard_->indices_, config_);
|
||||
@ -1048,6 +1096,28 @@ void Shard::StoreMapping(std::unordered_map<uint64_t, std::string> id_to_name) {
|
||||
name_id_mapper_.StoreMapping(std::move(id_to_name));
|
||||
}
|
||||
|
||||
std::optional<SplitInfo> Shard::ShouldSplit() const noexcept {
|
||||
if (vertices_.size() > config_.split.max_shard_vertex_size) {
|
||||
auto mid_elem = vertices_.begin();
|
||||
// TODO(tyler) the first time we calculate the split point, we should store it so that we don't have to
|
||||
// iterate over half of the entire index each time Cron is run until the split succeeds.
|
||||
std::ranges::advance(mid_elem, static_cast<VertexContainer::difference_type>(vertices_.size() / 2));
|
||||
return SplitInfo{mid_elem->first, shard_version_};
|
||||
}
|
||||
return std::nullopt;
|
||||
}
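ShouldSplit picks the split point by walking to the middle of the vertex map, which is linear in the shard size; that is the cost the TODO above suggests avoiding by caching the computed split point. A self-contained sketch of the median-key selection:

#include <cstddef>
#include <iterator>
#include <map>
#include <optional>

// Return the median key of the map once it has grown past the threshold.
template <typename TKey, typename TValue>
std::optional<TKey> MedianKeySketch(const std::map<TKey, TValue> &container, std::size_t max_size) {
  if (container.size() <= max_size) return std::nullopt;
  auto mid = container.begin();
  std::advance(mid, static_cast<std::ptrdiff_t>(container.size() / 2));
  return mid->first;
}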
|
||||
|
||||
SplitData Shard::PerformSplit(const PrimaryKey &split_key, const uint64_t shard_version) {
|
||||
shard_version_ = shard_version;
|
||||
const auto old_max_key = max_primary_key_;
|
||||
max_primary_key_ = split_key;
|
||||
const auto *schema = GetSchema(primary_label_);
|
||||
MG_ASSERT(schema, "Shard must know about schema of primary label!");
|
||||
Splitter shard_splitter(primary_label_, vertices_, edges_, start_logical_id_to_transaction_, indices_, config_,
|
||||
schema->second, name_id_mapper_);
|
||||
return shard_splitter.SplitShard(split_key, old_max_key, shard_version);
|
||||
}
|
||||
|
||||
bool Shard::IsVertexBelongToShard(const VertexId &vertex_id) const {
|
||||
return vertex_id.primary_label == primary_label_ && vertex_id.primary_key >= min_primary_key_ &&
|
||||
(!max_primary_key_.has_value() || vertex_id.primary_key < *max_primary_key_);
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -14,6 +14,7 @@
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <numeric>
|
||||
#include <optional>
|
||||
#include <shared_mutex>
|
||||
@ -37,6 +38,7 @@
|
||||
#include "storage/v3/result.hpp"
|
||||
#include "storage/v3/schema_validator.hpp"
|
||||
#include "storage/v3/schemas.hpp"
|
||||
#include "storage/v3/splitter.hpp"
|
||||
#include "storage/v3/transaction.hpp"
|
||||
#include "storage/v3/vertex.hpp"
|
||||
#include "storage/v3/vertex_accessor.hpp"
|
||||
@ -174,6 +176,11 @@ struct SchemasInfo {
|
||||
Schemas::SchemasList schemas;
|
||||
};
|
||||
|
||||
struct SplitInfo {
|
||||
PrimaryKey split_point;
|
||||
uint64_t shard_version;
|
||||
};
|
||||
|
||||
/// Structure used to return information about the storage.
|
||||
struct StorageInfo {
|
||||
uint64_t vertex_count;
|
||||
@ -186,9 +193,19 @@ class Shard final {
|
||||
public:
|
||||
/// @throw std::system_error
|
||||
/// @throw std::bad_alloc
|
||||
explicit Shard(LabelId primary_label, PrimaryKey min_primary_key, std::optional<PrimaryKey> max_primary_key,
|
||||
std::vector<SchemaProperty> schema, Config config = Config(),
|
||||
std::unordered_map<uint64_t, std::string> id_to_name = {});
|
||||
Shard(LabelId primary_label, PrimaryKey min_primary_key, std::optional<PrimaryKey> max_primary_key,
|
||||
std::vector<SchemaProperty> schema, Config config = Config(),
|
||||
std::unordered_map<uint64_t, std::string> id_to_name = {});
|
||||
|
||||
Shard(LabelId primary_label, PrimaryKey min_primary_key, std::optional<PrimaryKey> max_primary_key,
|
||||
std::vector<SchemaProperty> schema, VertexContainer &&vertices, EdgeContainer &&edges,
|
||||
std::map<uint64_t, std::unique_ptr<Transaction>> &&start_logical_id_to_transaction, const Config &config,
|
||||
const std::unordered_map<uint64_t, std::string> &id_to_name, uint64_t shard_version);
|
||||
|
||||
Shard(LabelId primary_label, PrimaryKey min_primary_key, std::optional<PrimaryKey> max_primary_key,
|
||||
std::vector<SchemaProperty> schema, VertexContainer &&vertices,
|
||||
std::map<uint64_t, std::unique_ptr<Transaction>> &&start_logical_id_to_transaction, const Config &config,
|
||||
const std::unordered_map<uint64_t, std::string> &id_to_name, uint64_t shard_version);
|
||||
|
||||
Shard(const Shard &) = delete;
|
||||
Shard(Shard &&) noexcept = delete;
|
||||
@ -196,6 +213,8 @@ class Shard final {
|
||||
Shard operator=(Shard &&) noexcept = delete;
|
||||
~Shard();
|
||||
|
||||
static std::unique_ptr<Shard> FromSplitData(SplitData &&split_data);
|
||||
|
||||
class Accessor final {
|
||||
private:
|
||||
friend class Shard;
|
||||
@ -360,6 +379,10 @@ class Shard final {
|
||||
|
||||
void StoreMapping(std::unordered_map<uint64_t, std::string> id_to_name);
|
||||
|
||||
std::optional<SplitInfo> ShouldSplit() const noexcept;
|
||||
|
||||
SplitData PerformSplit(const PrimaryKey &split_key, uint64_t shard_version);
|
||||
|
||||
private:
|
||||
Transaction &GetTransaction(coordinator::Hlc start_timestamp, IsolationLevel isolation_level);
|
||||
|
||||
@ -377,6 +400,7 @@ class Shard final {
|
||||
// list is used only when properties are enabled for edges. Because of that we
|
||||
// keep a separate count of edges that is always updated.
|
||||
uint64_t edge_count_{0};
|
||||
uint64_t shard_version_{0};
|
||||
|
||||
SchemaValidator schema_validator_;
|
||||
VertexValidator vertex_validator_;
|
||||
@ -396,38 +420,6 @@ class Shard final {
|
||||
// storage.
|
||||
std::list<Gid> deleted_edges_;
|
||||
|
||||
// UUID used to distinguish snapshots and to link snapshots to WALs
|
||||
std::string uuid_;
|
||||
// Sequence number used to keep track of the chain of WALs.
|
||||
uint64_t wal_seq_num_{0};
|
||||
|
||||
// UUID to distinguish different main instance runs for replication process
|
||||
// on SAME storage.
|
||||
// Multiple instances can have same storage UUID and be MAIN at the same time.
|
||||
// We cannot compare commit timestamps of those instances if one of them
|
||||
// becomes the replica of the other so we use epoch_id_ as additional
|
||||
// discriminating property.
|
||||
// Example of this:
|
||||
// We have 2 instances of the same storage, S1 and S2.
|
||||
// S1 and S2 are MAIN and accept their own commits and write them to the WAL.
|
||||
// At the moment when S1 commited a transaction with timestamp 20, and S2
|
||||
// a different transaction with timestamp 15, we change S2's role to REPLICA
|
||||
// and register it on S1.
|
||||
// Without using the epoch_id, we don't know that S1 and S2 have completely
|
||||
// different transactions, we think that the S2 is behind only by 5 commits.
|
||||
std::string epoch_id_;
|
||||
// History of the previous epoch ids.
|
||||
// Each value consists of the epoch id along the last commit belonging to that
|
||||
// epoch.
|
||||
std::deque<std::pair<std::string, uint64_t>> epoch_history_;
|
||||
|
||||
uint64_t wal_unsynced_transactions_{0};
|
||||
|
||||
utils::FileRetainer file_retainer_;
|
||||
|
||||
// Global locker that is used for clients file locking
|
||||
utils::FileRetainer::FileLocker global_locker_;
|
||||
|
||||
// Holds all of the (in progress, committed and aborted) transactions that are read or write to this shard, but
|
||||
// haven't been cleaned up yet
|
||||
std::map<uint64_t, std::unique_ptr<Transaction>> start_logical_id_to_transaction_{};
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -250,8 +250,8 @@ class ShardManager {
|
||||
|
||||
spdlog::info("SM sending heartbeat to coordinator {}", coordinator_leader_.ToString());
|
||||
heartbeat_res_.emplace(std::move(
|
||||
io_.template Request<WriteRequest<CoordinatorWriteRequests>, WriteResponse<CoordinatorWriteResponses>>(
|
||||
coordinator_leader_, ww)));
|
||||
io_.template Request<WriteResponse<CoordinatorWriteResponses>, WriteRequest<CoordinatorWriteRequests>>(
|
||||
coordinator_leader_, std::move(ww))));
|
||||
spdlog::info("SM sent heartbeat");
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -472,7 +472,8 @@ msgs::ReadResponses ShardRsm::HandleRead(msgs::ExpandOneRequest &&req) {
|
||||
if (req.order_by_edges.empty()) {
|
||||
const auto *schema = shard_->GetSchema(shard_->PrimaryLabel());
|
||||
MG_ASSERT(schema);
|
||||
return GetExpandOneResult(acc, src_vertex, req, maybe_filter_based_on_edge_uniqueness, edge_filler, *schema);
|
||||
return GetExpandOneResult(src_vertex_acc, std::move(src_vertex), req, maybe_filter_based_on_edge_uniqueness,
|
||||
edge_filler, *schema);
|
||||
}
|
||||
auto [in_edge_accessors, out_edge_accessors] = GetEdgesFromVertex(src_vertex_acc, req.direction);
|
||||
const auto in_ordered_edges = OrderByEdges(dba, in_edge_accessors, req.order_by_edges, src_vertex_acc);
|
||||
@ -487,12 +488,13 @@ msgs::ReadResponses ShardRsm::HandleRead(msgs::ExpandOneRequest &&req) {
|
||||
[](const auto &edge_element) { return edge_element.object_acc; });
|
||||
const auto *schema = shard_->GetSchema(shard_->PrimaryLabel());
|
||||
MG_ASSERT(schema);
|
||||
return GetExpandOneResult(src_vertex_acc, src_vertex, req, in_edge_ordered_accessors, out_edge_ordered_accessors,
|
||||
maybe_filter_based_on_edge_uniqueness, edge_filler, *schema);
|
||||
return GetExpandOneResult(src_vertex_acc, std::move(src_vertex), req, std::move(in_edge_ordered_accessors),
|
||||
std::move(out_edge_ordered_accessors), maybe_filter_based_on_edge_uniqueness,
|
||||
edge_filler, *schema);
|
||||
});
|
||||
|
||||
if (maybe_result.HasError()) {
|
||||
shard_error.emplace(CreateErrorResponse(primary_key.GetError(), req.transaction_id, "getting primary key"));
|
||||
shard_error.emplace(CreateErrorResponse(maybe_result.GetError(), req.transaction_id, "getting expand result"));
|
||||
break;
|
||||
}
|
||||
|
||||
@ -535,13 +537,17 @@ msgs::ReadResponses ShardRsm::HandleRead(msgs::GetPropertiesRequest &&req) {
|
||||
return result;
|
||||
};
|
||||
|
||||
auto collect_props = [&req](const VertexAccessor &v_acc,
|
||||
const std::optional<EdgeAccessor> &e_acc) -> ShardResult<std::map<PropertyId, Value>> {
|
||||
auto collect_props = [this, &req](
|
||||
const VertexAccessor &v_acc,
|
||||
const std::optional<EdgeAccessor> &e_acc) -> ShardResult<std::map<PropertyId, Value>> {
|
||||
if (!req.property_ids) {
|
||||
if (e_acc) {
|
||||
return CollectAllPropertiesFromAccessor(*e_acc, view);
|
||||
}
|
||||
return CollectAllPropertiesFromAccessor(v_acc, view);
|
||||
const auto *schema = shard_->GetSchema(shard_->PrimaryLabel());
|
||||
MG_ASSERT(schema);
|
||||
|
||||
return CollectAllPropertiesFromAccessor(v_acc, view, *schema);
|
||||
}
|
||||
|
||||
if (e_acc) {
|
||||
@ -577,12 +583,12 @@ msgs::ReadResponses ShardRsm::HandleRead(msgs::GetPropertiesRequest &&req) {
|
||||
if (maybe_id.HasError()) {
|
||||
return {maybe_id.GetError()};
|
||||
}
|
||||
const auto &id = maybe_id.GetValue();
|
||||
auto &vertex_id = maybe_id.GetValue();
|
||||
std::optional<msgs::EdgeId> e_id;
|
||||
if (e_acc) {
|
||||
e_id = msgs::EdgeId{e_acc->Gid().AsUint()};
|
||||
}
|
||||
msgs::VertexId v_id{msgs::Label{id.primary_label}, ConvertValueVector(id.primary_key)};
|
||||
msgs::VertexId v_id{msgs::Label{vertex_id.primary_label}, ConvertValueVector(std::move(vertex_id.primary_key))};
|
||||
auto maybe_props = collect_props(v_acc, e_acc);
|
||||
if (maybe_props.HasError()) {
|
||||
return {maybe_props.GetError()};
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -12,11 +12,13 @@
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <variant>
|
||||
|
||||
#include <openssl/ec.h>
|
||||
#include "query/v2/requests.hpp"
|
||||
#include "storage/v3/shard.hpp"
|
||||
#include "storage/v3/value_conversions.hpp"
|
||||
#include "storage/v3/vertex_accessor.hpp"
|
||||
|
||||
namespace memgraph::storage::v3 {
|
||||
@ -41,8 +43,21 @@ class ShardRsm {
|
||||
public:
|
||||
explicit ShardRsm(std::unique_ptr<Shard> &&shard) : shard_(std::move(shard)){};
|
||||
|
||||
std::optional<msgs::SplitInfo> ShouldSplit() const noexcept {
|
||||
auto split_info = shard_->ShouldSplit();
|
||||
if (split_info) {
|
||||
return msgs::SplitInfo{conversions::ConvertValueVector(split_info->split_point), split_info->shard_version};
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
std::unique_ptr<Shard> PerformSplit(msgs::PerformSplitDataInfo perform_split) const noexcept {
|
||||
return Shard::FromSplitData(
|
||||
shard_->PerformSplit(conversions::ConvertPropertyVector(perform_split.split_key), perform_split.shard_version));
|
||||
}
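These two members translate between the msgs-level and storage-level split types. A hypothetical caller sketch follows; the function name and flow are illustrative only, and in the real flow the PerformSplitDataInfo arrives from the coordinator rather than being derived locally.

// Hypothetical caller (illustrative; not the ShardManager's actual logic).
void MaybeSplitSketch(memgraph::storage::v3::ShardRsm &rsm,
                      const memgraph::msgs::PerformSplitDataInfo &split_request) {
  if (const auto split_info = rsm.ShouldSplit(); split_info.has_value()) {
    // In practice split_request is produced by the coordinator in response to
    // the reported split_info.
    auto new_shard = rsm.PerformSplit(split_request);
    memgraph::storage::v3::ShardRsm new_rsm{std::move(new_shard)};
    (void)new_rsm;  // A real caller would register the new state machine here.
  }
}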
|
||||
|
||||
// NOLINTNEXTLINE(readability-convert-member-functions-to-static)
|
||||
msgs::ReadResponses Read(msgs::ReadRequests requests) {
|
||||
msgs::ReadResponses Read(msgs::ReadRequests &&requests) {
|
||||
return std::visit([&](auto &&request) mutable { return HandleRead(std::forward<decltype(request)>(request)); },
|
||||
std::move(requests));
|
||||
}
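Read now takes the request variant by rvalue reference and dispatches with std::visit, forwarding each alternative into the matching HandleRead overload. A self-contained sketch of that dispatch pattern with stand-in types:

#include <string>
#include <utility>
#include <variant>

// Stand-in handler: each alternative of the variant is forwarded to the
// matching overload, analogous to the Read/HandleRead pair above.
struct DispatchSketch {
  int HandleRead(int request) { return request; }
  int HandleRead(std::string &&request) { return static_cast<int>(request.size()); }

  int Read(std::variant<int, std::string> &&requests) {
    return std::visit([&](auto &&request) { return HandleRead(std::forward<decltype(request)>(request)); },
                      std::move(requests));
  }
};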
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -100,7 +100,7 @@ class Queue {
|
||||
|
||||
inner_->submitted++;
|
||||
|
||||
inner_->queue.emplace_back(std::forward<Message>(message));
|
||||
inner_->queue.emplace_back(std::move(message));
|
||||
} // lock dropped before notifying condition variable
|
||||
|
||||
inner_->cv.notify_all();
|
||||
|
411
src/storage/v3/splitter.cpp
Normal file
@ -0,0 +1,411 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "storage/v3/splitter.hpp"
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstdint>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <set>
|
||||
|
||||
#include "storage/v3/config.hpp"
|
||||
#include "storage/v3/delta.hpp"
|
||||
#include "storage/v3/id_types.hpp"
|
||||
#include "storage/v3/indices.hpp"
|
||||
#include "storage/v3/key_store.hpp"
|
||||
#include "storage/v3/name_id_mapper.hpp"
|
||||
#include "storage/v3/schemas.hpp"
|
||||
#include "storage/v3/shard.hpp"
|
||||
#include "storage/v3/transaction.hpp"
|
||||
#include "storage/v3/vertex.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
|
||||
namespace memgraph::storage::v3 {
|
||||
|
||||
Splitter::Splitter(const LabelId primary_label, VertexContainer &vertices, EdgeContainer &edges,
|
||||
std::map<uint64_t, std::unique_ptr<Transaction>> &start_logical_id_to_transaction, Indices &indices,
|
||||
const Config &config, const std::vector<SchemaProperty> &schema, const NameIdMapper &name_id_mapper)
|
||||
: primary_label_(primary_label),
|
||||
vertices_(vertices),
|
||||
edges_(edges),
|
||||
start_logical_id_to_transaction_(start_logical_id_to_transaction),
|
||||
indices_(indices),
|
||||
config_(config),
|
||||
schema_(schema),
|
||||
name_id_mapper_(name_id_mapper) {}
|
||||
|
||||
SplitData Splitter::SplitShard(const PrimaryKey &split_key, const std::optional<PrimaryKey> &max_primary_key,
|
||||
const uint64_t shard_version) {
|
||||
SplitData data{.primary_label = primary_label_,
|
||||
.min_primary_key = split_key,
|
||||
.max_primary_key = max_primary_key,
|
||||
.schema = schema_,
|
||||
.config = config_,
|
||||
.id_to_name = name_id_mapper_.GetIdToNameMap(),
|
||||
.shard_version = shard_version};
|
||||
|
||||
std::set<uint64_t> collected_transactions_;
|
||||
data.vertices = CollectVertices(data, collected_transactions_, split_key);
|
||||
data.edges = CollectEdges(collected_transactions_, data.vertices, split_key);
|
||||
data.transactions = CollectTransactions(collected_transactions_, *data.edges, split_key);
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
void Splitter::ScanDeltas(std::set<uint64_t> &collected_transactions_, const Delta *delta) {
|
||||
while (delta != nullptr) {
|
||||
collected_transactions_.insert(delta->commit_info->start_or_commit_timestamp.logical_id);
|
||||
delta = delta->next;
|
||||
}
|
||||
}
|
||||
|
||||
VertexContainer Splitter::CollectVertices(SplitData &data, std::set<uint64_t> &collected_transactions_,
|
||||
const PrimaryKey &split_key) {
|
||||
data.label_indices = indices_.label_index.SplitIndexEntries(split_key);
|
||||
data.label_property_indices = indices_.label_property_index.SplitIndexEntries(split_key);
|
||||
|
||||
VertexContainer splitted_data;
|
||||
auto split_key_it = vertices_.find(split_key);
|
||||
while (split_key_it != vertices_.end()) {
|
||||
// Go through the deltas and collect the owning transactions' start/commit timestamp ids
|
||||
ScanDeltas(collected_transactions_, split_key_it->second.delta);
|
||||
|
||||
auto next_it = std::next(split_key_it);
|
||||
|
||||
const auto new_it = splitted_data.insert(splitted_data.end(), vertices_.extract(split_key_it));
|
||||
MG_ASSERT(new_it != splitted_data.end(), "Failed to extract vertex!");
|
||||
|
||||
split_key_it = next_it;
|
||||
}
|
||||
return splitted_data;
|
||||
}
|
||||
|
||||
std::optional<EdgeContainer> Splitter::CollectEdges(std::set<uint64_t> &collected_transactions_,
|
||||
const VertexContainer &split_vertices,
|
||||
const PrimaryKey &split_key) {
|
||||
if (!config_.items.properties_on_edges) {
|
||||
return std::nullopt;
|
||||
}
|
||||
EdgeContainer splitted_edges;
|
||||
const auto split_vertex_edges = [&](const auto &edges_ref) {
|
||||
// This is safe because, when properties_on_edges is true, the EdgeRef must hold a pointer
|
||||
for (const auto &edge_ref : edges_ref) {
|
||||
auto *edge = std::get<2>(edge_ref).ptr;
|
||||
const auto &other_vtx = std::get<1>(edge_ref);
|
||||
ScanDeltas(collected_transactions_, edge->delta);
|
||||
// Check whether the edge's source and destination both end up on the split-off shard, so we know whether we
|
||||
// should move the edge out of this shard or make a clone of it
|
||||
if (other_vtx.primary_key >= split_key) {
|
||||
// Remove edge from shard
|
||||
splitted_edges.insert(edges_.extract(edge->gid));
|
||||
} else {
|
||||
splitted_edges.insert({edge->gid, Edge{edge->gid, edge->delta}});
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
for (const auto &vertex : split_vertices) {
|
||||
split_vertex_edges(vertex.second.in_edges);
|
||||
split_vertex_edges(vertex.second.out_edges);
|
||||
}
|
||||
return splitted_edges;
|
||||
}
|
||||
|
||||
std::map<uint64_t, std::unique_ptr<Transaction>> Splitter::CollectTransactions(
|
||||
const std::set<uint64_t> &collected_transactions_, EdgeContainer &cloned_edges, const PrimaryKey &split_key) {
|
||||
std::map<uint64_t, std::unique_ptr<Transaction>> transactions;
|
||||
|
||||
for (const auto &[commit_start, transaction] : start_logical_id_to_transaction_) {
|
||||
// We need every transaction whose deltas need to be resolved for any of the
|
||||
// entities
|
||||
if (collected_transactions_.contains(transaction->commit_info->start_or_commit_timestamp.logical_id)) {
|
||||
transactions.insert({commit_start, start_logical_id_to_transaction_[commit_start]->Clone()});
|
||||
}
|
||||
}
|
||||
|
||||
// It is necessary to clone all the transactions first so we have new addresses
|
||||
// for the deltas, before aligning the deltas' next and prev pointers
|
||||
AdjustClonedTransactions(transactions, cloned_edges, split_key);
|
||||
return transactions;
|
||||
}
|
||||
|
||||
void EraseDeltaChain(auto &transaction, auto &transactions, auto &delta_head_it) {
|
||||
auto *current_next_delta = delta_head_it->next;
|
||||
// We need to keep track of delta_head_it in the delta list of current transaction
|
||||
delta_head_it = transaction.deltas.erase(delta_head_it);
|
||||
|
||||
while (current_next_delta != nullptr) {
|
||||
auto *next_delta = current_next_delta->next;
|
||||
// Find next delta transaction delta list
|
||||
auto current_transaction_it = std::ranges::find_if(
|
||||
transactions, [&start_or_commit_timestamp =
|
||||
current_next_delta->commit_info->start_or_commit_timestamp](const auto &transaction) {
|
||||
return transaction.second->start_timestamp == start_or_commit_timestamp ||
|
||||
transaction.second->commit_info->start_or_commit_timestamp == start_or_commit_timestamp;
|
||||
});
|
||||
MG_ASSERT(current_transaction_it != transactions.end(), "Error when pruning deltas!");
|
||||
// Remove the delta
|
||||
const auto delta_it =
|
||||
std::ranges::find_if(current_transaction_it->second->deltas,
|
||||
[current_next_delta](const auto &elem) { return elem.id == current_next_delta->id; });
|
||||
if (delta_it != current_transaction_it->second->deltas.end()) {
|
||||
// If the next delta is next in transaction list replace current_transaction_it
|
||||
// with the next one
|
||||
if (current_transaction_it->second->start_timestamp == transaction.start_timestamp &&
|
||||
current_transaction_it == std::next(current_transaction_it)) {
|
||||
delta_head_it = current_transaction_it->second->deltas.erase(delta_it);
|
||||
} else {
|
||||
current_transaction_it->second->deltas.erase(delta_it);
|
||||
}
|
||||
}
|
||||
|
||||
current_next_delta = next_delta;
|
||||
}
|
||||
}
|
||||
|
||||
void PruneDeltas(Transaction &cloned_transaction, std::map<uint64_t, std::unique_ptr<Transaction>> &cloned_transactions,
|
||||
const PrimaryKey &split_key, EdgeContainer &cloned_edges) {
|
||||
// Remove delta chains that don't point to objects on the split-off shard
|
||||
auto cloned_delta_it = cloned_transaction.deltas.begin();
|
||||
|
||||
while (cloned_delta_it != cloned_transaction.deltas.end()) {
|
||||
const auto prev = cloned_delta_it->prev.Get();
|
||||
switch (prev.type) {
|
||||
case PreviousPtr::Type::DELTA:
|
||||
case PreviousPtr::Type::NULLPTR:
|
||||
++cloned_delta_it;
|
||||
break;
|
||||
case PreviousPtr::Type::VERTEX: {
|
||||
if (prev.vertex->first < split_key) {
|
||||
// We can remove this delta chain
|
||||
EraseDeltaChain(cloned_transaction, cloned_transactions, cloned_delta_it);
|
||||
} else {
|
||||
++cloned_delta_it;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case PreviousPtr::Type::EDGE: {
|
||||
if (const auto edge_gid = prev.edge->gid; !cloned_edges.contains(edge_gid)) {
|
||||
// We can remove this delta chain
|
||||
EraseDeltaChain(cloned_transaction, cloned_transactions, cloned_delta_it);
|
||||
} else {
|
||||
++cloned_delta_it;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void Splitter::PruneOriginalDeltas(Transaction &transaction,
|
||||
std::map<uint64_t, std::unique_ptr<Transaction>> &transactions,
|
||||
const PrimaryKey &split_key) {
|
||||
// Remove delta chains that no longer point to objects remaining on this shard
|
||||
auto delta_it = transaction.deltas.begin();
|
||||
|
||||
while (delta_it != transaction.deltas.end()) {
|
||||
const auto prev = delta_it->prev.Get();
|
||||
switch (prev.type) {
|
||||
case PreviousPtr::Type::DELTA:
|
||||
case PreviousPtr::Type::NULLPTR:
|
||||
++delta_it;
|
||||
break;
|
||||
case PreviousPtr::Type::VERTEX: {
|
||||
if (prev.vertex->first >= split_key) {
|
||||
// We can remove this delta chain
|
||||
EraseDeltaChain(transaction, transactions, delta_it);
|
||||
} else {
|
||||
++delta_it;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case PreviousPtr::Type::EDGE: {
|
||||
if (const auto edge_gid = prev.edge->gid; !edges_.contains(edge_gid)) {
|
||||
// We can remove this delta chain
|
||||
EraseDeltaChain(transaction, transactions, delta_it);
|
||||
} else {
|
||||
++delta_it;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void Splitter::AdjustClonedTransactions(std::map<uint64_t, std::unique_ptr<Transaction>> &cloned_transactions,
|
||||
EdgeContainer &cloned_edges, const PrimaryKey &split_key) {
|
||||
for (auto &[start_id, cloned_transaction] : cloned_transactions) {
|
||||
AdjustClonedTransaction(*cloned_transaction, *start_logical_id_to_transaction_[start_id], cloned_transactions,
|
||||
cloned_edges);
|
||||
}
|
||||
// Prune deltas whose delta chain points to a vertex/edge that should not belong to that shard
|
||||
// Pruning must come after adjusting, since before that next and prev are not set and we cannot follow the chain
|
||||
for (auto &[start_id, cloned_transaction] : cloned_transactions) {
|
||||
PruneDeltas(*cloned_transaction, cloned_transactions, split_key, cloned_edges);
|
||||
}
|
||||
// We also need to remove such deltas from the original transactions
|
||||
for (auto &[start_id, original_transaction] : start_logical_id_to_transaction_) {
|
||||
PruneOriginalDeltas(*original_transaction, start_logical_id_to_transaction_, split_key);
|
||||
}
|
||||
}
|
||||
|
||||
inline bool IsDeltaHeadOfChain(const PreviousPtr::Type &delta_type) {
|
||||
return delta_type == PreviousPtr::Type::VERTEX || delta_type == PreviousPtr::Type::EDGE;
|
||||
}
|
||||
|
||||
bool DoesPrevPtrPointsToSplittedData(const PreviousPtr::Pointer &prev_ptr, const PrimaryKey &split_key) {
|
||||
return prev_ptr.type == PreviousPtr::Type::VERTEX && prev_ptr.vertex->first < split_key;
|
||||
}
|
||||
|
||||
void Splitter::AdjustClonedTransaction(Transaction &cloned_transaction, const Transaction &transaction,
|
||||
std::map<uint64_t, std::unique_ptr<Transaction>> &cloned_transactions,
|
||||
EdgeContainer &cloned_edges) {
|
||||
auto delta_it = transaction.deltas.begin();
|
||||
auto cloned_delta_it = cloned_transaction.deltas.begin();
|
||||
|
||||
while (delta_it != transaction.deltas.end()) {
|
||||
// We can safely ignore deltas that are not the head of a delta chain
|
||||
// Don't adjust delta chains that point to irrelevant vertices/edges
|
||||
if (const auto delta_prev = delta_it->prev.Get(); !IsDeltaHeadOfChain(delta_prev.type)) {
|
||||
++delta_it;
|
||||
++cloned_delta_it;
|
||||
continue;
|
||||
}
|
||||
|
||||
const auto *delta = &*delta_it;
|
||||
auto *cloned_delta = &*cloned_delta_it;
|
||||
Delta *cloned_delta_prev_ptr = cloned_delta;
|
||||
// The head of a delta chain has either a vertex or an edge as its prev ptr, so we adjust
|
||||
// it only at the beginning of the delta chain
|
||||
AdjustDeltaPrevPtr(*delta, *cloned_delta_prev_ptr, cloned_transactions, cloned_edges);
|
||||
|
||||
while (delta->next != nullptr) {
|
||||
AdjustEdgeRef(*cloned_delta, cloned_edges);
|
||||
|
||||
// Align next ptr and prev ptr
|
||||
AdjustDeltaNextAndPrev(*delta, *cloned_delta, cloned_transactions);
|
||||
|
||||
// The next delta might not belong to the cloned transaction, and that's
|
||||
// why we skip this delta of the delta chain
|
||||
if (cloned_delta->next != nullptr) {
|
||||
cloned_delta = cloned_delta->next;
|
||||
cloned_delta_prev_ptr = cloned_delta;
|
||||
} else {
|
||||
cloned_delta_prev_ptr = nullptr;
|
||||
}
|
||||
delta = delta->next;
|
||||
}
|
||||
// Align prev ptr
|
||||
if (cloned_delta_prev_ptr != nullptr) {
|
||||
AdjustDeltaPrevPtr(*delta, *cloned_delta_prev_ptr, cloned_transactions, cloned_edges);
|
||||
}
|
||||
|
||||
++delta_it;
|
||||
++cloned_delta_it;
|
||||
}
|
||||
MG_ASSERT(delta_it == transaction.deltas.end() && cloned_delta_it == cloned_transaction.deltas.end(),
|
||||
"Both iterators must be exhausted!");
|
||||
}

void Splitter::AdjustEdgeRef(Delta &cloned_delta, EdgeContainer &cloned_edges) const {
  switch (cloned_delta.action) {
    case Delta::Action::ADD_IN_EDGE:
    case Delta::Action::ADD_OUT_EDGE:
    case Delta::Action::REMOVE_IN_EDGE:
    case Delta::Action::REMOVE_OUT_EDGE: {
      // Find edge
      if (config_.items.properties_on_edges) {
        if (const auto cloned_edge_it = cloned_edges.find(cloned_delta.vertex_edge.edge.ptr->gid);
            cloned_edge_it != cloned_edges.end()) {
          cloned_delta.vertex_edge.edge = EdgeRef{&cloned_edge_it->second};
        }
      }
      break;
    }
    case Delta::Action::DELETE_OBJECT:
    case Delta::Action::RECREATE_OBJECT:
    case Delta::Action::SET_PROPERTY:
    case Delta::Action::ADD_LABEL:
    case Delta::Action::REMOVE_LABEL: {
      // noop
      break;
    }
  }
}
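// Note (illustrative, not part of this change): the properties_on_edges gate above exists because EdgeRef
// only carries a pointer into the edge container when edge properties are stored; otherwise it carries a
// plain Gid, which stays valid across the split and needs no remapping. Sketch of the distinction, assuming
// the gid/ptr union used by EdgeRef elsewhere in storage/v3:
//
//   if (config_.items.properties_on_edges) {
//     Edge *maybe_stale = cloned_delta.vertex_edge.edge.ptr;  // must be redirected to cloned_edges
//   } else {
//     Gid stable_id = cloned_delta.vertex_edge.edge.gid;      // value type, nothing to fix up
//   }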

void Splitter::AdjustDeltaNextAndPrev(const Delta &original, Delta &cloned,
                                      std::map<uint64_t, std::unique_ptr<Transaction>> &cloned_transactions) {
  // Find the cloned transaction that holds cloned_delta->next by using the transaction of delta->next;
  // the cloned_transactions key is the start timestamp.
  auto cloned_transaction_it =
      cloned_transactions.find(original.next->commit_info->start_or_commit_timestamp.logical_id);
  if (cloned_transaction_it == cloned_transactions.end()) {
    cloned_transaction_it = std::ranges::find_if(cloned_transactions, [&original](const auto &elem) {
      return elem.second->commit_info->start_or_commit_timestamp ==
             original.next->commit_info->start_or_commit_timestamp;
    });
  }
  // TODO(jbajic) What if the next delta in the chain does not belong to a cloned transaction?
  // MG_ASSERT(cloned_transaction_it != cloned_transactions.end(), "Cloned transaction not found");
  if (cloned_transaction_it == cloned_transactions.end()) return;
  // Find the cloned delta in the delta list of the cloned transaction
  auto found_cloned_delta_it = std::ranges::find_if(
      cloned_transaction_it->second->deltas, [&original](const auto &elem) { return elem.id == original.next->id; });
  MG_ASSERT(found_cloned_delta_it != cloned_transaction_it->second->deltas.end(), "Delta with given id must exist!");
  cloned.next = &*found_cloned_delta_it;
  found_cloned_delta_it->prev.Set(&cloned);
}
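// Note (illustrative, not part of this change): after this relinking both directions of the doubly linked
// delta chain should agree, i.e. following cloned.next and then prev lands back on the same delta. A sanity
// check one could keep under a debug build:
//
//   if (cloned.next != nullptr) {
//     MG_ASSERT(cloned.next->prev.Get().delta == &cloned, "next/prev of cloned deltas diverged");
//   }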

void Splitter::AdjustDeltaPrevPtr(const Delta &original, Delta &cloned,
                                  std::map<uint64_t, std::unique_ptr<Transaction>> &cloned_transactions,
                                  EdgeContainer &cloned_edges) {
  auto ptr = original.prev.Get();
  switch (ptr.type) {
    case PreviousPtr::Type::NULLPTR: {
      MG_ASSERT(false, "PreviousPtr cannot be a nullptr!");
      break;
    }
    case PreviousPtr::Type::DELTA: {
      // Same as aligning the next pointers, except here we align prev.
      auto cloned_transaction_it = std::ranges::find_if(cloned_transactions, [&ptr](const auto &elem) {
        return elem.second->start_timestamp == ptr.delta->commit_info->start_or_commit_timestamp ||
               elem.second->commit_info->start_or_commit_timestamp == ptr.delta->commit_info->start_or_commit_timestamp;
      });
      MG_ASSERT(cloned_transaction_it != cloned_transactions.end(), "Cloned transaction not found");
      // Find the cloned delta in the delta list of the cloned transaction
      auto found_cloned_delta_it =
          std::ranges::find_if(cloned_transaction_it->second->deltas,
                               [delta = ptr.delta](const auto &elem) { return elem.id == delta->id; });
      MG_ASSERT(found_cloned_delta_it != cloned_transaction_it->second->deltas.end(),
                "Delta with given id must exist!");

      cloned.prev.Set(&*found_cloned_delta_it);
      break;
    }
    case PreviousPtr::Type::VERTEX: {
      // The vertex was already extracted into the new shard, so it is safe to reuse its address.
      cloned.prev.Set(ptr.vertex);
      ptr.vertex->second.delta = &cloned;
      break;
    }
    case PreviousPtr::Type::EDGE: {
      // We can never get here if properties on edges are disabled.
      auto *cloned_edge = &*cloned_edges.find(ptr.edge->gid);
      ptr.edge->delta = &cloned;
      cloned.prev.Set(&cloned_edge->second);
      break;
    }
  }
}

}  // namespace memgraph::storage::v3

src/storage/v3/splitter.hpp (new file, 109 lines added)
@@ -0,0 +1,109 @@

// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#include <cstdint>
#include <map>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "storage/v3/config.hpp"
#include "storage/v3/delta.hpp"
#include "storage/v3/edge.hpp"
#include "storage/v3/id_types.hpp"
#include "storage/v3/indices.hpp"
#include "storage/v3/key_store.hpp"
#include "storage/v3/name_id_mapper.hpp"
#include "storage/v3/schemas.hpp"
#include "storage/v3/transaction.hpp"
#include "storage/v3/vertex.hpp"
#include "utils/concepts.hpp"

namespace memgraph::storage::v3 {

// If properties-on-edges is disabled we don't need to send edges,
// only vertices, since the vertices will contain those edges.
struct SplitData {
  LabelId primary_label;
  PrimaryKey min_primary_key;
  std::optional<PrimaryKey> max_primary_key;
  std::vector<SchemaProperty> schema;
  Config config;
  std::unordered_map<uint64_t, std::string> id_to_name;
  uint64_t shard_version;

  VertexContainer vertices;
  std::optional<EdgeContainer> edges;
  std::map<uint64_t, std::unique_ptr<Transaction>> transactions;
  std::map<LabelId, LabelIndex::IndexContainer> label_indices;
  std::map<std::pair<LabelId, PropertyId>, LabelPropertyIndex::IndexContainer> label_property_indices;
};

// TODO(jbajic) Handle deleted_vertices_ and deleted_edges_ after the finishing GC
class Splitter final {
 public:
  Splitter(LabelId primary_label, VertexContainer &vertices, EdgeContainer &edges,
           std::map<uint64_t, std::unique_ptr<Transaction>> &start_logical_id_to_transaction, Indices &indices,
           const Config &config, const std::vector<SchemaProperty> &schema, const NameIdMapper &name_id_mapper_);

  Splitter(const Splitter &) = delete;
  Splitter(Splitter &&) noexcept = delete;
  Splitter &operator=(const Splitter &) = delete;
  Splitter &operator=(Splitter &&) noexcept = delete;
  ~Splitter() = default;

  SplitData SplitShard(const PrimaryKey &split_key, const std::optional<PrimaryKey> &max_primary_key,
                       uint64_t shard_version);

 private:
  VertexContainer CollectVertices(SplitData &data, std::set<uint64_t> &collected_transactions_start_id,
                                  const PrimaryKey &split_key);

  std::optional<EdgeContainer> CollectEdges(std::set<uint64_t> &collected_transactions_start_id,
                                            const VertexContainer &split_vertices, const PrimaryKey &split_key);

  std::map<uint64_t, std::unique_ptr<Transaction>> CollectTransactions(
      const std::set<uint64_t> &collected_transactions_start_id, EdgeContainer &cloned_edges,
      const PrimaryKey &split_key);

  static void ScanDeltas(std::set<uint64_t> &collected_transactions_start_id, const Delta *delta);

  void PruneOriginalDeltas(Transaction &transaction, std::map<uint64_t, std::unique_ptr<Transaction>> &transactions,
                           const PrimaryKey &split_key);

  void AdjustClonedTransaction(Transaction &cloned_transaction, const Transaction &transaction,
                               std::map<uint64_t, std::unique_ptr<Transaction>> &cloned_transactions,
                               EdgeContainer &cloned_edges);

  void AdjustClonedTransactions(std::map<uint64_t, std::unique_ptr<Transaction>> &cloned_transactions,
                                EdgeContainer &cloned_edges, const PrimaryKey &split_key);

  void AdjustEdgeRef(Delta &cloned_delta, EdgeContainer &cloned_edges) const;

  static void AdjustDeltaNextAndPrev(const Delta &original, Delta &cloned,
                                     std::map<uint64_t, std::unique_ptr<Transaction>> &cloned_transactions);

  static void AdjustDeltaPrevPtr(const Delta &original, Delta &cloned,
                                 std::map<uint64_t, std::unique_ptr<Transaction>> &cloned_transactions,
                                 EdgeContainer &cloned_edges);

  const LabelId primary_label_;
  VertexContainer &vertices_;
  EdgeContainer &edges_;
  std::map<uint64_t, std::unique_ptr<Transaction>> &start_logical_id_to_transaction_;
  Indices &indices_;
  const Config &config_;
  const std::vector<SchemaProperty> schema_;
  const NameIdMapper &name_id_mapper_;
};

}  // namespace memgraph::storage::v3
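For orientation, a rough usage sketch of this interface from the owning shard's point of view; the surrounding member names (vertices_, edges_, and so on) are assumptions standing in for whatever the shard actually stores, not part of this header:

  // Inside the shard that is being split (illustrative only):
  Splitter splitter{primary_label_, vertices_, edges_, start_logical_id_to_transaction_,
                    indices_, config_, schema_, name_id_mapper_};
  SplitData right_half = splitter.SplitShard(split_key, max_primary_key_, shard_version_ + 1);
  // right_half now holds the vertices, edges, cloned transactions and indices for keys >= split_key
  // and can be handed to the newly created shard.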
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -31,6 +31,15 @@ struct CommitInfo {
|
||||
};
|
||||
|
||||
struct Transaction {
|
||||
Transaction(coordinator::Hlc start_timestamp, CommitInfo new_commit_info, uint64_t command_id, bool must_abort,
|
||||
bool is_aborted, IsolationLevel isolation_level)
|
||||
: start_timestamp{start_timestamp},
|
||||
commit_info{std::make_unique<CommitInfo>(new_commit_info)},
|
||||
command_id(command_id),
|
||||
must_abort(must_abort),
|
||||
is_aborted(is_aborted),
|
||||
isolation_level(isolation_level){};
|
||||
|
||||
Transaction(coordinator::Hlc start_timestamp, IsolationLevel isolation_level)
|
||||
: start_timestamp(start_timestamp),
|
||||
commit_info(std::make_unique<CommitInfo>(CommitInfo{false, {start_timestamp}})),
|
||||
@ -54,6 +63,56 @@ struct Transaction {
|
||||
|
||||
~Transaction() {}
|
||||

  std::list<Delta> CopyDeltas(CommitInfo *commit_info) const {
    std::list<Delta> copied_deltas;
    for (const auto &delta : deltas) {
      switch (delta.action) {
        case Delta::Action::DELETE_OBJECT:
          copied_deltas.emplace_back(Delta::DeleteObjectTag{}, commit_info, delta.id, command_id);
          break;
        case Delta::Action::RECREATE_OBJECT:
          copied_deltas.emplace_back(Delta::RecreateObjectTag{}, commit_info, delta.id, command_id);
          break;
        case Delta::Action::ADD_LABEL:
          copied_deltas.emplace_back(Delta::AddLabelTag{}, delta.label, commit_info, delta.id, command_id);
          break;
        case Delta::Action::REMOVE_LABEL:
          copied_deltas.emplace_back(Delta::RemoveLabelTag{}, delta.label, commit_info, delta.id, command_id);
          break;
        case Delta::Action::ADD_IN_EDGE:
          copied_deltas.emplace_back(Delta::AddInEdgeTag{}, delta.vertex_edge.edge_type, delta.vertex_edge.vertex_id,
                                     delta.vertex_edge.edge, commit_info, delta.id, command_id);
          break;
        case Delta::Action::ADD_OUT_EDGE:
          copied_deltas.emplace_back(Delta::AddOutEdgeTag{}, delta.vertex_edge.edge_type, delta.vertex_edge.vertex_id,
                                     delta.vertex_edge.edge, commit_info, delta.id, command_id);
          break;
        case Delta::Action::REMOVE_IN_EDGE:
          copied_deltas.emplace_back(Delta::RemoveInEdgeTag{}, delta.vertex_edge.edge_type, delta.vertex_edge.vertex_id,
                                     delta.vertex_edge.edge, commit_info, delta.id, command_id);
          break;
        case Delta::Action::REMOVE_OUT_EDGE:
          copied_deltas.emplace_back(Delta::RemoveOutEdgeTag{}, delta.vertex_edge.edge_type,
                                     delta.vertex_edge.vertex_id, delta.vertex_edge.edge, commit_info, delta.id,
                                     command_id);
          break;
        case Delta::Action::SET_PROPERTY:
          copied_deltas.emplace_back(Delta::SetPropertyTag{}, delta.property.key, delta.property.value, commit_info,
                                     delta.id, command_id);
          break;
      }
    }
    return copied_deltas;
  }

  // This does not solve the whole problem of copying deltas: the next/prev pointers of the
  // copies still have to be adjusted by the caller.
  std::unique_ptr<Transaction> Clone() const {
    auto transaction_ptr = std::make_unique<Transaction>(start_timestamp, *commit_info, command_id, must_abort,
                                                         is_aborted, isolation_level);
    transaction_ptr->deltas = CopyDeltas(transaction_ptr->commit_info.get());
    return transaction_ptr;
  }
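
  // Note (illustrative, not part of this change): a split clones every relevant transaction and only then
  // re-links the copied delta chains (see Splitter::AdjustClonedTransaction). A hedged sketch of that
  // call pattern:
  //
  //   std::map<uint64_t, std::unique_ptr<Transaction>> cloned;
  //   for (const auto &[start_id, txn] : start_logical_id_to_transaction) {
  //     cloned.emplace(start_id, txn->Clone());
  //   }
  //   // cloned deltas are not usable yet: their next/prev pointers still have to be adjusted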
|
||||
|
||||
coordinator::Hlc start_timestamp;
|
||||
std::unique_ptr<CommitInfo> commit_info;
|
||||
uint64_t command_id;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -126,6 +126,17 @@ inline std::vector<Value> ConvertValueVector(const std::vector<v3::PropertyValue
|
||||
return ret;
|
||||
}
|
||||
|
||||
inline std::vector<Value> ConvertValueVector(std::vector<v3::PropertyValue> &&vec) {
|
||||
std::vector<Value> ret;
|
||||
ret.reserve(vec.size());
|
||||
|
||||
for (auto &&elem : vec) {
|
||||
ret.push_back(FromPropertyValueToValue(std::move(elem)));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
inline msgs::VertexId ToMsgsVertexId(const v3::VertexId &vertex_id) {
|
||||
return {msgs::Label{vertex_id.primary_label}, ConvertValueVector(vertex_id.primary_key)};
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -12,6 +12,7 @@
|
||||
#pragma once
|
||||
#include <concepts>
|
||||
#include <iterator>
|
||||
#include <type_traits>
|
||||
|
||||
namespace memgraph::utils {
|
||||
template <typename T, typename... Args>
|
||||
@ -34,4 +35,7 @@ template <typename T>
|
||||
concept Dereferenceable = requires(T t) {
|
||||
{ *t } -> CanReference;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
concept Object = std::is_object_v<T>;
|
||||
} // namespace memgraph::utils
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -25,6 +25,7 @@
|
||||
M(ScanAllByLabelPropertyValueOperator, "Number of times ScanAllByLabelPropertyValue operator was used.") \
|
||||
M(ScanAllByLabelPropertyOperator, "Number of times ScanAllByLabelProperty operator was used.") \
|
||||
M(ScanAllByIdOperator, "Number of times ScanAllById operator was used.") \
|
||||
M(ScanByPrimaryKeyOperator, "Number of times ScanByPrimaryKey operator was used.") \
|
||||
M(ExpandOperator, "Number of times Expand operator was used.") \
|
||||
M(ExpandVariableOperator, "Number of times ExpandVariable operator was used.") \
|
||||
M(ConstructNamedPathOperator, "Number of times ConstructNamedPath operator was used.") \
|
||||
|
@ -79,3 +79,12 @@ target_link_libraries(${test_prefix}data_structures_contains mg-utils mg-storage
|
||||
|
||||
add_benchmark(data_structures_remove.cpp)
|
||||
target_link_libraries(${test_prefix}data_structures_remove mg-utils mg-storage-v3)
|
||||
|
||||
add_benchmark(storage_v3_split.cpp)
|
||||
target_link_libraries(${test_prefix}storage_v3_split mg-storage-v3 mg-query-v2)
|
||||
|
||||
add_benchmark(storage_v3_split_1.cpp)
|
||||
target_link_libraries(${test_prefix}storage_v3_split_1 mg-storage-v3 mg-query-v2)
|
||||
|
||||
add_benchmark(storage_v3_split_2.cpp)
|
||||
target_link_libraries(${test_prefix}storage_v3_split_2 mg-storage-v3 mg-query-v2)
|
||||
|
tests/benchmark/storage_v3_split.cpp (new file, 249 lines added)
@@ -0,0 +1,249 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
#include <vector>
|
||||
|
||||
#include <benchmark/benchmark.h>
|
||||
#include <gflags/gflags.h>
|
||||
|
||||
#include "storage/v3/id_types.hpp"
|
||||
#include "storage/v3/key_store.hpp"
|
||||
#include "storage/v3/property_value.hpp"
|
||||
#include "storage/v3/shard.hpp"
|
||||
#include "storage/v3/vertex.hpp"
|
||||
#include "storage/v3/vertex_id.hpp"
|
||||
|
||||
namespace memgraph::benchmark {
|
||||
|
||||
class ShardSplitBenchmark : public ::benchmark::Fixture {
|
||||
protected:
|
||||
using PrimaryKey = storage::v3::PrimaryKey;
|
||||
using PropertyId = storage::v3::PropertyId;
|
||||
using PropertyValue = storage::v3::PropertyValue;
|
||||
using LabelId = storage::v3::LabelId;
|
||||
using EdgeTypeId = storage::v3::EdgeTypeId;
|
||||
using Shard = storage::v3::Shard;
|
||||
using VertexId = storage::v3::VertexId;
|
||||
using Gid = storage::v3::Gid;
|
||||
|
||||
void SetUp(const ::benchmark::State &state) override {
|
||||
storage.emplace(primary_label, min_pk, std::nullopt, schema_property_vector);
|
||||
storage->StoreMapping(
|
||||
{{1, "label"}, {2, "property"}, {3, "edge_property"}, {4, "secondary_label"}, {5, "secondary_prop"}});
|
||||
}
|
||||
|
||||
void TearDown(const ::benchmark::State &) override { storage = std::nullopt; }
|
||||
|
||||
const PropertyId primary_property{PropertyId::FromUint(2)};
|
||||
const PropertyId secondary_property{PropertyId::FromUint(5)};
|
||||
std::vector<storage::v3::SchemaProperty> schema_property_vector = {
|
||||
storage::v3::SchemaProperty{primary_property, common::SchemaType::INT}};
|
||||
const std::vector<PropertyValue> min_pk{PropertyValue{0}};
|
||||
const LabelId primary_label{LabelId::FromUint(1)};
|
||||
const LabelId secondary_label{LabelId::FromUint(4)};
|
||||
const EdgeTypeId edge_type_id{EdgeTypeId::FromUint(3)};
|
||||
std::optional<Shard> storage;
|
||||
|
||||
coordinator::Hlc last_hlc{0, io::Time{}};
|
||||
|
||||
coordinator::Hlc GetNextHlc() {
|
||||
++last_hlc.logical_id;
|
||||
last_hlc.coordinator_wall_clock += std::chrono::seconds(1);
|
||||
return last_hlc;
|
||||
}
|
||||
};
|
||||
|
||||
BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplit)(::benchmark::State &state) {
|
||||
const auto number_of_vertices{state.range(0)};
|
||||
std::random_device r;
|
||||
std::default_random_engine e1(r());
|
||||
std::uniform_int_distribution<int> uniform_dist(0, number_of_vertices);
|
||||
|
||||
for (int64_t i{0}; i < number_of_vertices; ++i) {
|
||||
auto acc = storage->Access(GetNextHlc());
|
||||
MG_ASSERT(acc.CreateVertexAndValidate({secondary_label}, PrimaryKey{PropertyValue(i)},
|
||||
{{secondary_property, PropertyValue(i)}})
|
||||
.HasValue(),
|
||||
"Failed creating with pk {}", i);
|
||||
if (i > 1) {
|
||||
const auto vtx1 = uniform_dist(e1) % i;
|
||||
const auto vtx2 = uniform_dist(e1) % i;
|
||||
|
||||
MG_ASSERT(acc.CreateEdge(VertexId{primary_label, {PropertyValue(vtx1)}},
|
||||
VertexId{primary_label, {PropertyValue(vtx2)}}, edge_type_id, Gid::FromUint(i))
|
||||
.HasValue(),
|
||||
"Failed on {} and {}", vtx1, vtx2);
|
||||
}
|
||||
acc.Commit(GetNextHlc());
|
||||
}
|
||||
for (auto _ : state) {
|
||||
auto data = storage->PerformSplit(PrimaryKey{PropertyValue{number_of_vertices / 2}}, 2);
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplitWithGc)(::benchmark::State &state) {
|
||||
const auto number_of_vertices{state.range(0)};
|
||||
std::random_device r;
|
||||
std::default_random_engine e1(r());
|
||||
std::uniform_int_distribution<int> uniform_dist(0, number_of_vertices);
|
||||
|
||||
for (int64_t i{0}; i < number_of_vertices; ++i) {
|
||||
auto acc = storage->Access(GetNextHlc());
|
||||
MG_ASSERT(acc.CreateVertexAndValidate({secondary_label}, PrimaryKey{PropertyValue(i)},
|
||||
{{secondary_property, PropertyValue(i)}})
|
||||
.HasValue(),
|
||||
"Failed creating with pk {}", i);
|
||||
if (i > 1) {
|
||||
const auto vtx1 = uniform_dist(e1) % i;
|
||||
const auto vtx2 = uniform_dist(e1) % i;
|
||||
|
||||
MG_ASSERT(acc.CreateEdge(VertexId{primary_label, {PropertyValue(vtx1)}},
|
||||
VertexId{primary_label, {PropertyValue(vtx2)}}, edge_type_id, Gid::FromUint(i))
|
||||
.HasValue(),
|
||||
"Failed on {} and {}", vtx1, vtx2);
|
||||
}
|
||||
acc.Commit(GetNextHlc());
|
||||
}
|
||||
storage->CollectGarbage(GetNextHlc().coordinator_wall_clock);
|
||||
for (auto _ : state) {
|
||||
auto data = storage->PerformSplit(PrimaryKey{PropertyValue{number_of_vertices / 2}}, 2);
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplitWithFewTransactionsOnVertices)(::benchmark::State &state) {
|
||||
const auto number_of_vertices = state.range(0);
|
||||
const auto number_of_edges = state.range(1);
|
||||
const auto number_of_transactions = state.range(2);
|
||||
std::random_device r;
|
||||
std::default_random_engine e1(r());
|
||||
std::uniform_int_distribution<int> uniform_dist(0, number_of_vertices - number_of_transactions - 1);
|
||||
|
||||
// Create Vertices
|
||||
int64_t vertex_count{0};
|
||||
{
|
||||
auto acc = storage->Access(GetNextHlc());
|
||||
for (; vertex_count < number_of_vertices - number_of_transactions; ++vertex_count) {
|
||||
MG_ASSERT(acc.CreateVertexAndValidate({secondary_label}, PrimaryKey{PropertyValue(vertex_count)},
|
||||
{{secondary_property, PropertyValue(vertex_count)}})
|
||||
.HasValue(),
|
||||
"Failed creating with pk {}", vertex_count);
|
||||
}
|
||||
|
||||
// Create Edges
|
||||
for (int64_t i{0}; i < number_of_edges; ++i) {
|
||||
const auto vtx1 = uniform_dist(e1);
|
||||
const auto vtx2 = uniform_dist(e1);
|
||||
|
||||
MG_ASSERT(acc.CreateEdge(VertexId{primary_label, {PropertyValue(vtx1)}},
|
||||
VertexId{primary_label, {PropertyValue(vtx2)}}, edge_type_id, Gid::FromUint(i))
|
||||
.HasValue(),
|
||||
"Failed on {} and {}", vtx1, vtx2);
|
||||
}
|
||||
acc.Commit(GetNextHlc());
|
||||
}
|
||||
// Clean up transactional data
|
||||
storage->CollectGarbage(GetNextHlc().coordinator_wall_clock);
|
||||
|
||||
// Create rest of the objects and leave transactions
|
||||
for (; vertex_count < number_of_vertices; ++vertex_count) {
|
||||
auto acc = storage->Access(GetNextHlc());
|
||||
MG_ASSERT(acc.CreateVertexAndValidate({secondary_label}, PrimaryKey{PropertyValue(vertex_count)},
|
||||
{{secondary_property, PropertyValue(vertex_count)}})
|
||||
.HasValue(),
|
||||
"Failed creating with pk {}", vertex_count);
|
||||
acc.Commit(GetNextHlc());
|
||||
}
|
||||
|
||||
for (auto _ : state) {
|
||||
// Don't create shard since shard deallocation can take some time as well
|
||||
auto data = storage->PerformSplit(PrimaryKey{PropertyValue{number_of_vertices / 2}}, 2);
|
||||
}
|
||||
}
|
||||
|
||||
// Range:
|
||||
// Number of vertices
|
||||
// This run is pessimistic: the number of vertices corresponds to the number of transactions
|
||||
// BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplit)
|
||||
// ->RangeMultiplier(10)
|
||||
// ->Range(100'000, 100'000)
|
||||
// ->Unit(::benchmark::kMillisecond);
|
||||
|
||||
// Range:
|
||||
// Number of vertices
|
||||
// This run is optimistic, in this run there are no transactions
|
||||
// BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplitWithGc)
|
||||
// ->RangeMultiplier(10)
|
||||
// ->Range(100'000, 1'000'000)
|
||||
// ->Unit(::benchmark::kMillisecond);
|
||||
|
||||
// Args:
|
||||
// Number of vertices
|
||||
// Number of edges
|
||||
// Number of transactions
|
||||
BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplitWithFewTransactionsOnVertices)
|
||||
->Args({100'000, 100'000, 100})
|
||||
->Args({200'000, 100'000, 100})
|
||||
->Args({300'000, 100'000, 100})
|
||||
->Args({400'000, 100'000, 100})
|
||||
->Args({500'000, 100'000, 100})
|
||||
->Args({600'000, 100'000, 100})
|
||||
->Args({700'000, 100'000, 100})
|
||||
->Args({800'000, 100'000, 100})
|
||||
->Args({900'000, 100'000, 100})
|
||||
->Args({1'000'000, 100'000, 100})
|
||||
->Args({2'000'000, 100'000, 100})
|
||||
->Args({3'000'000, 100'000, 100})
|
||||
->Args({4'000'000, 100'000, 100})
|
||||
->Args({5'000'000, 100'000, 100})
|
||||
->Args({6'000'000, 100'000, 100})
|
||||
->Args({7'000'000, 100'000, 100})
|
||||
->Args({8'000'000, 100'000, 100})
|
||||
->Args({9'000'000, 100'000, 100})
|
||||
->Args({10'000'000, 100'000, 100})
|
||||
->Unit(::benchmark::kMillisecond)
|
||||
->Name("IncreaseVertices");
|
||||
|
||||
BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplitWithFewTransactionsOnVertices)
|
||||
->Args({100'000, 100'000, 100})
|
||||
->Args({100'000, 200'000, 100})
|
||||
->Args({100'000, 300'000, 100})
|
||||
->Args({100'000, 400'000, 100})
|
||||
->Args({100'000, 500'000, 100})
|
||||
->Args({100'000, 600'000, 100})
|
||||
->Args({100'000, 700'000, 100})
|
||||
->Args({100'000, 800'000, 100})
|
||||
->Args({100'000, 900'000, 100})
|
||||
->Args({100'000, 1'000'000, 100})
|
||||
->Args({100'000, 2'000'000, 100})
|
||||
->Args({100'000, 3'000'000, 100})
|
||||
->Args({100'000, 4'000'000, 100})
|
||||
->Args({100'000, 5'000'000, 100})
|
||||
->Args({100'000, 6'000'000, 100})
|
||||
->Args({100'000, 7'000'000, 100})
|
||||
->Args({100'000, 8'000'000, 100})
|
||||
->Args({100'000, 9'000'000, 100})
|
||||
->Args({100'000, 10'000'000, 100})
|
||||
->Unit(::benchmark::kMillisecond)
|
||||
->Name("IncreaseEdges");
|
||||
|
||||
BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplitWithFewTransactionsOnVertices)
|
||||
->Args({100'000, 100'000, 100})
|
||||
->Args({100'000, 100'000, 1'000})
|
||||
->Args({100'000, 100'000, 10'000})
|
||||
->Args({100'000, 100'000, 100'000})
|
||||
->Unit(::benchmark::kMillisecond)
|
||||
->Name("IncreaseTransactions");
|
||||
|
||||
} // namespace memgraph::benchmark
|
||||
|
||||
BENCHMARK_MAIN();
|
tests/benchmark/storage_v3_split_1.cpp (new file, 270 lines added)
@@ -0,0 +1,270 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
#include <vector>
|
||||
|
||||
#include <benchmark/benchmark.h>
|
||||
#include <gflags/gflags.h>
|
||||
|
||||
#include "storage/v3/id_types.hpp"
|
||||
#include "storage/v3/key_store.hpp"
|
||||
#include "storage/v3/property_value.hpp"
|
||||
#include "storage/v3/shard.hpp"
|
||||
#include "storage/v3/vertex.hpp"
|
||||
#include "storage/v3/vertex_id.hpp"
|
||||
|
||||
namespace memgraph::benchmark {
|
||||
|
||||
class ShardSplitBenchmark : public ::benchmark::Fixture {
|
||||
protected:
|
||||
using PrimaryKey = storage::v3::PrimaryKey;
|
||||
using PropertyId = storage::v3::PropertyId;
|
||||
using PropertyValue = storage::v3::PropertyValue;
|
||||
using LabelId = storage::v3::LabelId;
|
||||
using EdgeTypeId = storage::v3::EdgeTypeId;
|
||||
using Shard = storage::v3::Shard;
|
||||
using VertexId = storage::v3::VertexId;
|
||||
using Gid = storage::v3::Gid;
|
||||
|
||||
void SetUp(const ::benchmark::State &state) override {
|
||||
storage.emplace(primary_label, min_pk, std::nullopt, schema_property_vector);
|
||||
storage->StoreMapping(
|
||||
{{1, "label"}, {2, "property"}, {3, "edge_property"}, {4, "secondary_label"}, {5, "secondary_prop"}});
|
||||
}
|
||||
|
||||
void TearDown(const ::benchmark::State &) override { storage = std::nullopt; }
|
||||
|
||||
const PropertyId primary_property{PropertyId::FromUint(2)};
|
||||
const PropertyId secondary_property{PropertyId::FromUint(5)};
|
||||
std::vector<storage::v3::SchemaProperty> schema_property_vector = {
|
||||
storage::v3::SchemaProperty{primary_property, common::SchemaType::INT}};
|
||||
const std::vector<PropertyValue> min_pk{PropertyValue{0}};
|
||||
const LabelId primary_label{LabelId::FromUint(1)};
|
||||
const LabelId secondary_label{LabelId::FromUint(4)};
|
||||
const EdgeTypeId edge_type_id{EdgeTypeId::FromUint(3)};
|
||||
std::optional<Shard> storage;
|
||||
|
||||
coordinator::Hlc last_hlc{0, io::Time{}};
|
||||
|
||||
coordinator::Hlc GetNextHlc() {
|
||||
++last_hlc.logical_id;
|
||||
last_hlc.coordinator_wall_clock += std::chrono::seconds(1);
|
||||
return last_hlc;
|
||||
}
|
||||
};
|
||||
|
||||
BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplit)(::benchmark::State &state) {
|
||||
const auto number_of_vertices{state.range(0)};
|
||||
std::random_device r;
|
||||
std::default_random_engine e1(r());
|
||||
std::uniform_int_distribution<int> uniform_dist(0, number_of_vertices);
|
||||
|
||||
for (int64_t i{0}; i < number_of_vertices; ++i) {
|
||||
auto acc = storage->Access(GetNextHlc());
|
||||
MG_ASSERT(acc.CreateVertexAndValidate({secondary_label}, PrimaryKey{PropertyValue(i)},
|
||||
{{secondary_property, PropertyValue(i)}})
|
||||
.HasValue(),
|
||||
"Failed creating with pk {}", i);
|
||||
if (i > 1) {
|
||||
const auto vtx1 = uniform_dist(e1) % i;
|
||||
const auto vtx2 = uniform_dist(e1) % i;
|
||||
|
||||
MG_ASSERT(acc.CreateEdge(VertexId{primary_label, {PropertyValue(vtx1)}},
|
||||
VertexId{primary_label, {PropertyValue(vtx2)}}, edge_type_id, Gid::FromUint(i))
|
||||
.HasValue(),
|
||||
"Failed on {} and {}", vtx1, vtx2);
|
||||
}
|
||||
acc.Commit(GetNextHlc());
|
||||
}
|
||||
for (auto _ : state) {
|
||||
auto data = storage->PerformSplit(PrimaryKey{PropertyValue{number_of_vertices / 2}}, 2);
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplitWithGc)(::benchmark::State &state) {
|
||||
const auto number_of_vertices{state.range(0)};
|
||||
std::random_device r;
|
||||
std::default_random_engine e1(r());
|
||||
std::uniform_int_distribution<int> uniform_dist(0, number_of_vertices);
|
||||
|
||||
for (int64_t i{0}; i < number_of_vertices; ++i) {
|
||||
auto acc = storage->Access(GetNextHlc());
|
||||
MG_ASSERT(acc.CreateVertexAndValidate({secondary_label}, PrimaryKey{PropertyValue(i)},
|
||||
{{secondary_property, PropertyValue(i)}})
|
||||
.HasValue(),
|
||||
"Failed creating with pk {}", i);
|
||||
if (i > 1) {
|
||||
const auto vtx1 = uniform_dist(e1) % i;
|
||||
const auto vtx2 = uniform_dist(e1) % i;
|
||||
|
||||
MG_ASSERT(acc.CreateEdge(VertexId{primary_label, {PropertyValue(vtx1)}},
|
||||
VertexId{primary_label, {PropertyValue(vtx2)}}, edge_type_id, Gid::FromUint(i))
|
||||
.HasValue(),
|
||||
"Failed on {} and {}", vtx1, vtx2);
|
||||
}
|
||||
acc.Commit(GetNextHlc());
|
||||
}
|
||||
storage->CollectGarbage(GetNextHlc().coordinator_wall_clock);
|
||||
for (auto _ : state) {
|
||||
auto data = storage->PerformSplit(PrimaryKey{PropertyValue{number_of_vertices / 2}}, 2);
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplitWithFewTransactionsOnVertices)(::benchmark::State &state) {
|
||||
const auto number_of_vertices = state.range(0);
|
||||
const auto number_of_edges = state.range(1);
|
||||
const auto number_of_transactions = state.range(2);
|
||||
std::random_device r;
|
||||
std::default_random_engine e1(r());
|
||||
std::uniform_int_distribution<int> uniform_dist(0, number_of_vertices - number_of_transactions - 1);
|
||||
|
||||
// Create Vertices
|
||||
int64_t vertex_count{0};
|
||||
{
|
||||
auto acc = storage->Access(GetNextHlc());
|
||||
for (; vertex_count < number_of_vertices - number_of_transactions; ++vertex_count) {
|
||||
MG_ASSERT(acc.CreateVertexAndValidate({secondary_label}, PrimaryKey{PropertyValue(vertex_count)},
|
||||
{{secondary_property, PropertyValue(vertex_count)}})
|
||||
.HasValue(),
|
||||
"Failed creating with pk {}", vertex_count);
|
||||
}
|
||||
|
||||
// Create Edges
|
||||
for (int64_t i{0}; i < number_of_edges; ++i) {
|
||||
const auto vtx1 = uniform_dist(e1);
|
||||
const auto vtx2 = uniform_dist(e1);
|
||||
|
||||
MG_ASSERT(acc.CreateEdge(VertexId{primary_label, {PropertyValue(vtx1)}},
|
||||
VertexId{primary_label, {PropertyValue(vtx2)}}, edge_type_id, Gid::FromUint(i))
|
||||
.HasValue(),
|
||||
"Failed on {} and {}", vtx1, vtx2);
|
||||
}
|
||||
acc.Commit(GetNextHlc());
|
||||
}
|
||||
// Clean up transactional data
|
||||
storage->CollectGarbage(GetNextHlc().coordinator_wall_clock);
|
||||
|
||||
// Create rest of the objects and leave transactions
|
||||
for (; vertex_count < number_of_vertices; ++vertex_count) {
|
||||
auto acc = storage->Access(GetNextHlc());
|
||||
MG_ASSERT(acc.CreateVertexAndValidate({secondary_label}, PrimaryKey{PropertyValue(vertex_count)},
|
||||
{{secondary_property, PropertyValue(vertex_count)}})
|
||||
.HasValue(),
|
||||
"Failed creating with pk {}", vertex_count);
|
||||
acc.Commit(GetNextHlc());
|
||||
}
|
||||
|
||||
for (auto _ : state) {
|
||||
// Don't create shard since shard deallocation can take some time as well
|
||||
auto data = storage->PerformSplit(PrimaryKey{PropertyValue{number_of_vertices / 2}}, 2);
|
||||
}
|
||||
}
|
||||
|
||||
// Range:
|
||||
// Number of vertices
|
||||
// This run is pessimistic: the number of vertices corresponds to the number of transactions
|
||||
// BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplit)
|
||||
// ->RangeMultiplier(10)
|
||||
// ->Range(100'000, 100'000)
|
||||
// ->Unit(::benchmark::kMillisecond);
|
||||
|
||||
// Range:
|
||||
// Number of vertices
|
||||
// This run is optimistic, in this run there are no transactions
|
||||
// BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplitWithGc)
|
||||
// ->RangeMultiplier(10)
|
||||
// ->Range(100'000, 1'000'000)
|
||||
// ->Unit(::benchmark::kMillisecond);
|
||||
|
||||
// Args:
|
||||
// Number of vertices
|
||||
// Number of edges
|
||||
// Number of transactions
|
||||
BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplitWithFewTransactionsOnVertices)
|
||||
// ->Args({100'000, 100'000, 100})
|
||||
// ->Args({200'000, 100'000, 100})
|
||||
// ->Args({300'000, 100'000, 100})
|
||||
// ->Args({400'000, 100'000, 100})
|
||||
// ->Args({500'000, 100'000, 100})
|
||||
// ->Args({600'000, 100'000, 100})
|
||||
// ->Args({700'000, 100'000, 100})
|
||||
// ->Args({800'000, 100'000, 100})
|
||||
->Args({900'000, 100'000, 100})
|
||||
// ->Args({1'000'000, 100'000, 100})
|
||||
// ->Args({2'000'000, 100'000, 100})
|
||||
// ->Args({3'000'000, 100'000, 100})
|
||||
// ->Args({4'000'000, 100'000, 100})
|
||||
// ->Args({6'000'000, 100'000, 100})
|
||||
// ->Args({7'000'000, 100'000, 100})
|
||||
// ->Args({8'000'000, 100'000, 100})
|
||||
// ->Args({9'000'000, 100'000, 100})
|
||||
// ->Args({10'000'000, 100'000, 100})
|
||||
->Unit(::benchmark::kMillisecond)
|
||||
->Name("IncreaseVertices");
|
||||
|
||||
// BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplitWithFewTransactionsOnVertices)
|
||||
// ->Args({100'000, 100'000, 100})
|
||||
// ->Args({200'000, 100'000, 100})
|
||||
// ->Args({300'000, 100'000, 100})
|
||||
// ->Args({400'000, 100'000, 100})
|
||||
// ->Args({500'000, 100'000, 100})
|
||||
// ->Args({600'000, 100'000, 100})
|
||||
// ->Args({700'000, 100'000, 100})
|
||||
// ->Args({800'000, 100'000, 100})
|
||||
// ->Args({900'000, 100'000, 100})
|
||||
// ->Args({1'000'000, 100'000, 100})
|
||||
// ->Args({2'000'000, 100'000, 100})
|
||||
// ->Args({3'000'000, 100'000, 100})
|
||||
// ->Args({4'000'000, 100'000, 100})
|
||||
// ->Args({6'000'000, 100'000, 100})
|
||||
// ->Args({7'000'000, 100'000, 100})
|
||||
// ->Args({8'000'000, 100'000, 100})
|
||||
// ->Args({9'000'000, 100'000, 100})
|
||||
// ->Args({10'000'000, 100'000, 100})
|
||||
// ->Unit(::benchmark::kMillisecond)
|
||||
// ->Name("IncreaseVertices");
|
||||
|
||||
// BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplitWithFewTransactionsOnVertices)
|
||||
// ->Args({100'000, 100'000, 100})
|
||||
// ->Args({100'000, 200'000, 100})
|
||||
// ->Args({100'000, 300'000, 100})
|
||||
// ->Args({100'000, 400'000, 100})
|
||||
// ->Args({100'000, 500'000, 100})
|
||||
// ->Args({100'000, 600'000, 100})
|
||||
// ->Args({100'000, 700'000, 100})
|
||||
// ->Args({100'000, 800'000, 100})
|
||||
// ->Args({100'000, 900'000, 100})
|
||||
// ->Args({100'000, 1'000'000, 100})
|
||||
// ->Args({100'000, 2'000'000, 100})
|
||||
// ->Args({100'000, 3'000'000, 100})
|
||||
// ->Args({100'000, 4'000'000, 100})
|
||||
// ->Args({100'000, 5'000'000, 100})
|
||||
// ->Args({100'000, 6'000'000, 100})
|
||||
// ->Args({100'000, 7'000'000, 100})
|
||||
// ->Args({100'000, 8'000'000, 100})
|
||||
// ->Args({100'000, 9'000'000, 100})
|
||||
// ->Args({100'000, 10'000'000, 100})
|
||||
// ->Unit(::benchmark::kMillisecond)
|
||||
// ->Name("IncreaseEdges");
|
||||
|
||||
// BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplitWithFewTransactionsOnVertices)
|
||||
// ->Args({100'000, 100'000, 100})
|
||||
// ->Args({100'000, 100'000, 1'000})
|
||||
// ->Args({100'000, 100'000, 10'000})
|
||||
// ->Args({100'000, 100'000, 100'000})
|
||||
// ->Unit(::benchmark::kMillisecond)
|
||||
// ->Name("IncreaseTransactions");
|
||||
|
||||
} // namespace memgraph::benchmark
|
||||
|
||||
BENCHMARK_MAIN();
|
tests/benchmark/storage_v3_split_2.cpp (new file, 270 lines added)
@@ -0,0 +1,270 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
#include <vector>
|
||||
|
||||
#include <benchmark/benchmark.h>
|
||||
#include <gflags/gflags.h>
|
||||
|
||||
#include "storage/v3/id_types.hpp"
|
||||
#include "storage/v3/key_store.hpp"
|
||||
#include "storage/v3/property_value.hpp"
|
||||
#include "storage/v3/shard.hpp"
|
||||
#include "storage/v3/vertex.hpp"
|
||||
#include "storage/v3/vertex_id.hpp"
|
||||
|
||||
namespace memgraph::benchmark {
|
||||
|
||||
class ShardSplitBenchmark : public ::benchmark::Fixture {
|
||||
protected:
|
||||
using PrimaryKey = storage::v3::PrimaryKey;
|
||||
using PropertyId = storage::v3::PropertyId;
|
||||
using PropertyValue = storage::v3::PropertyValue;
|
||||
using LabelId = storage::v3::LabelId;
|
||||
using EdgeTypeId = storage::v3::EdgeTypeId;
|
||||
using Shard = storage::v3::Shard;
|
||||
using VertexId = storage::v3::VertexId;
|
||||
using Gid = storage::v3::Gid;
|
||||
|
||||
void SetUp(const ::benchmark::State &state) override {
|
||||
storage.emplace(primary_label, min_pk, std::nullopt, schema_property_vector);
|
||||
storage->StoreMapping(
|
||||
{{1, "label"}, {2, "property"}, {3, "edge_property"}, {4, "secondary_label"}, {5, "secondary_prop"}});
|
||||
}
|
||||
|
||||
void TearDown(const ::benchmark::State &) override { storage = std::nullopt; }
|
||||
|
||||
const PropertyId primary_property{PropertyId::FromUint(2)};
|
||||
const PropertyId secondary_property{PropertyId::FromUint(5)};
|
||||
std::vector<storage::v3::SchemaProperty> schema_property_vector = {
|
||||
storage::v3::SchemaProperty{primary_property, common::SchemaType::INT}};
|
||||
const std::vector<PropertyValue> min_pk{PropertyValue{0}};
|
||||
const LabelId primary_label{LabelId::FromUint(1)};
|
||||
const LabelId secondary_label{LabelId::FromUint(4)};
|
||||
const EdgeTypeId edge_type_id{EdgeTypeId::FromUint(3)};
|
||||
std::optional<Shard> storage;
|
||||
|
||||
coordinator::Hlc last_hlc{0, io::Time{}};
|
||||
|
||||
coordinator::Hlc GetNextHlc() {
|
||||
++last_hlc.logical_id;
|
||||
last_hlc.coordinator_wall_clock += std::chrono::seconds(1);
|
||||
return last_hlc;
|
||||
}
|
||||
};
|
||||
|
||||
BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplit)(::benchmark::State &state) {
|
||||
const auto number_of_vertices{state.range(0)};
|
||||
std::random_device r;
|
||||
std::default_random_engine e1(r());
|
||||
std::uniform_int_distribution<int> uniform_dist(0, number_of_vertices);
|
||||
|
||||
for (int64_t i{0}; i < number_of_vertices; ++i) {
|
||||
auto acc = storage->Access(GetNextHlc());
|
||||
MG_ASSERT(acc.CreateVertexAndValidate({secondary_label}, PrimaryKey{PropertyValue(i)},
|
||||
{{secondary_property, PropertyValue(i)}})
|
||||
.HasValue(),
|
||||
"Failed creating with pk {}", i);
|
||||
if (i > 1) {
|
||||
const auto vtx1 = uniform_dist(e1) % i;
|
||||
const auto vtx2 = uniform_dist(e1) % i;
|
||||
|
||||
MG_ASSERT(acc.CreateEdge(VertexId{primary_label, {PropertyValue(vtx1)}},
|
||||
VertexId{primary_label, {PropertyValue(vtx2)}}, edge_type_id, Gid::FromUint(i))
|
||||
.HasValue(),
|
||||
"Failed on {} and {}", vtx1, vtx2);
|
||||
}
|
||||
acc.Commit(GetNextHlc());
|
||||
}
|
||||
for (auto _ : state) {
|
||||
auto data = storage->PerformSplit(PrimaryKey{PropertyValue{number_of_vertices / 2}}, 2);
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplitWithGc)(::benchmark::State &state) {
|
||||
const auto number_of_vertices{state.range(0)};
|
||||
std::random_device r;
|
||||
std::default_random_engine e1(r());
|
||||
std::uniform_int_distribution<int> uniform_dist(0, number_of_vertices);
|
||||
|
||||
for (int64_t i{0}; i < number_of_vertices; ++i) {
|
||||
auto acc = storage->Access(GetNextHlc());
|
||||
MG_ASSERT(acc.CreateVertexAndValidate({secondary_label}, PrimaryKey{PropertyValue(i)},
|
||||
{{secondary_property, PropertyValue(i)}})
|
||||
.HasValue(),
|
||||
"Failed creating with pk {}", i);
|
||||
if (i > 1) {
|
||||
const auto vtx1 = uniform_dist(e1) % i;
|
||||
const auto vtx2 = uniform_dist(e1) % i;
|
||||
|
||||
MG_ASSERT(acc.CreateEdge(VertexId{primary_label, {PropertyValue(vtx1)}},
|
||||
VertexId{primary_label, {PropertyValue(vtx2)}}, edge_type_id, Gid::FromUint(i))
|
||||
.HasValue(),
|
||||
"Failed on {} and {}", vtx1, vtx2);
|
||||
}
|
||||
acc.Commit(GetNextHlc());
|
||||
}
|
||||
storage->CollectGarbage(GetNextHlc().coordinator_wall_clock);
|
||||
for (auto _ : state) {
|
||||
auto data = storage->PerformSplit(PrimaryKey{PropertyValue{number_of_vertices / 2}}, 2);
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplitWithFewTransactionsOnVertices)(::benchmark::State &state) {
|
||||
const auto number_of_vertices = state.range(0);
|
||||
const auto number_of_edges = state.range(1);
|
||||
const auto number_of_transactions = state.range(2);
|
||||
std::random_device r;
|
||||
std::default_random_engine e1(r());
|
||||
std::uniform_int_distribution<int> uniform_dist(0, number_of_vertices - number_of_transactions - 1);
|
||||
|
||||
// Create Vertices
|
||||
int64_t vertex_count{0};
|
||||
{
|
||||
auto acc = storage->Access(GetNextHlc());
|
||||
for (; vertex_count < number_of_vertices - number_of_transactions; ++vertex_count) {
|
||||
MG_ASSERT(acc.CreateVertexAndValidate({secondary_label}, PrimaryKey{PropertyValue(vertex_count)},
|
||||
{{secondary_property, PropertyValue(vertex_count)}})
|
||||
.HasValue(),
|
||||
"Failed creating with pk {}", vertex_count);
|
||||
}
|
||||
|
||||
// Create Edges
|
||||
for (int64_t i{0}; i < number_of_edges; ++i) {
|
||||
const auto vtx1 = uniform_dist(e1);
|
||||
const auto vtx2 = uniform_dist(e1);
|
||||
|
||||
MG_ASSERT(acc.CreateEdge(VertexId{primary_label, {PropertyValue(vtx1)}},
|
||||
VertexId{primary_label, {PropertyValue(vtx2)}}, edge_type_id, Gid::FromUint(i))
|
||||
.HasValue(),
|
||||
"Failed on {} and {}", vtx1, vtx2);
|
||||
}
|
||||
acc.Commit(GetNextHlc());
|
||||
}
|
||||
// Clean up transactional data
|
||||
storage->CollectGarbage(GetNextHlc().coordinator_wall_clock);
|
||||
|
||||
// Create rest of the objects and leave transactions
|
||||
for (; vertex_count < number_of_vertices; ++vertex_count) {
|
||||
auto acc = storage->Access(GetNextHlc());
|
||||
MG_ASSERT(acc.CreateVertexAndValidate({secondary_label}, PrimaryKey{PropertyValue(vertex_count)},
|
||||
{{secondary_property, PropertyValue(vertex_count)}})
|
||||
.HasValue(),
|
||||
"Failed creating with pk {}", vertex_count);
|
||||
acc.Commit(GetNextHlc());
|
||||
}
|
||||
|
||||
for (auto _ : state) {
|
||||
// Don't create shard since shard deallocation can take some time as well
|
||||
auto data = storage->PerformSplit(PrimaryKey{PropertyValue{number_of_vertices / 2}}, 2);
|
||||
}
|
||||
}
|
||||
|
||||
// Range:
|
||||
// Number of vertices
|
||||
// This run is pessimistic: the number of vertices corresponds to the number of transactions
|
||||
// BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplit)
|
||||
// ->RangeMultiplier(10)
|
||||
// ->Range(100'000, 100'000)
|
||||
// ->Unit(::benchmark::kMillisecond);
|
||||
|
||||
// Range:
|
||||
// Number of vertices
|
||||
// This run is optimistic, in this run there are no transactions
|
||||
// BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplitWithGc)
|
||||
// ->RangeMultiplier(10)
|
||||
// ->Range(100'000, 1'000'000)
|
||||
// ->Unit(::benchmark::kMillisecond);
|
||||
|
||||
// Args:
|
||||
// Number of vertices
|
||||
// Number of edges
|
||||
// Number of transactions
|
||||
BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplitWithFewTransactionsOnVertices)
|
||||
// ->Args({100'000, 100'000, 100})
|
||||
// ->Args({200'000, 100'000, 100})
|
||||
// ->Args({300'000, 100'000, 100})
|
||||
// ->Args({400'000, 100'000, 100})
|
||||
// ->Args({500'000, 100'000, 100})
|
||||
// ->Args({600'000, 100'000, 100})
|
||||
// ->Args({700'000, 100'000, 100})
|
||||
// ->Args({800'000, 100'000, 100})
|
||||
// ->Args({900'000, 100'000, 100})
|
||||
->Args({1'000'000, 100'000, 100})
|
||||
// ->Args({2'000'000, 100'000, 100})
|
||||
// ->Args({3'000'000, 100'000, 100})
|
||||
// ->Args({4'000'000, 100'000, 100})
|
||||
// ->Args({6'000'000, 100'000, 100})
|
||||
// ->Args({7'000'000, 100'000, 100})
|
||||
// ->Args({8'000'000, 100'000, 100})
|
||||
// ->Args({9'000'000, 100'000, 100})
|
||||
// ->Args({10'000'000, 100'000, 100})
|
||||
->Unit(::benchmark::kMillisecond)
|
||||
->Name("IncreaseVertices");
|
||||
|
||||
// BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplitWithFewTransactionsOnVertices)
|
||||
// ->Args({100'000, 100'000, 100})
|
||||
// ->Args({200'000, 100'000, 100})
|
||||
// ->Args({300'000, 100'000, 100})
|
||||
// ->Args({400'000, 100'000, 100})
|
||||
// ->Args({500'000, 100'000, 100})
|
||||
// ->Args({600'000, 100'000, 100})
|
||||
// ->Args({700'000, 100'000, 100})
|
||||
// ->Args({800'000, 100'000, 100})
|
||||
// ->Args({900'000, 100'000, 100})
|
||||
// ->Args({1'000'000, 100'000, 100})
|
||||
// ->Args({2'000'000, 100'000, 100})
|
||||
// ->Args({3'000'000, 100'000, 100})
|
||||
// ->Args({4'000'000, 100'000, 100})
|
||||
// ->Args({6'000'000, 100'000, 100})
|
||||
// ->Args({7'000'000, 100'000, 100})
|
||||
// ->Args({8'000'000, 100'000, 100})
|
||||
// ->Args({9'000'000, 100'000, 100})
|
||||
// ->Args({10'000'000, 100'000, 100})
|
||||
// ->Unit(::benchmark::kMillisecond)
|
||||
// ->Name("IncreaseVertices");
|
||||
|
||||
// BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplitWithFewTransactionsOnVertices)
|
||||
// ->Args({100'000, 100'000, 100})
|
||||
// ->Args({100'000, 200'000, 100})
|
||||
// ->Args({100'000, 300'000, 100})
|
||||
// ->Args({100'000, 400'000, 100})
|
||||
// ->Args({100'000, 500'000, 100})
|
||||
// ->Args({100'000, 600'000, 100})
|
||||
// ->Args({100'000, 700'000, 100})
|
||||
// ->Args({100'000, 800'000, 100})
|
||||
// ->Args({100'000, 900'000, 100})
|
||||
// ->Args({100'000, 1'000'000, 100})
|
||||
// ->Args({100'000, 2'000'000, 100})
|
||||
// ->Args({100'000, 3'000'000, 100})
|
||||
// ->Args({100'000, 4'000'000, 100})
|
||||
// ->Args({100'000, 5'000'000, 100})
|
||||
// ->Args({100'000, 6'000'000, 100})
|
||||
// ->Args({100'000, 7'000'000, 100})
|
||||
// ->Args({100'000, 8'000'000, 100})
|
||||
// ->Args({100'000, 9'000'000, 100})
|
||||
// ->Args({100'000, 10'000'000, 100})
|
||||
// ->Unit(::benchmark::kMillisecond)
|
||||
// ->Name("IncreaseEdges");
|
||||
|
||||
// BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplitWithFewTransactionsOnVertices)
|
||||
// ->Args({100'000, 100'000, 100})
|
||||
// ->Args({100'000, 100'000, 1'000})
|
||||
// ->Args({100'000, 100'000, 10'000})
|
||||
// ->Args({100'000, 100'000, 100'000})
|
||||
// ->Unit(::benchmark::kMillisecond)
|
||||
// ->Name("IncreaseTransactions");
|
||||
|
||||
} // namespace memgraph::benchmark
|
||||
|
||||
BENCHMARK_MAIN();
|
@ -36,7 +36,9 @@ def test_awesome_memgraph_functions(connection):
|
||||
assert len(results) == 1
|
||||
assert results[0][0] == 5
|
||||
|
||||
results = execute_and_fetch_all(cursor, "MATCH (n) WITH COLLECT(n.property) as nn RETURN ALL(i IN nn WHERE i > 0)")
|
||||
results = execute_and_fetch_all(
|
||||
cursor, "UNWIND [2, 1, 3] AS value WITH COLLECT(value) as nn RETURN ALL(i IN nn WHERE i > 0)"
|
||||
)
|
||||
assert len(results) == 1
|
||||
assert results[0][0] == True
|
||||
|
||||
|
@ -9,11 +9,13 @@
|
||||
# by the Apache License, Version 2.0, included in the file
|
||||
# licenses/APL.txt.
|
||||
|
||||
import typing
|
||||
import mgclient
|
||||
import sys
|
||||
import pytest
|
||||
import time
|
||||
import typing
|
||||
|
||||
import mgclient
|
||||
import pytest
|
||||
|
||||
from common import *
|
||||
|
||||
|
||||
@ -30,8 +32,7 @@ def test_distinct(connection):
|
||||
assert len(results) == 2
|
||||
for i, n in enumerate(results):
|
||||
n_props = n[0].properties
|
||||
assert len(n_props) == 1
|
||||
assert n_props["property"] == i
|
||||
assert len(n_props) == 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
@ -13,7 +13,12 @@ import sys
|
||||
|
||||
import pytest
|
||||
|
||||
from common import connection, execute_and_fetch_all, has_n_result_row, wait_for_shard_manager_to_initialize
|
||||
from common import (
|
||||
connection,
|
||||
execute_and_fetch_all,
|
||||
has_n_result_row,
|
||||
wait_for_shard_manager_to_initialize,
|
||||
)
|
||||
|
||||
|
||||
def test_sequenced_expand_one(connection):
|
||||
@ -22,15 +27,21 @@ def test_sequenced_expand_one(connection):
|
||||
|
||||
for i in range(1, 4):
|
||||
assert has_n_result_row(cursor, f"CREATE (:label {{property:{i}}})", 0), f"Failed creating node"
|
||||
assert has_n_result_row(cursor, "MATCH (n {property:1}), (m {property:2}) CREATE (n)-[:TO]->(m)", 0)
|
||||
assert has_n_result_row(cursor, "MATCH (n {property:2}), (m {property:3}) CREATE (n)-[:TO]->(m)", 0)
|
||||
assert has_n_result_row(cursor, "MATCH (n:label {property:1}), (m:label {property:2}) CREATE (n)-[:TO]->(m)", 0)
|
||||
assert has_n_result_row(cursor, "MATCH (n:label {property:2}), (m:label {property:3}) CREATE (n)-[:TO]->(m)", 0)
|
||||
|
||||
results = execute_and_fetch_all(cursor, "MATCH (n)-[:TO]->(m)-[:TO]->(l) RETURN n,m,l")
|
||||
assert len(results) == 1
|
||||
n, m, l = results[0]
|
||||
assert n.properties["property"] == 1
|
||||
assert m.properties["property"] == 2
|
||||
assert l.properties["property"] == 3
|
||||
assert (
|
||||
len(n.properties) == 0
|
||||
), "we don't return any properties of the node received from expansion and the bolt layer doesn't serialize the primary key of vertices"
|
||||
assert (
|
||||
len(m.properties) == 0
|
||||
), "we don't return any properties of the node received from expansion and the bolt layer doesn't serialize the primary key of vertices"
|
||||
assert (
|
||||
len(l.properties) == 0
|
||||
), "we don't return any properties of the node received from expansion and the bolt layer doesn't serialize the primary key of vertices"
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
@ -9,11 +9,13 @@
|
||||
# by the Apache License, Version 2.0, included in the file
|
||||
# licenses/APL.txt.
|
||||
|
||||
import typing
|
||||
import mgclient
|
||||
import sys
|
||||
import pytest
|
||||
import time
|
||||
import typing
|
||||
|
||||
import mgclient
|
||||
import pytest
|
||||
|
||||
from common import *
|
||||
|
||||
|
||||
@ -35,13 +37,13 @@ def test_vertex_creation_and_scanall(connection):
|
||||
assert len(results) == 9
|
||||
for (n, r, m) in results:
|
||||
n_props = n.properties
|
||||
assert len(n_props) == 1, "n is not expected to have properties, update the test!"
|
||||
assert len(n_props) == 0, "n is not expected to have properties, update the test!"
|
||||
assert len(n.labels) == 0, "n is not expected to have labels, update the test!"
|
||||
|
||||
assert r.type == "TO"
|
||||
|
||||
m_props = m.properties
|
||||
assert m_props["property"] <= 3 and m_props["property"] >= 0, "Wrong key"
|
||||
assert len(m_props) == 0, "m is not expected to have properties, update the test!"
|
||||
assert len(m.labels) == 0, "m is not expected to have labels, update the test!"
|
||||
|
||||
|
||||
|
@ -9,11 +9,13 @@
|
||||
# by the Apache License, Version 2.0, included in the file
|
||||
# licenses/APL.txt.
|
||||
|
||||
import typing
|
||||
import mgclient
|
||||
import sys
|
||||
import pytest
|
||||
import time
|
||||
import typing
|
||||
|
||||
import mgclient
|
||||
import pytest
|
||||
|
||||
from common import *
|
||||
|
||||
|
||||
@ -21,23 +23,23 @@ def test_order_by_and_limit(connection):
|
||||
wait_for_shard_manager_to_initialize()
|
||||
cursor = connection.cursor()
|
||||
|
||||
assert has_n_result_row(cursor, "CREATE (n :label {property:1})", 0)
|
||||
assert has_n_result_row(cursor, "CREATE (n :label {property:2})", 0)
|
||||
assert has_n_result_row(cursor, "CREATE (n :label {property:3})", 0)
|
||||
assert has_n_result_row(cursor, "CREATE (n :label {property:4})", 0)
|
||||
|
||||
results = execute_and_fetch_all(cursor, "MATCH (n) RETURN n ORDER BY n.property DESC")
|
||||
assert len(results) == 4
|
||||
i = 4
|
||||
for n in results:
|
||||
n_props = n[0].properties
|
||||
assert len(n_props) == 1
|
||||
assert n_props["property"] == i
|
||||
results = execute_and_fetch_all(
|
||||
cursor,
|
||||
"UNWIND [{property:1}, {property:3}, {property:2}] AS map RETURN map ORDER BY map.property DESC",
|
||||
)
|
||||
assert len(results) == 3
|
||||
i = 3
|
||||
for map in results:
|
||||
assert len(map) == 1
|
||||
assert map[0]["property"] == i
|
||||
i = i - 1
|
||||
|
||||
result = execute_and_fetch_all(cursor, "MATCH (n) RETURN n ORDER BY n.property LIMIT 1")
|
||||
result = execute_and_fetch_all(
|
||||
cursor,
|
||||
"UNWIND [{property:1}, {property:3}, {property:2}] AS map RETURN map ORDER BY map.property LIMIT 1",
|
||||
)
|
||||
assert len(result) == 1
|
||||
assert result[0][0].properties["property"] == 1
|
||||
assert result[0][0]["property"] == 1
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
@ -9,6 +9,7 @@ function(add_manual_test test_cpp)
|
||||
get_filename_component(exec_name ${test_cpp} NAME_WE)
|
||||
set(target_name ${test_prefix}${exec_name})
|
||||
add_executable(${target_name} ${test_cpp} ${ARGN})
|
||||
|
||||
# OUTPUT_NAME sets the real name of a target when it is built and can be
|
||||
# used to help create two targets of the same name even though CMake
|
||||
# requires unique logical target names
|
||||
@ -21,7 +22,7 @@ target_link_libraries(${test_prefix}antlr_parser antlr_opencypher_parser_lib)
|
||||
|
||||
add_manual_test(antlr_sigsegv.cpp)
|
||||
target_link_libraries(${test_prefix}antlr_sigsegv gtest gtest_main
|
||||
antlr_opencypher_parser_lib mg-utils)
|
||||
antlr_opencypher_parser_lib mg-utils)
|
||||
|
||||
add_manual_test(antlr_tree_pretty_print.cpp)
|
||||
target_link_libraries(${test_prefix}antlr_tree_pretty_print antlr_opencypher_parser_lib)
|
||||
@ -37,13 +38,15 @@ target_link_libraries(${test_prefix}query_hash mg-query)
|
||||
|
||||
add_manual_test(query_planner.cpp interactive/planning.cpp)
|
||||
target_link_libraries(${test_prefix}query_planner mg-query)
|
||||
if (READLINE_FOUND)
|
||||
|
||||
if(READLINE_FOUND)
|
||||
target_link_libraries(${test_prefix}query_planner readline)
|
||||
endif()
|
||||
|
||||
add_manual_test(query_execution_dummy.cpp)
|
||||
target_link_libraries(${test_prefix}query_execution_dummy mg-query)
|
||||
if (READLINE_FOUND)
|
||||
|
||||
if(READLINE_FOUND)
|
||||
target_link_libraries(${test_prefix}query_execution_dummy readline)
|
||||
endif()
|
||||
|
||||
@ -61,3 +64,6 @@ target_link_libraries(${test_prefix}ssl_client mg-communication)
|
||||
|
||||
add_manual_test(ssl_server.cpp)
|
||||
target_link_libraries(${test_prefix}ssl_server mg-communication)
|
||||
|
||||
add_manual_test(query_performance.cpp)
|
||||
target_link_libraries(${test_prefix}query_performance mg-communication mg-utils mg-io mg-io-simulator mg-coordinator mg-query-v2 mg-storage-v3 mg-query mg-storage-v2)
|
||||
|
352
tests/manual/query_performance.cpp
Normal file
@ -0,0 +1,352 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
// This binary is meant to easily compare the performance of:
// - Memgraph v2
// - Memgraph v3
// - Memgraph v3 with MultiFrame
// This binary measures three things, which provide a high-level and easily understandable metric about the performance
// difference between the different versions:
// 1. Read time: how much time does it take to read the files.
// 2. Init time: how much time does it take to run the init queries, including the index creation. For details please
//    check RunV2.
// 3. Benchmark time: how much time does it take to run the benchmark queries.
// To quickly compare the performance of the different versions, just change the query or queries in the benchmark
// queries file and you can see the difference by running this executable. This way we don't have to keep multiple
// binaries of Memgraph v2 and Memgraph v3 with/without MultiFrame, start Memgraph, connect to it with mgconsole, and
// deal with other hassles. As everything is run in this binary, it is easier to generate perf reports/flamegraphs from
// the query execution of different Memgraph versions compared to using the full-blown version of Memgraph.
//
// A few important notes:
// - All the input files must have an empty line at the end of the file, as the reading logic expects that.
// - tests/mgbench/dataset_creator_unwind.py is recommended for generating the dataset because it generates queries with
//   UNWIND, which makes the import faster in Memgraph v3, so we can compare the performance on non-trivial datasets
//   as well. To make it possible to use the generated dataset, you have to move the generated index queries into a
//   separate file that can be supplied as the index queries file for this binary when using Memgraph v2. The reason for
//   this is that Memgraph v3 cannot handle indices yet, thus it crashes.
// - Check the command line flags and their descriptions defined in this file.
// - Also check out the --default-multi-frame-size command line flag if you want to play with that.
// - The log level is manually set to warning in the main function to avoid the overwhelming log messages from Memgraph
//   v3. Apart from ease of use, the huge amount of logging can degrade the actual performance.
//
// Example usage with Memgraph v2:
// ./query_performance
//   --index-queries-file indices.cypher
//   --init-queries-file dataset.cypher
//   --benchmark-queries-files expand.cypher,match.cypher
//   --use-v3=false
//
// Example usage with Memgraph v3 without MultiFrame:
// ./query_performance
//   --split-file split_file
//   --init-queries-file dataset.cypher
//   --benchmark-queries-files expand.cypher,match.cypher
//   --use-v3=true
//   --use-multi-frame=false
//
// Example usage with Memgraph v3 with MultiFrame:
// ./query_performance
//   --split-file split_file
//   --init-queries-file dataset.cypher
//   --benchmark-queries-files expand.cypher,match.cypher
//   --use-v3=true
//   --use-multi-frame=true
//
// The examples use only the necessary flags; specifying all of them is not a problem, so if you specify
// --index-queries-file for Memgraph v3, it will be safely ignored, just as --split-file is for Memgraph v2.
//
// To generate a flamegraph you can use the following command:
// flamegraph --cmd "record -F 997 --call-graph fp -g" --root -o flamegraph.svg -- ./query_performance <flags>
// Using the default option (dwarf) for --call-graph when calling perf might result in a very long runtime of flamegraph
// because of address resolution. See https://github.com/flamegraph-rs/flamegraph/issues/74.
|
||||
|
||||
#include <chrono>
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
#include <istream>
|
||||
#include <thread>
|
||||
|
||||
#include <fmt/core.h>
|
||||
#include <gflags/gflags.h>
|
||||
#include <spdlog/cfg/env.h>
|
||||
#include <spdlog/spdlog.h>
|
||||
#include <json/json.hpp>
|
||||
|
||||
// v3 includes
|
||||
#include "io/address.hpp"
|
||||
#include "io/local_transport/local_system.hpp"
|
||||
#include "io/message_histogram_collector.hpp"
|
||||
#include "machine_manager/machine_manager.hpp"
|
||||
#include "query/discard_value_stream.hpp"
|
||||
#include "query/v2/discard_value_stream.hpp"
|
||||
#include "query/v2/interpreter.hpp"
|
||||
#include "query/v2/request_router.hpp"
|
||||
|
||||
// v2 includes
|
||||
#include "query/interpreter.hpp"
|
||||
#include "storage/v2/storage.hpp"
|
||||
|
||||
// common includes
|
||||
#include "utils/string.hpp"
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_string(index_queries_file, "",
|
||||
"Path to the file which contains the queries to create indices. Used only for v2. Must contain an empty "
|
||||
"line at the end of the file after the queries.");
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_string(split_file, "",
|
||||
"Path to the split file which contains the predefined labels, properties, edge types and shard-ranges. "
|
||||
"Used only for v3. Must contain an empty line at the end of the file.");
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_string(init_queries_file, "",
|
||||
"Path to the file that is used to insert the initial dataset, one query per line. Must contain an empty "
|
||||
"line at the end of the file after the queries.");
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_string(benchmark_queries_files, "",
|
||||
"Comma separated paths to the files that contain the queries that we want to compare, one query per "
|
||||
"line. Must contain an empty line at the end of each file after the queries.");
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_bool(use_v3, true, "If set to true, then Memgraph v3 will be used, otherwise Memgraph v2 will be used.");
|
||||
|
||||
DEFINE_string(export_json_results, "", "If not empty, then the results will be exported as a json file.");
|
||||
|
||||
DEFINE_string(data_directory, "mg_data", "Path to directory to use as storage directory for Memgraph v2.");
|
||||
|
||||
namespace memgraph::tests::manual {
|
||||
|
||||
template <typename TInterpreterContext>
|
||||
struct DependantTypes {};
|
||||
|
||||
template <>
|
||||
struct DependantTypes<query::InterpreterContext> {
|
||||
using Interpreter = query::Interpreter;
|
||||
using DiscardValueResultStream = query::DiscardValueResultStream;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct DependantTypes<query::v2::InterpreterContext> {
|
||||
using Interpreter = query::v2::Interpreter;
|
||||
using DiscardValueResultStream = query::v2::DiscardValueResultStream;
|
||||
};
|
||||
|
||||
template <typename TRep, typename TPeriod>
|
||||
void PutResult(nlohmann::json &json, const std::string_view name, std::chrono::duration<TRep, TPeriod> duration) {
|
||||
json[name] = std::chrono::duration_cast<std::chrono::microseconds>(duration).count();
|
||||
}
|
||||
|
||||
template <typename TInterpreterContext>
|
||||
using Interpreter = typename DependantTypes<TInterpreterContext>::Interpreter;
|
||||
|
||||
template <typename TInterpreterContext>
|
||||
using DiscardValueResultStream = typename DependantTypes<TInterpreterContext>::DiscardValueResultStream;
|
||||
|
||||
template <typename TInterpreterContext>
|
||||
void RunQueries(TInterpreterContext &interpreter_context, const std::vector<std::string> &queries) {
|
||||
Interpreter<TInterpreterContext> interpreter{&interpreter_context};
|
||||
DiscardValueResultStream<TInterpreterContext> stream;
|
||||
|
||||
for (const auto &query : queries) {
|
||||
auto result = interpreter.Prepare(query, {}, nullptr);
|
||||
interpreter.Pull(&stream, std::nullopt, result.qid);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename TInterpreterContext>
|
||||
void RunInitQueries(TInterpreterContext &interpreter_context, const std::vector<std::string> &init_queries) {
|
||||
RunQueries(interpreter_context, init_queries);
|
||||
}
|
||||
|
||||
template <typename TInterpreterContext>
|
||||
void RunBenchmarkQueries(TInterpreterContext &interpreter_context, const std::vector<std::string> &benchmark_queries) {
|
||||
RunQueries(interpreter_context, benchmark_queries);
|
||||
}
|
||||
|
||||
std::vector<std::string> ReadQueries(const std::string &file_name) {
|
||||
std::vector<std::string> queries{};
|
||||
std::string buffer;
|
||||
|
||||
std::ifstream file{file_name, std::ios::in};
|
||||
MG_ASSERT(file.good(), "Cannot open queries file to read: {}", file_name);
|
||||
while (file.good()) {
|
||||
std::getline(file, buffer);
|
||||
if (buffer.empty()) {
|
||||
continue;
|
||||
}
|
||||
// Trim the trailing `;`
|
||||
queries.push_back(buffer.substr(0, buffer.size() - 1));
|
||||
}
|
||||
return queries;
|
||||
}
|
||||
|
||||
std::map<std::string, std::vector<std::string>> ReadBenchmarkQueries(const std::string benchmark_queries_files) {
|
||||
auto benchmark_files = utils::Split(benchmark_queries_files, ",");
|
||||
std::map<std::string, std::vector<std::string>> result;
|
||||
for (const auto &benchmark_file : benchmark_files) {
|
||||
const auto path = std::filesystem::path(benchmark_file);
|
||||
result.emplace(path.stem().string(), ReadQueries(benchmark_file));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
void RunV2() {
|
||||
spdlog::critical("Running V2");
|
||||
const auto run_start = std::chrono::high_resolution_clock::now();
|
||||
|
||||
const auto index_queries = ReadQueries(FLAGS_index_queries_file);
|
||||
const auto init_queries = ReadQueries(FLAGS_init_queries_file);
|
||||
const auto benchmarks = ReadBenchmarkQueries(FLAGS_benchmark_queries_files);
|
||||
|
||||
storage::Storage storage{
|
||||
storage::Config{.durability{.storage_directory = FLAGS_data_directory,
|
||||
.snapshot_wal_mode = storage::Config::Durability::SnapshotWalMode::DISABLED}}};
|
||||
|
||||
memgraph::query::InterpreterContext interpreter_context{
|
||||
&storage,
|
||||
{.query = {.allow_load_csv = false},
|
||||
.execution_timeout_sec = 0,
|
||||
.replication_replica_check_frequency = std::chrono::seconds(0),
|
||||
.default_kafka_bootstrap_servers = "",
|
||||
.default_pulsar_service_url = "",
|
||||
.stream_transaction_conflict_retries = 0,
|
||||
.stream_transaction_retry_interval = std::chrono::milliseconds(0)},
|
||||
FLAGS_data_directory};
|
||||
|
||||
const auto init_start = std::chrono::high_resolution_clock::now();
|
||||
RunInitQueries(interpreter_context, index_queries);
|
||||
RunInitQueries(interpreter_context, init_queries);
|
||||
const auto benchmark_start = std::chrono::high_resolution_clock::now();
|
||||
|
||||
spdlog::critical("Read: {}ms", std::chrono::duration_cast<std::chrono::milliseconds>(init_start - run_start).count());
|
||||
spdlog::critical("Init: {}ms",
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(benchmark_start - init_start).count());
|
||||
|
||||
std::map<std::string, std::chrono::nanoseconds> benchmark_results;
|
||||
for (const auto &[name, queries] : benchmarks) {
|
||||
const auto current_start = std::chrono::high_resolution_clock::now();
|
||||
RunBenchmarkQueries(interpreter_context, queries);
|
||||
const auto current_stop = std::chrono::high_resolution_clock::now();
|
||||
const auto elapsed = current_stop - current_start;
|
||||
spdlog::critical("Benchmark {}: {}ms", name,
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count());
|
||||
benchmark_results.emplace(name, elapsed);
|
||||
}
|
||||
|
||||
const auto benchmark_end = std::chrono::high_resolution_clock::now();
|
||||
spdlog::critical("Benchmark: {}ms",
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(benchmark_end - benchmark_start).count());
|
||||
|
||||
if (!FLAGS_export_json_results.empty()) {
|
||||
nlohmann::json results;
|
||||
PutResult(results, "init", benchmark_start - init_start);
|
||||
nlohmann::json benchmark_results_json;
|
||||
for (const auto &[name, duration] : benchmark_results) {
|
||||
PutResult(benchmark_results_json, name, duration);
|
||||
}
|
||||
results["benchmarks"] = std::move(benchmark_results_json);
|
||||
std::ofstream results_file{FLAGS_export_json_results};
|
||||
results_file << results.dump();
|
||||
}
|
||||
}
|
||||
|
||||
void RunV3() {
|
||||
spdlog::critical("Running V3");
|
||||
const auto run_start = std::chrono::high_resolution_clock::now();
|
||||
std::ifstream sm_file{FLAGS_split_file, std::ios::in};
|
||||
MG_ASSERT(sm_file.good(), "Cannot open split file to read: {}", FLAGS_split_file);
|
||||
auto sm = memgraph::coordinator::ShardMap::Parse(sm_file);
|
||||
|
||||
const auto init_queries = ReadQueries(FLAGS_init_queries_file);
|
||||
const auto benchmarks = ReadBenchmarkQueries(FLAGS_benchmark_queries_files);
|
||||
|
||||
io::local_transport::LocalSystem ls;
|
||||
|
||||
auto unique_local_addr_query = io::Address::UniqueLocalAddress();
|
||||
auto io = ls.Register(unique_local_addr_query);
|
||||
|
||||
memgraph::machine_manager::MachineConfig config{
|
||||
.coordinator_addresses = std::vector<memgraph::io::Address>{unique_local_addr_query},
|
||||
.is_storage = true,
|
||||
.is_coordinator = true,
|
||||
.listen_ip = unique_local_addr_query.last_known_ip,
|
||||
.listen_port = unique_local_addr_query.last_known_port,
|
||||
.shard_worker_threads = 2,
|
||||
};
|
||||
|
||||
memgraph::coordinator::Coordinator coordinator{sm};
|
||||
|
||||
memgraph::machine_manager::MachineManager<memgraph::io::local_transport::LocalTransport> mm{io, config, coordinator};
|
||||
std::jthread mm_thread([&mm] { mm.Run(); });
|
||||
|
||||
auto rr_factory = std::make_unique<memgraph::query::v2::LocalRequestRouterFactory>(io);
|
||||
|
||||
query::v2::InterpreterContext interpreter_context{(memgraph::storage::v3::Shard *)(nullptr),
|
||||
{.execution_timeout_sec = 0},
|
||||
"data",
|
||||
std::move(rr_factory),
|
||||
mm.CoordinatorAddress()};
|
||||
|
||||
// without this it fails sometimes because the CreateVertices request might reach the shard worker faster than the
|
||||
// ShardToInitialize
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(150));
|
||||
|
||||
const auto init_start = std::chrono::high_resolution_clock::now();
|
||||
RunInitQueries(interpreter_context, init_queries);
|
||||
const auto benchmark_start = std::chrono::high_resolution_clock::now();
|
||||
|
||||
spdlog::critical("Read: {}ms", std::chrono::duration_cast<std::chrono::milliseconds>(init_start - run_start).count());
|
||||
spdlog::critical("Init: {}ms",
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(benchmark_start - init_start).count());
|
||||
|
||||
std::map<std::string, std::chrono::nanoseconds> benchmark_results;
|
||||
for (const auto &[name, queries] : benchmarks) {
|
||||
const auto current_start = std::chrono::high_resolution_clock::now();
|
||||
RunBenchmarkQueries(interpreter_context, queries);
|
||||
const auto current_stop = std::chrono::high_resolution_clock::now();
|
||||
const auto elapsed = current_stop - current_start;
|
||||
spdlog::critical("Benchmark {}: {}ms", name,
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count());
|
||||
benchmark_results.emplace(name, elapsed);
|
||||
}
|
||||
|
||||
const auto benchmark_end = std::chrono::high_resolution_clock::now();
|
||||
spdlog::critical("Benchmark: {}ms",
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(benchmark_end - benchmark_start).count());
|
||||
|
||||
ls.ShutDown();
|
||||
auto latency_histograms = nlohmann::json::parse(fmt::format("{}", io.ResponseLatencies()));
|
||||
spdlog::warn(latency_histograms.dump(4));
|
||||
|
||||
if (!FLAGS_export_json_results.empty()) {
|
||||
nlohmann::json results;
|
||||
PutResult(results, "init", benchmark_start - init_start);
|
||||
nlohmann::json benchmark_results_json;
|
||||
for (const auto &[name, duration] : benchmark_results) {
|
||||
PutResult(benchmark_results_json, name, duration);
|
||||
}
|
||||
results["benchmarks"] = std::move(benchmark_results_json);
|
||||
results["latencies"] = std::move(latency_histograms);
|
||||
std::ofstream results_file{FLAGS_export_json_results};
|
||||
results_file << results.dump();
|
||||
}
|
||||
}
|
||||
} // namespace memgraph::tests::manual
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
spdlog::set_level(spdlog::level::warn);
|
||||
spdlog::cfg::load_env_levels();
|
||||
gflags::ParseCommandLineFlags(&argc, &argv, true);
|
||||
if (FLAGS_use_v3) {
|
||||
memgraph::tests::manual::RunV3();
|
||||
} else {
|
||||
memgraph::tests::manual::RunV2();
|
||||
}
|
||||
return 0;
|
||||
}
|
116
tests/manual/query_performance_runner.py
Executable file
@ -0,0 +1,116 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# Copyright 2023 Memgraph Ltd.
|
||||
#
|
||||
# Use of this software is governed by the Business Source License
|
||||
# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
# License, and you may not use this file except in compliance with the Business Source License.
|
||||
#
|
||||
# As of the Change Date specified in that file, in accordance with
|
||||
# the Business Source License, use of this software will be governed
|
||||
# by the Apache License, Version 2.0, included in the file
|
||||
# licenses/APL.txt.
|
||||
|
||||
import argparse
|
||||
import io
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import tarfile
|
||||
import tempfile
|
||||
|
||||
import requests
|
||||
|
||||
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
|
||||
PROJECT_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", ".."))
|
||||
BUILD_DIR = os.path.join(PROJECT_DIR, "build")
|
||||
BINARY_DIR = os.path.join(BUILD_DIR, "tests/manual")
|
||||
DEFAULT_BENCHMARK_DIR = os.path.join(BINARY_DIR, "query_performance_benchmark")
|
||||
DATA_URL = (
|
||||
"https://s3.eu-west-1.amazonaws.com/deps.memgraph.io/dataset/query_performance/query_performance_benchmark.tar.gz"
|
||||
)
|
||||
|
||||
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
||||
parser.add_argument(
|
||||
"--binary",
|
||||
type=str,
|
||||
default=os.path.join(BINARY_DIR, "query_performance"),
|
||||
help="Path to the binary to use for the benchmark.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--data-dir",
|
||||
type=str,
|
||||
default=tempfile.TemporaryDirectory().name,
|
||||
help="Path to directory that can be used as a data directory for ",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--summary-path",
|
||||
type=str,
|
||||
default=os.path.join(DEFAULT_BENCHMARK_DIR, "summary.json"),
|
||||
help="Path to which file write the summary.",
|
||||
)
|
||||
|
||||
parser.add_argument("--init-queries-file", type=str, default=os.path.join(DEFAULT_BENCHMARK_DIR, "dataset.cypher"))
|
||||
parser.add_argument("--index-queries-file", type=str, default=os.path.join(DEFAULT_BENCHMARK_DIR, "indices.cypher"))
|
||||
parser.add_argument("--split-file", type=str, default=os.path.join(DEFAULT_BENCHMARK_DIR, "split_file"))
|
||||
|
||||
parser.add_argument(
|
||||
"--benchmark-queries-files",
|
||||
type=str,
|
||||
default=",".join(
|
||||
[os.path.join(DEFAULT_BENCHMARK_DIR, file_name) for file_name in ["expand.cypher", "match_files.cypher"]]
|
||||
),
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
v2_results_path = os.path.join(DEFAULT_BENCHMARK_DIR, "v2_results.json")
|
||||
v3_results_path = os.path.join(DEFAULT_BENCHMARK_DIR, "v3_results.json")
|
||||
|
||||
|
||||
if os.path.exists(DEFAULT_BENCHMARK_DIR):
|
||||
print(f"Using cachced data from {DEFAULT_BENCHMARK_DIR}")
|
||||
else:
|
||||
print(f"Downloading benchmark data to {DEFAULT_BENCHMARK_DIR}")
|
||||
r = requests.get(DATA_URL)
|
||||
assert r.ok, "Cannot download data"
|
||||
file_like_object = io.BytesIO(r.content)
|
||||
tar = tarfile.open(fileobj=file_like_object)
|
||||
tar.extractall(os.path.dirname(DEFAULT_BENCHMARK_DIR))
|
||||
|
||||
subprocess.run(
|
||||
[
|
||||
args.binary,
|
||||
f"--split-file={args.split_file}",
|
||||
f"--index-queries-file={args.index_queries_file}",
|
||||
f"--init-queries-file={args.init_queries_file}",
|
||||
f"--benchmark-queries-files={args.benchmark_queries_files}",
|
||||
"--use-v3=false",
|
||||
"--use-multi-frame=true",
|
||||
f"--export-json-results={v2_results_path}",
|
||||
f"--data-directory={args.data_dir}",
|
||||
]
|
||||
)
|
||||
|
||||
subprocess.run(
|
||||
[
|
||||
args.binary,
|
||||
f"--split-file={args.split_file}",
|
||||
f"--index-queries-file={args.index_queries_file}",
|
||||
f"--init-queries-file={args.init_queries_file}",
|
||||
f"--benchmark-queries-files={args.benchmark_queries_files}",
|
||||
"--use-v3=true",
|
||||
"--use-multi-frame=true",
|
||||
f"--export-json-results={v3_results_path}",
|
||||
f"--data-directory={args.data_dir}",
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
v2_results_file = open(v2_results_path)
|
||||
v2_results = json.load(v2_results_file)
|
||||
v3_results_file = open(v3_results_path)
|
||||
v3_results = json.load(v3_results_file)
|
||||
|
||||
with open(args.summary_path, "w") as summary:
|
||||
json.dump({"v2": v2_results, "v3": v3_results}, summary)
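As a side note on the output format: the summary written above nests the two runs under the keys "v2" and "v3"; each run carries an "init" entry and a "benchmarks" map from benchmark name to duration in microseconds, which is the layout query_performance exports via --export-json-results. A minimal post-processing sketch under that assumption follows; the helper name and the printed report are illustrative only and not part of this change.

import json

def report_speedup(summary_path="summary.json"):
    # Load the combined summary produced by query_performance_runner.py.
    with open(summary_path) as f:
        summary = json.load(f)
    v2_benchmarks = summary["v2"]["benchmarks"]
    v3_benchmarks = summary["v3"]["benchmarks"]
    # Compare only the benchmark files that both runs executed.
    for name in sorted(set(v2_benchmarks) & set(v3_benchmarks)):
        v2_us = v2_benchmarks[name]
        v3_us = v3_benchmarks[name]
        # A ratio above 1.0 means v3 spent more time than v2 on this benchmark file.
        print(f"{name}: v2={v2_us}us v3={v3_us}us v3/v2={v3_us / v2_us:.2f}")

if __name__ == "__main__":
    report_speedup()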
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -14,7 +14,6 @@
|
||||
import argparse
|
||||
import json
|
||||
|
||||
|
||||
FIELDS = [
|
||||
{
|
||||
"name": "throughput",
|
||||
@ -85,39 +84,32 @@ def compare_results(results_from, results_to, fields):
|
||||
if group == "__import__":
|
||||
continue
|
||||
for scenario, summary_to in scenarios.items():
|
||||
summary_from = recursive_get(
|
||||
results_from, dataset, variant, group, scenario,
|
||||
value={})
|
||||
if len(summary_from) > 0 and \
|
||||
summary_to["count"] != summary_from["count"] or \
|
||||
summary_to["num_workers"] != \
|
||||
summary_from["num_workers"]:
|
||||
summary_from = recursive_get(results_from, dataset, variant, group, scenario, value={})
|
||||
if (
|
||||
len(summary_from) > 0
|
||||
and summary_to["count"] != summary_from["count"]
|
||||
or summary_to["num_workers"] != summary_from["num_workers"]
|
||||
):
|
||||
raise Exception("Incompatible results!")
|
||||
testcode = "/".join([dataset, variant, group, scenario,
|
||||
"{:02d}".format(
|
||||
summary_to["num_workers"])])
|
||||
testcode = "/".join([dataset, variant, group, scenario, "{:02d}".format(summary_to["num_workers"])])
|
||||
row = {}
|
||||
performance_changed = False
|
||||
for field in fields:
|
||||
key = field["name"]
|
||||
if key in summary_to:
|
||||
row[key] = compute_diff(
|
||||
summary_from.get(key, None),
|
||||
summary_to[key])
|
||||
row[key] = compute_diff(summary_from.get(key, None), summary_to[key])
|
||||
elif key in summary_to["database"]:
|
||||
row[key] = compute_diff(
|
||||
recursive_get(summary_from, "database", key,
|
||||
value=None),
|
||||
summary_to["database"][key])
|
||||
recursive_get(summary_from, "database", key, value=None), summary_to["database"][key]
|
||||
)
|
||||
else:
|
||||
row[key] = compute_diff(
|
||||
recursive_get(summary_from, "metadata", key,
|
||||
"average", value=None),
|
||||
summary_to["metadata"][key]["average"])
|
||||
if "diff" not in row[key] or \
|
||||
("diff_treshold" in field and
|
||||
abs(row[key]["diff"]) >=
|
||||
field["diff_treshold"]):
|
||||
recursive_get(summary_from, "metadata", key, "average", value=None),
|
||||
summary_to["metadata"][key]["average"],
|
||||
)
|
||||
if "diff" not in row[key] or (
|
||||
"diff_treshold" in field and abs(row[key]["diff"]) >= field["diff_treshold"]
|
||||
):
|
||||
performance_changed = True
|
||||
if performance_changed:
|
||||
ret[testcode] = row
|
||||
@ -130,29 +122,36 @@ def generate_remarkup(fields, data):
|
||||
ret += "<table>\n"
|
||||
ret += " <tr>\n"
|
||||
ret += " <th>Testcode</th>\n"
|
||||
ret += "\n".join(map(lambda x: " <th>{}</th>".format(
|
||||
x["name"].replace("_", " ").capitalize()), fields)) + "\n"
|
||||
ret += (
|
||||
"\n".join(
|
||||
map(
|
||||
lambda x: " <th>{}</th>".format(x["name"].replace("_", " ").capitalize()),
|
||||
fields,
|
||||
)
|
||||
)
|
||||
+ "\n"
|
||||
)
|
||||
ret += " </tr>\n"
|
||||
for testcode in sorted(data.keys()):
|
||||
ret += " <tr>\n"
|
||||
ret += " <td>{}</td>\n".format(testcode)
|
||||
for field in fields:
|
||||
result = data[testcode][field["name"]]
|
||||
value = result["value"] * field["scaling"]
|
||||
if "diff" in result:
|
||||
diff = result["diff"]
|
||||
arrow = "arrow-up" if diff >= 0 else "arrow-down"
|
||||
if not (field["positive_diff_better"] ^ (diff >= 0)):
|
||||
color = "green"
|
||||
result = data[testcode].get(field["name"])
|
||||
if result != None:
|
||||
value = result["value"] * field["scaling"]
|
||||
if "diff" in result:
|
||||
diff = result["diff"]
|
||||
arrow = "arrow-up" if diff >= 0 else "arrow-down"
|
||||
if not (field["positive_diff_better"] ^ (diff >= 0)):
|
||||
color = "green"
|
||||
else:
|
||||
color = "red"
|
||||
sign = "{{icon {} color={}}}".format(arrow, color)
|
||||
ret += ' <td bgcolor="{}">{:.3f}{} ({:+.2%})</td>\n'.format(
|
||||
color, value, field["unit"], diff
|
||||
)
|
||||
else:
|
||||
color = "red"
|
||||
sign = "{{icon {} color={}}}".format(arrow, color)
|
||||
ret += " <td>{:.3f}{} //({:+.2%})// {}</td>\n".format(
|
||||
value, field["unit"], diff, sign)
|
||||
else:
|
||||
ret += " <td>{:.3f}{} //(new)// " \
|
||||
"{{icon plus color=blue}}</td>\n".format(
|
||||
value, field["unit"])
|
||||
ret += '<td bgcolor="blue">{:.3f}{} //(new)// </td>\n'.format(value, field["unit"])
|
||||
ret += " </tr>\n"
|
||||
ret += "</table>\n"
|
||||
else:
|
||||
@ -161,11 +160,14 @@ def generate_remarkup(fields, data):
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Compare results of multiple benchmark runs.")
|
||||
parser.add_argument("--compare", action="append", nargs=2,
|
||||
metavar=("from", "to"),
|
||||
help="compare results between `from` and `to` files")
|
||||
parser = argparse.ArgumentParser(description="Compare results of multiple benchmark runs.")
|
||||
parser.add_argument(
|
||||
"--compare",
|
||||
action="append",
|
||||
nargs=2,
|
||||
metavar=("from", "to"),
|
||||
help="compare results between `from` and `to` files",
|
||||
)
|
||||
parser.add_argument("--output", default="", help="output file name")
|
||||
args = parser.parse_args()
|
||||
|
||||
|
@ -51,10 +51,22 @@ import helpers
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--number_of_identities", type=int, default=10)
|
||||
parser.add_argument("--number_of_files", type=int, default=10)
|
||||
parser.add_argument("--percentage_of_permissions", type=float, default=1.0)
|
||||
parser.add_argument("--filename", default="dataset.cypher")
|
||||
parser.add_argument(
|
||||
"--number_of_identities",
|
||||
type=int,
|
||||
default=10,
|
||||
help="Determines how many :Identity nodes will the dataset contain.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--number_of_files", type=int, default=10, help="Determines how many :File nodes will the dataset contain."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--percentage_of_permissions",
|
||||
type=float,
|
||||
default=1.0,
|
||||
help="Determines approximately what percentage of the all possible identity-permission-file connections will be created.",
|
||||
)
|
||||
parser.add_argument("--filename", default="dataset.cypher", help="The name of the output file.")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
|
151
tests/mgbench/dataset_creator_unwind.py
Normal file
@ -0,0 +1,151 @@
|
||||
# Copyright 2022 Memgraph Ltd.
|
||||
#
|
||||
# Use of this software is governed by the Business Source License
|
||||
# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
# License, and you may not use this file except in compliance with the Business Source License.
|
||||
#
|
||||
# As of the Change Date specified in that file, in accordance with
|
||||
# the Business Source License, use of this software will be governed
|
||||
# by the Apache License, Version 2.0, included in the file
|
||||
# licenses/APL.txt.
|
||||
|
||||
import argparse
|
||||
import random
|
||||
|
||||
import helpers
|
||||
|
||||
# Explanation of datasets:
# - empty_only_index: contains index; contains no data
# - small: contains index; contains data (small dataset)
#
# The data model is as follows:
#
# ┌──────────────┐
# │ Permission │
# ┌────────────────┐ │ Schema:uuid │ ┌────────────┐
# │:IS_FOR_IDENTITY├────┤ Index:name ├───┤:IS_FOR_FILE│
# └┬───────────────┘ └──────────────┘ └────────────┤
# │ │
# ┌──────▼──────────────┐ ┌──▼────────────────┐
# │ Identity │ │ File │
# │ Schema:uuid │ │ Schema:uuid │
# │ Index:email │ │ Index:name │
# └─────────────────────┘ │ Index:platformId │
# └───────────────────┘
#
# - File: attributes: ["uuid", "name", "platformId"]
# - Permission: attributes: ["uuid", "name"]
# - Identity: attributes: ["uuid", "email"]
#
# Indexes:
# - File: [File(uuid), File(platformId), File(name)]
# - Permission: [Permission(uuid), Permission(name)]
# - Identity: [Identity(uuid), Identity(email)]
#
# Edges:
# - (:Permission)-[:IS_FOR_FILE]->(:File)
# - (:Permission)-[:IS_FOR_IDENTITY]->(:Identity)
#
# AccessControl specific: uuid is the schema
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--number_of_identities",
|
||||
type=int,
|
||||
default=10,
|
||||
help="Determines how many :Identity nodes will the dataset contain.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--number_of_files", type=int, default=10, help="Determines how many :File nodes will the dataset contain."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--percentage_of_permissions",
|
||||
type=float,
|
||||
default=1.0,
|
||||
help="Determines approximately what percentage of the all possible identity-permission-file connections will be created.",
|
||||
)
|
||||
parser.add_argument("--filename", default="dataset.cypher", help="The name of the output file.")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
number_of_identities = args.number_of_identities
|
||||
number_of_files = args.number_of_files
|
||||
percentage_of_permissions = args.percentage_of_permissions
|
||||
filename = args.filename
|
||||
|
||||
assert number_of_identities >= 0
|
||||
assert number_of_files >= 0
|
||||
assert percentage_of_permissions > 0.0 and percentage_of_permissions <= 1.0
|
||||
assert filename != ""
|
||||
|
||||
with open(filename, "w") as f:
|
||||
f.write("MATCH (n) DETACH DELETE n;\n")
|
||||
|
||||
# Create the indexes
|
||||
f.write("CREATE INDEX ON :File;\n")
|
||||
f.write("CREATE INDEX ON :Permission;\n")
|
||||
f.write("CREATE INDEX ON :Identity;\n")
|
||||
f.write("CREATE INDEX ON :File(platformId);\n")
|
||||
f.write("CREATE INDEX ON :File(name);\n")
|
||||
f.write("CREATE INDEX ON :Permission(name);\n")
|
||||
f.write("CREATE INDEX ON :Identity(email);\n")
|
||||
|
||||
# Create extra index: in distributed, this will be the schema
|
||||
f.write("CREATE INDEX ON :File(uuid);\n")
|
||||
f.write("CREATE INDEX ON :Permission(uuid);\n")
|
||||
f.write("CREATE INDEX ON :Identity(uuid);\n")
|
||||
|
||||
uuid = 1
|
||||
|
||||
# Create the nodes File
|
||||
f.write("UNWIND [")
|
||||
for index in range(0, number_of_files):
|
||||
if index != 0:
|
||||
f.write(",")
|
||||
f.write(f' {{uuid: {uuid}, platformId: "platform_id", name: "name_file_{uuid}"}}')
|
||||
uuid += 1
|
||||
f.write("] AS props CREATE (:File {uuid: props.uuid, platformId: props.platformId, name: props.name});\n")
|
||||
|
||||
identities = []
|
||||
f.write("UNWIND [")
|
||||
# Create the nodes Identity
|
||||
for index in range(0, number_of_identities):
|
||||
if index != 0:
|
||||
f.write(",")
|
||||
f.write(f' {{uuid: {uuid}, name: "mail_{uuid}@something.com"}}')
|
||||
uuid += 1
|
||||
f.write("] AS props CREATE (:Identity {uuid: props.uuid, name: props.name});\n")
|
||||
|
||||
f.write("UNWIND [")
|
||||
created = 0
|
||||
for outer_index in range(0, number_of_files):
|
||||
for inner_index in range(0, number_of_identities):
|
||||
|
||||
file_uuid = outer_index + 1
|
||||
identity_uuid = number_of_files + inner_index + 1
|
||||
|
||||
if random.random() <= percentage_of_permissions:
|
||||
|
||||
if created > 0:
|
||||
f.write(",")
|
||||
|
||||
f.write(
|
||||
f' {{permUuid: {uuid}, permName: "name_permission_{uuid}", fileUuid: {file_uuid}, identityUuid: {identity_uuid}}}'
|
||||
)
|
||||
created += 1
|
||||
uuid += 1
|
||||
|
||||
if created == 5000:
|
||||
f.write(
|
||||
"] AS props MATCH (file:File {uuid:props.fileUuid}), (identity:Identity {uuid: props.identityUuid}) CREATE (permission:Permission {uuid: props.permUuid, name: props.permName}) CREATE (permission)-[: IS_FOR_FILE]->(file) CREATE (permission)-[: IS_FOR_IDENTITY]->(identity);\nUNWIND ["
|
||||
)
|
||||
created = 0
|
||||
f.write(
|
||||
"] AS props MATCH (file:File {uuid:props.fileUuid}), (identity:Identity {uuid: props.identityUuid}) CREATE (permission:Permission {uuid: props.permUuid, name: props.permName}) CREATE (permission)-[: IS_FOR_FILE]->(file) CREATE (permission)-[: IS_FOR_IDENTITY]->(identity);\n"
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
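Because query_performance needs the index queries in a separate file when benchmarking Memgraph v2 (see the notes at the top of tests/manual/query_performance.cpp), a possible way to split the file generated above is sketched here. The file names follow the examples used elsewhere in this change; the helper itself is only an illustration and is not part of the repository.

def split_index_queries(dataset_path="dataset.cypher",
                        indices_path="indices.cypher",
                        data_path="dataset_no_indices.cypher"):
    # Route every "CREATE INDEX ON ..." line into the index file and keep the rest as data.
    with open(dataset_path) as src, open(indices_path, "w") as idx, open(data_path, "w") as data:
        for line in src:
            target = idx if line.startswith("CREATE INDEX ON") else data
            target.write(line)
        # query_performance expects an empty line at the end of every input file.
        idx.write("\n")
        data.write("\n")

if __name__ == "__main__":
    split_index_queries()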
|
@ -353,7 +353,7 @@ class AccessControl(Dataset):
|
||||
|
||||
def benchmark__create__vertex(self):
|
||||
self.next_value_idx += 1
|
||||
query = (f"CREATE (:File {{uuid: {self.next_value_idx}}});", {})
|
||||
query = ("CREATE (:File {uuid: $uuid})", {"uuid": self.next_value_idx})
|
||||
return query
|
||||
|
||||
def benchmark__create__edges(self):
|
||||
@ -379,6 +379,24 @@ class AccessControl(Dataset):
|
||||
return query
|
||||
|
||||
def benchmark__match__match_all_vertices_with_edges(self):
|
||||
self.next_value_idx += 1
|
||||
query = ("MATCH (permission:Permission)-[e:IS_FOR_FILE]->(file:File) RETURN *", {})
|
||||
return query
|
||||
|
||||
def benchmark__match__match_users_with_permission_for_files(self):
|
||||
file_uuid_1 = self._get_random_uuid("File")
|
||||
file_uuid_2 = self._get_random_uuid("File")
|
||||
min_file_uuid = min(file_uuid_1, file_uuid_2)
|
||||
max_file_uuid = max(file_uuid_1, file_uuid_2)
|
||||
query = (
|
||||
"MATCH (f:File)<-[ff:IS_FOR_FILE]-(p:Permission)-[fi:IS_FOR_IDENTITY]->(i:Identity) WHERE f.uuid >= $min_file_uuid AND f.uuid <= $max_file_uuid RETURN *",
|
||||
{"min_file_uuid": min_file_uuid, "max_file_uuid": max_file_uuid},
|
||||
)
|
||||
return query
|
||||
|
||||
def benchmark__match__match_users_with_permission_for_specific_file(self):
|
||||
file_uuid = self._get_random_uuid("File")
|
||||
query = (
|
||||
"MATCH (f:File {uuid: $file_uuid})<-[ff:IS_FOR_FILE]-(p:Permission)-[fi:IS_FOR_IDENTITY]->(i:Identity) RETURN *",
|
||||
{"file_uuid": file_uuid},
|
||||
)
|
||||
return query
|
||||
|
@ -68,6 +68,15 @@ class Memgraph:
|
||||
self._cleanup()
|
||||
atexit.unregister(self._cleanup)
|
||||
|
||||
# Returns None if string_value is not "true" or "false"; the comparison is case-insensitive.
|
||||
def _get_bool_value(self, string_value):
|
||||
lower_string_value = string_value.lower()
|
||||
if lower_string_value == "true":
|
||||
return True
|
||||
if lower_string_value == "false":
|
||||
return False
|
||||
return None
|
||||
|
||||
def _get_args(self, **kwargs):
|
||||
data_directory = os.path.join(self._directory.name, "memgraph")
|
||||
if self._memgraph_version >= (0, 50, 0):
|
||||
@ -83,7 +92,13 @@ class Memgraph:
|
||||
args_list = self._extra_args.split(" ")
|
||||
assert len(args_list) % 2 == 0
|
||||
for i in range(0, len(args_list), 2):
|
||||
kwargs[args_list[i]] = args_list[i + 1]
|
||||
key = args_list[i]
|
||||
value = args_list[i + 1]
|
||||
maybe_bool_value = self._get_bool_value(value)
|
||||
if maybe_bool_value is not None:
|
||||
kwargs[key] = maybe_bool_value
|
||||
else:
|
||||
kwargs[key] = value
|
||||
|
||||
return _convert_args_to_flags(self._memgraph_binary, **kwargs)
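For illustration only, here is a self-contained sketch of the coercion added above: pairs from an extra-args string become keyword arguments, with "true"/"false" (any casing) turned into Python booleans and every other value kept as a string. The function names and the example flags are assumptions, not code from this repository.

def _to_bool_or_none(string_value):
    lowered = string_value.lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    return None

def parse_extra_args(extra_args):
    tokens = extra_args.split(" ")
    # Alternating "--flag value" pairs are required, as asserted in the runner.
    assert len(tokens) % 2 == 0
    kwargs = {}
    for i in range(0, len(tokens), 2):
        maybe_bool = _to_bool_or_none(tokens[i + 1])
        kwargs[tokens[i]] = maybe_bool if maybe_bool is not None else tokens[i + 1]
    return kwargs

# Example:
# parse_extra_args("--storage-properties-on-edges True --bolt-num-workers 4")
# -> {"--storage-properties-on-edges": True, "--bolt-num-workers": "4"}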
|
||||
|
||||
|
@ -1,8 +1,12 @@
|
||||
4
|
||||
8
|
||||
uuid
|
||||
email
|
||||
name
|
||||
platformId
|
||||
permUuid
|
||||
permName
|
||||
fileUuid
|
||||
identityUuid
|
||||
2
|
||||
IS_FOR_IDENTITY
|
||||
IS_FOR_FILE
|
||||
|
@ -1,8 +1,12 @@
|
||||
4
|
||||
8
|
||||
uuid
|
||||
email
|
||||
name
|
||||
platformId
|
||||
permUuid
|
||||
permName
|
||||
fileUuid
|
||||
identityUuid
|
||||
2
|
||||
IS_FOR_IDENTITY
|
||||
IS_FOR_FILE
|
||||
|
@ -1,8 +1,12 @@
|
||||
4
|
||||
8
|
||||
uuid
|
||||
email
|
||||
name
|
||||
platformId
|
||||
permUuid
|
||||
permName
|
||||
fileUuid
|
||||
identityUuid
|
||||
2
|
||||
IS_FOR_IDENTITY
|
||||
IS_FOR_FILE
|
||||
|
@ -17,7 +17,7 @@ function(add_simulation_test test_cpp)
|
||||
# requires unique logical target names
|
||||
set_target_properties(${target_name} PROPERTIES OUTPUT_NAME ${exec_name})
|
||||
|
||||
target_link_libraries(${target_name} mg-storage-v3 mg-communication mg-utils mg-io mg-io-simulator mg-coordinator mg-query-v2)
|
||||
target_link_libraries(${target_name} mg-communication mg-utils mg-io mg-io-simulator mg-coordinator mg-query-v2 mg-storage-v3)
|
||||
target_link_libraries(${target_name} Boost::headers)
|
||||
target_link_libraries(${target_name} gtest gtest_main gmock rapidcheck rapidcheck_gtest)
|
||||
|
||||
@ -32,4 +32,5 @@ add_simulation_test(trial_query_storage/query_storage_test.cpp)
|
||||
add_simulation_test(sharded_map.cpp)
|
||||
add_simulation_test(shard_rsm.cpp)
|
||||
add_simulation_test(cluster_property_test.cpp)
|
||||
add_simulation_test(cluster_property_test_cypher_queries.cpp)
|
||||
add_simulation_test(request_router.cpp)
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -55,7 +55,7 @@ void run_server(Io<SimulatorTransport> io) {
|
||||
highest_seen = std::max(highest_seen, req.proposal);
|
||||
auto srv_res = CounterResponse{highest_seen};
|
||||
|
||||
io.Send(request_envelope.from_address, request_envelope.request_id, srv_res);
|
||||
io.Send(request_envelope.from_address, request_envelope.request_id, std::move(srv_res));
|
||||
}
|
||||
}
|
||||
|
||||
@ -76,7 +76,7 @@ std::pair<SimulatorStats, LatencyHistogramSummaries> RunWorkload(SimulatorConfig
|
||||
CounterRequest cli_req;
|
||||
cli_req.proposal = i;
|
||||
spdlog::info("[CLIENT] calling Request");
|
||||
auto res_f = cli_io.Request<CounterRequest, CounterResponse>(srv_addr, cli_req);
|
||||
auto res_f = cli_io.Request<CounterResponse, CounterRequest>(srv_addr, std::move(cli_req));
|
||||
spdlog::info("[CLIENT] calling Wait");
|
||||
auto res_rez = std::move(res_f).Wait();
|
||||
spdlog::info("[CLIENT] Wait returned");
|
||||
|
64
tests/simulation/cluster_property_test_cypher_queries.cpp
Normal file
@ -0,0 +1,64 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
// This test serves as an example of a property-based model test.
|
||||
// It generates a cluster configuration and a set of operations to
|
||||
// apply against both the real system and a greatly simplified model.
|
||||
|
||||
#include <chrono>
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include <rapidcheck.h>
|
||||
#include <rapidcheck/gtest.h>
|
||||
#include <spdlog/cfg/env.h>
|
||||
|
||||
#include "generated_operations.hpp"
|
||||
#include "io/simulator/simulator_config.hpp"
|
||||
#include "io/time.hpp"
|
||||
#include "storage/v3/shard_manager.hpp"
|
||||
#include "test_cluster.hpp"
|
||||
|
||||
namespace memgraph::tests::simulation {
|
||||
|
||||
using io::Duration;
|
||||
using io::Time;
|
||||
using io::simulator::SimulatorConfig;
|
||||
using storage::v3::kMaximumCronInterval;
|
||||
|
||||
RC_GTEST_PROP(RandomClusterConfig, HappyPath, (ClusterConfig cluster_config, NonEmptyOpVec ops, uint64_t rng_seed)) {
|
||||
spdlog::cfg::load_env_levels();
|
||||
|
||||
SimulatorConfig sim_config{
|
||||
.drop_percent = 0,
|
||||
.perform_timeouts = false,
|
||||
.scramble_messages = true,
|
||||
.rng_seed = rng_seed,
|
||||
.start_time = Time::min(),
|
||||
.abort_time = Time::max(),
|
||||
};
|
||||
|
||||
std::vector<std::string> queries = {"CREATE (n:test_label{property_1: 0, property_2: 0});", "MATCH (n) RETURN n;"};
|
||||
|
||||
auto [sim_stats_1, latency_stats_1] = RunClusterSimulationWithQueries(sim_config, cluster_config, queries);
|
||||
auto [sim_stats_2, latency_stats_2] = RunClusterSimulationWithQueries(sim_config, cluster_config, queries);
|
||||
|
||||
if (latency_stats_1 != latency_stats_2) {
|
||||
spdlog::error("simulator stats diverged across runs");
|
||||
spdlog::error("run 1 simulator stats: {}", sim_stats_1);
|
||||
spdlog::error("run 2 simulator stats: {}", sim_stats_2);
|
||||
spdlog::error("run 1 latency:\n{}", latency_stats_1.SummaryTable());
|
||||
spdlog::error("run 2 latency:\n{}", latency_stats_2.SummaryTable());
|
||||
RC_ASSERT(latency_stats_1 == latency_stats_2);
|
||||
RC_ASSERT(sim_stats_1 == sim_stats_2);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace memgraph::tests::simulation
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -1305,7 +1305,7 @@ void TestGetProperties(ShardClient &client) {
|
||||
MG_ASSERT(!result.error);
|
||||
MG_ASSERT(result.result_row.size() == 2);
|
||||
for (const auto &elem : result.result_row) {
|
||||
MG_ASSERT(elem.props.size() == 3);
|
||||
MG_ASSERT(elem.props.size() == 4);
|
||||
}
|
||||
}
|
||||
{
|
||||
|
93
tests/simulation/simulation_interpreter.hpp
Normal file
@ -0,0 +1,93 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "io/simulator/simulator_handle.hpp"
|
||||
#include "machine_manager/machine_config.hpp"
|
||||
#include "machine_manager/machine_manager.hpp"
|
||||
#include "query/v2/config.hpp"
|
||||
#include "query/v2/discard_value_stream.hpp"
|
||||
#include "query/v2/frontend/ast/ast.hpp"
|
||||
#include "query/v2/interpreter.hpp"
|
||||
#include "query/v2/request_router.hpp"
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
// TODO(gvolfing)
|
||||
// - How to set up the entire Raft cluster with the QE. Also provide an abstraction for that.
|
||||
// - Pass an argument to the setup to determine how many times a query should be retried.
|
||||
|
||||
namespace memgraph::io::simulator {
|
||||
|
||||
class SimulatedInterpreter {
|
||||
using ResultStream = query::v2::DiscardValueResultStream;
|
||||
|
||||
public:
|
||||
explicit SimulatedInterpreter(std::unique_ptr<query::v2::InterpreterContext> interpreter_context)
|
||||
: interpreter_context_(std::move(interpreter_context)) {
|
||||
interpreter_ = std::make_unique<memgraph::query::v2::Interpreter>(interpreter_context_.get());
|
||||
}
|
||||
|
||||
SimulatedInterpreter(const SimulatedInterpreter &) = delete;
|
||||
SimulatedInterpreter &operator=(const SimulatedInterpreter &) = delete;
|
||||
SimulatedInterpreter(SimulatedInterpreter &&) = delete;
|
||||
SimulatedInterpreter &operator=(SimulatedInterpreter &&) = delete;
|
||||
~SimulatedInterpreter() = default;
|
||||
|
||||
void InstallSimulatorTicker(Simulator &simulator) {
|
||||
interpreter_->InstallSimulatorTicker(simulator.GetSimulatorTickClosure());
|
||||
}
|
||||
|
||||
std::vector<ResultStream> RunQueries(const std::vector<std::string> &queries) {
|
||||
std::vector<ResultStream> results;
|
||||
results.reserve(queries.size());
|
||||
|
||||
for (const auto &query : queries) {
|
||||
results.emplace_back(RunQuery(query));
|
||||
}
|
||||
return results;
|
||||
}
|
||||
|
||||
private:
|
||||
ResultStream RunQuery(const std::string &query) {
|
||||
ResultStream stream;
|
||||
|
||||
std::map<std::string, memgraph::storage::v3::PropertyValue> params;
|
||||
const std::string *username = nullptr;
|
||||
|
||||
interpreter_->Prepare(query, params, username);
|
||||
interpreter_->PullAll(&stream);
|
||||
|
||||
return stream;
|
||||
}
|
||||
|
||||
std::unique_ptr<query::v2::InterpreterContext> interpreter_context_;
|
||||
std::unique_ptr<query::v2::Interpreter> interpreter_;
|
||||
};
|
||||
|
||||
SimulatedInterpreter SetUpInterpreter(Address coordinator_address, Simulator &simulator) {
|
||||
auto rr_factory = std::make_unique<memgraph::query::v2::SimulatedRequestRouterFactory>(simulator);
|
||||
|
||||
auto interpreter_context = std::make_unique<memgraph::query::v2::InterpreterContext>(
|
||||
nullptr,
|
||||
memgraph::query::v2::InterpreterConfig{.query = {.allow_load_csv = true},
|
||||
.execution_timeout_sec = 600,
|
||||
.replication_replica_check_frequency = std::chrono::seconds(1),
|
||||
.default_kafka_bootstrap_servers = "",
|
||||
.default_pulsar_service_url = "",
|
||||
.stream_transaction_conflict_retries = 30,
|
||||
.stream_transaction_retry_interval = std::chrono::milliseconds(500)},
|
||||
std::filesystem::path("mg_data"), std::move(rr_factory), coordinator_address);
|
||||
|
||||
return SimulatedInterpreter(std::move(interpreter_context));
|
||||
}
|
||||
|
||||
} // namespace memgraph::io::simulator
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -36,6 +36,8 @@
|
||||
#include "utils/print_helpers.hpp"
|
||||
#include "utils/variant_helpers.hpp"
|
||||
|
||||
#include "simulation_interpreter.hpp"
|
||||
|
||||
namespace memgraph::tests::simulation {
|
||||
|
||||
using coordinator::Coordinator;
|
||||
@ -279,4 +281,65 @@ std::pair<SimulatorStats, LatencyHistogramSummaries> RunClusterSimulation(const
|
||||
return std::make_pair(stats, histo);
|
||||
}
|
||||
|
||||
std::pair<SimulatorStats, LatencyHistogramSummaries> RunClusterSimulationWithQueries(
|
||||
const SimulatorConfig &sim_config, const ClusterConfig &cluster_config, const std::vector<std::string> &queries) {
|
||||
spdlog::info("========================== NEW SIMULATION ==========================");
|
||||
|
||||
auto simulator = Simulator(sim_config);
|
||||
|
||||
auto machine_1_addr = Address::TestAddress(1);
|
||||
auto cli_addr = Address::TestAddress(2);
|
||||
auto cli_addr_2 = Address::TestAddress(3);
|
||||
|
||||
Io<SimulatorTransport> cli_io = simulator.Register(cli_addr);
|
||||
Io<SimulatorTransport> cli_io_2 = simulator.Register(cli_addr_2);
|
||||
|
||||
auto coordinator_addresses = std::vector{
|
||||
machine_1_addr,
|
||||
};
|
||||
|
||||
ShardMap initialization_sm = TestShardMap(cluster_config.shards - 1, cluster_config.replication_factor);
|
||||
|
||||
auto mm_1 = MkMm(simulator, coordinator_addresses, machine_1_addr, initialization_sm);
|
||||
Address coordinator_address = mm_1.CoordinatorAddress();
|
||||
|
||||
auto mm_thread_1 = std::jthread(RunMachine, std::move(mm_1));
|
||||
simulator.IncrementServerCountAndWaitForQuiescentState(machine_1_addr);
|
||||
|
||||
auto detach_on_error = DetachIfDropped{.handle = mm_thread_1};
|
||||
|
||||
// TODO(tyler) clarify addresses of coordinator etc... as it's a mess
|
||||
|
||||
CoordinatorClient<SimulatorTransport> coordinator_client(cli_io, coordinator_address, {coordinator_address});
|
||||
WaitForShardsToInitialize(coordinator_client);
|
||||
|
||||
auto simulated_interpreter = io::simulator::SetUpInterpreter(coordinator_address, simulator);
|
||||
simulated_interpreter.InstallSimulatorTicker(simulator);
|
||||
|
||||
auto query_results = simulated_interpreter.RunQueries(queries);
|
||||
|
||||
// We have now completed our workload without failing any assertions, so we can
|
||||
// disable detaching the worker thread, which will cause the mm_thread_1 jthread
|
||||
// to be joined when this function returns.
|
||||
detach_on_error.detach = false;
|
||||
|
||||
simulator.ShutDown();
|
||||
|
||||
mm_thread_1.join();
|
||||
|
||||
SimulatorStats stats = simulator.Stats();
|
||||
|
||||
spdlog::info("total messages: {}", stats.total_messages);
|
||||
spdlog::info("dropped messages: {}", stats.dropped_messages);
|
||||
spdlog::info("timed out requests: {}", stats.timed_out_requests);
|
||||
spdlog::info("total requests: {}", stats.total_requests);
|
||||
spdlog::info("total responses: {}", stats.total_responses);
|
||||
spdlog::info("simulator ticks: {}", stats.simulator_ticks);
|
||||
|
||||
auto histo = cli_io_2.ResponseLatencies();
|
||||
|
||||
spdlog::info("========================== SUCCESS :) ==========================");
|
||||
return std::make_pair(stats, histo);
|
||||
}
|
||||
|
||||
} // namespace memgraph::tests::simulation
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -44,7 +44,7 @@ void run_server(Io<SimulatorTransport> io) {
|
||||
for (auto index = start_index; index < start_index + req.count; ++index) {
|
||||
response.vertices.push_back({std::string("Vertex_") + std::to_string(index)});
|
||||
}
|
||||
io.Send(request_envelope.from_address, request_envelope.request_id, response);
|
||||
io.Send(request_envelope.from_address, request_envelope.request_id, std::move(response));
|
||||
}
|
||||
}
|
||||
|
||||
@ -78,7 +78,7 @@ int main() {
|
||||
|
||||
auto req = ScanVerticesRequest{2, std::nullopt};
|
||||
|
||||
auto res_f = cli_io.Request<ScanVerticesRequest, VerticesResponse>(srv_addr, req);
|
||||
auto res_f = cli_io.Request<VerticesResponse, ScanVerticesRequest>(srv_addr, std::move(req));
|
||||
auto res_rez = std::move(res_f).Wait();
|
||||
simulator.ShutDown();
|
||||
return 0;
|
||||
|
@ -93,6 +93,9 @@ target_link_libraries(${test_prefix}query_expression_evaluator mg-query)
|
||||
add_unit_test(query_plan.cpp)
|
||||
target_link_libraries(${test_prefix}query_plan mg-query)
|
||||
|
||||
add_unit_test(query_v2_plan.cpp)
|
||||
target_link_libraries(${test_prefix}query_v2_plan mg-query-v2)
|
||||
|
||||
add_unit_test(query_plan_accumulate_aggregate.cpp)
|
||||
target_link_libraries(${test_prefix}query_plan_accumulate_aggregate mg-query)
|
||||
|
||||
@ -291,6 +294,9 @@ target_link_libraries(${test_prefix}storage_v3_expr mg-storage-v3 mg-expr)
|
||||
add_unit_test(storage_v3_schema.cpp)
|
||||
target_link_libraries(${test_prefix}storage_v3_schema mg-storage-v3)
|
||||
|
||||
add_unit_test(storage_v3_shard_split.cpp)
|
||||
target_link_libraries(${test_prefix}storage_v3_shard_split mg-storage-v3 mg-query-v2)
|
||||
|
||||
# Test mg-query-v2
|
||||
# These are commented out because of the new TypedValue in the query engine
|
||||
# add_unit_test(query_v2_interpreter.cpp ${CMAKE_SOURCE_DIR}/src/glue/v2/communication.cpp)
|
||||
@ -411,3 +417,6 @@ target_link_libraries(${test_prefix}query_v2_expression_evaluator mg-query-v2)
|
||||
# Tests for multiframes
|
||||
add_unit_test(query_v2_create_expand_multiframe.cpp)
|
||||
target_link_libraries(${test_prefix}query_v2_create_expand_multiframe mg-query-v2)
|
||||
|
||||
add_unit_test(query_v2_create_node_multiframe.cpp)
|
||||
target_link_libraries(${test_prefix}query_v2_create_node_multiframe mg-query-v2)
|
||||
|
@@ -1,4 +1,4 @@
-// Copyright 2022 Memgraph Ltd.
+// Copyright 2023 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -88,14 +88,14 @@ TEST_P(SingleNodeBfsTest, All) {

 std::unique_ptr<SingleNodeDb> SingleNodeBfsTest::db_{nullptr};

-INSTANTIATE_TEST_CASE_P(DirectionAndExpansionDepth, SingleNodeBfsTest,
-                        testing::Combine(testing::Range(-1, kVertexCount), testing::Range(-1, kVertexCount),
-                                         testing::Values(EdgeAtom::Direction::OUT, EdgeAtom::Direction::IN,
-                                                         EdgeAtom::Direction::BOTH),
-                                         testing::Values(std::vector<std::string>{}), testing::Bool(),
-                                         testing::Values(FilterLambdaType::NONE)));
+INSTANTIATE_TEST_SUITE_P(DirectionAndExpansionDepth, SingleNodeBfsTest,
+                         testing::Combine(testing::Range(-1, kVertexCount), testing::Range(-1, kVertexCount),
+                                          testing::Values(EdgeAtom::Direction::OUT, EdgeAtom::Direction::IN,
+                                                          EdgeAtom::Direction::BOTH),
+                                          testing::Values(std::vector<std::string>{}), testing::Bool(),
+                                          testing::Values(FilterLambdaType::NONE)));

-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
     EdgeType, SingleNodeBfsTest,
     testing::Combine(testing::Values(-1), testing::Values(-1),
                      testing::Values(EdgeAtom::Direction::OUT, EdgeAtom::Direction::IN, EdgeAtom::Direction::BOTH),
@@ -103,11 +103,11 @@ INSTANTIATE_TEST_CASE_P(
                      std::vector<std::string>{"b"}, std::vector<std::string>{"a", "b"}),
     testing::Bool(), testing::Values(FilterLambdaType::NONE)));

-INSTANTIATE_TEST_CASE_P(FilterLambda, SingleNodeBfsTest,
-                        testing::Combine(testing::Values(-1), testing::Values(-1),
-                                         testing::Values(EdgeAtom::Direction::OUT, EdgeAtom::Direction::IN,
-                                                         EdgeAtom::Direction::BOTH),
-                                         testing::Values(std::vector<std::string>{}), testing::Bool(),
-                                         testing::Values(FilterLambdaType::NONE, FilterLambdaType::USE_FRAME,
-                                                         FilterLambdaType::USE_FRAME_NULL, FilterLambdaType::USE_CTX,
-                                                         FilterLambdaType::ERROR)));
+INSTANTIATE_TEST_SUITE_P(FilterLambda, SingleNodeBfsTest,
+                         testing::Combine(testing::Values(-1), testing::Values(-1),
+                                          testing::Values(EdgeAtom::Direction::OUT, EdgeAtom::Direction::IN,
+                                                          EdgeAtom::Direction::BOTH),
+                                          testing::Values(std::vector<std::string>{}), testing::Bool(),
+                                          testing::Values(FilterLambdaType::NONE, FilterLambdaType::USE_FRAME,
+                                                          FilterLambdaType::USE_FRAME_NULL, FilterLambdaType::USE_CTX,
+                                                          FilterLambdaType::ERROR)));
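The replacements above are the googletest macro rename: INSTANTIATE_TEST_CASE_P is the deprecated spelling, INSTANTIATE_TEST_SUITE_P the current one, and the parameter generators are unchanged. A short sketch of the pattern, with an illustrative fixture rather than SingleNodeBfsTest:

    // Illustrative fixture, not SingleNodeBfsTest: only the macro name changes,
    // the parameter generators stay exactly the same.
    #include <gtest/gtest.h>

    class RangeTest : public ::testing::TestWithParam<int> {};

    TEST_P(RangeTest, IsNonNegative) { EXPECT_GE(GetParam(), 0); }

    // Deprecated spelling: INSTANTIATE_TEST_CASE_P(Depths, RangeTest, ::testing::Range(0, 4));
    INSTANTIATE_TEST_SUITE_P(Depths, RangeTest, ::testing::Range(0, 4));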
@@ -1,4 +1,4 @@
-// Copyright 2022 Memgraph Ltd.
+// Copyright 2023 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -294,7 +294,7 @@ std::shared_ptr<Base> gAstGeneratorTypes[] = {
     std::make_shared<CachedAstGenerator>(),
 };

-INSTANTIATE_TEST_CASE_P(AstGeneratorTypes, CypherMainVisitorTest, ::testing::ValuesIn(gAstGeneratorTypes));
+INSTANTIATE_TEST_SUITE_P(AstGeneratorTypes, CypherMainVisitorTest, ::testing::ValuesIn(gAstGeneratorTypes));

 // NOTE: The above used to use *Typed Tests* functionality of gtest library.
 // Unfortunately, the compilation time of this test increased to full 2 minutes!
@@ -308,7 +308,7 @@ INSTANTIATE_TEST_CASE_P(AstGeneratorTypes, CypherMainVisitorTest, ::testing::Val
 // ClonedAstGenerator, CachedAstGenerator>
 // AstGeneratorTypes;
 //
-// TYPED_TEST_CASE(CypherMainVisitorTest, AstGeneratorTypes);
+// TYPED_TEST_SUITE(CypherMainVisitorTest, AstGeneratorTypes);

 TEST_P(CypherMainVisitorTest, SyntaxException) {
   auto &ast_generator = *GetParam();

@@ -1,4 +1,4 @@
-// Copyright 2022 Memgraph Ltd.
+// Copyright 2023 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@@ -1,4 +1,4 @@
-// Copyright 2022 Memgraph Ltd.
+// Copyright 2023 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -48,7 +48,7 @@ void RunServer(Io<LocalTransport> io) {
       highest_seen = std::max(highest_seen, req.proposal);
       auto srv_res = CounterResponse{highest_seen};

-      io.Send(request_envelope.from_address, request_envelope.request_id, srv_res);
+      io.Send(request_envelope.from_address, request_envelope.request_id, std::move(srv_res));
     }
   }

@@ -70,7 +70,7 @@ TEST(LocalTransport, BasicRequest) {
     auto value = 1;  // i;
     cli_req.proposal = value;
     spdlog::info("[CLIENT] sending request");
-    auto res_f = cli_io.Request<CounterRequest, CounterResponse>(srv_addr, cli_req);
+    auto res_f = cli_io.Request<CounterResponse, CounterRequest>(srv_addr, std::move(cli_req));
    spdlog::info("[CLIENT] waiting on future");

    auto res_rez = std::move(res_f).Wait();
@@ -1,4 +1,4 @@
-// Copyright 2022 Memgraph Ltd.
+// Copyright 2023 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@@ -41,7 +41,10 @@ class MockedRequestRouter : public RequestRouterInterface {
   MOCK_METHOD(std::optional<storage::v3::EdgeTypeId>, MaybeNameToEdgeType, (const std::string &), (const));
   MOCK_METHOD(std::optional<storage::v3::LabelId>, MaybeNameToLabel, (const std::string &), (const));
   MOCK_METHOD(bool, IsPrimaryLabel, (storage::v3::LabelId), (const));
-  MOCK_METHOD(bool, IsPrimaryKey, (storage::v3::LabelId, storage::v3::PropertyId), (const));
+  MOCK_METHOD(bool, IsPrimaryProperty, (storage::v3::LabelId, storage::v3::PropertyId), (const));
+  MOCK_METHOD((std::optional<std::pair<uint64_t, uint64_t>>), AllocateInitialEdgeIds, (io::Address));
+  MOCK_METHOD(void, InstallSimulatorTicker, (std::function<bool()>));
+  MOCK_METHOD(const std::vector<coordinator::SchemaProperty> &, GetSchemaForLabel, (storage::v3::LabelId), (const));
 };

 class MockedLogicalOperator : public plan::LogicalOperator {
@@ -58,7 +61,7 @@ class MockedLogicalOperator : public plan::LogicalOperator {
 class MockedCursor : public plan::Cursor {
  public:
   MOCK_METHOD(bool, Pull, (Frame &, expr::ExecutionContext &));
-  MOCK_METHOD(void, PullMultiple, (MultiFrame &, expr::ExecutionContext &));
+  MOCK_METHOD(bool, PullMultiple, (MultiFrame &, expr::ExecutionContext &));
   MOCK_METHOD(void, Reset, ());
   MOCK_METHOD(void, Shutdown, ());
 };
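The last hunk changes the return type of MockedCursor::PullMultiple from void to bool, so tests that stub it must now provide a return value. A self-contained gMock sketch of that pattern, using a stand-in Cursor interface rather than the real plan::Cursor:

    // Stand-in interface, not the real plan::Cursor: once PullMultiple returns
    // bool, every stubbed call needs a return action such as Return(true/false).
    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    struct Cursor {
      virtual ~Cursor() = default;
      virtual bool PullMultiple() = 0;
    };

    struct MockedCursor : Cursor {
      MOCK_METHOD(bool, PullMultiple, (), (override));
    };

    TEST(MockedCursorSketch, SignalsExhaustion) {
      MockedCursor cursor;
      EXPECT_CALL(cursor, PullMultiple())
          .WillOnce(::testing::Return(true))    // produced a batch of frames
          .WillOnce(::testing::Return(false));  // nothing left to pull
      EXPECT_TRUE(cursor.PullMultiple());
      EXPECT_FALSE(cursor.PullMultiple());
    }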