Merge branch 'project-pineapples' into T1167-MG-create-scanbyprimarykey-operator

gvolfing 2022-12-15 10:26:36 +01:00
commit 14000d727f
129 changed files with 9041 additions and 8221 deletions

View File

@ -99,7 +99,7 @@ jobs:
echo ${file}
if [[ ${file} == *.py ]]; then
python3 -m black --check --diff ${file}
python3 -m isort --check-only --diff ${file}
python3 -m isort --check-only --profile "black" --diff ${file}
fi
done

View File

@ -14,6 +14,7 @@ repos:
hooks:
- id: isort
name: isort (python)
args: ["--profile", "black"]
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v13.0.0
hooks:

View File

@ -182,7 +182,8 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
# c99-designator is disabled because of required mixture of designated and
# non-designated initializers in Python Query Module code (`py_module.cpp`).
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall \
-Werror=switch -Werror=switch-bool -Werror=return-type \
-Werror=switch -Werror=switch-bool -Werror=implicit-fallthrough \
-Werror=return-type \
-Werror=return-stack-address \
-Wno-c99-designator \
-DBOOST_ASIO_USE_TS_EXECUTOR_AS_DEFAULT")

View File

@ -21,6 +21,7 @@ add_subdirectory(auth)
add_subdirectory(parser)
add_subdirectory(expr)
add_subdirectory(coordinator)
add_subdirectory(functions)
if (MG_ENTERPRISE)
add_subdirectory(audit)

src/common/errors.hpp (new file, 68 lines)
View File

@ -0,0 +1,68 @@
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#pragma once
#include <cstdint>
#include <string_view>
namespace memgraph::common {
enum class ErrorCode : uint8_t {
SERIALIZATION_ERROR,
NONEXISTENT_OBJECT,
DELETED_OBJECT,
VERTEX_HAS_EDGES,
PROPERTIES_DISABLED,
VERTEX_ALREADY_INSERTED,
// Schema Violations
SCHEMA_NO_SCHEMA_DEFINED_FOR_LABEL,
SCHEMA_VERTEX_PROPERTY_WRONG_TYPE,
SCHEMA_VERTEX_UPDATE_PRIMARY_KEY,
SCHEMA_VERTEX_UPDATE_PRIMARY_LABEL,
SCHEMA_VERTEX_SECONDARY_LABEL_IS_PRIMARY,
SCHEMA_VERTEX_PRIMARY_PROPERTIES_UNDEFINED,
OBJECT_NOT_FOUND,
};
constexpr std::string_view ErrorCodeToString(const ErrorCode code) {
switch (code) {
case ErrorCode::SERIALIZATION_ERROR:
return "SERIALIZATION_ERROR";
case ErrorCode::NONEXISTENT_OBJECT:
return "NONEXISTENT_OBJECT";
case ErrorCode::DELETED_OBJECT:
return "DELETED_OBJECT";
case ErrorCode::VERTEX_HAS_EDGES:
return "VERTEX_HAS_EDGES";
case ErrorCode::PROPERTIES_DISABLED:
return "PROPERTIES_DISABLED";
case ErrorCode::VERTEX_ALREADY_INSERTED:
return "VERTEX_ALREADY_INSERTED";
case ErrorCode::SCHEMA_NO_SCHEMA_DEFINED_FOR_LABEL:
return "SCHEMA_NO_SCHEMA_DEFINED_FOR_LABEL";
case ErrorCode::SCHEMA_VERTEX_PROPERTY_WRONG_TYPE:
return "SCHEMA_VERTEX_PROPERTY_WRONG_TYPE";
case ErrorCode::SCHEMA_VERTEX_UPDATE_PRIMARY_KEY:
return "SCHEMA_VERTEX_UPDATE_PRIMARY_KEY";
case ErrorCode::SCHEMA_VERTEX_UPDATE_PRIMARY_LABEL:
return "SCHEMA_VERTEX_UPDATE_PRIMARY_LABEL";
case ErrorCode::SCHEMA_VERTEX_SECONDARY_LABEL_IS_PRIMARY:
return "SCHEMA_VERTEX_SECONDARY_LABEL_IS_PRIMARY";
case ErrorCode::SCHEMA_VERTEX_PRIMARY_PROPERTIES_UNDEFINED:
return "SCHEMA_VERTEX_PRIMARY_PROPERTIES_UNDEFINED";
case ErrorCode::OBJECT_NOT_FOUND:
return "OBJECT_NOT_FOUND";
}
}
} // namespace memgraph::common
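A minimal usage sketch of the new error-code helper; the include path is assumed from the file location above.

// Usage sketch (assumption: "common/errors.hpp" resolves relative to src/).
#include <iostream>
#include "common/errors.hpp"

int main() {
  const auto name = memgraph::common::ErrorCodeToString(memgraph::common::ErrorCode::OBJECT_NOT_FOUND);
  std::cout << name << '\n';  // prints OBJECT_NOT_FOUND
}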

View File

@ -74,7 +74,7 @@ State RunHandlerV4(Signature signature, TSession &session, State state, Marker m
}
case Signature::Route: {
if constexpr (bolt_minor >= 3) {
if (signature == Signature::Route) return HandleRoute<TSession>(session);
return HandleRoute<TSession>(session);
} else {
spdlog::trace("Supported only in bolt v4.3");
return State::Close;

View File

@ -71,6 +71,9 @@ struct QueueInner {
// starvation by sometimes randomizing priorities, rather than following a strict
// prioritization.
std::deque<Message> queue;
uint64_t submitted = 0;
uint64_t calls_to_pop = 0;
};
/// There are two reasons to implement our own Queue instead of using
@ -86,6 +89,8 @@ class Queue {
MG_ASSERT(inner_.use_count() > 0);
std::unique_lock<std::mutex> lock(inner_->mu);
inner_->submitted++;
inner_->queue.emplace_back(std::move(message));
} // lock dropped before notifying condition variable
@ -96,6 +101,9 @@ class Queue {
MG_ASSERT(inner_.use_count() > 0);
std::unique_lock<std::mutex> lock(inner_->mu);
inner_->calls_to_pop++;
inner_->cv.notify_all();
while (inner_->queue.empty()) {
inner_->cv.wait(lock);
}
@ -105,6 +113,15 @@ class Queue {
return message;
}
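// Blocks until every message submitted so far has been popped and the consumer is
// parked on an empty queue again, i.e. calls_to_pop > submitted.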
void BlockOnQuiescence() const {
MG_ASSERT(inner_.use_count() > 0);
std::unique_lock<std::mutex> lock(inner_->mu);
while (inner_->calls_to_pop <= inner_->submitted) {
inner_->cv.wait(lock);
}
}
};
/// A CoordinatorWorker owns Raft<CoordinatorRsm> instances and receives messages from the MachineManager.
@ -129,9 +146,7 @@ class CoordinatorWorker {
public:
CoordinatorWorker(io::Io<IoImpl> io, Queue queue, Coordinator coordinator)
: io_(std::move(io)),
queue_(std::move(queue)),
coordinator_{std::move(io_.ForkLocal()), {}, std::move(coordinator)} {}
: io_(std::move(io)), queue_(std::move(queue)), coordinator_{std::move(io_), {}, std::move(coordinator)} {}
CoordinatorWorker(CoordinatorWorker &&) noexcept = default;
CoordinatorWorker &operator=(CoordinatorWorker &&) noexcept = default;
@ -140,15 +155,12 @@ class CoordinatorWorker {
~CoordinatorWorker() = default;
void Run() {
while (true) {
bool should_continue = true;
while (should_continue) {
Message message = queue_.Pop();
const bool should_continue = std::visit(
[this](auto &&msg) { return this->Process(std::forward<decltype(msg)>(msg)); }, std::move(message));
if (!should_continue) {
return;
}
should_continue = std::visit([this](auto &&msg) { return this->Process(std::forward<decltype(msg)>(msg)); },
std::move(message));
}
}
};
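The submitted / calls_to_pop counters feed BlockOnQuiescence, which only returns once the consumer has called Pop more often than messages were submitted, meaning it is parked on an empty queue. A standalone sketch of the same counting pattern, with illustrative names rather than the memgraph Queue itself:

// Minimal quiescence-counting queue (illustrative, not memgraph code).
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <mutex>

template <typename T>
class CountingQueue {
  std::mutex mu_;
  std::condition_variable cv_;
  std::deque<T> queue_;
  uint64_t submitted_ = 0;
  uint64_t calls_to_pop_ = 0;

 public:
  void Push(T item) {
    {
      std::unique_lock<std::mutex> lock(mu_);
      ++submitted_;
      queue_.push_back(std::move(item));
    }  // lock dropped before notifying
    cv_.notify_all();
  }

  T Pop() {
    std::unique_lock<std::mutex> lock(mu_);
    ++calls_to_pop_;
    cv_.notify_all();  // wake anyone blocked in BlockOnQuiescence
    cv_.wait(lock, [this] { return !queue_.empty(); });
    T item = std::move(queue_.front());
    queue_.pop_front();
    return item;
  }

  void BlockOnQuiescence() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return calls_to_pop_ > submitted_; });
  }
};

A shutdown or test path can Push its final message and then call BlockOnQuiescence to wait until the worker has drained the queue.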

View File

@ -228,7 +228,7 @@ Hlc ShardMap::IncrementShardMapVersion() noexcept {
return shard_map_version;
}
// TODO(antaljanosbenjamin) use a single map for all name id
// mapping and a single counter to maintain the next id
std::unordered_map<uint64_t, std::string> ShardMap::IdToNames() {
std::unordered_map<uint64_t, std::string> id_to_names;
@ -248,6 +248,25 @@ std::unordered_map<uint64_t, std::string> ShardMap::IdToNames() {
Hlc ShardMap::GetHlc() const noexcept { return shard_map_version; }
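// Encodes the shard id big-endian into the last eight bytes of an otherwise-zero UUID,
// giving deterministic, human-readable shard UUIDs (e.g. shard id 1 -> 00000000-0000-0000-0000-000000000001).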
boost::uuids::uuid NewShardUuid(uint64_t shard_id) {
return boost::uuids::uuid{0,
0,
0,
0,
0,
0,
0,
0,
static_cast<unsigned char>(shard_id >> 56U),
static_cast<unsigned char>(shard_id >> 48U),
static_cast<unsigned char>(shard_id >> 40U),
static_cast<unsigned char>(shard_id >> 32U),
static_cast<unsigned char>(shard_id >> 24U),
static_cast<unsigned char>(shard_id >> 16U),
static_cast<unsigned char>(shard_id >> 8U),
static_cast<unsigned char>(shard_id)};
}
std::vector<ShardToInitialize> ShardMap::AssignShards(Address storage_manager,
std::set<boost::uuids::uuid> initialized) {
std::vector<ShardToInitialize> ret{};
@ -264,10 +283,11 @@ std::vector<ShardToInitialize> ShardMap::AssignShards(Address storage_manager,
// TODO(tyler) avoid these triple-nested loops by having the heartbeat include better info
bool machine_contains_shard = false;
for (auto &aas : shard) {
for (auto &aas : shard.peers) {
if (initialized.contains(aas.address.unique_id)) {
machine_contains_shard = true;
if (aas.status != Status::CONSENSUS_PARTICIPANT) {
mutated = true;
spdlog::info("marking shard as full consensus participant: {}", aas.address.unique_id);
aas.status = Status::CONSENSUS_PARTICIPANT;
}
@ -291,11 +311,14 @@ std::vector<ShardToInitialize> ShardMap::AssignShards(Address storage_manager,
}
}
if (!machine_contains_shard && shard.size() < label_space.replication_factor) {
if (!machine_contains_shard && shard.peers.size() < label_space.replication_factor) {
// increment version for each new uuid for deterministic creation
IncrementShardMapVersion();
Address address = storage_manager;
// TODO(tyler) use deterministic UUID so that coordinators don't diverge here
address.unique_id = boost::uuids::uuid{boost::uuids::random_generator()()},
address.unique_id = NewShardUuid(shard_map_version.logical_id);
spdlog::info("assigning shard manager to shard");
@ -314,7 +337,7 @@ std::vector<ShardToInitialize> ShardMap::AssignShards(Address storage_manager,
.status = Status::INITIALIZING,
};
shard.emplace_back(aas);
shard.peers.emplace_back(aas);
}
}
}
@ -337,9 +360,9 @@ bool ShardMap::SplitShard(Hlc previous_shard_map_version, LabelId label_id, cons
MG_ASSERT(!shards_in_map.contains(key));
MG_ASSERT(label_spaces.contains(label_id));
// Finding the Shard that the new PrimaryKey should map to.
// Finding the ShardMetadata that the new PrimaryKey should map to.
auto prev = std::prev(shards_in_map.upper_bound(key));
Shard duplicated_shard = prev->second;
ShardMetadata duplicated_shard = prev->second;
// Apply the split
shards_in_map[key] = duplicated_shard;
@ -360,7 +383,7 @@ std::optional<LabelId> ShardMap::InitializeNewLabel(std::string label_name, std:
labels.emplace(std::move(label_name), label_id);
PrimaryKey initial_key = SchemaToMinKey(schema);
Shard empty_shard = {};
ShardMetadata empty_shard = {};
Shards shards = {
{initial_key, empty_shard},
@ -383,6 +406,7 @@ std::optional<LabelId> ShardMap::InitializeNewLabel(std::string label_name, std:
void ShardMap::AddServer(Address server_address) {
// Find a random place for the server to plug in
}
std::optional<LabelId> ShardMap::GetLabelId(const std::string &label) const {
if (const auto it = labels.find(label); it != labels.end()) {
return it->second;
@ -455,7 +479,7 @@ Shards ShardMap::GetShardsForRange(const LabelName &label_name, const PrimaryKey
return shards;
}
Shard ShardMap::GetShardForKey(const LabelName &label_name, const PrimaryKey &key) const {
ShardMetadata ShardMap::GetShardForKey(const LabelName &label_name, const PrimaryKey &key) const {
MG_ASSERT(labels.contains(label_name));
LabelId label_id = labels.at(label_name);
@ -468,7 +492,7 @@ Shard ShardMap::GetShardForKey(const LabelName &label_name, const PrimaryKey &ke
return std::prev(label_space.shards.upper_bound(key))->second;
}
Shard ShardMap::GetShardForKey(const LabelId &label_id, const PrimaryKey &key) const {
ShardMetadata ShardMap::GetShardForKey(const LabelId &label_id, const PrimaryKey &key) const {
MG_ASSERT(label_spaces.contains(label_id));
const auto &label_space = label_spaces.at(label_id);
@ -532,12 +556,12 @@ EdgeTypeIdMap ShardMap::AllocateEdgeTypeIds(const std::vector<EdgeTypeName> &new
bool ShardMap::ClusterInitialized() const {
for (const auto &[label_id, label_space] : label_spaces) {
for (const auto &[low_key, shard] : label_space.shards) {
if (shard.size() < label_space.replication_factor) {
if (shard.peers.size() < label_space.replication_factor) {
spdlog::info("label_space below desired replication factor");
return false;
}
for (const auto &aas : shard) {
for (const auto &aas : shard.peers) {
if (aas.status != Status::CONSENSUS_PARTICIPANT) {
spdlog::info("shard member not yet a CONSENSUS_PARTICIPANT");
return false;

View File

@ -76,8 +76,35 @@ struct AddressAndStatus {
};
using PrimaryKey = std::vector<PropertyValue>;
using Shard = std::vector<AddressAndStatus>;
using Shards = std::map<PrimaryKey, Shard>;
struct ShardMetadata {
std::vector<AddressAndStatus> peers;
uint64_t version;
friend std::ostream &operator<<(std::ostream &in, const ShardMetadata &shard) {
using utils::print_helpers::operator<<;
in << "ShardMetadata { peers: ";
in << shard.peers;
in << " version: ";
in << shard.version;
in << " }";
return in;
}
friend bool operator==(const ShardMetadata &lhs, const ShardMetadata &rhs) = default;
friend bool operator<(const ShardMetadata &lhs, const ShardMetadata &rhs) {
if (lhs.peers != rhs.peers) {
return lhs.peers < rhs.peers;
}
return lhs.version < rhs.version;
}
};
using Shards = std::map<PrimaryKey, ShardMetadata>;
using LabelName = std::string;
using PropertyName = std::string;
using EdgeTypeName = std::string;
@ -99,7 +126,7 @@ PrimaryKey SchemaToMinKey(const std::vector<SchemaProperty> &schema);
struct LabelSpace {
std::vector<SchemaProperty> schema;
// Maps between the smallest primary key stored in the shard and the shard
std::map<PrimaryKey, Shard> shards;
std::map<PrimaryKey, ShardMetadata> shards;
size_t replication_factor;
friend std::ostream &operator<<(std::ostream &in, const LabelSpace &label_space) {
@ -160,9 +187,9 @@ struct ShardMap {
Shards GetShardsForRange(const LabelName &label_name, const PrimaryKey &start_key, const PrimaryKey &end_key) const;
Shard GetShardForKey(const LabelName &label_name, const PrimaryKey &key) const;
ShardMetadata GetShardForKey(const LabelName &label_name, const PrimaryKey &key) const;
Shard GetShardForKey(const LabelId &label_id, const PrimaryKey &key) const;
ShardMetadata GetShardForKey(const LabelId &label_id, const PrimaryKey &key) const;
PropertyMap AllocatePropertyIds(const std::vector<PropertyName> &new_properties);

View File

@ -17,4 +17,4 @@ target_include_directories(mg-expr PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories(mg-expr PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/ast)
target_include_directories(mg-expr PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/interpret)
target_include_directories(mg-expr PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/semantic)
target_link_libraries(mg-expr cppitertools Boost::headers mg-utils mg-parser)
target_link_libraries(mg-expr cppitertools Boost::headers mg-utils mg-parser mg-functions)

View File

@ -24,6 +24,7 @@
#include "expr/exceptions.hpp"
#include "expr/interpret/frame.hpp"
#include "expr/semantic/symbol_table.hpp"
#include "functions/awesome_memgraph_functions.hpp"
#include "utils/exceptions.hpp"
namespace memgraph::expr {
@ -35,8 +36,8 @@ template <typename TypedValue, typename EvaluationContext, typename DbAccessor,
typename PropertyValue, typename ConvFunctor, typename Error, typename Tag = StorageTag>
class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
public:
ExpressionEvaluator(Frame<TypedValue> *frame, const SymbolTable &symbol_table, const EvaluationContext &ctx,
DbAccessor *dba, StorageView view)
ExpressionEvaluator(Frame *frame, const SymbolTable &symbol_table, const EvaluationContext &ctx, DbAccessor *dba,
StorageView view)
: frame_(frame), symbol_table_(&symbol_table), ctx_(&ctx), dba_(dba), view_(view) {}
using ExpressionVisitor<TypedValue>::Visit;
@ -100,6 +101,28 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
#undef BINARY_OPERATOR_VISITOR
#undef UNARY_OPERATOR_VISITOR
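// Centralizes the error handling that was previously repeated at each access site:
// maps a shard error code to an ExpressionRuntimeException naming the accessed object.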
void HandleObjectAccessError(Error &shard_error, const std::string_view accessed_object) {
switch (shard_error) {
case Error::DELETED_OBJECT:
throw ExpressionRuntimeException("Trying to access {} on a deleted object.", accessed_object);
case Error::NONEXISTENT_OBJECT:
throw ExpressionRuntimeException("Trying to access {} from a node object doesn't exist.", accessed_object);
case Error::SERIALIZATION_ERROR:
case Error::VERTEX_HAS_EDGES:
case Error::PROPERTIES_DISABLED:
case Error::VERTEX_ALREADY_INSERTED:
case Error::OBJECT_NOT_FOUND:
throw ExpressionRuntimeException("Unexpected error when accessing {}.", accessed_object);
case Error::SCHEMA_NO_SCHEMA_DEFINED_FOR_LABEL:
case Error::SCHEMA_VERTEX_PROPERTY_WRONG_TYPE:
case Error::SCHEMA_VERTEX_UPDATE_PRIMARY_KEY:
case Error::SCHEMA_VERTEX_UPDATE_PRIMARY_LABEL:
case Error::SCHEMA_VERTEX_SECONDARY_LABEL_IS_PRIMARY:
case Error::SCHEMA_VERTEX_PRIMARY_PROPERTIES_UNDEFINED:
throw ExpressionRuntimeException("Unexpected schema violation when accessing {}.", accessed_object);
}
}
TypedValue Visit(AndOperator &op) override {
auto value1 = op.expression1_->Accept(*this);
if (value1.IsBool() && !value1.ValueBool()) {
@ -396,17 +419,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
has_label = vertex.HasLabel(StorageView::NEW, GetLabel(label));
}
if (has_label.HasError()) {
switch (has_label.GetError()) {
case Error::DELETED_OBJECT:
throw ExpressionRuntimeException("Trying to access labels on a deleted node.");
case Error::NONEXISTENT_OBJECT:
throw ExpressionRuntimeException("Trying to access labels from a node that doesn't exist.");
case Error::SERIALIZATION_ERROR:
case Error::VERTEX_HAS_EDGES:
case Error::PROPERTIES_DISABLED:
case Error::VERTEX_ALREADY_INSERTED:
throw ExpressionRuntimeException("Unexpected error when accessing labels.");
}
HandleObjectAccessError(has_label.GetError().code, "labels");
}
return *has_label;
}
@ -415,8 +428,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
typename TReturnType = std::enable_if_t<std::is_same_v<TTag, QueryEngineTag>, bool>>
TReturnType HasLabelImpl(const VertexAccessor &vertex, const LabelIx &label_ix, QueryEngineTag /*tag*/) {
auto label = typename VertexAccessor::Label{LabelId::FromUint(label_ix.ix)};
auto has_label = vertex.HasLabel(label);
return !has_label;
return vertex.HasLabel(label);
}
TypedValue Visit(LabelsTest &labels_test) override {
@ -479,7 +491,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
}
TypedValue Visit(Function &function) override {
FunctionContext function_ctx{dba_, ctx_->memory, ctx_->timestamp, &ctx_->counters, view_};
functions::FunctionContext<DbAccessor> function_ctx{dba_, ctx_->memory, ctx_->timestamp, &ctx_->counters, view_};
// Stack allocate evaluated arguments when there's a small number of them.
if (function.arguments_.size() <= 8) {
TypedValue arguments[8] = {TypedValue(ctx_->memory), TypedValue(ctx_->memory), TypedValue(ctx_->memory),
@ -744,17 +756,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
maybe_prop = record_accessor.GetProperty(StorageView::NEW, ctx_->properties[prop.ix]);
}
if (maybe_prop.HasError()) {
switch (maybe_prop.GetError()) {
case Error::DELETED_OBJECT:
throw ExpressionRuntimeException("Trying to get a property from a deleted object.");
case Error::NONEXISTENT_OBJECT:
throw ExpressionRuntimeException("Trying to get a property from an object that doesn't exist.");
case Error::SERIALIZATION_ERROR:
case Error::VERTEX_HAS_EDGES:
case Error::PROPERTIES_DISABLED:
case Error::VERTEX_ALREADY_INSERTED:
throw ExpressionRuntimeException("Unexpected error when getting a property.");
}
HandleObjectAccessError(maybe_prop.GetError().code, "property");
}
return conv_(*maybe_prop, ctx_->memory);
}
@ -773,24 +775,14 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
maybe_prop = record_accessor.GetProperty(view_, dba_->NameToProperty(name));
}
if (maybe_prop.HasError()) {
switch (maybe_prop.GetError()) {
case Error::DELETED_OBJECT:
throw ExpressionRuntimeException("Trying to get a property from a deleted object.");
case Error::NONEXISTENT_OBJECT:
throw ExpressionRuntimeException("Trying to get a property from an object that doesn't exist.");
case Error::SERIALIZATION_ERROR:
case Error::VERTEX_HAS_EDGES:
case Error::PROPERTIES_DISABLED:
case Error::VERTEX_ALREADY_INSERTED:
throw ExpressionRuntimeException("Unexpected error when getting a property.");
}
HandleObjectAccessError(maybe_prop.GetError().code, "property");
}
return conv_(*maybe_prop, ctx_->memory);
}
LabelId GetLabel(LabelIx label) { return ctx_->labels[label.ix]; }
Frame<TypedValue> *frame_;
Frame *frame_;
const SymbolTable *symbol_table_;
const EvaluationContext *ctx_;
DbAccessor *dba_;

View File

@ -20,7 +20,6 @@
namespace memgraph::expr {
template <typename TypedValue>
class Frame {
public:
/// Create a Frame of given size backed by a utils::NewDeleteResource()
@ -42,4 +41,18 @@ class Frame {
utils::pmr::vector<TypedValue> elems_;
};
class FrameWithValidity final : public Frame {
public:
explicit FrameWithValidity(int64_t size) : Frame(size), is_valid_(false) {}
FrameWithValidity(int64_t size, utils::MemoryResource *memory) : Frame(size, memory), is_valid_(false) {}
bool IsValid() const noexcept { return is_valid_; }
void MakeValid() noexcept { is_valid_ = true; }
void MakeInvalid() noexcept { is_valid_ = false; }
private:
bool is_valid_;
};
} // namespace memgraph::expr

View File

@ -0,0 +1 @@
add_library(mg-functions INTERFACE)

File diff suppressed because it is too large

View File

@ -15,13 +15,13 @@
#include <string>
#include <vector>
#include "common/errors.hpp"
#include "coordinator/shard_map.hpp"
#include "query/v2/accessors.hpp"
#include "query/v2/request_router.hpp"
#include "query/v2/requests.hpp"
#include "query/v2/shard_request_manager.hpp"
#include "storage/v3/edge_accessor.hpp"
#include "storage/v3/id_types.hpp"
#include "storage/v3/result.hpp"
#include "storage/v3/shard.hpp"
#include "storage/v3/vertex_accessor.hpp"
#include "storage/v3/view.hpp"
@ -71,106 +71,101 @@ query::v2::TypedValue ToTypedValue(const Value &value) {
}
}
storage::v3::Result<communication::bolt::Vertex> ToBoltVertex(
const query::v2::accessors::VertexAccessor &vertex, const msgs::ShardRequestManagerInterface *shard_request_manager,
storage::v3::View /*view*/) {
communication::bolt::Vertex ToBoltVertex(const query::v2::accessors::VertexAccessor &vertex,
const query::v2::RequestRouterInterface *request_router,
storage::v3::View /*view*/) {
auto id = communication::bolt::Id::FromUint(0);
auto labels = vertex.Labels();
std::vector<std::string> new_labels;
new_labels.reserve(labels.size());
for (const auto &label : labels) {
new_labels.push_back(shard_request_manager->LabelToName(label.id));
new_labels.push_back(request_router->LabelToName(label.id));
}
auto properties = vertex.Properties();
std::map<std::string, Value> new_properties;
for (const auto &[prop, property_value] : properties) {
new_properties[shard_request_manager->PropertyToName(prop)] = ToBoltValue(property_value);
new_properties[request_router->PropertyToName(prop)] = ToBoltValue(property_value);
}
return communication::bolt::Vertex{id, new_labels, new_properties};
}
storage::v3::Result<communication::bolt::Edge> ToBoltEdge(
const query::v2::accessors::EdgeAccessor &edge, const msgs::ShardRequestManagerInterface *shard_request_manager,
storage::v3::View /*view*/) {
communication::bolt::Edge ToBoltEdge(const query::v2::accessors::EdgeAccessor &edge,
const query::v2::RequestRouterInterface *request_router,
storage::v3::View /*view*/) {
// TODO(jbajic) Fix bolt communication
auto id = communication::bolt::Id::FromUint(0);
auto from = communication::bolt::Id::FromUint(0);
auto to = communication::bolt::Id::FromUint(0);
const auto &type = shard_request_manager->EdgeTypeToName(edge.EdgeType());
const auto &type = request_router->EdgeTypeToName(edge.EdgeType());
auto properties = edge.Properties();
std::map<std::string, Value> new_properties;
for (const auto &[prop, property_value] : properties) {
new_properties[shard_request_manager->PropertyToName(prop)] = ToBoltValue(property_value);
new_properties[request_router->PropertyToName(prop)] = ToBoltValue(property_value);
}
return communication::bolt::Edge{id, from, to, type, new_properties};
}
storage::v3::Result<communication::bolt::Path> ToBoltPath(
const query::v2::accessors::Path & /*edge*/, const msgs::ShardRequestManagerInterface * /*shard_request_manager*/,
storage::v3::View /*view*/) {
communication::bolt::Path ToBoltPath(const query::v2::accessors::Path & /*edge*/,
const query::v2::RequestRouterInterface * /*request_router*/,
storage::v3::View /*view*/) {
// TODO(jbajic) Fix bolt communication
return {storage::v3::Error::DELETED_OBJECT};
MG_ASSERT(false, "Path is unimplemented!");
return {};
}
storage::v3::Result<Value> ToBoltValue(const query::v2::TypedValue &value,
const msgs::ShardRequestManagerInterface *shard_request_manager,
storage::v3::View view) {
Value ToBoltValue(const query::v2::TypedValue &value, const query::v2::RequestRouterInterface *request_router,
storage::v3::View view) {
switch (value.type()) {
case query::v2::TypedValue::Type::Null:
return Value();
return {};
case query::v2::TypedValue::Type::Bool:
return Value(value.ValueBool());
return {value.ValueBool()};
case query::v2::TypedValue::Type::Int:
return Value(value.ValueInt());
return {value.ValueInt()};
case query::v2::TypedValue::Type::Double:
return Value(value.ValueDouble());
return {value.ValueDouble()};
case query::v2::TypedValue::Type::String:
return Value(std::string(value.ValueString()));
return {std::string(value.ValueString())};
case query::v2::TypedValue::Type::List: {
std::vector<Value> values;
values.reserve(value.ValueList().size());
for (const auto &v : value.ValueList()) {
auto maybe_value = ToBoltValue(v, shard_request_manager, view);
if (maybe_value.HasError()) return maybe_value.GetError();
values.emplace_back(std::move(*maybe_value));
auto value = ToBoltValue(v, request_router, view);
values.emplace_back(std::move(value));
}
return Value(std::move(values));
return {std::move(values)};
}
case query::v2::TypedValue::Type::Map: {
std::map<std::string, Value> map;
for (const auto &kv : value.ValueMap()) {
auto maybe_value = ToBoltValue(kv.second, shard_request_manager, view);
if (maybe_value.HasError()) return maybe_value.GetError();
map.emplace(kv.first, std::move(*maybe_value));
auto value = ToBoltValue(kv.second, request_router, view);
map.emplace(kv.first, std::move(value));
}
return Value(std::move(map));
return {std::move(map)};
}
case query::v2::TypedValue::Type::Vertex: {
auto maybe_vertex = ToBoltVertex(value.ValueVertex(), shard_request_manager, view);
if (maybe_vertex.HasError()) return maybe_vertex.GetError();
return Value(std::move(*maybe_vertex));
auto vertex = ToBoltVertex(value.ValueVertex(), request_router, view);
return {std::move(vertex)};
}
case query::v2::TypedValue::Type::Edge: {
auto maybe_edge = ToBoltEdge(value.ValueEdge(), shard_request_manager, view);
if (maybe_edge.HasError()) return maybe_edge.GetError();
return Value(std::move(*maybe_edge));
auto edge = ToBoltEdge(value.ValueEdge(), request_router, view);
return {std::move(edge)};
}
case query::v2::TypedValue::Type::Path: {
auto maybe_path = ToBoltPath(value.ValuePath(), shard_request_manager, view);
if (maybe_path.HasError()) return maybe_path.GetError();
return Value(std::move(*maybe_path));
auto path = ToBoltPath(value.ValuePath(), request_router, view);
return {std::move(path)};
}
case query::v2::TypedValue::Type::Date:
return Value(value.ValueDate());
return {value.ValueDate()};
case query::v2::TypedValue::Type::LocalTime:
return Value(value.ValueLocalTime());
return {value.ValueLocalTime()};
case query::v2::TypedValue::Type::LocalDateTime:
return Value(value.ValueLocalDateTime());
return {value.ValueLocalDateTime()};
case query::v2::TypedValue::Type::Duration:
return Value(value.ValueDuration());
return {value.ValueDuration()};
}
}

View File

@ -15,11 +15,12 @@
#include "communication/bolt/v1/value.hpp"
#include "coordinator/shard_map.hpp"
#include "query/v2/bindings/typed_value.hpp"
#include "query/v2/shard_request_manager.hpp"
#include "query/v2/request_router.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/result.hpp"
#include "storage/v3/shard.hpp"
#include "storage/v3/view.hpp"
#include "utils/result.hpp"
namespace memgraph::storage::v3 {
class EdgeAccessor;
@ -31,40 +32,37 @@ namespace memgraph::glue::v2 {
/// @param storage::v3::VertexAccessor for converting to
/// communication::bolt::Vertex.
/// @param msgs::ShardRequestManagerInterface *shard_request_manager getting label and property names.
/// @param query::v2::RequestRouterInterface *request_router getting label and property names.
/// @param storage::v3::View for deciding which vertex attributes are visible.
///
/// @throw std::bad_alloc
storage::v3::Result<communication::bolt::Vertex> ToBoltVertex(
const storage::v3::VertexAccessor &vertex, const msgs::ShardRequestManagerInterface *shard_request_manager,
storage::v3::View view);
communication::bolt::Vertex ToBoltVertex(const storage::v3::VertexAccessor &vertex,
const query::v2::RequestRouterInterface *request_router,
storage::v3::View view);
/// @param storage::v3::EdgeAccessor for converting to communication::bolt::Edge.
/// @param msgs::ShardRequestManagerInterface *shard_request_manager getting edge type and property names.
/// @param query::v2::RequestRouterInterface *request_router getting edge type and property names.
/// @param storage::v3::View for deciding which edge attributes are visible.
///
/// @throw std::bad_alloc
storage::v3::Result<communication::bolt::Edge> ToBoltEdge(
const storage::v3::EdgeAccessor &edge, const msgs::ShardRequestManagerInterface *shard_request_manager,
storage::v3::View view);
communication::bolt::Edge ToBoltEdge(const storage::v3::EdgeAccessor &edge,
const query::v2::RequestRouterInterface *request_router, storage::v3::View view);
/// @param query::v2::Path for converting to communication::bolt::Path.
/// @param msgs::ShardRequestManagerInterface *shard_request_manager ToBoltVertex and ToBoltEdge.
/// @param query::v2::RequestRouterInterface *request_router ToBoltVertex and ToBoltEdge.
/// @param storage::v3::View for ToBoltVertex and ToBoltEdge.
///
/// @throw std::bad_alloc
storage::v3::Result<communication::bolt::Path> ToBoltPath(
const query::v2::accessors::Path &path, const msgs::ShardRequestManagerInterface *shard_request_manager,
storage::v3::View view);
communication::bolt::Path ToBoltPath(const query::v2::accessors::Path &path,
const query::v2::RequestRouterInterface *request_router, storage::v3::View view);
/// @param query::v2::TypedValue for converting to communication::bolt::Value.
/// @param msgs::ShardRequestManagerInterface *shard_request_manager ToBoltVertex and ToBoltEdge.
/// @param query::v2::RequestRouterInterface *request_router ToBoltVertex and ToBoltEdge.
/// @param storage::v3::View for ToBoltVertex and ToBoltEdge.
///
/// @throw std::bad_alloc
storage::v3::Result<communication::bolt::Value> ToBoltValue(
const query::v2::TypedValue &value, const msgs::ShardRequestManagerInterface *shard_request_manager,
storage::v3::View view);
communication::bolt::Value ToBoltValue(const query::v2::TypedValue &value,
const query::v2::RequestRouterInterface *request_router, storage::v3::View view);
query::v2::TypedValue ToTypedValue(const communication::bolt::Value &value);
@ -74,8 +72,7 @@ storage::v3::PropertyValue ToPropertyValue(const communication::bolt::Value &val
communication::bolt::Value ToBoltValue(msgs::Value value);
communication::bolt::Value ToBoltValue(msgs::Value value,
const msgs::ShardRequestManagerInterface *shard_request_manager,
communication::bolt::Value ToBoltValue(msgs::Value value, const query::v2::RequestRouterInterface *request_router,
storage::v3::View view);
} // namespace memgraph::glue::v2

View File

@ -20,6 +20,8 @@
#include <boost/uuid/uuid_generators.hpp>
#include <boost/uuid/uuid_io.hpp>
#include "utils/logging.hpp"
namespace memgraph::io {
struct PartialAddress {
@ -58,18 +60,39 @@ struct Address {
uint16_t last_known_port;
static Address TestAddress(uint16_t port) {
MG_ASSERT(port <= 255);
return Address{
.unique_id = boost::uuids::uuid{boost::uuids::random_generator()()},
.unique_id = boost::uuids::uuid{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, static_cast<unsigned char>(port)},
.last_known_port = port,
};
}
// NB: don't use this in test code because it is non-deterministic
static Address UniqueLocalAddress() {
return Address{
.unique_id = boost::uuids::uuid{boost::uuids::random_generator()()},
};
}
/// `Coordinator`s have constant UUIDs because there is at most one per ip/port pair.
Address ForkLocalCoordinator() {
return Address{
.unique_id = boost::uuids::uuid{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
.last_known_ip = last_known_ip,
.last_known_port = last_known_port,
};
}
/// `ShardManager`s have constant UUIDs because there is at most one per ip/port pair.
Address ForkLocalShardManager() {
return Address{
.unique_id = boost::uuids::uuid{2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
.last_known_ip = last_known_ip,
.last_known_port = last_known_port,
};
}
/// Returns a new ID with the same IP and port but a unique UUID.
Address ForkUniqueAddress() {
return Address{

View File

@ -35,10 +35,13 @@ class Shared {
std::optional<T> item_;
bool consumed_ = false;
bool waiting_ = false;
std::function<bool()> simulator_notifier_ = nullptr;
bool filled_ = false;
std::function<bool()> wait_notifier_ = nullptr;
std::function<void()> fill_notifier_ = nullptr;
public:
explicit Shared(std::function<bool()> simulator_notifier) : simulator_notifier_(simulator_notifier) {}
explicit Shared(std::function<bool()> wait_notifier, std::function<void()> fill_notifier)
: wait_notifier_(wait_notifier), fill_notifier_(fill_notifier) {}
Shared() = default;
Shared(Shared &&) = delete;
Shared &operator=(Shared &&) = delete;
@ -64,8 +67,7 @@ class Shared {
waiting_ = true;
while (!item_) {
bool simulator_progressed = false;
if (simulator_notifier_) [[unlikely]] {
if (wait_notifier_) [[unlikely]] {
// We can't hold our own lock while notifying
// the simulator because notifying the simulator
// involves acquiring the simulator's mutex
@ -77,7 +79,7 @@ class Shared {
// so we have to get out of its way to avoid
// a cyclical deadlock.
lock.unlock();
simulator_progressed = std::invoke(simulator_notifier_);
std::invoke(wait_notifier_);
lock.lock();
if (item_) {
// item may have been filled while we
@ -85,8 +87,7 @@ class Shared {
// the simulator of our waiting_ status.
break;
}
}
if (!simulator_progressed) [[likely]] {
} else {
cv_.wait(lock);
}
MG_ASSERT(!consumed_, "Future consumed twice!");
@ -117,11 +118,19 @@ class Shared {
std::unique_lock<std::mutex> lock(mu_);
MG_ASSERT(!consumed_, "Promise filled after it was already consumed!");
MG_ASSERT(!item_, "Promise filled twice!");
MG_ASSERT(!filled_, "Promise filled twice!");
item_ = item;
filled_ = true;
} // lock released before condition variable notification
if (fill_notifier_) {
spdlog::trace("calling fill notifier");
std::invoke(fill_notifier_);
} else {
spdlog::trace("not calling fill notifier");
}
cv_.notify_all();
}
@ -253,8 +262,9 @@ std::pair<Future<T>, Promise<T>> FuturePromisePair() {
}
template <typename T>
std::pair<Future<T>, Promise<T>> FuturePromisePairWithNotifier(std::function<bool()> simulator_notifier) {
std::shared_ptr<details::Shared<T>> shared = std::make_shared<details::Shared<T>>(simulator_notifier);
std::pair<Future<T>, Promise<T>> FuturePromisePairWithNotifications(std::function<bool()> wait_notifier,
std::function<void()> fill_notifier) {
std::shared_ptr<details::Shared<T>> shared = std::make_shared<details::Shared<T>>(wait_notifier, fill_notifier);
Future<T> future = Future<T>(shared);
Promise<T> promise = Promise<T>(shared);
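A hedged usage sketch of the renamed pair constructor; the include path is hypothetical, and it assumes Wait() on an rvalue future returns the filled value, as at the call sites elsewhere in this commit.

// Usage sketch (assumptions: header path, Future::Wait()/Promise::Fill semantics as above).
#include <cstdio>
#include "io/future.hpp"

int main() {
  auto [future, promise] = memgraph::io::FuturePromisePairWithNotifications<int>(
      /*wait_notifier=*/nullptr,                      // only the simulator installs this
      /*fill_notifier=*/[] { std::puts("filled"); }); // invoked right after Fill
  promise.Fill(42);
  const int value = std::move(future).Wait();
  return value == 42 ? 0 : 1;
}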

View File

@ -31,9 +31,10 @@ class LocalTransport {
: local_transport_handle_(std::move(local_transport_handle)) {}
template <Message RequestT, Message ResponseT>
ResponseFuture<ResponseT> Request(Address to_address, Address from_address, RequestT request, Duration timeout) {
return local_transport_handle_->template SubmitRequest<RequestT, ResponseT>(to_address, from_address,
std::move(request), timeout);
ResponseFuture<ResponseT> Request(Address to_address, Address from_address, RequestT request,
std::function<void()> fill_notifier, Duration timeout) {
return local_transport_handle_->template SubmitRequest<RequestT, ResponseT>(
to_address, from_address, std::move(request), timeout, fill_notifier);
}
template <Message... Ms>

View File

@ -140,8 +140,12 @@ class LocalTransportHandle {
template <Message RequestT, Message ResponseT>
ResponseFuture<ResponseT> SubmitRequest(Address to_address, Address from_address, RequestT &&request,
Duration timeout) {
auto [future, promise] = memgraph::io::FuturePromisePair<ResponseResult<ResponseT>>();
Duration timeout, std::function<void()> fill_notifier) {
auto [future, promise] = memgraph::io::FuturePromisePairWithNotifications<ResponseResult<ResponseT>>(
// set null notifier for when the Future::Wait is called
nullptr,
// set notifier for when Promise::Fill is called
std::forward<std::function<void()>>(fill_notifier));
const bool port_matches = to_address.last_known_port == from_address.last_known_port;
const bool ip_matches = to_address.last_known_ip == from_address.last_known_ip;

View File

@ -13,6 +13,7 @@
#include <chrono>
#include <cmath>
#include <compare>
#include <unordered_map>
#include <boost/core/demangle.hpp>
@ -39,6 +40,8 @@ struct LatencyHistogramSummary {
Duration p100;
Duration sum;
friend bool operator==(const LatencyHistogramSummary &lhs, const LatencyHistogramSummary &rhs) = default;
friend std::ostream &operator<<(std::ostream &in, const LatencyHistogramSummary &histo) {
in << "{ \"count\": " << histo.count;
in << ", \"p0\": " << histo.p0.count();
@ -80,6 +83,8 @@ struct LatencyHistogramSummaries {
return output;
}
friend bool operator==(const LatencyHistogramSummaries &lhs, const LatencyHistogramSummaries &rhs) = default;
friend std::ostream &operator<<(std::ostream &in, const LatencyHistogramSummaries &histo) {
using memgraph::utils::print_helpers::operator<<;
in << histo.latencies;

src/io/notifier.hpp (new file, 93 lines)
View File

@ -0,0 +1,93 @@
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#pragma once
#include <condition_variable>
#include <functional>
#include <mutex>
#include <optional>
#include <vector>
namespace memgraph::io {
class ReadinessToken {
size_t id_;
public:
explicit ReadinessToken(size_t id) : id_(id) {}
size_t GetId() const { return id_; }
};
class Inner {
std::condition_variable cv_;
std::mutex mu_;
std::vector<ReadinessToken> ready_;
std::optional<std::function<bool()>> tick_simulator_;
public:
void Notify(ReadinessToken readiness_token) {
{
std::unique_lock<std::mutex> lock(mu_);
ready_.emplace_back(readiness_token);
} // mutex dropped
cv_.notify_all();
}
ReadinessToken Await() {
std::unique_lock<std::mutex> lock(mu_);
while (ready_.empty()) {
if (tick_simulator_) [[unlikely]] {
// This avoids a deadlock in a similar way that
// Future::Wait will release its mutex while
// interacting with the simulator, due to
// the fact that the simulator may cause
// notifications that we are interested in.
lock.unlock();
std::invoke(tick_simulator_.value());
lock.lock();
} else {
cv_.wait(lock);
}
}
ReadinessToken ret = ready_.back();
ready_.pop_back();
return ret;
}
void InstallSimulatorTicker(std::function<bool()> tick_simulator) {
std::unique_lock<std::mutex> lock(mu_);
tick_simulator_ = tick_simulator;
}
};
class Notifier {
std::shared_ptr<Inner> inner_;
public:
Notifier() : inner_(std::make_shared<Inner>()) {}
Notifier(const Notifier &) = default;
Notifier &operator=(const Notifier &) = default;
Notifier(Notifier &&old) = default;
Notifier &operator=(Notifier &&old) = default;
~Notifier() = default;
void Notify(ReadinessToken readiness_token) const { inner_->Notify(readiness_token); }
ReadinessToken Await() const { return inner_->Await(); }
void InstallSimulatorTicker(std::function<bool()> tick_simulator) { inner_->InstallSimulatorTicker(tick_simulator); }
};
} // namespace memgraph::io
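A small usage sketch of the new Notifier and ReadinessToken (include path assumed from the file location above): one thread signals a token, another blocks in Await until it arrives.

// Usage sketch (assumption: "io/notifier.hpp" resolves relative to src/).
#include <thread>
#include "io/notifier.hpp"

int main() {
  memgraph::io::Notifier notifier;
  std::thread producer([notifier] { notifier.Notify(memgraph::io::ReadinessToken{7}); });
  const memgraph::io::ReadinessToken ready = notifier.Await();  // blocks until Notify runs
  producer.join();
  return ready.GetId() == 7 ? 0 : 1;
}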

View File

@ -19,7 +19,6 @@
#include <map>
#include <set>
#include <thread>
#include <unordered_map>
#include <vector>
#include <boost/core/demangle.hpp>
@ -92,33 +91,43 @@ struct ReadResponse {
};
template <class... ReadReturn>
utils::TypeInfoRef TypeInfoFor(const ReadResponse<std::variant<ReadReturn...>> &read_response) {
return TypeInfoForVariant(read_response.read_return);
utils::TypeInfoRef TypeInfoFor(const ReadResponse<std::variant<ReadReturn...>> &response) {
return TypeInfoForVariant(response.read_return);
}
template <class ReadReturn>
utils::TypeInfoRef TypeInfoFor(const ReadResponse<ReadReturn> & /* read_response */) {
utils::TypeInfoRef TypeInfoFor(const ReadResponse<ReadReturn> & /* response */) {
return typeid(ReadReturn);
}
template <class ReadOperation>
utils::TypeInfoRef TypeInfoFor(const ReadRequest<ReadOperation> & /* request */) {
return typeid(ReadOperation);
}
template <class... ReadOperations>
utils::TypeInfoRef TypeInfoFor(const ReadRequest<std::variant<ReadOperations...>> &request) {
return TypeInfoForVariant(request.operation);
}
template <class... WriteReturn>
utils::TypeInfoRef TypeInfoFor(const WriteResponse<std::variant<WriteReturn...>> &write_response) {
return TypeInfoForVariant(write_response.write_return);
utils::TypeInfoRef TypeInfoFor(const WriteResponse<std::variant<WriteReturn...>> &response) {
return TypeInfoForVariant(response.write_return);
}
template <class WriteReturn>
utils::TypeInfoRef TypeInfoFor(const WriteResponse<WriteReturn> & /* write_response */) {
utils::TypeInfoRef TypeInfoFor(const WriteResponse<WriteReturn> & /* response */) {
return typeid(WriteReturn);
}
template <class WriteOperation>
utils::TypeInfoRef TypeInfoFor(const WriteRequest<WriteOperation> & /* write_request */) {
utils::TypeInfoRef TypeInfoFor(const WriteRequest<WriteOperation> & /* request */) {
return typeid(WriteOperation);
}
template <class... WriteOperations>
utils::TypeInfoRef TypeInfoFor(const WriteRequest<std::variant<WriteOperations...>> &write_request) {
return TypeInfoForVariant(write_request.operation);
utils::TypeInfoRef TypeInfoFor(const WriteRequest<std::variant<WriteOperations...>> &request) {
return TypeInfoForVariant(request.operation);
}
/// AppendRequest is a raft-level message that the Leader
@ -182,7 +191,7 @@ struct PendingClientRequest {
struct Leader {
std::map<Address, FollowerTracker> followers;
std::unordered_map<LogIndex, PendingClientRequest> pending_client_requests;
std::map<LogIndex, PendingClientRequest> pending_client_requests;
Time last_broadcast = Time::min();
std::string static ToString() { return "\tLeader \t"; }
@ -683,7 +692,7 @@ class Raft {
return Leader{
.followers = std::move(followers),
.pending_client_requests = std::unordered_map<LogIndex, PendingClientRequest>(),
.pending_client_requests = std::map<LogIndex, PendingClientRequest>(),
};
}
@ -847,7 +856,9 @@ class Raft {
// Leaders are able to immediately respond to the requester with a ReadResponseValue obtained by applying the ReadOperation to the ReplicatedState
std::optional<Role> Handle(Leader & /* variable */, ReadRequest<ReadOperation> &&req, RequestId request_id,
Address from_address) {
Log("handling ReadOperation");
auto type_info = TypeInfoFor(req);
std::string demangled_name = boost::core::demangle(type_info.get().name());
Log("handling ReadOperation<" + demangled_name + ">");
ReadOperation read_operation = req.operation;
ReadResponseValue read_return = replicated_state_.Read(read_operation);

View File

@ -14,10 +14,12 @@
#include <iostream>
#include <optional>
#include <type_traits>
#include <unordered_map>
#include <vector>
#include "io/address.hpp"
#include "io/errors.hpp"
#include "io/notifier.hpp"
#include "io/rsm/raft.hpp"
#include "utils/result.hpp"
@ -36,6 +38,14 @@ using memgraph::io::rsm::WriteRequest;
using memgraph::io::rsm::WriteResponse;
using memgraph::utils::BasicResult;
template <typename RequestT, typename ResponseT>
struct AsyncRequest {
Time start_time;
RequestT request;
Notifier notifier;
ResponseFuture<ResponseT> future;
};
template <typename IoImpl, typename WriteRequestT, typename WriteResponseT, typename ReadRequestT,
typename ReadResponseT>
class RsmClient {
@ -47,23 +57,15 @@ class RsmClient {
/// State for single async read/write operations. In the future this could become a map
/// of async operations that can be accessed via an ID etc...
std::optional<Time> async_read_before_;
std::optional<ResponseFuture<ReadResponse<ReadResponseT>>> async_read_;
ReadRequestT current_read_request_;
std::optional<Time> async_write_before_;
std::optional<ResponseFuture<WriteResponse<WriteResponseT>>> async_write_;
WriteRequestT current_write_request_;
std::unordered_map<size_t, AsyncRequest<ReadRequestT, ReadResponse<ReadResponseT>>> async_reads_;
std::unordered_map<size_t, AsyncRequest<WriteRequestT, WriteResponse<WriteResponseT>>> async_writes_;
void SelectRandomLeader() {
std::uniform_int_distribution<size_t> addr_distrib(0, (server_addrs_.size() - 1));
size_t addr_index = io_.Rand(addr_distrib);
leader_ = server_addrs_[addr_index];
spdlog::debug(
"client NOT redirected to leader server despite our success failing to be processed (it probably was sent to "
"a RSM Candidate) trying a random one at index {} with address {}",
addr_index, leader_.ToString());
spdlog::debug("selecting a random leader at index {} with address {}", addr_index, leader_.ToString());
}
template <typename ResponseT>
@ -91,107 +93,76 @@ class RsmClient {
~RsmClient() = default;
BasicResult<TimedOut, WriteResponseT> SendWriteRequest(WriteRequestT req) {
WriteRequest<WriteRequestT> client_req;
client_req.operation = req;
const Duration overall_timeout = io_.GetDefaultTimeout();
const Time before = io_.Now();
do {
spdlog::debug("client sending WriteRequest to Leader {}", leader_.ToString());
ResponseFuture<WriteResponse<WriteResponseT>> response_future =
io_.template Request<WriteRequest<WriteRequestT>, WriteResponse<WriteResponseT>>(leader_, client_req);
ResponseResult<WriteResponse<WriteResponseT>> response_result = std::move(response_future).Wait();
if (response_result.HasError()) {
spdlog::debug("client timed out while trying to communicate with leader server {}", leader_.ToString());
return response_result.GetError();
}
ResponseEnvelope<WriteResponse<WriteResponseT>> &&response_envelope = std::move(response_result.GetValue());
WriteResponse<WriteResponseT> &&write_response = std::move(response_envelope.message);
if (write_response.success) {
return std::move(write_response.write_return);
}
PossiblyRedirectLeader(write_response);
} while (io_.Now() < before + overall_timeout);
return TimedOut{};
Notifier notifier;
const ReadinessToken readiness_token{0};
SendAsyncWriteRequest(req, notifier, readiness_token);
auto poll_result = AwaitAsyncWriteRequest(readiness_token);
while (!poll_result) {
poll_result = AwaitAsyncWriteRequest(readiness_token);
}
return poll_result.value();
}
BasicResult<TimedOut, ReadResponseT> SendReadRequest(ReadRequestT req) {
ReadRequest<ReadRequestT> read_req;
read_req.operation = req;
const Duration overall_timeout = io_.GetDefaultTimeout();
const Time before = io_.Now();
do {
spdlog::debug("client sending ReadRequest to Leader {}", leader_.ToString());
ResponseFuture<ReadResponse<ReadResponseT>> get_response_future =
io_.template Request<ReadRequest<ReadRequestT>, ReadResponse<ReadResponseT>>(leader_, read_req);
// receive response
ResponseResult<ReadResponse<ReadResponseT>> get_response_result = std::move(get_response_future).Wait();
if (get_response_result.HasError()) {
spdlog::debug("client timed out while trying to communicate with leader server {}", leader_.ToString());
return get_response_result.GetError();
}
ResponseEnvelope<ReadResponse<ReadResponseT>> &&get_response_envelope = std::move(get_response_result.GetValue());
ReadResponse<ReadResponseT> &&read_get_response = std::move(get_response_envelope.message);
if (read_get_response.success) {
return std::move(read_get_response.read_return);
}
PossiblyRedirectLeader(read_get_response);
} while (io_.Now() < before + overall_timeout);
return TimedOut{};
Notifier notifier;
const ReadinessToken readiness_token{0};
SendAsyncReadRequest(req, notifier, readiness_token);
auto poll_result = AwaitAsyncReadRequest(readiness_token);
while (!poll_result) {
poll_result = AwaitAsyncReadRequest(readiness_token);
}
return poll_result.value();
}
/// AsyncRead methods
void SendAsyncReadRequest(const ReadRequestT &req) {
MG_ASSERT(!async_read_);
void SendAsyncReadRequest(const ReadRequestT &req, Notifier notifier, ReadinessToken readiness_token) {
ReadRequest<ReadRequestT> read_req = {.operation = req};
if (!async_read_before_) {
async_read_before_ = io_.Now();
}
current_read_request_ = std::move(req);
async_read_ = io_.template Request<ReadRequest<ReadRequestT>, ReadResponse<ReadResponseT>>(leader_, read_req);
AsyncRequest<ReadRequestT, ReadResponse<ReadResponseT>> async_request{
.start_time = io_.Now(),
.request = std::move(req),
.notifier = notifier,
.future = io_.template RequestWithNotification<ReadRequest<ReadRequestT>, ReadResponse<ReadResponseT>>(
leader_, read_req, notifier, readiness_token),
};
async_reads_.emplace(readiness_token.GetId(), std::move(async_request));
}
std::optional<BasicResult<TimedOut, ReadResponseT>> PollAsyncReadRequest() {
MG_ASSERT(async_read_);
void ResendAsyncReadRequest(const ReadinessToken &readiness_token) {
auto &async_request = async_reads_.at(readiness_token.GetId());
if (!async_read_->IsReady()) {
ReadRequest<ReadRequestT> read_req = {.operation = async_request.request};
async_request.future = io_.template RequestWithNotification<ReadRequest<ReadRequestT>, ReadResponse<ReadResponseT>>(
leader_, read_req, async_request.notifier, readiness_token);
}
std::optional<BasicResult<TimedOut, ReadResponseT>> PollAsyncReadRequest(const ReadinessToken &readiness_token) {
auto &async_request = async_reads_.at(readiness_token.GetId());
if (!async_request.future.IsReady()) {
return std::nullopt;
}
return AwaitAsyncReadRequest();
return AwaitAsyncReadRequest(readiness_token);
}
std::optional<BasicResult<TimedOut, ReadResponseT>> AwaitAsyncReadRequest() {
ResponseResult<ReadResponse<ReadResponseT>> get_response_result = std::move(*async_read_).Wait();
async_read_.reset();
std::optional<BasicResult<TimedOut, ReadResponseT>> AwaitAsyncReadRequest(const ReadinessToken &readiness_token) {
auto &async_request = async_reads_.at(readiness_token.GetId());
ResponseResult<ReadResponse<ReadResponseT>> get_response_result = std::move(async_request.future).Wait();
const Duration overall_timeout = io_.GetDefaultTimeout();
const bool past_time_out = io_.Now() < *async_read_before_ + overall_timeout;
const bool past_time_out = io_.Now() > async_request.start_time + overall_timeout;
const bool result_has_error = get_response_result.HasError();
if (result_has_error && past_time_out) {
// TODO static assert the exact type of error.
spdlog::debug("client timed out while trying to communicate with leader server {}", leader_.ToString());
async_read_before_ = std::nullopt;
async_reads_.erase(readiness_token.GetId());
return TimedOut{};
}
if (!result_has_error) {
ResponseEnvelope<ReadResponse<ReadResponseT>> &&get_response_envelope = std::move(get_response_result.GetValue());
ReadResponse<ReadResponseT> &&read_get_response = std::move(get_response_envelope.message);
@ -199,54 +170,69 @@ class RsmClient {
PossiblyRedirectLeader(read_get_response);
if (read_get_response.success) {
async_read_before_ = std::nullopt;
async_reads_.erase(readiness_token.GetId());
spdlog::debug("returning read_return for RSM request");
return std::move(read_get_response.read_return);
}
SendAsyncReadRequest(current_read_request_);
} else if (result_has_error) {
} else {
SelectRandomLeader();
SendAsyncReadRequest(current_read_request_);
}
ResendAsyncReadRequest(readiness_token);
return std::nullopt;
}
/// AsyncWrite methods
void SendAsyncWriteRequest(const WriteRequestT &req) {
MG_ASSERT(!async_write_);
void SendAsyncWriteRequest(const WriteRequestT &req, Notifier notifier, ReadinessToken readiness_token) {
WriteRequest<WriteRequestT> write_req = {.operation = req};
if (!async_write_before_) {
async_write_before_ = io_.Now();
}
current_write_request_ = std::move(req);
async_write_ = io_.template Request<WriteRequest<WriteRequestT>, WriteResponse<WriteResponseT>>(leader_, write_req);
AsyncRequest<WriteRequestT, WriteResponse<WriteResponseT>> async_request{
.start_time = io_.Now(),
.request = std::move(req),
.notifier = notifier,
.future = io_.template RequestWithNotification<WriteRequest<WriteRequestT>, WriteResponse<WriteResponseT>>(
leader_, write_req, notifier, readiness_token),
};
async_writes_.emplace(readiness_token.GetId(), std::move(async_request));
}
std::optional<BasicResult<TimedOut, WriteResponseT>> PollAsyncWriteRequest() {
MG_ASSERT(async_write_);
void ResendAsyncWriteRequest(const ReadinessToken &readiness_token) {
auto &async_request = async_writes_.at(readiness_token.GetId());
if (!async_write_->IsReady()) {
WriteRequest<WriteRequestT> write_req = {.operation = async_request.request};
async_request.future =
io_.template RequestWithNotification<WriteRequest<WriteRequestT>, WriteResponse<WriteResponseT>>(
leader_, write_req, async_request.notifier, readiness_token);
}
std::optional<BasicResult<TimedOut, WriteResponseT>> PollAsyncWriteRequest(const ReadinessToken &readiness_token) {
auto &async_request = async_writes_.at(readiness_token.GetId());
if (!async_request.future.IsReady()) {
return std::nullopt;
}
return AwaitAsyncWriteRequest();
return AwaitAsyncWriteRequest(readiness_token);
}
std::optional<BasicResult<TimedOut, WriteResponseT>> AwaitAsyncWriteRequest() {
ResponseResult<WriteResponse<WriteResponseT>> get_response_result = std::move(*async_write_).Wait();
async_write_.reset();
std::optional<BasicResult<TimedOut, WriteResponseT>> AwaitAsyncWriteRequest(const ReadinessToken &readiness_token) {
auto &async_request = async_writes_.at(readiness_token.GetId());
ResponseResult<WriteResponse<WriteResponseT>> get_response_result = std::move(async_request.future).Wait();
const Duration overall_timeout = io_.GetDefaultTimeout();
const bool past_time_out = io_.Now() < *async_write_before_ + overall_timeout;
const bool past_time_out = io_.Now() > async_request.start_time + overall_timeout;
const bool result_has_error = get_response_result.HasError();
if (result_has_error && past_time_out) {
// TODO static assert the exact type of error.
spdlog::debug("client timed out while trying to communicate with leader server {}", leader_.ToString());
async_write_before_ = std::nullopt;
async_writes_.erase(readiness_token.GetId());
return TimedOut{};
}
if (!result_has_error) {
ResponseEnvelope<WriteResponse<WriteResponseT>> &&get_response_envelope =
std::move(get_response_result.GetValue());
@ -255,14 +241,15 @@ class RsmClient {
PossiblyRedirectLeader(write_get_response);
if (write_get_response.success) {
async_write_before_ = std::nullopt;
async_writes_.erase(readiness_token.GetId());
return std::move(write_get_response.write_return);
}
SendAsyncWriteRequest(current_write_request_);
} else if (result_has_error) {
} else {
SelectRandomLeader();
SendAsyncWriteRequest(current_write_request_);
}
ResendAsyncWriteRequest(readiness_token);
return std::nullopt;
}
};

View File

@ -49,5 +49,10 @@ class Simulator {
}
SimulatorStats Stats() { return simulator_handle_->Stats(); }
std::function<bool()> GetSimulatorTickClosure() {
std::function<bool()> tick_closure = [handle_copy = simulator_handle_] { return handle_copy->MaybeTickSimulator(); };
return tick_closure;
}
};
}; // namespace memgraph::io::simulator

View File

@ -23,6 +23,12 @@ namespace memgraph::io::simulator {
void SimulatorHandle::ShutDown() {
std::unique_lock<std::mutex> lock(mu_);
should_shut_down_ = true;
for (auto it = promises_.begin(); it != promises_.end();) {
auto &[promise_key, dop] = *it;
std::move(dop).promise.TimeOut();
it = promises_.erase(it);
}
can_receive_.clear();
cv_.notify_all();
}
@ -46,52 +52,84 @@ void SimulatorHandle::IncrementServerCountAndWaitForQuiescentState(Address addre
const bool all_servers_blocked = blocked_servers == server_addresses_.size();
if (all_servers_blocked) {
spdlog::trace("quiescent state detected - {} out of {} servers now blocked on receive", blocked_servers,
server_addresses_.size());
return;
}
spdlog::trace("not returning from quiescent because we see {} blocked out of {}", blocked_servers,
server_addresses_.size());
cv_.wait(lock);
}
}
bool SortInFlight(const std::pair<Address, OpaqueMessage> &lhs, const std::pair<Address, OpaqueMessage> &rhs) {
// NB: never sort based on the request ID etc...
// This should only be used from std::stable_sort
// because by comparing on the from_address alone,
// we expect the sender ordering to remain
// deterministic.
const auto &[addr_1, om_1] = lhs;
const auto &[addr_2, om_2] = rhs;
return om_1.from_address < om_2.from_address;
}
bool SimulatorHandle::MaybeTickSimulator() {
std::unique_lock<std::mutex> lock(mu_);
const size_t blocked_servers = blocked_on_receive_.size();
if (blocked_servers < server_addresses_.size()) {
if (should_shut_down_ || blocked_servers < server_addresses_.size()) {
// we only need to advance the simulator when all
// servers have reached a quiescent state, blocked
// on their own futures or receive methods.
return false;
}
// We allow the simulator to progress the state of the system only
// after all servers are blocked on receive.
spdlog::trace("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ simulator tick ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
stats_.simulator_ticks++;
blocked_on_receive_.clear();
cv_.notify_all();
TimeoutPromisesPastDeadline();
bool timed_anything_out = TimeoutPromisesPastDeadline();
if (timed_anything_out) {
spdlog::trace("simulator progressing: timed out a request");
}
const Duration clock_advance = std::chrono::microseconds{time_distrib_(rng_)};
// We don't always want to advance the clock with every message that we deliver, because
// advancing it for every message causes timeouts for any "request path" over a certain
// length. On the other hand, we don't want to simply deliver multiple messages in a single
// simulator tick, because that would reduce the amount of concurrent message mixing that
// naturally occurs in production. The approach here is to take the random clock advance
// modulo a prime number (hopefully avoiding most harmonic effects that a fixed or
// even-sized advance would introduce) and to advance the clock only about half of the
// time.
if (clock_advance.count() % 97 > 49) {
spdlog::trace("simulator progressing: clock advanced by {}", clock_advance.count());
cluster_wide_time_microseconds_ += clock_advance;
stats_.elapsed_time = cluster_wide_time_microseconds_ - config_.start_time;
}
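// Worked example of the "about half of the time" claim (illustrative): clock_advance.count()
// is drawn roughly uniformly, so its remainder modulo 97 is close to uniform over 0..96;
// the condition `% 97 > 49` holds for the 47 remainders 50..96, i.e. the clock advances on
// roughly 47/97 ~= 48% of ticks.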
if (cluster_wide_time_microseconds_ >= config_.abort_time) {
spdlog::error(
"Cluster has executed beyond its configured abort_time, and something may be failing to make progress "
"in an expected amount of time. The SimulatorConfig.rng_seed for this run is {}",
config_.rng_seed);
throw utils::BasicException{"Cluster has executed beyond its configured abort_time"};
}
if (in_flight_.empty()) {
// Return early here because there are no messages to schedule.
// We still tick the clock forward when all servers are blocked
// but there are no in-flight messages to schedule delivery of.
const Duration clock_advance = std::chrono::microseconds{time_distrib_(rng_)};
cluster_wide_time_microseconds_ += clock_advance;
if (cluster_wide_time_microseconds_ >= config_.abort_time) {
if (should_shut_down_) {
return false;
}
spdlog::error(
"Cluster has executed beyond its configured abort_time, and something may be failing to make progress "
"in an expected amount of time.");
throw utils::BasicException{"Cluster has executed beyond its configured abort_time"};
}
return true;
}
if (config_.scramble_messages) {
std::stable_sort(in_flight_.begin(), in_flight_.end(), SortInFlight);
if (config_.scramble_messages && in_flight_.size() > 1) {
// scramble messages
std::uniform_int_distribution<size_t> swap_distrib(0, in_flight_.size() - 1);
const size_t swap_index = swap_distrib(rng_);
@ -120,17 +158,22 @@ bool SimulatorHandle::MaybeTickSimulator() {
if (should_drop || normal_timeout) {
stats_.timed_out_requests++;
dop.promise.TimeOut();
spdlog::trace("simulator timing out request ");
} else {
stats_.total_responses++;
Duration response_latency = cluster_wide_time_microseconds_ - dop.requested_at;
auto type_info = opaque_message.type_info;
dop.promise.Fill(std::move(opaque_message), response_latency);
histograms_.Measure(type_info, response_latency);
spdlog::trace("simulator replying to request");
}
} else if (should_drop) {
// don't add it anywhere, let it drop
spdlog::trace("simulator silently dropping request");
} else {
// otherwise, add the message to can_receive_ so the receiver can pick it up
spdlog::trace("simulator adding message to can_receive_ from {} to {}", opaque_message.from_address.last_known_port,
opaque_message.to_address.last_known_port);
const auto &[om_vec, inserted] =
can_receive_.try_emplace(to_address.ToPartialAddress(), std::vector<OpaqueMessage>());
om_vec->second.emplace_back(std::move(opaque_message));

View File

@ -22,6 +22,8 @@
#include <variant>
#include <vector>
#include <boost/core/demangle.hpp>
#include "io/address.hpp"
#include "io/errors.hpp"
#include "io/message_conversion.hpp"
@ -52,26 +54,29 @@ class SimulatorHandle {
std::set<Address> blocked_on_receive_;
std::set<Address> server_addresses_;
std::mt19937 rng_;
std::uniform_int_distribution<int> time_distrib_{5, 50};
std::uniform_int_distribution<int> time_distrib_{0, 30000};
std::uniform_int_distribution<int> drop_distrib_{0, 99};
SimulatorConfig config_;
MessageHistogramCollector histograms_;
RequestId request_id_counter_{0};
void TimeoutPromisesPastDeadline() {
bool TimeoutPromisesPastDeadline() {
bool timed_anything_out = false;
const Time now = cluster_wide_time_microseconds_;
for (auto it = promises_.begin(); it != promises_.end();) {
auto &[promise_key, dop] = *it;
if (dop.deadline < now && config_.perform_timeouts) {
spdlog::info("timing out request from requester {}.", promise_key.requester_address.ToString());
spdlog::trace("timing out request from requester {}.", promise_key.requester_address.ToString());
std::move(dop).promise.TimeOut();
it = promises_.erase(it);
stats_.timed_out_requests++;
timed_anything_out = true;
} else {
++it;
}
}
return timed_anything_out;
}
public:
@ -102,40 +107,48 @@ class SimulatorHandle {
template <Message Request, Message Response>
ResponseFuture<Response> SubmitRequest(Address to_address, Address from_address, Request &&request, Duration timeout,
std::function<bool()> &&maybe_tick_simulator) {
std::function<bool()> &&maybe_tick_simulator,
std::function<void()> &&fill_notifier) {
auto type_info = TypeInfoFor(request);
std::string demangled_name = boost::core::demangle(type_info.get().name());
spdlog::trace("simulator sending request {} to {}", demangled_name, to_address);
auto [future, promise] = memgraph::io::FuturePromisePairWithNotifier<ResponseResult<Response>>(
std::forward<std::function<bool()>>(maybe_tick_simulator));
auto [future, promise] = memgraph::io::FuturePromisePairWithNotifications<ResponseResult<Response>>(
// set notifier for when the Future::Wait is called
std::forward<std::function<bool()>>(maybe_tick_simulator),
// set notifier for when Promise::Fill is called
std::forward<std::function<void()>>(fill_notifier));
std::unique_lock<std::mutex> lock(mu_);
{
std::unique_lock<std::mutex> lock(mu_);
RequestId request_id = ++request_id_counter_;
RequestId request_id = ++request_id_counter_;
const Time deadline = cluster_wide_time_microseconds_ + timeout;
const Time deadline = cluster_wide_time_microseconds_ + timeout;
std::any message(request);
OpaqueMessage om{.to_address = to_address,
.from_address = from_address,
.request_id = request_id,
.message = std::move(message),
.type_info = type_info};
in_flight_.emplace_back(std::make_pair(to_address, std::move(om)));
std::any message(request);
OpaqueMessage om{.to_address = to_address,
.from_address = from_address,
.request_id = request_id,
.message = std::move(message),
.type_info = type_info};
in_flight_.emplace_back(std::make_pair(to_address, std::move(om)));
PromiseKey promise_key{.requester_address = from_address, .request_id = request_id};
OpaquePromise opaque_promise(std::move(promise).ToUnique());
DeadlineAndOpaquePromise dop{
.requested_at = cluster_wide_time_microseconds_,
.deadline = deadline,
.promise = std::move(opaque_promise),
};
PromiseKey promise_key{.requester_address = from_address, .request_id = request_id};
OpaquePromise opaque_promise(std::move(promise).ToUnique());
DeadlineAndOpaquePromise dop{
.requested_at = cluster_wide_time_microseconds_,
.deadline = deadline,
.promise = std::move(opaque_promise),
};
MG_ASSERT(!promises_.contains(promise_key));
MG_ASSERT(!promises_.contains(promise_key));
promises_.emplace(std::move(promise_key), std::move(dop));
promises_.emplace(std::move(promise_key), std::move(dop));
stats_.total_messages++;
stats_.total_requests++;
stats_.total_messages++;
stats_.total_requests++;
} // lock dropped here
cv_.notify_all();
@ -146,8 +159,6 @@ class SimulatorHandle {
requires(sizeof...(Ms) > 0) RequestResult<Ms...> Receive(const Address &receiver, Duration timeout) {
std::unique_lock<std::mutex> lock(mu_);
blocked_on_receive_.emplace(receiver);
const Time deadline = cluster_wide_time_microseconds_ + timeout;
auto partial_address = receiver.ToPartialAddress();
@ -164,38 +175,40 @@ class SimulatorHandle {
auto m_opt = std::move(message).Take<Ms...>();
MG_ASSERT(m_opt.has_value(), "Wrong message type received compared to the expected type");
blocked_on_receive_.erase(receiver);
return std::move(m_opt).value();
}
}
lock.unlock();
bool made_progress = MaybeTickSimulator();
lock.lock();
if (!should_shut_down_ && !made_progress) {
if (!should_shut_down_) {
if (!blocked_on_receive_.contains(receiver)) {
blocked_on_receive_.emplace(receiver);
spdlog::trace("blocking receiver {}", receiver.ToPartialAddress().port);
cv_.notify_all();
}
cv_.wait(lock);
}
}
blocked_on_receive_.erase(receiver);
spdlog::trace("timing out receiver {}", receiver.ToPartialAddress().port);
return TimedOut{};
}
template <Message M>
void Send(Address to_address, Address from_address, RequestId request_id, M message) {
spdlog::trace("sending message from {} to {}", from_address.last_known_port, to_address.last_known_port);
auto type_info = TypeInfoFor(message);
std::unique_lock<std::mutex> lock(mu_);
std::any message_any(std::move(message));
OpaqueMessage om{.to_address = to_address,
.from_address = from_address,
.request_id = request_id,
.message = std::move(message_any),
.type_info = type_info};
in_flight_.emplace_back(std::make_pair(std::move(to_address), std::move(om)));
{
std::unique_lock<std::mutex> lock(mu_);
std::any message_any(std::move(message));
OpaqueMessage om{.to_address = to_address,
.from_address = from_address,
.request_id = request_id,
.message = std::move(message_any),
.type_info = type_info};
in_flight_.emplace_back(std::make_pair(std::move(to_address), std::move(om)));
stats_.total_messages++;
stats_.total_messages++;
} // lock dropped before cv notification
cv_.notify_all();
}

View File

@ -13,6 +13,10 @@
#include <cstdint>
#include <fmt/format.h>
#include "io/time.hpp"
namespace memgraph::io::simulator {
struct SimulatorStats {
uint64_t total_messages = 0;
@ -21,5 +25,22 @@ struct SimulatorStats {
uint64_t total_requests = 0;
uint64_t total_responses = 0;
uint64_t simulator_ticks = 0;
Duration elapsed_time;
friend bool operator==(const SimulatorStats & /* lhs */, const SimulatorStats & /* rhs */) = default;
friend std::ostream &operator<<(std::ostream &in, const SimulatorStats &stats) {
auto elapsed_ms = std::chrono::duration_cast<std::chrono::milliseconds>(stats.elapsed_time).count();
std::string formated = fmt::format(
"SimulatorStats {{ total_messages: {}, dropped_messages: {}, timed_out_requests: {}, total_requests: {}, "
"total_responses: {}, simulator_ticks: {}, elapsed_time: {}ms }}",
stats.total_messages, stats.dropped_messages, stats.timed_out_requests, stats.total_requests,
stats.total_responses, stats.simulator_ticks, elapsed_ms);
in << formated;
return in;
}
};
}; // namespace memgraph::io::simulator

View File

@ -15,6 +15,7 @@
#include <utility>
#include "io/address.hpp"
#include "io/notifier.hpp"
#include "io/simulator/simulator_handle.hpp"
#include "io/time.hpp"
@ -33,11 +34,12 @@ class SimulatorTransport {
: simulator_handle_(simulator_handle), address_(address), rng_(std::mt19937{seed}) {}
template <Message RequestT, Message ResponseT>
ResponseFuture<ResponseT> Request(Address to_address, Address from_address, RequestT request, Duration timeout) {
std::function<bool()> maybe_tick_simulator = [this] { return simulator_handle_->MaybeTickSimulator(); };
ResponseFuture<ResponseT> Request(Address to_address, Address from_address, RequestT request,
std::function<void()> notification, Duration timeout) {
std::function<bool()> tick_simulator = [handle_copy = simulator_handle_] { return handle_copy->MaybeTickSimulator(); };
return simulator_handle_->template SubmitRequest<RequestT, ResponseT>(to_address, from_address, std::move(request),
timeout, std::move(maybe_tick_simulator));
return simulator_handle_->template SubmitRequest<RequestT, ResponseT>(
to_address, from_address, std::move(request), timeout, std::move(tick_simulator), std::move(notification));
}
template <Message... Ms>

View File

@ -20,6 +20,7 @@
#include "io/errors.hpp"
#include "io/future.hpp"
#include "io/message_histogram_collector.hpp"
#include "io/notifier.hpp"
#include "io/time.hpp"
#include "utils/result.hpp"
@ -84,7 +85,9 @@ class Io {
template <Message RequestT, Message ResponseT>
ResponseFuture<ResponseT> RequestWithTimeout(Address address, RequestT request, Duration timeout) {
const Address from_address = address_;
return implementation_.template Request<RequestT, ResponseT>(address, from_address, request, timeout);
std::function<void()> fill_notifier = nullptr;
return implementation_.template Request<RequestT, ResponseT>(address, from_address, request, fill_notifier,
timeout);
}
/// Issue a request that times out after the default timeout. This tends
@ -93,7 +96,30 @@ class Io {
ResponseFuture<ResponseT> Request(Address to_address, RequestT request) {
const Duration timeout = default_timeout_;
const Address from_address = address_;
return implementation_.template Request<RequestT, ResponseT>(to_address, from_address, std::move(request), timeout);
std::function<void()> fill_notifier = nullptr;
return implementation_.template Request<RequestT, ResponseT>(to_address, from_address, std::move(request),
fill_notifier, timeout);
}
/// Issue a request that will notify a Notifier when it is filled or times out.
template <Message RequestT, Message ResponseT>
ResponseFuture<ResponseT> RequestWithNotification(Address to_address, RequestT request, Notifier notifier,
ReadinessToken readiness_token) {
const Duration timeout = default_timeout_;
const Address from_address = address_;
std::function<void()> fill_notifier = [notifier, readiness_token]() { notifier.Notify(readiness_token); };
return implementation_.template Request<RequestT, ResponseT>(to_address, from_address, std::move(request),
fill_notifier, timeout);
}
/// Issue a request that will notify a Notifier when it is filled or times out.
template <Message RequestT, Message ResponseT>
ResponseFuture<ResponseT> RequestWithNotificationAndTimeout(Address to_address, RequestT request, Notifier notifier,
ReadinessToken readiness_token, Duration timeout) {
const Address from_address = address_;
std::function<void()> fill_notifier = [notifier, readiness_token]() { notifier.Notify(readiness_token); };
return implementation_.template Request<RequestT, ResponseT>(to_address, from_address, std::move(request),
fill_notifier, timeout);
}
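// A possible call site, sketched as a comment (assumed usage; Notifier and ReadinessToken
// come from io/notifier.hpp, and the request/response type names below are hypothetical):
//   Notifier notifier;
//   ReadinessToken token{0};
//   auto future = io.RequestWithNotification<StorageReadRequest, StorageReadResponse>(
//       server_address, request, notifier, token);
//   // When the response is filled or the request times out, the fill_notifier lambda above
//   // calls notifier.Notify(token); a poller can then match readiness by token.GetId().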
/// Wait for an explicit number of microseconds for a request of one of the
@ -137,7 +163,14 @@ class Io {
Address GetAddress() { return address_; }
void SetAddress(Address address) { address_ = address; }
Io<I> ForkLocal() { return Io(implementation_, address_.ForkUniqueAddress()); }
Io<I> ForkLocal(boost::uuids::uuid uuid) {
Address new_address{
.unique_id = uuid,
.last_known_ip = address_.last_known_ip,
.last_known_port = address_.last_known_port,
};
return Io(implementation_, new_address);
}
LatencyHistogramSummaries ResponseLatencies() { return implementation_.ResponseLatencies(); }
};

View File

@ -42,6 +42,7 @@ struct MachineConfig {
boost::asio::ip::address listen_ip;
uint16_t listen_port;
size_t shard_worker_threads = std::max(static_cast<unsigned int>(1), std::thread::hardware_concurrency());
bool sync_message_handling = false;
};
} // namespace memgraph::machine_manager

View File

@ -78,10 +78,10 @@ class MachineManager {
MachineManager(io::Io<IoImpl> io, MachineConfig config, Coordinator coordinator)
: io_(io),
config_(config),
coordinator_address_(io.GetAddress().ForkUniqueAddress()),
shard_manager_{io.ForkLocal(), config.shard_worker_threads, coordinator_address_} {
auto coordinator_io = io.ForkLocal();
coordinator_io.SetAddress(coordinator_address_);
coordinator_address_(io.GetAddress().ForkLocalCoordinator()),
shard_manager_{io.ForkLocal(io.GetAddress().ForkLocalShardManager().unique_id), config.shard_worker_threads,
coordinator_address_} {
auto coordinator_io = io.ForkLocal(coordinator_address_.unique_id);
CoordinatorWorker coordinator_worker{coordinator_io, coordinator_queue_, coordinator};
coordinator_handle_ = std::jthread([coordinator = std::move(coordinator_worker)]() mutable { coordinator.Run(); });
}
@ -101,11 +101,23 @@ class MachineManager {
Address CoordinatorAddress() { return coordinator_address_; }
void Run() {
while (!io_.ShouldShutDown()) {
while (true) {
MaybeBlockOnSyncHandling();
if (io_.ShouldShutDown()) {
break;
}
const auto now = io_.Now();
uint64_t now_us = now.time_since_epoch().count();
uint64_t next_us = next_cron_.time_since_epoch().count();
if (now >= next_cron_) {
spdlog::info("now {} >= next_cron_ {}", now_us, next_us);
next_cron_ = Cron();
} else {
spdlog::info("now {} < next_cron_ {}", now_us, next_us);
}
Duration receive_timeout = std::max(next_cron_, now) - now;
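// Note (illustrative): std::max(next_cron_, now) clamps the timeout at zero, so the
// receive that uses receive_timeout never blocks past an already-due Cron deadline.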
@ -194,10 +206,27 @@ class MachineManager {
}
private:
// This method exists for controlling concurrency
// during deterministic simulation testing.
void MaybeBlockOnSyncHandling() {
if (!config_.sync_message_handling) {
return;
}
// block on coordinator
coordinator_queue_.BlockOnQuiescence();
// block on shards
shard_manager_.BlockOnQuiescence();
}
Time Cron() {
spdlog::info("running MachineManager::Cron, address {}", io_.GetAddress().ToString());
coordinator_queue_.Push(coordinator::coordinator_worker::Cron{});
return shard_manager_.Cron();
MaybeBlockOnSyncHandling();
Time ret = shard_manager_.Cron();
MaybeBlockOnSyncHandling();
return ret;
}
};

View File

@ -33,6 +33,7 @@
#include <spdlog/sinks/dist_sink.h>
#include <spdlog/sinks/stdout_color_sinks.h>
#include "common/errors.hpp"
#include "communication/bolt/v1/constants.hpp"
#include "communication/websocket/auth.hpp"
#include "communication/websocket/server.hpp"
@ -453,7 +454,7 @@ class BoltSession final : public memgraph::communication::bolt::Session<memgraph
std::map<std::string, memgraph::communication::bolt::Value> Pull(TEncoder *encoder, std::optional<int> n,
std::optional<int> qid) override {
TypedValueResultStream stream(encoder, interpreter_.GetShardRequestManager());
TypedValueResultStream stream(encoder, interpreter_.GetRequestRouter());
return PullResults(stream, n, qid);
}
@ -480,20 +481,9 @@ class BoltSession final : public memgraph::communication::bolt::Session<memgraph
const auto &summary = interpreter_.Pull(&stream, n, qid);
std::map<std::string, memgraph::communication::bolt::Value> decoded_summary;
for (const auto &kv : summary) {
auto maybe_value = memgraph::glue::v2::ToBoltValue(kv.second, interpreter_.GetShardRequestManager(),
memgraph::storage::v3::View::NEW);
if (maybe_value.HasError()) {
switch (maybe_value.GetError()) {
case memgraph::storage::v3::Error::DELETED_OBJECT:
case memgraph::storage::v3::Error::SERIALIZATION_ERROR:
case memgraph::storage::v3::Error::VERTEX_HAS_EDGES:
case memgraph::storage::v3::Error::PROPERTIES_DISABLED:
case memgraph::storage::v3::Error::NONEXISTENT_OBJECT:
case memgraph::storage::v3::Error::VERTEX_ALREADY_INSERTED:
throw memgraph::communication::bolt::ClientError("Unexpected storage error when streaming summary.");
}
}
decoded_summary.emplace(kv.first, std::move(*maybe_value));
auto bolt_value = memgraph::glue::v2::ToBoltValue(kv.second, interpreter_.GetRequestRouter(),
memgraph::storage::v3::View::NEW);
decoded_summary.emplace(kv.first, std::move(bolt_value));
}
return decoded_summary;
} catch (const memgraph::query::v2::QueryException &e) {
@ -507,35 +497,22 @@ class BoltSession final : public memgraph::communication::bolt::Session<memgraph
/// before forwarding the calls to original TEncoder.
class TypedValueResultStream {
public:
TypedValueResultStream(TEncoder *encoder, const memgraph::msgs::ShardRequestManagerInterface *shard_request_manager)
: encoder_(encoder), shard_request_manager_(shard_request_manager) {}
TypedValueResultStream(TEncoder *encoder, const memgraph::query::v2::RequestRouterInterface *request_router)
: encoder_(encoder), request_router_(request_router) {}
void Result(const std::vector<memgraph::query::v2::TypedValue> &values) {
std::vector<memgraph::communication::bolt::Value> decoded_values;
decoded_values.reserve(values.size());
for (const auto &v : values) {
auto maybe_value = memgraph::glue::v2::ToBoltValue(v, shard_request_manager_, memgraph::storage::v3::View::NEW);
if (maybe_value.HasError()) {
switch (maybe_value.GetError()) {
case memgraph::storage::v3::Error::DELETED_OBJECT:
throw memgraph::communication::bolt::ClientError("Returning a deleted object as a result.");
case memgraph::storage::v3::Error::NONEXISTENT_OBJECT:
throw memgraph::communication::bolt::ClientError("Returning a nonexistent object as a result.");
case memgraph::storage::v3::Error::VERTEX_HAS_EDGES:
case memgraph::storage::v3::Error::SERIALIZATION_ERROR:
case memgraph::storage::v3::Error::PROPERTIES_DISABLED:
case memgraph::storage::v3::Error::VERTEX_ALREADY_INSERTED:
throw memgraph::communication::bolt::ClientError("Unexpected storage error when streaming results.");
}
}
decoded_values.emplace_back(std::move(*maybe_value));
auto bolt_value = memgraph::glue::v2::ToBoltValue(v, request_router_, memgraph::storage::v3::View::NEW);
decoded_values.emplace_back(std::move(bolt_value));
}
encoder_->MessageRecord(decoded_values);
}
private:
TEncoder *encoder_;
const memgraph::msgs::ShardRequestManagerInterface *shard_request_manager_{nullptr};
const memgraph::query::v2::RequestRouterInterface *request_router_{nullptr};
};
memgraph::query::v2::Interpreter interpreter_;
memgraph::communication::v2::ServerEndpoint endpoint_;

View File

@ -11,7 +11,6 @@ set(mg_query_v2_sources
cypher_query_interpreter.cpp
frontend/semantic/required_privileges.cpp
frontend/stripped.cpp
interpret/awesome_memgraph_functions.cpp
interpreter.cpp
metadata.cpp
plan/operator.cpp
@ -24,7 +23,8 @@ set(mg_query_v2_sources
plan/variable_start_planner.cpp
serialization/property_value.cpp
bindings/typed_value.cpp
accessors.cpp)
accessors.cpp
multiframe.cpp)
find_package(Boost REQUIRED)
@ -34,7 +34,7 @@ target_include_directories(mg-query-v2 PUBLIC ${CMAKE_SOURCE_DIR}/include)
target_include_directories(mg-query-v2 PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/bindings)
target_link_libraries(mg-query-v2 dl cppitertools Boost::headers)
target_link_libraries(mg-query-v2 mg-integrations-pulsar mg-integrations-kafka mg-storage-v3 mg-license mg-utils mg-kvstore mg-memory mg-coordinator)
target_link_libraries(mg-query-v2 mg-expr)
target_link_libraries(mg-query-v2 mg-expr mg-functions)
if(NOT "${MG_PYTHON_PATH}" STREQUAL "")
set(Python3_ROOT_DIR "${MG_PYTHON_PATH}")

View File

@ -10,24 +10,25 @@
// licenses/APL.txt.
#include "query/v2/accessors.hpp"
#include "query/v2/request_router.hpp"
#include "query/v2/requests.hpp"
#include "query/v2/shard_request_manager.hpp"
#include "storage/v3/id_types.hpp"
namespace memgraph::query::v2::accessors {
EdgeAccessor::EdgeAccessor(Edge edge, const msgs::ShardRequestManagerInterface *manager)
: edge(std::move(edge)), manager_(manager) {}
EdgeAccessor::EdgeAccessor(Edge edge, const RequestRouterInterface *request_router)
: edge(std::move(edge)), request_router_(request_router) {}
EdgeTypeId EdgeAccessor::EdgeType() const { return edge.type.id; }
const std::vector<std::pair<PropertyId, Value>> &EdgeAccessor::Properties() const { return edge.properties; }
Value EdgeAccessor::GetProperty(const std::string &prop_name) const {
auto prop_id = manager_->NameToProperty(prop_name);
auto it = std::find_if(edge.properties.begin(), edge.properties.end(), [&](auto &pr) { return prop_id == pr.first; });
if (it == edge.properties.end()) {
auto maybe_prop = request_router_->MaybeNameToProperty(prop_name);
if (!maybe_prop) {
return {};
}
const auto prop_id = *maybe_prop;
auto it = std::find_if(edge.properties.begin(), edge.properties.end(), [&](auto &pr) { return prop_id == pr.first; });
return it->second;
}
@ -35,21 +36,23 @@ const Edge &EdgeAccessor::GetEdge() const { return edge; }
bool EdgeAccessor::IsCycle() const { return edge.src == edge.dst; };
size_t EdgeAccessor::CypherId() const { return edge.id.gid; }
VertexAccessor EdgeAccessor::To() const {
return VertexAccessor(Vertex{edge.dst}, std::vector<std::pair<PropertyId, msgs::Value>>{}, manager_);
return VertexAccessor(Vertex{edge.dst}, std::vector<std::pair<PropertyId, msgs::Value>>{}, request_router_);
}
VertexAccessor EdgeAccessor::From() const {
return VertexAccessor(Vertex{edge.src}, std::vector<std::pair<PropertyId, msgs::Value>>{}, manager_);
return VertexAccessor(Vertex{edge.src}, std::vector<std::pair<PropertyId, msgs::Value>>{}, request_router_);
}
VertexAccessor::VertexAccessor(Vertex v, std::vector<std::pair<PropertyId, Value>> props,
const msgs::ShardRequestManagerInterface *manager)
: vertex(std::move(v)), properties(std::move(props)), manager_(manager) {}
const RequestRouterInterface *request_router)
: vertex(std::move(v)), properties(std::move(props)), request_router_(request_router) {}
VertexAccessor::VertexAccessor(Vertex v, std::map<PropertyId, Value> &&props,
const msgs::ShardRequestManagerInterface *manager)
: vertex(std::move(v)), manager_(manager) {
const RequestRouterInterface *request_router)
: vertex(std::move(v)), request_router_(request_router) {
properties.reserve(props.size());
for (auto &[id, value] : props) {
properties.emplace_back(std::make_pair(id, std::move(value)));
@ -57,8 +60,8 @@ VertexAccessor::VertexAccessor(Vertex v, std::map<PropertyId, Value> &&props,
}
VertexAccessor::VertexAccessor(Vertex v, const std::map<PropertyId, Value> &props,
const msgs::ShardRequestManagerInterface *manager)
: vertex(std::move(v)), manager_(manager) {
const RequestRouterInterface *request_router)
: vertex(std::move(v)), request_router_(request_router) {
properties.reserve(props.size());
for (const auto &[id, value] : props) {
properties.emplace_back(std::make_pair(id, value));
@ -88,7 +91,11 @@ Value VertexAccessor::GetProperty(PropertyId prop_id) const {
// NOLINTNEXTLINE(readability-convert-member-functions-to-static)
Value VertexAccessor::GetProperty(const std::string &prop_name) const {
return GetProperty(manager_->NameToProperty(prop_name));
auto maybe_prop = request_router_->MaybeNameToProperty(prop_name);
if (!maybe_prop) {
return {};
}
return GetProperty(*maybe_prop);
}
msgs::Vertex VertexAccessor::GetVertex() const { return vertex; }

View File

@ -24,24 +24,24 @@
#include "utils/memory.hpp"
#include "utils/memory_tracker.hpp"
namespace memgraph::msgs {
class ShardRequestManagerInterface;
} // namespace memgraph::msgs
namespace memgraph::query::v2 {
class RequestRouterInterface;
} // namespace memgraph::query::v2
namespace memgraph::query::v2::accessors {
using Value = memgraph::msgs::Value;
using Edge = memgraph::msgs::Edge;
using Vertex = memgraph::msgs::Vertex;
using Label = memgraph::msgs::Label;
using PropertyId = memgraph::msgs::PropertyId;
using EdgeTypeId = memgraph::msgs::EdgeTypeId;
using Value = msgs::Value;
using Edge = msgs::Edge;
using Vertex = msgs::Vertex;
using Label = msgs::Label;
using PropertyId = msgs::PropertyId;
using EdgeTypeId = msgs::EdgeTypeId;
class VertexAccessor;
class EdgeAccessor final {
public:
explicit EdgeAccessor(Edge edge, const msgs::ShardRequestManagerInterface *manager);
explicit EdgeAccessor(Edge edge, const RequestRouterInterface *request_router);
[[nodiscard]] EdgeTypeId EdgeType() const;
@ -53,12 +53,7 @@ class EdgeAccessor final {
[[nodiscard]] bool IsCycle() const;
// Dummy function
// NOLINTNEXTLINE(readability-convert-member-functions-to-static)
[[nodiscard]] size_t CypherId() const { return 10; }
// bool HasSrcAccessor const { return src == nullptr; }
// bool HasDstAccessor const { return dst == nullptr; }
[[nodiscard]] size_t CypherId() const;
[[nodiscard]] VertexAccessor To() const;
[[nodiscard]] VertexAccessor From() const;
@ -69,7 +64,7 @@ class EdgeAccessor final {
private:
Edge edge;
const msgs::ShardRequestManagerInterface *manager_;
const RequestRouterInterface *request_router_;
};
class VertexAccessor final {
@ -78,10 +73,10 @@ class VertexAccessor final {
using Label = msgs::Label;
using VertexId = msgs::VertexId;
VertexAccessor(Vertex v, std::vector<std::pair<PropertyId, Value>> props,
const msgs::ShardRequestManagerInterface *manager);
const RequestRouterInterface *request_router);
VertexAccessor(Vertex v, std::map<PropertyId, Value> &&props, const msgs::ShardRequestManagerInterface *manager);
VertexAccessor(Vertex v, const std::map<PropertyId, Value> &props, const msgs::ShardRequestManagerInterface *manager);
VertexAccessor(Vertex v, std::map<PropertyId, Value> &&props, const RequestRouterInterface *request_router);
VertexAccessor(Vertex v, const std::map<PropertyId, Value> &props, const RequestRouterInterface *request_router);
[[nodiscard]] Label PrimaryLabel() const;
@ -98,48 +93,11 @@ class VertexAccessor final {
[[nodiscard]] msgs::Vertex GetVertex() const;
// Dummy function
// NOLINTNEXTLINE(readability-convert-member-functions-to-static)
[[nodiscard]] size_t CypherId() const { return 10; }
[[nodiscard]] size_t InDegree() const { throw utils::NotYetImplemented("InDegree() not yet implemented"); }
// auto InEdges(storage::View view, const std::vector<storage::EdgeTypeId> &edge_types) const
// -> storage::Result<decltype(iter::imap(MakeEdgeAccessor, *impl_.InEdges(view)))> {
// auto maybe_edges = impl_.InEdges(view, edge_types);
// if (maybe_edges.HasError()) return maybe_edges.GetError();
// return iter::imap(MakeEdgeAccessor, std::move(*maybe_edges));
// }
//
// auto InEdges(storage::View view) const { return InEdges(view, {}); }
//
// auto InEdges(storage::View view, const std::vector<storage::EdgeTypeId> &edge_types, const VertexAccessor &dest)
// const
// -> storage::Result<decltype(iter::imap(MakeEdgeAccessor, *impl_.InEdges(view)))> {
// auto maybe_edges = impl_.InEdges(view, edge_types, &dest.impl_);
// if (maybe_edges.HasError()) return maybe_edges.GetError();
// return iter::imap(MakeEdgeAccessor, std::move(*maybe_edges));
// }
//
// auto OutEdges(storage::View view, const std::vector<storage::EdgeTypeId> &edge_types) const
// -> storage::Result<decltype(iter::imap(MakeEdgeAccessor, *impl_.OutEdges(view)))> {
// auto maybe_edges = impl_.OutEdges(view, edge_types);
// if (maybe_edges.HasError()) return maybe_edges.GetError();
// return iter::imap(MakeEdgeAccessor, std::move(*maybe_edges));
// }
//
// auto OutEdges(storage::View view) const { return OutEdges(view, {}); }
//
// auto OutEdges(storage::View view, const std::vector<storage::EdgeTypeId> &edge_types,
// const VertexAccessor &dest) const
// -> storage::Result<decltype(iter::imap(MakeEdgeAccessor, *impl_.OutEdges(view)))> {
// auto maybe_edges = impl_.OutEdges(view, edge_types, &dest.impl_);
// if (maybe_edges.HasError()) return maybe_edges.GetError();
// return iter::imap(MakeEdgeAccessor, std::move(*maybe_edges));
// }
// storage::Result<size_t> InDegree(storage::View view) const { return impl_.InDegree(view); }
//
// storage::Result<size_t> OutDegree(storage::View view) const { return impl_.OutDegree(view); }
//
// NOLINTNEXTLINE(readability-convert-member-functions-to-static)
[[nodiscard]] size_t OutDegree() const { throw utils::NotYetImplemented("OutDegree() not yet implemented"); }
friend bool operator==(const VertexAccessor &lhs, const VertexAccessor &rhs) {
return lhs.vertex == rhs.vertex && lhs.properties == rhs.properties;
@ -150,13 +108,9 @@ class VertexAccessor final {
private:
Vertex vertex;
std::vector<std::pair<PropertyId, Value>> properties;
const msgs::ShardRequestManagerInterface *manager_;
const RequestRouterInterface *request_router_;
};
// inline VertexAccessor EdgeAccessor::To() const { return VertexAccessor(impl_.ToVertex()); }
// inline VertexAccessor EdgeAccessor::From() const { return VertexAccessor(impl_.FromVertex()); }
// Highly mocked interface. Won't work if used.
class Path {
public:
@ -197,7 +151,14 @@ class Path {
friend bool operator==(const Path & /*lhs*/, const Path & /*rhs*/) { return true; };
utils::MemoryResource *GetMemoryResource() { return mem; }
auto &vertices() { return vertices_; }
auto &edges() { return edges_; }
const auto &vertices() const { return vertices_; }
const auto &edges() const { return edges_; }
private:
std::vector<VertexAccessor> vertices_;
std::vector<EdgeAccessor> edges_;
utils::MemoryResource *mem = utils::NewDeleteResource();
};
} // namespace memgraph::query::v2::accessors

View File

@ -21,29 +21,27 @@
#include "query/v2/requests.hpp"
#include "storage/v3/conversions.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/result.hpp"
#include "storage/v3/view.hpp"
namespace memgraph::msgs {
class ShardRequestManagerInterface;
} // namespace memgraph::msgs
namespace memgraph::query::v2 {
inline const auto lam = [](const auto &val) { return ValueToTypedValue(val); };
class RequestRouterInterface;
namespace detail {
class Callable {
public:
auto operator()(const memgraph::storage::v3::PropertyValue &val) const {
return memgraph::storage::v3::PropertyToTypedValue<TypedValue>(val);
auto operator()(const storage::v3::PropertyValue &val) const {
return storage::v3::PropertyToTypedValue<TypedValue>(val);
};
auto operator()(const msgs::Value &val, memgraph::msgs::ShardRequestManagerInterface *manager) const {
return ValueToTypedValue(val, manager);
auto operator()(const msgs::Value &val, RequestRouterInterface *request_router) const {
return ValueToTypedValue(val, request_router);
};
};
} // namespace detail
using ExpressionEvaluator = memgraph::expr::ExpressionEvaluator<
TypedValue, memgraph::query::v2::EvaluationContext, memgraph::msgs::ShardRequestManagerInterface, storage::v3::View,
storage::v3::LabelId, msgs::Value, detail::Callable, memgraph::storage::v3::Error, memgraph::expr::QueryEngineTag>;
using ExpressionEvaluator = expr::ExpressionEvaluator<TypedValue, query::v2::EvaluationContext, RequestRouterInterface,
storage::v3::View, storage::v3::LabelId, msgs::Value,
detail::Callable, common::ErrorCode, expr::QueryEngineTag>;
} // namespace memgraph::query::v2

View File

@ -13,9 +13,10 @@
#include "query/v2/bindings/bindings.hpp"
#include "query/v2/bindings/typed_value.hpp"
#include "expr/interpret/frame.hpp"
#include "query/v2/bindings/typed_value.hpp"
namespace memgraph::query::v2 {
using Frame = memgraph::expr::Frame<TypedValue>;
} // namespace memgraph::query::v2
using Frame = memgraph::expr::Frame;
using FrameWithValidity = memgraph::expr::FrameWithValidity;
} // namespace memgraph::query::v2

View File

@ -20,7 +20,6 @@
#include "query/v2/bindings/symbol.hpp"
#include "query/v2/bindings/typed_value.hpp"
#include "query/v2/db_accessor.hpp"
#include "query/v2/exceptions.hpp"
#include "query/v2/frontend/ast/ast.hpp"
#include "query/v2/path.hpp"
@ -28,7 +27,6 @@
#include "storage/v3/id_types.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/result.hpp"
#include "storage/v3/shard_operation_result.hpp"
#include "storage/v3/view.hpp"
#include "utils/exceptions.hpp"
#include "utils/logging.hpp"
@ -82,39 +80,5 @@ inline void ExpectType(const Symbol &symbol, const TypedValue &value, TypedValue
throw QueryRuntimeException("Expected a {} for '{}', but got {}.", expected, symbol.name(), value.type());
}
template <typename T>
concept AccessorWithSetProperty = requires(T accessor, const storage::v3::PropertyId key,
const storage::v3::PropertyValue new_value) {
{ accessor.SetProperty(key, new_value) } -> std::same_as<storage::v3::Result<storage::v3::PropertyValue>>;
};
template <typename T>
concept AccessorWithSetPropertyAndValidate = requires(T accessor, const storage::v3::PropertyId key,
const storage::v3::PropertyValue new_value) {
{
accessor.SetPropertyAndValidate(key, new_value)
} -> std::same_as<storage::v3::ShardOperationResult<storage::v3::PropertyValue>>;
};
template <typename TRecordAccessor>
concept RecordAccessor =
AccessorWithSetProperty<TRecordAccessor> || AccessorWithSetPropertyAndValidate<TRecordAccessor>;
inline void HandleErrorOnPropertyUpdate(const storage::v3::Error error) {
switch (error) {
case storage::v3::Error::SERIALIZATION_ERROR:
throw TransactionSerializationException();
case storage::v3::Error::DELETED_OBJECT:
throw QueryRuntimeException("Trying to set properties on a deleted object.");
case storage::v3::Error::PROPERTIES_DISABLED:
throw QueryRuntimeException("Can't set property because properties on edges are disabled.");
case storage::v3::Error::VERTEX_HAS_EDGES:
case storage::v3::Error::NONEXISTENT_OBJECT:
case storage::v3::Error::VERTEX_ALREADY_INSERTED:
throw QueryRuntimeException("Unexpected error when setting a property.");
}
}
int64_t QueryTimestamp();
} // namespace memgraph::query::v2

View File

@ -20,7 +20,7 @@
#include "query/v2/parameters.hpp"
#include "query/v2/plan/profile.hpp"
//#include "query/v2/trigger.hpp"
#include "query/v2/shard_request_manager.hpp"
#include "query/v2/request_router.hpp"
#include "utils/async_timer.hpp"
namespace memgraph::query::v2 {
@ -60,27 +60,27 @@ struct EvaluationContext {
mutable std::unordered_map<std::string, int64_t> counters;
};
inline std::vector<storage::v3::PropertyId> NamesToProperties(
const std::vector<std::string> &property_names, msgs::ShardRequestManagerInterface *shard_request_manager) {
inline std::vector<storage::v3::PropertyId> NamesToProperties(const std::vector<std::string> &property_names,
RequestRouterInterface *request_router) {
std::vector<storage::v3::PropertyId> properties;
// TODO Fix by using reference
properties.reserve(property_names.size());
if (shard_request_manager != nullptr) {
if (request_router != nullptr) {
for (const auto &name : property_names) {
properties.push_back(shard_request_manager->NameToProperty(name));
properties.push_back(request_router->NameToProperty(name));
}
}
return properties;
}
inline std::vector<storage::v3::LabelId> NamesToLabels(const std::vector<std::string> &label_names,
msgs::ShardRequestManagerInterface *shard_request_manager) {
RequestRouterInterface *request_router) {
std::vector<storage::v3::LabelId> labels;
labels.reserve(label_names.size());
// TODO Fix by using reference
if (shard_request_manager != nullptr) {
if (request_router != nullptr) {
for (const auto &name : label_names) {
labels.push_back(shard_request_manager->NameToLabel(name));
labels.push_back(request_router->NameToLabel(name));
}
}
return labels;
@ -97,7 +97,7 @@ struct ExecutionContext {
plan::ProfilingStats *stats_root{nullptr};
ExecutionStats execution_stats;
utils::AsyncTimer timer;
msgs::ShardRequestManagerInterface *shard_request_manager{nullptr};
RequestRouterInterface *request_router{nullptr};
IdAllocator *edge_ids_alloc;
};

View File

@ -12,12 +12,12 @@
#pragma once
#include "bindings/typed_value.hpp"
#include "query/v2/accessors.hpp"
#include "query/v2/request_router.hpp"
#include "query/v2/requests.hpp"
#include "query/v2/shard_request_manager.hpp"
namespace memgraph::query::v2 {
inline TypedValue ValueToTypedValue(const msgs::Value &value, msgs::ShardRequestManagerInterface *manager) {
inline TypedValue ValueToTypedValue(const msgs::Value &value, RequestRouterInterface *request_router) {
using Value = msgs::Value;
switch (value.type) {
case Value::Type::Null:
@ -35,7 +35,7 @@ inline TypedValue ValueToTypedValue(const msgs::Value &value, msgs::ShardRequest
std::vector<TypedValue> dst;
dst.reserve(lst.size());
for (const auto &elem : lst) {
dst.push_back(ValueToTypedValue(elem, manager));
dst.push_back(ValueToTypedValue(elem, request_router));
}
return TypedValue(std::move(dst));
}
@ -43,19 +43,23 @@ inline TypedValue ValueToTypedValue(const msgs::Value &value, msgs::ShardRequest
const auto &value_map = value.map_v;
std::map<std::string, TypedValue> dst;
for (const auto &[key, val] : value_map) {
dst[key] = ValueToTypedValue(val, manager);
dst[key] = ValueToTypedValue(val, request_router);
}
return TypedValue(std::move(dst));
}
case Value::Type::Vertex:
return TypedValue(accessors::VertexAccessor(
value.vertex_v, std::vector<std::pair<storage::v3::PropertyId, msgs::Value>>{}, manager));
value.vertex_v, std::vector<std::pair<storage::v3::PropertyId, msgs::Value>>{}, request_router));
case Value::Type::Edge:
return TypedValue(accessors::EdgeAccessor(value.edge_v, manager));
return TypedValue(accessors::EdgeAccessor(value.edge_v, request_router));
}
throw std::runtime_error("Incorrect type in conversion");
}
inline const auto ValueToTypedValueFunctor = [](const msgs::Value &value, RequestRouterInterface *request_router) {
return ValueToTypedValue(value, request_router);
};
inline msgs::Value TypedValueToValue(const TypedValue &value) {
using Value = msgs::Value;
switch (value.type()) {

View File

@ -11,7 +11,7 @@
#include "query/v2/cypher_query_interpreter.hpp"
#include "query/v2/bindings/symbol_generator.hpp"
#include "query/v2/shard_request_manager.hpp"
#include "query/v2/request_router.hpp"
// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_HIDDEN_bool(query_cost_planner, true, "Use the cost-estimating query planner.");
@ -118,9 +118,9 @@ ParsedQuery ParseQuery(const std::string &query_string, const std::map<std::stri
}
std::unique_ptr<LogicalPlan> MakeLogicalPlan(AstStorage ast_storage, CypherQuery *query, const Parameters &parameters,
msgs::ShardRequestManagerInterface *shard_manager,
RequestRouterInterface *request_router,
const std::vector<Identifier *> &predefined_identifiers) {
auto vertex_counts = plan::MakeVertexCountCache(shard_manager);
auto vertex_counts = plan::MakeVertexCountCache(request_router);
auto symbol_table = expr::MakeSymbolTable(query, predefined_identifiers);
auto planning_context = plan::MakePlanningContext(&ast_storage, &symbol_table, query, &vertex_counts);
auto [root, cost] = plan::MakeLogicalPlan(&planning_context, parameters, FLAGS_query_cost_planner);
@ -130,7 +130,7 @@ std::unique_ptr<LogicalPlan> MakeLogicalPlan(AstStorage ast_storage, CypherQuery
std::shared_ptr<CachedPlan> CypherQueryToPlan(uint64_t hash, AstStorage ast_storage, CypherQuery *query,
const Parameters &parameters, utils::SkipList<PlanCacheEntry> *plan_cache,
msgs::ShardRequestManagerInterface *shard_manager,
RequestRouterInterface *request_router,
const std::vector<Identifier *> &predefined_identifiers) {
std::optional<utils::SkipList<PlanCacheEntry>::Accessor> plan_cache_access;
if (plan_cache) {
@ -146,7 +146,7 @@ std::shared_ptr<CachedPlan> CypherQueryToPlan(uint64_t hash, AstStorage ast_stor
}
auto plan = std::make_shared<CachedPlan>(
MakeLogicalPlan(std::move(ast_storage), query, parameters, shard_manager, predefined_identifiers));
MakeLogicalPlan(std::move(ast_storage), query, parameters, request_router, predefined_identifiers));
if (plan_cache_access) {
plan_cache_access->insert({hash, plan});
}

View File

@ -132,7 +132,7 @@ class SingleNodeLogicalPlan final : public LogicalPlan {
};
std::unique_ptr<LogicalPlan> MakeLogicalPlan(AstStorage ast_storage, CypherQuery *query, const Parameters &parameters,
msgs::ShardRequestManagerInterface *shard_manager,
RequestRouterInterface *request_router,
const std::vector<Identifier *> &predefined_identifiers);
/**
@ -145,7 +145,7 @@ std::unique_ptr<LogicalPlan> MakeLogicalPlan(AstStorage ast_storage, CypherQuery
*/
std::shared_ptr<CachedPlan> CypherQueryToPlan(uint64_t hash, AstStorage ast_storage, CypherQuery *query,
const Parameters &parameters, utils::SkipList<PlanCacheEntry> *plan_cache,
msgs::ShardRequestManagerInterface *shard_manager,
RequestRouterInterface *request_router,
const std::vector<Identifier *> &predefined_identifiers = {});
} // namespace memgraph::query::v2

View File

@ -23,7 +23,6 @@
#include "storage/v3/key_store.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/result.hpp"
#include "storage/v3/shard_operation_result.hpp"
///////////////////////////////////////////////////////////
// Our communication layer and query engine don't mix
@ -37,7 +36,7 @@
// This cannot be avoided by simple include orderings so we
// simply undefine those macros as we're sure that libkrb5
// won't and can't be used anywhere in the query engine.
#include "storage/v3/storage.hpp"
#include "storage/v3/shard.hpp"
#include "utils/logging.hpp"
#include "utils/result.hpp"
@ -65,24 +64,11 @@ class EdgeAccessor final {
auto Properties(storage::v3::View view) const { return impl_.Properties(view); }
storage::v3::Result<storage::v3::PropertyValue> GetProperty(storage::v3::View view,
storage::v3::PropertyId key) const {
storage::v3::ShardResult<storage::v3::PropertyValue> GetProperty(storage::v3::View view,
storage::v3::PropertyId key) const {
return impl_.GetProperty(key, view);
}
storage::v3::Result<storage::v3::PropertyValue> SetProperty(storage::v3::PropertyId key,
const storage::v3::PropertyValue &value) {
return impl_.SetProperty(key, value);
}
storage::v3::Result<storage::v3::PropertyValue> RemoveProperty(storage::v3::PropertyId key) {
return SetProperty(key, storage::v3::PropertyValue());
}
storage::v3::Result<std::map<storage::v3::PropertyId, storage::v3::PropertyValue>> ClearProperties() {
return impl_.ClearProperties();
}
VertexAccessor To() const;
VertexAccessor From() const;
@ -114,53 +100,19 @@ class VertexAccessor final {
auto PrimaryKey(storage::v3::View view) const { return impl_.PrimaryKey(view); }
storage::v3::ShardOperationResult<bool> AddLabel(storage::v3::LabelId label) {
return impl_.AddLabelAndValidate(label);
}
storage::v3::ShardOperationResult<bool> AddLabelAndValidate(storage::v3::LabelId label) {
return impl_.AddLabelAndValidate(label);
}
storage::v3::ShardOperationResult<bool> RemoveLabel(storage::v3::LabelId label) {
return impl_.RemoveLabelAndValidate(label);
}
storage::v3::ShardOperationResult<bool> RemoveLabelAndValidate(storage::v3::LabelId label) {
return impl_.RemoveLabelAndValidate(label);
}
storage::v3::Result<bool> HasLabel(storage::v3::View view, storage::v3::LabelId label) const {
storage::v3::ShardResult<bool> HasLabel(storage::v3::View view, storage::v3::LabelId label) const {
return impl_.HasLabel(label, view);
}
auto Properties(storage::v3::View view) const { return impl_.Properties(view); }
storage::v3::Result<storage::v3::PropertyValue> GetProperty(storage::v3::View view,
storage::v3::PropertyId key) const {
storage::v3::ShardResult<storage::v3::PropertyValue> GetProperty(storage::v3::View view,
storage::v3::PropertyId key) const {
return impl_.GetProperty(key, view);
}
storage::v3::ShardOperationResult<storage::v3::PropertyValue> SetProperty(storage::v3::PropertyId key,
const storage::v3::PropertyValue &value) {
return impl_.SetPropertyAndValidate(key, value);
}
storage::v3::ShardOperationResult<storage::v3::PropertyValue> SetPropertyAndValidate(
storage::v3::PropertyId key, const storage::v3::PropertyValue &value) {
return impl_.SetPropertyAndValidate(key, value);
}
storage::v3::ShardOperationResult<storage::v3::PropertyValue> RemovePropertyAndValidate(storage::v3::PropertyId key) {
return SetPropertyAndValidate(key, storage::v3::PropertyValue{});
}
storage::v3::Result<std::map<storage::v3::PropertyId, storage::v3::PropertyValue>> ClearProperties() {
return impl_.ClearProperties();
}
auto InEdges(storage::v3::View view, const std::vector<storage::v3::EdgeTypeId> &edge_types) const
-> storage::v3::Result<decltype(iter::imap(MakeEdgeAccessor, *impl_.InEdges(view)))> {
-> storage::v3::ShardResult<decltype(iter::imap(MakeEdgeAccessor, *impl_.InEdges(view)))> {
auto maybe_edges = impl_.InEdges(view, edge_types);
if (maybe_edges.HasError()) return maybe_edges.GetError();
return iter::imap(MakeEdgeAccessor, std::move(*maybe_edges));
@ -170,7 +122,7 @@ class VertexAccessor final {
auto InEdges(storage::v3::View view, const std::vector<storage::v3::EdgeTypeId> &edge_types,
const VertexAccessor &dest) const
-> storage::v3::Result<decltype(iter::imap(MakeEdgeAccessor, *impl_.InEdges(view)))> {
-> storage::v3::ShardResult<decltype(iter::imap(MakeEdgeAccessor, *impl_.InEdges(view)))> {
const auto dest_id = dest.impl_.Id(view).GetValue();
auto maybe_edges = impl_.InEdges(view, edge_types, &dest_id);
if (maybe_edges.HasError()) return maybe_edges.GetError();
@ -178,7 +130,7 @@ class VertexAccessor final {
}
auto OutEdges(storage::v3::View view, const std::vector<storage::v3::EdgeTypeId> &edge_types) const
-> storage::v3::Result<decltype(iter::imap(MakeEdgeAccessor, *impl_.OutEdges(view)))> {
-> storage::v3::ShardResult<decltype(iter::imap(MakeEdgeAccessor, *impl_.OutEdges(view)))> {
auto maybe_edges = impl_.OutEdges(view, edge_types);
if (maybe_edges.HasError()) return maybe_edges.GetError();
return iter::imap(MakeEdgeAccessor, std::move(*maybe_edges));
@ -188,16 +140,16 @@ class VertexAccessor final {
auto OutEdges(storage::v3::View view, const std::vector<storage::v3::EdgeTypeId> &edge_types,
const VertexAccessor &dest) const
-> storage::v3::Result<decltype(iter::imap(MakeEdgeAccessor, *impl_.OutEdges(view)))> {
-> storage::v3::ShardResult<decltype(iter::imap(MakeEdgeAccessor, *impl_.OutEdges(view)))> {
const auto dest_id = dest.impl_.Id(view).GetValue();
auto maybe_edges = impl_.OutEdges(view, edge_types, &dest_id);
if (maybe_edges.HasError()) return maybe_edges.GetError();
return iter::imap(MakeEdgeAccessor, std::move(*maybe_edges));
}
storage::v3::Result<size_t> InDegree(storage::v3::View view) const { return impl_.InDegree(view); }
storage::v3::ShardResult<size_t> InDegree(storage::v3::View view) const { return impl_.InDegree(view); }
storage::v3::Result<size_t> OutDegree(storage::v3::View view) const { return impl_.OutDegree(view); }
storage::v3::ShardResult<size_t> OutDegree(storage::v3::View view) const { return impl_.OutDegree(view); }
// TODO(jbajic) Fix Remove Gid
static int64_t CypherId() { return 1; }

View File

@ -224,12 +224,4 @@ class VersionInfoInMulticommandTxException : public QueryException {
: QueryException("Version info query not allowed in multicommand transactions.") {}
};
/**
* An exception for an illegal operation that violates schema
*/
class SchemaViolationException : public QueryRuntimeException {
public:
using QueryRuntimeException::QueryRuntimeException;
};
} // namespace memgraph::query::v2

View File

@ -20,11 +20,13 @@
#include "query/v2/bindings/ast_visitor.hpp"
#include "common/types.hpp"
#include "query/v2/bindings/symbol.hpp"
#include "query/v2/interpret/awesome_memgraph_functions.hpp"
#include "functions/awesome_memgraph_functions.hpp"
#include "query/v2/bindings/typed_value.hpp"
#include "query/v2/db_accessor.hpp"
#include "query/v2/path.hpp"
#include "query/v2/request_router.hpp"
#include "utils/typeinfo.hpp"
#include "query/v2/conversions.hpp"
cpp<#
@ -836,13 +838,15 @@ cpp<#
:slk-load (slk-load-ast-vector "Expression"))
(function-name "std::string" :scope :public)
(function "std::function<TypedValue(const TypedValue *, int64_t,
const FunctionContext &)>"
const functions::FunctionContext<RequestRouterInterface> &)>"
:scope :public
:dont-save t
:clone :copy
:slk-load (lambda (member)
#>cpp
self->${member} = query::v2::NameToFunction(self->function_name_);
self->${member} = functions::NameToFunction<TypedValue,
functions::FunctionContext<RequestRouterInterface>,
functions::QueryEngineTag, decltype(ValueToTypedValueFunctor)>(self->function_name_);
cpp<#)))
(:public
#>cpp
@ -865,7 +869,8 @@ cpp<#
const std::vector<Expression *> &arguments)
: arguments_(arguments),
function_name_(function_name),
function_(NameToFunction(function_name_)) {
function_(functions::NameToFunction<TypedValue, functions::FunctionContext<RequestRouterInterface>,
functions::QueryEngineTag, decltype(ValueToTypedValueFunctor)>(function_name_)) {
if (!function_) {
throw SemanticException("Function '{}' doesn't exist.", function_name);
}

File diff suppressed because it is too large

View File

@ -20,12 +20,10 @@
#include "storage/v3/view.hpp"
#include "utils/memory.hpp"
namespace memgraph::msgs {
class ShardRequestManagerInterface;
} // namespace memgraph::msgs
namespace memgraph::query::v2 {
class RequestRouterInterface;
namespace {
const char kStartsWith[] = "STARTSWITH";
const char kEndsWith[] = "ENDSWITH";
@ -34,9 +32,9 @@ const char kId[] = "ID";
} // namespace
struct FunctionContext {
// TODO(kostasrim) consider optional here. ShardRequestManager does not exist on the storage.
// TODO(kostasrim) consider optional here. RequestRouter does not exist on the storage.
// DbAccessor *db_accessor;
msgs::ShardRequestManagerInterface *manager;
RequestRouterInterface *request_router;
utils::MemoryResource *memory;
int64_t timestamp;
std::unordered_map<std::string, int64_t> *counters;

View File

@ -41,13 +41,13 @@
#include "query/v2/frontend/ast/ast.hpp"
#include "query/v2/frontend/semantic/required_privileges.hpp"
#include "query/v2/metadata.hpp"
#include "query/v2/multiframe.hpp"
#include "query/v2/plan/planner.hpp"
#include "query/v2/plan/profile.hpp"
#include "query/v2/plan/vertex_count_cache.hpp"
#include "query/v2/shard_request_manager.hpp"
#include "query/v2/request_router.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/shard.hpp"
#include "storage/v3/storage.hpp"
#include "utils/algorithm.hpp"
#include "utils/csv_parsing.hpp"
#include "utils/event_counter.hpp"
@ -144,18 +144,18 @@ class ReplQueryHandler final : public query::v2::ReplicationQueryHandler {
/// @throw QueryRuntimeException if an error occurred.
Callback HandleAuthQuery(AuthQuery *auth_query, AuthQueryHandler *auth, const Parameters &parameters,
msgs::ShardRequestManagerInterface *manager) {
RequestRouterInterface *request_router) {
// Empty frame for evaluation of the password expression. This is OK since
// the password should be either null or a string literal and its evaluation
// should not depend on the frame.
expr::Frame<TypedValue> frame(0);
expr::Frame frame(0);
SymbolTable symbol_table;
EvaluationContext evaluation_context;
// TODO: MemoryResource for EvaluationContext, it should probably be passed as
// the argument to Callback.
evaluation_context.timestamp = QueryTimestamp();
evaluation_context.parameters = parameters;
ExpressionEvaluator evaluator(&frame, symbol_table, evaluation_context, manager, storage::v3::View::OLD);
ExpressionEvaluator evaluator(&frame, symbol_table, evaluation_context, request_router, storage::v3::View::OLD);
std::string username = auth_query->user_;
std::string rolename = auth_query->role_;
@ -313,16 +313,16 @@ Callback HandleAuthQuery(AuthQuery *auth_query, AuthQueryHandler *auth, const Pa
}
Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &parameters,
InterpreterContext *interpreter_context, msgs::ShardRequestManagerInterface *manager,
InterpreterContext *interpreter_context, RequestRouterInterface *request_router,
std::vector<Notification> *notifications) {
expr::Frame<TypedValue> frame(0);
expr::Frame frame(0);
SymbolTable symbol_table;
EvaluationContext evaluation_context;
// TODO: MemoryResource for EvaluationContext, it should probably be passed as
// the argument to Callback.
evaluation_context.timestamp = QueryTimestamp();
evaluation_context.parameters = parameters;
ExpressionEvaluator evaluator(&frame, symbol_table, evaluation_context, manager, storage::v3::View::OLD);
ExpressionEvaluator evaluator(&frame, symbol_table, evaluation_context, request_router, storage::v3::View::OLD);
Callback callback;
switch (repl_query->action_) {
@ -449,8 +449,8 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
}
Callback HandleSettingQuery(SettingQuery *setting_query, const Parameters &parameters,
msgs::ShardRequestManagerInterface *manager) {
expr::Frame<TypedValue> frame(0);
RequestRouterInterface *request_router) {
expr::Frame frame(0);
SymbolTable symbol_table;
EvaluationContext evaluation_context;
// TODO: MemoryResource for EvaluationContext, it should probably be passed as
@ -459,7 +459,7 @@ Callback HandleSettingQuery(SettingQuery *setting_query, const Parameters &param
std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch())
.count();
evaluation_context.parameters = parameters;
ExpressionEvaluator evaluator(&frame, symbol_table, evaluation_context, manager, storage::v3::View::OLD);
ExpressionEvaluator evaluator(&frame, symbol_table, evaluation_context, request_router, storage::v3::View::OLD);
Callback callback;
switch (setting_query->action_) {
@ -650,17 +650,21 @@ struct PullPlanVector {
struct PullPlan {
explicit PullPlan(std::shared_ptr<CachedPlan> plan, const Parameters &parameters, bool is_profile_query,
DbAccessor *dba, InterpreterContext *interpreter_context, utils::MemoryResource *execution_memory,
msgs::ShardRequestManagerInterface *shard_request_manager = nullptr,
RequestRouterInterface *request_router = nullptr,
// TriggerContextCollector *trigger_context_collector = nullptr,
std::optional<size_t> memory_limit = {});
std::optional<plan::ProfilingStatsWithTotalTime> Pull(AnyStream *stream, std::optional<int> n,
const std::vector<Symbol> &output_symbols,
std::map<std::string, TypedValue> *summary);
std::optional<plan::ProfilingStatsWithTotalTime> PullMultiple(AnyStream *stream, std::optional<int> n,
const std::vector<Symbol> &output_symbols,
std::map<std::string, TypedValue> *summary);
private:
std::shared_ptr<CachedPlan> plan_ = nullptr;
plan::UniqueCursorPtr cursor_ = nullptr;
expr::Frame<TypedValue> frame_;
expr::FrameWithValidity frame_;
MultiFrame multi_frame_;
ExecutionContext ctx_;
std::optional<size_t> memory_limit_;
@ -680,29 +684,137 @@ struct PullPlan {
PullPlan::PullPlan(const std::shared_ptr<CachedPlan> plan, const Parameters &parameters, const bool is_profile_query,
DbAccessor *dba, InterpreterContext *interpreter_context, utils::MemoryResource *execution_memory,
msgs::ShardRequestManagerInterface *shard_request_manager, const std::optional<size_t> memory_limit)
RequestRouterInterface *request_router, const std::optional<size_t> memory_limit)
: plan_(plan),
cursor_(plan->plan().MakeCursor(execution_memory)),
frame_(plan->symbol_table().max_position(), execution_memory),
multi_frame_(plan->symbol_table().max_position(), kNumberOfFramesInMultiframe, execution_memory),
memory_limit_(memory_limit) {
ctx_.db_accessor = dba;
ctx_.symbol_table = plan->symbol_table();
ctx_.evaluation_context.timestamp = QueryTimestamp();
ctx_.evaluation_context.parameters = parameters;
ctx_.evaluation_context.properties = NamesToProperties(plan->ast_storage().properties_, shard_request_manager);
ctx_.evaluation_context.labels = NamesToLabels(plan->ast_storage().labels_, shard_request_manager);
ctx_.evaluation_context.properties = NamesToProperties(plan->ast_storage().properties_, request_router);
ctx_.evaluation_context.labels = NamesToLabels(plan->ast_storage().labels_, request_router);
if (interpreter_context->config.execution_timeout_sec > 0) {
ctx_.timer = utils::AsyncTimer{interpreter_context->config.execution_timeout_sec};
}
ctx_.is_shutting_down = &interpreter_context->is_shutting_down;
ctx_.is_profile_query = is_profile_query;
ctx_.shard_request_manager = shard_request_manager;
ctx_.request_router = request_router;
ctx_.edge_ids_alloc = &interpreter_context->edge_ids_alloc;
}
std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::PullMultiple(AnyStream *stream, std::optional<int> n,
const std::vector<Symbol> &output_symbols,
std::map<std::string, TypedValue> *summary) {
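// Drains the plan through the multi-frame path: each round asks the root cursor to fill multi_frame_ via
// PullMultiple, streams the valid frames (when there are output symbols) and then invalidates the whole batch.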
// Set up temporary memory for a single Pull. Initial memory comes from the
// stack. 256 KiB should fit on the stack and should be more than enough for a
// single `Pull`.
MG_ASSERT(!n.has_value(), "should pull all!");
static constexpr size_t stack_size = 256UL * 1024UL;
char stack_data[stack_size];
utils::ResourceWithOutOfMemoryException resource_with_exception;
utils::MonotonicBufferResource monotonic_memory(&stack_data[0], stack_size, &resource_with_exception);
// We can throw on every query because simple queries (e.g. deletes) will use only
// the stack-allocated buffer.
// Also, we want to throw only when the query engine requests more memory and not the storage,
// so we add the exception to the allocator.
// TODO (mferencevic): Tune the parameters accordingly.
utils::PoolResource pool_memory(128, 1024, &monotonic_memory);
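// If the query specified a memory limit, wrap the pool in a LimitedMemoryResource so allocations made during
// evaluation count against that limit; otherwise evaluation allocates straight from the pool.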
std::optional<utils::LimitedMemoryResource> maybe_limited_resource;
if (memory_limit_) {
maybe_limited_resource.emplace(&pool_memory, *memory_limit_);
ctx_.evaluation_context.memory = &*maybe_limited_resource;
} else {
ctx_.evaluation_context.memory = &pool_memory;
}
// Returns true if a result was pulled.
const auto pull_result = [&]() -> bool {
cursor_->PullMultiple(multi_frame_, ctx_);
return multi_frame_.HasValidFrame();
};
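// Copies the values of the output symbols from a single frame into a row and sends it to the client stream.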
const auto stream_values = [&output_symbols, &stream](const Frame &frame) {
// TODO: The streamed values should also probably use the above memory.
std::vector<TypedValue> values;
values.reserve(output_symbols.size());
for (const auto &symbol : output_symbols) {
values.emplace_back(frame[symbol]);
}
stream->Result(values);
};
// Get the execution time of all possible result pulls and streams.
utils::Timer timer;
int i = 0;
if (has_unsent_results_ && !output_symbols.empty()) {
// stream unsent results from previous pull
auto iterator_for_valid_frame_only = multi_frame_.GetValidFramesReader();
for (const auto &frame : iterator_for_valid_frame_only) {
stream_values(frame);
++i;
}
multi_frame_.MakeAllFramesInvalid();
}
for (; !n || i < n;) {
if (!pull_result()) {
break;
}
if (!output_symbols.empty()) {
auto iterator_for_valid_frame_only = multi_frame_.GetValidFramesReader();
for (const auto &frame : iterator_for_valid_frame_only) {
stream_values(frame);
++i;
}
}
multi_frame_.MakeAllFramesInvalid();
}
// If we finished because we streamed the requested n results,
// we try to pull the next result to see if there is more.
// If there is additional result, we leave the pulled result in the frame
// and set the flag to true.
has_unsent_results_ = i == n && pull_result();
execution_time_ += timer.Elapsed();
if (has_unsent_results_) {
return std::nullopt;
}
summary->insert_or_assign("plan_execution_time", execution_time_.count());
// We are finished with pulling all the data, therefore we can send any
// metadata about the results i.e. notifications and statistics
const bool is_any_counter_set =
std::any_of(ctx_.execution_stats.counters.begin(), ctx_.execution_stats.counters.end(),
[](const auto &counter) { return counter > 0; });
if (is_any_counter_set) {
std::map<std::string, TypedValue> stats;
for (size_t i = 0; i < ctx_.execution_stats.counters.size(); ++i) {
stats.emplace(ExecutionStatsKeyToString(ExecutionStats::Key(i)), ctx_.execution_stats.counters[i]);
}
summary->insert_or_assign("stats", std::move(stats));
}
cursor_->Shutdown();
ctx_.profile_execution_time = execution_time_;
return GetStatsWithTotalTime(ctx_);
}
std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::Pull(AnyStream *stream, std::optional<int> n,
const std::vector<Symbol> &output_symbols,
std::map<std::string, TypedValue> *summary) {
auto should_pull_multiple = false;  // TODO: in the long term, we will only use PullMultiple
if (should_pull_multiple) {
return PullMultiple(stream, n, output_symbols, summary);
}
// Set up temporary memory for a single Pull. Initial memory comes from the
// stack. 256 KiB should fit on the stack and should be more than enough for a
// single `Pull`.
@ -800,8 +912,12 @@ InterpreterContext::InterpreterContext(storage::v3::Shard *db, const Interpreter
Interpreter::Interpreter(InterpreterContext *interpreter_context) : interpreter_context_(interpreter_context) {
MG_ASSERT(interpreter_context_, "Interpreter context must not be NULL");
auto query_io = interpreter_context_->io.ForkLocal();
shard_request_manager_ = std::make_unique<msgs::ShardRequestManager<io::local_transport::LocalTransport>>(
// TODO(tyler) make this deterministic so that it can be tested.
auto random_uuid = boost::uuids::uuid{boost::uuids::random_generator()()};
auto query_io = interpreter_context_->io.ForkLocal(random_uuid);
request_router_ = std::make_unique<RequestRouter<io::local_transport::LocalTransport>>(
coordinator::CoordinatorClient<io::local_transport::LocalTransport>(
query_io, interpreter_context_->coordinator_address, std::vector{interpreter_context_->coordinator_address}),
std::move(query_io));
@ -878,17 +994,16 @@ PreparedQuery Interpreter::PrepareTransactionQuery(std::string_view query_upper)
PreparedQuery PrepareCypherQuery(ParsedQuery parsed_query, std::map<std::string, TypedValue> *summary,
InterpreterContext *interpreter_context, DbAccessor *dba,
utils::MemoryResource *execution_memory, std::vector<Notification> *notifications,
msgs::ShardRequestManagerInterface *shard_request_manager) {
RequestRouterInterface *request_router) {
// TriggerContextCollector *trigger_context_collector = nullptr) {
auto *cypher_query = utils::Downcast<CypherQuery>(parsed_query.query);
expr::Frame<TypedValue> frame(0);
expr::Frame frame(0);
SymbolTable symbol_table;
EvaluationContext evaluation_context;
evaluation_context.timestamp = QueryTimestamp();
evaluation_context.parameters = parsed_query.parameters;
ExpressionEvaluator evaluator(&frame, symbol_table, evaluation_context, shard_request_manager,
storage::v3::View::OLD);
ExpressionEvaluator evaluator(&frame, symbol_table, evaluation_context, request_router, storage::v3::View::OLD);
const auto memory_limit =
expr::EvaluateMemoryLimit(&evaluator, cypher_query->memory_limit_, cypher_query->memory_scale_);
if (memory_limit) {
@ -903,9 +1018,9 @@ PreparedQuery PrepareCypherQuery(ParsedQuery parsed_query, std::map<std::string,
"convert the parsed row values to the appropriate type. This can be done using the built-in "
"conversion functions such as ToInteger, ToFloat, ToBoolean etc.");
}
auto plan = CypherQueryToPlan(
parsed_query.stripped_query.hash(), std::move(parsed_query.ast_storage), cypher_query, parsed_query.parameters,
parsed_query.is_cacheable ? &interpreter_context->plan_cache : nullptr, shard_request_manager);
auto plan = CypherQueryToPlan(parsed_query.stripped_query.hash(), std::move(parsed_query.ast_storage), cypher_query,
parsed_query.parameters,
parsed_query.is_cacheable ? &interpreter_context->plan_cache : nullptr, request_router);
summary->insert_or_assign("cost_estimate", plan->cost());
auto rw_type_checker = plan::ReadWriteTypeChecker();
@ -924,7 +1039,7 @@ PreparedQuery PrepareCypherQuery(ParsedQuery parsed_query, std::map<std::string,
utils::FindOr(parsed_query.stripped_query.named_expressions(), symbol.token_position(), symbol.name()).first);
}
auto pull_plan = std::make_shared<PullPlan>(plan, parsed_query.parameters, false, dba, interpreter_context,
execution_memory, shard_request_manager, memory_limit);
execution_memory, request_router, memory_limit);
// execution_memory, trigger_context_collector, memory_limit);
return PreparedQuery{std::move(header), std::move(parsed_query.required_privileges),
[pull_plan = std::move(pull_plan), output_symbols = std::move(output_symbols), summary](
@ -938,8 +1053,7 @@ PreparedQuery PrepareCypherQuery(ParsedQuery parsed_query, std::map<std::string,
}
PreparedQuery PrepareExplainQuery(ParsedQuery parsed_query, std::map<std::string, TypedValue> *summary,
InterpreterContext *interpreter_context,
msgs::ShardRequestManagerInterface *shard_request_manager,
InterpreterContext *interpreter_context, RequestRouterInterface *request_router,
utils::MemoryResource *execution_memory) {
const std::string kExplainQueryStart = "explain ";
MG_ASSERT(utils::StartsWith(utils::ToLowerCase(parsed_query.stripped_query.query()), kExplainQueryStart),
@ -958,20 +1072,20 @@ PreparedQuery PrepareExplainQuery(ParsedQuery parsed_query, std::map<std::string
auto *cypher_query = utils::Downcast<CypherQuery>(parsed_inner_query.query);
MG_ASSERT(cypher_query, "Cypher grammar should not allow other queries in EXPLAIN");
auto cypher_query_plan = CypherQueryToPlan(
parsed_inner_query.stripped_query.hash(), std::move(parsed_inner_query.ast_storage), cypher_query,
parsed_inner_query.parameters, parsed_inner_query.is_cacheable ? &interpreter_context->plan_cache : nullptr,
shard_request_manager);
auto cypher_query_plan =
CypherQueryToPlan(parsed_inner_query.stripped_query.hash(), std::move(parsed_inner_query.ast_storage),
cypher_query, parsed_inner_query.parameters,
parsed_inner_query.is_cacheable ? &interpreter_context->plan_cache : nullptr, request_router);
std::stringstream printed_plan;
plan::PrettyPrint(*shard_request_manager, &cypher_query_plan->plan(), &printed_plan);
plan::PrettyPrint(*request_router, &cypher_query_plan->plan(), &printed_plan);
std::vector<std::vector<TypedValue>> printed_plan_rows;
for (const auto &row : utils::Split(utils::RTrim(printed_plan.str()), "\n")) {
printed_plan_rows.push_back(std::vector<TypedValue>{TypedValue(row)});
}
summary->insert_or_assign("explain", plan::PlanToJson(*shard_request_manager, &cypher_query_plan->plan()).dump());
summary->insert_or_assign("explain", plan::PlanToJson(*request_router, &cypher_query_plan->plan()).dump());
return PreparedQuery{{"QUERY PLAN"},
std::move(parsed_query.required_privileges),
@ -988,7 +1102,7 @@ PreparedQuery PrepareExplainQuery(ParsedQuery parsed_query, std::map<std::string
PreparedQuery PrepareProfileQuery(ParsedQuery parsed_query, bool in_explicit_transaction,
std::map<std::string, TypedValue> *summary, InterpreterContext *interpreter_context,
DbAccessor *dba, utils::MemoryResource *execution_memory,
msgs::ShardRequestManagerInterface *shard_request_manager = nullptr) {
RequestRouterInterface *request_router = nullptr) {
const std::string kProfileQueryStart = "profile ";
MG_ASSERT(utils::StartsWith(utils::ToLowerCase(parsed_query.stripped_query.query()), kProfileQueryStart),
@ -1026,27 +1140,26 @@ PreparedQuery PrepareProfileQuery(ParsedQuery parsed_query, bool in_explicit_tra
auto *cypher_query = utils::Downcast<CypherQuery>(parsed_inner_query.query);
MG_ASSERT(cypher_query, "Cypher grammar should not allow other queries in PROFILE");
expr::Frame<TypedValue> frame(0);
expr::Frame frame(0);
SymbolTable symbol_table;
EvaluationContext evaluation_context;
evaluation_context.timestamp = QueryTimestamp();
evaluation_context.parameters = parsed_inner_query.parameters;
ExpressionEvaluator evaluator(&frame, symbol_table, evaluation_context, shard_request_manager,
storage::v3::View::OLD);
ExpressionEvaluator evaluator(&frame, symbol_table, evaluation_context, request_router, storage::v3::View::OLD);
const auto memory_limit =
expr::EvaluateMemoryLimit(&evaluator, cypher_query->memory_limit_, cypher_query->memory_scale_);
auto cypher_query_plan = CypherQueryToPlan(
parsed_inner_query.stripped_query.hash(), std::move(parsed_inner_query.ast_storage), cypher_query,
parsed_inner_query.parameters, parsed_inner_query.is_cacheable ? &interpreter_context->plan_cache : nullptr,
shard_request_manager);
auto cypher_query_plan =
CypherQueryToPlan(parsed_inner_query.stripped_query.hash(), std::move(parsed_inner_query.ast_storage),
cypher_query, parsed_inner_query.parameters,
parsed_inner_query.is_cacheable ? &interpreter_context->plan_cache : nullptr, request_router);
auto rw_type_checker = plan::ReadWriteTypeChecker();
rw_type_checker.InferRWType(const_cast<plan::LogicalOperator &>(cypher_query_plan->plan()));
return PreparedQuery{{"OPERATOR", "ACTUAL HITS", "RELATIVE TIME", "ABSOLUTE TIME", "CUSTOM DATA"},
std::move(parsed_query.required_privileges),
[plan = std::move(cypher_query_plan), parameters = std::move(parsed_inner_query.parameters),
summary, dba, interpreter_context, execution_memory, memory_limit, shard_request_manager,
summary, dba, interpreter_context, execution_memory, memory_limit, request_router,
// We want to execute the query we are profiling lazily, so we delay
// the construction of the corresponding context.
stats_and_total_time = std::optional<plan::ProfilingStatsWithTotalTime>{},
@ -1055,7 +1168,7 @@ PreparedQuery PrepareProfileQuery(ParsedQuery parsed_query, bool in_explicit_tra
// No output symbols are given so that nothing is streamed.
if (!stats_and_total_time) {
stats_and_total_time = PullPlan(plan, parameters, true, dba, interpreter_context,
execution_memory, shard_request_manager, memory_limit)
execution_memory, request_router, memory_limit)
.Pull(stream, {}, {}, summary);
pull_plan = std::make_shared<PullPlanVector>(ProfilingStatsToTable(*stats_and_total_time));
}
@ -1182,14 +1295,14 @@ PreparedQuery PrepareIndexQuery(ParsedQuery parsed_query, bool in_explicit_trans
PreparedQuery PrepareAuthQuery(ParsedQuery parsed_query, bool in_explicit_transaction,
std::map<std::string, TypedValue> *summary, InterpreterContext *interpreter_context,
DbAccessor *dba, utils::MemoryResource *execution_memory,
msgs::ShardRequestManagerInterface *manager) {
RequestRouterInterface *request_router) {
if (in_explicit_transaction) {
throw UserModificationInMulticommandTxException();
}
auto *auth_query = utils::Downcast<AuthQuery>(parsed_query.query);
auto callback = HandleAuthQuery(auth_query, interpreter_context->auth, parsed_query.parameters, manager);
auto callback = HandleAuthQuery(auth_query, interpreter_context->auth, parsed_query.parameters, request_router);
SymbolTable symbol_table;
std::vector<Symbol> output_symbols;
@ -1218,14 +1331,14 @@ PreparedQuery PrepareAuthQuery(ParsedQuery parsed_query, bool in_explicit_transa
PreparedQuery PrepareReplicationQuery(ParsedQuery parsed_query, const bool in_explicit_transaction,
std::vector<Notification> *notifications, InterpreterContext *interpreter_context,
msgs::ShardRequestManagerInterface *manager) {
RequestRouterInterface *request_router) {
if (in_explicit_transaction) {
throw ReplicationModificationInMulticommandTxException();
}
auto *replication_query = utils::Downcast<ReplicationQuery>(parsed_query.query);
auto callback =
HandleReplicationQuery(replication_query, parsed_query.parameters, interpreter_context, manager, notifications);
auto callback = HandleReplicationQuery(replication_query, parsed_query.parameters, interpreter_context,
request_router, notifications);
return PreparedQuery{callback.header, std::move(parsed_query.required_privileges),
[callback_fn = std::move(callback.fn), pull_plan = std::shared_ptr<PullPlanVector>{nullptr}](
@ -1314,14 +1427,14 @@ PreparedQuery PrepareCreateSnapshotQuery(ParsedQuery parsed_query, bool in_expli
}
PreparedQuery PrepareSettingQuery(ParsedQuery parsed_query, const bool in_explicit_transaction,
msgs::ShardRequestManagerInterface *manager) {
RequestRouterInterface *request_router) {
if (in_explicit_transaction) {
throw SettingConfigInMulticommandTxException{};
}
auto *setting_query = utils::Downcast<SettingQuery>(parsed_query.query);
MG_ASSERT(setting_query);
auto callback = HandleSettingQuery(setting_query, parsed_query.parameters, manager);
auto callback = HandleSettingQuery(setting_query, parsed_query.parameters, request_router);
return PreparedQuery{std::move(callback.header), std::move(parsed_query.required_privileges),
[callback_fn = std::move(callback.fn), pull_plan = std::shared_ptr<PullPlanVector>{nullptr}](
@ -1518,7 +1631,7 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
if (!in_explicit_transaction_ &&
(utils::Downcast<CypherQuery>(parsed_query.query) || utils::Downcast<ExplainQuery>(parsed_query.query) ||
utils::Downcast<ProfileQuery>(parsed_query.query))) {
shard_request_manager_->StartTransaction();
request_router_->StartTransaction();
}
utils::Timer planning_timer;
@ -1527,14 +1640,14 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
if (utils::Downcast<CypherQuery>(parsed_query.query)) {
prepared_query = PrepareCypherQuery(std::move(parsed_query), &query_execution->summary, interpreter_context_,
&*execution_db_accessor_, &query_execution->execution_memory,
&query_execution->notifications, shard_request_manager_.get());
&query_execution->notifications, request_router_.get());
} else if (utils::Downcast<ExplainQuery>(parsed_query.query)) {
prepared_query = PrepareExplainQuery(std::move(parsed_query), &query_execution->summary, interpreter_context_,
&*shard_request_manager_, &query_execution->execution_memory_with_exception);
&*request_router_, &query_execution->execution_memory_with_exception);
} else if (utils::Downcast<ProfileQuery>(parsed_query.query)) {
prepared_query = PrepareProfileQuery(
std::move(parsed_query), in_explicit_transaction_, &query_execution->summary, interpreter_context_,
&*execution_db_accessor_, &query_execution->execution_memory_with_exception, shard_request_manager_.get());
prepared_query = PrepareProfileQuery(std::move(parsed_query), in_explicit_transaction_, &query_execution->summary,
interpreter_context_, &*execution_db_accessor_,
&query_execution->execution_memory_with_exception, request_router_.get());
} else if (utils::Downcast<DumpQuery>(parsed_query.query)) {
prepared_query = PrepareDumpQuery(std::move(parsed_query), &query_execution->summary, &*execution_db_accessor_,
&query_execution->execution_memory);
@ -1542,9 +1655,9 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
prepared_query = PrepareIndexQuery(std::move(parsed_query), in_explicit_transaction_,
&query_execution->notifications, interpreter_context_);
} else if (utils::Downcast<AuthQuery>(parsed_query.query)) {
prepared_query = PrepareAuthQuery(
std::move(parsed_query), in_explicit_transaction_, &query_execution->summary, interpreter_context_,
&*execution_db_accessor_, &query_execution->execution_memory_with_exception, shard_request_manager_.get());
prepared_query = PrepareAuthQuery(std::move(parsed_query), in_explicit_transaction_, &query_execution->summary,
interpreter_context_, &*execution_db_accessor_,
&query_execution->execution_memory_with_exception, request_router_.get());
} else if (utils::Downcast<InfoQuery>(parsed_query.query)) {
prepared_query = PrepareInfoQuery(std::move(parsed_query), in_explicit_transaction_, &query_execution->summary,
interpreter_context_, interpreter_context_->db,
@ -1555,7 +1668,7 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
} else if (utils::Downcast<ReplicationQuery>(parsed_query.query)) {
prepared_query =
PrepareReplicationQuery(std::move(parsed_query), in_explicit_transaction_, &query_execution->notifications,
interpreter_context_, shard_request_manager_.get());
interpreter_context_, request_router_.get());
} else if (utils::Downcast<LockPathQuery>(parsed_query.query)) {
prepared_query = PrepareLockPathQuery(std::move(parsed_query), in_explicit_transaction_, interpreter_context_,
&*execution_db_accessor_);
@ -1572,8 +1685,7 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
prepared_query =
PrepareCreateSnapshotQuery(std::move(parsed_query), in_explicit_transaction_, interpreter_context_);
} else if (utils::Downcast<SettingQuery>(parsed_query.query)) {
prepared_query =
PrepareSettingQuery(std::move(parsed_query), in_explicit_transaction_, shard_request_manager_.get());
prepared_query = PrepareSettingQuery(std::move(parsed_query), in_explicit_transaction_, request_router_.get());
} else if (utils::Downcast<VersionQuery>(parsed_query.query)) {
prepared_query = PrepareVersionQuery(std::move(parsed_query), in_explicit_transaction_);
} else if (utils::Downcast<SchemaQuery>(parsed_query.query)) {
@ -1614,7 +1726,7 @@ void Interpreter::Commit() {
// For now, we will not check if there are some unfinished queries.
// We should document clearly that all results should be pulled to complete
// a query.
shard_request_manager_->Commit();
request_router_->Commit();
if (!db_accessor_) return;
const auto reset_necessary_members = [this]() {


@ -296,7 +296,7 @@ class Interpreter final {
*/
void Abort();
const msgs::ShardRequestManagerInterface *GetShardRequestManager() const { return shard_request_manager_.get(); }
const RequestRouterInterface *GetRequestRouter() const { return request_router_.get(); }
private:
struct QueryExecution {
@ -342,7 +342,7 @@ class Interpreter final {
// move this unique_ptr into a shared_ptr.
std::unique_ptr<storage::v3::Shard::Accessor> db_accessor_;
std::optional<DbAccessor> execution_db_accessor_;
std::unique_ptr<msgs::ShardRequestManagerInterface> shard_request_manager_;
std::unique_ptr<RequestRouterInterface> request_router_;
bool in_explicit_transaction_{false};
bool expect_rollback_{false};

144
src/query/v2/multiframe.cpp Normal file

@ -0,0 +1,144 @@
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#include "query/v2/multiframe.hpp"
#include <algorithm>
#include <iterator>
#include "query/v2/bindings/frame.hpp"
#include "utils/pmr/vector.hpp"
namespace memgraph::query::v2 {
static_assert(std::forward_iterator<ValidFramesReader::Iterator>);
static_assert(std::forward_iterator<ValidFramesModifier::Iterator>);
static_assert(std::forward_iterator<ValidFramesConsumer::Iterator>);
static_assert(std::forward_iterator<InvalidFramesPopulator::Iterator>);
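// Compile-time checks that every wrapper iterator models std::forward_iterator, so the wrappers can be used
// with standard algorithms and range-based for loops.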
MultiFrame::MultiFrame(int64_t size_of_frame, size_t number_of_frames, utils::MemoryResource *execution_memory)
: frames_(utils::pmr::vector<FrameWithValidity>(
number_of_frames, FrameWithValidity(size_of_frame, execution_memory), execution_memory)) {
MG_ASSERT(number_of_frames > 0);
}
MultiFrame::MultiFrame(const MultiFrame &other) : frames_{other.frames_} {}
// NOLINTNEXTLINE (bugprone-exception-escape)
MultiFrame::MultiFrame(MultiFrame &&other) noexcept : frames_(std::move(other.frames_)) {}
FrameWithValidity &MultiFrame::GetFirstFrame() {
MG_ASSERT(!frames_.empty());
return frames_.front();
}
void MultiFrame::MakeAllFramesInvalid() noexcept {
std::for_each(frames_.begin(), frames_.end(), [](auto &frame) { frame.MakeInvalid(); });
}
bool MultiFrame::HasValidFrame() const noexcept {
return std::any_of(frames_.begin(), frames_.end(), [](auto &frame) { return frame.IsValid(); });
}
// NOLINTNEXTLINE (bugprone-exception-escape)
void MultiFrame::DefragmentValidFrames() noexcept {
/*
from: https://en.cppreference.com/w/cpp/algorithm/remove
"Removing is done by shifting (by means of copy assignment (until C++11)move assignment (since C++11)) the elements
in the range in such a way that the elements that are not to be removed appear in the beginning of the range.
Relative order of the elements that remain is preserved and the physical size of the container is unchanged."
*/
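// The predicate removes the invalid frames, so after this call all valid frames are packed at the front of frames_.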
// NOLINTNEXTLINE (bugprone-unused-return-value)
std::remove_if(frames_.begin(), frames_.end(), [](auto &frame) { return !frame.IsValid(); });
}
ValidFramesReader MultiFrame::GetValidFramesReader() { return ValidFramesReader{*this}; }
ValidFramesModifier MultiFrame::GetValidFramesModifier() { return ValidFramesModifier{*this}; }
ValidFramesConsumer MultiFrame::GetValidFramesConsumer() { return ValidFramesConsumer{*this}; }
InvalidFramesPopulator MultiFrame::GetInvalidFramesPopulator() { return InvalidFramesPopulator{*this}; }
ValidFramesReader::ValidFramesReader(MultiFrame &multiframe) : multiframe_(&multiframe) {
/*
From: https://en.cppreference.com/w/cpp/algorithm/find
Returns an iterator to the first element in the range [first, last) that satisfies specific criteria:
find_if searches for an element for which predicate p returns true
Return value
Iterator to the first element satisfying the condition or last if no such element is found.
-> this is what we want. We want the frame "after" the last valid frame (whether this is vector::end or an invalid frame).
*/
auto it = std::find_if(multiframe.frames_.begin(), multiframe.frames_.end(),
[](const auto &frame) { return !frame.IsValid(); });
after_last_valid_frame_ = multiframe_->frames_.data() + std::distance(multiframe.frames_.begin(), it);
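// This assumes valid frames are contiguous at the front of the MultiFrame, so everything before `it` is valid and
// `after_last_valid_frame_` points one past the last valid frame.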
}
ValidFramesReader::Iterator ValidFramesReader::begin() {
if (multiframe_->frames_[0].IsValid()) {
return Iterator{&multiframe_->frames_[0]};
}
return end();
}
ValidFramesReader::Iterator ValidFramesReader::end() { return Iterator{after_last_valid_frame_}; }
ValidFramesModifier::ValidFramesModifier(MultiFrame &multiframe) : multiframe_(&multiframe) {}
ValidFramesModifier::Iterator ValidFramesModifier::begin() {
if (multiframe_->frames_[0].IsValid()) {
return Iterator{&multiframe_->frames_[0], *this};
}
return end();
}
ValidFramesModifier::Iterator ValidFramesModifier::end() {
return Iterator{multiframe_->frames_.data() + multiframe_->frames_.size(), *this};
}
ValidFramesConsumer::ValidFramesConsumer(MultiFrame &multiframe) : multiframe_(&multiframe) {}
// NOLINTNEXTLINE (bugprone-exception-escape)
ValidFramesConsumer::~ValidFramesConsumer() noexcept {
// TODO Possible optimisation: only DefragmentValidFrames if at least one frame has been invalidated? Only if it does
// not cost too much to store it.
multiframe_->DefragmentValidFrames();
}
ValidFramesConsumer::Iterator ValidFramesConsumer::begin() {
if (multiframe_->frames_[0].IsValid()) {
return Iterator{&multiframe_->frames_[0], *this};
}
return end();
}
ValidFramesConsumer::Iterator ValidFramesConsumer::end() {
return Iterator{multiframe_->frames_.data() + multiframe_->frames_.size(), *this};
}
InvalidFramesPopulator::InvalidFramesPopulator(MultiFrame &multiframe) : multiframe_(&multiframe) {}
InvalidFramesPopulator::Iterator InvalidFramesPopulator::begin() {
for (auto &frame : multiframe_->frames_) {
if (!frame.IsValid()) {
return Iterator{&frame};
}
}
return end();
}
InvalidFramesPopulator::Iterator InvalidFramesPopulator::end() {
return Iterator{multiframe_->frames_.data() + multiframe_->frames_.size()};
}
} // namespace memgraph::query::v2

302
src/query/v2/multiframe.hpp Normal file

@ -0,0 +1,302 @@
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#pragma once
#include <iterator>
#include "query/v2/bindings/frame.hpp"
namespace memgraph::query::v2 {
constexpr uint64_t kNumberOfFramesInMultiframe = 1000;  // TODO: make this configurable
class ValidFramesConsumer;
class ValidFramesModifier;
class ValidFramesReader;
class InvalidFramesPopulator;
class MultiFrame {
public:
friend class ValidFramesConsumer;
friend class ValidFramesModifier;
friend class ValidFramesReader;
friend class InvalidFramesPopulator;
MultiFrame(int64_t size_of_frame, size_t number_of_frames, utils::MemoryResource *execution_memory);
~MultiFrame() = default;
MultiFrame(const MultiFrame &other);
MultiFrame(MultiFrame &&other) noexcept;
MultiFrame &operator=(const MultiFrame &other) = delete;
MultiFrame &operator=(MultiFrame &&other) noexcept = delete;
/*
* Returns an object on which one can iterate in a for-loop. By doing so, you will only get Frames that are in a valid
* state in the MultiFrame.
* Iteration goes in a deterministic order.
* Neither the validity of the Frame nor its content can be modified through this implementation.
*/
ValidFramesReader GetValidFramesReader();
/*
* Returns an object on which one can iterate in a for-loop. By doing so, you will only get Frames that are in a valid
* state in the MultiFrame.
* Iteration goes in a deterministic order.
* One can't modify the validity of the Frame with this implementation, but one can modify its content.
*/
ValidFramesModifier GetValidFramesModifier();
/*
* Returns an object on which one can iterate in a for-loop. By doing so, you will only get Frames that are in a valid
* state in the MultiFrame.
* Iteration goes in a deterministic order.
* One can modify the validity of the Frame with this implementation.
* If you do not plan to modify the validity of the Frames, use GetValidFramesReader/GetValidFramesModifier instead,
* as this is faster.
*/
ValidFramesConsumer GetValidFramesConsumer();
/*
* Returns an object on which one can iterate in a for-loop. By doing so, you will only get Frames that are in an
* invalid state in the MultiFrame. Iteration goes in a deterministic order. One can modify the validity of
* the Frame with this implementation.
*/
InvalidFramesPopulator GetInvalidFramesPopulator();
/**
* Return the first Frame of the MultiFrame. This is only meant to be used in very specific cases. Please consider
* using the iterators instead.
* The Frame can be valid or invalid.
*/
FrameWithValidity &GetFirstFrame();
void MakeAllFramesInvalid() noexcept;
bool HasValidFrame() const noexcept;
inline utils::MemoryResource *GetMemoryResource() { return frames_[0].GetMemoryResource(); }
private:
void DefragmentValidFrames() noexcept;
utils::pmr::vector<FrameWithValidity> frames_;
};
class ValidFramesReader {
public:
explicit ValidFramesReader(MultiFrame &multiframe);
~ValidFramesReader() = default;
ValidFramesReader(const ValidFramesReader &other) = delete;
ValidFramesReader(ValidFramesReader &&other) noexcept = delete;
ValidFramesReader &operator=(const ValidFramesReader &other) = delete;
ValidFramesReader &operator=(ValidFramesReader &&other) noexcept = delete;
struct Iterator {
using iterator_category = std::forward_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = const Frame;
using pointer = value_type *;
using reference = const Frame &;
Iterator() = default;
explicit Iterator(FrameWithValidity *ptr) : ptr_(ptr) {}
reference operator*() const { return *ptr_; }
pointer operator->() { return ptr_; }
Iterator &operator++() {
ptr_++;
return *this;
}
// NOLINTNEXTLINE(cert-dcl21-cpp)
Iterator operator++(int) {
auto old = *this;
ptr_++;
return old;
}
friend bool operator==(const Iterator &lhs, const Iterator &rhs) { return lhs.ptr_ == rhs.ptr_; };
friend bool operator!=(const Iterator &lhs, const Iterator &rhs) { return lhs.ptr_ != rhs.ptr_; };
private:
FrameWithValidity *ptr_{nullptr};
};
Iterator begin();
Iterator end();
private:
FrameWithValidity *after_last_valid_frame_;
MultiFrame *multiframe_;
};
class ValidFramesModifier {
public:
explicit ValidFramesModifier(MultiFrame &multiframe);
~ValidFramesModifier() = default;
ValidFramesModifier(const ValidFramesModifier &other) = delete;
ValidFramesModifier(ValidFramesModifier &&other) noexcept = delete;
ValidFramesModifier &operator=(const ValidFramesModifier &other) = delete;
ValidFramesModifier &operator=(ValidFramesModifier &&other) noexcept = delete;
struct Iterator {
using iterator_category = std::forward_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = Frame;
using pointer = value_type *;
using reference = Frame &;
Iterator() = default;
Iterator(FrameWithValidity *ptr, ValidFramesModifier &iterator_wrapper)
: ptr_(ptr), iterator_wrapper_(&iterator_wrapper) {}
reference operator*() const { return *ptr_; }
pointer operator->() { return ptr_; }
// Prefix increment
Iterator &operator++() {
do {
ptr_++;
} while (*this != iterator_wrapper_->end() && !ptr_->IsValid());
return *this;
}
// NOLINTNEXTLINE(cert-dcl21-cpp)
Iterator operator++(int) {
auto old = *this;
++*this;
return old;
}
friend bool operator==(const Iterator &lhs, const Iterator &rhs) { return lhs.ptr_ == rhs.ptr_; };
friend bool operator!=(const Iterator &lhs, const Iterator &rhs) { return lhs.ptr_ != rhs.ptr_; };
private:
FrameWithValidity *ptr_{nullptr};
ValidFramesModifier *iterator_wrapper_{nullptr};
};
Iterator begin();
Iterator end();
private:
MultiFrame *multiframe_;
};
class ValidFramesConsumer {
public:
explicit ValidFramesConsumer(MultiFrame &multiframe);
~ValidFramesConsumer() noexcept;
ValidFramesConsumer(const ValidFramesConsumer &other) = delete;
ValidFramesConsumer(ValidFramesConsumer &&other) noexcept = delete;
ValidFramesConsumer &operator=(const ValidFramesConsumer &other) = delete;
ValidFramesConsumer &operator=(ValidFramesConsumer &&other) noexcept = delete;
struct Iterator {
using iterator_category = std::forward_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = FrameWithValidity;
using pointer = value_type *;
using reference = FrameWithValidity &;
Iterator() = default;
Iterator(FrameWithValidity *ptr, ValidFramesConsumer &iterator_wrapper)
: ptr_(ptr), iterator_wrapper_(&iterator_wrapper) {}
reference operator*() const { return *ptr_; }
pointer operator->() { return ptr_; }
Iterator &operator++() {
do {
ptr_++;
} while (*this != iterator_wrapper_->end() && !ptr_->IsValid());
return *this;
}
// NOLINTNEXTLINE(cert-dcl21-cpp)
Iterator operator++(int) {
auto old = *this;
++*this;
return old;
}
friend bool operator==(const Iterator &lhs, const Iterator &rhs) { return lhs.ptr_ == rhs.ptr_; };
friend bool operator!=(const Iterator &lhs, const Iterator &rhs) { return lhs.ptr_ != rhs.ptr_; };
private:
FrameWithValidity *ptr_{nullptr};
ValidFramesConsumer *iterator_wrapper_{nullptr};
};
Iterator begin();
Iterator end();
private:
MultiFrame *multiframe_;
};
class InvalidFramesPopulator {
public:
explicit InvalidFramesPopulator(MultiFrame &multiframe);
~InvalidFramesPopulator() = default;
InvalidFramesPopulator(const InvalidFramesPopulator &other) = delete;
InvalidFramesPopulator(InvalidFramesPopulator &&other) noexcept = delete;
InvalidFramesPopulator &operator=(const InvalidFramesPopulator &other) = delete;
InvalidFramesPopulator &operator=(InvalidFramesPopulator &&other) noexcept = delete;
struct Iterator {
using iterator_category = std::forward_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = FrameWithValidity;
using pointer = value_type *;
using reference = FrameWithValidity &;
Iterator() = default;
explicit Iterator(FrameWithValidity *ptr) : ptr_(ptr) {}
reference operator*() const { return *ptr_; }
pointer operator->() { return ptr_; }
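// Advancing marks the current frame as valid: the populator hands out invalid frames and the caller fills each
// one in before incrementing the iterator.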
Iterator &operator++() {
ptr_->MakeValid();
ptr_++;
return *this;
}
// NOLINTNEXTLINE(cert-dcl21-cpp)
Iterator operator++(int) {
auto old = *this;
++ptr_;
return old;
}
friend bool operator==(const Iterator &lhs, const Iterator &rhs) { return lhs.ptr_ == rhs.ptr_; };
friend bool operator!=(const Iterator &lhs, const Iterator &rhs) { return lhs.ptr_ != rhs.ptr_; };
private:
FrameWithValidity *ptr_{nullptr};
};
Iterator begin();
Iterator end();
private:
MultiFrame *multiframe_;
};
} // namespace memgraph::query::v2
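// Illustrative usage sketch (editor's addition, not part of this change): a single pass over a MultiFrame using the
// wrappers declared above. `fill_row` and `handle_row` are hypothetical stand-ins for operator-specific work.
namespace memgraph::query::v2::example {
template <typename TFill, typename THandle>
void MultiFramePass(MultiFrame &mf, TFill &&fill_row, THandle &&handle_row) {
  // Fill every currently invalid slot; advancing the populator marks each filled frame as valid.
  for (auto &frame : mf.GetInvalidFramesPopulator()) {
    fill_row(frame);
  }
  // Read the valid frames; the reader exposes const Frame&, so neither validity nor content can change here.
  for (const auto &frame : mf.GetValidFramesReader()) {
    handle_row(frame);
  }
  // Invalidate the frames once processed; the consumer defragments the MultiFrame when it goes out of scope.
  for (auto &frame : mf.GetValidFramesConsumer()) {
    frame.MakeInvalid();
  }
}
}  // namespace memgraph::query::v2::example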


@ -26,6 +26,7 @@
#include <cppitertools/chain.hpp>
#include <cppitertools/imap.hpp>
#include "common/errors.hpp"
#include "expr/ast/pretty_print_ast_to_original_expression.hpp"
#include "expr/exceptions.hpp"
#include "query/exceptions.hpp"
@ -38,8 +39,8 @@
#include "query/v2/frontend/ast/ast.hpp"
#include "query/v2/path.hpp"
#include "query/v2/plan/scoped_profile.hpp"
#include "query/v2/request_router.hpp"
#include "query/v2/requests.hpp"
#include "query/v2/shard_request_manager.hpp"
#include "storage/v3/conversions.hpp"
#include "storage/v3/property_value.hpp"
#include "utils/algorithm.hpp"
@ -176,10 +177,10 @@ class DistributedCreateNodeCursor : public Cursor {
bool Pull(Frame &frame, ExecutionContext &context) override {
SCOPED_PROFILE_OP("CreateNode");
if (input_cursor_->Pull(frame, context)) {
auto &shard_manager = context.shard_request_manager;
auto &request_router = context.request_router;
{
SCOPED_REQUEST_WAIT_PROFILE;
shard_manager->Request(state_, NodeCreationInfoToRequest(context, frame));
request_router->CreateVertices(NodeCreationInfoToRequest(context, frame));
}
PlaceNodeOnTheFrame(frame, context);
return true;
@ -190,14 +191,14 @@ class DistributedCreateNodeCursor : public Cursor {
void Shutdown() override { input_cursor_->Shutdown(); }
void Reset() override { state_ = {}; }
void Reset() override {}
void PlaceNodeOnTheFrame(Frame &frame, ExecutionContext &context) {
// TODO(kostasrim) Make this work with batching
const auto primary_label = msgs::Label{.id = nodes_info_[0]->labels[0]};
msgs::Vertex v{.id = std::make_pair(primary_label, primary_keys_[0])};
frame[nodes_info_.front()->symbol] = TypedValue(
query::v2::accessors::VertexAccessor(std::move(v), src_vertex_props_[0], context.shard_request_manager));
frame[nodes_info_.front()->symbol] =
TypedValue(query::v2::accessors::VertexAccessor(std::move(v), src_vertex_props_[0], context.request_router));
}
std::vector<msgs::NewVertex> NodeCreationInfoToRequest(ExecutionContext &context, Frame &frame) {
@ -217,7 +218,7 @@ class DistributedCreateNodeCursor : public Cursor {
if (const auto *node_info_properties = std::get_if<PropertiesMapList>(&node_info->properties)) {
for (const auto &[key, value_expression] : *node_info_properties) {
TypedValue val = value_expression->Accept(evaluator);
if (context.shard_request_manager->IsPrimaryKey(primary_label, key)) {
if (context.request_router->IsPrimaryKey(primary_label, key)) {
rqst.primary_key.push_back(TypedValueToValue(val));
pk.push_back(TypedValueToValue(val));
}
@ -226,8 +227,8 @@ class DistributedCreateNodeCursor : public Cursor {
auto property_map = evaluator.Visit(*std::get<ParameterLookup *>(node_info->properties)).ValueMap();
for (const auto &[key, value] : property_map) {
auto key_str = std::string(key);
auto property_id = context.shard_request_manager->NameToProperty(key_str);
if (context.shard_request_manager->IsPrimaryKey(primary_label, property_id)) {
auto property_id = context.request_router->NameToProperty(key_str);
if (context.request_router->IsPrimaryKey(primary_label, property_id)) {
rqst.primary_key.push_back(TypedValueToValue(value));
pk.push_back(TypedValueToValue(value));
}
@ -251,7 +252,6 @@ class DistributedCreateNodeCursor : public Cursor {
std::vector<const NodeCreationInfo *> nodes_info_;
std::vector<std::vector<std::pair<storage::v3::PropertyId, msgs::Value>>> src_vertex_props_;
std::vector<msgs::PrimaryKey> primary_keys_;
msgs::ExecutionState<msgs::CreateVerticesRequest> state_;
};
bool Once::OnceCursor::Pull(Frame &, ExecutionContext &context) {
@ -264,6 +264,16 @@ bool Once::OnceCursor::Pull(Frame &, ExecutionContext &context) {
return false;
}
void Once::OnceCursor::PullMultiple(MultiFrame &multi_frame, ExecutionContext &context) {
SCOPED_PROFILE_OP("OnceMF");
if (!did_pull_) {
auto &first_frame = multi_frame.GetFirstFrame();
first_frame.MakeValid();
did_pull_ = true;
}
}
UniqueCursorPtr Once::MakeCursor(utils::MemoryResource *mem) const {
EventCounter::IncrementCounter(EventCounter::OnceOperator);
@ -364,7 +374,6 @@ class ScanAllCursor : public Cursor {
std::optional<decltype(vertices_.value().begin())> vertices_it_;
const char *op_name_;
std::vector<msgs::ScanVerticesResponse> current_batch;
msgs::ExecutionState<msgs::ScanVerticesRequest> request_state;
};
class DistributedScanAllAndFilterCursor : public Cursor {
@ -383,37 +392,41 @@ class DistributedScanAllAndFilterCursor : public Cursor {
ResetExecutionState();
}
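// INITIALIZING: the cursor still has to pull from its input and issue its single ScanVertices request;
// COMPLETED: the response has been received and is being drained.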
enum class State : int8_t { INITIALIZING, COMPLETED };
using VertexAccessor = accessors::VertexAccessor;
bool MakeRequest(msgs::ShardRequestManagerInterface &shard_manager, ExecutionContext &context) {
bool MakeRequest(RequestRouterInterface &request_router, ExecutionContext &context) {
{
SCOPED_REQUEST_WAIT_PROFILE;
current_batch = shard_manager.Request(request_state_);
std::optional<std::string> request_label = std::nullopt;
if (label_.has_value()) {
request_label = request_router.LabelToName(*label_);
}
current_batch = request_router.ScanVertices(request_label);
}
current_vertex_it = current_batch.begin();
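// The cursor treats the single ScanVertices call as returning the complete result, so the request phase is done
// after one round trip.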
request_state_ = State::COMPLETED;
return !current_batch.empty();
}
bool Pull(Frame &frame, ExecutionContext &context) override {
SCOPED_PROFILE_OP(op_name_);
auto &shard_manager = *context.shard_request_manager;
auto &request_router = *context.request_router;
while (true) {
if (MustAbort(context)) {
throw HintedAbortError();
}
using State = msgs::ExecutionState<msgs::ScanVerticesRequest>;
if (request_state_.state == State::INITIALIZING) {
if (request_state_ == State::INITIALIZING) {
if (!input_cursor_->Pull(frame, context)) {
return false;
}
}
request_state_.label = label_.has_value() ? std::make_optional(shard_manager.LabelToName(*label_)) : std::nullopt;
if (current_vertex_it == current_batch.end() &&
(request_state_.state == State::COMPLETED || !MakeRequest(shard_manager, context))) {
(request_state_ == State::COMPLETED || !MakeRequest(request_router, context))) {
ResetExecutionState();
continue;
}
@ -429,7 +442,7 @@ class DistributedScanAllAndFilterCursor : public Cursor {
void ResetExecutionState() {
current_batch.clear();
current_vertex_it = current_batch.end();
request_state_ = msgs::ExecutionState<msgs::ScanVerticesRequest>{};
request_state_ = State::INITIALIZING;
}
void Reset() override {
@ -443,7 +456,7 @@ class DistributedScanAllAndFilterCursor : public Cursor {
const char *op_name_;
std::vector<VertexAccessor> current_batch;
std::vector<VertexAccessor>::iterator current_vertex_it;
msgs::ExecutionState<msgs::ScanVerticesRequest> request_state_;
State request_state_ = State::INITIALIZING;
std::optional<storage::v3::LabelId> label_;
std::optional<std::pair<storage::v3::PropertyId, Expression *>> property_expression_pair_;
std::optional<std::vector<Expression *>> filter_expressions_;
@ -559,27 +572,6 @@ UniqueCursorPtr ScanAllByPrimaryKey::MakeCursor(utils::MemoryResource * /*mem*/)
throw QueryRuntimeException("ScanAllByPrimaryKey cursur is yet to be implemented.");
}
namespace {
template <class TEdges>
auto UnwrapEdgesResult(storage::v3::Result<TEdges> &&result) {
if (result.HasError()) {
switch (result.GetError()) {
case storage::v3::Error::DELETED_OBJECT:
throw QueryRuntimeException("Trying to get relationships of a deleted node.");
case storage::v3::Error::NONEXISTENT_OBJECT:
throw query::v2::QueryRuntimeException("Trying to get relationships from a node that doesn't exist.");
case storage::v3::Error::VERTEX_HAS_EDGES:
case storage::v3::Error::SERIALIZATION_ERROR:
case storage::v3::Error::PROPERTIES_DISABLED:
throw QueryRuntimeException("Unexpected error when accessing relationships.");
}
}
return std::move(*result);
}
} // namespace
Expand::Expand(const std::shared_ptr<LogicalOperator> &input, Symbol input_symbol, Symbol node_symbol,
Symbol edge_symbol, EdgeAtom::Direction direction,
const std::vector<storage::v3::EdgeTypeId> &edge_types, bool existing_node, storage::v3::View view)
@ -712,7 +704,7 @@ bool Filter::FilterCursor::Pull(Frame &frame, ExecutionContext &context) {
// Like all filters, newly set values should not affect filtering of old
// nodes and edges.
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.shard_request_manager,
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.request_router,
storage::v3::View::OLD);
while (input_cursor_->Pull(frame, context)) {
if (EvaluateFilter(evaluator, self_.expression_)) return true;
@ -753,8 +745,8 @@ bool Produce::ProduceCursor::Pull(Frame &frame, ExecutionContext &context) {
if (input_cursor_->Pull(frame, context)) {
// Produce should always yield the latest results.
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context,
context.shard_request_manager, storage::v3::View::NEW);
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.request_router,
storage::v3::View::NEW);
for (auto named_expr : self_.named_expressions_) named_expr->Accept(evaluator);
return true;
@ -762,6 +754,23 @@ bool Produce::ProduceCursor::Pull(Frame &frame, ExecutionContext &context) {
return false;
}
void Produce::ProduceCursor::PullMultiple(MultiFrame &multi_frame, ExecutionContext &context) {
SCOPED_PROFILE_OP("ProduceMF");
input_cursor_->PullMultiple(multi_frame, context);
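// Evaluate the named expressions on every frame the input left valid; the evaluator writes the results back into
// those same frames.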
auto iterator_for_valid_frame_only = multi_frame.GetValidFramesModifier();
for (auto &frame : iterator_for_valid_frame_only) {
// Produce should always yield the latest results.
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.request_router,
storage::v3::View::NEW);
for (auto *named_expr : self_.named_expressions_) {
named_expr->Accept(evaluator);
}
}
};
void Produce::ProduceCursor::Shutdown() { input_cursor_->Shutdown(); }
void Produce::ProduceCursor::Reset() { input_cursor_->Reset(); }
@ -832,45 +841,9 @@ std::vector<Symbol> SetProperties::ModifiedSymbols(const SymbolTable &table) con
SetProperties::SetPropertiesCursor::SetPropertiesCursor(const SetProperties &self, utils::MemoryResource *mem)
: self_(self), input_cursor_(self.input_->MakeCursor(mem)) {}
namespace {
template <typename T>
concept AccessorWithProperties = requires(T value, storage::v3::PropertyId property_id,
storage::v3::PropertyValue property_value) {
{
value.ClearProperties()
} -> std::same_as<storage::v3::Result<std::map<storage::v3::PropertyId, storage::v3::PropertyValue>>>;
{value.SetProperty(property_id, property_value)};
};
} // namespace
bool SetProperties::SetPropertiesCursor::Pull(Frame &frame, ExecutionContext &context) {
SCOPED_PROFILE_OP("SetProperties");
return false;
// if (!input_cursor_->Pull(frame, context)) return false;
//
// TypedValue &lhs = frame[self_.input_symbol_];
//
// // Set, just like Create needs to see the latest changes.
// ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.db_accessor,
// storage::v3::View::NEW);
// TypedValue rhs = self_.rhs_->Accept(evaluator);
//
// switch (lhs.type()) {
// case TypedValue::Type::Vertex:
// SetPropertiesOnRecord(&lhs.ValueVertex(), rhs, self_.op_, &context);
// break;
// case TypedValue::Type::Edge:
// SetPropertiesOnRecord(&lhs.ValueEdge(), rhs, self_.op_, &context);
// break;
// case TypedValue::Type::Null:
// // Skip setting properties on Null (can occur in optional match).
// break;
// default:
// throw QueryRuntimeException("Properties can only be set on edges and vertices.");
// }
// return true;
}
void SetProperties::SetPropertiesCursor::Shutdown() { input_cursor_->Shutdown(); }
@ -1174,8 +1147,8 @@ class AggregateCursor : public Cursor {
* aggregation results, and not on the number of inputs.
*/
void ProcessAll(Frame *frame, ExecutionContext *context) {
ExpressionEvaluator evaluator(frame, context->symbol_table, context->evaluation_context,
context->shard_request_manager, storage::v3::View::NEW);
ExpressionEvaluator evaluator(frame, context->symbol_table, context->evaluation_context, context->request_router,
storage::v3::View::NEW);
while (input_cursor_->Pull(*frame, *context)) {
ProcessOne(*frame, &evaluator);
}
@ -1395,8 +1368,8 @@ bool Skip::SkipCursor::Pull(Frame &frame, ExecutionContext &context) {
// First successful pull from the input, evaluate the skip expression.
// The skip expression doesn't contain identifiers so graph view
// parameter is not important.
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context,
context.shard_request_manager, storage::v3::View::OLD);
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.request_router,
storage::v3::View::OLD);
TypedValue to_skip = self_.expression_->Accept(evaluator);
if (to_skip.type() != TypedValue::Type::Int)
throw QueryRuntimeException("Number of elements to skip must be an integer.");
@ -1450,8 +1423,8 @@ bool Limit::LimitCursor::Pull(Frame &frame, ExecutionContext &context) {
if (limit_ == -1) {
// Limit expression doesn't contain identifiers so graph view is not
// important.
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context,
context.shard_request_manager, storage::v3::View::OLD);
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.request_router,
storage::v3::View::OLD);
TypedValue limit = self_.expression_->Accept(evaluator);
if (limit.type() != TypedValue::Type::Int)
throw QueryRuntimeException("Limit on number of returned elements must be an integer.");
@ -1506,8 +1479,8 @@ class OrderByCursor : public Cursor {
SCOPED_PROFILE_OP("OrderBy");
if (!did_pull_all_) {
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context,
context.shard_request_manager, storage::v3::View::OLD);
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.request_router,
storage::v3::View::OLD);
auto *mem = cache_.get_allocator().GetMemoryResource();
while (input_cursor_->Pull(frame, context)) {
// collect the order_by elements
@ -1764,8 +1737,8 @@ class UnwindCursor : public Cursor {
if (!input_cursor_->Pull(frame, context)) return false;
// successful pull from input, initialize value and iterator
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context,
context.shard_request_manager, storage::v3::View::OLD);
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.request_router,
storage::v3::View::OLD);
TypedValue input_value = self_.input_expression_->Accept(evaluator);
if (input_value.type() != TypedValue::Type::List)
throw QueryRuntimeException("Argument of UNWIND must be a list, but '{}' was provided.", input_value.type());
@ -2276,7 +2249,7 @@ class LoadCsvCursor : public Cursor {
Frame frame(0);
SymbolTable symbol_table;
auto evaluator =
ExpressionEvaluator(&frame, symbol_table, eval_context, context.shard_request_manager, storage::v3::View::OLD);
ExpressionEvaluator(&frame, symbol_table, eval_context, context.request_router, storage::v3::View::OLD);
auto maybe_file = ToOptionalString(&evaluator, self_->file_);
auto maybe_delim = ToOptionalString(&evaluator, self_->delimiter_);
@ -2313,8 +2286,8 @@ class ForeachCursor : public Cursor {
return false;
}
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context,
context.shard_request_manager, storage::v3::View::NEW);
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.request_router,
storage::v3::View::NEW);
TypedValue expr_result = expression->Accept(evaluator);
if (expr_result.IsNull()) {
@ -2390,11 +2363,11 @@ class DistributedCreateExpandCursor : public Cursor {
if (!input_cursor_->Pull(frame, context)) {
return false;
}
auto &shard_manager = context.shard_request_manager;
auto &request_router = context.request_router;
ResetExecutionState();
{
SCOPED_REQUEST_WAIT_PROFILE;
shard_manager->Request(state_, ExpandCreationInfoToRequest(context, frame));
request_router->CreateExpand(ExpandCreationInfoToRequest(context, frame));
}
return true;
}
@ -2431,7 +2404,7 @@ class DistributedCreateExpandCursor : public Cursor {
// handle parameter
auto property_map = evaluator.Visit(*std::get<ParameterLookup *>(edge_info.properties)).ValueMap();
for (const auto &[property, value] : property_map) {
const auto property_id = context.shard_request_manager->NameToProperty(std::string(property));
const auto property_id = context.request_router->NameToProperty(std::string(property));
request.properties.emplace_back(property_id, storage::v3::TypedValueToValue(value));
}
}
@ -2446,7 +2419,7 @@ class DistributedCreateExpandCursor : public Cursor {
const auto set_vertex = [&context](const auto &vertex, auto &vertex_id) {
vertex_id.first = vertex.PrimaryLabel();
for (const auto &[key, val] : vertex.Properties()) {
if (context.shard_request_manager->IsPrimaryKey(vertex_id.first.id, key)) {
if (context.request_router->IsPrimaryKey(vertex_id.first.id, key)) {
vertex_id.second.push_back(val);
}
}
@ -2474,11 +2447,10 @@ class DistributedCreateExpandCursor : public Cursor {
}
private:
void ResetExecutionState() { state_ = {}; }
void ResetExecutionState() {}
const UniqueCursorPtr input_cursor_;
const CreateExpand &self_;
msgs::ExecutionState<msgs::CreateExpandRequest> state_;
};
class DistributedExpandCursor : public Cursor {
@ -2525,12 +2497,11 @@ class DistributedExpandCursor : public Cursor {
request.edge_properties.emplace();
request.src_vertices.push_back(get_dst_vertex(edge, direction));
request.direction = (direction == EdgeAtom::Direction::IN) ? msgs::EdgeDirection::OUT : msgs::EdgeDirection::IN;
msgs::ExecutionState<msgs::ExpandOneRequest> request_state;
auto result_rows = context.shard_request_manager->Request(request_state, std::move(request));
auto result_rows = context.request_router->ExpandOne(std::move(request));
MG_ASSERT(result_rows.size() == 1);
auto &result_row = result_rows.front();
frame[self_.common_.node_symbol] = accessors::VertexAccessor(
msgs::Vertex{result_row.src_vertex}, result_row.src_vertex_properties, context.shard_request_manager);
msgs::Vertex{result_row.src_vertex}, result_row.src_vertex_properties, context.request_router);
}
bool InitEdges(Frame &frame, ExecutionContext &context) {
@ -2551,10 +2522,9 @@ class DistributedExpandCursor : public Cursor {
// to not fetch any properties of the edges
request.edge_properties.emplace();
request.src_vertices.push_back(vertex.Id());
msgs::ExecutionState<msgs::ExpandOneRequest> request_state;
auto result_rows = std::invoke([&context, &request_state, &request]() mutable {
auto result_rows = std::invoke([&context, &request]() mutable {
SCOPED_REQUEST_WAIT_PROFILE;
return context.shard_request_manager->Request(request_state, std::move(request));
return context.request_router->ExpandOne(std::move(request));
});
MG_ASSERT(result_rows.size() == 1);
auto &result_row = result_rows.front();
@ -2577,14 +2547,14 @@ class DistributedExpandCursor : public Cursor {
case EdgeAtom::Direction::IN: {
for (auto &edge : edge_messages) {
edge_accessors.emplace_back(msgs::Edge{std::move(edge.other_end), vertex.Id(), {}, {edge.gid}, edge.type},
context.shard_request_manager);
context.request_router);
}
break;
}
case EdgeAtom::Direction::OUT: {
for (auto &edge : edge_messages) {
edge_accessors.emplace_back(msgs::Edge{vertex.Id(), std::move(edge.other_end), {}, {edge.gid}, edge.type},
context.shard_request_manager);
context.request_router);
}
break;
}

View File

@ -28,6 +28,7 @@
#include "query/v2/bindings/typed_value.hpp"
#include "query/v2/bindings/frame.hpp"
#include "query/v2/bindings/symbol_table.hpp"
#include "query/v2/multiframe.hpp"
#include "storage/v3/id_types.hpp"
#include "utils/bound.hpp"
#include "utils/fnv.hpp"
@ -71,6 +72,8 @@ class Cursor {
/// @throws QueryRuntimeException if something went wrong with execution
virtual bool Pull(Frame &, ExecutionContext &) = 0;
virtual void PullMultiple(MultiFrame &, ExecutionContext &) { LOG_FATAL("PullMultiple is not implemented"); }
/// Resets the Cursor to its initial state.
virtual void Reset() = 0;
@ -332,6 +335,7 @@ and false on every following Pull.")
class OnceCursor : public Cursor {
public:
OnceCursor() {}
void PullMultiple(MultiFrame &, ExecutionContext &) override;
bool Pull(Frame &, ExecutionContext &) override;
void Shutdown() override;
void Reset() override;
@ -1210,6 +1214,7 @@ RETURN clause) the Produce's pull succeeds exactly once.")
public:
ProduceCursor(const Produce &, utils::MemoryResource *);
bool Pull(Frame &, ExecutionContext &) override;
void PullMultiple(MultiFrame &, ExecutionContext &) override;
void Shutdown() override;
void Reset() override;

View File

@ -398,7 +398,7 @@ void Filters::AnalyzeAndStoreFilter(Expression *expr, const SymbolTable &symbol_
auto add_id_equal = [&](auto *maybe_id_fun, auto *val_expr) -> bool {
auto *id_fun = utils::Downcast<Function>(maybe_id_fun);
if (!id_fun) return false;
if (id_fun->function_name_ != kId) return false;
if (id_fun->function_name_ != functions::kId) return false;
if (id_fun->arguments_.size() != 1U) return false;
auto *ident = utils::Downcast<Identifier>(id_fun->arguments_.front());
if (!ident) return false;

View File

@ -14,13 +14,13 @@
#include "query/v2/bindings/pretty_print.hpp"
#include "query/v2/db_accessor.hpp"
#include "query/v2/shard_request_manager.hpp"
#include "query/v2/request_router.hpp"
#include "utils/string.hpp"
namespace memgraph::query::v2::plan {
PlanPrinter::PlanPrinter(const msgs::ShardRequestManagerInterface *request_manager, std::ostream *out)
: request_manager_(request_manager), out_(out) {}
PlanPrinter::PlanPrinter(const RequestRouterInterface *request_router, std::ostream *out)
: request_router_(request_router), out_(out) {}
#define PRE_VISIT(TOp) \
bool PlanPrinter::PreVisit(TOp &) { \
@ -34,7 +34,7 @@ bool PlanPrinter::PreVisit(CreateExpand &op) {
WithPrintLn([&](auto &out) {
out << "* CreateExpand (" << op.input_symbol_.name() << ")"
<< (op.edge_info_.direction == query::v2::EdgeAtom::Direction::IN ? "<-" : "-") << "["
<< op.edge_info_.symbol.name() << ":" << request_manager_->EdgeTypeToName(op.edge_info_.edge_type) << "]"
<< op.edge_info_.symbol.name() << ":" << request_router_->EdgeTypeToName(op.edge_info_.edge_type) << "]"
<< (op.edge_info_.direction == query::v2::EdgeAtom::Direction::OUT ? "->" : "-") << "("
<< op.node_info_.symbol.name() << ")";
});
@ -54,7 +54,7 @@ bool PlanPrinter::PreVisit(query::v2::plan::ScanAll &op) {
bool PlanPrinter::PreVisit(query::v2::plan::ScanAllByLabel &op) {
WithPrintLn([&](auto &out) {
out << "* ScanAllByLabel"
<< " (" << op.output_symbol_.name() << " :" << request_manager_->LabelToName(op.label_) << ")";
<< " (" << op.output_symbol_.name() << " :" << request_router_->LabelToName(op.label_) << ")";
});
return true;
}
@ -62,8 +62,8 @@ bool PlanPrinter::PreVisit(query::v2::plan::ScanAllByLabel &op) {
bool PlanPrinter::PreVisit(query::v2::plan::ScanAllByLabelPropertyValue &op) {
WithPrintLn([&](auto &out) {
out << "* ScanAllByLabelPropertyValue"
<< " (" << op.output_symbol_.name() << " :" << request_manager_->LabelToName(op.label_) << " {"
<< request_manager_->PropertyToName(op.property_) << "})";
<< " (" << op.output_symbol_.name() << " :" << request_router_->LabelToName(op.label_) << " {"
<< request_router_->PropertyToName(op.property_) << "})";
});
return true;
}
@ -71,8 +71,8 @@ bool PlanPrinter::PreVisit(query::v2::plan::ScanAllByLabelPropertyValue &op) {
bool PlanPrinter::PreVisit(query::v2::plan::ScanAllByLabelPropertyRange &op) {
WithPrintLn([&](auto &out) {
out << "* ScanAllByLabelPropertyRange"
<< " (" << op.output_symbol_.name() << " :" << request_manager_->LabelToName(op.label_) << " {"
<< request_manager_->PropertyToName(op.property_) << "})";
<< " (" << op.output_symbol_.name() << " :" << request_router_->LabelToName(op.label_) << " {"
<< request_router_->PropertyToName(op.property_) << "})";
});
return true;
}
@ -80,8 +80,8 @@ bool PlanPrinter::PreVisit(query::v2::plan::ScanAllByLabelPropertyRange &op) {
bool PlanPrinter::PreVisit(query::v2::plan::ScanAllByLabelProperty &op) {
WithPrintLn([&](auto &out) {
out << "* ScanAllByLabelProperty"
<< " (" << op.output_symbol_.name() << " :" << request_manager_->LabelToName(op.label_) << " {"
<< request_manager_->PropertyToName(op.property_) << "})";
<< " (" << op.output_symbol_.name() << " :" << request_router_->LabelToName(op.label_) << " {"
<< request_router_->PropertyToName(op.property_) << "})";
});
return true;
}
@ -100,7 +100,7 @@ bool PlanPrinter::PreVisit(query::v2::plan::Expand &op) {
<< (op.common_.direction == query::v2::EdgeAtom::Direction::IN ? "<-" : "-") << "["
<< op.common_.edge_symbol.name();
utils::PrintIterable(*out_, op.common_.edge_types, "|", [this](auto &stream, const auto &edge_type) {
stream << ":" << request_manager_->EdgeTypeToName(edge_type);
stream << ":" << request_router_->EdgeTypeToName(edge_type);
});
*out_ << "]" << (op.common_.direction == query::v2::EdgeAtom::Direction::OUT ? "->" : "-") << "("
<< op.common_.node_symbol.name() << ")";
@ -129,7 +129,7 @@ bool PlanPrinter::PreVisit(query::v2::plan::ExpandVariable &op) {
<< (op.common_.direction == query::v2::EdgeAtom::Direction::IN ? "<-" : "-") << "["
<< op.common_.edge_symbol.name();
utils::PrintIterable(*out_, op.common_.edge_types, "|", [this](auto &stream, const auto &edge_type) {
stream << ":" << request_manager_->EdgeTypeToName(edge_type);
stream << ":" << request_router_->EdgeTypeToName(edge_type);
});
*out_ << "]" << (op.common_.direction == query::v2::EdgeAtom::Direction::OUT ? "->" : "-") << "("
<< op.common_.node_symbol.name() << ")";
@ -263,15 +263,14 @@ void PlanPrinter::Branch(query::v2::plan::LogicalOperator &op, const std::string
--depth_;
}
void PrettyPrint(const msgs::ShardRequestManagerInterface &request_manager, const LogicalOperator *plan_root,
std::ostream *out) {
PlanPrinter printer(&request_manager, out);
void PrettyPrint(const RequestRouterInterface &request_router, const LogicalOperator *plan_root, std::ostream *out) {
PlanPrinter printer(&request_router, out);
// FIXME(mtomic): We should make visitors that take const arguments.
const_cast<LogicalOperator *>(plan_root)->Accept(printer);
}
nlohmann::json PlanToJson(const msgs::ShardRequestManagerInterface &request_manager, const LogicalOperator *plan_root) {
impl::PlanToJsonVisitor visitor(&request_manager);
nlohmann::json PlanToJson(const RequestRouterInterface &request_router, const LogicalOperator *plan_root) {
impl::PlanToJsonVisitor visitor(&request_router);
// FIXME(mtomic): We should make visitors that take const arguments.
const_cast<LogicalOperator *>(plan_root)->Accept(visitor);
return visitor.output();
@ -349,16 +348,16 @@ json ToJson(const utils::Bound<Expression *> &bound) {
json ToJson(const Symbol &symbol) { return symbol.name(); }
json ToJson(storage::v3::EdgeTypeId edge_type, const msgs::ShardRequestManagerInterface &request_manager) {
return request_manager.EdgeTypeToName(edge_type);
json ToJson(storage::v3::EdgeTypeId edge_type, const RequestRouterInterface &request_router) {
return request_router.EdgeTypeToName(edge_type);
}
json ToJson(storage::v3::LabelId label, const msgs::ShardRequestManagerInterface &request_manager) {
return request_manager.LabelToName(label);
json ToJson(storage::v3::LabelId label, const RequestRouterInterface &request_router) {
return request_router.LabelToName(label);
}
json ToJson(storage::v3::PropertyId property, const msgs::ShardRequestManagerInterface &request_manager) {
return request_manager.PropertyToName(property);
json ToJson(storage::v3::PropertyId property, const RequestRouterInterface &request_router) {
return request_router.PropertyToName(property);
}
json ToJson(NamedExpression *nexpr) {
@ -369,29 +368,29 @@ json ToJson(NamedExpression *nexpr) {
}
json ToJson(const std::vector<std::pair<storage::v3::PropertyId, Expression *>> &properties,
const msgs::ShardRequestManagerInterface &request_manager) {
const RequestRouterInterface &request_router) {
json json;
for (const auto &prop_pair : properties) {
json.emplace(ToJson(prop_pair.first, request_manager), ToJson(prop_pair.second));
json.emplace(ToJson(prop_pair.first, request_router), ToJson(prop_pair.second));
}
return json;
}
json ToJson(const NodeCreationInfo &node_info, const msgs::ShardRequestManagerInterface &request_manager) {
json ToJson(const NodeCreationInfo &node_info, const RequestRouterInterface &request_router) {
json self;
self["symbol"] = ToJson(node_info.symbol);
self["labels"] = ToJson(node_info.labels, request_manager);
self["labels"] = ToJson(node_info.labels, request_router);
const auto *props = std::get_if<PropertiesMapList>(&node_info.properties);
self["properties"] = ToJson(props ? *props : PropertiesMapList{}, request_manager);
self["properties"] = ToJson(props ? *props : PropertiesMapList{}, request_router);
return self;
}
json ToJson(const EdgeCreationInfo &edge_info, const msgs::ShardRequestManagerInterface &request_manager) {
json ToJson(const EdgeCreationInfo &edge_info, const RequestRouterInterface &request_router) {
json self;
self["symbol"] = ToJson(edge_info.symbol);
const auto *props = std::get_if<PropertiesMapList>(&edge_info.properties);
self["properties"] = ToJson(props ? *props : PropertiesMapList{}, request_manager);
self["edge_type"] = ToJson(edge_info.edge_type, request_manager);
self["properties"] = ToJson(props ? *props : PropertiesMapList{}, request_router);
self["edge_type"] = ToJson(edge_info.edge_type, request_router);
self["direction"] = ToString(edge_info.direction);
return self;
}
@ -433,7 +432,7 @@ bool PlanToJsonVisitor::PreVisit(ScanAll &op) {
bool PlanToJsonVisitor::PreVisit(ScanAllByLabel &op) {
json self;
self["name"] = "ScanAllByLabel";
self["label"] = ToJson(op.label_, *request_manager_);
self["label"] = ToJson(op.label_, *request_router_);
self["output_symbol"] = ToJson(op.output_symbol_);
op.input_->Accept(*this);
@ -446,8 +445,8 @@ bool PlanToJsonVisitor::PreVisit(ScanAllByLabel &op) {
bool PlanToJsonVisitor::PreVisit(ScanAllByLabelPropertyRange &op) {
json self;
self["name"] = "ScanAllByLabelPropertyRange";
self["label"] = ToJson(op.label_, *request_manager_);
self["property"] = ToJson(op.property_, *request_manager_);
self["label"] = ToJson(op.label_, *request_router_);
self["property"] = ToJson(op.property_, *request_router_);
self["lower_bound"] = op.lower_bound_ ? ToJson(*op.lower_bound_) : json();
self["upper_bound"] = op.upper_bound_ ? ToJson(*op.upper_bound_) : json();
self["output_symbol"] = ToJson(op.output_symbol_);
@ -462,8 +461,8 @@ bool PlanToJsonVisitor::PreVisit(ScanAllByLabelPropertyRange &op) {
bool PlanToJsonVisitor::PreVisit(ScanAllByLabelPropertyValue &op) {
json self;
self["name"] = "ScanAllByLabelPropertyValue";
self["label"] = ToJson(op.label_, *request_manager_);
self["property"] = ToJson(op.property_, *request_manager_);
self["label"] = ToJson(op.label_, *request_router_);
self["property"] = ToJson(op.property_, *request_router_);
self["expression"] = ToJson(op.expression_);
self["output_symbol"] = ToJson(op.output_symbol_);
@ -477,8 +476,8 @@ bool PlanToJsonVisitor::PreVisit(ScanAllByLabelPropertyValue &op) {
bool PlanToJsonVisitor::PreVisit(ScanAllByLabelProperty &op) {
json self;
self["name"] = "ScanAllByLabelProperty";
self["label"] = ToJson(op.label_, *request_manager_);
self["property"] = ToJson(op.property_, *request_manager_);
self["label"] = ToJson(op.label_, *request_router_);
self["property"] = ToJson(op.property_, *request_router_);
self["output_symbol"] = ToJson(op.output_symbol_);
op.input_->Accept(*this);
@ -504,7 +503,7 @@ bool PlanToJsonVisitor::PreVisit(ScanAllByPrimaryKey &op) {
bool PlanToJsonVisitor::PreVisit(CreateNode &op) {
json self;
self["name"] = "CreateNode";
self["node_info"] = ToJson(op.node_info_, *request_manager_);
self["node_info"] = ToJson(op.node_info_, *request_router_);
op.input_->Accept(*this);
self["input"] = PopOutput();
@ -517,8 +516,8 @@ bool PlanToJsonVisitor::PreVisit(CreateExpand &op) {
json self;
self["name"] = "CreateExpand";
self["input_symbol"] = ToJson(op.input_symbol_);
self["node_info"] = ToJson(op.node_info_, *request_manager_);
self["edge_info"] = ToJson(op.edge_info_, *request_manager_);
self["node_info"] = ToJson(op.node_info_, *request_router_);
self["edge_info"] = ToJson(op.edge_info_, *request_router_);
self["existing_node"] = op.existing_node_;
op.input_->Accept(*this);
@ -534,7 +533,7 @@ bool PlanToJsonVisitor::PreVisit(Expand &op) {
self["input_symbol"] = ToJson(op.input_symbol_);
self["node_symbol"] = ToJson(op.common_.node_symbol);
self["edge_symbol"] = ToJson(op.common_.edge_symbol);
self["edge_types"] = ToJson(op.common_.edge_types, *request_manager_);
self["edge_types"] = ToJson(op.common_.edge_types, *request_router_);
self["direction"] = ToString(op.common_.direction);
self["existing_node"] = op.common_.existing_node;
@ -551,7 +550,7 @@ bool PlanToJsonVisitor::PreVisit(ExpandVariable &op) {
self["input_symbol"] = ToJson(op.input_symbol_);
self["node_symbol"] = ToJson(op.common_.node_symbol);
self["edge_symbol"] = ToJson(op.common_.edge_symbol);
self["edge_types"] = ToJson(op.common_.edge_types, *request_manager_);
self["edge_types"] = ToJson(op.common_.edge_types, *request_router_);
self["direction"] = ToString(op.common_.direction);
self["type"] = ToString(op.type_);
self["is_reverse"] = op.is_reverse_;
@ -626,7 +625,7 @@ bool PlanToJsonVisitor::PreVisit(Delete &op) {
bool PlanToJsonVisitor::PreVisit(SetProperty &op) {
json self;
self["name"] = "SetProperty";
self["property"] = ToJson(op.property_, *request_manager_);
self["property"] = ToJson(op.property_, *request_router_);
self["lhs"] = ToJson(op.lhs_);
self["rhs"] = ToJson(op.rhs_);
@ -663,7 +662,7 @@ bool PlanToJsonVisitor::PreVisit(SetLabels &op) {
json self;
self["name"] = "SetLabels";
self["input_symbol"] = ToJson(op.input_symbol_);
self["labels"] = ToJson(op.labels_, *request_manager_);
self["labels"] = ToJson(op.labels_, *request_router_);
op.input_->Accept(*this);
self["input"] = PopOutput();
@ -675,7 +674,7 @@ bool PlanToJsonVisitor::PreVisit(SetLabels &op) {
bool PlanToJsonVisitor::PreVisit(RemoveProperty &op) {
json self;
self["name"] = "RemoveProperty";
self["property"] = ToJson(op.property_, *request_manager_);
self["property"] = ToJson(op.property_, *request_router_);
self["lhs"] = ToJson(op.lhs_);
op.input_->Accept(*this);
@ -689,7 +688,7 @@ bool PlanToJsonVisitor::PreVisit(RemoveLabels &op) {
json self;
self["name"] = "RemoveLabels";
self["input_symbol"] = ToJson(op.input_symbol_);
self["labels"] = ToJson(op.labels_, *request_manager_);
self["labels"] = ToJson(op.labels_, *request_router_);
op.input_->Accept(*this);
self["input"] = PopOutput();

View File

@ -18,7 +18,7 @@
#include "query/v2/frontend/ast/ast.hpp"
#include "query/v2/plan/operator.hpp"
#include "query/v2/shard_request_manager.hpp"
#include "query/v2/request_router.hpp"
namespace memgraph::query::v2 {
@ -27,20 +27,19 @@ namespace plan {
class LogicalOperator;
/// Pretty print a `LogicalOperator` plan to a `std::ostream`.
/// ShardRequestManager is needed for resolving label and property names.
/// RequestRouter is needed for resolving label and property names.
/// Note that `plan_root` isn't modified, but we can't take it as a const
/// because we don't have support for visiting a const LogicalOperator.
void PrettyPrint(const msgs::ShardRequestManagerInterface &request_manager, const LogicalOperator *plan_root,
std::ostream *out);
void PrettyPrint(const RequestRouterInterface &request_router, const LogicalOperator *plan_root, std::ostream *out);
/// Overload of `PrettyPrint` which defaults the `std::ostream` to `std::cout`.
inline void PrettyPrint(const msgs::ShardRequestManagerInterface &request_manager, const LogicalOperator *plan_root) {
PrettyPrint(request_manager, plan_root, &std::cout);
inline void PrettyPrint(const RequestRouterInterface &request_router, const LogicalOperator *plan_root) {
PrettyPrint(request_router, plan_root, &std::cout);
}
/// Convert a `LogicalOperator` plan to a JSON representation.
/// RequestRouter is needed for resolving label and property names.
nlohmann::json PlanToJson(const msgs::ShardRequestManagerInterface &request_manager, const LogicalOperator *plan_root);
nlohmann::json PlanToJson(const RequestRouterInterface &request_router, const LogicalOperator *plan_root);
class PlanPrinter : public virtual HierarchicalLogicalOperatorVisitor {
public:
@ -48,7 +47,7 @@ class PlanPrinter : public virtual HierarchicalLogicalOperatorVisitor {
using HierarchicalLogicalOperatorVisitor::PreVisit;
using HierarchicalLogicalOperatorVisitor::Visit;
PlanPrinter(const msgs::ShardRequestManagerInterface *request_manager, std::ostream *out);
PlanPrinter(const RequestRouterInterface *request_router, std::ostream *out);
bool DefaultPreVisit() override;
@ -115,7 +114,7 @@ class PlanPrinter : public virtual HierarchicalLogicalOperatorVisitor {
void Branch(LogicalOperator &op, const std::string &branch_name = "");
int64_t depth_{0};
const msgs::ShardRequestManagerInterface *request_manager_{nullptr};
const RequestRouterInterface *request_router_{nullptr};
std::ostream *out_{nullptr};
};
@ -133,20 +132,20 @@ nlohmann::json ToJson(const utils::Bound<Expression *> &bound);
nlohmann::json ToJson(const Symbol &symbol);
nlohmann::json ToJson(storage::v3::EdgeTypeId edge_type, const msgs::ShardRequestManagerInterface &request_manager);
nlohmann::json ToJson(storage::v3::EdgeTypeId edge_type, const RequestRouterInterface &request_router);
nlohmann::json ToJson(storage::v3::LabelId label, const msgs::ShardRequestManagerInterface &request_manager);
nlohmann::json ToJson(storage::v3::LabelId label, const RequestRouterInterface &request_router);
nlohmann::json ToJson(storage::v3::PropertyId property, const msgs::ShardRequestManagerInterface &request_manager);
nlohmann::json ToJson(storage::v3::PropertyId property, const RequestRouterInterface &request_router);
nlohmann::json ToJson(NamedExpression *nexpr);
nlohmann::json ToJson(const std::vector<std::pair<storage::v3::PropertyId, Expression *>> &properties,
const msgs::ShardRequestManagerInterface &request_manager);
const RequestRouterInterface &request_router);
nlohmann::json ToJson(const NodeCreationInfo &node_info, const msgs::ShardRequestManagerInterface &request_manager);
nlohmann::json ToJson(const NodeCreationInfo &node_info, const RequestRouterInterface &request_router);
nlohmann::json ToJson(const EdgeCreationInfo &edge_info, const msgs::ShardRequestManagerInterface &request_manager);
nlohmann::json ToJson(const EdgeCreationInfo &edge_info, const RequestRouterInterface &request_router);
nlohmann::json ToJson(const Aggregate::Element &elem);
@ -161,8 +160,7 @@ nlohmann::json ToJson(const std::vector<T> &items, Args &&...args) {
class PlanToJsonVisitor : public virtual HierarchicalLogicalOperatorVisitor {
public:
explicit PlanToJsonVisitor(const msgs::ShardRequestManagerInterface *request_manager)
: request_manager_(request_manager) {}
explicit PlanToJsonVisitor(const RequestRouterInterface *request_router) : request_router_(request_router) {}
using HierarchicalLogicalOperatorVisitor::PostVisit;
using HierarchicalLogicalOperatorVisitor::PreVisit;
@ -218,7 +216,7 @@ class PlanToJsonVisitor : public virtual HierarchicalLogicalOperatorVisitor {
protected:
nlohmann::json output_;
const msgs::ShardRequestManagerInterface *request_manager_;
const RequestRouterInterface *request_router_;
nlohmann::json PopOutput() {
nlohmann::json tmp;

View File

@ -272,7 +272,7 @@ class RuleBasedPlanner {
PropertiesMapList vector_props;
vector_props.reserve(node_properties->size());
for (const auto &kv : *node_properties) {
// TODO(kostasrim) GetProperty should be implemented in terms of ShardRequestManager NameToProperty
// TODO(kostasrim) GetProperty should be implemented in terms of RequestRouter NameToProperty
vector_props.push_back({GetProperty(kv.first), kv.second});
}
return std::move(vector_props);

View File

@ -17,7 +17,7 @@
#include "query/v2/bindings/typed_value.hpp"
#include "query/v2/plan/preprocess.hpp"
#include "query/v2/shard_request_manager.hpp"
#include "query/v2/request_router.hpp"
#include "storage/v3/conversions.hpp"
#include "storage/v3/id_types.hpp"
#include "storage/v3/property_value.hpp"
@ -31,11 +31,11 @@ namespace memgraph::query::v2::plan {
template <class TDbAccessor>
class VertexCountCache {
public:
explicit VertexCountCache(TDbAccessor *shard_request_manager) : shard_request_manager_{shard_request_manager} {}
explicit VertexCountCache(TDbAccessor *request_router) : request_router_{request_router} {}
auto NameToLabel(const std::string &name) { return shard_request_manager_->NameToLabel(name); }
auto NameToProperty(const std::string &name) { return shard_request_manager_->NameToProperty(name); }
auto NameToEdgeType(const std::string &name) { return shard_request_manager_->NameToEdgeType(name); }
auto NameToLabel(const std::string &name) { return request_router_->NameToLabel(name); }
auto NameToProperty(const std::string &name) { return request_router_->NameToProperty(name); }
auto NameToEdgeType(const std::string &name) { return request_router_->NameToEdgeType(name); }
int64_t VerticesCount() { return 1; }
@ -54,14 +54,14 @@ class VertexCountCache {
return 1;
}
bool LabelIndexExists(storage::v3::LabelId label) { return shard_request_manager_->IsPrimaryLabel(label); }
bool LabelIndexExists(storage::v3::LabelId label) { return request_router_->IsPrimaryLabel(label); }
bool LabelPropertyIndexExists(storage::v3::LabelId /*label*/, storage::v3::PropertyId /*property*/) { return false; }
std::vector<std::pair<query::v2::Expression *, query::v2::plan::FilterInfo>> ExtractPrimaryKey(
storage::v3::LabelId label, std::vector<query::v2::plan::FilterInfo> property_filters) {
std::vector<std::pair<query::v2::Expression *, query::v2::plan::FilterInfo>> pk;
const auto schema = shard_request_manager_->GetSchemaForLabel(label);
const auto schema = request_router_->GetSchemaForLabel(label);
std::vector<storage::v3::PropertyId> schema_properties;
schema_properties.reserve(schema.size());
@ -81,7 +81,7 @@ class VertexCountCache {
: std::vector<std::pair<query::v2::Expression *, query::v2::plan::FilterInfo>>{};
}
msgs::ShardRequestManagerInterface *shard_request_manager_;
RequestRouterInterface *request_router_;
};
template <class TDbAccessor>

View File

@ -0,0 +1,721 @@
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#pragma once
#include <algorithm>
#include <chrono>
#include <deque>
#include <iostream>
#include <iterator>
#include <map>
#include <numeric>
#include <optional>
#include <random>
#include <set>
#include <stdexcept>
#include <thread>
#include <unordered_map>
#include <vector>
#include "coordinator/coordinator.hpp"
#include "coordinator/coordinator_client.hpp"
#include "coordinator/coordinator_rsm.hpp"
#include "coordinator/shard_map.hpp"
#include "io/address.hpp"
#include "io/errors.hpp"
#include "io/notifier.hpp"
#include "io/rsm/raft.hpp"
#include "io/rsm/rsm_client.hpp"
#include "io/rsm/shard_rsm.hpp"
#include "io/simulator/simulator.hpp"
#include "io/simulator/simulator_transport.hpp"
#include "query/v2/accessors.hpp"
#include "query/v2/requests.hpp"
#include "storage/v3/id_types.hpp"
#include "storage/v3/value_conversions.hpp"
#include "utils/result.hpp"
namespace memgraph::query::v2 {
template <typename TStorageClient>
class RsmStorageClientManager {
public:
using CompoundKey = io::rsm::ShardRsmKey;
using ShardMetadata = coordinator::ShardMetadata;
RsmStorageClientManager() = default;
RsmStorageClientManager(const RsmStorageClientManager &) = delete;
RsmStorageClientManager(RsmStorageClientManager &&) = delete;
RsmStorageClientManager &operator=(const RsmStorageClientManager &) = delete;
RsmStorageClientManager &operator=(RsmStorageClientManager &&) = delete;
~RsmStorageClientManager() = default;
void AddClient(ShardMetadata key, TStorageClient client) { cli_cache_.emplace(std::move(key), std::move(client)); }
bool Exists(const ShardMetadata &key) { return cli_cache_.contains(key); }
void PurgeCache() { cli_cache_.clear(); }
TStorageClient &GetClient(const ShardMetadata &key) {
auto it = cli_cache_.find(key);
MG_ASSERT(it != cli_cache_.end(), "Non-existing shard client");
return it->second;
}
private:
std::map<ShardMetadata, TStorageClient> cli_cache_;
};
template <typename TRequest>
struct ShardRequestState {
memgraph::coordinator::ShardMetadata shard;
TRequest request;
};
// maps from ReadinessToken's internal size_t to the associated state
template <typename TRequest>
using RunningRequests = std::unordered_map<size_t, ShardRequestState<TRequest>>;
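// For example, a request sent with io::ReadinessToken{3} is stored under key 3, so the response that
// later becomes ready for that token can be matched back to the shard request that produced it.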
class RequestRouterInterface {
public:
using VertexAccessor = query::v2::accessors::VertexAccessor;
RequestRouterInterface() = default;
RequestRouterInterface(const RequestRouterInterface &) = delete;
RequestRouterInterface(RequestRouterInterface &&) = delete;
RequestRouterInterface &operator=(const RequestRouterInterface &) = delete;
RequestRouterInterface &operator=(RequestRouterInterface &&) = delete;
virtual ~RequestRouterInterface() = default;
virtual void StartTransaction() = 0;
virtual void Commit() = 0;
virtual std::vector<VertexAccessor> ScanVertices(std::optional<std::string> label) = 0;
virtual std::vector<msgs::CreateVerticesResponse> CreateVertices(std::vector<msgs::NewVertex> new_vertices) = 0;
virtual std::vector<msgs::ExpandOneResultRow> ExpandOne(msgs::ExpandOneRequest request) = 0;
virtual std::vector<msgs::CreateExpandResponse> CreateExpand(std::vector<msgs::NewExpand> new_edges) = 0;
virtual std::vector<msgs::GetPropertiesResultRow> GetProperties(msgs::GetPropertiesRequest request) = 0;
virtual storage::v3::EdgeTypeId NameToEdgeType(const std::string &name) const = 0;
virtual storage::v3::PropertyId NameToProperty(const std::string &name) const = 0;
virtual storage::v3::LabelId NameToLabel(const std::string &name) const = 0;
virtual const std::string &PropertyToName(memgraph::storage::v3::PropertyId prop) const = 0;
virtual const std::string &LabelToName(memgraph::storage::v3::LabelId label) const = 0;
virtual const std::string &EdgeTypeToName(memgraph::storage::v3::EdgeTypeId type) const = 0;
virtual std::optional<storage::v3::PropertyId> MaybeNameToProperty(const std::string &name) const = 0;
virtual std::optional<storage::v3::EdgeTypeId> MaybeNameToEdgeType(const std::string &name) const = 0;
virtual std::optional<storage::v3::LabelId> MaybeNameToLabel(const std::string &name) const = 0;
virtual bool IsPrimaryLabel(storage::v3::LabelId label) const = 0;
virtual bool IsPrimaryKey(storage::v3::LabelId primary_label, storage::v3::PropertyId property) const = 0;
};
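// A minimal usage sketch of the call pattern this interface is meant for (the `router` reference and
// the "Person" label are illustrative assumptions; "Person" would have to be a primary label known to
// the shard map):
//
//   RequestRouterInterface &router = *context.request_router;
//   router.StartTransaction();
//   auto everything = router.ScanVertices(std::nullopt);
//   auto people = router.ScanVertices("Person");
//   router.Commit();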
// TODO(kostasrim) Rename this class template
template <typename TTransport>
class RequestRouter : public RequestRouterInterface {
public:
using StorageClient = coordinator::RsmClient<TTransport, msgs::WriteRequests, msgs::WriteResponses,
msgs::ReadRequests, msgs::ReadResponses>;
using CoordinatorWriteRequests = coordinator::CoordinatorWriteRequests;
using CoordinatorClient = coordinator::CoordinatorClient<TTransport>;
using Address = io::Address;
using ShardMetadata = coordinator::ShardMetadata;
using ShardMap = coordinator::ShardMap;
using CompoundKey = coordinator::PrimaryKey;
using VertexAccessor = query::v2::accessors::VertexAccessor;
RequestRouter(CoordinatorClient coord, io::Io<TTransport> &&io) : coord_cli_(std::move(coord)), io_(std::move(io)) {}
RequestRouter(const RequestRouter &) = delete;
RequestRouter(RequestRouter &&) = delete;
RequestRouter &operator=(const RequestRouter &) = delete;
RequestRouter &operator=(RequestRouter &&) = delete;
~RequestRouter() override {}
void InstallSimulatorTicker(std::function<bool()> tick_simulator) {
notifier_.InstallSimulatorTicker(tick_simulator);
}
void StartTransaction() override {
coordinator::HlcRequest req{.last_shard_map_version = shards_map_.GetHlc()};
CoordinatorWriteRequests write_req = req;
spdlog::trace("sending hlc request to start transaction");
auto write_res = coord_cli_.SendWriteRequest(write_req);
spdlog::trace("received hlc response to start transaction");
if (write_res.HasError()) {
throw std::runtime_error("HLC request failed");
}
auto coordinator_write_response = write_res.GetValue();
auto hlc_response = std::get<coordinator::HlcResponse>(coordinator_write_response);
// The new HLC becomes this transaction's id; it is attached to every subsequent shard request.
transaction_id_ = hlc_response.new_hlc;
if (hlc_response.fresher_shard_map) {
shards_map_ = hlc_response.fresher_shard_map.value();
SetUpNameIdMappers();
}
}
void Commit() override {
coordinator::HlcRequest req{.last_shard_map_version = shards_map_.GetHlc()};
CoordinatorWriteRequests write_req = req;
spdlog::trace("sending hlc request before committing transaction");
auto write_res = coord_cli_.SendWriteRequest(write_req);
spdlog::trace("received hlc response before committing transaction");
if (write_res.HasError()) {
throw std::runtime_error("HLC request for commit failed");
}
auto coordinator_write_response = write_res.GetValue();
auto hlc_response = std::get<coordinator::HlcResponse>(coordinator_write_response);
if (hlc_response.fresher_shard_map) {
shards_map_ = hlc_response.fresher_shard_map.value();
SetUpNameIdMappers();
}
auto commit_timestamp = hlc_response.new_hlc;
msgs::CommitRequest commit_req{.transaction_id = transaction_id_, .commit_timestamp = commit_timestamp};
for (const auto &[label, space] : shards_map_.label_spaces) {
for (const auto &[key, shard] : space.shards) {
auto &storage_client = GetStorageClientForShard(shard);
// TODO(kostasrim) Currently requests return the result directly. Adjust this
// when the API works with MgFuture instead.
auto commit_response = storage_client.SendWriteRequest(commit_req);
// TODO: retry on timeouts? This call sometimes times out; a temporary workaround
// (as in the shard_map test) is to retry in a while (true) loop.
if (commit_response.HasError()) {
throw std::runtime_error("Commit request timed out");
}
msgs::WriteResponses write_response_variant = commit_response.GetValue();
auto &response = std::get<msgs::CommitResponse>(write_response_variant);
if (response.error) {
throw std::runtime_error("Commit request did not succeed");
}
}
}
}
storage::v3::EdgeTypeId NameToEdgeType(const std::string &name) const override {
return shards_map_.GetEdgeTypeId(name).value();
}
storage::v3::PropertyId NameToProperty(const std::string &name) const override {
return shards_map_.GetPropertyId(name).value();
}
storage::v3::LabelId NameToLabel(const std::string &name) const override {
return shards_map_.GetLabelId(name).value();
}
const std::string &PropertyToName(storage::v3::PropertyId id) const override {
return properties_.IdToName(id.AsUint());
}
const std::string &LabelToName(storage::v3::LabelId id) const override { return labels_.IdToName(id.AsUint()); }
const std::string &EdgeTypeToName(storage::v3::EdgeTypeId id) const override {
return edge_types_.IdToName(id.AsUint());
}
bool IsPrimaryKey(storage::v3::LabelId primary_label, storage::v3::PropertyId property) const override {
const auto schema_it = shards_map_.schemas.find(primary_label);
MG_ASSERT(schema_it != shards_map_.schemas.end(), "Invalid primary label id: {}", primary_label.AsUint());
return std::find_if(schema_it->second.begin(), schema_it->second.end(), [property](const auto &schema_prop) {
return schema_prop.property_id == property;
}) != schema_it->second.end();
}
bool IsPrimaryLabel(storage::v3::LabelId label) const override { return shards_map_.label_spaces.contains(label); }
// TODO(kostasrim) Simplify return result
std::vector<VertexAccessor> ScanVertices(std::optional<std::string> label) override {
// create requests
std::vector<ShardRequestState<msgs::ScanVerticesRequest>> requests_to_be_sent = RequestsForScanVertices(label);
spdlog::trace("created {} ScanVertices requests", requests_to_be_sent.size());
// begin all requests in parallel
RunningRequests<msgs::ScanVerticesRequest> running_requests = {};
running_requests.reserve(requests_to_be_sent.size());
for (size_t i = 0; i < requests_to_be_sent.size(); i++) {
auto &request = requests_to_be_sent[i];
io::ReadinessToken readiness_token{i};
auto &storage_client = GetStorageClientForShard(request.shard);
storage_client.SendAsyncReadRequest(request.request, notifier_, readiness_token);
running_requests.emplace(readiness_token.GetId(), request);
}
spdlog::trace("sent {} ScanVertices requests in parallel", running_requests.size());
// drive requests to completion
auto responses = DriveReadResponses<msgs::ScanVerticesRequest, msgs::ScanVerticesResponse>(running_requests);
spdlog::trace("got back {} ScanVertices responses after driving to completion", responses.size());
// convert responses into VertexAccessor objects to return
std::vector<VertexAccessor> accessors;
accessors.reserve(responses.size());
for (auto &response : responses) {
for (auto &result_row : response.results) {
accessors.emplace_back(VertexAccessor(std::move(result_row.vertex), std::move(result_row.props), this));
}
}
return accessors;
}
std::vector<msgs::CreateVerticesResponse> CreateVertices(std::vector<msgs::NewVertex> new_vertices) override {
MG_ASSERT(!new_vertices.empty());
// create requests
std::vector<ShardRequestState<msgs::CreateVerticesRequest>> requests_to_be_sent =
RequestsForCreateVertices(new_vertices);
spdlog::trace("created {} CreateVertices requests", requests_to_be_sent.size());
// begin all requests in parallel
RunningRequests<msgs::CreateVerticesRequest> running_requests = {};
running_requests.reserve(requests_to_be_sent.size());
for (size_t i = 0; i < requests_to_be_sent.size(); i++) {
auto &request = requests_to_be_sent[i];
io::ReadinessToken readiness_token{i};
for (auto &new_vertex : request.request.new_vertices) {
new_vertex.label_ids.erase(new_vertex.label_ids.begin());
}
auto &storage_client = GetStorageClientForShard(request.shard);
storage_client.SendAsyncWriteRequest(request.request, notifier_, readiness_token);
running_requests.emplace(readiness_token.GetId(), request);
}
spdlog::trace("sent {} CreateVertices requests in parallel", running_requests.size());
// drive requests to completion
return DriveWriteResponses<msgs::CreateVerticesRequest, msgs::CreateVerticesResponse>(running_requests);
}
std::vector<msgs::CreateExpandResponse> CreateExpand(std::vector<msgs::NewExpand> new_edges) override {
MG_ASSERT(!new_edges.empty());
// create requests
std::vector<ShardRequestState<msgs::CreateExpandRequest>> requests_to_be_sent = RequestsForCreateExpand(new_edges);
// begin all requests in parallel
RunningRequests<msgs::CreateExpandRequest> running_requests = {};
running_requests.reserve(requests_to_be_sent.size());
for (size_t i = 0; i < requests_to_be_sent.size(); i++) {
auto &request = requests_to_be_sent[i];
io::ReadinessToken readiness_token{i};
auto &storage_client = GetStorageClientForShard(request.shard);
msgs::WriteRequests req = request.request;
storage_client.SendAsyncWriteRequest(req, notifier_, readiness_token);
running_requests.emplace(readiness_token.GetId(), request);
}
// drive requests to completion
return DriveWriteResponses<msgs::CreateExpandRequest, msgs::CreateExpandResponse>(running_requests);
}
std::vector<msgs::ExpandOneResultRow> ExpandOne(msgs::ExpandOneRequest request) override {
// TODO(kostasrim) Update this to limit the batch size here.
// Expanding into the destination vertices is the caller's responsibility. For example, for
//   MATCH (u:L1 {prop: 1})-[:Friend]-(v:L1)
// ExpandOne returns a row <U, Edges> for each source vertex U; the destination vertices and
// their properties must then be fetched with a follow-up ExpandOne over Edges.dst
// (see the commented sketch after this function).
// create requests
std::vector<ShardRequestState<msgs::ExpandOneRequest>> requests_to_be_sent = RequestsForExpandOne(request);
// begin all requests in parallel
RunningRequests<msgs::ExpandOneRequest> running_requests = {};
running_requests.reserve(requests_to_be_sent.size());
for (size_t i = 0; i < requests_to_be_sent.size(); i++) {
auto &request = requests_to_be_sent[i];
io::ReadinessToken readiness_token{i};
auto &storage_client = GetStorageClientForShard(request.shard);
msgs::ReadRequests req = request.request;
storage_client.SendAsyncReadRequest(req, notifier_, readiness_token);
running_requests.emplace(readiness_token.GetId(), request);
}
// drive requests to completion
auto responses = DriveReadResponses<msgs::ExpandOneRequest, msgs::ExpandOneResponse>(running_requests);
// post-process responses
std::vector<msgs::ExpandOneResultRow> result_rows;
const auto total_row_count = std::accumulate(responses.begin(), responses.end(), int64_t{0},
[](const int64_t partial_count, const msgs::ExpandOneResponse &resp) {
return partial_count + resp.result.size();
});
result_rows.reserve(total_row_count);
for (auto &response : responses) {
result_rows.insert(result_rows.end(), std::make_move_iterator(response.result.begin()),
std::make_move_iterator(response.result.end()));
}
return result_rows;
}
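// Commented sketch of the caller-side follow-up described above (the names `first_hop` and
// `dst_request` and the edge container are assumptions for illustration; `other_end` is the
// remote endpoint carried by each returned edge):
//
//   auto first_hop = ExpandOne(std::move(request));
//   msgs::ExpandOneRequest dst_request;
//   dst_request.transaction_id = transaction_id_;
//   for (auto &row : first_hop) {
//     // push each returned edge's other_end into dst_request.src_vertices
//   }
//   auto destinations = ExpandOne(std::move(dst_request));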
std::vector<msgs::GetPropertiesResultRow> GetProperties(msgs::GetPropertiesRequest requests) override {
// create requests
std::vector<ShardRequestState<msgs::GetPropertiesRequest>> requests_to_be_sent =
RequestsForGetProperties(std::move(requests));
// begin all requests in parallel
RunningRequests<msgs::GetPropertiesRequest> running_requests = {};
running_requests.reserve(requests_to_be_sent.size());
for (size_t i = 0; i < requests_to_be_sent.size(); i++) {
auto &request = requests_to_be_sent[i];
io::ReadinessToken readiness_token{i};
auto &storage_client = GetStorageClientForShard(request.shard);
msgs::ReadRequests req = request.request;
storage_client.SendAsyncReadRequest(req, notifier_, readiness_token);
running_requests.emplace(readiness_token.GetId(), request);
}
// drive requests to completion
auto responses = DriveReadResponses<msgs::GetPropertiesRequest, msgs::GetPropertiesResponse>(running_requests);
// post-process responses
std::vector<msgs::GetPropertiesResultRow> result_rows;
for (auto &&response : responses) {
std::move(response.result_row.begin(), response.result_row.end(), std::back_inserter(result_rows));
}
return result_rows;
}
std::optional<storage::v3::PropertyId> MaybeNameToProperty(const std::string &name) const override {
return shards_map_.GetPropertyId(name);
}
std::optional<storage::v3::EdgeTypeId> MaybeNameToEdgeType(const std::string &name) const override {
return shards_map_.GetEdgeTypeId(name);
}
std::optional<storage::v3::LabelId> MaybeNameToLabel(const std::string &name) const override {
return shards_map_.GetLabelId(name);
}
private:
std::vector<ShardRequestState<msgs::CreateVerticesRequest>> RequestsForCreateVertices(
const std::vector<msgs::NewVertex> &new_vertices) {
std::map<ShardMetadata, msgs::CreateVerticesRequest> per_shard_request_table;
for (auto &new_vertex : new_vertices) {
MG_ASSERT(!new_vertex.label_ids.empty(), "No label_ids provided for new vertex in RequestRouter::CreateVertices");
auto shard = shards_map_.GetShardForKey(new_vertex.label_ids[0].id,
storage::conversions::ConvertPropertyVector(new_vertex.primary_key));
if (!per_shard_request_table.contains(shard)) {
msgs::CreateVerticesRequest create_v_rqst{.transaction_id = transaction_id_};
per_shard_request_table.insert(std::pair(shard, std::move(create_v_rqst)));
}
per_shard_request_table[shard].new_vertices.push_back(std::move(new_vertex));
}
std::vector<ShardRequestState<msgs::CreateVerticesRequest>> requests = {};
for (auto &[shard, request] : per_shard_request_table) {
ShardRequestState<msgs::CreateVerticesRequest> shard_request_state{
.shard = shard,
.request = request,
};
requests.emplace_back(std::move(shard_request_state));
}
return requests;
}
std::vector<ShardRequestState<msgs::CreateExpandRequest>> RequestsForCreateExpand(
const std::vector<msgs::NewExpand> &new_expands) {
std::map<ShardMetadata, msgs::CreateExpandRequest> per_shard_request_table;
auto ensure_shard_exists_in_table = [&per_shard_request_table,
transaction_id = transaction_id_](const ShardMetadata &shard) {
if (!per_shard_request_table.contains(shard)) {
msgs::CreateExpandRequest create_expand_request{.transaction_id = transaction_id};
per_shard_request_table.insert({shard, std::move(create_expand_request)});
}
};
for (auto &new_expand : new_expands) {
const auto shard_src_vertex = shards_map_.GetShardForKey(
new_expand.src_vertex.first.id, storage::conversions::ConvertPropertyVector(new_expand.src_vertex.second));
const auto shard_dest_vertex = shards_map_.GetShardForKey(
new_expand.dest_vertex.first.id, storage::conversions::ConvertPropertyVector(new_expand.dest_vertex.second));
ensure_shard_exists_in_table(shard_src_vertex);
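// An expansion whose endpoints live on two different shards is appended to the request of
// each of the two shards.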
if (shard_src_vertex != shard_dest_vertex) {
ensure_shard_exists_in_table(shard_dest_vertex);
per_shard_request_table[shard_dest_vertex].new_expands.push_back(new_expand);
}
per_shard_request_table[shard_src_vertex].new_expands.push_back(std::move(new_expand));
}
std::vector<ShardRequestState<msgs::CreateExpandRequest>> requests = {};
for (auto &[shard, request] : per_shard_request_table) {
ShardRequestState<msgs::CreateExpandRequest> shard_request_state{
.shard = shard,
.request = request,
};
requests.emplace_back(std::move(shard_request_state));
}
return requests;
}
std::vector<ShardRequestState<msgs::ScanVerticesRequest>> RequestsForScanVertices(
const std::optional<std::string> &label) {
std::vector<coordinator::Shards> multi_shards;
if (label) {
const auto label_id = shards_map_.GetLabelId(*label);
MG_ASSERT(label_id);
MG_ASSERT(IsPrimaryLabel(*label_id));
multi_shards = {shards_map_.GetShardsForLabel(*label)};
} else {
multi_shards = shards_map_.GetAllShards();
}
std::vector<ShardRequestState<msgs::ScanVerticesRequest>> requests = {};
for (auto &shards : multi_shards) {
for (auto &[key, shard] : shards) {
MG_ASSERT(!shard.peers.empty());
msgs::ScanVerticesRequest request;
request.transaction_id = transaction_id_;
request.start_id.second = storage::conversions::ConvertValueVector(key);
ShardRequestState<msgs::ScanVerticesRequest> shard_request_state{
.shard = shard,
.request = std::move(request),
};
requests.emplace_back(std::move(shard_request_state));
}
}
return requests;
}
std::vector<ShardRequestState<msgs::ExpandOneRequest>> RequestsForExpandOne(const msgs::ExpandOneRequest &request) {
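// Bucket the source vertices by the shard that owns their primary key, producing at most one
// ExpandOneRequest per shard.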
std::map<ShardMetadata, msgs::ExpandOneRequest> per_shard_request_table;
msgs::ExpandOneRequest top_level_rqst_template = request;
top_level_rqst_template.transaction_id = transaction_id_;
top_level_rqst_template.src_vertices.clear();
for (auto &vertex : request.src_vertices) {
auto shard =
shards_map_.GetShardForKey(vertex.first.id, storage::conversions::ConvertPropertyVector(vertex.second));
if (!per_shard_request_table.contains(shard)) {
per_shard_request_table.insert(std::pair(shard, top_level_rqst_template));
}
per_shard_request_table[shard].src_vertices.push_back(vertex);
}
std::vector<ShardRequestState<msgs::ExpandOneRequest>> requests = {};
for (auto &[shard, request] : per_shard_request_table) {
ShardRequestState<msgs::ExpandOneRequest> shard_request_state{
.shard = shard,
.request = request,
};
requests.emplace_back(std::move(shard_request_state));
}
return requests;
}
std::vector<ShardRequestState<msgs::GetPropertiesRequest>> RequestsForGetProperties(
msgs::GetPropertiesRequest &&request) {
std::map<ShardMetadata, msgs::GetPropertiesRequest> per_shard_request_table;
auto top_level_rqst_template = request;
top_level_rqst_template.transaction_id = transaction_id_;
top_level_rqst_template.vertex_ids.clear();
top_level_rqst_template.vertices_and_edges.clear();
for (auto &&vertex : request.vertex_ids) {
auto shard =
shards_map_.GetShardForKey(vertex.first.id, storage::conversions::ConvertPropertyVector(vertex.second));
if (!per_shard_request_table.contains(shard)) {
per_shard_request_table.insert(std::pair(shard, top_level_rqst_template));
}
per_shard_request_table[shard].vertex_ids.emplace_back(std::move(vertex));
}
for (auto &[vertex, maybe_edge] : request.vertices_and_edges) {
auto shard =
shards_map_.GetShardForKey(vertex.first.id, storage::conversions::ConvertPropertyVector(vertex.second));
if (!per_shard_request_table.contains(shard)) {
per_shard_request_table.insert(std::pair(shard, top_level_rqst_template));
}
per_shard_request_table[shard].vertices_and_edges.emplace_back(std::move(vertex), maybe_edge);
}
std::vector<ShardRequestState<msgs::GetPropertiesRequest>> requests;
for (auto &[shard, rqst] : per_shard_request_table) {
ShardRequestState<msgs::GetPropertiesRequest> shard_request_state{
.shard = shard,
.request = std::move(rqst),
};
requests.emplace_back(std::move(shard_request_state));
}
return requests;
}
StorageClient &GetStorageClientForShard(ShardMetadata shard) {
if (!storage_cli_manager_.Exists(shard)) {
AddStorageClientToManager(shard);
}
return storage_cli_manager_.GetClient(shard);
}
StorageClient &GetStorageClientForShard(const std::string &label, const CompoundKey &key) {
auto shard = shards_map_.GetShardForKey(label, key);
return GetStorageClientForShard(std::move(shard));
}
void AddStorageClientToManager(ShardMetadata target_shard) {
MG_ASSERT(!target_shard.peers.empty());
auto leader_addr = target_shard.peers.front();
std::vector<Address> addresses;
addresses.reserve(target_shard.peers.size());
for (auto &address : target_shard.peers) {
addresses.push_back(std::move(address.address));
}
auto cli = StorageClient(io_, std::move(leader_addr.address), std::move(addresses));
storage_cli_manager_.AddClient(target_shard, std::move(cli));
}
template <typename RequestT, typename ResponseT>
std::vector<ResponseT> DriveReadResponses(RunningRequests<RequestT> &running_requests) {
// Store responses in a map based on the corresponding request
// offset, so that they can be reassembled in the correct order
// even if they came back in randomized orders.
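// For example, if readiness is observed in the order {2, 0, 1}, iterating the map keyed by
// request offset still yields the responses back in the order {0, 1, 2}.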
std::map<size_t, ResponseT> response_map;
spdlog::trace("waiting on readiness for token");
while (response_map.size() < running_requests.size()) {
auto ready = notifier_.Await();
spdlog::trace("got readiness for token {}", ready.GetId());
auto &request = running_requests.at(ready.GetId());
auto &storage_client = GetStorageClientForShard(request.shard);
std::optional<utils::BasicResult<io::TimedOut, msgs::ReadResponses>> poll_result =
storage_client.PollAsyncReadRequest(ready);
if (!poll_result.has_value()) {
continue;
}
if (poll_result->HasError()) {
throw std::runtime_error("RequestRouter Read request timed out");
}
msgs::ReadResponses response_variant = poll_result->GetValue();
auto response = std::get<ResponseT>(response_variant);
if (response.error) {
throw std::runtime_error("RequestRouter Read request did not succeed");
}
// the readiness token has an ID based on the request vector offset
response_map.emplace(ready.GetId(), std::move(response));
}
std::vector<ResponseT> responses;
responses.reserve(running_requests.size());
int last = -1;
for (auto &&[offset, response] : response_map) {
MG_ASSERT(last + 1 == offset);
responses.emplace_back(std::forward<ResponseT>(response));
last = offset;
}
return responses;
}
template <typename RequestT, typename ResponseT>
std::vector<ResponseT> DriveWriteResponses(RunningRequests<RequestT> &running_requests) {
// Store responses in a map based on the corresponding request
// offset, so that they can be reassembled in the correct order
// even if they came back in randomized orders.
std::map<size_t, ResponseT> response_map;
while (response_map.size() < running_requests.size()) {
auto ready = notifier_.Await();
auto &request = running_requests.at(ready.GetId());
auto &storage_client = GetStorageClientForShard(request.shard);
std::optional<utils::BasicResult<io::TimedOut, msgs::WriteResponses>> poll_result =
storage_client.PollAsyncWriteRequest(ready);
if (!poll_result.has_value()) {
continue;
}
if (poll_result->HasError()) {
throw std::runtime_error("RequestRouter Write request timed out");
}
msgs::WriteResponses response_variant = poll_result->GetValue();
auto response = std::get<ResponseT>(response_variant);
if (response.error) {
throw std::runtime_error("RequestRouter Write request did not succeed");
}
// the readiness token has an ID based on the request vector offset
response_map.emplace(ready.GetId(), std::move(response));
}
std::vector<ResponseT> responses;
responses.reserve(running_requests.size());
int last = -1;
for (auto &&[offset, response] : response_map) {
MG_ASSERT(last + 1 == offset);
responses.emplace_back(std::forward<ResponseT>(response));
last = offset;
}
return responses;
}
void SetUpNameIdMappers() {
std::unordered_map<uint64_t, std::string> id_to_name;
for (const auto &[name, id] : shards_map_.labels) {
id_to_name.emplace(id.AsUint(), name);
}
labels_.StoreMapping(std::move(id_to_name));
id_to_name.clear();
for (const auto &[name, id] : shards_map_.properties) {
id_to_name.emplace(id.AsUint(), name);
}
properties_.StoreMapping(std::move(id_to_name));
id_to_name.clear();
for (const auto &[name, id] : shards_map_.edge_types) {
id_to_name.emplace(id.AsUint(), name);
}
edge_types_.StoreMapping(std::move(id_to_name));
}
ShardMap shards_map_;
storage::v3::NameIdMapper properties_;
storage::v3::NameIdMapper edge_types_;
storage::v3::NameIdMapper labels_;
CoordinatorClient coord_cli_;
RsmStorageClientManager<StorageClient> storage_cli_manager_;
io::Io<TTransport> io_;
coordinator::Hlc transaction_id_;
io::Notifier notifier_ = {};
// TODO(kostasrim) Add batch prefetching
};
} // namespace memgraph::query::v2

View File

@ -24,6 +24,7 @@
#include "coordinator/hybrid_logical_clock.hpp"
#include "storage/v3/id_types.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/result.hpp"
namespace memgraph::msgs {
@ -317,12 +318,13 @@ struct Value {
}
};
struct Expression {
std::string expression;
struct ShardError {
common::ErrorCode code;
std::string message;
};
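// For illustration only (the message text is an assumption), a shard-side failure can be
// reported as an aggregate-initialized value such as:
//   ShardError{.code = common::ErrorCode::OBJECT_NOT_FOUND, .message = "vertex does not exist"};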
struct Filter {
std::string filter_expression;
struct Expression {
std::string expression;
};
enum class OrderingDirection { ASCENDING = 1, DESCENDING = 2 };
@ -361,27 +363,38 @@ struct ScanResultRow {
};
struct ScanVerticesResponse {
bool success;
std::optional<ShardError> error;
std::optional<VertexId> next_start_id;
std::vector<ScanResultRow> results;
};
using VertexOrEdgeIds = std::variant<VertexId, EdgeId>;
struct GetPropertiesRequest {
Hlc transaction_id;
// Shouldn't contain mixed vertex and edge ids
VertexOrEdgeIds vertex_or_edge_ids;
std::vector<PropertyId> property_ids;
std::vector<Expression> expressions;
bool only_unique = false;
std::optional<std::vector<OrderBy>> order_by;
std::vector<VertexId> vertex_ids;
std::vector<std::pair<VertexId, EdgeId>> vertices_and_edges;
std::optional<std::vector<PropertyId>> property_ids;
std::vector<std::string> expressions;
std::vector<OrderBy> order_by;
std::optional<size_t> limit;
std::optional<Filter> filter;
// Return properties only for those vertices or edges for which the filter
// predicate evaluates to true.
std::optional<std::string> filter;
};
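// A minimal request sketch (the ids, the filter text, the transaction Hlc and the
// `request_router` pointer are illustrative assumptions):
//
//   msgs::GetPropertiesRequest req;
//   req.transaction_id = current_transaction_hlc;
//   req.vertex_ids.push_back(some_vertex_id);
//   req.property_ids = std::vector<PropertyId>{age_property_id};
//   req.filter = "vertex.age > 21";
//   auto rows = request_router->GetProperties(std::move(req));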
struct GetPropertiesResultRow {
VertexId vertex;
std::optional<EdgeId> edge;
std::vector<std::pair<PropertyId, Value>> props;
std::vector<Value> evaluated_expressions;
};
struct GetPropertiesResponse {
bool success;
std::vector<GetPropertiesResultRow> result_row;
std::optional<ShardError> error;
};
enum class EdgeDirection : uint8_t { OUT = 1, IN = 2, BOTH = 3 };
@ -403,7 +416,9 @@ struct ExpandOneRequest {
std::vector<std::string> vertex_expressions;
std::vector<std::string> edge_expressions;
std::optional<std::vector<OrderBy>> order_by;
std::vector<OrderBy> order_by_vertices;
std::vector<OrderBy> order_by_edges;
// Limit the edges or the vertices?
std::optional<size_t> limit;
std::vector<std::string> filters;
@ -446,14 +461,16 @@ struct ExpandOneResultRow {
};
struct ExpandOneResponse {
bool success;
std::optional<ShardError> error;
std::vector<ExpandOneResultRow> result;
};
struct UpdateVertexProp {
struct UpdateVertex {
PrimaryKey primary_key;
// This should be a map
std::vector<std::pair<PropertyId, Value>> property_updates;
// Labels are first added and then removed from vertices
std::vector<LabelId> add_labels;
std::vector<LabelId> remove_labels;
std::map<PropertyId, Value> property_updates;
};
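// Consequently, a label that appears in both add_labels and remove_labels ends up removed.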
struct UpdateEdgeProp {
@ -480,7 +497,7 @@ struct CreateVerticesRequest {
};
struct CreateVerticesResponse {
bool success;
std::optional<ShardError> error;
};
struct DeleteVerticesRequest {
@ -491,16 +508,16 @@ struct DeleteVerticesRequest {
};
struct DeleteVerticesResponse {
bool success;
std::optional<ShardError> error;
};
struct UpdateVerticesRequest {
Hlc transaction_id;
std::vector<UpdateVertexProp> new_properties;
std::vector<UpdateVertex> update_vertices;
};
struct UpdateVerticesResponse {
bool success;
std::optional<ShardError> error;
};
/*
@ -522,7 +539,7 @@ struct CreateExpandRequest {
};
struct CreateExpandResponse {
bool success;
std::optional<ShardError> error;
};
struct DeleteEdgesRequest {
@ -531,7 +548,7 @@ struct DeleteEdgesRequest {
};
struct DeleteEdgesResponse {
bool success;
std::optional<ShardError> error;
};
struct UpdateEdgesRequest {
@ -540,7 +557,7 @@ struct UpdateEdgesRequest {
};
struct UpdateEdgesResponse {
bool success;
std::optional<ShardError> error;
};
struct CommitRequest {
@ -549,7 +566,7 @@ struct CommitRequest {
};
struct CommitResponse {
bool success;
std::optional<ShardError> error;
};
using ReadRequests = std::variant<ExpandOneRequest, GetPropertiesRequest, ScanVerticesRequest>;

View File

@ -1,735 +0,0 @@
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#pragma once
#include <chrono>
#include <deque>
#include <iostream>
#include <iterator>
#include <map>
#include <numeric>
#include <optional>
#include <random>
#include <set>
#include <stdexcept>
#include <thread>
#include <unordered_map>
#include <vector>
#include "coordinator/coordinator.hpp"
#include "coordinator/coordinator_client.hpp"
#include "coordinator/coordinator_rsm.hpp"
#include "coordinator/shard_map.hpp"
#include "io/address.hpp"
#include "io/errors.hpp"
#include "io/rsm/raft.hpp"
#include "io/rsm/rsm_client.hpp"
#include "io/rsm/shard_rsm.hpp"
#include "io/simulator/simulator.hpp"
#include "io/simulator/simulator_transport.hpp"
#include "query/v2/accessors.hpp"
#include "query/v2/requests.hpp"
#include "storage/v3/id_types.hpp"
#include "storage/v3/value_conversions.hpp"
#include "utils/result.hpp"
namespace memgraph::msgs {
template <typename TStorageClient>
class RsmStorageClientManager {
public:
using CompoundKey = memgraph::io::rsm::ShardRsmKey;
using Shard = memgraph::coordinator::Shard;
using LabelId = memgraph::storage::v3::LabelId;
RsmStorageClientManager() = default;
RsmStorageClientManager(const RsmStorageClientManager &) = delete;
RsmStorageClientManager(RsmStorageClientManager &&) = delete;
RsmStorageClientManager &operator=(const RsmStorageClientManager &) = delete;
RsmStorageClientManager &operator=(RsmStorageClientManager &&) = delete;
~RsmStorageClientManager() = default;
void AddClient(Shard key, TStorageClient client) { cli_cache_.emplace(std::move(key), std::move(client)); }
bool Exists(const Shard &key) { return cli_cache_.contains(key); }
void PurgeCache() { cli_cache_.clear(); }
TStorageClient &GetClient(const Shard &key) {
auto it = cli_cache_.find(key);
MG_ASSERT(it != cli_cache_.end(), "Non-existing shard client");
return it->second;
}
private:
std::map<Shard, TStorageClient> cli_cache_;
};
template <typename TRequest>
struct ExecutionState {
using CompoundKey = memgraph::io::rsm::ShardRsmKey;
using Shard = memgraph::coordinator::Shard;
enum State : int8_t { INITIALIZING, EXECUTING, COMPLETED };
// label is optional because some operators can create/remove vertices, etc. These kinds of requests carry the
// label in the request itself.
std::optional<std::string> label;
// CompoundKey is optional because some operators need to iterate over all the available keys
// of a shard. One example is ScanAll, where only the label field is required.
std::optional<CompoundKey> key;
// Transaction id to be filled by the ShardRequestManager implementation
memgraph::coordinator::Hlc transaction_id;
// Initialized by the ShardRequestManager implementation. This vector is filled with the shards that
// the ShardRequestManager impl will send requests to. When a request exhausts a shard, meaning that
// it has pulled all the requested data from the given Shard, the shard is removed from the vector. When the vector
// becomes empty, it means that all of the requests have completed successfully.
// TODO(gvolfing)
// Maybe make this into a more complex object to be able to keep track of paginated results. E.g. instead of a vector
// of Shards make it into a std::vector<std::pair<Shard, PaginatedResultType>> (probably a struct instead of a pair)
// where PaginatedResultType is an enum signaling the progress on the given request. This way we can easily check if
// a partial response on a shard (if there is one) is finished and we can send off the request for the next batch.
std::vector<Shard> shard_cache;
// 1-1 mapping with `shard_cache`.
// A vector that tracks request metadata for each shard (For example, next_id for a ScanAll on Shard A)
std::vector<TRequest> requests;
State state = INITIALIZING;
};
class ShardRequestManagerInterface {
public:
using VertexAccessor = memgraph::query::v2::accessors::VertexAccessor;
ShardRequestManagerInterface() = default;
ShardRequestManagerInterface(const ShardRequestManagerInterface &) = delete;
ShardRequestManagerInterface(ShardRequestManagerInterface &&) = delete;
ShardRequestManagerInterface &operator=(const ShardRequestManagerInterface &) = delete;
ShardRequestManagerInterface &&operator=(ShardRequestManagerInterface &&) = delete;
virtual ~ShardRequestManagerInterface() = default;
virtual void StartTransaction() = 0;
virtual void Commit() = 0;
virtual std::vector<VertexAccessor> Request(ExecutionState<ScanVerticesRequest> &state) = 0;
virtual std::vector<CreateVerticesResponse> Request(ExecutionState<CreateVerticesRequest> &state,
std::vector<NewVertex> new_vertices) = 0;
virtual std::vector<ExpandOneResultRow> Request(ExecutionState<ExpandOneRequest> &state,
ExpandOneRequest request) = 0;
virtual std::vector<CreateExpandResponse> Request(ExecutionState<CreateExpandRequest> &state,
std::vector<NewExpand> new_edges) = 0;
virtual storage::v3::EdgeTypeId NameToEdgeType(const std::string &name) const = 0;
virtual storage::v3::PropertyId NameToProperty(const std::string &name) const = 0;
virtual storage::v3::LabelId NameToLabel(const std::string &name) const = 0;
virtual const std::string &PropertyToName(memgraph::storage::v3::PropertyId prop) const = 0;
virtual const std::string &LabelToName(memgraph::storage::v3::LabelId label) const = 0;
virtual const std::string &EdgeTypeToName(memgraph::storage::v3::EdgeTypeId type) const = 0;
virtual bool IsPrimaryLabel(LabelId label) const = 0;
virtual bool IsPrimaryKey(LabelId primary_label, PropertyId property) const = 0;
virtual std::vector<coordinator::SchemaProperty> GetSchemaForLabel(LabelId label) const = 0;
};
// TODO(kostasrim) Rename this class template
template <typename TTransport>
class ShardRequestManager : public ShardRequestManagerInterface {
public:
using StorageClient =
memgraph::coordinator::RsmClient<TTransport, WriteRequests, WriteResponses, ReadRequests, ReadResponses>;
using CoordinatorWriteRequests = memgraph::coordinator::CoordinatorWriteRequests;
using CoordinatorClient = memgraph::coordinator::CoordinatorClient<TTransport>;
using Address = memgraph::io::Address;
using Shard = memgraph::coordinator::Shard;
using ShardMap = memgraph::coordinator::ShardMap;
using CompoundKey = memgraph::coordinator::PrimaryKey;
using VertexAccessor = memgraph::query::v2::accessors::VertexAccessor;
ShardRequestManager(CoordinatorClient coord, memgraph::io::Io<TTransport> &&io)
: coord_cli_(std::move(coord)), io_(std::move(io)) {}
ShardRequestManager(const ShardRequestManager &) = delete;
ShardRequestManager(ShardRequestManager &&) = delete;
ShardRequestManager &operator=(const ShardRequestManager &) = delete;
ShardRequestManager &operator=(ShardRequestManager &&) = delete;
~ShardRequestManager() override {}
void StartTransaction() override {
memgraph::coordinator::HlcRequest req{.last_shard_map_version = shards_map_.GetHlc()};
CoordinatorWriteRequests write_req = req;
auto write_res = coord_cli_.SendWriteRequest(write_req);
if (write_res.HasError()) {
throw std::runtime_error("HLC request failed");
}
auto coordinator_write_response = write_res.GetValue();
auto hlc_response = std::get<memgraph::coordinator::HlcResponse>(coordinator_write_response);
// Transaction ID to be used later...
transaction_id_ = hlc_response.new_hlc;
if (hlc_response.fresher_shard_map) {
shards_map_ = hlc_response.fresher_shard_map.value();
SetUpNameIdMappers();
}
}
void Commit() override {
memgraph::coordinator::HlcRequest req{.last_shard_map_version = shards_map_.GetHlc()};
CoordinatorWriteRequests write_req = req;
auto write_res = coord_cli_.SendWriteRequest(write_req);
if (write_res.HasError()) {
throw std::runtime_error("HLC request for commit failed");
}
auto coordinator_write_response = write_res.GetValue();
auto hlc_response = std::get<memgraph::coordinator::HlcResponse>(coordinator_write_response);
if (hlc_response.fresher_shard_map) {
shards_map_ = hlc_response.fresher_shard_map.value();
SetUpNameIdMappers();
}
auto commit_timestamp = hlc_response.new_hlc;
msgs::CommitRequest commit_req{.transaction_id = transaction_id_, .commit_timestamp = commit_timestamp};
for (const auto &[label, space] : shards_map_.label_spaces) {
for (const auto &[key, shard] : space.shards) {
auto &storage_client = GetStorageClientForShard(shard);
// TODO(kostasrim) Currently requests return the result directly. Adjust this once the API returns MgFuture
// instead.
auto commit_response = storage_client.SendWriteRequest(commit_req);
// RETRY on timeouts?
// Sometimes this produces a timeout. A temporary solution is to use a while(true) loop, as was done in the shard_map test.
if (commit_response.HasError()) {
throw std::runtime_error("Commit request timed out");
}
WriteResponses write_response_variant = commit_response.GetValue();
auto &response = std::get<CommitResponse>(write_response_variant);
if (!response.success) {
throw std::runtime_error("Commit request did not succeed");
}
}
}
}
storage::v3::EdgeTypeId NameToEdgeType(const std::string &name) const override {
return shards_map_.GetEdgeTypeId(name).value();
}
storage::v3::PropertyId NameToProperty(const std::string &name) const override {
return shards_map_.GetPropertyId(name).value();
}
storage::v3::LabelId NameToLabel(const std::string &name) const override {
return shards_map_.GetLabelId(name).value();
}
const std::string &PropertyToName(memgraph::storage::v3::PropertyId id) const override {
return properties_.IdToName(id.AsUint());
}
const std::string &LabelToName(memgraph::storage::v3::LabelId id) const override {
return labels_.IdToName(id.AsUint());
}
const std::string &EdgeTypeToName(memgraph::storage::v3::EdgeTypeId id) const override {
return edge_types_.IdToName(id.AsUint());
}
bool IsPrimaryKey(LabelId primary_label, PropertyId property) const override {
const auto schema_it = shards_map_.schemas.find(primary_label);
MG_ASSERT(schema_it != shards_map_.schemas.end(), "Invalid primary label id: {}", primary_label.AsUint());
return std::find_if(schema_it->second.begin(), schema_it->second.end(), [property](const auto &schema_prop) {
return schema_prop.property_id == property;
}) != schema_it->second.end();
}
std::vector<coordinator::SchemaProperty> GetSchemaForLabel(LabelId label) const override {
return shards_map_.schemas.at(label);
}
bool IsPrimaryLabel(LabelId label) const override { return shards_map_.label_spaces.contains(label); }
// TODO(kostasrim) Simplify return result
std::vector<VertexAccessor> Request(ExecutionState<ScanVerticesRequest> &state) override {
MaybeInitializeExecutionState(state);
std::vector<ScanVerticesResponse> responses;
SendAllRequests(state);
auto all_requests_gathered = [](auto &paginated_rsp_tracker) {
return std::ranges::all_of(paginated_rsp_tracker, [](const auto &state) {
return state.second == PaginatedResponseState::PartiallyFinished;
});
};
std::map<Shard, PaginatedResponseState> paginated_response_tracker;
for (const auto &shard : state.shard_cache) {
paginated_response_tracker.insert(std::make_pair(shard, PaginatedResponseState::Pending));
}
do {
AwaitOnPaginatedRequests(state, responses, paginated_response_tracker);
} while (!all_requests_gathered(paginated_response_tracker));
MaybeCompleteState(state);
// TODO(kostasrim) Before returning start prefetching the batch (this shall be done once we get MgFuture as return
// result of storage_client.SendReadRequest()).
return PostProcess(std::move(responses));
}
std::vector<CreateVerticesResponse> Request(ExecutionState<CreateVerticesRequest> &state,
std::vector<NewVertex> new_vertices) override {
MG_ASSERT(!new_vertices.empty());
MaybeInitializeExecutionState(state, new_vertices);
std::vector<CreateVerticesResponse> responses;
auto &shard_cache_ref = state.shard_cache;
// 1. Send the requests.
SendAllRequests(state, shard_cache_ref);
// 2. Block until all the futures are exhausted
do {
AwaitOnResponses(state, responses);
} while (!state.shard_cache.empty());
MaybeCompleteState(state);
// TODO(kostasrim) Before returning start prefetching the batch (this shall be done once we get MgFuture as return
// result of storage_client.SendReadRequest()).
return responses;
}
std::vector<CreateExpandResponse> Request(ExecutionState<CreateExpandRequest> &state,
std::vector<NewExpand> new_edges) override {
MG_ASSERT(!new_edges.empty());
MaybeInitializeExecutionState(state, new_edges);
std::vector<CreateExpandResponse> responses;
auto &shard_cache_ref = state.shard_cache;
size_t id{0};
for (auto shard_it = shard_cache_ref.begin(); shard_it != shard_cache_ref.end(); ++id) {
auto &storage_client = GetStorageClientForShard(*shard_it);
WriteRequests req = state.requests[id];
auto write_response_result = storage_client.SendWriteRequest(std::move(req));
if (write_response_result.HasError()) {
throw std::runtime_error("CreateVertices request timedout");
}
WriteResponses response_variant = write_response_result.GetValue();
CreateExpandResponse mapped_response = std::get<CreateExpandResponse>(response_variant);
if (!mapped_response.success) {
throw std::runtime_error("CreateExpand request did not succeed");
}
responses.push_back(mapped_response);
shard_it = shard_cache_ref.erase(shard_it);
}
// We are done with this state
MaybeCompleteState(state);
return responses;
}
std::vector<ExpandOneResultRow> Request(ExecutionState<ExpandOneRequest> &state, ExpandOneRequest request) override {
// TODO(kostasrim) Update to limit the batch size here
// Expansions of the destination must be handled by the caller. For example
// match (u:L1 { prop : 1 })-[:Friend]-(v:L1)
// For each vertex U, the ExpandOne will result in <U, Edges>. The destination vertex and its properties
// must be fetched again with an ExpandOne(Edges.dst)
MaybeInitializeExecutionState(state, std::move(request));
std::vector<ExpandOneResponse> responses;
auto &shard_cache_ref = state.shard_cache;
// 1. Send the requests.
SendAllRequests(state, shard_cache_ref);
// 2. Block until all the futures are exhausted
do {
AwaitOnResponses(state, responses);
} while (!state.shard_cache.empty());
std::vector<ExpandOneResultRow> result_rows;
const auto total_row_count = std::accumulate(
responses.begin(), responses.end(), 0,
[](const int64_t partial_count, const ExpandOneResponse &resp) { return partial_count + resp.result.size(); });
result_rows.reserve(total_row_count);
for (auto &response : responses) {
result_rows.insert(result_rows.end(), std::make_move_iterator(response.result.begin()),
std::make_move_iterator(response.result.end()));
}
MaybeCompleteState(state);
return result_rows;
}
private:
enum class PaginatedResponseState { Pending, PartiallyFinished };
std::vector<VertexAccessor> PostProcess(std::vector<ScanVerticesResponse> &&responses) const {
std::vector<VertexAccessor> accessors;
for (auto &response : responses) {
for (auto &result_row : response.results) {
accessors.emplace_back(VertexAccessor(std::move(result_row.vertex), std::move(result_row.props), this));
}
}
return accessors;
}
template <typename ExecutionState>
void ThrowIfStateCompleted(ExecutionState &state) const {
if (state.state == ExecutionState::COMPLETED) [[unlikely]] {
throw std::runtime_error("State is completed and must be reset");
}
}
template <typename ExecutionState>
void MaybeCompleteState(ExecutionState &state) const {
if (state.requests.empty()) {
state.state = ExecutionState::COMPLETED;
}
}
template <typename ExecutionState>
bool ShallNotInitializeState(ExecutionState &state) const {
return state.state != ExecutionState::INITIALIZING;
}
void MaybeInitializeExecutionState(ExecutionState<CreateVerticesRequest> &state,
std::vector<NewVertex> new_vertices) {
ThrowIfStateCompleted(state);
if (ShallNotInitializeState(state)) {
return;
}
state.transaction_id = transaction_id_;
std::map<Shard, CreateVerticesRequest> per_shard_request_table;
for (auto &new_vertex : new_vertices) {
MG_ASSERT(!new_vertex.label_ids.empty(), "New vertex must have at least one label!");
auto shard = shards_map_.GetShardForKey(new_vertex.label_ids[0].id,
storage::conversions::ConvertPropertyVector(new_vertex.primary_key));
if (!per_shard_request_table.contains(shard)) {
CreateVerticesRequest create_v_rqst{.transaction_id = transaction_id_};
per_shard_request_table.insert(std::pair(shard, std::move(create_v_rqst)));
state.shard_cache.push_back(shard);
}
per_shard_request_table[shard].new_vertices.push_back(std::move(new_vertex));
}
for (auto &[shard, rqst] : per_shard_request_table) {
state.requests.push_back(std::move(rqst));
}
state.state = ExecutionState<CreateVerticesRequest>::EXECUTING;
}
void MaybeInitializeExecutionState(ExecutionState<CreateExpandRequest> &state, std::vector<NewExpand> new_expands) {
ThrowIfStateCompleted(state);
if (ShallNotInitializeState(state)) {
return;
}
state.transaction_id = transaction_id_;
std::map<Shard, CreateExpandRequest> per_shard_request_table;
auto ensure_shard_exists_in_table = [&per_shard_request_table,
transaction_id = transaction_id_](const Shard &shard) {
if (!per_shard_request_table.contains(shard)) {
CreateExpandRequest create_expand_request{.transaction_id = transaction_id};
per_shard_request_table.insert({shard, std::move(create_expand_request)});
}
};
for (auto &new_expand : new_expands) {
const auto shard_src_vertex = shards_map_.GetShardForKey(
new_expand.src_vertex.first.id, storage::conversions::ConvertPropertyVector(new_expand.src_vertex.second));
const auto shard_dest_vertex = shards_map_.GetShardForKey(
new_expand.dest_vertex.first.id, storage::conversions::ConvertPropertyVector(new_expand.dest_vertex.second));
ensure_shard_exists_in_table(shard_src_vertex);
if (shard_src_vertex != shard_dest_vertex) {
ensure_shard_exists_in_table(shard_dest_vertex);
per_shard_request_table[shard_dest_vertex].new_expands.push_back(new_expand);
}
per_shard_request_table[shard_src_vertex].new_expands.push_back(std::move(new_expand));
}
for (auto &[shard, request] : per_shard_request_table) {
state.shard_cache.push_back(shard);
state.requests.push_back(std::move(request));
}
state.state = ExecutionState<CreateExpandRequest>::EXECUTING;
}
void MaybeInitializeExecutionState(ExecutionState<ScanVerticesRequest> &state) {
ThrowIfStateCompleted(state);
if (ShallNotInitializeState(state)) {
return;
}
std::vector<coordinator::Shards> multi_shards;
state.transaction_id = transaction_id_;
if (!state.label) {
multi_shards = shards_map_.GetAllShards();
} else {
const auto label_id = shards_map_.GetLabelId(*state.label);
MG_ASSERT(label_id);
MG_ASSERT(IsPrimaryLabel(*label_id));
multi_shards = {shards_map_.GetShardsForLabel(*state.label)};
}
for (auto &shards : multi_shards) {
for (auto &[key, shard] : shards) {
MG_ASSERT(!shard.empty());
state.shard_cache.push_back(std::move(shard));
ScanVerticesRequest rqst;
rqst.transaction_id = transaction_id_;
rqst.start_id.second = storage::conversions::ConvertValueVector(key);
state.requests.push_back(std::move(rqst));
}
}
state.state = ExecutionState<ScanVerticesRequest>::EXECUTING;
}
void MaybeInitializeExecutionState(ExecutionState<ExpandOneRequest> &state, ExpandOneRequest request) {
ThrowIfStateCompleted(state);
if (ShallNotInitializeState(state)) {
return;
}
state.transaction_id = transaction_id_;
std::map<Shard, ExpandOneRequest> per_shard_request_table;
auto top_level_rqst_template = request;
top_level_rqst_template.transaction_id = transaction_id_;
top_level_rqst_template.src_vertices.clear();
state.requests.clear();
for (auto &vertex : request.src_vertices) {
auto shard =
shards_map_.GetShardForKey(vertex.first.id, storage::conversions::ConvertPropertyVector(vertex.second));
if (!per_shard_request_table.contains(shard)) {
per_shard_request_table.insert(std::pair(shard, top_level_rqst_template));
state.shard_cache.push_back(shard);
}
per_shard_request_table[shard].src_vertices.push_back(vertex);
}
for (auto &[shard, rqst] : per_shard_request_table) {
state.requests.push_back(std::move(rqst));
}
state.state = ExecutionState<ExpandOneRequest>::EXECUTING;
}
StorageClient &GetStorageClientForShard(Shard shard) {
if (!storage_cli_manager_.Exists(shard)) {
AddStorageClientToManager(shard);
}
return storage_cli_manager_.GetClient(shard);
}
StorageClient &GetStorageClientForShard(const std::string &label, const CompoundKey &key) {
auto shard = shards_map_.GetShardForKey(label, key);
return GetStorageClientForShard(std::move(shard));
}
void AddStorageClientToManager(Shard target_shard) {
MG_ASSERT(!target_shard.empty());
auto leader_addr = target_shard.front();
std::vector<Address> addresses;
addresses.reserve(target_shard.size());
for (auto &address : target_shard) {
addresses.push_back(std::move(address.address));
}
auto cli = StorageClient(io_, std::move(leader_addr.address), std::move(addresses));
storage_cli_manager_.AddClient(target_shard, std::move(cli));
}
void SendAllRequests(ExecutionState<ScanVerticesRequest> &state) {
int64_t shard_idx = 0;
for (const auto &request : state.requests) {
const auto &current_shard = state.shard_cache[shard_idx];
auto &storage_client = GetStorageClientForShard(current_shard);
ReadRequests req = request;
storage_client.SendAsyncReadRequest(req);
++shard_idx;
}
}
void SendAllRequests(ExecutionState<CreateVerticesRequest> &state,
std::vector<memgraph::coordinator::Shard> &shard_cache_ref) {
size_t id = 0;
for (auto shard_it = shard_cache_ref.begin(); shard_it != shard_cache_ref.end(); ++shard_it) {
// This is fine because all new_vertices of each request end up on the same shard
const auto labels = state.requests[id].new_vertices[0].label_ids;
auto req_deep_copy = state.requests[id];
for (auto &new_vertex : req_deep_copy.new_vertices) {
new_vertex.label_ids.erase(new_vertex.label_ids.begin());
}
auto &storage_client = GetStorageClientForShard(*shard_it);
WriteRequests req = req_deep_copy;
storage_client.SendAsyncWriteRequest(req);
++id;
}
}
void SendAllRequests(ExecutionState<ExpandOneRequest> &state,
std::vector<memgraph::coordinator::Shard> &shard_cache_ref) {
size_t id = 0;
for (auto shard_it = shard_cache_ref.begin(); shard_it != shard_cache_ref.end(); ++shard_it) {
auto &storage_client = GetStorageClientForShard(*shard_it);
ReadRequests req = state.requests[id];
storage_client.SendAsyncReadRequest(req);
++id;
}
}
void AwaitOnResponses(ExecutionState<CreateVerticesRequest> &state, std::vector<CreateVerticesResponse> &responses) {
auto &shard_cache_ref = state.shard_cache;
int64_t request_idx = 0;
for (auto shard_it = shard_cache_ref.begin(); shard_it != shard_cache_ref.end();) {
// This is fine because all new_vertices of each request end up on the same shard
const auto labels = state.requests[request_idx].new_vertices[0].label_ids;
auto &storage_client = GetStorageClientForShard(*shard_it);
auto poll_result = storage_client.AwaitAsyncWriteRequest();
if (!poll_result) {
++shard_it;
++request_idx;
continue;
}
if (poll_result->HasError()) {
throw std::runtime_error("CreateVertices request timed out");
}
WriteResponses response_variant = poll_result->GetValue();
auto response = std::get<CreateVerticesResponse>(response_variant);
if (!response.success) {
throw std::runtime_error("CreateVertices request did not succeed");
}
responses.push_back(response);
shard_it = shard_cache_ref.erase(shard_it);
// Needed to maintain the 1-1 mapping between the ShardCache and the requests.
auto it = state.requests.begin() + request_idx;
state.requests.erase(it);
}
}
void AwaitOnResponses(ExecutionState<ExpandOneRequest> &state, std::vector<ExpandOneResponse> &responses) {
auto &shard_cache_ref = state.shard_cache;
int64_t request_idx = 0;
for (auto shard_it = shard_cache_ref.begin(); shard_it != shard_cache_ref.end();) {
auto &storage_client = GetStorageClientForShard(*shard_it);
auto poll_result = storage_client.PollAsyncReadRequest();
if (!poll_result) {
++shard_it;
++request_idx;
continue;
}
if (poll_result->HasError()) {
throw std::runtime_error("ExpandOne request timed out");
}
ReadResponses response_variant = poll_result->GetValue();
auto response = std::get<ExpandOneResponse>(response_variant);
// -NOTE-
// Currently there is no boolean flag for signaling the overall success of the
// ExpandOne request, but there should be, so here we assume that it is
// already in place.
if (!response.success) {
throw std::runtime_error("ExpandOne request did not succeed");
}
responses.push_back(std::move(response));
shard_it = shard_cache_ref.erase(shard_it);
// Needed to maintain the 1-1 mapping between the ShardCache and the requests.
auto it = state.requests.begin() + request_idx;
state.requests.erase(it);
}
}
void AwaitOnPaginatedRequests(ExecutionState<ScanVerticesRequest> &state,
std::vector<ScanVerticesResponse> &responses,
std::map<Shard, PaginatedResponseState> &paginated_response_tracker) {
auto &shard_cache_ref = state.shard_cache;
// Find the first request that is not holding a paginated response.
int64_t request_idx = 0;
for (auto shard_it = shard_cache_ref.begin(); shard_it != shard_cache_ref.end();) {
if (paginated_response_tracker.at(*shard_it) != PaginatedResponseState::Pending) {
++shard_it;
++request_idx;
continue;
}
auto &storage_client = GetStorageClientForShard(*shard_it);
auto await_result = storage_client.AwaitAsyncReadRequest();
if (!await_result) {
// Redirection has occurred.
++shard_it;
++request_idx;
continue;
}
if (await_result->HasError()) {
throw std::runtime_error("ScanAll request timed out");
}
ReadResponses read_response_variant = await_result->GetValue();
auto response = std::get<ScanVerticesResponse>(read_response_variant);
if (!response.success) {
throw std::runtime_error("ScanAll request did not succeed");
}
if (!response.next_start_id) {
paginated_response_tracker.erase((*shard_it));
shard_cache_ref.erase(shard_it);
// Needed to maintain the 1-1 mapping between the ShardCache and the requests.
auto it = state.requests.begin() + request_idx;
state.requests.erase(it);
} else {
state.requests[request_idx].start_id.second = response.next_start_id->second;
paginated_response_tracker[*shard_it] = PaginatedResponseState::PartiallyFinished;
}
responses.push_back(std::move(response));
}
}
void SetUpNameIdMappers() {
std::unordered_map<uint64_t, std::string> id_to_name;
for (const auto &[name, id] : shards_map_.labels) {
id_to_name.emplace(id.AsUint(), name);
}
labels_.StoreMapping(std::move(id_to_name));
id_to_name.clear();
for (const auto &[name, id] : shards_map_.properties) {
id_to_name.emplace(id.AsUint(), name);
}
properties_.StoreMapping(std::move(id_to_name));
id_to_name.clear();
for (const auto &[name, id] : shards_map_.edge_types) {
id_to_name.emplace(id.AsUint(), name);
}
edge_types_.StoreMapping(std::move(id_to_name));
}
ShardMap shards_map_;
storage::v3::NameIdMapper properties_;
storage::v3::NameIdMapper edge_types_;
storage::v3::NameIdMapper labels_;
CoordinatorClient coord_cli_;
RsmStorageClientManager<StorageClient> storage_cli_manager_;
memgraph::io::Io<TTransport> io_;
memgraph::coordinator::Hlc transaction_id_;
// TODO(kostasrim) Add batch prefetching
};
} // namespace memgraph::msgs
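As a reading aid for the file deleted above (illustrative only, every name below is a stand-in): the paginated ScanVertices flow boils down to awaiting per-shard responses in rounds until every tracked shard has reported at least a partial result. A trivial, self-contained mock of that control flow:

#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <vector>

enum class PaginatedResponseState { Pending, PartiallyFinished };

// Stand-in for one await round: every still-pending shard delivers one batch.
void AwaitOneRound(std::map<std::string, PaginatedResponseState> &tracker, std::vector<int> &batches) {
  for (auto &entry : tracker) {
    if (entry.second == PaginatedResponseState::Pending) {
      batches.push_back(1);  // pretend one batch arrived from this shard
      entry.second = PaginatedResponseState::PartiallyFinished;
    }
  }
}

int main() {
  std::map<std::string, PaginatedResponseState> tracker{{"shard-a", PaginatedResponseState::Pending},
                                                        {"shard-b", PaginatedResponseState::Pending}};
  std::vector<int> batches;
  auto all_requests_gathered = [](const auto &t) {
    return std::all_of(t.cbegin(), t.cend(),
                       [](const auto &e) { return e.second == PaginatedResponseState::PartiallyFinished; });
  };
  do {
    AwaitOneRound(tracker, batches);
  } while (!all_requests_gathered(tracker));
  std::cout << "collected " << batches.size() << " batches\n";
}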

View File

@ -97,15 +97,15 @@ bool LastCommittedVersionHasLabelProperty(const Vertex &vertex, LabelId label, c
if (delta->label == label) {
MG_ASSERT(!has_label, "Invalid database state!");
has_label = true;
break;
}
break;
}
case Delta::Action::REMOVE_LABEL: {
if (delta->label == label) {
MG_ASSERT(has_label, "Invalid database state!");
has_label = false;
break;
}
break;
}
case Delta::Action::ADD_IN_EDGE:
case Delta::Action::ADD_OUT_EDGE:

View File

@ -16,12 +16,10 @@ set(storage_v3_src_files
schemas.cpp
schema_validator.cpp
shard.cpp
storage.cpp
shard_rsm.cpp
bindings/typed_value.cpp
expr.cpp
request_helper.cpp
storage.cpp)
request_helper.cpp)
# ######################
find_package(gflags REQUIRED)
@ -33,4 +31,4 @@ target_link_libraries(mg-storage-v3 Threads::Threads mg-utils gflags)
target_include_directories(mg-storage-v3 PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/bindings)
add_dependencies(mg-storage-v3 generate_lcp_storage)
target_link_libraries(mg-storage-v3 mg-slk mg-expr mg-io)
target_link_libraries(mg-storage-v3 mg-slk mg-expr mg-io mg-functions)

View File

@ -24,6 +24,8 @@
#include "storage/v3/conversions.hpp"
#include "storage/v3/path.hpp"
#include "utils/typeinfo.hpp"
#include "utils/exceptions.hpp"
#include "functions/awesome_memgraph_functions.hpp"
cpp<#
@ -178,14 +180,6 @@ cpp<#
(:serialize (:slk :load-args '((storage "storage::v3::AstStorage *")))))
#>cpp
struct FunctionContext {
DbAccessor *db_accessor;
utils::MemoryResource *memory;
int64_t timestamp;
std::unordered_map<std::string, int64_t> *counters;
View view;
};
inline bool operator==(const LabelIx &a, const LabelIx &b) {
return a.ix == b.ix && a.name == b.name;
}
@ -845,16 +839,24 @@ cpp<#
:slk-load (slk-load-ast-vector "Expression"))
(function-name "std::string" :scope :public)
(function "std::function<TypedValue(const TypedValue *, int64_t,
const FunctionContext &)>"
const functions::FunctionContext<memgraph::storage::v3::DbAccessor> &)>"
:scope :public
:dont-save t
:clone :copy
:slk-load (lambda (member)
#>cpp
self->${member} = functions::NameToFunction<memgraph::storage::v3::TypedValue,
functions::FunctionContext<memgraph::storage::v3::DbAccessor>,
functions::StorageEngineTag, Conv>(self->function_name_);
cpp<#)))
(:public
#>cpp
Function() = default;
using Conv = decltype(PropertyToTypedValueFunctor<TypedValue>);
class SemanticException : public memgraph::utils::BasicException {
using utils::BasicException::BasicException;
};
DEFVISITABLE(ExpressionVisitor<TypedValue>);
DEFVISITABLE(ExpressionVisitor<void>);
@ -872,7 +874,13 @@ cpp<#
Function(const std::string &function_name,
const std::vector<Expression *> &arguments)
: arguments_(arguments),
function_name_(function_name) {
function_name_(function_name),
function_(functions::NameToFunction<memgraph::storage::v3::TypedValue,
functions::FunctionContext<memgraph::storage::v3::DbAccessor>,
functions::StorageEngineTag, Conv>(function_name_)) {
if (!function_) {
throw SemanticException("Function '{}' doesn't exist.", function_name);
}
}
cpp<#)
(:private
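For illustration only: the constructor above now resolves the callable for a function name eagerly and rejects unknown names with a SemanticException. A minimal, self-contained sketch of that pattern with hypothetical stand-ins; the real functions::NameToFunction is templated over the TypedValue, FunctionContext and storage-engine tag types and is only approximated here.

#include <functional>
#include <map>
#include <stdexcept>
#include <string>
#include <utility>

using Callable = std::function<int(int)>;

// Hypothetical registry lookup standing in for functions::NameToFunction.
Callable NameToFunctionSketch(const std::string &name) {
  static const std::map<std::string, Callable> kRegistry{{"ABS", [](int x) { return x < 0 ? -x : x; }}};
  const auto it = kRegistry.find(name);
  return it == kRegistry.end() ? Callable{} : it->second;
}

struct FunctionSketch {
  explicit FunctionSketch(std::string function_name)
      : function_name_(std::move(function_name)), function_(NameToFunctionSketch(function_name_)) {
    // Mirrors the new behavior: an unknown name is rejected already at construction time.
    if (!function_) {
      throw std::runtime_error("Function '" + function_name_ + "' doesn't exist.");
    }
  }

  std::string function_name_;
  Callable function_;
};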

View File

@ -78,8 +78,8 @@ class DbAccessor final {
return VerticesIterable(accessor_->Vertices(label, property, lower, upper, view));
}
storage::v3::Result<EdgeAccessor> InsertEdge(VertexAccessor *from, VertexAccessor *to,
const storage::v3::EdgeTypeId &edge_type) {
storage::v3::ShardResult<EdgeAccessor> InsertEdge(VertexAccessor *from, VertexAccessor *to,
const storage::v3::EdgeTypeId &edge_type) {
static constexpr auto kDummyGid = storage::v3::Gid::FromUint(0);
auto maybe_edge = accessor_->CreateEdge(from->Id(storage::v3::View::NEW).GetValue(),
to->Id(storage::v3::View::NEW).GetValue(), edge_type, kDummyGid);
@ -87,8 +87,8 @@ class DbAccessor final {
return EdgeAccessor(*maybe_edge);
}
storage::v3::Result<std::optional<EdgeAccessor>> RemoveEdge(EdgeAccessor *edge) {
auto res = accessor_->DeleteEdge(edge->FromVertex(), edge->ToVertex(), edge->Gid());
storage::v3::ShardResult<std::optional<EdgeAccessor>> RemoveEdge(EdgeAccessor *edge) {
auto res = accessor_->DeleteEdge(edge->From(), edge->To(), edge->Gid());
if (res.HasError()) {
return res.GetError();
}
@ -101,7 +101,7 @@ class DbAccessor final {
return std::make_optional<EdgeAccessor>(*value);
}
storage::v3::Result<std::optional<std::pair<VertexAccessor, std::vector<EdgeAccessor>>>> DetachRemoveVertex(
storage::v3::ShardResult<std::optional<std::pair<VertexAccessor, std::vector<EdgeAccessor>>>> DetachRemoveVertex(
VertexAccessor *vertex_accessor) {
using ReturnType = std::pair<VertexAccessor, std::vector<EdgeAccessor>>;
@ -125,7 +125,7 @@ class DbAccessor final {
return std::make_optional<ReturnType>(vertex, std::move(deleted_edges));
}
storage::v3::Result<std::optional<VertexAccessor>> RemoveVertex(VertexAccessor *vertex_accessor) {
storage::v3::ShardResult<std::optional<VertexAccessor>> RemoveVertex(VertexAccessor *vertex_accessor) {
auto res = accessor_->DeleteVertex(vertex_accessor);
if (res.HasError()) {
return res.GetError();

View File

@ -21,6 +21,7 @@
#include "storage/v3/id_types.hpp"
#include "storage/v3/property_store.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/result.hpp"
#include "storage/v3/view.hpp"
#include "utils/memory.hpp"
@ -87,6 +88,6 @@ struct EvaluationContext {
using ExpressionEvaluator =
memgraph::expr::ExpressionEvaluator<TypedValue, EvaluationContext, DbAccessor, storage::v3::View,
storage::v3::LabelId, storage::v3::PropertyStore, PropertyToTypedValueConverter,
memgraph::storage::v3::Error>;
common::ErrorCode>;
} // namespace memgraph::storage::v3

View File

@ -17,5 +17,5 @@
#include "storage/v3/bindings/typed_value.hpp"
namespace memgraph::storage::v3 {
using Frame = memgraph::expr::Frame<TypedValue>;
using Frame = memgraph::expr::Frame;
} // namespace memgraph::storage::v3

View File

@ -69,6 +69,10 @@ TTypedValue PropertyToTypedValue(const PropertyValue &value) {
LOG_FATAL("Unsupported type");
}
template <typename TypedValueT>
inline const auto PropertyToTypedValueFunctor =
[](const PropertyValue &value) { return PropertyToTypedValue<TypedValueT>(value); };
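A small, self-contained sketch (stand-in types only, nothing below is the real API) of what this new variable-template functor enables: the conversion's target type is fixed once, so the conversion can be handed to generic code as an ordinary callable.

#include <algorithm>
#include <iterator>
#include <vector>

struct FakePropertyValue {
  int v;
};
struct FakeTypedValue {
  int v;
};

template <typename TTypedValue>
TTypedValue FakePropertyToTypedValue(const FakePropertyValue &value) {
  return TTypedValue{value.v};
}

// Mirrors the shape of PropertyToTypedValueFunctor: a variable template wrapping the conversion in a lambda.
template <typename TTypedValue>
inline const auto FakePropertyToTypedValueFunctor =
    [](const FakePropertyValue &value) { return FakePropertyToTypedValue<TTypedValue>(value); };

std::vector<FakeTypedValue> ConvertAll(const std::vector<FakePropertyValue> &props) {
  std::vector<FakeTypedValue> out;
  out.reserve(props.size());
  std::transform(props.begin(), props.end(), std::back_inserter(out),
                 FakePropertyToTypedValueFunctor<FakeTypedValue>);
  return out;
}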
template <typename TTypedValue>
TTypedValue PropertyToTypedValue(const PropertyValue &value, utils::MemoryResource *mem) {
switch (value.type()) {

View File

@ -15,6 +15,7 @@
#include "storage/v3/mvcc.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/result.hpp"
#include "storage/v3/schema_validator.hpp"
#include "storage/v3/vertex_accessor.hpp"
#include "utils/memory_tracker.hpp"
@ -50,17 +51,17 @@ bool EdgeAccessor::IsVisible(const View view) const {
return exists && (for_deleted_ || !deleted);
}
const VertexId &EdgeAccessor::FromVertex() const { return from_vertex_; }
const VertexId &EdgeAccessor::From() const { return from_vertex_; }
const VertexId &EdgeAccessor::ToVertex() const { return to_vertex_; }
const VertexId &EdgeAccessor::To() const { return to_vertex_; }
Result<PropertyValue> EdgeAccessor::SetProperty(PropertyId property, const PropertyValue &value) {
ShardResult<PropertyValue> EdgeAccessor::SetProperty(PropertyId property, const PropertyValue &value) {
utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception;
if (!config_.properties_on_edges) return Error::PROPERTIES_DISABLED;
if (!config_.properties_on_edges) return SHARD_ERROR(ErrorCode::PROPERTIES_DISABLED);
if (!PrepareForWrite(transaction_, edge_.ptr)) return Error::SERIALIZATION_ERROR;
if (!PrepareForWrite(transaction_, edge_.ptr)) return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
if (edge_.ptr->deleted) return Error::DELETED_OBJECT;
if (edge_.ptr->deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
auto current_value = edge_.ptr->properties.GetProperty(property);
// We could skip setting the value if the previous one is the same to the new
@ -75,12 +76,12 @@ Result<PropertyValue> EdgeAccessor::SetProperty(PropertyId property, const Prope
return std::move(current_value);
}
Result<std::map<PropertyId, PropertyValue>> EdgeAccessor::ClearProperties() {
if (!config_.properties_on_edges) return Error::PROPERTIES_DISABLED;
ShardResult<std::map<PropertyId, PropertyValue>> EdgeAccessor::ClearProperties() {
if (!config_.properties_on_edges) return SHARD_ERROR(ErrorCode::PROPERTIES_DISABLED);
if (!PrepareForWrite(transaction_, edge_.ptr)) return Error::SERIALIZATION_ERROR;
if (!PrepareForWrite(transaction_, edge_.ptr)) return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
if (edge_.ptr->deleted) return Error::DELETED_OBJECT;
if (edge_.ptr->deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
auto properties = edge_.ptr->properties.Properties();
for (const auto &property : properties) {
@ -92,11 +93,11 @@ Result<std::map<PropertyId, PropertyValue>> EdgeAccessor::ClearProperties() {
return std::move(properties);
}
Result<PropertyValue> EdgeAccessor::GetProperty(View view, PropertyId property) const {
ShardResult<PropertyValue> EdgeAccessor::GetProperty(View view, PropertyId property) const {
return GetProperty(property, view);
}
Result<PropertyValue> EdgeAccessor::GetProperty(PropertyId property, View view) const {
ShardResult<PropertyValue> EdgeAccessor::GetProperty(PropertyId property, View view) const {
if (!config_.properties_on_edges) return PropertyValue();
auto exists = true;
auto deleted = edge_.ptr->deleted;
@ -128,12 +129,12 @@ Result<PropertyValue> EdgeAccessor::GetProperty(PropertyId property, View view)
break;
}
});
if (!exists) return Error::NONEXISTENT_OBJECT;
if (!for_deleted_ && deleted) return Error::DELETED_OBJECT;
if (!exists) return SHARD_ERROR(ErrorCode::NONEXISTENT_OBJECT);
if (!for_deleted_ && deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
return std::move(value);
}
Result<std::map<PropertyId, PropertyValue>> EdgeAccessor::Properties(View view) const {
ShardResult<std::map<PropertyId, PropertyValue>> EdgeAccessor::Properties(View view) const {
if (!config_.properties_on_edges) return std::map<PropertyId, PropertyValue>{};
auto exists = true;
auto deleted = edge_.ptr->deleted;
@ -174,9 +175,12 @@ Result<std::map<PropertyId, PropertyValue>> EdgeAccessor::Properties(View view)
break;
}
});
if (!exists) return Error::NONEXISTENT_OBJECT;
if (!for_deleted_ && deleted) return Error::DELETED_OBJECT;
if (!exists) return SHARD_ERROR(ErrorCode::NONEXISTENT_OBJECT);
if (!for_deleted_ && deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
return std::move(properties);
}
// NOLINTNEXTLINE(readability-convert-member-functions-to-static)
size_t EdgeAccessor::CypherId() const { return Gid().AsUint(); }
} // namespace memgraph::storage::v3
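A minimal sketch, not part of the diff, of the return-and-propagate pattern the accessor code above now follows. The ShardResult and ShardError shapes below are assumptions made for illustration; the real SHARD_ERROR macro presumably also captures extra context that is omitted here.

#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include <variant>

enum class ErrorCode : uint8_t { PROPERTIES_DISABLED, DELETED_OBJECT };
struct ShardError {
  ErrorCode code;
};

template <typename T>
class ShardResult {
 public:
  // Implicit constructors keep `return value;` and `return error;` ergonomic, as in the code above.
  ShardResult(T value) : value_(std::move(value)) {}
  ShardResult(ShardError error) : value_(std::move(error)) {}

  bool HasError() const { return std::holds_alternative<ShardError>(value_); }
  const ShardError &GetError() const { return std::get<ShardError>(value_); }
  const T &GetValue() const { return std::get<T>(value_); }

 private:
  std::variant<T, ShardError> value_;
};

ShardResult<std::string> ReadProperty(bool properties_enabled, bool deleted) {
  if (!properties_enabled) return ShardError{ErrorCode::PROPERTIES_DISABLED};
  if (deleted) return ShardError{ErrorCode::DELETED_OBJECT};
  return std::string{"value"};
}

ShardResult<size_t> PropertyLength(bool properties_enabled, bool deleted) {
  auto prop = ReadProperty(properties_enabled, deleted);
  if (prop.HasError()) return prop.GetError();  // propagate the error upward unchanged
  return prop.GetValue().size();
}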

View File

@ -48,27 +48,27 @@ class EdgeAccessor final {
/// @return true if the object is visible from the current transaction
bool IsVisible(View view) const;
const VertexId &FromVertex() const;
const VertexId &From() const;
const VertexId &ToVertex() const;
const VertexId &To() const;
EdgeTypeId EdgeType() const { return edge_type_; }
/// Set a property value and return the old value.
/// @throw std::bad_alloc
Result<PropertyValue> SetProperty(PropertyId property, const PropertyValue &value);
ShardResult<PropertyValue> SetProperty(PropertyId property, const PropertyValue &value);
/// Remove all properties and return old values for each removed property.
/// @throw std::bad_alloc
Result<std::map<PropertyId, PropertyValue>> ClearProperties();
ShardResult<std::map<PropertyId, PropertyValue>> ClearProperties();
/// @throw std::bad_alloc
Result<PropertyValue> GetProperty(PropertyId property, View view) const;
ShardResult<PropertyValue> GetProperty(PropertyId property, View view) const;
Result<PropertyValue> GetProperty(View view, PropertyId property) const;
ShardResult<PropertyValue> GetProperty(View view, PropertyId property) const;
/// @throw std::bad_alloc
Result<std::map<PropertyId, PropertyValue>> Properties(View view) const;
ShardResult<std::map<PropertyId, PropertyValue>> Properties(View view) const;
Gid Gid() const noexcept {
if (config_.properties_on_edges) {
@ -84,6 +84,9 @@ class EdgeAccessor final {
}
bool operator!=(const EdgeAccessor &other) const noexcept { return !(*this == other); }
// Dummy function
size_t CypherId() const;
private:
EdgeRef edge_;
EdgeTypeId edge_type_;

View File

@ -52,13 +52,12 @@ msgs::Value ConstructValueEdge(const EdgeAccessor &acc, View view) {
msgs::EdgeType type = {.id = acc.EdgeType()};
msgs::EdgeId gid = {.gid = acc.Gid().AsUint()};
msgs::Label src_prim_label = {.id = acc.FromVertex().primary_label};
msgs::Label src_prim_label = {.id = acc.From().primary_label};
memgraph::msgs::VertexId src_vertex =
std::make_pair(src_prim_label, conversions::ConvertValueVector(acc.FromVertex().primary_key));
std::make_pair(src_prim_label, conversions::ConvertValueVector(acc.From().primary_key));
msgs::Label dst_prim_label = {.id = acc.ToVertex().primary_label};
msgs::VertexId dst_vertex =
std::make_pair(dst_prim_label, conversions::ConvertValueVector(acc.ToVertex().primary_key));
msgs::Label dst_prim_label = {.id = acc.To().primary_label};
msgs::VertexId dst_vertex = std::make_pair(dst_prim_label, conversions::ConvertValueVector(acc.To().primary_key));
auto properties = acc.Properties(view);
@ -165,7 +164,7 @@ std::any ParseExpression(const std::string &expr, memgraph::expr::AstStorage &st
return visitor.visit(ast);
}
TypedValue ComputeExpression(DbAccessor &dba, const std::optional<memgraph::storage::v3::VertexAccessor> &v_acc,
TypedValue ComputeExpression(DbAccessor &dba, const memgraph::storage::v3::VertexAccessor &v_acc,
const std::optional<memgraph::storage::v3::EdgeAccessor> &e_acc,
const std::string &expression, std::string_view node_name, std::string_view edge_name) {
AstStorage storage;
@ -192,10 +191,11 @@ TypedValue ComputeExpression(DbAccessor &dba, const std::optional<memgraph::stor
return position_symbol_pair.second.name() == node_name;
}) != symbol_table.table().end());
frame[symbol_table.at(node_identifier)] = *v_acc;
frame[symbol_table.at(node_identifier)] = v_acc;
}
if (edge_identifier.symbol_pos_ != -1) {
MG_ASSERT(e_acc.has_value());
MG_ASSERT(std::find_if(symbol_table.table().begin(), symbol_table.table().end(),
[&edge_name](const std::pair<int32_t, Symbol> &position_symbol_pair) {
return position_symbol_pair.second.name() == edge_name;

View File

@ -9,6 +9,8 @@
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#pragma once
#include <vector>
#include "db_accessor.hpp"
@ -48,8 +50,7 @@ auto Eval(TExpression *expr, EvaluationContext &ctx, AstStorage &storage, Expres
std::any ParseExpression(const std::string &expr, AstStorage &storage);
TypedValue ComputeExpression(DbAccessor &dba, const std::optional<VertexAccessor> &v_acc,
const std::optional<EdgeAccessor> &e_acc, const std::string &expression,
std::string_view node_name, std::string_view edge_name);
TypedValue ComputeExpression(DbAccessor &dba, const VertexAccessor &v_acc, const std::optional<EdgeAccessor> &e_acc,
const std::string &expression, std::string_view node_name, std::string_view edge_name);
} // namespace memgraph::storage::v3

View File

@ -11,56 +11,457 @@
#include "storage/v3/request_helper.hpp"
#include <iterator>
#include <vector>
#include "pretty_print_ast_to_original_expression.hpp"
#include "storage/v3/bindings/db_accessor.hpp"
#include "storage/v3/bindings/pretty_print_ast_to_original_expression.hpp"
#include "storage/v3/expr.hpp"
#include "storage/v3/result.hpp"
#include "storage/v3/value_conversions.hpp"
namespace memgraph::storage::v3 {
using msgs::Label;
using msgs::PropertyId;
std::vector<Element> OrderByElements(Shard::Accessor &acc, DbAccessor &dba, VerticesIterable &vertices_iterable,
std::vector<msgs::OrderBy> &order_bys) {
std::vector<Element> ordered;
ordered.reserve(acc.ApproximateVertexCount());
std::vector<Ordering> ordering;
ordering.reserve(order_bys.size());
for (const auto &order : order_bys) {
switch (order.direction) {
case memgraph::msgs::OrderingDirection::ASCENDING: {
ordering.push_back(Ordering::ASC);
break;
}
case memgraph::msgs::OrderingDirection::DESCENDING: {
ordering.push_back(Ordering::DESC);
break;
}
}
using conversions::ConvertPropertyVector;
using conversions::FromPropertyValueToValue;
using conversions::ToMsgsVertexId;
namespace {
using AllEdgePropertyDataStucture = std::map<PropertyId, msgs::Value>;
using SpecificEdgePropertyDataStucture = std::vector<msgs::Value>;
using AllEdgeProperties = std::tuple<msgs::VertexId, msgs::Gid, AllEdgePropertyDataStucture>;
using SpecificEdgeProperties = std::tuple<msgs::VertexId, msgs::Gid, SpecificEdgePropertyDataStucture>;
using SpecificEdgePropertiesVector = std::vector<SpecificEdgeProperties>;
using AllEdgePropertiesVector = std::vector<AllEdgeProperties>;
struct VertexIdCmpr {
bool operator()(const storage::v3::VertexId *lhs, const storage::v3::VertexId *rhs) const { return *lhs < *rhs; }
};
std::optional<std::map<PropertyId, Value>> PrimaryKeysFromAccessor(const VertexAccessor &acc, View view,
const Schemas::Schema &schema) {
std::map<PropertyId, Value> ret;
auto props = acc.Properties(view);
auto maybe_pk = acc.PrimaryKey(view);
if (maybe_pk.HasError()) {
spdlog::debug("Encountered an error while trying to get vertex primary key.");
return std::nullopt;
}
auto compare_typed_values = TypedValueVectorCompare(ordering);
for (auto it = vertices_iterable.begin(); it != vertices_iterable.end(); ++it) {
std::vector<TypedValue> properties_order_by;
properties_order_by.reserve(order_bys.size());
for (const auto &order_by : order_bys) {
const auto val =
ComputeExpression(dba, *it, std::nullopt, order_by.expression.expression, expr::identifier_node_symbol, "");
properties_order_by.push_back(val);
}
ordered.push_back({std::move(properties_order_by), *it});
auto &pk = maybe_pk.GetValue();
MG_ASSERT(schema.second.size() == pk.size(), "PrimaryKey size does not match schema!");
for (size_t i{0}; i < schema.second.size(); ++i) {
ret.emplace(schema.second[i].property_id, FromPropertyValueToValue(std::move(pk[i])));
}
std::sort(ordered.begin(), ordered.end(), [compare_typed_values](const auto &pair1, const auto &pair2) {
return compare_typed_values(pair1.properties_order_by, pair2.properties_order_by);
return ret;
}
ShardResult<std::vector<msgs::Label>> FillUpSourceVertexSecondaryLabels(const std::optional<VertexAccessor> &v_acc,
const msgs::ExpandOneRequest &req) {
auto secondary_labels = v_acc->Labels(View::NEW);
if (secondary_labels.HasError()) {
spdlog::debug("Encountered an error while trying to get the secondary labels of a vertex. Transaction id: {}",
req.transaction_id.logical_id);
return secondary_labels.GetError();
}
auto &sec_labels = secondary_labels.GetValue();
std::vector<msgs::Label> msgs_secondary_labels;
msgs_secondary_labels.reserve(sec_labels.size());
std::transform(sec_labels.begin(), sec_labels.end(), std::back_inserter(msgs_secondary_labels),
[](auto label_id) { return msgs::Label{.id = label_id}; });
return msgs_secondary_labels;
}
ShardResult<std::map<PropertyId, Value>> FillUpSourceVertexProperties(const std::optional<VertexAccessor> &v_acc,
const msgs::ExpandOneRequest &req,
storage::v3::View view,
const Schemas::Schema &schema) {
std::map<PropertyId, Value> src_vertex_properties;
if (!req.src_vertex_properties) {
auto props = v_acc->Properties(View::NEW);
if (props.HasError()) {
spdlog::debug("Encountered an error while trying to access vertex properties. Transaction id: {}",
req.transaction_id.logical_id);
return props.GetError();
}
for (auto &[key, val] : props.GetValue()) {
src_vertex_properties.insert(std::make_pair(key, FromPropertyValueToValue(std::move(val))));
}
auto pks = PrimaryKeysFromAccessor(*v_acc, view, schema);
if (pks) {
src_vertex_properties.merge(*pks);
}
} else if (req.src_vertex_properties.value().empty()) {
// NOOP
} else {
for (const auto &prop : req.src_vertex_properties.value()) {
auto prop_val = v_acc->GetProperty(prop, View::OLD);
if (prop_val.HasError()) {
spdlog::debug("Encountered an error while trying to access vertex properties. Transaction id: {}",
req.transaction_id.logical_id);
return prop_val.GetError();
}
src_vertex_properties.insert(std::make_pair(prop, FromPropertyValueToValue(std::move(prop_val.GetValue()))));
}
}
return src_vertex_properties;
}
ShardResult<std::array<std::vector<EdgeAccessor>, 2>> FillUpConnectingEdges(
const std::optional<VertexAccessor> &v_acc, const msgs::ExpandOneRequest &req,
const EdgeUniquenessFunction &maybe_filter_based_on_edge_uniqueness) {
std::vector<EdgeTypeId> edge_types{};
edge_types.reserve(req.edge_types.size());
std::transform(req.edge_types.begin(), req.edge_types.end(), std::back_inserter(edge_types),
[](const msgs::EdgeType &edge_type) { return edge_type.id; });
std::vector<EdgeAccessor> in_edges;
std::vector<EdgeAccessor> out_edges;
switch (req.direction) {
case msgs::EdgeDirection::OUT: {
auto out_edges_result = v_acc->OutEdges(View::NEW, edge_types);
if (out_edges_result.HasError()) {
spdlog::debug("Encountered an error while trying to get out-going EdgeAccessors. Transaction id: {}",
req.transaction_id.logical_id);
return out_edges_result.GetError();
}
out_edges =
maybe_filter_based_on_edge_uniqueness(std::move(out_edges_result.GetValue()), msgs::EdgeDirection::OUT);
break;
}
case msgs::EdgeDirection::IN: {
auto in_edges_result = v_acc->InEdges(View::NEW, edge_types);
if (in_edges_result.HasError()) {
spdlog::debug("Encountered an error while trying to get in-going EdgeAccessors. Transaction id: {}",
req.transaction_id.logical_id);
return in_edges_result.GetError();
}
in_edges = maybe_filter_based_on_edge_uniqueness(std::move(in_edges_result.GetValue()), msgs::EdgeDirection::IN);
break;
}
case msgs::EdgeDirection::BOTH: {
auto in_edges_result = v_acc->InEdges(View::NEW, edge_types);
if (in_edges_result.HasError()) {
spdlog::debug("Encountered an error while trying to get in-going EdgeAccessors. Transaction id: {}",
req.transaction_id.logical_id);
return in_edges_result.GetError();
}
in_edges = maybe_filter_based_on_edge_uniqueness(std::move(in_edges_result.GetValue()), msgs::EdgeDirection::IN);
auto out_edges_result = v_acc->OutEdges(View::NEW, edge_types);
if (out_edges_result.HasError()) {
spdlog::debug("Encountered an error while trying to get out-going EdgeAccessors. Transaction id: {}",
req.transaction_id.logical_id);
return out_edges_result.GetError();
}
out_edges =
maybe_filter_based_on_edge_uniqueness(std::move(out_edges_result.GetValue()), msgs::EdgeDirection::OUT);
break;
}
}
return std::array<std::vector<EdgeAccessor>, 2>{std::move(in_edges), std::move(out_edges)};
}
template <bool are_in_edges>
ShardResult<void> FillEdges(const std::vector<EdgeAccessor> &edges, msgs::ExpandOneResultRow &row,
const EdgeFiller &edge_filler) {
for (const auto &edge : edges) {
if (const auto res = edge_filler(edge, are_in_edges, row); res.HasError()) {
return res.GetError();
}
}
return {};
}
}; // namespace
ShardResult<std::map<PropertyId, Value>> CollectSpecificPropertiesFromAccessor(const VertexAccessor &acc,
const std::vector<PropertyId> &props,
View view) {
std::map<PropertyId, Value> ret;
for (const auto &prop : props) {
auto result = acc.GetProperty(prop, view);
if (result.HasError()) {
spdlog::debug("Encountered an Error while trying to get a vertex property.");
return result.GetError();
}
auto &value = result.GetValue();
ret.emplace(std::make_pair(prop, FromPropertyValueToValue(std::move(value))));
}
return ret;
}
std::vector<TypedValue> EvaluateVertexExpressions(DbAccessor &dba, const VertexAccessor &v_acc,
const std::vector<std::string> &expressions,
std::string_view node_name) {
std::vector<TypedValue> evaluated_expressions;
evaluated_expressions.reserve(expressions.size());
std::transform(expressions.begin(), expressions.end(), std::back_inserter(evaluated_expressions),
[&dba, &v_acc, &node_name](const auto &expression) {
return ComputeExpression(dba, v_acc, std::nullopt, expression, node_name, "");
});
return evaluated_expressions;
}
std::vector<TypedValue> EvaluateEdgeExpressions(DbAccessor &dba, const VertexAccessor &v_acc, const EdgeAccessor &e_acc,
const std::vector<std::string> &expressions) {
std::vector<TypedValue> evaluated_expressions;
evaluated_expressions.reserve(expressions.size());
std::transform(expressions.begin(), expressions.end(), std::back_inserter(evaluated_expressions),
[&dba, &v_acc, &e_acc](const auto &expression) {
return ComputeExpression(dba, v_acc, e_acc, expression, expr::identifier_node_symbol,
expr::identifier_edge_symbol);
});
return evaluated_expressions;
}
ShardResult<std::map<PropertyId, Value>> CollectAllPropertiesFromAccessor(const VertexAccessor &acc, View view,
const Schemas::Schema &schema) {
auto ret = impl::CollectAllPropertiesImpl<VertexAccessor>(acc, view);
if (ret.HasError()) {
return ret.GetError();
}
auto pks = PrimaryKeysFromAccessor(acc, view, schema);
if (pks) {
ret.GetValue().merge(std::move(*pks));
}
return ret;
}
ShardResult<std::map<PropertyId, Value>> CollectAllPropertiesFromAccessor(const VertexAccessor &acc, View view) {
return impl::CollectAllPropertiesImpl(acc, view);
}
EdgeUniquenessFunction InitializeEdgeUniquenessFunction(bool only_unique_neighbor_rows) {
// Function to select connecting edges based on uniqueness
EdgeUniquenessFunction maybe_filter_based_on_edge_uniquness;
if (only_unique_neighbor_rows) {
maybe_filter_based_on_edge_uniquness = [](EdgeAccessors &&edges,
msgs::EdgeDirection edge_direction) -> EdgeAccessors {
std::function<bool(std::set<const storage::v3::VertexId *, VertexIdCmpr> &, const storage::v3::EdgeAccessor &)>
is_edge_unique;
switch (edge_direction) {
case msgs::EdgeDirection::OUT: {
is_edge_unique = [](std::set<const storage::v3::VertexId *, VertexIdCmpr> &other_vertex_set,
const storage::v3::EdgeAccessor &edge_acc) {
auto [it, insertion_happened] = other_vertex_set.insert(&edge_acc.To());
return insertion_happened;
};
break;
}
case msgs::EdgeDirection::IN: {
is_edge_unique = [](std::set<const storage::v3::VertexId *, VertexIdCmpr> &other_vertex_set,
const storage::v3::EdgeAccessor &edge_acc) {
auto [it, insertion_happened] = other_vertex_set.insert(&edge_acc.From());
return insertion_happened;
};
break;
}
case msgs::EdgeDirection::BOTH:
MG_ASSERT(false, "This is should never happen, msgs::EdgeDirection::BOTH should not be passed here.");
}
EdgeAccessors ret;
std::set<const storage::v3::VertexId *, VertexIdCmpr> other_vertex_set;
for (const auto &edge : edges) {
if (is_edge_unique(other_vertex_set, edge)) {
ret.emplace_back(edge);
}
}
return ret;
};
} else {
maybe_filter_based_on_edge_uniquness =
[](EdgeAccessors &&edges, msgs::EdgeDirection /*edge_direction*/) -> EdgeAccessors { return std::move(edges); };
}
return maybe_filter_based_on_edge_uniquness;
}
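// Illustrative usage (not part of this change): callers typically build the filter once per request,
// e.g. InitializeEdgeUniquenessFunction(/*only_unique_neighbor_rows=*/true), and then apply it per
// direction, e.g. filter(std::move(out_edges_result.GetValue()), msgs::EdgeDirection::OUT). With
// uniqueness enabled, parallel edges towards the same neighbor collapse to a single entry.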
EdgeFiller InitializeEdgeFillerFunction(const msgs::ExpandOneRequest &req) {
EdgeFiller edge_filler;
if (!req.edge_properties) {
edge_filler = [transaction_id = req.transaction_id.logical_id](
const EdgeAccessor &edge, const bool is_in_edge,
msgs::ExpandOneResultRow &result_row) -> ShardResult<void> {
auto properties_results = edge.Properties(View::NEW);
if (properties_results.HasError()) {
spdlog::debug("Encountered an error while trying to get edge properties. Transaction id: {}", transaction_id);
return properties_results.GetError();
}
std::map<PropertyId, msgs::Value> value_properties;
for (auto &[prop_key, prop_val] : properties_results.GetValue()) {
value_properties.insert(std::make_pair(prop_key, FromPropertyValueToValue(std::move(prop_val))));
}
using EdgeWithAllProperties = msgs::ExpandOneResultRow::EdgeWithAllProperties;
EdgeWithAllProperties edges{ToMsgsVertexId(edge.From()), msgs::EdgeType{edge.EdgeType()}, edge.Gid().AsUint(),
std::move(value_properties)};
if (is_in_edge) {
result_row.in_edges_with_all_properties.push_back(std::move(edges));
} else {
result_row.out_edges_with_all_properties.push_back(std::move(edges));
}
return {};
};
} else {
// TODO(gvolfing) - do we want to set the action_successful here?
edge_filler = [&req](const EdgeAccessor &edge, const bool is_in_edge,
msgs::ExpandOneResultRow &result_row) -> ShardResult<void> {
std::vector<msgs::Value> value_properties;
value_properties.reserve(req.edge_properties.value().size());
for (const auto &edge_prop : req.edge_properties.value()) {
auto property_result = edge.GetProperty(edge_prop, View::NEW);
if (property_result.HasError()) {
spdlog::debug("Encountered an error while trying to get edge properties. Transaction id: {}",
req.transaction_id.logical_id);
return property_result.GetError();
}
value_properties.emplace_back(FromPropertyValueToValue(std::move(property_result.GetValue())));
}
using EdgeWithSpecificProperties = msgs::ExpandOneResultRow::EdgeWithSpecificProperties;
EdgeWithSpecificProperties edges{ToMsgsVertexId(edge.From()), msgs::EdgeType{edge.EdgeType()},
edge.Gid().AsUint(), std::move(value_properties)};
if (is_in_edge) {
result_row.in_edges_with_specific_properties.push_back(std::move(edges));
} else {
result_row.out_edges_with_specific_properties.push_back(std::move(edges));
}
return {};
};
}
return edge_filler;
}
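// Illustrative usage (not part of this change): the filler produced here is what FillEdges runs every
// connecting edge through, e.g.
//   if (const auto res = edge_filler(edge, /*is_in_edge=*/true, result_row); res.HasError()) { /* propagate */ }
// Whether edges land in the *_with_all_properties or the *_with_specific_properties vectors of the
// result row depends on req.edge_properties being unset or set.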
bool FilterOnVertex(DbAccessor &dba, const storage::v3::VertexAccessor &v_acc,
const std::vector<std::string> &filters) {
return std::ranges::all_of(filters, [&dba, &v_acc](const auto &filter_expr) {
const auto result = ComputeExpression(dba, v_acc, std::nullopt, filter_expr, expr::identifier_node_symbol, "");
return result.IsBool() && result.ValueBool();
});
return ordered;
}
bool FilterOnEdge(DbAccessor &dba, const storage::v3::VertexAccessor &v_acc, const EdgeAccessor &e_acc,
const std::vector<std::string> &filters) {
return std::ranges::all_of(filters, [&dba, &v_acc, &e_acc](const auto &filter_expr) {
const auto result =
ComputeExpression(dba, v_acc, e_acc, filter_expr, expr::identifier_node_symbol, expr::identifier_edge_symbol);
return result.IsBool() && result.ValueBool();
});
}
ShardResult<msgs::ExpandOneResultRow> GetExpandOneResult(
Shard::Accessor &acc, msgs::VertexId src_vertex, const msgs::ExpandOneRequest &req,
const EdgeUniquenessFunction &maybe_filter_based_on_edge_uniqueness, const EdgeFiller &edge_filler,
const Schemas::Schema &schema) {
/// Fill up source vertex
const auto primary_key = ConvertPropertyVector(src_vertex.second);
auto v_acc = acc.FindVertex(primary_key, View::NEW);
msgs::Vertex source_vertex = {.id = src_vertex};
auto maybe_secondary_labels = FillUpSourceVertexSecondaryLabels(v_acc, req);
if (maybe_secondary_labels.HasError()) {
return maybe_secondary_labels.GetError();
}
source_vertex.labels = std::move(*maybe_secondary_labels);
auto src_vertex_properties = FillUpSourceVertexProperties(v_acc, req, storage::v3::View::NEW, schema);
if (src_vertex_properties.HasError()) {
return src_vertex_properties.GetError();
}
/// Fill up connecting edges
auto fill_up_connecting_edges = FillUpConnectingEdges(v_acc, req, maybe_filter_based_on_edge_uniqueness);
if (fill_up_connecting_edges.HasError()) {
return fill_up_connecting_edges.GetError();
}
auto [in_edges, out_edges] = fill_up_connecting_edges.GetValue();
msgs::ExpandOneResultRow result_row;
result_row.src_vertex = std::move(source_vertex);
result_row.src_vertex_properties = std::move(*src_vertex_properties);
static constexpr bool kInEdges = true;
static constexpr bool kOutEdges = false;
if (const auto fill_edges_res = FillEdges<kInEdges>(in_edges, result_row, edge_filler); fill_edges_res.HasError()) {
return fill_edges_res.GetError();
}
if (const auto fill_edges_res = FillEdges<kOutEdges>(out_edges, result_row, edge_filler); fill_edges_res.HasError()) {
return fill_edges_res.GetError();
}
return result_row;
}
ShardResult<msgs::ExpandOneResultRow> GetExpandOneResult(
VertexAccessor v_acc, msgs::VertexId src_vertex, const msgs::ExpandOneRequest &req,
std::vector<EdgeAccessor> in_edge_accessors, std::vector<EdgeAccessor> out_edge_accessors,
const EdgeUniquenessFunction &maybe_filter_based_on_edge_uniqueness, const EdgeFiller &edge_filler,
const Schemas::Schema &schema) {
/// Fill up source vertex
msgs::Vertex source_vertex = {.id = src_vertex};
auto maybe_secondary_labels = FillUpSourceVertexSecondaryLabels(v_acc, req);
if (maybe_secondary_labels.HasError()) {
return maybe_secondary_labels.GetError();
}
source_vertex.labels = std::move(*maybe_secondary_labels);
/// Fill up source vertex properties
auto src_vertex_properties = FillUpSourceVertexProperties(v_acc, req, storage::v3::View::NEW, schema);
if (src_vertex_properties.HasError()) {
return src_vertex_properties.GetError();
}
/// Fill up connecting edges
auto in_edges = maybe_filter_based_on_edge_uniqueness(std::move(in_edge_accessors), msgs::EdgeDirection::IN);
auto out_edges = maybe_filter_based_on_edge_uniqueness(std::move(out_edge_accessors), msgs::EdgeDirection::OUT);
msgs::ExpandOneResultRow result_row;
result_row.src_vertex = std::move(source_vertex);
result_row.src_vertex_properties = std::move(*src_vertex_properties);
static constexpr bool kInEdges = true;
static constexpr bool kOutEdges = false;
if (const auto fill_edges_res = FillEdges<kInEdges>(in_edges, result_row, edge_filler); fill_edges_res.HasError()) {
return fill_edges_res.GetError();
}
if (const auto fill_edges_res = FillEdges<kOutEdges>(out_edges, result_row, edge_filler); fill_edges_res.HasError()) {
return fill_edges_res.GetError();
}
return result_row;
}
VerticesIterable::Iterator GetStartVertexIterator(VerticesIterable &vertex_iterable,
const std::vector<PropertyValue> &start_ids, const View view) {
const std::vector<PropertyValue> &primary_key, const View view) {
auto it = vertex_iterable.begin();
while (it != vertex_iterable.end()) {
if (const auto &vertex = *it; start_ids <= vertex.PrimaryKey(view).GetValue()) {
if (const auto &vertex = *it; primary_key <= vertex.PrimaryKey(view).GetValue()) {
break;
}
++it;
@ -68,15 +469,112 @@ VerticesIterable::Iterator GetStartVertexIterator(VerticesIterable &vertex_itera
return it;
}
std::vector<Element>::const_iterator GetStartOrderedElementsIterator(const std::vector<Element> &ordered_elements,
const std::vector<PropertyValue> &start_ids,
const View view) {
std::vector<Element<VertexAccessor>>::const_iterator GetStartOrderedElementsIterator(
const std::vector<Element<VertexAccessor>> &ordered_elements, const std::vector<PropertyValue> &primary_key,
const View view) {
for (auto it = ordered_elements.begin(); it != ordered_elements.end(); ++it) {
if (const auto &vertex = it->vertex_acc; start_ids <= vertex.PrimaryKey(view).GetValue()) {
if (const auto &vertex = it->object_acc; primary_key <= vertex.PrimaryKey(view).GetValue()) {
return it;
}
}
return ordered_elements.end();
}
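// Illustrative sketch, not part of the commit: both start-iterator helpers above scan
// forward until the element's primary key compares greater than or equal to the requested
// start key (lexicographic comparison over the property vector). A minimal analogue over
// plain integer keys; the names here are made up for the example:
#include <vector>

namespace sketch {

using PrimaryKey = std::vector<int>;

struct OrderedElement {
  PrimaryKey key;
};

inline std::vector<OrderedElement>::const_iterator GetStartIterator(
    const std::vector<OrderedElement> &ordered, const PrimaryKey &start_key) {
  for (auto it = ordered.begin(); it != ordered.end(); ++it) {
    // operator<= on std::vector is lexicographic, mirroring the primary-key comparison.
    if (start_key <= it->key) {
      return it;
    }
  }
  return ordered.end();
}

}  // namespace sketch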
std::array<std::vector<EdgeAccessor>, 2> GetEdgesFromVertex(const VertexAccessor &vertex_accessor,
const msgs::EdgeDirection direction) {
std::vector<EdgeAccessor> in_edges;
std::vector<EdgeAccessor> out_edges;
switch (direction) {
case memgraph::msgs::EdgeDirection::IN: {
auto edges = vertex_accessor.InEdges(View::OLD);
if (edges.HasValue()) {
in_edges = edges.GetValue();
}
break;
}
case memgraph::msgs::EdgeDirection::OUT: {
auto edges = vertex_accessor.OutEdges(View::OLD);
if (edges.HasValue()) {
out_edges = edges.GetValue();
}
break;
}
case memgraph::msgs::EdgeDirection::BOTH: {
auto maybe_in_edges = vertex_accessor.InEdges(View::OLD);
auto maybe_out_edges = vertex_accessor.OutEdges(View::OLD);
if (maybe_in_edges.HasValue()) {
in_edges = maybe_in_edges.GetValue();
}
if (maybe_out_edges.HasValue()) {
out_edges = maybe_out_edges.GetValue();
}
break;
}
}
return std::array<std::vector<EdgeAccessor>, 2>{std::move(in_edges), std::move(out_edges)};
}
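// Illustrative sketch, not part of the commit: GetEdgesFromVertex() above gathers in- and
// out-edges into a two-element std::array keyed by direction, fetching both sets only for
// EdgeDirection::BOTH. A self-contained analogue with toy edge and vertex types:
#include <array>
#include <utility>
#include <vector>

namespace sketch {

enum class EdgeDirection { IN, OUT, BOTH };

struct Edge {
  int id{0};
};

struct Vertex {
  std::vector<Edge> in;
  std::vector<Edge> out;
};

inline std::array<std::vector<Edge>, 2> GetEdgesFromVertex(const Vertex &v, const EdgeDirection direction) {
  std::vector<Edge> in_edges;
  std::vector<Edge> out_edges;
  switch (direction) {
    case EdgeDirection::IN:
      in_edges = v.in;
      break;
    case EdgeDirection::OUT:
      out_edges = v.out;
      break;
    case EdgeDirection::BOTH:
      in_edges = v.in;
      out_edges = v.out;
      break;
  }
  return {std::move(in_edges), std::move(out_edges)};
}

}  // namespace sketch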
std::vector<Element<EdgeAccessor>> OrderByEdges(DbAccessor &dba, std::vector<EdgeAccessor> &iterable,
std::vector<msgs::OrderBy> &order_by_edges,
const VertexAccessor &vertex_acc) {
std::vector<Ordering> ordering;
ordering.reserve(order_by_edges.size());
std::transform(order_by_edges.begin(), order_by_edges.end(), std::back_inserter(ordering),
[](const auto &order_by) { return ConvertMsgsOrderByToOrdering(order_by.direction); });
std::vector<Element<EdgeAccessor>> ordered;
for (auto it = iterable.begin(); it != iterable.end(); ++it) {
std::vector<TypedValue> properties_order_by;
properties_order_by.reserve(order_by_edges.size());
std::transform(order_by_edges.begin(), order_by_edges.end(), std::back_inserter(properties_order_by),
[&dba, &vertex_acc, &it](const auto &order_by) {
return ComputeExpression(dba, vertex_acc, *it, order_by.expression.expression,
expr::identifier_node_symbol, expr::identifier_edge_symbol);
});
ordered.push_back({std::move(properties_order_by), *it});
}
auto compare_typed_values = TypedValueVectorCompare(ordering);
std::sort(ordered.begin(), ordered.end(), [compare_typed_values](const auto &pair1, const auto &pair2) {
return compare_typed_values(pair1.properties_order_by, pair2.properties_order_by);
});
return ordered;
}
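// Illustrative sketch, not part of the commit: the OrderBy* helpers share one shape:
// compute a vector of sort keys per element, then std::sort with a comparator over those
// key vectors. A trimmed-down analogue that orders edges by (weight, id), ascending only:
#include <algorithm>
#include <vector>

namespace sketch {

struct Edge {
  int weight{0};
  int id{0};
};

struct Element {
  std::vector<int> sort_keys;
  Edge edge;
};

inline std::vector<Element> OrderByWeightThenId(const std::vector<Edge> &edges) {
  std::vector<Element> ordered;
  ordered.reserve(edges.size());
  for (const auto &edge : edges) {
    // One key per ORDER BY clause; the clauses are hard-coded for the example.
    ordered.push_back({{edge.weight, edge.id}, edge});
  }
  std::sort(ordered.begin(), ordered.end(), [](const Element &lhs, const Element &rhs) {
    return lhs.sort_keys < rhs.sort_keys;  // lexicographic over the key vectors
  });
  return ordered;
}

}  // namespace sketch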
std::vector<Element<std::pair<VertexAccessor, EdgeAccessor>>> OrderByEdges(
DbAccessor &dba, std::vector<EdgeAccessor> &iterable, std::vector<msgs::OrderBy> &order_by_edges,
const std::vector<VertexAccessor> &vertex_acc) {
MG_ASSERT(vertex_acc.size() == iterable.size());
std::vector<Ordering> ordering;
ordering.reserve(order_by_edges.size());
std::transform(order_by_edges.begin(), order_by_edges.end(), std::back_inserter(ordering),
[](const auto &order_by) { return ConvertMsgsOrderByToOrdering(order_by.direction); });
std::vector<Element<std::pair<VertexAccessor, EdgeAccessor>>> ordered;
VertexAccessor current = vertex_acc.front();
size_t id = 0;
for (auto it = iterable.begin(); it != iterable.end(); it++, id++) {
current = vertex_acc[id];
std::vector<TypedValue> properties_order_by;
properties_order_by.reserve(order_by_edges.size());
std::transform(order_by_edges.begin(), order_by_edges.end(), std::back_inserter(properties_order_by),
[&dba, it, current](const auto &order_by) {
return ComputeExpression(dba, current, *it, order_by.expression.expression,
expr::identifier_node_symbol, expr::identifier_edge_symbol);
});
ordered.push_back({std::move(properties_order_by), {current, *it}});
}
auto compare_typed_values = TypedValueVectorCompare(ordering);
std::sort(ordered.begin(), ordered.end(), [compare_typed_values](const auto &pair1, const auto &pair2) {
return compare_typed_values(pair1.properties_order_by, pair2.properties_order_by);
});
return ordered;
}
} // namespace memgraph::storage::v3
View File
@ -9,14 +9,30 @@
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#pragma once
#include <vector>
#include "ast/ast.hpp"
#include "query/v2/requests.hpp"
#include "storage/v3/bindings/ast/ast.hpp"
#include "storage/v3/bindings/pretty_print_ast_to_original_expression.hpp"
#include "storage/v3/bindings/typed_value.hpp"
#include "storage/v3/edge_accessor.hpp"
#include "storage/v3/expr.hpp"
#include "storage/v3/shard.hpp"
#include "storage/v3/value_conversions.hpp"
#include "storage/v3/vertex_accessor.hpp"
#include "utils/template_utils.hpp"
namespace memgraph::storage::v3 {
using EdgeAccessors = std::vector<storage::v3::EdgeAccessor>;
using EdgeUniquenessFunction = std::function<EdgeAccessors(EdgeAccessors &&, msgs::EdgeDirection)>;
using EdgeFiller =
std::function<ShardResult<void>(const EdgeAccessor &edge, bool is_in_edge, msgs::ExpandOneResultRow &result_row)>;
using msgs::Value;
template <typename T>
concept OrderableObject = utils::SameAsAnyOf<T, VertexAccessor, EdgeAccessor, std::pair<VertexAccessor, EdgeAccessor>>;
inline bool TypedValueCompare(const TypedValue &a, const TypedValue &b) {
// in ordering null comes after everything else
@ -72,6 +88,17 @@ inline bool TypedValueCompare(const TypedValue &a, const TypedValue &b) {
}
}
inline Ordering ConvertMsgsOrderByToOrdering(msgs::OrderingDirection ordering) {
switch (ordering) {
case memgraph::msgs::OrderingDirection::ASCENDING:
return memgraph::storage::v3::Ordering::ASC;
case memgraph::msgs::OrderingDirection::DESCENDING:
return memgraph::storage::v3::Ordering::DESC;
default:
LOG_FATAL("Unknown ordering direction");
}
}
class TypedValueVectorCompare final {
public:
explicit TypedValueVectorCompare(const std::vector<Ordering> &ordering) : ordering_(ordering) {}
@ -99,18 +126,134 @@ class TypedValueVectorCompare final {
std::vector<Ordering> ordering_;
};
template <OrderableObject TObjectAccessor>
struct Element {
std::vector<TypedValue> properties_order_by;
VertexAccessor vertex_acc;
TObjectAccessor object_acc;
};
std::vector<Element> OrderByElements(Shard::Accessor &acc, DbAccessor &dba, VerticesIterable &vertices_iterable,
std::vector<msgs::OrderBy> &order_bys);
template <typename T>
concept VerticesIt = utils::SameAsAnyOf<T, VerticesIterable, std::vector<VertexAccessor>>;
template <VerticesIt TIterable>
std::vector<Element<VertexAccessor>> OrderByVertices(DbAccessor &dba, TIterable &iterable,
std::vector<msgs::OrderBy> &order_by_vertices) {
std::vector<Ordering> ordering;
ordering.reserve(order_by_vertices.size());
std::transform(order_by_vertices.begin(), order_by_vertices.end(), std::back_inserter(ordering),
[](const auto &order_by) { return ConvertMsgsOrderByToOrdering(order_by.direction); });
std::vector<Element<VertexAccessor>> ordered;
for (auto it = iterable.begin(); it != iterable.end(); ++it) {
std::vector<TypedValue> properties_order_by;
properties_order_by.reserve(order_by_vertices.size());
std::transform(order_by_vertices.begin(), order_by_vertices.end(), std::back_inserter(properties_order_by),
[&dba, &it](const auto &order_by) {
return ComputeExpression(dba, *it, std::nullopt /*e_acc*/, order_by.expression.expression,
expr::identifier_node_symbol, expr::identifier_edge_symbol);
});
ordered.push_back({std::move(properties_order_by), *it});
}
auto compare_typed_values = TypedValueVectorCompare(ordering);
std::sort(ordered.begin(), ordered.end(), [compare_typed_values](const auto &pair1, const auto &pair2) {
return compare_typed_values(pair1.properties_order_by, pair2.properties_order_by);
});
return ordered;
}
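// Illustrative sketch, not part of the commit: TypedValueVectorCompare (used by the sort in
// OrderByVertices above) compares two key vectors column by column and flips the result for
// DESC columns. A self-contained analogue over integer keys:
#include <cstddef>
#include <utility>
#include <vector>

namespace sketch {

enum class Ordering { ASC, DESC };

class KeyVectorCompare {
 public:
  explicit KeyVectorCompare(std::vector<Ordering> ordering) : ordering_(std::move(ordering)) {}

  bool operator()(const std::vector<int> &lhs, const std::vector<int> &rhs) const {
    for (std::size_t i = 0; i < ordering_.size() && i < lhs.size() && i < rhs.size(); ++i) {
      if (lhs[i] == rhs[i]) continue;  // tie on this column, look at the next one
      const bool less = lhs[i] < rhs[i];
      return ordering_[i] == Ordering::ASC ? less : !less;
    }
    return false;  // equal prefixes compare as equivalent
  }

 private:
  std::vector<Ordering> ordering_;
};

}  // namespace sketch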
std::vector<Element<EdgeAccessor>> OrderByEdges(DbAccessor &dba, std::vector<EdgeAccessor> &iterable,
std::vector<msgs::OrderBy> &order_by_edges,
const VertexAccessor &vertex_acc);
std::vector<Element<std::pair<VertexAccessor, EdgeAccessor>>> OrderByEdges(
DbAccessor &dba, std::vector<EdgeAccessor> &iterable, std::vector<msgs::OrderBy> &order_by_edges,
const std::vector<VertexAccessor> &vertex_acc);
VerticesIterable::Iterator GetStartVertexIterator(VerticesIterable &vertex_iterable,
const std::vector<PropertyValue> &start_ids, View view);
const std::vector<PropertyValue> &primary_key, View view);
std::vector<Element>::const_iterator GetStartOrderedElementsIterator(const std::vector<Element> &ordered_elements,
const std::vector<PropertyValue> &start_ids,
View view);
std::vector<Element<VertexAccessor>>::const_iterator GetStartOrderedElementsIterator(
const std::vector<Element<VertexAccessor>> &ordered_elements, const std::vector<PropertyValue> &primary_key,
View view);
std::array<std::vector<EdgeAccessor>, 2> GetEdgesFromVertex(const VertexAccessor &vertex_accessor,
msgs::EdgeDirection direction);
bool FilterOnVertex(DbAccessor &dba, const storage::v3::VertexAccessor &v_acc, const std::vector<std::string> &filters);
bool FilterOnEdge(DbAccessor &dba, const storage::v3::VertexAccessor &v_acc, const EdgeAccessor &e_acc,
const std::vector<std::string> &filters);
std::vector<TypedValue> EvaluateVertexExpressions(DbAccessor &dba, const VertexAccessor &v_acc,
const std::vector<std::string> &expressions,
std::string_view node_name);
std::vector<TypedValue> EvaluateEdgeExpressions(DbAccessor &dba, const VertexAccessor &v_acc, const EdgeAccessor &e_acc,
const std::vector<std::string> &expressions);
template <typename T>
concept PropertiesAccessor = utils::SameAsAnyOf<T, VertexAccessor, EdgeAccessor>;
template <PropertiesAccessor TAccessor>
ShardResult<std::map<PropertyId, Value>> CollectSpecificPropertiesFromAccessor(const TAccessor &acc,
const std::vector<PropertyId> &props,
View view) {
std::map<PropertyId, Value> ret;
for (const auto &prop : props) {
auto result = acc.GetProperty(prop, view);
if (result.HasError()) {
spdlog::debug("Encountered an Error while trying to get a vertex property.");
return result.GetError();
}
auto &value = result.GetValue();
ret.emplace(std::make_pair(prop, FromPropertyValueToValue(std::move(value))));
}
return ret;
}
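// Illustrative sketch, not part of the commit: CollectSpecificPropertiesFromAccessor()
// walks the requested property ids, returns the first lookup error it hits, and otherwise
// builds the id -> value map. A minimal analogue that uses C++23 std::expected in place of
// ShardResult (an assumption made for the example, not the project's error type):
#include <expected>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace sketch {

using PropertyId = int;
using Value = std::string;

struct PropertyStore {
  std::map<PropertyId, Value> values;

  std::expected<Value, std::string> Get(const PropertyId id) const {
    if (auto it = values.find(id); it != values.end()) return it->second;
    return std::unexpected("property not found");
  }
};

inline std::expected<std::map<PropertyId, Value>, std::string> CollectSpecificProperties(
    const PropertyStore &store, const std::vector<PropertyId> &props) {
  std::map<PropertyId, Value> ret;
  for (const auto prop : props) {
    auto result = store.Get(prop);
    if (!result) {
      return std::unexpected(result.error());  // propagate the first error
    }
    ret.emplace(prop, std::move(*result));
  }
  return ret;
}

}  // namespace sketch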
ShardResult<std::map<PropertyId, Value>> CollectAllPropertiesFromAccessor(const VertexAccessor &acc, View view,
const Schemas::Schema &schema);
namespace impl {
template <PropertiesAccessor TAccessor>
ShardResult<std::map<PropertyId, Value>> CollectAllPropertiesImpl(const TAccessor &acc, View view) {
std::map<PropertyId, Value> ret;
auto props = acc.Properties(view);
if (props.HasError()) {
spdlog::debug("Encountered an error while trying to get vertex properties.");
return props.GetError();
}
auto &properties = props.GetValue();
std::transform(properties.begin(), properties.end(), std::inserter(ret, ret.begin()),
[](std::pair<const PropertyId, PropertyValue> &pair) {
return std::make_pair(pair.first, conversions::FromPropertyValueToValue(std::move(pair.second)));
});
return ret;
}
} // namespace impl
template <PropertiesAccessor TAccessor>
ShardResult<std::map<PropertyId, Value>> CollectAllPropertiesFromAccessor(const TAccessor &acc, View view) {
return impl::CollectAllPropertiesImpl<TAccessor>(acc, view);
}
EdgeUniquenessFunction InitializeEdgeUniquenessFunction(bool only_unique_neighbor_rows);
EdgeFiller InitializeEdgeFillerFunction(const msgs::ExpandOneRequest &req);
ShardResult<msgs::ExpandOneResultRow> GetExpandOneResult(
Shard::Accessor &acc, msgs::VertexId src_vertex, const msgs::ExpandOneRequest &req,
const EdgeUniquenessFunction &maybe_filter_based_on_edge_uniqueness, const EdgeFiller &edge_filler,
const Schemas::Schema &schema);
ShardResult<msgs::ExpandOneResultRow> GetExpandOneResult(
VertexAccessor v_acc, msgs::VertexId src_vertex, const msgs::ExpandOneRequest &req,
std::vector<EdgeAccessor> in_edge_accessors, std::vector<EdgeAccessor> out_edge_accessors,
const EdgeUniquenessFunction &maybe_filter_based_on_edge_uniqueness, const EdgeFiller &edge_filler,
const Schemas::Schema &schema);
} // namespace memgraph::storage::v3
View File
@ -11,24 +11,43 @@
#pragma once
#include <cstdint>
#include <experimental/source_location>
#include <string>
#include <string_view>
#include <type_traits>
#include "common/errors.hpp"
#include "utils/result.hpp"
namespace memgraph::storage::v3 {
static_assert(std::is_same_v<uint8_t, unsigned char>);
enum class Error : uint8_t {
SERIALIZATION_ERROR,
NONEXISTENT_OBJECT,
DELETED_OBJECT,
VERTEX_HAS_EDGES,
PROPERTIES_DISABLED,
VERTEX_ALREADY_INSERTED
struct ShardError {
ShardError(common::ErrorCode code, std::string message, const std::experimental::source_location location)
: code{code}, message{std::move(message)}, source{fmt::format("{}:{}", location.file_name(), location.line())} {}
ShardError(common::ErrorCode code, const std::experimental::source_location location)
: code{code}, source{fmt::format("{}:{}", location.file_name(), location.line())} {}
common::ErrorCode code;
std::string message;
std::string source;
inline friend bool operator==(const ShardError &lhs, const ShardError &rhs) { return lhs.code == rhs.code; }
inline friend bool operator==(const ShardError &lhs, const common::ErrorCode rhs) { return lhs.code == rhs; }
};
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define SHARD_ERROR(error, ...) \
({ \
using ErrorCode = memgraph::common::ErrorCode; \
memgraph::storage::v3::ShardError(error, GET_MESSAGE(__VA_ARGS__), std::experimental::source_location::current()); \
})
template <class TValue>
using Result = utils::BasicResult<Error, TValue>;
using ShardResult = utils::BasicResult<ShardError, TValue>;
} // namespace memgraph::storage::v3
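// Illustrative sketch, not part of the commit: the new ShardError above pairs an error code
// with an optional message and the source location that raised it, and ShardResult<T>
// carries either a value or that error. A compilable analogue built on std::source_location
// and C++23 std::expected (assumptions made for the example; the project uses its own
// BasicResult and fmt-based formatting, and the error codes below are hypothetical):
#include <cstdint>
#include <expected>
#include <source_location>
#include <string>
#include <utility>

namespace sketch {

enum class ErrorCode : std::uint8_t { INVALID_ARGUMENT, NOT_FOUND };

struct Error {
  Error(ErrorCode code, std::string message,
        std::source_location location = std::source_location::current())
      : code{code},
        message{std::move(message)},
        source{std::string{location.file_name()} + ":" + std::to_string(location.line())} {}

  ErrorCode code;
  std::string message;
  std::string source;  // "<file>:<line>" of the call site that created the error
};

template <class TValue>
using Result = std::expected<TValue, Error>;

// Usage: a failing operation records where the error was raised.
inline Result<int> ParsePositive(const int value) {
  if (value < 0) return std::unexpected(Error{ErrorCode::INVALID_ARGUMENT, "negative value"});
  return value;
}

}  // namespace sketch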
View File
@ -16,67 +16,58 @@
#include <ranges>
#include "common/types.hpp"
#include "storage/v3/name_id_mapper.hpp"
#include "storage/v3/result.hpp"
#include "storage/v3/schemas.hpp"
namespace memgraph::storage::v3 {
bool operator==(const SchemaViolation &lhs, const SchemaViolation &rhs) {
return lhs.status == rhs.status && lhs.label == rhs.label &&
lhs.violated_schema_property == rhs.violated_schema_property &&
lhs.violated_property_value == rhs.violated_property_value;
}
SchemaValidator::SchemaValidator(Schemas &schemas, const NameIdMapper &name_id_mapper)
: schemas_{&schemas}, name_id_mapper_{&name_id_mapper} {}
SchemaViolation::SchemaViolation(ValidationStatus status, LabelId label) : status{status}, label{label} {}
SchemaViolation::SchemaViolation(ValidationStatus status, LabelId label, SchemaProperty violated_schema_property)
: status{status}, label{label}, violated_schema_property{violated_schema_property} {}
SchemaViolation::SchemaViolation(ValidationStatus status, LabelId label, SchemaProperty violated_schema_property,
PropertyValue violated_property_value)
: status{status},
label{label},
violated_schema_property{violated_schema_property},
violated_property_value{violated_property_value} {}
SchemaValidator::SchemaValidator(Schemas &schemas) : schemas_{schemas} {}
std::optional<SchemaViolation> SchemaValidator::ValidateVertexCreate(
LabelId primary_label, const std::vector<LabelId> &labels,
const std::vector<PropertyValue> &primary_properties) const {
ShardResult<void> SchemaValidator::ValidateVertexCreate(LabelId primary_label, const std::vector<LabelId> &labels,
const std::vector<PropertyValue> &primary_properties) const {
// Schema on primary label
const auto *schema = schemas_.GetSchema(primary_label);
const auto *schema = schemas_->GetSchema(primary_label);
if (schema == nullptr) {
return SchemaViolation(SchemaViolation::ValidationStatus::NO_SCHEMA_DEFINED_FOR_LABEL, primary_label);
return SHARD_ERROR(ErrorCode::SCHEMA_NO_SCHEMA_DEFINED_FOR_LABEL, "Schema not defined for label :{}",
name_id_mapper_->IdToName(primary_label.AsInt()));
}
// Is there another primary label among secondary labels
for (const auto &secondary_label : labels) {
if (schemas_.GetSchema(secondary_label)) {
return SchemaViolation(SchemaViolation::ValidationStatus::VERTEX_SECONDARY_LABEL_IS_PRIMARY, secondary_label);
if (schemas_->GetSchema(secondary_label)) {
return SHARD_ERROR(ErrorCode::SCHEMA_VERTEX_SECONDARY_LABEL_IS_PRIMARY,
"Cannot add label :{}, since it is defined as a primary label",
name_id_mapper_->IdToName(secondary_label.AsInt()));
}
}
// Quick size check
if (schema->second.size() != primary_properties.size()) {
return SchemaViolation(SchemaViolation::ValidationStatus::VERTEX_PRIMARY_PROPERTIES_UNDEFINED, primary_label);
return SHARD_ERROR(ErrorCode::SCHEMA_VERTEX_PRIMARY_PROPERTIES_UNDEFINED,
"Not all primary properties have been specified for :{} vertex",
name_id_mapper_->IdToName(primary_label.AsInt()));
}
// Check only properties defined by schema
for (size_t i{0}; i < schema->second.size(); ++i) {
// Check schema property type
if (auto property_schema_type = PropertyTypeToSchemaType(primary_properties[i]);
property_schema_type && *property_schema_type != schema->second[i].type) {
return SchemaViolation(SchemaViolation::ValidationStatus::VERTEX_PROPERTY_WRONG_TYPE, primary_label,
schema->second[i], primary_properties[i]);
return SHARD_ERROR(ErrorCode::SCHEMA_VERTEX_PROPERTY_WRONG_TYPE,
"Property {} is of wrong type, expected {}, actual {}",
name_id_mapper_->IdToName(schema->second[i].property_id.AsInt()),
SchemaTypeToString(schema->second[i].type), SchemaTypeToString(*property_schema_type));
}
}
return std::nullopt;
return {};
}
std::optional<SchemaViolation> SchemaValidator::ValidatePropertyUpdate(const LabelId primary_label,
const PropertyId property_id) const {
ShardResult<void> SchemaValidator::ValidatePropertyUpdate(const LabelId primary_label,
const PropertyId property_id) const {
// Verify existence of schema on primary label
const auto *schema = schemas_.GetSchema(primary_label);
const auto *schema = schemas_->GetSchema(primary_label);
MG_ASSERT(schema, "Cannot validate against non existing schema!");
// Verify that updating property is not part of schema
@ -84,34 +75,37 @@ std::optional<SchemaViolation> SchemaValidator::ValidatePropertyUpdate(const Lab
schema->second,
[property_id](const auto &schema_property) { return property_id == schema_property.property_id; });
schema_property != schema->second.end()) {
return SchemaViolation(SchemaViolation::ValidationStatus::VERTEX_UPDATE_PRIMARY_KEY, primary_label,
*schema_property);
return SHARD_ERROR(ErrorCode::SCHEMA_VERTEX_UPDATE_PRIMARY_KEY,
"Cannot update primary property {} of schema on label :{}",
name_id_mapper_->IdToName(schema_property->property_id.AsInt()),
name_id_mapper_->IdToName(primary_label.AsInt()));
}
return std::nullopt;
return {};
}
std::optional<SchemaViolation> SchemaValidator::ValidateLabelUpdate(const LabelId label) const {
const auto *schema = schemas_.GetSchema(label);
ShardResult<void> SchemaValidator::ValidateLabelUpdate(const LabelId label) const {
const auto *schema = schemas_->GetSchema(label);
if (schema) {
return SchemaViolation(SchemaViolation::ValidationStatus::VERTEX_UPDATE_PRIMARY_LABEL, label);
return SHARD_ERROR(ErrorCode::SCHEMA_VERTEX_UPDATE_PRIMARY_LABEL, "Cannot add/remove primary label :{}",
name_id_mapper_->IdToName(label.AsInt()));
}
return std::nullopt;
return {};
}
const Schemas::Schema *SchemaValidator::GetSchema(LabelId label) const { return schemas_.GetSchema(label); }
const Schemas::Schema *SchemaValidator::GetSchema(LabelId label) const { return schemas_->GetSchema(label); }
VertexValidator::VertexValidator(const SchemaValidator &schema_validator, const LabelId primary_label)
: schema_validator{&schema_validator}, primary_label_{primary_label} {}
std::optional<SchemaViolation> VertexValidator::ValidatePropertyUpdate(PropertyId property_id) const {
ShardResult<void> VertexValidator::ValidatePropertyUpdate(PropertyId property_id) const {
return schema_validator->ValidatePropertyUpdate(primary_label_, property_id);
};
std::optional<SchemaViolation> VertexValidator::ValidateAddLabel(LabelId label) const {
ShardResult<void> VertexValidator::ValidateAddLabel(LabelId label) const {
return schema_validator->ValidateLabelUpdate(label);
}
std::optional<SchemaViolation> VertexValidator::ValidateRemoveLabel(LabelId label) const {
ShardResult<void> VertexValidator::ValidateRemoveLabel(LabelId label) const {
return schema_validator->ValidateLabelUpdate(label);
}
View File
@ -11,68 +11,43 @@
#pragma once
#include <optional>
#include <variant>
#include "storage/v2/result.hpp"
#include "storage/v3/id_types.hpp"
#include "storage/v3/name_id_mapper.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/result.hpp"
#include "storage/v3/schemas.hpp"
namespace memgraph::storage::v3 {
struct SchemaViolation {
enum class ValidationStatus : uint8_t {
NO_SCHEMA_DEFINED_FOR_LABEL,
VERTEX_PROPERTY_WRONG_TYPE,
VERTEX_UPDATE_PRIMARY_KEY,
VERTEX_UPDATE_PRIMARY_LABEL,
VERTEX_SECONDARY_LABEL_IS_PRIMARY,
VERTEX_PRIMARY_PROPERTIES_UNDEFINED,
};
SchemaViolation(ValidationStatus status, LabelId label);
SchemaViolation(ValidationStatus status, LabelId label, SchemaProperty violated_schema_property);
SchemaViolation(ValidationStatus status, LabelId label, SchemaProperty violated_schema_property,
PropertyValue violated_property_value);
friend bool operator==(const SchemaViolation &lhs, const SchemaViolation &rhs);
ValidationStatus status;
LabelId label;
std::optional<SchemaProperty> violated_schema_property;
std::optional<PropertyValue> violated_property_value;
};
class SchemaValidator {
public:
explicit SchemaValidator(Schemas &schemas);
explicit SchemaValidator(Schemas &schemas, const NameIdMapper &name_id_mapper);
[[nodiscard]] std::optional<SchemaViolation> ValidateVertexCreate(
LabelId primary_label, const std::vector<LabelId> &labels,
const std::vector<PropertyValue> &primary_properties) const;
[[nodiscard]] ShardResult<void> ValidateVertexCreate(LabelId primary_label, const std::vector<LabelId> &labels,
const std::vector<PropertyValue> &primary_properties) const;
[[nodiscard]] std::optional<SchemaViolation> ValidatePropertyUpdate(LabelId primary_label,
PropertyId property_id) const;
[[nodiscard]] ShardResult<void> ValidatePropertyUpdate(LabelId primary_label, PropertyId property_id) const;
[[nodiscard]] std::optional<SchemaViolation> ValidateLabelUpdate(LabelId label) const;
[[nodiscard]] ShardResult<void> ValidateLabelUpdate(LabelId label) const;
const Schemas::Schema *GetSchema(LabelId label) const;
private:
Schemas &schemas_;
Schemas *schemas_;
const NameIdMapper *name_id_mapper_;
};
struct VertexValidator {
explicit VertexValidator(const SchemaValidator &schema_validator, LabelId primary_label);
[[nodiscard]] std::optional<SchemaViolation> ValidatePropertyUpdate(PropertyId property_id) const;
[[nodiscard]] ShardResult<void> ValidatePropertyUpdate(PropertyId property_id) const;
[[nodiscard]] std::optional<SchemaViolation> ValidateAddLabel(LabelId label) const;
[[nodiscard]] ShardResult<void> ValidateAddLabel(LabelId label) const;
[[nodiscard]] std::optional<SchemaViolation> ValidateRemoveLabel(LabelId label) const;
[[nodiscard]] ShardResult<void> ValidateRemoveLabel(LabelId label) const;
const SchemaValidator *schema_validator;
View File
@ -31,9 +31,10 @@
#include "storage/v3/indices.hpp"
#include "storage/v3/key_store.hpp"
#include "storage/v3/mvcc.hpp"
#include "storage/v3/name_id_mapper.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/result.hpp"
#include "storage/v3/schema_validator.hpp"
#include "storage/v3/shard_operation_result.hpp"
#include "storage/v3/transaction.hpp"
#include "storage/v3/vertex.hpp"
#include "storage/v3/vertex_accessor.hpp"
@ -327,7 +328,7 @@ Shard::Shard(const LabelId primary_label, const PrimaryKey min_primary_key,
: primary_label_{primary_label},
min_primary_key_{min_primary_key},
max_primary_key_{max_primary_key},
schema_validator_{schemas_},
schema_validator_{schemas_, name_id_mapper_},
vertex_validator_{schema_validator_, primary_label},
indices_{config.items, vertex_validator_},
isolation_level_{config.transaction.isolation_level},
@ -344,7 +345,7 @@ Shard::~Shard() {}
Shard::Accessor::Accessor(Shard &shard, Transaction &transaction)
: shard_(&shard), transaction_(&transaction), config_(shard_->config_.items) {}
ShardOperationResult<VertexAccessor> Shard::Accessor::CreateVertexAndValidate(
ShardResult<VertexAccessor> Shard::Accessor::CreateVertexAndValidate(
const std::vector<LabelId> &labels, const std::vector<PropertyValue> &primary_properties,
const std::vector<std::pair<PropertyId, PropertyValue>> &properties) {
OOMExceptionEnabler oom_exception;
@ -352,8 +353,8 @@ ShardOperationResult<VertexAccessor> Shard::Accessor::CreateVertexAndValidate(
auto maybe_schema_violation =
GetSchemaValidator().ValidateVertexCreate(shard_->primary_label_, labels, primary_properties);
if (maybe_schema_violation) {
return {std::move(*maybe_schema_violation)};
if (maybe_schema_violation.HasError()) {
return {std::move(maybe_schema_violation.GetError())};
}
auto acc = shard_->vertices_.access();
@ -363,7 +364,7 @@ ShardOperationResult<VertexAccessor> Shard::Accessor::CreateVertexAndValidate(
VertexAccessor vertex_acc{&it->vertex, transaction_, &shard_->indices_, config_, shard_->vertex_validator_};
if (!inserted) {
return {Error::VERTEX_ALREADY_INSERTED};
return SHARD_ERROR(ErrorCode::VERTEX_ALREADY_INSERTED);
}
MG_ASSERT(it != acc.end(), "Invalid Vertex accessor!");
@ -394,19 +395,19 @@ std::optional<VertexAccessor> Shard::Accessor::FindVertex(std::vector<PropertyVa
return VertexAccessor::Create(&it->vertex, transaction_, &shard_->indices_, config_, shard_->vertex_validator_, view);
}
Result<std::optional<VertexAccessor>> Shard::Accessor::DeleteVertex(VertexAccessor *vertex) {
ShardResult<std::optional<VertexAccessor>> Shard::Accessor::DeleteVertex(VertexAccessor *vertex) {
MG_ASSERT(vertex->transaction_ == transaction_,
"VertexAccessor must be from the same transaction as the storage "
"accessor when deleting a vertex!");
auto *vertex_ptr = vertex->vertex_;
if (!PrepareForWrite(transaction_, vertex_ptr)) return Error::SERIALIZATION_ERROR;
if (!PrepareForWrite(transaction_, vertex_ptr)) return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
if (vertex_ptr->deleted) {
return std::optional<VertexAccessor>{};
}
if (!vertex_ptr->in_edges.empty() || !vertex_ptr->out_edges.empty()) return Error::VERTEX_HAS_EDGES;
if (!vertex_ptr->in_edges.empty() || !vertex_ptr->out_edges.empty()) return SHARD_ERROR(ErrorCode::VERTEX_HAS_EDGES);
CreateAndLinkDelta(transaction_, vertex_ptr, Delta::RecreateObjectTag());
vertex_ptr->deleted = true;
@ -415,7 +416,7 @@ Result<std::optional<VertexAccessor>> Shard::Accessor::DeleteVertex(VertexAccess
shard_->vertex_validator_, true);
}
Result<std::optional<std::pair<VertexAccessor, std::vector<EdgeAccessor>>>> Shard::Accessor::DetachDeleteVertex(
ShardResult<std::optional<std::pair<VertexAccessor, std::vector<EdgeAccessor>>>> Shard::Accessor::DetachDeleteVertex(
VertexAccessor *vertex) {
using ReturnType = std::pair<VertexAccessor, std::vector<EdgeAccessor>>;
@ -428,7 +429,7 @@ Result<std::optional<std::pair<VertexAccessor, std::vector<EdgeAccessor>>>> Shar
std::vector<Vertex::EdgeLink> out_edges;
{
if (!PrepareForWrite(transaction_, vertex_ptr)) return Error::SERIALIZATION_ERROR;
if (!PrepareForWrite(transaction_, vertex_ptr)) return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
if (vertex_ptr->deleted) return std::optional<ReturnType>{};
@ -441,9 +442,9 @@ Result<std::optional<std::pair<VertexAccessor, std::vector<EdgeAccessor>>>> Shar
for (const auto &item : in_edges) {
auto [edge_type, from_vertex, edge] = item;
EdgeAccessor e(edge, edge_type, from_vertex, vertex_id, transaction_, &shard_->indices_, config_);
auto ret = DeleteEdge(e.FromVertex(), e.ToVertex(), e.Gid());
auto ret = DeleteEdge(e.From(), e.To(), e.Gid());
if (ret.HasError()) {
MG_ASSERT(ret.GetError() == Error::SERIALIZATION_ERROR, "Invalid database state!");
MG_ASSERT(ret.GetError() == common::ErrorCode::SERIALIZATION_ERROR, "Invalid database state!");
return ret.GetError();
}
@ -454,9 +455,9 @@ Result<std::optional<std::pair<VertexAccessor, std::vector<EdgeAccessor>>>> Shar
for (const auto &item : out_edges) {
auto [edge_type, to_vertex, edge] = item;
EdgeAccessor e(edge, edge_type, vertex_id, to_vertex, transaction_, &shard_->indices_, config_);
auto ret = DeleteEdge(e.FromVertex(), e.ToVertex(), e.Gid());
auto ret = DeleteEdge(e.From(), e.To(), e.Gid());
if (ret.HasError()) {
MG_ASSERT(ret.GetError() == Error::SERIALIZATION_ERROR, "Invalid database state!");
MG_ASSERT(ret.GetError() == common::ErrorCode::SERIALIZATION_ERROR, "Invalid database state!");
return ret.GetError();
}
@ -469,7 +470,7 @@ Result<std::optional<std::pair<VertexAccessor, std::vector<EdgeAccessor>>>> Shar
// vertex. Some other transaction could have modified the vertex in the
// meantime if we didn't have any edges to delete.
if (!PrepareForWrite(transaction_, vertex_ptr)) return Error::SERIALIZATION_ERROR;
if (!PrepareForWrite(transaction_, vertex_ptr)) return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
MG_ASSERT(!vertex_ptr->deleted, "Invalid database state!");
@ -481,8 +482,8 @@ Result<std::optional<std::pair<VertexAccessor, std::vector<EdgeAccessor>>>> Shar
std::move(deleted_edges));
}
Result<EdgeAccessor> Shard::Accessor::CreateEdge(VertexId from_vertex_id, VertexId to_vertex_id,
const EdgeTypeId edge_type, const Gid gid) {
ShardResult<EdgeAccessor> Shard::Accessor::CreateEdge(VertexId from_vertex_id, VertexId to_vertex_id,
const EdgeTypeId edge_type, const Gid gid) {
OOMExceptionEnabler oom_exception;
Vertex *from_vertex{nullptr};
Vertex *to_vertex{nullptr};
@ -506,12 +507,12 @@ Result<EdgeAccessor> Shard::Accessor::CreateEdge(VertexId from_vertex_id, Vertex
}
if (from_is_local) {
if (!PrepareForWrite(transaction_, from_vertex)) return Error::SERIALIZATION_ERROR;
if (from_vertex->deleted) return Error::DELETED_OBJECT;
if (!PrepareForWrite(transaction_, from_vertex)) return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
if (from_vertex->deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
}
if (to_is_local && to_vertex != from_vertex) {
if (!PrepareForWrite(transaction_, to_vertex)) return Error::SERIALIZATION_ERROR;
if (to_vertex->deleted) return Error::DELETED_OBJECT;
if (!PrepareForWrite(transaction_, to_vertex)) return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
if (to_vertex->deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
}
EdgeRef edge(gid);
@ -540,8 +541,8 @@ Result<EdgeAccessor> Shard::Accessor::CreateEdge(VertexId from_vertex_id, Vertex
&shard_->indices_, config_);
}
Result<std::optional<EdgeAccessor>> Shard::Accessor::DeleteEdge(VertexId from_vertex_id, VertexId to_vertex_id,
const Gid edge_id) {
ShardResult<std::optional<EdgeAccessor>> Shard::Accessor::DeleteEdge(VertexId from_vertex_id, VertexId to_vertex_id,
const Gid edge_id) {
Vertex *from_vertex{nullptr};
Vertex *to_vertex{nullptr};
@ -566,13 +567,13 @@ Result<std::optional<EdgeAccessor>> Shard::Accessor::DeleteEdge(VertexId from_ve
if (from_is_local) {
if (!PrepareForWrite(transaction_, from_vertex)) {
return Error::SERIALIZATION_ERROR;
return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
}
MG_ASSERT(!from_vertex->deleted, "Invalid database state!");
}
if (to_is_local && to_vertex != from_vertex) {
if (!PrepareForWrite(transaction_, to_vertex)) {
return Error::SERIALIZATION_ERROR;
return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
}
MG_ASSERT(!to_vertex->deleted, "Invalid database state!");
}
View File
@ -38,7 +38,6 @@
#include "storage/v3/result.hpp"
#include "storage/v3/schema_validator.hpp"
#include "storage/v3/schemas.hpp"
#include "storage/v3/shard_operation_result.hpp"
#include "storage/v3/transaction.hpp"
#include "storage/v3/vertex.hpp"
#include "storage/v3/vertex_accessor.hpp"
@ -207,7 +206,7 @@ class Shard final {
public:
/// @throw std::bad_alloc
ShardOperationResult<VertexAccessor> CreateVertexAndValidate(
ShardResult<VertexAccessor> CreateVertexAndValidate(
const std::vector<LabelId> &labels, const std::vector<PropertyValue> &primary_properties,
const std::vector<std::pair<PropertyId, PropertyValue>> &properties);
@ -262,19 +261,19 @@ class Shard final {
/// @return Accessor to the deleted vertex if a deletion took place, std::nullopt otherwise
/// @throw std::bad_alloc
Result<std::optional<VertexAccessor>> DeleteVertex(VertexAccessor *vertex);
ShardResult<std::optional<VertexAccessor>> DeleteVertex(VertexAccessor *vertex);
/// @return Accessor to the deleted vertex and deleted edges if a deletion took place, std::nullopt otherwise
/// @throw std::bad_alloc
Result<std::optional<std::pair<VertexAccessor, std::vector<EdgeAccessor>>>> DetachDeleteVertex(
ShardResult<std::optional<std::pair<VertexAccessor, std::vector<EdgeAccessor>>>> DetachDeleteVertex(
VertexAccessor *vertex);
/// @throw std::bad_alloc
Result<EdgeAccessor> CreateEdge(VertexId from_vertex_id, VertexId to_vertex_id, EdgeTypeId edge_type, Gid gid);
ShardResult<EdgeAccessor> CreateEdge(VertexId from_vertex_id, VertexId to_vertex_id, EdgeTypeId edge_type, Gid gid);
/// Accessor to the deleted edge if a deletion took place, std::nullopt otherwise
/// @throw std::bad_alloc
Result<std::optional<EdgeAccessor>> DeleteEdge(VertexId from_vertex_id, VertexId to_vertex_id, Gid edge_id);
ShardResult<std::optional<EdgeAccessor>> DeleteEdge(VertexId from_vertex_id, VertexId to_vertex_id, Gid edge_id);
LabelId NameToLabel(std::string_view name) const;
View File
@ -190,6 +190,12 @@ class ShardManager {
});
}
void BlockOnQuiescence() {
for (const auto &worker : workers_) {
worker.BlockOnQuiescence();
}
}
private:
io::Io<IoImpl> io_;
std::vector<shard_worker::Queue> workers_;
View File
@ -1,26 +0,0 @@
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#pragma once
#include <variant>
#include "storage/v3/result.hpp"
#include "storage/v3/schema_validator.hpp"
namespace memgraph::storage::v3 {
using ResultErrorType = std::variant<SchemaViolation, Error>;
template <typename TValue>
using ShardOperationResult = utils::BasicResult<ResultErrorType, TValue>;
} // namespace memgraph::storage::v3
File diff suppressed because it is too large
View File
@ -21,9 +21,6 @@
namespace memgraph::storage::v3 {
template <typename>
constexpr auto kAlwaysFalse = false;
class ShardRsm {
std::unique_ptr<Shard> shard_;
View File
@ -80,6 +80,9 @@ struct QueueInner {
// starvation by sometimes randomizing priorities, rather than following a strict
// prioritization.
std::deque<Message> queue;
uint64_t submitted = 0;
uint64_t calls_to_pop = 0;
};
/// There are two reasons to implement our own Queue instead of using
@ -95,6 +98,8 @@ class Queue {
MG_ASSERT(inner_.use_count() > 0);
std::unique_lock<std::mutex> lock(inner_->mu);
inner_->submitted++;
inner_->queue.emplace_back(std::forward<Message>(message));
} // lock dropped before notifying condition variable
@ -105,6 +110,9 @@ class Queue {
MG_ASSERT(inner_.use_count() > 0);
std::unique_lock<std::mutex> lock(inner_->mu);
inner_->calls_to_pop++;
inner_->cv.notify_all();
while (inner_->queue.empty()) {
inner_->cv.wait(lock);
}
@ -114,6 +122,15 @@ class Queue {
return message;
}
void BlockOnQuiescence() const {
MG_ASSERT(inner_.use_count() > 0);
std::unique_lock<std::mutex> lock(inner_->mu);
while (inner_->calls_to_pop <= inner_->submitted) {
inner_->cv.wait(lock);
}
}
};
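// Illustrative sketch, not part of the commit: the counters added to Queue above let
// BlockOnQuiescence() wait until the worker has popped every submitted message and is
// parked inside Pop() again (calls_to_pop > submitted). A self-contained single-producer /
// single-consumer analogue:
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <mutex>
#include <string>
#include <utility>

namespace sketch {

class Queue {
 public:
  void Push(std::string message) {
    {
      std::unique_lock<std::mutex> lock(mu_);
      ++submitted_;
      queue_.push_back(std::move(message));
    }  // lock dropped before notifying
    cv_.notify_all();
  }

  std::string Pop() {
    std::unique_lock<std::mutex> lock(mu_);
    ++calls_to_pop_;
    cv_.notify_all();  // wake anyone blocked in BlockOnQuiescence()
    cv_.wait(lock, [this] { return !queue_.empty(); });
    std::string message = std::move(queue_.front());
    queue_.pop_front();
    return message;
  }

  // Returns once the consumer has drained the queue and is blocked in Pop() again, i.e. it
  // has made more Pop() calls than there were Push() calls.
  void BlockOnQuiescence() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return calls_to_pop_ > submitted_; });
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::deque<std::string> queue_;
  std::uint64_t submitted_ = 0;
  std::uint64_t calls_to_pop_ = 0;
};

}  // namespace sketch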
/// A ShardWorker owns Raft<ShardRsm> instances and receives messages from the ShardManager.
@ -122,7 +139,6 @@ class ShardWorker {
io::Io<IoImpl> io_;
Queue queue_;
std::priority_queue<std::pair<Time, uuid>, std::vector<std::pair<Time, uuid>>, std::greater<>> cron_schedule_;
Time next_cron_ = Time::min();
std::map<uuid, ShardRaft<IoImpl>> rsm_map_;
bool Process(ShutDown && /* shut_down */) { return false; }
@ -175,10 +191,7 @@ class ShardWorker {
return;
}
auto rsm_io = io_.ForkLocal();
auto io_addr = rsm_io.GetAddress();
io_addr.unique_id = to_init.uuid;
rsm_io.SetAddress(io_addr);
auto rsm_io = io_.ForkLocal(to_init.uuid);
// TODO(tyler) get peers from Coordinator in HeartbeatResponse
std::vector<Address> rsm_peers = {};
@ -208,15 +221,12 @@ class ShardWorker {
~ShardWorker() = default;
void Run() {
while (true) {
bool should_continue = true;
while (should_continue) {
Message message = queue_.Pop();
const bool should_continue =
should_continue =
std::visit([&](auto &&msg) { return Process(std::forward<decltype(msg)>(msg)); }, std::move(message));
if (!should_continue) {
return;
}
}
}
};
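// Illustrative sketch, not part of the commit: ShardWorker::Run() above pops messages and
// dispatches them through std::visit until a handler (e.g. the ShutDown message) returns
// false. A self-contained analogue with two toy message types:
#include <iostream>
#include <queue>
#include <string>
#include <utility>
#include <variant>

namespace sketch {

struct ShutDown {};
struct Work {
  std::string payload;
};
using Message = std::variant<Work, ShutDown>;

struct Worker {
  bool Process(const Work &work) {
    std::cout << "processing " << work.payload << '\n';
    return true;  // keep running
  }
  bool Process(const ShutDown & /* shut_down */) { return false; }  // stop the loop

  void Run(std::queue<Message> &inbox) {
    bool should_continue = true;
    while (should_continue && !inbox.empty()) {
      Message message = std::move(inbox.front());
      inbox.pop();
      should_continue = std::visit([&](auto &&msg) { return Process(msg); }, std::move(message));
    }
  }
};

}  // namespace sketch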
View File
@ -1,34 +0,0 @@
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#pragma once
#include <vector>
#include <boost/asio/thread_pool.hpp>
#include "storage/v3/shard.hpp"
namespace memgraph::storage::v3 {
class Storage {
public:
explicit Storage(Config config);
// Interface toward shard manipulation
// Shard handler -> will use rsm client
private:
std::vector<Shard> shards_;
boost::asio::thread_pool shard_handlers_;
Config config_;
};
} // namespace memgraph::storage::v3
View File
@ -129,4 +129,27 @@ inline std::vector<Value> ConvertValueVector(const std::vector<v3::PropertyValue
inline msgs::VertexId ToMsgsVertexId(const v3::VertexId &vertex_id) {
return {msgs::Label{vertex_id.primary_label}, ConvertValueVector(vertex_id.primary_key)};
}
inline std::vector<std::pair<v3::PropertyId, v3::PropertyValue>> ConvertPropertyMap(
std::vector<std::pair<v3::PropertyId, Value>> &properties) {
std::vector<std::pair<v3::PropertyId, v3::PropertyValue>> ret;
ret.reserve(properties.size());
std::transform(std::make_move_iterator(properties.begin()), std::make_move_iterator(properties.end()),
std::back_inserter(ret), [](std::pair<v3::PropertyId, Value> &&property) {
return std::make_pair(property.first, ToPropertyValue(std::move(property.second)));
});
return ret;
}
inline std::vector<std::pair<PropertyId, Value>> FromMap(const std::map<PropertyId, Value> &properties) {
std::vector<std::pair<PropertyId, Value>> ret;
ret.reserve(properties.size());
std::transform(properties.begin(), properties.end(), std::back_inserter(ret),
[](const auto &property) { return std::make_pair(property.first, property.second); });
return ret;
}
} // namespace memgraph::storage::conversions
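// Illustrative sketch, not part of the commit: ConvertPropertyMap() above pushes each
// (id, value) pair through a converter with std::make_move_iterator, so the payload is
// moved rather than copied. A minimal analogue with a toy PropertyValue type:
#include <algorithm>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

namespace sketch {

using PropertyId = int;

struct PropertyValue {
  std::string str;
};

inline PropertyValue ToPropertyValue(std::string &&value) { return PropertyValue{std::move(value)}; }

inline std::vector<std::pair<PropertyId, PropertyValue>> ConvertPropertyMap(
    std::vector<std::pair<PropertyId, std::string>> &&properties) {
  std::vector<std::pair<PropertyId, PropertyValue>> ret;
  ret.reserve(properties.size());
  // make_move_iterator hands each pair to the lambda as an rvalue, so the string payload is
  // moved into the converted representation instead of being copied.
  std::transform(std::make_move_iterator(properties.begin()), std::make_move_iterator(properties.end()),
                 std::back_inserter(ret), [](std::pair<PropertyId, std::string> &&property) {
                   return std::make_pair(property.first, ToPropertyValue(std::move(property.second)));
                 });
  return ret;
}

}  // namespace sketch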
View File
@ -21,8 +21,8 @@
#include "storage/v3/key_store.hpp"
#include "storage/v3/mvcc.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/result.hpp"
#include "storage/v3/shard.hpp"
#include "storage/v3/shard_operation_result.hpp"
#include "storage/v3/vertex.hpp"
#include "utils/logging.hpp"
#include "utils/memory_tracker.hpp"
@ -80,12 +80,12 @@ bool VertexAccessor::IsVisible(View view) const {
return exists && (for_deleted_ || !deleted);
}
Result<bool> VertexAccessor::AddLabel(LabelId label) {
ShardResult<bool> VertexAccessor::AddLabel(LabelId label) {
utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception;
if (!PrepareForWrite(transaction_, vertex_)) return Error::SERIALIZATION_ERROR;
if (!PrepareForWrite(transaction_, vertex_)) return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
if (vertex_->deleted) return Error::DELETED_OBJECT;
if (vertex_->deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
if (std::find(vertex_->labels.begin(), vertex_->labels.end(), label) != vertex_->labels.end()) return false;
@ -98,15 +98,15 @@ Result<bool> VertexAccessor::AddLabel(LabelId label) {
return true;
}
ShardOperationResult<bool> VertexAccessor::AddLabelAndValidate(LabelId label) {
if (const auto maybe_violation_error = vertex_validator_->ValidateAddLabel(label); maybe_violation_error) {
return {*maybe_violation_error};
ShardResult<bool> VertexAccessor::AddLabelAndValidate(LabelId label) {
if (const auto maybe_violation_error = vertex_validator_->ValidateAddLabel(label); maybe_violation_error.HasError()) {
return {maybe_violation_error.GetError()};
}
utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception;
if (!PrepareForWrite(transaction_, vertex_)) return {Error::SERIALIZATION_ERROR};
if (!PrepareForWrite(transaction_, vertex_)) return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
if (vertex_->deleted) return {Error::DELETED_OBJECT};
if (vertex_->deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
if (std::find(vertex_->labels.begin(), vertex_->labels.end(), label) != vertex_->labels.end()) return false;
@ -119,10 +119,10 @@ ShardOperationResult<bool> VertexAccessor::AddLabelAndValidate(LabelId label) {
return true;
}
Result<bool> VertexAccessor::RemoveLabel(LabelId label) {
if (!PrepareForWrite(transaction_, vertex_)) return Error::SERIALIZATION_ERROR;
ShardResult<bool> VertexAccessor::RemoveLabel(LabelId label) {
if (!PrepareForWrite(transaction_, vertex_)) return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
if (vertex_->deleted) return Error::DELETED_OBJECT;
if (vertex_->deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
auto it = std::find(vertex_->labels.begin(), vertex_->labels.end(), label);
if (it == vertex_->labels.end()) return false;
@ -134,14 +134,15 @@ Result<bool> VertexAccessor::RemoveLabel(LabelId label) {
return true;
}
ShardOperationResult<bool> VertexAccessor::RemoveLabelAndValidate(LabelId label) {
if (const auto maybe_violation_error = vertex_validator_->ValidateRemoveLabel(label); maybe_violation_error) {
return {*maybe_violation_error};
ShardResult<bool> VertexAccessor::RemoveLabelAndValidate(LabelId label) {
if (const auto maybe_violation_error = vertex_validator_->ValidateRemoveLabel(label);
maybe_violation_error.HasError()) {
return {maybe_violation_error.GetError()};
}
if (!PrepareForWrite(transaction_, vertex_)) return {Error::SERIALIZATION_ERROR};
if (!PrepareForWrite(transaction_, vertex_)) return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
if (vertex_->deleted) return {Error::DELETED_OBJECT};
if (vertex_->deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
auto it = std::find(vertex_->labels.begin(), vertex_->labels.end(), label);
if (it == vertex_->labels.end()) return false;
@ -153,9 +154,9 @@ ShardOperationResult<bool> VertexAccessor::RemoveLabelAndValidate(LabelId label)
return true;
}
Result<bool> VertexAccessor::HasLabel(View view, LabelId label) const { return HasLabel(label, view); }
ShardResult<bool> VertexAccessor::HasLabel(View view, LabelId label) const { return HasLabel(label, view); }
Result<bool> VertexAccessor::HasLabel(LabelId label, View view) const {
ShardResult<bool> VertexAccessor::HasLabel(LabelId label, View view) const {
bool exists = true;
bool deleted = false;
bool has_label = false;
@ -197,12 +198,12 @@ Result<bool> VertexAccessor::HasLabel(LabelId label, View view) const {
break;
}
});
if (!exists) return Error::NONEXISTENT_OBJECT;
if (!for_deleted_ && deleted) return Error::DELETED_OBJECT;
if (!exists) return SHARD_ERROR(ErrorCode::NONEXISTENT_OBJECT);
if (!for_deleted_ && deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
return has_label;
}
Result<LabelId> VertexAccessor::PrimaryLabel(const View view) const {
ShardResult<LabelId> VertexAccessor::PrimaryLabel(const View view) const {
if (const auto result = CheckVertexExistence(view); result.HasError()) {
return result.GetError();
}
@ -210,21 +211,21 @@ Result<LabelId> VertexAccessor::PrimaryLabel(const View view) const {
return vertex_validator_->primary_label_;
}
Result<PrimaryKey> VertexAccessor::PrimaryKey(const View view) const {
ShardResult<PrimaryKey> VertexAccessor::PrimaryKey(const View view) const {
if (const auto result = CheckVertexExistence(view); result.HasError()) {
return result.GetError();
}
return vertex_->keys.Keys();
}
Result<VertexId> VertexAccessor::Id(View view) const {
ShardResult<VertexId> VertexAccessor::Id(View view) const {
if (const auto result = CheckVertexExistence(view); result.HasError()) {
return result.GetError();
}
return VertexId{vertex_validator_->primary_label_, vertex_->keys.Keys()};
};
Result<std::vector<LabelId>> VertexAccessor::Labels(View view) const {
ShardResult<std::vector<LabelId>> VertexAccessor::Labels(View view) const {
bool exists = true;
bool deleted = false;
std::vector<LabelId> labels;
@ -267,17 +268,17 @@ Result<std::vector<LabelId>> VertexAccessor::Labels(View view) const {
break;
}
});
if (!exists) return Error::NONEXISTENT_OBJECT;
if (!for_deleted_ && deleted) return Error::DELETED_OBJECT;
if (!exists) return SHARD_ERROR(ErrorCode::NONEXISTENT_OBJECT);
if (!for_deleted_ && deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
return std::move(labels);
}
Result<PropertyValue> VertexAccessor::SetProperty(PropertyId property, const PropertyValue &value) {
ShardResult<PropertyValue> VertexAccessor::SetProperty(PropertyId property, const PropertyValue &value) {
utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception;
if (!PrepareForWrite(transaction_, vertex_)) return Error::SERIALIZATION_ERROR;
if (!PrepareForWrite(transaction_, vertex_)) return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
if (vertex_->deleted) return Error::DELETED_OBJECT;
if (vertex_->deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
auto current_value = vertex_->properties.GetProperty(property);
// We could skip setting the value if the previous one is the same to the new
@ -294,7 +295,7 @@ Result<PropertyValue> VertexAccessor::SetProperty(PropertyId property, const Pro
return std::move(current_value);
}
Result<void> VertexAccessor::CheckVertexExistence(View view) const {
ShardResult<void> VertexAccessor::CheckVertexExistence(View view) const {
bool exists = true;
bool deleted = false;
Delta *delta = nullptr;
@ -323,27 +324,27 @@ Result<void> VertexAccessor::CheckVertexExistence(View view) const {
}
});
if (!exists) {
return Error::NONEXISTENT_OBJECT;
return SHARD_ERROR(ErrorCode::NONEXISTENT_OBJECT);
}
if (!for_deleted_ && deleted) {
return Error::DELETED_OBJECT;
return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
}
return {};
}
ShardOperationResult<PropertyValue> VertexAccessor::SetPropertyAndValidate(PropertyId property,
const PropertyValue &value) {
if (auto maybe_violation_error = vertex_validator_->ValidatePropertyUpdate(property); maybe_violation_error) {
return {*maybe_violation_error};
ShardResult<PropertyValue> VertexAccessor::SetPropertyAndValidate(PropertyId property, const PropertyValue &value) {
if (auto maybe_violation_error = vertex_validator_->ValidatePropertyUpdate(property);
maybe_violation_error.HasError()) {
return {maybe_violation_error.GetError()};
}
utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception;
if (!PrepareForWrite(transaction_, vertex_)) {
return {Error::SERIALIZATION_ERROR};
return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
}
if (vertex_->deleted) {
return {Error::DELETED_OBJECT};
return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
}
auto current_value = vertex_->properties.GetProperty(property);
@ -361,10 +362,10 @@ ShardOperationResult<PropertyValue> VertexAccessor::SetPropertyAndValidate(Prope
return std::move(current_value);
}
Result<std::map<PropertyId, PropertyValue>> VertexAccessor::ClearProperties() {
if (!PrepareForWrite(transaction_, vertex_)) return Error::SERIALIZATION_ERROR;
ShardResult<std::map<PropertyId, PropertyValue>> VertexAccessor::ClearProperties() {
if (!PrepareForWrite(transaction_, vertex_)) return SHARD_ERROR(ErrorCode::SERIALIZATION_ERROR);
if (vertex_->deleted) return Error::DELETED_OBJECT;
if (vertex_->deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
auto properties = vertex_->properties.Properties();
for (const auto &property : properties) {
@ -377,7 +378,7 @@ Result<std::map<PropertyId, PropertyValue>> VertexAccessor::ClearProperties() {
return std::move(properties);
}
Result<PropertyValue> VertexAccessor::GetProperty(View view, PropertyId property) const {
ShardResult<PropertyValue> VertexAccessor::GetProperty(View view, PropertyId property) const {
return GetProperty(property, view).GetValue();
}
@ -407,7 +408,7 @@ PropertyValue VertexAccessor::GetPropertyValue(PropertyId property, View view) c
return value;
}
Result<PropertyValue> VertexAccessor::GetProperty(PropertyId property, View view) const {
ShardResult<PropertyValue> VertexAccessor::GetProperty(PropertyId property, View view) const {
bool exists = true;
bool deleted = false;
PropertyValue value;
@ -442,12 +443,12 @@ Result<PropertyValue> VertexAccessor::GetProperty(PropertyId property, View view
break;
}
});
if (!exists) return Error::NONEXISTENT_OBJECT;
if (!for_deleted_ && deleted) return Error::DELETED_OBJECT;
if (!exists) return SHARD_ERROR(ErrorCode::NONEXISTENT_OBJECT);
if (!for_deleted_ && deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
return std::move(value);
}
Result<std::map<PropertyId, PropertyValue>> VertexAccessor::Properties(View view) const {
ShardResult<std::map<PropertyId, PropertyValue>> VertexAccessor::Properties(View view) const {
bool exists = true;
bool deleted = false;
std::map<PropertyId, PropertyValue> properties;
@ -492,13 +493,13 @@ Result<std::map<PropertyId, PropertyValue>> VertexAccessor::Properties(View view
break;
}
});
if (!exists) return Error::NONEXISTENT_OBJECT;
if (!for_deleted_ && deleted) return Error::DELETED_OBJECT;
if (!exists) return SHARD_ERROR(ErrorCode::NONEXISTENT_OBJECT);
if (!for_deleted_ && deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
return std::move(properties);
}
Result<std::vector<EdgeAccessor>> VertexAccessor::InEdges(View view, const std::vector<EdgeTypeId> &edge_types,
const VertexId *destination_id) const {
ShardResult<std::vector<EdgeAccessor>> VertexAccessor::InEdges(View view, const std::vector<EdgeTypeId> &edge_types,
const VertexId *destination_id) const {
bool exists = true;
bool deleted = false;
std::vector<Vertex::EdgeLink> in_edges;
@ -564,8 +565,8 @@ Result<std::vector<EdgeAccessor>> VertexAccessor::InEdges(View view, const std::
break;
}
});
if (!exists) return Error::NONEXISTENT_OBJECT;
if (deleted) return Error::DELETED_OBJECT;
if (!exists) return SHARD_ERROR(ErrorCode::NONEXISTENT_OBJECT);
if (deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
std::vector<EdgeAccessor> ret;
if (in_edges.empty()) {
return ret;
@ -579,8 +580,8 @@ Result<std::vector<EdgeAccessor>> VertexAccessor::InEdges(View view, const std::
return ret;
}
Result<std::vector<EdgeAccessor>> VertexAccessor::OutEdges(View view, const std::vector<EdgeTypeId> &edge_types,
const VertexId *destination_id) const {
ShardResult<std::vector<EdgeAccessor>> VertexAccessor::OutEdges(View view, const std::vector<EdgeTypeId> &edge_types,
const VertexId *destination_id) const {
bool exists = true;
bool deleted = false;
std::vector<Vertex::EdgeLink> out_edges;
@ -644,8 +645,8 @@ Result<std::vector<EdgeAccessor>> VertexAccessor::OutEdges(View view, const std:
break;
}
});
if (!exists) return Error::NONEXISTENT_OBJECT;
if (deleted) return Error::DELETED_OBJECT;
if (!exists) return SHARD_ERROR(ErrorCode::NONEXISTENT_OBJECT);
if (deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
std::vector<EdgeAccessor> ret;
if (out_edges.empty()) {
return ret;
@ -659,7 +660,7 @@ Result<std::vector<EdgeAccessor>> VertexAccessor::OutEdges(View view, const std:
return ret;
}
Result<size_t> VertexAccessor::InDegree(View view) const {
ShardResult<size_t> VertexAccessor::InDegree(View view) const {
bool exists = true;
bool deleted = false;
size_t degree = 0;
@ -691,12 +692,12 @@ Result<size_t> VertexAccessor::InDegree(View view) const {
break;
}
});
if (!exists) return Error::NONEXISTENT_OBJECT;
if (!for_deleted_ && deleted) return Error::DELETED_OBJECT;
if (!exists) return SHARD_ERROR(ErrorCode::NONEXISTENT_OBJECT);
if (!for_deleted_ && deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
return degree;
}
Result<size_t> VertexAccessor::OutDegree(View view) const {
ShardResult<size_t> VertexAccessor::OutDegree(View view) const {
bool exists = true;
bool deleted = false;
size_t degree = 0;
@ -728,8 +729,8 @@ Result<size_t> VertexAccessor::OutDegree(View view) const {
break;
}
});
if (!exists) return Error::NONEXISTENT_OBJECT;
if (!for_deleted_ && deleted) return Error::DELETED_OBJECT;
if (!exists) return SHARD_ERROR(ErrorCode::NONEXISTENT_OBJECT);
if (!for_deleted_ && deleted) return SHARD_ERROR(ErrorCode::DELETED_OBJECT);
return degree;
}
View File
@ -17,7 +17,7 @@
#include "storage/v3/id_types.hpp"
#include "storage/v3/key_store.hpp"
#include "storage/v3/result.hpp"
#include "storage/v3/shard_operation_result.hpp"
#include "storage/v3/schema_validator.hpp"
#include "storage/v3/transaction.hpp"
#include "storage/v3/vertex.hpp"
#include "storage/v3/vertex_id.hpp"
@ -55,61 +55,61 @@ class VertexAccessor final {
/// `false` is returned if the label already existed, or SchemaViolation
/// if adding the label has violated one of the schema constraints.
/// @throw std::bad_alloc
ShardOperationResult<bool> AddLabelAndValidate(LabelId label);
ShardResult<bool> AddLabelAndValidate(LabelId label);
/// Remove a label and return `true` if deletion took place.
/// `false` is returned if the vertex did not have the label, or SchemaViolation
/// if removing the label has violated one of the schema constraints.
/// @throw std::bad_alloc
ShardOperationResult<bool> RemoveLabelAndValidate(LabelId label);
ShardResult<bool> RemoveLabelAndValidate(LabelId label);
Result<bool> HasLabel(View view, LabelId label) const;
ShardResult<bool> HasLabel(View view, LabelId label) const;
Result<bool> HasLabel(LabelId label, View view) const;
ShardResult<bool> HasLabel(LabelId label, View view) const;
/// @throw std::bad_alloc
/// @throw std::length_error if the resulting vector exceeds
/// std::vector::max_size().
Result<std::vector<LabelId>> Labels(View view) const;
ShardResult<std::vector<LabelId>> Labels(View view) const;
Result<LabelId> PrimaryLabel(View view) const;
ShardResult<LabelId> PrimaryLabel(View view) const;
Result<PrimaryKey> PrimaryKey(View view) const;
ShardResult<PrimaryKey> PrimaryKey(View view) const;
Result<VertexId> Id(View view) const;
ShardResult<VertexId> Id(View view) const;
/// Set a property value and return the old value or error.
/// @throw std::bad_alloc
ShardOperationResult<PropertyValue> SetPropertyAndValidate(PropertyId property, const PropertyValue &value);
ShardResult<PropertyValue> SetPropertyAndValidate(PropertyId property, const PropertyValue &value);
/// Remove all properties and return the values of the removed properties.
/// @throw std::bad_alloc
Result<std::map<PropertyId, PropertyValue>> ClearProperties();
ShardResult<std::map<PropertyId, PropertyValue>> ClearProperties();
/// @throw std::bad_alloc
Result<PropertyValue> GetProperty(PropertyId property, View view) const;
ShardResult<PropertyValue> GetProperty(PropertyId property, View view) const;
// TODO Remove this
Result<PropertyValue> GetProperty(View view, PropertyId property) const;
ShardResult<PropertyValue> GetProperty(View view, PropertyId property) const;
/// @throw std::bad_alloc
Result<std::map<PropertyId, PropertyValue>> Properties(View view) const;
ShardResult<std::map<PropertyId, PropertyValue>> Properties(View view) const;
/// @throw std::bad_alloc
/// @throw std::length_error if the resulting vector exceeds
/// std::vector::max_size().
Result<std::vector<EdgeAccessor>> InEdges(View view, const std::vector<EdgeTypeId> &edge_types = {},
const VertexId *destination_id = nullptr) const;
ShardResult<std::vector<EdgeAccessor>> InEdges(View view, const std::vector<EdgeTypeId> &edge_types = {},
const VertexId *destination_id = nullptr) const;
/// @throw std::bad_alloc
/// @throw std::length_error if the resulting vector exceeds
/// std::vector::max_size().
Result<std::vector<EdgeAccessor>> OutEdges(View view, const std::vector<EdgeTypeId> &edge_types = {},
const VertexId *destination_id = nullptr) const;
ShardResult<std::vector<EdgeAccessor>> OutEdges(View view, const std::vector<EdgeTypeId> &edge_types = {},
const VertexId *destination_id = nullptr) const;
Result<size_t> InDegree(View view) const;
ShardResult<size_t> InDegree(View view) const;
Result<size_t> OutDegree(View view) const;
ShardResult<size_t> OutDegree(View view) const;
const SchemaValidator *GetSchemaValidator() const;
@ -122,20 +122,20 @@ class VertexAccessor final {
/// Add a label and return `true` if insertion took place.
/// `false` is returned if the label already existed.
/// @throw std::bad_alloc
Result<bool> AddLabel(LabelId label);
ShardResult<bool> AddLabel(LabelId label);
/// Remove a label and return `true` if deletion took place.
/// `false` is returned if the vertex did not have a label already.
/// @throw std::bad_alloc
Result<bool> RemoveLabel(LabelId label);
ShardResult<bool> RemoveLabel(LabelId label);
/// Set a property value and return the old value.
/// @throw std::bad_alloc
Result<PropertyValue> SetProperty(PropertyId property, const PropertyValue &value);
ShardResult<PropertyValue> SetProperty(PropertyId property, const PropertyValue &value);
PropertyValue GetPropertyValue(PropertyId property, View view) const;
Result<void> CheckVertexExistence(View view) const;
ShardResult<void> CheckVertexExistence(View view) const;
Vertex *vertex_;
Transaction *transaction_;
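As a companion to the declarations above, here is a minimal sketch of the validated write path. `vertex_accessor`, `label` and `prop` are placeholders for values obtained elsewhere (e.g. via the shard's name-to-id mapping), and the HasError()/operator* surface of ShardResult is assumed, not taken from this diff.
// Hypothetical usage of the validated mutation API declared above.
if (auto added = vertex_accessor.AddLabelAndValidate(label); added.HasError()) {
  // e.g. SCHEMA_VERTEX_SECONDARY_LABEL_IS_PRIMARY - surface it to the caller
}
if (auto old_value = vertex_accessor.SetPropertyAndValidate(prop, PropertyValue{42});
    old_value.HasError()) {
  // e.g. SCHEMA_VERTEX_UPDATE_PRIMARY_KEY when touching a primary-key property
} else {
  // *old_value holds the previous value of the property (Null if it was unset)
}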

View File

@ -9,12 +9,11 @@
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#include "storage/v3/storage.hpp"
#pragma once
#include "storage/v3/config.hpp"
namespace memgraph::utils {
namespace memgraph::storage::v3 {
template <typename>
constexpr auto kAlwaysFalse{false};
Storage::Storage(Config config) : config_{config} {}
} // namespace memgraph::storage::v3
} // namespace memgraph::utils

View File

@ -9,11 +9,13 @@ function(add_benchmark test_cpp)
get_filename_component(exec_name ${test_cpp} NAME_WE)
set(target_name ${test_prefix}${exec_name})
add_executable(${target_name} ${test_cpp} ${ARGN})
# OUTPUT_NAME sets the real name of a target when it is built and can be
# used to help create two targets of the same name even though CMake
# requires unique logical target names
set_target_properties(${target_name} PROPERTIES OUTPUT_NAME ${exec_name})
target_link_libraries(${target_name} benchmark gflags)
# register test
add_test(${target_name} ${exec_name})
add_dependencies(memgraph__benchmark ${target_name})
@ -37,9 +39,9 @@ target_link_libraries(${test_prefix}profile mg-query)
add_benchmark(query/stripped.cpp)
target_link_libraries(${test_prefix}stripped mg-query)
if (MG_ENTERPRISE)
add_benchmark(rpc.cpp)
target_link_libraries(${test_prefix}rpc mg-rpc)
if(MG_ENTERPRISE)
add_benchmark(rpc.cpp)
target_link_libraries(${test_prefix}rpc mg-rpc)
endif()
add_benchmark(skip_list_random.cpp)
@ -65,3 +67,15 @@ target_link_libraries(${test_prefix}storage_v2_property_store mg-storage-v2)
add_benchmark(future.cpp)
target_link_libraries(${test_prefix}future mg-io)
add_benchmark(data_structures_insert.cpp)
target_link_libraries(${test_prefix}data_structures_insert mg-utils mg-storage-v3)
add_benchmark(data_structures_find.cpp)
target_link_libraries(${test_prefix}data_structures_find mg-utils mg-storage-v3)
add_benchmark(data_structures_contains.cpp)
target_link_libraries(${test_prefix}data_structures_contains mg-utils mg-storage-v3)
add_benchmark(data_structures_remove.cpp)
target_link_libraries(${test_prefix}data_structures_remove mg-utils mg-storage-v3)

View File

@ -0,0 +1,58 @@
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#pragma once
#include <map>
#include <set>
#include <vector>
#include "coordinator/hybrid_logical_clock.hpp"
#include "storage/v3/key_store.hpp"
#include "storage/v3/lexicographically_ordered_vertex.hpp"
#include "storage/v3/mvcc.hpp"
#include "storage/v3/transaction.hpp"
#include "utils/skip_list.hpp"
namespace memgraph::benchmark {
template <typename T>
inline void PrepareData(utils::SkipList<T> &skip_list, const int64_t num_elements) {
coordinator::Hlc start_timestamp;
storage::v3::Transaction transaction{start_timestamp, storage::v3::IsolationLevel::SNAPSHOT_ISOLATION};
for (auto i{0}; i < num_elements; ++i) {
auto acc = skip_list.access();
acc.insert({storage::v3::PrimaryKey{storage::v3::PropertyValue{true}}});
}
}
template <typename TKey, typename TValue>
inline void PrepareData(std::map<TKey, TValue> &std_map, const int64_t num_elements) {
coordinator::Hlc start_timestamp;
storage::v3::Transaction transaction{start_timestamp, storage::v3::IsolationLevel::SNAPSHOT_ISOLATION};
auto *delta = storage::v3::CreateDeleteObjectDelta(&transaction);
for (auto i{0}; i < num_elements; ++i) {
std_map.insert({storage::v3::PrimaryKey{storage::v3::PropertyValue{i}},
storage::v3::LexicographicallyOrderedVertex{storage::v3::Vertex{
delta, std::vector<storage::v3::PropertyValue>{storage::v3::PropertyValue{true}}}}});
}
}
template <typename T>
inline void PrepareData(std::set<T> &std_set, const int64_t num_elements) {
coordinator::Hlc start_timestamp;
storage::v3::Transaction transaction{start_timestamp, storage::v3::IsolationLevel::SNAPSHOT_ISOLATION};
for (auto i{0}; i < num_elements; ++i) {
std_set.insert(std::vector<storage::v3::PropertyValue>{storage::v3::PropertyValue{true}});
}
}
} // namespace memgraph::benchmark

View File

@ -0,0 +1,105 @@
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#include <atomic>
#include <concepts>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <map>
#include <set>
#include <stdexcept>
#include <type_traits>
#include <vector>
#include <benchmark/benchmark.h>
#include <gflags/gflags.h>
#include "data_structures_common.hpp"
#include "storage/v3/key_store.hpp"
#include "storage/v3/lexicographically_ordered_vertex.hpp"
#include "storage/v3/mvcc.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/transaction.hpp"
#include "storage/v3/vertex.hpp"
#include "utils/skip_list.hpp"
namespace memgraph::benchmark {
///////////////////////////////////////////////////////////////////////////////
// Testing Contains Operation
///////////////////////////////////////////////////////////////////////////////
static void BM_BenchmarkContainsSkipList(::benchmark::State &state) {
utils::SkipList<storage::v3::PrimaryKey> skip_list;
PrepareData(skip_list, state.range(0));
// So we can also have elements that don't exist
std::mt19937 i_generator(std::random_device{}());
std::uniform_int_distribution<int64_t> i_distribution(0, state.range(0) * 2);
int64_t found_elems{0};
for (auto _ : state) {
for (auto i{0}; i < state.range(0); ++i) {
int64_t value = i_distribution(i_generator);
auto acc = skip_list.access();
if (acc.contains(storage::v3::PrimaryKey{{storage::v3::PropertyValue(value)}})) {
found_elems++;
}
}
}
state.SetItemsProcessed(found_elems);
}
static void BM_BenchmarkContainsStdMap(::benchmark::State &state) {
std::map<storage::v3::PrimaryKey, storage::v3::LexicographicallyOrderedVertex> std_map;
PrepareData(std_map, state.range(0));
// So we can also have elements that don't exist
std::mt19937 i_generator(std::random_device{}());
std::uniform_int_distribution<int64_t> i_distribution(0, state.range(0) * 2);
int64_t found_elems{0};
for (auto _ : state) {
for (auto i{0}; i < state.range(0); ++i) {
int64_t value = i_distribution(i_generator);
if (std_map.contains(storage::v3::PrimaryKey{{storage::v3::PropertyValue(value)}})) {
found_elems++;
}
}
}
state.SetItemsProcessed(found_elems);
}
static void BM_BenchmarkContainsStdSet(::benchmark::State &state) {
std::set<storage::v3::PrimaryKey> std_set;
PrepareData(std_set, state.range(0));
// So we can also have elements that don't exist
std::mt19937 i_generator(std::random_device{}());
std::uniform_int_distribution<int64_t> i_distribution(0, state.range(0) * 2);
int64_t found_elems{0};
for (auto _ : state) {
for (auto i{0}; i < state.range(0); ++i) {
int64_t value = i_distribution(i_generator);
if (std_set.contains(storage::v3::PrimaryKey{storage::v3::PropertyValue{value}})) {
found_elems++;
}
}
}
state.SetItemsProcessed(found_elems);
}
BENCHMARK(BM_BenchmarkContainsSkipList)->RangeMultiplier(10)->Range(1000, 10000000)->Unit(::benchmark::kMillisecond);
BENCHMARK(BM_BenchmarkContainsStdMap)->RangeMultiplier(10)->Range(1000, 10000000)->Unit(::benchmark::kMillisecond);
BENCHMARK(BM_BenchmarkContainsStdSet)->RangeMultiplier(10)->Range(1000, 10000000)->Unit(::benchmark::kMillisecond);
} // namespace memgraph::benchmark
BENCHMARK_MAIN();
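A short note on the registration lines above: with RangeMultiplier(10), Range(1000, 10000000) asks Google Benchmark to run each case at 1000, 10000, 100000, 1000000 and 10000000 elements. An explicit, equivalent registration (sketched here for illustration, not part of the diff) would be:
// Equivalent explicit registration for one of the benchmarks above.
BENCHMARK(BM_BenchmarkContainsSkipList)
    ->Arg(1000)
    ->Arg(10000)
    ->Arg(100000)
    ->Arg(1000000)
    ->Arg(10000000)
    ->Unit(::benchmark::kMillisecond);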

View File

@ -0,0 +1,104 @@
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#include <atomic>
#include <concepts>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <map>
#include <set>
#include <stdexcept>
#include <type_traits>
#include <vector>
#include <benchmark/benchmark.h>
#include <gflags/gflags.h>
#include "data_structures_common.hpp"
#include "storage/v3/key_store.hpp"
#include "storage/v3/lexicographically_ordered_vertex.hpp"
#include "storage/v3/mvcc.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/transaction.hpp"
#include "storage/v3/vertex.hpp"
#include "utils/skip_list.hpp"
namespace memgraph::benchmark {
///////////////////////////////////////////////////////////////////////////////
// Testing Find Operation
///////////////////////////////////////////////////////////////////////////////
static void BM_BenchmarkFindSkipList(::benchmark::State &state) {
utils::SkipList<storage::v3::PrimaryKey> skip_list;
PrepareData(skip_list, state.range(0));
// So we can also have elements that don't exist
std::mt19937 i_generator(std::random_device{}());
std::uniform_int_distribution<int64_t> i_distribution(0, state.range(0) * 2);
int64_t found_elems{0};
for (auto _ : state) {
for (auto i{0}; i < state.range(0); ++i) {
int64_t value = i_distribution(i_generator);
auto acc = skip_list.access();
if (acc.find(storage::v3::PrimaryKey{{storage::v3::PropertyValue(value)}}) != acc.end()) {
found_elems++;
}
}
}
state.SetItemsProcessed(found_elems);
}
static void BM_BenchmarkFindStdMap(::benchmark::State &state) {
std::map<storage::v3::PrimaryKey, storage::v3::LexicographicallyOrderedVertex> std_map;
PrepareData(std_map, state.range(0));
// So we can also have elements that don't exist
std::mt19937 i_generator(std::random_device{}());
std::uniform_int_distribution<int64_t> i_distribution(0, state.range(0) * 2);
int64_t found_elems{0};
for (auto _ : state) {
for (auto i{0}; i < state.range(0); ++i) {
int64_t value = i_distribution(i_generator);
if (std_map.find(storage::v3::PrimaryKey{{storage::v3::PropertyValue(value)}}) != std_map.end()) {
found_elems++;
}
}
}
state.SetItemsProcessed(found_elems);
}
static void BM_BenchmarkFindStdSet(::benchmark::State &state) {
std::set<storage::v3::PrimaryKey> std_set;
PrepareData(std_set, state.range(0));
// So we can also have elements that don't exist
std::mt19937 i_generator(std::random_device{}());
std::uniform_int_distribution<int64_t> i_distribution(0, state.range(0) * 2);
int64_t found_elems{0};
for (auto _ : state) {
for (auto i{0}; i < state.range(0); ++i) {
int64_t value = i_distribution(i_generator);
if (std_set.find(storage::v3::PrimaryKey{storage::v3::PropertyValue{value}}) != std_set.end()) {
found_elems++;
}
}
}
state.SetItemsProcessed(found_elems);
}
BENCHMARK(BM_BenchmarkFindSkipList)->RangeMultiplier(10)->Range(1000, 10000000)->Unit(::benchmark::kMillisecond);
BENCHMARK(BM_BenchmarkFindStdMap)->RangeMultiplier(10)->Range(1000, 10000000)->Unit(::benchmark::kMillisecond);
BENCHMARK(BM_BenchmarkFindStdSet)->RangeMultiplier(10)->Range(1000, 10000000)->Unit(::benchmark::kMillisecond);
} // namespace memgraph::benchmark
BENCHMARK_MAIN();

View File

@ -0,0 +1,85 @@
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#include <atomic>
#include <concepts>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <map>
#include <set>
#include <stdexcept>
#include <type_traits>
#include <vector>
#include <benchmark/benchmark.h>
#include <gflags/gflags.h>
#include "storage/v3/key_store.hpp"
#include "storage/v3/lexicographically_ordered_vertex.hpp"
#include "storage/v3/mvcc.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/transaction.hpp"
#include "storage/v3/vertex.hpp"
#include "utils/skip_list.hpp"
namespace memgraph::benchmark {
///////////////////////////////////////////////////////////////////////////////
// Testing Insert Operation
///////////////////////////////////////////////////////////////////////////////
static void BM_BenchmarkInsertSkipList(::benchmark::State &state) {
utils::SkipList<storage::v3::PrimaryKey> skip_list;
coordinator::Hlc start_timestamp;
storage::v3::Transaction transaction{start_timestamp, storage::v3::IsolationLevel::SNAPSHOT_ISOLATION};
for (auto _ : state) {
for (auto i{0}; i < state.range(0); ++i) {
auto acc = skip_list.access();
acc.insert({storage::v3::PrimaryKey{storage::v3::PropertyValue{true}}});
}
}
}
static void BM_BenchmarkInsertStdMap(::benchmark::State &state) {
std::map<storage::v3::PrimaryKey, storage::v3::LexicographicallyOrderedVertex> std_map;
coordinator::Hlc start_timestamp;
storage::v3::Transaction transaction{start_timestamp, storage::v3::IsolationLevel::SNAPSHOT_ISOLATION};
auto *delta = storage::v3::CreateDeleteObjectDelta(&transaction);
for (auto _ : state) {
for (auto i{0}; i < state.range(0); ++i) {
std_map.insert({storage::v3::PrimaryKey{storage::v3::PropertyValue{i}},
storage::v3::LexicographicallyOrderedVertex{storage::v3::Vertex{
delta, std::vector<storage::v3::PropertyValue>{storage::v3::PropertyValue{true}}}}});
}
}
}
static void BM_BenchmarkInsertStdSet(::benchmark::State &state) {
std::set<storage::v3::PrimaryKey> std_set;
for (auto _ : state) {
for (auto i{0}; i < state.range(0); ++i) {
std_set.insert(
storage::v3::PrimaryKey{std::vector<storage::v3::PropertyValue>{storage::v3::PropertyValue{true}}});
}
}
}
BENCHMARK(BM_BenchmarkInsertSkipList)->RangeMultiplier(10)->Range(1000, 10000000)->Unit(::benchmark::kMillisecond);
BENCHMARK(BM_BenchmarkInsertStdMap)->RangeMultiplier(10)->Range(1000, 10000000)->Unit(::benchmark::kMillisecond);
BENCHMARK(BM_BenchmarkInsertStdSet)->RangeMultiplier(10)->Range(1000, 10000000)->Unit(::benchmark::kMillisecond);
} // namespace memgraph::benchmark
BENCHMARK_MAIN();

View File

@ -0,0 +1,106 @@
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#include <atomic>
#include <concepts>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <map>
#include <set>
#include <stdexcept>
#include <type_traits>
#include <vector>
#include <benchmark/benchmark.h>
#include <gflags/gflags.h>
#include "data_structures_common.hpp"
#include "storage/v3/key_store.hpp"
#include "storage/v3/lexicographically_ordered_vertex.hpp"
#include "storage/v3/mvcc.hpp"
#include "storage/v3/property_value.hpp"
#include "storage/v3/transaction.hpp"
#include "storage/v3/vertex.hpp"
#include "utils/skip_list.hpp"
namespace memgraph::benchmark {
///////////////////////////////////////////////////////////////////////////////
// Testing Remove Operation
///////////////////////////////////////////////////////////////////////////////
static void BM_BenchmarkRemoveSkipList(::benchmark::State &state) {
utils::SkipList<storage::v3::PrimaryKey> skip_list;
PrepareData(skip_list, state.range(0));
// So we can also have elements that don't exist
std::mt19937 i_generator(std::random_device{}());
std::uniform_int_distribution<int64_t> i_distribution(0, state.range(0) * 2);
int64_t removed_elems{0};
for (auto _ : state) {
for (auto i{0}; i < state.range(0); ++i) {
int64_t value = i_distribution(i_generator);
auto acc = skip_list.access();
if (acc.remove(storage::v3::PrimaryKey{storage::v3::PropertyValue(value)})) {
removed_elems++;
}
}
}
state.SetItemsProcessed(removed_elems);
}
static void BM_BenchmarkRemoveStdMap(::benchmark::State &state) {
std::map<storage::v3::PrimaryKey, storage::v3::LexicographicallyOrderedVertex> std_map;
PrepareData(std_map, state.range(0));
// So we can also have elements that don't exist
std::mt19937 i_generator(std::random_device{}());
std::uniform_int_distribution<int64_t> i_distribution(0, state.range(0) * 2);
int64_t removed_elems{0};
for (auto _ : state) {
for (auto i{0}; i < state.range(0); ++i) {
int64_t value = i_distribution(i_generator);
if (std_map.erase(storage::v3::PrimaryKey{storage::v3::PropertyValue{value}}) > 0) {
removed_elems++;
}
}
}
state.SetItemsProcessed(removed_elems);
}
static void BM_BenchmarkRemoveStdSet(::benchmark::State &state) {
std::set<storage::v3::PrimaryKey> std_set;
PrepareData(std_set, state.range(0));
// So we can also have elements that don't exist
std::mt19937 i_generator(std::random_device{}());
std::uniform_int_distribution<int64_t> i_distribution(0, state.range(0) * 2);
int64_t removed_elems{0};
for (auto _ : state) {
for (auto i{0}; i < state.range(0); ++i) {
int64_t value = i_distribution(i_generator);
if (std_set.erase(storage::v3::PrimaryKey{storage::v3::PropertyValue{value}}) > 0) {
removed_elems++;
}
}
}
state.SetItemsProcessed(removed_elems);
}
BENCHMARK(BM_BenchmarkRemoveSkipList)->RangeMultiplier(10)->Range(1000, 10000000)->Unit(::benchmark::kMillisecond);
BENCHMARK(BM_BenchmarkRemoveStdMap)->RangeMultiplier(10)->Range(1000, 10000000)->Unit(::benchmark::kMillisecond);
BENCHMARK(BM_BenchmarkRemoveStdSet)->RangeMultiplier(10)->Range(1000, 10000000)->Unit(::benchmark::kMillisecond);
} // namespace memgraph::benchmark
BENCHMARK_MAIN();

View File

@ -1,4 +1,4 @@
// Copyright 2021 Memgraph Ltd.
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -11,11 +11,14 @@
#pragma once
#include <array>
#include <atomic>
#include <chrono>
#include <cstdint>
#include <functional>
#include <iostream>
#include <memory>
#include <numeric>
#include <thread>
#include <vector>
@ -26,7 +29,7 @@ DEFINE_int32(duration, 10, "Duration of test (in seconds)");
struct Stats {
uint64_t total{0};
uint64_t succ[4] = {0, 0, 0, 0};
std::array<uint64_t, 4> succ = {0, 0, 0, 0};
};
const int OP_INSERT = 0;
@ -81,3 +84,27 @@ inline void RunConcurrentTest(std::function<void(std::atomic<bool> *, Stats *)>
std::cout << "Total successful: " << tot << " (" << tot / FLAGS_duration << " calls/s)" << std::endl;
std::cout << "Total ops: " << tops << " (" << tops / FLAGS_duration << " calls/s)" << std::endl;
}
inline void RunTest(std::function<void(const std::atomic<bool> &, Stats &)> test_func) {
Stats stats;
std::atomic<bool> run{true};
{
std::jthread bg_thread(test_func, std::cref(run), std::ref(stats));
std::this_thread::sleep_for(std::chrono::seconds(FLAGS_duration));
run.store(false, std::memory_order_relaxed);
}
std::cout << " Operations: " << stats.total << std::endl;
std::cout << " Successful insert: " << stats.succ[0] << std::endl;
std::cout << " Successful contains: " << stats.succ[1] << std::endl;
std::cout << " Successful remove: " << stats.succ[2] << std::endl;
std::cout << " Successful find: " << stats.succ[3] << std::endl;
std::cout << std::endl;
const auto tot = std::accumulate(stats.succ.begin(), stats.succ.begin() + 3, 0);
const auto tops = stats.total;
std::cout << "Total successful: " << tot << " (" << tot / FLAGS_duration << " calls/s)" << std::endl;
std::cout << "Total ops: " << tops << " (" << tops / FLAGS_duration << " calls/s)" << std::endl;
}
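For context, a hypothetical caller of the new single-threaded RunTest could look like the sketch below. It exercises a plain std::set (chosen only for illustration) and fills the Stats slots in the order printed above (0 = insert, 1 = contains, 2 = remove, 3 = find); <random> and <set> are assumed to be included at the call site.
// Hypothetical workload passed to RunTest (illustration only, not in the diff).
RunTest([](const std::atomic<bool> &run, Stats &stats) {
  std::set<int> data;
  std::mt19937 gen(std::random_device{}());
  std::uniform_int_distribution<int> dist(0, 1'000'000);
  while (run.load(std::memory_order_relaxed)) {
    const int value = dist(gen);
    if (data.insert(value).second) ++stats.succ[0];       // insert
    if (data.contains(value)) ++stats.succ[1];            // contains
    if (data.find(value) != data.end()) ++stats.succ[3];  // find
    if (data.erase(value) > 0) ++stats.succ[2];           // remove
    stats.total += 4;
  }
});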

View File

@ -9,3 +9,4 @@ distributed_queries_e2e_python_files(order_by_and_limit.py)
distributed_queries_e2e_python_files(distinct.py)
distributed_queries_e2e_python_files(optional_match.py)
distributed_queries_e2e_python_files(common.py)
distributed_queries_e2e_python_files(awesome_memgraph_functions.py)

View File

@ -0,0 +1,93 @@
# Copyright 2022 Memgraph Ltd.
#
# Use of this software is governed by the Business Source License
# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
# License, and you may not use this file except in compliance with the Business Source License.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0, included in the file
# licenses/APL.txt.
import sys
import mgclient
import pytest
from common import (
connection,
execute_and_fetch_all,
has_n_result_row,
wait_for_shard_manager_to_initialize,
)
def test_awesome_memgraph_functions(connection):
wait_for_shard_manager_to_initialize()
cursor = connection.cursor()
assert has_n_result_row(cursor, "CREATE (n :label {property:1})", 0)
assert has_n_result_row(cursor, "CREATE (n :label {property:2})", 0)
assert has_n_result_row(cursor, "CREATE (n :label {property:3})", 0)
assert has_n_result_row(cursor, "CREATE (n :label {property:4})", 0)
assert has_n_result_row(cursor, "CREATE (n :label {property:10})", 0)
results = execute_and_fetch_all(cursor, "MATCH (n) WITH COLLECT(n) as nn RETURN SIZE(nn)")
assert len(results) == 1
assert results[0][0] == 5
results = execute_and_fetch_all(cursor, "MATCH (n) WITH COLLECT(n.property) as nn RETURN ALL(i IN nn WHERE i > 0)")
assert len(results) == 1
assert results[0][0] == True
results = execute_and_fetch_all(cursor, """RETURN CONTAINS("Pineapple", "P")""")
assert len(results) == 1
assert results[0][0] == True
results = execute_and_fetch_all(cursor, """RETURN ENDSWITH("Pineapple", "e")""")
assert len(results) == 1
assert results[0][0] == True
results = execute_and_fetch_all(cursor, """RETURN LEFT("Pineapple", 1)""")
assert len(results) == 1
assert results[0][0] == "P"
results = execute_and_fetch_all(cursor, """RETURN RIGHT("Pineapple", 1)""")
assert len(results) == 1
assert results[0][0] == "e"
results = execute_and_fetch_all(cursor, """RETURN REVERSE("Apple")""")
assert len(results) == 1
assert results[0][0] == "elppA"
results = execute_and_fetch_all(cursor, """RETURN REPLACE("Apple", "A", "a")""")
assert len(results) == 1
assert results[0][0] == "apple"
results = execute_and_fetch_all(cursor, """RETURN TOLOWER("Apple")""")
assert len(results) == 1
assert results[0][0] == "apple"
results = execute_and_fetch_all(cursor, """RETURN TOUPPER("Apple")""")
assert len(results) == 1
assert results[0][0] == "APPLE"
results = execute_and_fetch_all(cursor, """RETURN TRIM(" Apple")""")
assert len(results) == 1
assert results[0][0] == "Apple"
results = execute_and_fetch_all(cursor, """RETURN SPLIT("Apple.Apple", ".")""")
assert len(results) == 1
assert results[0][0] == ["Apple", "Apple"]
results = execute_and_fetch_all(cursor, """RETURN LOG10(100)""")
assert len(results) == 1
assert results[0][0] == 2
results = execute_and_fetch_all(cursor, """RETURN SQRT(4)""")
assert len(results) == 1
assert results[0][0] == 2
if __name__ == "__main__":
sys.exit(pytest.main([__file__, "-rA"]))

View File

@ -36,3 +36,8 @@ workloads:
binary: "tests/e2e/pytest_runner.sh"
args: ["distributed_queries/optional_match.py"]
<<: *template_cluster
- name: "Awesome memgraph functions"
binary: "tests/e2e/pytest_runner.sh"
args: ["distributed_queries/awesome_memgraph_functions.py"]
<<: *template_cluster

Some files were not shown because too many files have changed in this diff