Merge branch 'project-pineapples' into T1079-MG-add-simple-query-to-benchmark_v2
This commit is contained in:
commit
3cf79f5bbf
@ -11,7 +11,10 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <chrono>
|
||||
#include <compare>
|
||||
#include <ctime>
|
||||
#include <iomanip>
|
||||
|
||||
#include "io/time.hpp"
|
||||
|
||||
@ -31,6 +34,15 @@ struct Hlc {
|
||||
bool operator==(const uint64_t other) const { return logical_id == other; }
|
||||
bool operator<(const uint64_t other) const { return logical_id < other; }
|
||||
bool operator>=(const uint64_t other) const { return logical_id >= other; }
|
||||
|
||||
friend std::ostream &operator<<(std::ostream &in, const Hlc &hlc) {
|
||||
auto wall_clock = std::chrono::system_clock::to_time_t(hlc.coordinator_wall_clock);
|
||||
in << "Hlc { logical_id: " << hlc.logical_id;
|
||||
in << ", coordinator_wall_clock: " << std::put_time(std::localtime(&wall_clock), "%F %T");
|
||||
in << " }";
|
||||
|
||||
return in;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace memgraph::coordinator
|
||||
|
@ -9,8 +9,17 @@
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
#include "common/types.hpp"
|
||||
#include "coordinator/shard_map.hpp"
|
||||
#include "spdlog/spdlog.h"
|
||||
#include "storage/v3/schemas.hpp"
|
||||
#include "storage/v3/temporal.hpp"
|
||||
#include "utils/cast.hpp"
|
||||
#include "utils/exceptions.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
||||
namespace memgraph::coordinator {
|
||||
|
||||
@ -57,6 +66,267 @@ PrimaryKey SchemaToMinKey(const std::vector<SchemaProperty> &schema) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
ShardMap ShardMap::Parse(std::istream &input_stream) {
|
||||
ShardMap shard_map;
|
||||
const auto read_size = [&input_stream] {
|
||||
size_t size{0};
|
||||
input_stream >> size;
|
||||
return size;
|
||||
};
|
||||
|
||||
// Reads a string until the next whitespace
|
||||
const auto read_word = [&input_stream] {
|
||||
std::string word;
|
||||
input_stream >> word;
|
||||
return word;
|
||||
};
|
||||
|
||||
const auto read_names = [&read_size, &read_word] {
|
||||
const auto number_of_names = read_size();
|
||||
spdlog::trace("Reading {} names", number_of_names);
|
||||
std::vector<std::string> names;
|
||||
names.reserve(number_of_names);
|
||||
|
||||
for (auto name_index = 0; name_index < number_of_names; ++name_index) {
|
||||
names.push_back(read_word());
|
||||
spdlog::trace("Read '{}'", names.back());
|
||||
}
|
||||
return names;
|
||||
};
|
||||
|
||||
const auto read_line = [&input_stream] {
|
||||
std::string line;
|
||||
std::getline(input_stream, line);
|
||||
return line;
|
||||
};
|
||||
|
||||
const auto parse_type = [](const std::string &type) {
|
||||
static const auto type_map = std::unordered_map<std::string, common::SchemaType>{
|
||||
{"string", common::SchemaType::STRING}, {"int", common::SchemaType::INT}, {"bool", common::SchemaType::BOOL}};
|
||||
const auto lower_case_type = utils::ToLowerCase(type);
|
||||
auto it = type_map.find(lower_case_type);
|
||||
MG_ASSERT(it != type_map.end(), "Invalid type in split files: {}", type);
|
||||
return it->second;
|
||||
};
|
||||
|
||||
const auto parse_property_value = [](std::string text, const common::SchemaType type) {
|
||||
if (type == common::SchemaType::STRING) {
|
||||
return storage::v3::PropertyValue{std::move(text)};
|
||||
}
|
||||
if (type == common::SchemaType::INT) {
|
||||
size_t processed{0};
|
||||
int64_t value = std::stoll(text, &processed);
|
||||
MG_ASSERT(processed == text.size() || text[processed] == ' ', "Invalid integer format: '{}'", text);
|
||||
return storage::v3::PropertyValue{value};
|
||||
}
|
||||
LOG_FATAL("Not supported type: {}", utils::UnderlyingCast(type));
|
||||
};
|
||||
|
||||
spdlog::debug("Reading properties");
|
||||
const auto properties = read_names();
|
||||
MG_ASSERT(shard_map.AllocatePropertyIds(properties).size() == properties.size(),
|
||||
"Unexpected number of properties created!");
|
||||
|
||||
spdlog::debug("Reading edge types");
|
||||
const auto edge_types = read_names();
|
||||
MG_ASSERT(shard_map.AllocateEdgeTypeIds(edge_types).size() == edge_types.size(),
|
||||
"Unexpected number of properties created!");
|
||||
|
||||
spdlog::debug("Reading primary labels");
|
||||
const auto number_of_primary_labels = read_size();
|
||||
spdlog::debug("Reading {} primary labels", number_of_primary_labels);
|
||||
|
||||
for (auto label_index = 0; label_index < number_of_primary_labels; ++label_index) {
|
||||
const auto primary_label = read_word();
|
||||
spdlog::debug("Reading primary label named '{}'", primary_label);
|
||||
const auto number_of_primary_properties = read_size();
|
||||
spdlog::debug("Reading {} primary properties", number_of_primary_properties);
|
||||
std::vector<std::string> pp_names;
|
||||
std::vector<common::SchemaType> pp_types;
|
||||
pp_names.reserve(number_of_primary_properties);
|
||||
pp_types.reserve(number_of_primary_properties);
|
||||
for (auto property_index = 0; property_index < number_of_primary_properties; ++property_index) {
|
||||
pp_names.push_back(read_word());
|
||||
spdlog::debug("Reading primary property named '{}'", pp_names.back());
|
||||
pp_types.push_back(parse_type(read_word()));
|
||||
}
|
||||
auto pp_mapping = shard_map.AllocatePropertyIds(pp_names);
|
||||
std::vector<SchemaProperty> schema;
|
||||
schema.reserve(number_of_primary_properties);
|
||||
|
||||
for (auto property_index = 0; property_index < number_of_primary_properties; ++property_index) {
|
||||
schema.push_back(storage::v3::SchemaProperty{pp_mapping.at(pp_names[property_index]), pp_types[property_index]});
|
||||
}
|
||||
const auto hlc = shard_map.GetHlc();
|
||||
MG_ASSERT(shard_map.InitializeNewLabel(primary_label, schema, 1, hlc).has_value(),
|
||||
"Cannot initialize new label: {}", primary_label);
|
||||
|
||||
const auto number_of_split_points = read_size();
|
||||
spdlog::debug("Reading {} split points", number_of_split_points);
|
||||
|
||||
[[maybe_unused]] const auto remainder_from_last_line = read_line();
|
||||
for (auto split_point_index = 0; split_point_index < number_of_split_points; ++split_point_index) {
|
||||
const auto line = read_line();
|
||||
spdlog::debug("Read split point '{}'", line);
|
||||
MG_ASSERT(line.front() == '[', "Invalid split file format!");
|
||||
MG_ASSERT(line.back() == ']', "Invalid split file format!");
|
||||
std::string_view line_view{line};
|
||||
line_view.remove_prefix(1);
|
||||
line_view.remove_suffix(1);
|
||||
static constexpr std::string_view kDelimiter{","};
|
||||
auto pk_values_as_text = utils::Split(line_view, kDelimiter);
|
||||
std::vector<PropertyValue> pk;
|
||||
pk.reserve(number_of_primary_properties);
|
||||
MG_ASSERT(pk_values_as_text.size() == number_of_primary_properties,
|
||||
"Split point contains invalid number of values '{}'", line);
|
||||
|
||||
for (auto property_index = 0; property_index < number_of_primary_properties; ++property_index) {
|
||||
pk.push_back(parse_property_value(std::move(pk_values_as_text[property_index]), schema[property_index].type));
|
||||
}
|
||||
shard_map.SplitShard(shard_map.GetHlc(), shard_map.labels.at(primary_label), pk);
|
||||
}
|
||||
}
|
||||
|
||||
return shard_map;
|
||||
}
|
||||
|
||||
std::ostream &operator<<(std::ostream &in, const ShardMap &shard_map) {
|
||||
using utils::print_helpers::operator<<;
|
||||
|
||||
in << "ShardMap { shard_map_version: " << shard_map.shard_map_version;
|
||||
in << ", max_property_id: " << shard_map.max_property_id;
|
||||
in << ", max_edge_type_id: " << shard_map.max_edge_type_id;
|
||||
in << ", properties: " << shard_map.properties;
|
||||
in << ", edge_types: " << shard_map.edge_types;
|
||||
in << ", max_label_id: " << shard_map.max_label_id;
|
||||
in << ", labels: " << shard_map.labels;
|
||||
in << ", label_spaces: " << shard_map.label_spaces;
|
||||
in << ", schemas: " << shard_map.schemas;
|
||||
in << "}";
|
||||
return in;
|
||||
}
|
||||
|
||||
Shards ShardMap::GetShardsForLabel(const LabelName &label) const {
|
||||
const auto id = labels.at(label);
|
||||
const auto &shards = label_spaces.at(id).shards;
|
||||
return shards;
|
||||
}
|
||||
|
||||
std::vector<Shards> ShardMap::GetAllShards() const {
|
||||
std::vector<Shards> all_shards;
|
||||
all_shards.reserve(label_spaces.size());
|
||||
std::transform(label_spaces.begin(), label_spaces.end(), std::back_inserter(all_shards),
|
||||
[](const auto &label_space) { return label_space.second.shards; });
|
||||
return all_shards;
|
||||
}
|
||||
|
||||
// TODO(gabor) later we will want to update the wallclock time with
|
||||
// the given Io<impl>'s time as well
|
||||
Hlc ShardMap::IncrementShardMapVersion() noexcept {
|
||||
++shard_map_version.logical_id;
|
||||
return shard_map_version;
|
||||
}
|
||||
|
||||
Hlc ShardMap::GetHlc() const noexcept { return shard_map_version; }
|
||||
|
||||
std::vector<ShardToInitialize> ShardMap::AssignShards(Address storage_manager,
|
||||
std::set<boost::uuids::uuid> initialized) {
|
||||
std::vector<ShardToInitialize> ret{};
|
||||
|
||||
bool mutated = false;
|
||||
|
||||
for (auto &[label_id, label_space] : label_spaces) {
|
||||
for (auto it = label_space.shards.begin(); it != label_space.shards.end(); it++) {
|
||||
auto &[low_key, shard] = *it;
|
||||
std::optional<PrimaryKey> high_key;
|
||||
if (const auto next_it = std::next(it); next_it != label_space.shards.end()) {
|
||||
high_key = next_it->first;
|
||||
}
|
||||
// TODO(tyler) avoid these triple-nested loops by having the heartbeat include better info
|
||||
bool machine_contains_shard = false;
|
||||
|
||||
for (auto &aas : shard) {
|
||||
if (initialized.contains(aas.address.unique_id)) {
|
||||
machine_contains_shard = true;
|
||||
if (aas.status != Status::CONSENSUS_PARTICIPANT) {
|
||||
spdlog::info("marking shard as full consensus participant: {}", aas.address.unique_id);
|
||||
aas.status = Status::CONSENSUS_PARTICIPANT;
|
||||
}
|
||||
} else {
|
||||
const bool same_machine = aas.address.last_known_ip == storage_manager.last_known_ip &&
|
||||
aas.address.last_known_port == storage_manager.last_known_port;
|
||||
if (same_machine) {
|
||||
machine_contains_shard = true;
|
||||
spdlog::info("reminding shard manager that they should begin participating in shard");
|
||||
ret.push_back(ShardToInitialize{
|
||||
.uuid = aas.address.unique_id,
|
||||
.label_id = label_id,
|
||||
.min_key = low_key,
|
||||
.max_key = high_key,
|
||||
.schema = schemas[label_id],
|
||||
.config = Config{},
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!machine_contains_shard && shard.size() < label_space.replication_factor) {
|
||||
Address address = storage_manager;
|
||||
|
||||
// TODO(tyler) use deterministic UUID so that coordinators don't diverge here
|
||||
address.unique_id = boost::uuids::uuid{boost::uuids::random_generator()()},
|
||||
|
||||
spdlog::info("assigning shard manager to shard");
|
||||
|
||||
ret.push_back(ShardToInitialize{
|
||||
.uuid = address.unique_id,
|
||||
.label_id = label_id,
|
||||
.min_key = low_key,
|
||||
.max_key = high_key,
|
||||
.schema = schemas[label_id],
|
||||
.config = Config{},
|
||||
});
|
||||
|
||||
AddressAndStatus aas = {
|
||||
.address = address,
|
||||
.status = Status::INITIALIZING,
|
||||
};
|
||||
|
||||
shard.emplace_back(aas);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (mutated) {
|
||||
IncrementShardMapVersion();
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool ShardMap::SplitShard(Hlc previous_shard_map_version, LabelId label_id, const PrimaryKey &key) {
|
||||
if (previous_shard_map_version != shard_map_version) {
|
||||
return false;
|
||||
}
|
||||
|
||||
auto &label_space = label_spaces.at(label_id);
|
||||
auto &shards_in_map = label_space.shards;
|
||||
|
||||
MG_ASSERT(!shards_in_map.empty());
|
||||
MG_ASSERT(!shards_in_map.contains(key));
|
||||
MG_ASSERT(label_spaces.contains(label_id));
|
||||
|
||||
// Finding the Shard that the new PrimaryKey should map to.
|
||||
auto prev = std::prev(shards_in_map.upper_bound(key));
|
||||
Shard duplicated_shard = prev->second;
|
||||
|
||||
// Apply the split
|
||||
shards_in_map[key] = duplicated_shard;
|
||||
|
||||
IncrementShardMapVersion();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
std::optional<LabelId> ShardMap::InitializeNewLabel(std::string label_name, std::vector<SchemaProperty> schema,
|
||||
size_t replication_factor, Hlc last_shard_map_version) {
|
||||
if (shard_map_version != last_shard_map_version || labels.contains(label_name)) {
|
||||
@ -88,4 +358,175 @@ std::optional<LabelId> ShardMap::InitializeNewLabel(std::string label_name, std:
|
||||
return label_id;
|
||||
}
|
||||
|
||||
void ShardMap::AddServer(Address server_address) {
|
||||
// Find a random place for the server to plug in
|
||||
}
|
||||
std::optional<LabelId> ShardMap::GetLabelId(const std::string &label) const {
|
||||
if (const auto it = labels.find(label); it != labels.end()) {
|
||||
return it->second;
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
const std::string &ShardMap::GetLabelName(const LabelId label) const {
|
||||
if (const auto it =
|
||||
std::ranges::find_if(labels, [label](const auto &name_id_pair) { return name_id_pair.second == label; });
|
||||
it != labels.end()) {
|
||||
return it->first;
|
||||
}
|
||||
throw utils::BasicException("GetLabelName fails on the given label id!");
|
||||
}
|
||||
|
||||
std::optional<PropertyId> ShardMap::GetPropertyId(const std::string &property_name) const {
|
||||
if (const auto it = properties.find(property_name); it != properties.end()) {
|
||||
return it->second;
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
const std::string &ShardMap::GetPropertyName(const PropertyId property) const {
|
||||
if (const auto it = std::ranges::find_if(
|
||||
properties, [property](const auto &name_id_pair) { return name_id_pair.second == property; });
|
||||
it != properties.end()) {
|
||||
return it->first;
|
||||
}
|
||||
throw utils::BasicException("PropertyId not found!");
|
||||
}
|
||||
|
||||
std::optional<EdgeTypeId> ShardMap::GetEdgeTypeId(const std::string &edge_type) const {
|
||||
if (const auto it = edge_types.find(edge_type); it != edge_types.end()) {
|
||||
return it->second;
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
const std::string &ShardMap::GetEdgeTypeName(const EdgeTypeId property) const {
|
||||
if (const auto it = std::ranges::find_if(
|
||||
edge_types, [property](const auto &name_id_pair) { return name_id_pair.second == property; });
|
||||
it != edge_types.end()) {
|
||||
return it->first;
|
||||
}
|
||||
throw utils::BasicException("EdgeTypeId not found!");
|
||||
}
|
||||
Shards ShardMap::GetShardsForRange(const LabelName &label_name, const PrimaryKey &start_key,
|
||||
const PrimaryKey &end_key) const {
|
||||
MG_ASSERT(start_key <= end_key);
|
||||
MG_ASSERT(labels.contains(label_name));
|
||||
|
||||
LabelId label_id = labels.at(label_name);
|
||||
|
||||
const auto &label_space = label_spaces.at(label_id);
|
||||
|
||||
const auto &shards_for_label = label_space.shards;
|
||||
|
||||
MG_ASSERT(shards_for_label.begin()->first <= start_key,
|
||||
"the ShardMap must always contain a minimal key that is less than or equal to any requested key");
|
||||
|
||||
auto it = std::prev(shards_for_label.upper_bound(start_key));
|
||||
const auto end_it = shards_for_label.upper_bound(end_key);
|
||||
|
||||
Shards shards{};
|
||||
|
||||
std::copy(it, end_it, std::inserter(shards, shards.end()));
|
||||
|
||||
return shards;
|
||||
}
|
||||
|
||||
Shard ShardMap::GetShardForKey(const LabelName &label_name, const PrimaryKey &key) const {
|
||||
MG_ASSERT(labels.contains(label_name));
|
||||
|
||||
LabelId label_id = labels.at(label_name);
|
||||
|
||||
const auto &label_space = label_spaces.at(label_id);
|
||||
|
||||
MG_ASSERT(label_space.shards.begin()->first <= key,
|
||||
"the ShardMap must always contain a minimal key that is less than or equal to any requested key");
|
||||
|
||||
return std::prev(label_space.shards.upper_bound(key))->second;
|
||||
}
|
||||
|
||||
Shard ShardMap::GetShardForKey(const LabelId &label_id, const PrimaryKey &key) const {
|
||||
MG_ASSERT(label_spaces.contains(label_id));
|
||||
|
||||
const auto &label_space = label_spaces.at(label_id);
|
||||
|
||||
MG_ASSERT(label_space.shards.begin()->first <= key,
|
||||
"the ShardMap must always contain a minimal key that is less than or equal to any requested key");
|
||||
|
||||
return std::prev(label_space.shards.upper_bound(key))->second;
|
||||
}
|
||||
|
||||
PropertyMap ShardMap::AllocatePropertyIds(const std::vector<PropertyName> &new_properties) {
|
||||
PropertyMap ret{};
|
||||
|
||||
bool mutated = false;
|
||||
|
||||
for (const auto &property_name : new_properties) {
|
||||
if (properties.contains(property_name)) {
|
||||
auto property_id = properties.at(property_name);
|
||||
ret.emplace(property_name, property_id);
|
||||
} else {
|
||||
mutated = true;
|
||||
|
||||
const PropertyId property_id = PropertyId::FromUint(++max_property_id);
|
||||
ret.emplace(property_name, property_id);
|
||||
properties.emplace(property_name, property_id);
|
||||
}
|
||||
}
|
||||
|
||||
if (mutated) {
|
||||
IncrementShardMapVersion();
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
EdgeTypeIdMap ShardMap::AllocateEdgeTypeIds(const std::vector<EdgeTypeName> &new_edge_types) {
|
||||
EdgeTypeIdMap ret;
|
||||
|
||||
bool mutated = false;
|
||||
|
||||
for (const auto &edge_type_name : new_edge_types) {
|
||||
if (edge_types.contains(edge_type_name)) {
|
||||
auto edge_type_id = edge_types.at(edge_type_name);
|
||||
ret.emplace(edge_type_name, edge_type_id);
|
||||
} else {
|
||||
mutated = true;
|
||||
|
||||
const EdgeTypeId edge_type_id = EdgeTypeId::FromUint(++max_edge_type_id);
|
||||
ret.emplace(edge_type_name, edge_type_id);
|
||||
edge_types.emplace(edge_type_name, edge_type_id);
|
||||
}
|
||||
}
|
||||
|
||||
if (mutated) {
|
||||
IncrementShardMapVersion();
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool ShardMap::ClusterInitialized() const {
|
||||
for (const auto &[label_id, label_space] : label_spaces) {
|
||||
for (const auto &[low_key, shard] : label_space.shards) {
|
||||
if (shard.size() < label_space.replication_factor) {
|
||||
spdlog::info("label_space below desired replication factor");
|
||||
return false;
|
||||
}
|
||||
|
||||
for (const auto &aas : shard) {
|
||||
if (aas.status != Status::CONSENSUS_PARTICIPANT) {
|
||||
spdlog::info("shard member not yet a CONSENSUS_PARTICIPANT");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace memgraph::coordinator
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include "storage/v3/schemas.hpp"
|
||||
#include "storage/v3/temporal.hpp"
|
||||
#include "utils/exceptions.hpp"
|
||||
#include "utils/print_helpers.hpp"
|
||||
|
||||
namespace memgraph::coordinator {
|
||||
|
||||
@ -53,7 +54,24 @@ enum class Status : uint8_t {
|
||||
struct AddressAndStatus {
|
||||
memgraph::io::Address address;
|
||||
Status status;
|
||||
|
||||
friend bool operator<(const AddressAndStatus &lhs, const AddressAndStatus &rhs) { return lhs.address < rhs.address; }
|
||||
|
||||
friend std::ostream &operator<<(std::ostream &in, const AddressAndStatus &address_and_status) {
|
||||
in << "AddressAndStatus { address: ";
|
||||
in << address_and_status.address;
|
||||
if (address_and_status.status == Status::CONSENSUS_PARTICIPANT) {
|
||||
in << ", status: CONSENSUS_PARTICIPANT }";
|
||||
} else {
|
||||
in << ", status: INITIALIZING }";
|
||||
}
|
||||
|
||||
return in;
|
||||
}
|
||||
|
||||
friend bool operator==(const AddressAndStatus &lhs, const AddressAndStatus &rhs) {
|
||||
return lhs.address == rhs.address;
|
||||
}
|
||||
};
|
||||
|
||||
using PrimaryKey = std::vector<PropertyValue>;
|
||||
@ -78,8 +96,21 @@ PrimaryKey SchemaToMinKey(const std::vector<SchemaProperty> &schema);
|
||||
|
||||
struct LabelSpace {
|
||||
std::vector<SchemaProperty> schema;
|
||||
// Maps between the smallest primary key stored in the shard and the shard
|
||||
std::map<PrimaryKey, Shard> shards;
|
||||
size_t replication_factor;
|
||||
|
||||
friend std::ostream &operator<<(std::ostream &in, const LabelSpace &label_space) {
|
||||
using utils::print_helpers::operator<<;
|
||||
|
||||
in << "LabelSpace { schema: ";
|
||||
in << label_space.schema;
|
||||
in << ", shards: ";
|
||||
in << label_space.shards;
|
||||
in << ", replication_factor: " << label_space.replication_factor << "}";
|
||||
|
||||
return in;
|
||||
}
|
||||
};
|
||||
|
||||
struct ShardMap {
|
||||
@ -93,254 +124,50 @@ struct ShardMap {
|
||||
std::map<LabelId, LabelSpace> label_spaces;
|
||||
std::map<LabelId, std::vector<SchemaProperty>> schemas;
|
||||
|
||||
Shards GetShards(const LabelName &label) {
|
||||
const auto id = labels.at(label);
|
||||
auto &shards = label_spaces.at(id).shards;
|
||||
return shards;
|
||||
}
|
||||
[[nodiscard]] static ShardMap Parse(std::istream &input_stream);
|
||||
friend std::ostream &operator<<(std::ostream &in, const ShardMap &shard_map);
|
||||
|
||||
Shards GetShardsForLabel(const LabelName &label) const;
|
||||
|
||||
std::vector<Shards> GetAllShards() const;
|
||||
|
||||
// TODO(gabor) later we will want to update the wallclock time with
|
||||
// the given Io<impl>'s time as well
|
||||
Hlc IncrementShardMapVersion() noexcept {
|
||||
++shard_map_version.logical_id;
|
||||
return shard_map_version;
|
||||
}
|
||||
|
||||
Hlc GetHlc() const noexcept { return shard_map_version; }
|
||||
Hlc IncrementShardMapVersion() noexcept;
|
||||
Hlc GetHlc() const noexcept;
|
||||
|
||||
// Returns the shard UUIDs that have been assigned but not yet acknowledged for this storage manager
|
||||
std::vector<ShardToInitialize> AssignShards(Address storage_manager, std::set<boost::uuids::uuid> initialized) {
|
||||
std::vector<ShardToInitialize> ret{};
|
||||
std::vector<ShardToInitialize> AssignShards(Address storage_manager, std::set<boost::uuids::uuid> initialized);
|
||||
|
||||
bool mutated = false;
|
||||
|
||||
for (auto &[label_id, label_space] : label_spaces) {
|
||||
for (auto &[low_key, shard] : label_space.shards) {
|
||||
// TODO(tyler) avoid these triple-nested loops by having the heartbeat include better info
|
||||
bool machine_contains_shard = false;
|
||||
|
||||
for (auto &aas : shard) {
|
||||
if (initialized.contains(aas.address.unique_id)) {
|
||||
spdlog::info("marking shard as full consensus participant: {}", aas.address.unique_id);
|
||||
aas.status = Status::CONSENSUS_PARTICIPANT;
|
||||
machine_contains_shard = true;
|
||||
} else {
|
||||
const bool same_machine = aas.address.last_known_ip == storage_manager.last_known_ip &&
|
||||
aas.address.last_known_port == storage_manager.last_known_port;
|
||||
if (same_machine) {
|
||||
machine_contains_shard = true;
|
||||
ret.push_back(ShardToInitialize{
|
||||
.uuid = aas.address.unique_id,
|
||||
.label_id = label_id,
|
||||
.min_key = low_key,
|
||||
.max_key = std::nullopt,
|
||||
.schema = schemas[label_id],
|
||||
.config = Config{},
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!machine_contains_shard && shard.size() < label_space.replication_factor) {
|
||||
Address address = storage_manager;
|
||||
|
||||
// TODO(tyler) use deterministic UUID so that coordinators don't diverge here
|
||||
address.unique_id = boost::uuids::uuid{boost::uuids::random_generator()()},
|
||||
|
||||
ret.push_back(ShardToInitialize{.uuid = address.unique_id,
|
||||
.label_id = label_id,
|
||||
.min_key = low_key,
|
||||
.max_key = std::nullopt,
|
||||
.schema = schemas[label_id],
|
||||
.config = Config{}});
|
||||
|
||||
AddressAndStatus aas = {
|
||||
.address = address,
|
||||
.status = Status::INITIALIZING,
|
||||
};
|
||||
|
||||
shard.emplace_back(aas);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (mutated) {
|
||||
IncrementShardMapVersion();
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool SplitShard(Hlc previous_shard_map_version, LabelId label_id, const PrimaryKey &key) {
|
||||
if (previous_shard_map_version != shard_map_version) {
|
||||
return false;
|
||||
}
|
||||
|
||||
auto &label_space = label_spaces.at(label_id);
|
||||
auto &shards_in_map = label_space.shards;
|
||||
|
||||
MG_ASSERT(!shards_in_map.empty());
|
||||
MG_ASSERT(!shards_in_map.contains(key));
|
||||
MG_ASSERT(label_spaces.contains(label_id));
|
||||
|
||||
// Finding the Shard that the new PrimaryKey should map to.
|
||||
auto prev = std::prev(shards_in_map.upper_bound(key));
|
||||
Shard duplicated_shard = prev->second;
|
||||
|
||||
// Apply the split
|
||||
shards_in_map[key] = duplicated_shard;
|
||||
|
||||
return true;
|
||||
}
|
||||
bool SplitShard(Hlc previous_shard_map_version, LabelId label_id, const PrimaryKey &key);
|
||||
|
||||
std::optional<LabelId> InitializeNewLabel(std::string label_name, std::vector<SchemaProperty> schema,
|
||||
size_t replication_factor, Hlc last_shard_map_version);
|
||||
|
||||
void AddServer(Address server_address) {
|
||||
// Find a random place for the server to plug in
|
||||
}
|
||||
void AddServer(Address server_address);
|
||||
|
||||
LabelId GetLabelId(const std::string &label) const { return labels.at(label); }
|
||||
std::optional<LabelId> GetLabelId(const std::string &label) const;
|
||||
// TODO(antaljanosbenjamin): Remove this and instead use NameIdMapper
|
||||
const std::string &GetLabelName(LabelId label) const;
|
||||
std::optional<PropertyId> GetPropertyId(const std::string &property_name) const;
|
||||
const std::string &GetPropertyName(PropertyId property) const;
|
||||
std::optional<EdgeTypeId> GetEdgeTypeId(const std::string &edge_type) const;
|
||||
const std::string &GetEdgeTypeName(EdgeTypeId property) const;
|
||||
|
||||
std::string GetLabelName(const LabelId label) const {
|
||||
if (const auto it =
|
||||
std::ranges::find_if(labels, [label](const auto &name_id_pair) { return name_id_pair.second == label; });
|
||||
it != labels.end()) {
|
||||
return it->first;
|
||||
}
|
||||
throw utils::BasicException("GetLabelName fails on the given label id!");
|
||||
}
|
||||
Shards GetShardsForRange(const LabelName &label_name, const PrimaryKey &start_key, const PrimaryKey &end_key) const;
|
||||
|
||||
std::optional<PropertyId> GetPropertyId(const std::string &property_name) const {
|
||||
if (properties.contains(property_name)) {
|
||||
return properties.at(property_name);
|
||||
}
|
||||
Shard GetShardForKey(const LabelName &label_name, const PrimaryKey &key) const;
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
Shard GetShardForKey(const LabelId &label_id, const PrimaryKey &key) const;
|
||||
|
||||
std::string GetPropertyName(const PropertyId property) const {
|
||||
if (const auto it = std::ranges::find_if(
|
||||
properties, [property](const auto &name_id_pair) { return name_id_pair.second == property; });
|
||||
it != properties.end()) {
|
||||
return it->first;
|
||||
}
|
||||
throw utils::BasicException("PropertyId not found!");
|
||||
}
|
||||
PropertyMap AllocatePropertyIds(const std::vector<PropertyName> &new_properties);
|
||||
|
||||
std::optional<EdgeTypeId> GetEdgeTypeId(const std::string &edge_type) const {
|
||||
if (edge_types.contains(edge_type)) {
|
||||
return edge_types.at(edge_type);
|
||||
}
|
||||
EdgeTypeIdMap AllocateEdgeTypeIds(const std::vector<EdgeTypeName> &new_edge_types);
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
std::string GetEdgeTypeName(const EdgeTypeId property) const {
|
||||
if (const auto it = std::ranges::find_if(
|
||||
edge_types, [property](const auto &name_id_pair) { return name_id_pair.second == property; });
|
||||
it != edge_types.end()) {
|
||||
return it->first;
|
||||
}
|
||||
throw utils::BasicException("EdgeTypeId not found!");
|
||||
}
|
||||
|
||||
Shards GetShardsForRange(const LabelName &label_name, const PrimaryKey &start_key, const PrimaryKey &end_key) const {
|
||||
MG_ASSERT(start_key <= end_key);
|
||||
MG_ASSERT(labels.contains(label_name));
|
||||
|
||||
LabelId label_id = labels.at(label_name);
|
||||
|
||||
const auto &label_space = label_spaces.at(label_id);
|
||||
|
||||
const auto &shards_for_label = label_space.shards;
|
||||
|
||||
MG_ASSERT(shards_for_label.begin()->first <= start_key,
|
||||
"the ShardMap must always contain a minimal key that is less than or equal to any requested key");
|
||||
|
||||
auto it = std::prev(shards_for_label.upper_bound(start_key));
|
||||
const auto end_it = shards_for_label.upper_bound(end_key);
|
||||
|
||||
Shards shards{};
|
||||
|
||||
std::copy(it, end_it, std::inserter(shards, shards.end()));
|
||||
|
||||
return shards;
|
||||
}
|
||||
|
||||
Shard GetShardForKey(const LabelName &label_name, const PrimaryKey &key) const {
|
||||
MG_ASSERT(labels.contains(label_name));
|
||||
|
||||
LabelId label_id = labels.at(label_name);
|
||||
|
||||
const auto &label_space = label_spaces.at(label_id);
|
||||
|
||||
MG_ASSERT(label_space.shards.begin()->first <= key,
|
||||
"the ShardMap must always contain a minimal key that is less than or equal to any requested key");
|
||||
|
||||
return std::prev(label_space.shards.upper_bound(key))->second;
|
||||
}
|
||||
|
||||
Shard GetShardForKey(const LabelId &label_id, const PrimaryKey &key) const {
|
||||
MG_ASSERT(label_spaces.contains(label_id));
|
||||
|
||||
const auto &label_space = label_spaces.at(label_id);
|
||||
|
||||
MG_ASSERT(label_space.shards.begin()->first <= key,
|
||||
"the ShardMap must always contain a minimal key that is less than or equal to any requested key");
|
||||
|
||||
return std::prev(label_space.shards.upper_bound(key))->second;
|
||||
}
|
||||
|
||||
PropertyMap AllocatePropertyIds(const std::vector<PropertyName> &new_properties) {
|
||||
PropertyMap ret{};
|
||||
|
||||
bool mutated = false;
|
||||
|
||||
for (const auto &property_name : new_properties) {
|
||||
if (properties.contains(property_name)) {
|
||||
auto property_id = properties.at(property_name);
|
||||
ret.emplace(property_name, property_id);
|
||||
} else {
|
||||
mutated = true;
|
||||
|
||||
const PropertyId property_id = PropertyId::FromUint(++max_property_id);
|
||||
ret.emplace(property_name, property_id);
|
||||
properties.emplace(property_name, property_id);
|
||||
}
|
||||
}
|
||||
|
||||
if (mutated) {
|
||||
IncrementShardMapVersion();
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
EdgeTypeIdMap AllocateEdgeTypeIds(const std::vector<EdgeTypeName> &new_edge_types) {
|
||||
EdgeTypeIdMap ret;
|
||||
|
||||
bool mutated = false;
|
||||
|
||||
for (const auto &edge_type_name : new_edge_types) {
|
||||
if (edge_types.contains(edge_type_name)) {
|
||||
auto edge_type_id = edge_types.at(edge_type_name);
|
||||
ret.emplace(edge_type_name, edge_type_id);
|
||||
} else {
|
||||
mutated = true;
|
||||
|
||||
const EdgeTypeId edge_type_id = EdgeTypeId::FromUint(++max_edge_type_id);
|
||||
ret.emplace(edge_type_name, edge_type_id);
|
||||
edge_types.emplace(edge_type_name, edge_type_id);
|
||||
}
|
||||
}
|
||||
|
||||
if (mutated) {
|
||||
IncrementShardMapVersion();
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
/// Returns true if all shards have the desired number of replicas and they are in
|
||||
/// the CONSENSUS_PARTICIPANT state. Note that this does not necessarily mean that
|
||||
/// there is also an active leader for each shard.
|
||||
bool ClusterInitialized() const;
|
||||
};
|
||||
|
||||
} // namespace memgraph::coordinator
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "expr/typed_value_exception.hpp"
|
||||
#include "utils/algorithm.hpp"
|
||||
#include "utils/exceptions.hpp"
|
||||
#include "utils/fnv.hpp"
|
||||
@ -32,16 +33,6 @@
|
||||
|
||||
namespace memgraph::expr {
|
||||
|
||||
/**
|
||||
* An exception raised by the TypedValue system. Typically when
|
||||
* trying to perform operations (such as addition) on TypedValues
|
||||
* of incompatible Types.
|
||||
*/
|
||||
class TypedValueException : public utils::BasicException {
|
||||
public:
|
||||
using utils::BasicException::BasicException;
|
||||
};
|
||||
|
||||
// TODO: Neo4j does overflow checking. Should we also implement it?
|
||||
/**
|
||||
* Stores a query runtime value and its type.
|
||||
|
26
src/expr/typed_value_exception.hpp
Normal file
26
src/expr/typed_value_exception.hpp
Normal file
@ -0,0 +1,26 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/exceptions.hpp"
|
||||
|
||||
namespace memgraph::expr {
|
||||
/**
|
||||
* An exception raised by the TypedValue system. Typically when
|
||||
* trying to perform operations (such as addition) on TypedValues
|
||||
* of incompatible Types.
|
||||
*/
|
||||
class TypedValueException : public utils::BasicException {
|
||||
public:
|
||||
using utils::BasicException::BasicException;
|
||||
};
|
||||
} // namespace memgraph::expr
|
@ -202,9 +202,8 @@ Value ToBoltValue(msgs::Value value) {
|
||||
return Value{std::move(map)};
|
||||
}
|
||||
case msgs::Value::Type::Vertex:
|
||||
case msgs::Value::Type::Edge:
|
||||
case msgs::Value::Type::Path: {
|
||||
throw utils::BasicException("Path, Vertex and Edge not supported!");
|
||||
case msgs::Value::Type::Edge: {
|
||||
throw utils::BasicException("Vertex and Edge not supported!");
|
||||
}
|
||||
// TODO Value to Date types not supported
|
||||
}
|
||||
|
@ -15,11 +15,36 @@
|
||||
|
||||
#include <fmt/format.h>
|
||||
#include <boost/asio/ip/tcp.hpp>
|
||||
#include <boost/functional/hash.hpp>
|
||||
#include <boost/uuid/uuid.hpp>
|
||||
#include <boost/uuid/uuid_generators.hpp>
|
||||
#include <boost/uuid/uuid_io.hpp>
|
||||
|
||||
namespace memgraph::io {
|
||||
|
||||
struct PartialAddress {
|
||||
boost::asio::ip::address ip;
|
||||
uint16_t port;
|
||||
|
||||
friend bool operator==(const PartialAddress &lhs, const PartialAddress &rhs) = default;
|
||||
|
||||
/// unique_id is most dominant for ordering, then ip, then port
|
||||
friend bool operator<(const PartialAddress &lhs, const PartialAddress &rhs) {
|
||||
if (lhs.ip != rhs.ip) {
|
||||
return lhs.ip < rhs.ip;
|
||||
}
|
||||
|
||||
return lhs.port < rhs.port;
|
||||
}
|
||||
|
||||
std::string ToString() const { return fmt::format("PartialAddress {{ ip: {}, port: {} }}", ip.to_string(), port); }
|
||||
|
||||
friend std::ostream &operator<<(std::ostream &in, const PartialAddress &partial_address) {
|
||||
in << partial_address.ToString();
|
||||
return in;
|
||||
}
|
||||
};
|
||||
|
||||
struct Address {
|
||||
// It's important for all participants to have a
|
||||
// unique identifier - IP and port alone are not
|
||||
@ -54,6 +79,13 @@ struct Address {
|
||||
};
|
||||
}
|
||||
|
||||
PartialAddress ToPartialAddress() const {
|
||||
return PartialAddress{
|
||||
.ip = last_known_ip,
|
||||
.port = last_known_port,
|
||||
};
|
||||
}
|
||||
|
||||
friend bool operator==(const Address &lhs, const Address &rhs) = default;
|
||||
|
||||
/// unique_id is most dominant for ordering, then last_known_ip, then last_known_port
|
||||
@ -73,5 +105,36 @@ struct Address {
|
||||
return fmt::format("Address {{ unique_id: {}, last_known_ip: {}, last_known_port: {} }}",
|
||||
boost::uuids::to_string(unique_id), last_known_ip.to_string(), last_known_port);
|
||||
}
|
||||
|
||||
friend std::ostream &operator<<(std::ostream &in, const Address &address) {
|
||||
in << address.ToString();
|
||||
return in;
|
||||
}
|
||||
};
|
||||
|
||||
}; // namespace memgraph::io
|
||||
|
||||
namespace std {
|
||||
template <>
|
||||
struct hash<memgraph::io::PartialAddress> {
|
||||
size_t operator()(const memgraph::io::PartialAddress &pa) const {
|
||||
using boost::hash_combine;
|
||||
using boost::hash_value;
|
||||
|
||||
// Start with a hash value of 0 .
|
||||
std::size_t seed = 0;
|
||||
|
||||
if (pa.ip.is_v4()) {
|
||||
auto h = std::hash<boost::asio::ip::address_v4>()(pa.ip.to_v4());
|
||||
hash_combine(seed, h);
|
||||
} else {
|
||||
auto h = std::hash<boost::asio::ip::address_v6>()(pa.ip.to_v6());
|
||||
hash_combine(seed, h);
|
||||
}
|
||||
hash_combine(seed, hash_value(pa.port));
|
||||
|
||||
// Return the result.
|
||||
return seed;
|
||||
}
|
||||
};
|
||||
} // namespace std
|
||||
|
@ -29,6 +29,8 @@ class Simulator {
|
||||
explicit Simulator(SimulatorConfig config)
|
||||
: rng_(std::mt19937{config.rng_seed}), simulator_handle_{std::make_shared<SimulatorHandle>(config)} {}
|
||||
|
||||
~Simulator() { ShutDown(); }
|
||||
|
||||
void ShutDown() { simulator_handle_->ShutDown(); }
|
||||
|
||||
Io<SimulatorTransport> RegisterNew() {
|
||||
|
@ -16,12 +16,10 @@
|
||||
#include "io/simulator/simulator_stats.hpp"
|
||||
#include "io/time.hpp"
|
||||
#include "io/transport.hpp"
|
||||
#include "utils/exceptions.hpp"
|
||||
|
||||
namespace memgraph::io::simulator {
|
||||
|
||||
using memgraph::io::Duration;
|
||||
using memgraph::io::Time;
|
||||
|
||||
void SimulatorHandle::ShutDown() {
|
||||
std::unique_lock<std::mutex> lock(mu_);
|
||||
should_shut_down_ = true;
|
||||
@ -76,9 +74,15 @@ bool SimulatorHandle::MaybeTickSimulator() {
|
||||
const Duration clock_advance = std::chrono::microseconds{time_distrib_(rng_)};
|
||||
cluster_wide_time_microseconds_ += clock_advance;
|
||||
|
||||
MG_ASSERT(cluster_wide_time_microseconds_ < config_.abort_time,
|
||||
"Cluster has executed beyond its configured abort_time, and something may be failing to make progress "
|
||||
"in an expected amount of time.");
|
||||
if (cluster_wide_time_microseconds_ >= config_.abort_time) {
|
||||
if (should_shut_down_) {
|
||||
return false;
|
||||
}
|
||||
spdlog::error(
|
||||
"Cluster has executed beyond its configured abort_time, and something may be failing to make progress "
|
||||
"in an expected amount of time.");
|
||||
throw utils::BasicException{"Cluster has executed beyond its configured abort_time"};
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -121,7 +125,8 @@ bool SimulatorHandle::MaybeTickSimulator() {
|
||||
// don't add it anywhere, let it drop
|
||||
} else {
|
||||
// add to can_receive_ if not
|
||||
const auto &[om_vec, inserted] = can_receive_.try_emplace(to_address, std::vector<OpaqueMessage>());
|
||||
const auto &[om_vec, inserted] =
|
||||
can_receive_.try_emplace(to_address.ToPartialAddress(), std::vector<OpaqueMessage>());
|
||||
om_vec->second.emplace_back(std::move(opaque_message));
|
||||
}
|
||||
|
||||
|
@ -43,7 +43,7 @@ class SimulatorHandle {
|
||||
std::map<PromiseKey, DeadlineAndOpaquePromise> promises_;
|
||||
|
||||
// messages that are sent to servers that may later receive them
|
||||
std::map<Address, std::vector<OpaqueMessage>> can_receive_;
|
||||
std::map<PartialAddress, std::vector<OpaqueMessage>> can_receive_;
|
||||
|
||||
Time cluster_wide_time_microseconds_;
|
||||
bool should_shut_down_ = false;
|
||||
@ -59,7 +59,7 @@ class SimulatorHandle {
|
||||
const Time now = cluster_wide_time_microseconds_;
|
||||
for (auto it = promises_.begin(); it != promises_.end();) {
|
||||
auto &[promise_key, dop] = *it;
|
||||
if (dop.deadline < now) {
|
||||
if (dop.deadline < now && config_.perform_timeouts) {
|
||||
spdlog::info("timing out request from requester {} to replier {}.", promise_key.requester_address.ToString(),
|
||||
promise_key.replier_address.ToString());
|
||||
std::move(dop).promise.TimeOut();
|
||||
@ -76,6 +76,14 @@ class SimulatorHandle {
|
||||
explicit SimulatorHandle(SimulatorConfig config)
|
||||
: cluster_wide_time_microseconds_(config.start_time), rng_(config.rng_seed), config_(config) {}
|
||||
|
||||
~SimulatorHandle() {
|
||||
for (auto it = promises_.begin(); it != promises_.end();) {
|
||||
auto &[promise_key, dop] = *it;
|
||||
std::move(dop).promise.TimeOut();
|
||||
it = promises_.erase(it);
|
||||
}
|
||||
}
|
||||
|
||||
void IncrementServerCountAndWaitForQuiescentState(Address address);
|
||||
|
||||
/// This method causes most of the interesting simulation logic to happen, wrt network behavior.
|
||||
@ -121,9 +129,11 @@ class SimulatorHandle {
|
||||
|
||||
const Time deadline = cluster_wide_time_microseconds_ + timeout;
|
||||
|
||||
auto partial_address = receiver.ToPartialAddress();
|
||||
|
||||
while (!should_shut_down_ && (cluster_wide_time_microseconds_ < deadline)) {
|
||||
if (can_receive_.contains(receiver)) {
|
||||
std::vector<OpaqueMessage> &can_rx = can_receive_.at(receiver);
|
||||
if (can_receive_.contains(partial_address)) {
|
||||
std::vector<OpaqueMessage> &can_rx = can_receive_.at(partial_address);
|
||||
if (!can_rx.empty()) {
|
||||
OpaqueMessage message = std::move(can_rx.back());
|
||||
can_rx.pop_back();
|
||||
|
@ -65,7 +65,7 @@ class MachineManager {
|
||||
MachineConfig config_;
|
||||
CoordinatorRsm<IoImpl> coordinator_;
|
||||
ShardManager<IoImpl> shard_manager_;
|
||||
Time next_cron_;
|
||||
Time next_cron_ = Time::min();
|
||||
|
||||
public:
|
||||
// TODO initialize ShardManager with "real" coordinator addresses instead of io.GetAddress
|
||||
@ -95,7 +95,7 @@ class MachineManager {
|
||||
WriteResponse<CoordinatorWriteResponses>, ReadRequest<StorageReadRequest>,
|
||||
AppendRequest<StorageWriteRequest>, WriteRequest<StorageWriteRequest>>;
|
||||
|
||||
spdlog::info("MM waiting on Receive");
|
||||
spdlog::info("MM waiting on Receive on address {}", io_.GetAddress().ToString());
|
||||
|
||||
// Note: this parameter pack must be kept in-sync with the AllMessages parameter pack above
|
||||
auto request_result = io_.template ReceiveWithTimeout<
|
||||
@ -106,7 +106,6 @@ class MachineManager {
|
||||
|
||||
if (request_result.HasError()) {
|
||||
// time to do Cron
|
||||
spdlog::info("MM got timeout");
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -116,7 +115,6 @@ class MachineManager {
|
||||
|
||||
// If message is for the coordinator, cast it to subset and pass it to the coordinator
|
||||
bool to_coordinator = coordinator_.GetAddress() == request_envelope.to_address;
|
||||
spdlog::info("coordinator: {}", coordinator_.GetAddress().ToString());
|
||||
if (to_coordinator) {
|
||||
std::optional<CoordinatorMessages> conversion_attempt =
|
||||
ConvertVariant<AllMessages, ReadRequest<CoordinatorReadRequests>, AppendRequest<CoordinatorWriteRequests>,
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include <cstdint>
|
||||
#include <exception>
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
#include <functional>
|
||||
#include <limits>
|
||||
#include <map>
|
||||
@ -265,6 +266,10 @@ DEFINE_uint64(
|
||||
"Total memory limit in MiB. Set to 0 to use the default values which are 100\% of the phyisical memory if the swap "
|
||||
"is enabled and 90\% of the physical memory otherwise.");
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_string(split_file, "",
|
||||
"Path to the split file which contains the predefined labels, properties, edge types and shard-ranges.");
|
||||
|
||||
namespace {
|
||||
using namespace std::literals;
|
||||
inline constexpr std::array isolation_level_mappings{
|
||||
@ -640,11 +645,21 @@ int main(int argc, char **argv) {
|
||||
};
|
||||
|
||||
memgraph::coordinator::ShardMap sm;
|
||||
auto prop_map = sm.AllocatePropertyIds(std::vector<std::string>{"property"});
|
||||
auto edge_type_map = sm.AllocateEdgeTypeIds(std::vector<std::string>{"edge_type"});
|
||||
std::vector<memgraph::storage::v3::SchemaProperty> schema{
|
||||
{prop_map.at("property"), memgraph::common::SchemaType::INT}};
|
||||
sm.InitializeNewLabel("label", schema, 1, sm.shard_map_version);
|
||||
if (FLAGS_split_file.empty()) {
|
||||
const std::string property{"property"};
|
||||
const std::string label{"label"};
|
||||
auto prop_map = sm.AllocatePropertyIds(std::vector<std::string>{property});
|
||||
auto edge_type_map = sm.AllocateEdgeTypeIds(std::vector<std::string>{"TO"});
|
||||
std::vector<memgraph::storage::v3::SchemaProperty> schema{
|
||||
{prop_map.at(property), memgraph::common::SchemaType::INT}};
|
||||
sm.InitializeNewLabel(label, schema, 1, sm.shard_map_version);
|
||||
sm.SplitShard(sm.GetHlc(), *sm.GetLabelId(label),
|
||||
std::vector<memgraph::storage::v3::PropertyValue>{memgraph::storage::v3::PropertyValue{2}});
|
||||
} else {
|
||||
std::ifstream input{FLAGS_split_file, std::ios::in};
|
||||
MG_ASSERT(input.is_open(), "Cannot open split file to read: {}", FLAGS_split_file);
|
||||
sm = memgraph::coordinator::ShardMap::Parse(input);
|
||||
}
|
||||
|
||||
memgraph::coordinator::Coordinator coordinator{sm};
|
||||
|
||||
|
@ -33,7 +33,7 @@ add_dependencies(mg-query-v2 generate_lcp_query_v2)
|
||||
target_include_directories(mg-query-v2 PUBLIC ${CMAKE_SOURCE_DIR}/include)
|
||||
target_include_directories(mg-query-v2 PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/bindings)
|
||||
target_link_libraries(mg-query-v2 dl cppitertools Boost::headers)
|
||||
target_link_libraries(mg-query-v2 mg-integrations-pulsar mg-integrations-kafka mg-storage-v3 mg-license mg-utils mg-kvstore mg-memory)
|
||||
target_link_libraries(mg-query-v2 mg-integrations-pulsar mg-integrations-kafka mg-storage-v3 mg-license mg-utils mg-kvstore mg-memory mg-coordinator)
|
||||
target_link_libraries(mg-query-v2 mg-expr)
|
||||
|
||||
if(NOT "${MG_PYTHON_PATH}" STREQUAL "")
|
||||
|
@ -14,13 +14,12 @@
|
||||
#include "storage/v3/id_types.hpp"
|
||||
|
||||
namespace memgraph::query::v2::accessors {
|
||||
EdgeAccessor::EdgeAccessor(Edge edge, std::vector<std::pair<PropertyId, Value>> props)
|
||||
: edge(std::move(edge)), properties(std::move(props)) {}
|
||||
EdgeAccessor::EdgeAccessor(Edge edge) : edge(std::move(edge)) {}
|
||||
|
||||
EdgeTypeId EdgeAccessor::EdgeType() const { return EdgeTypeId::FromUint(edge.type.id); }
|
||||
EdgeTypeId EdgeAccessor::EdgeType() const { return edge.type.id; }
|
||||
|
||||
std::vector<std::pair<PropertyId, Value>> EdgeAccessor::Properties() const {
|
||||
return properties;
|
||||
const std::vector<std::pair<PropertyId, Value>> &EdgeAccessor::Properties() const {
|
||||
return edge.properties;
|
||||
// std::map<std::string, TypedValue> res;
|
||||
// for (const auto &[name, value] : *properties) {
|
||||
// res[name] = ValueToTypedValue(value);
|
||||
@ -34,7 +33,9 @@ Value EdgeAccessor::GetProperty(const std::string & /*prop_name*/) const {
|
||||
return {};
|
||||
}
|
||||
|
||||
Edge EdgeAccessor::GetEdge() const { return edge; }
|
||||
const Edge &EdgeAccessor::GetEdge() const { return edge; }
|
||||
|
||||
bool EdgeAccessor::IsCycle() const { return edge.src == edge.dst; };
|
||||
|
||||
VertexAccessor EdgeAccessor::To() const { return VertexAccessor(Vertex{edge.dst}, {}); }
|
||||
|
||||
@ -43,6 +44,10 @@ VertexAccessor EdgeAccessor::From() const { return VertexAccessor(Vertex{edge.sr
|
||||
VertexAccessor::VertexAccessor(Vertex v, std::vector<std::pair<PropertyId, Value>> props)
|
||||
: vertex(std::move(v)), properties(std::move(props)) {}
|
||||
|
||||
Label VertexAccessor::PrimaryLabel() const { return vertex.id.first; }
|
||||
|
||||
const msgs::VertexId &VertexAccessor::Id() const { return vertex.id; }
|
||||
|
||||
std::vector<Label> VertexAccessor::Labels() const { return vertex.labels; }
|
||||
|
||||
bool VertexAccessor::HasLabel(Label &label) const {
|
||||
@ -50,14 +55,7 @@ bool VertexAccessor::HasLabel(Label &label) const {
|
||||
[label](const auto &l) { return l.id == label.id; }) != vertex.labels.end();
|
||||
}
|
||||
|
||||
std::vector<std::pair<PropertyId, Value>> VertexAccessor::Properties() const {
|
||||
// std::map<std::string, TypedValue> res;
|
||||
// for (const auto &[name, value] : *properties) {
|
||||
// res[name] = ValueToTypedValue(value);
|
||||
// }
|
||||
// return res;
|
||||
return properties;
|
||||
}
|
||||
const std::vector<std::pair<PropertyId, Value>> &VertexAccessor::Properties() const { return properties; }
|
||||
|
||||
Value VertexAccessor::GetProperty(PropertyId prop_id) const {
|
||||
return std::find_if(properties.begin(), properties.end(), [&](auto &pr) { return prop_id == pr.first; })->second;
|
||||
|
@ -36,57 +36,61 @@ class VertexAccessor;
|
||||
|
||||
class EdgeAccessor final {
|
||||
public:
|
||||
EdgeAccessor(Edge edge, std::vector<std::pair<PropertyId, Value>> props);
|
||||
explicit EdgeAccessor(Edge edge);
|
||||
|
||||
EdgeTypeId EdgeType() const;
|
||||
[[nodiscard]] EdgeTypeId EdgeType() const;
|
||||
|
||||
std::vector<std::pair<PropertyId, Value>> Properties() const;
|
||||
[[nodiscard]] const std::vector<std::pair<PropertyId, Value>> &Properties() const;
|
||||
|
||||
Value GetProperty(const std::string &prop_name) const;
|
||||
[[nodiscard]] Value GetProperty(const std::string &prop_name) const;
|
||||
|
||||
Edge GetEdge() const;
|
||||
[[nodiscard]] const Edge &GetEdge() const;
|
||||
|
||||
[[nodiscard]] bool IsCycle() const;
|
||||
|
||||
// Dummy function
|
||||
// NOLINTNEXTLINE(readability-convert-member-functions-to-static)
|
||||
inline size_t CypherId() const { return 10; }
|
||||
[[nodiscard]] size_t CypherId() const { return 10; }
|
||||
|
||||
// bool HasSrcAccessor const { return src == nullptr; }
|
||||
// bool HasDstAccessor const { return dst == nullptr; }
|
||||
|
||||
VertexAccessor To() const;
|
||||
VertexAccessor From() const;
|
||||
[[nodiscard]] VertexAccessor To() const;
|
||||
[[nodiscard]] VertexAccessor From() const;
|
||||
|
||||
friend bool operator==(const EdgeAccessor &lhs, const EdgeAccessor &rhs) {
|
||||
return lhs.edge == rhs.edge && lhs.properties == rhs.properties;
|
||||
}
|
||||
friend bool operator==(const EdgeAccessor &lhs, const EdgeAccessor &rhs) { return lhs.edge == rhs.edge; }
|
||||
|
||||
friend bool operator!=(const EdgeAccessor &lhs, const EdgeAccessor &rhs) { return !(lhs == rhs); }
|
||||
|
||||
private:
|
||||
Edge edge;
|
||||
std::vector<std::pair<PropertyId, Value>> properties;
|
||||
};
|
||||
|
||||
class VertexAccessor final {
|
||||
public:
|
||||
using PropertyId = msgs::PropertyId;
|
||||
using Label = msgs::Label;
|
||||
using VertexId = msgs::VertexId;
|
||||
VertexAccessor(Vertex v, std::vector<std::pair<PropertyId, Value>> props);
|
||||
|
||||
std::vector<Label> Labels() const;
|
||||
[[nodiscard]] Label PrimaryLabel() const;
|
||||
|
||||
bool HasLabel(Label &label) const;
|
||||
[[nodiscard]] const msgs::VertexId &Id() const;
|
||||
|
||||
std::vector<std::pair<PropertyId, Value>> Properties() const;
|
||||
[[nodiscard]] std::vector<Label> Labels() const;
|
||||
|
||||
Value GetProperty(PropertyId prop_id) const;
|
||||
Value GetProperty(const std::string &prop_name) const;
|
||||
[[nodiscard]] bool HasLabel(Label &label) const;
|
||||
|
||||
msgs::Vertex GetVertex() const;
|
||||
[[nodiscard]] const std::vector<std::pair<PropertyId, Value>> &Properties() const;
|
||||
|
||||
[[nodiscard]] Value GetProperty(PropertyId prop_id) const;
|
||||
[[nodiscard]] Value GetProperty(const std::string &prop_name) const;
|
||||
|
||||
[[nodiscard]] msgs::Vertex GetVertex() const;
|
||||
|
||||
// Dummy function
|
||||
// NOLINTNEXTLINE(readability-convert-member-functions-to-static)
|
||||
inline size_t CypherId() const { return 10; }
|
||||
[[nodiscard]] size_t CypherId() const { return 10; }
|
||||
|
||||
// auto InEdges(storage::View view, const std::vector<storage::EdgeTypeId> &edge_types) const
|
||||
// -> storage::Result<decltype(iter::imap(MakeEdgeAccessor, *impl_.InEdges(view)))> {
|
||||
|
@ -11,5 +11,9 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifdef MG_AST_INCLUDE_PATH
|
||||
#error You are probably trying to include files from expr from both the storage and query engines! You will have a rough time kid!
|
||||
#endif
|
||||
|
||||
#define MG_AST_INCLUDE_PATH "query/v2/frontend/ast/ast.hpp" // NOLINT(cppcoreguidelines-macro-usage)
|
||||
#define MG_INJECTED_NAMESPACE_NAME memgraph::query::v2 // NOLINT(cppcoreguidelines-macro-usage)
|
||||
|
@ -25,6 +25,23 @@
|
||||
|
||||
namespace memgraph::query::v2 {
|
||||
|
||||
// Used to store range of ids that are available
|
||||
// Used for edge id assignment
|
||||
class IdAllocator {
|
||||
public:
|
||||
IdAllocator() = default;
|
||||
IdAllocator(uint64_t low, uint64_t high) : current_edge_id_{low}, max_id_{high} {};
|
||||
|
||||
uint64_t AllocateId() {
|
||||
MG_ASSERT(current_edge_id_ < max_id_, "Current Edge Id went above max id");
|
||||
return current_edge_id_++;
|
||||
}
|
||||
|
||||
private:
|
||||
uint64_t current_edge_id_;
|
||||
uint64_t max_id_;
|
||||
};
|
||||
|
||||
struct EvaluationContext {
|
||||
/// Memory for allocations during evaluation of a *single* Pull call.
|
||||
///
|
||||
@ -63,7 +80,7 @@ inline std::vector<storage::v3::LabelId> NamesToLabels(const std::vector<std::st
|
||||
// TODO Fix by using reference
|
||||
if (shard_request_manager != nullptr) {
|
||||
for (const auto &name : label_names) {
|
||||
labels.push_back(shard_request_manager->LabelNameToLabelId(name));
|
||||
labels.push_back(shard_request_manager->NameToLabel(name));
|
||||
}
|
||||
}
|
||||
return labels;
|
||||
@ -79,9 +96,9 @@ struct ExecutionContext {
|
||||
plan::ProfilingStats stats;
|
||||
plan::ProfilingStats *stats_root{nullptr};
|
||||
ExecutionStats execution_stats;
|
||||
// TriggerContextCollector *trigger_context_collector{nullptr};
|
||||
utils::AsyncTimer timer;
|
||||
msgs::ShardRequestManagerInterface *shard_request_manager{nullptr};
|
||||
IdAllocator edge_ids_alloc;
|
||||
};
|
||||
|
||||
static_assert(std::is_move_assignable_v<ExecutionContext>, "ExecutionContext must be move assignable!");
|
||||
|
@ -49,9 +49,7 @@ inline TypedValue ValueToTypedValue(const msgs::Value &value) {
|
||||
case Value::Type::Vertex:
|
||||
return TypedValue(accessors::VertexAccessor(value.vertex_v, {}));
|
||||
case Value::Type::Edge:
|
||||
return TypedValue(accessors::EdgeAccessor(value.edge_v, {}));
|
||||
case Value::Type::Path:
|
||||
break;
|
||||
return TypedValue(accessors::EdgeAccessor(value.edge_v));
|
||||
}
|
||||
throw std::runtime_error("Incorrect type in conversion");
|
||||
}
|
||||
@ -91,7 +89,10 @@ inline msgs::Value TypedValueToValue(const TypedValue &value) {
|
||||
case TypedValue::Type::Edge:
|
||||
return Value(value.ValueEdge().GetEdge());
|
||||
case TypedValue::Type::Path:
|
||||
default:
|
||||
case TypedValue::Type::LocalTime:
|
||||
case TypedValue::Type::LocalDateTime:
|
||||
case TypedValue::Type::Date:
|
||||
case TypedValue::Type::Duration:
|
||||
break;
|
||||
}
|
||||
throw std::runtime_error("Incorrect type in conversion");
|
||||
|
@ -680,7 +680,6 @@ struct PullPlan {
|
||||
PullPlan::PullPlan(const std::shared_ptr<CachedPlan> plan, const Parameters ¶meters, const bool is_profile_query,
|
||||
DbAccessor *dba, InterpreterContext *interpreter_context, utils::MemoryResource *execution_memory,
|
||||
msgs::ShardRequestManagerInterface *shard_request_manager, const std::optional<size_t> memory_limit)
|
||||
// TriggerContextCollector *trigger_context_collector, const std::optional<size_t> memory_limit)
|
||||
: plan_(plan),
|
||||
cursor_(plan->plan().MakeCursor(execution_memory)),
|
||||
frame_(plan->symbol_table().max_position(), execution_memory),
|
||||
@ -696,8 +695,8 @@ PullPlan::PullPlan(const std::shared_ptr<CachedPlan> plan, const Parameters &par
|
||||
}
|
||||
ctx_.is_shutting_down = &interpreter_context->is_shutting_down;
|
||||
ctx_.is_profile_query = is_profile_query;
|
||||
// ctx_.trigger_context_collector = trigger_context_collector;
|
||||
ctx_.shard_request_manager = shard_request_manager;
|
||||
ctx_.edge_ids_alloc = interpreter_context->edge_ids_alloc;
|
||||
}
|
||||
|
||||
std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::Pull(AnyStream *stream, std::optional<int> n,
|
||||
@ -805,6 +804,20 @@ Interpreter::Interpreter(InterpreterContext *interpreter_context) : interpreter_
|
||||
coordinator::CoordinatorClient<io::local_transport::LocalTransport>(
|
||||
query_io, interpreter_context_->coordinator_address, std::vector{interpreter_context_->coordinator_address}),
|
||||
std::move(query_io));
|
||||
// Get edge ids
|
||||
coordinator::CoordinatorWriteRequests requests{coordinator::AllocateEdgeIdBatchRequest{.batch_size = 1000000}};
|
||||
io::rsm::WriteRequest<coordinator::CoordinatorWriteRequests> ww;
|
||||
ww.operation = requests;
|
||||
auto resp = interpreter_context_->io
|
||||
.Request<io::rsm::WriteRequest<coordinator::CoordinatorWriteRequests>,
|
||||
io::rsm::WriteResponse<coordinator::CoordinatorWriteResponses>>(
|
||||
interpreter_context_->coordinator_address, ww)
|
||||
.Wait();
|
||||
if (resp.HasValue()) {
|
||||
const auto alloc_edge_id_reps =
|
||||
std::get<coordinator::AllocateEdgeIdBatchResponse>(resp.GetValue().message.write_return);
|
||||
interpreter_context_->edge_ids_alloc = {alloc_edge_id_reps.low, alloc_edge_id_reps.high};
|
||||
}
|
||||
}
|
||||
|
||||
PreparedQuery Interpreter::PrepareTransactionQuery(std::string_view query_upper) {
|
||||
|
@ -12,6 +12,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <gflags/gflags.h>
|
||||
#include <cstdint>
|
||||
|
||||
#include "coordinator/coordinator.hpp"
|
||||
#include "coordinator/coordinator_client.hpp"
|
||||
@ -30,6 +31,7 @@
|
||||
#include "query/v2/metadata.hpp"
|
||||
#include "query/v2/plan/operator.hpp"
|
||||
#include "query/v2/plan/read_write_type_checker.hpp"
|
||||
#include "query/v2/requests.hpp"
|
||||
#include "query/v2/stream.hpp"
|
||||
#include "storage/v3/isolation_level.hpp"
|
||||
#include "storage/v3/name_id_mapper.hpp"
|
||||
@ -184,6 +186,7 @@ struct InterpreterContext {
|
||||
utils::SkipList<PlanCacheEntry> plan_cache;
|
||||
|
||||
const InterpreterConfig config;
|
||||
IdAllocator edge_ids_alloc;
|
||||
|
||||
// TODO (antaljanosbenjamin) Figure out an abstraction for io::Io to make it possible to construct an interpreter
|
||||
// context with a simulator transport without templatizing it.
|
||||
@ -334,7 +337,7 @@ class Interpreter final {
|
||||
|
||||
// This cannot be std::optional because we need to move this accessor later on into a lambda capture
|
||||
// which is assigned to std::function. std::function requires every object to be copyable, so we
|
||||
// move this unique_ptr into a shrared_ptr.
|
||||
// move this unique_ptr into a shared_ptr.
|
||||
std::unique_ptr<storage::v3::Shard::Accessor> db_accessor_;
|
||||
std::optional<DbAccessor> execution_db_accessor_;
|
||||
std::unique_ptr<msgs::ShardRequestManagerInterface> shard_request_manager_;
|
||||
|
@ -183,15 +183,22 @@ class DistributedCreateNodeCursor : public Cursor {
|
||||
std::vector<msgs::NewVertex> requests;
|
||||
for (const auto &node_info : nodes_info_) {
|
||||
msgs::NewVertex rqst;
|
||||
MG_ASSERT(!node_info->labels.empty(), "Cannot determine primary label");
|
||||
const auto primary_label = node_info->labels[0];
|
||||
// TODO(jbajic) Fix properties not send,
|
||||
// suggestion: ignore distinction between properties and primary keys
|
||||
// since schema validation is done on storage side
|
||||
std::map<msgs::PropertyId, msgs::Value> properties;
|
||||
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, nullptr,
|
||||
storage::v3::View::NEW);
|
||||
if (const auto *node_info_properties = std::get_if<PropertiesMapList>(&node_info->properties)) {
|
||||
for (const auto &[key, value_expression] : *node_info_properties) {
|
||||
TypedValue val = value_expression->Accept(evaluator);
|
||||
properties[key] = TypedValueToValue(val);
|
||||
if (context.shard_request_manager->IsPrimaryKey(key)) {
|
||||
rqst.primary_key.push_back(storage::v3::TypedValueToValue(val));
|
||||
|
||||
if (context.shard_request_manager->IsPrimaryKey(primary_label, key)) {
|
||||
rqst.primary_key.push_back(TypedValueToValue(val));
|
||||
} else {
|
||||
properties[key] = TypedValueToValue(val);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@ -199,9 +206,10 @@ class DistributedCreateNodeCursor : public Cursor {
|
||||
for (const auto &[key, value] : property_map) {
|
||||
auto key_str = std::string(key);
|
||||
auto property_id = context.shard_request_manager->NameToProperty(key_str);
|
||||
properties[property_id] = TypedValueToValue(value);
|
||||
if (context.shard_request_manager->IsPrimaryKey(property_id)) {
|
||||
if (context.shard_request_manager->IsPrimaryKey(primary_label, property_id)) {
|
||||
rqst.primary_key.push_back(storage::v3::TypedValueToValue(value));
|
||||
} else {
|
||||
properties[property_id] = TypedValueToValue(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -210,7 +218,7 @@ class DistributedCreateNodeCursor : public Cursor {
|
||||
throw QueryRuntimeException("Primary label must be defined!");
|
||||
}
|
||||
// TODO(kostasrim) Copy non primary labels as well
|
||||
rqst.label_ids.push_back(msgs::Label{node_info->labels[0]});
|
||||
rqst.label_ids.push_back(msgs::Label{.id = primary_label});
|
||||
requests.push_back(std::move(rqst));
|
||||
}
|
||||
return requests;
|
||||
@ -280,10 +288,12 @@ CreateExpand::CreateExpand(const NodeCreationInfo &node_info, const EdgeCreation
|
||||
|
||||
ACCEPT_WITH_INPUT(CreateExpand)
|
||||
|
||||
class DistributedCreateExpandCursor;
|
||||
|
||||
UniqueCursorPtr CreateExpand::MakeCursor(utils::MemoryResource *mem) const {
|
||||
EventCounter::IncrementCounter(EventCounter::CreateNodeOperator);
|
||||
|
||||
return MakeUniqueCursorPtr<CreateExpandCursor>(mem, *this, mem);
|
||||
return MakeUniqueCursorPtr<DistributedCreateExpandCursor>(mem, input_, mem, *this);
|
||||
}
|
||||
|
||||
std::vector<Symbol> CreateExpand::ModifiedSymbols(const SymbolTable &table) const {
|
||||
@ -371,6 +381,8 @@ class DistributedScanAllAndFilterCursor : public Cursor {
|
||||
}
|
||||
}
|
||||
|
||||
request_state_.label = label_.has_value() ? std::make_optional(shard_manager.LabelToName(*label_)) : std::nullopt;
|
||||
|
||||
if (current_vertex_it == current_batch.end()) {
|
||||
if (request_state_.state == State::COMPLETED || !MakeRequest(shard_manager)) {
|
||||
ResetExecutionState();
|
||||
@ -389,7 +401,6 @@ class DistributedScanAllAndFilterCursor : public Cursor {
|
||||
current_batch.clear();
|
||||
current_vertex_it = current_batch.end();
|
||||
request_state_ = msgs::ExecutionState<msgs::ScanVerticesRequest>{};
|
||||
request_state_.label = "label";
|
||||
}
|
||||
|
||||
void Reset() override {
|
||||
@ -414,6 +425,8 @@ ScanAll::ScanAll(const std::shared_ptr<LogicalOperator> &input, Symbol output_sy
|
||||
|
||||
ACCEPT_WITH_INPUT(ScanAll)
|
||||
|
||||
class DistributedScanAllCursor;
|
||||
|
||||
UniqueCursorPtr ScanAll::MakeCursor(utils::MemoryResource *mem) const {
|
||||
EventCounter::IncrementCounter(EventCounter::ScanAllOperator);
|
||||
|
||||
@ -552,10 +565,12 @@ Expand::Expand(const std::shared_ptr<LogicalOperator> &input, Symbol input_symbo
|
||||
|
||||
ACCEPT_WITH_INPUT(Expand)
|
||||
|
||||
class DistributedExpandCursor;
|
||||
|
||||
UniqueCursorPtr Expand::MakeCursor(utils::MemoryResource *mem) const {
|
||||
EventCounter::IncrementCounter(EventCounter::ExpandOperator);
|
||||
|
||||
return MakeUniqueCursorPtr<ExpandCursor>(mem, *this, mem);
|
||||
return MakeUniqueCursorPtr<DistributedExpandCursor>(mem, *this, mem);
|
||||
}
|
||||
|
||||
std::vector<Symbol> Expand::ModifiedSymbols(const SymbolTable &table) const {
|
||||
@ -2338,4 +2353,255 @@ bool Foreach::Accept(HierarchicalLogicalOperatorVisitor &visitor) {
|
||||
return visitor.PostVisit(*this);
|
||||
}
|
||||
|
||||
class DistributedCreateExpandCursor : public Cursor {
|
||||
public:
|
||||
using InputOperator = std::shared_ptr<memgraph::query::v2::plan::LogicalOperator>;
|
||||
DistributedCreateExpandCursor(const InputOperator &op, utils::MemoryResource *mem, const CreateExpand &self)
|
||||
: input_cursor_{op->MakeCursor(mem)}, self_{self} {}
|
||||
|
||||
bool Pull(Frame &frame, ExecutionContext &context) override {
|
||||
SCOPED_PROFILE_OP("CreateExpand");
|
||||
if (!input_cursor_->Pull(frame, context)) {
|
||||
return false;
|
||||
}
|
||||
auto &shard_manager = context.shard_request_manager;
|
||||
ResetExecutionState();
|
||||
shard_manager->Request(state_, ExpandCreationInfoToRequest(context, frame));
|
||||
return true;
|
||||
}
|
||||
|
||||
void Shutdown() override { input_cursor_->Shutdown(); }
|
||||
|
||||
void Reset() override {
|
||||
input_cursor_->Reset();
|
||||
ResetExecutionState();
|
||||
}
|
||||
|
||||
// Get the existing node other vertex
|
||||
accessors::VertexAccessor &OtherVertex(Frame &frame) const {
|
||||
// This assumes that vertex exists
|
||||
MG_ASSERT(self_.existing_node_, "Vertex creating with edge not supported!");
|
||||
TypedValue &dest_node_value = frame[self_.node_info_.symbol];
|
||||
ExpectType(self_.node_info_.symbol, dest_node_value, TypedValue::Type::Vertex);
|
||||
return dest_node_value.ValueVertex();
|
||||
}
|
||||
|
||||
std::vector<msgs::NewExpand> ExpandCreationInfoToRequest(ExecutionContext &context, Frame &frame) const {
|
||||
std::vector<msgs::NewExpand> edge_requests;
|
||||
for (const auto &edge_info : std::vector{self_.edge_info_}) {
|
||||
msgs::NewExpand request{.id = {context.edge_ids_alloc.AllocateId()}};
|
||||
ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, nullptr,
|
||||
storage::v3::View::NEW);
|
||||
request.type = {edge_info.edge_type};
|
||||
if (const auto *edge_info_properties = std::get_if<PropertiesMapList>(&edge_info.properties)) {
|
||||
for (const auto &[property, value_expression] : *edge_info_properties) {
|
||||
TypedValue val = value_expression->Accept(evaluator);
|
||||
request.properties.emplace_back(property, storage::v3::TypedValueToValue(val));
|
||||
}
|
||||
} else {
|
||||
// handle parameter
|
||||
auto property_map = evaluator.Visit(*std::get<ParameterLookup *>(edge_info.properties)).ValueMap();
|
||||
for (const auto &[property, value] : property_map) {
|
||||
const auto property_id = context.shard_request_manager->NameToProperty(std::string(property));
|
||||
request.properties.emplace_back(property_id, storage::v3::TypedValueToValue(value));
|
||||
}
|
||||
}
|
||||
// src, dest
|
||||
TypedValue &v1_value = frame[self_.input_symbol_];
|
||||
const auto &v1 = v1_value.ValueVertex();
|
||||
const auto &v2 = OtherVertex(frame);
|
||||
|
||||
// Set src and dest vertices
|
||||
// TODO(jbajic) Currently we are only handling scenario where vertices
|
||||
// are matched
|
||||
const auto set_vertex = [&context](const auto &vertex, auto &vertex_id) {
|
||||
vertex_id.first = vertex.PrimaryLabel();
|
||||
for (const auto &[key, val] : vertex.Properties()) {
|
||||
if (context.shard_request_manager->IsPrimaryKey(vertex_id.first.id, key)) {
|
||||
vertex_id.second.push_back(val);
|
||||
}
|
||||
}
|
||||
};
|
||||
std::invoke([&]() {
|
||||
switch (edge_info.direction) {
|
||||
case EdgeAtom::Direction::IN: {
|
||||
set_vertex(v1, request.src_vertex);
|
||||
set_vertex(v2, request.dest_vertex);
|
||||
break;
|
||||
}
|
||||
case EdgeAtom::Direction::OUT: {
|
||||
set_vertex(v1, request.dest_vertex);
|
||||
set_vertex(v2, request.src_vertex);
|
||||
break;
|
||||
}
|
||||
case EdgeAtom::Direction::BOTH:
|
||||
LOG_FATAL("Must indicate exact expansion direction here");
|
||||
}
|
||||
});
|
||||
|
||||
edge_requests.push_back(std::move(request));
|
||||
}
|
||||
return edge_requests;
|
||||
}
|
||||
|
||||
private:
|
||||
void ResetExecutionState() { state_ = {}; }
|
||||
|
||||
const UniqueCursorPtr input_cursor_;
|
||||
const CreateExpand &self_;
|
||||
msgs::ExecutionState<msgs::CreateExpandRequest> state_;
|
||||
};
|
||||
|
||||
class DistributedExpandCursor : public Cursor {
|
||||
public:
|
||||
explicit DistributedExpandCursor(const Expand &self, utils::MemoryResource *mem)
|
||||
: self_(self),
|
||||
input_cursor_(self.input_->MakeCursor(mem)),
|
||||
current_in_edge_it_(current_in_edges_.begin()),
|
||||
current_out_edge_it_(current_out_edges_.begin()) {
|
||||
if (self_.common_.existing_node) {
|
||||
throw QueryRuntimeException("Cannot use existing node with DistributedExpandOne cursor!");
|
||||
}
|
||||
}
|
||||
|
||||
using VertexAccessor = accessors::VertexAccessor;
|
||||
using EdgeAccessor = accessors::EdgeAccessor;
|
||||
|
||||
bool InitEdges(Frame &frame, ExecutionContext &context) {
|
||||
// Input Vertex could be null if it is created by a failed optional match. In
|
||||
// those cases we skip that input pull and continue with the next.
|
||||
|
||||
while (true) {
|
||||
if (!input_cursor_->Pull(frame, context)) return false;
|
||||
TypedValue &vertex_value = frame[self_.input_symbol_];
|
||||
|
||||
// Null check due to possible failed optional match.
|
||||
if (vertex_value.IsNull()) continue;
|
||||
|
||||
ExpectType(self_.input_symbol_, vertex_value, TypedValue::Type::Vertex);
|
||||
auto &vertex = vertex_value.ValueVertex();
|
||||
static constexpr auto direction_to_msgs_direction = [](const EdgeAtom::Direction direction) {
|
||||
switch (direction) {
|
||||
case EdgeAtom::Direction::IN:
|
||||
return msgs::EdgeDirection::IN;
|
||||
case EdgeAtom::Direction::OUT:
|
||||
return msgs::EdgeDirection::OUT;
|
||||
case EdgeAtom::Direction::BOTH:
|
||||
return msgs::EdgeDirection::BOTH;
|
||||
}
|
||||
};
|
||||
|
||||
msgs::ExpandOneRequest request;
|
||||
request.direction = direction_to_msgs_direction(self_.common_.direction);
|
||||
// to not fetch any properties of the edges
|
||||
request.edge_properties.emplace();
|
||||
request.src_vertices.push_back(vertex.Id());
|
||||
msgs::ExecutionState<msgs::ExpandOneRequest> request_state;
|
||||
auto result_rows = context.shard_request_manager->Request(request_state, std::move(request));
|
||||
MG_ASSERT(result_rows.size() == 1);
|
||||
auto &result_row = result_rows.front();
|
||||
|
||||
const auto convert_edges = [&vertex](
|
||||
std::vector<msgs::ExpandOneResultRow::EdgeWithSpecificProperties> &&edge_messages,
|
||||
const EdgeAtom::Direction direction) {
|
||||
std::vector<EdgeAccessor> edge_accessors;
|
||||
edge_accessors.reserve(edge_messages.size());
|
||||
switch (direction) {
|
||||
case EdgeAtom::Direction::IN: {
|
||||
for (auto &edge : edge_messages) {
|
||||
edge_accessors.emplace_back(
|
||||
msgs::Edge{std::move(edge.other_end), vertex.Id(), {}, {edge.gid}, edge.type});
|
||||
}
|
||||
break;
|
||||
}
|
||||
case EdgeAtom::Direction::OUT: {
|
||||
for (auto &edge : edge_messages) {
|
||||
edge_accessors.emplace_back(
|
||||
msgs::Edge{vertex.Id(), std::move(edge.other_end), {}, {edge.gid}, edge.type});
|
||||
}
|
||||
break;
|
||||
}
|
||||
case EdgeAtom::Direction::BOTH: {
|
||||
LOG_FATAL("Must indicate exact expansion direction here");
|
||||
}
|
||||
}
|
||||
return edge_accessors;
|
||||
};
|
||||
current_in_edges_ =
|
||||
convert_edges(std::move(result_row.in_edges_with_specific_properties), EdgeAtom::Direction::IN);
|
||||
current_in_edge_it_ = current_in_edges_.begin();
|
||||
current_in_edges_ =
|
||||
convert_edges(std::move(result_row.in_edges_with_specific_properties), EdgeAtom::Direction::OUT);
|
||||
current_in_edge_it_ = current_in_edges_.begin();
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
bool Pull(Frame &frame, ExecutionContext &context) override {
|
||||
SCOPED_PROFILE_OP("DistributedExpand");
|
||||
// A helper function for expanding a node from an edge.
|
||||
auto pull_node = [this, &frame](const EdgeAccessor &new_edge, EdgeAtom::Direction direction) {
|
||||
if (self_.common_.existing_node) return;
|
||||
switch (direction) {
|
||||
case EdgeAtom::Direction::IN:
|
||||
frame[self_.common_.node_symbol] = new_edge.From();
|
||||
break;
|
||||
case EdgeAtom::Direction::OUT:
|
||||
frame[self_.common_.node_symbol] = new_edge.To();
|
||||
break;
|
||||
case EdgeAtom::Direction::BOTH:
|
||||
LOG_FATAL("Must indicate exact expansion direction here");
|
||||
}
|
||||
};
|
||||
|
||||
while (true) {
|
||||
if (MustAbort(context)) throw HintedAbortError();
|
||||
// attempt to get a value from the incoming edges
|
||||
if (current_in_edge_it_ != current_in_edges_.end()) {
|
||||
auto &edge = *current_in_edge_it_;
|
||||
++current_in_edge_it_;
|
||||
frame[self_.common_.edge_symbol] = edge;
|
||||
pull_node(edge, EdgeAtom::Direction::IN);
|
||||
return true;
|
||||
}
|
||||
|
||||
// attempt to get a value from the outgoing edges
|
||||
if (current_out_edge_it_ != current_out_edges_.end()) {
|
||||
auto &edge = *current_out_edge_it_;
|
||||
++current_out_edge_it_;
|
||||
if (self_.common_.direction == EdgeAtom::Direction::BOTH && edge.IsCycle()) {
|
||||
continue;
|
||||
};
|
||||
frame[self_.common_.edge_symbol] = edge;
|
||||
pull_node(edge, EdgeAtom::Direction::OUT);
|
||||
return true;
|
||||
}
|
||||
|
||||
// If we are here, either the edges have not been initialized,
|
||||
// or they have been exhausted. Attempt to initialize the edges.
|
||||
if (!InitEdges(frame, context)) return false;
|
||||
|
||||
// we have re-initialized the edges, continue with the loop
|
||||
}
|
||||
}
|
||||
|
||||
void Shutdown() override { input_cursor_->Shutdown(); }
|
||||
|
||||
void Reset() override {
|
||||
input_cursor_->Reset();
|
||||
current_in_edges_.clear();
|
||||
current_out_edges_.clear();
|
||||
current_in_edge_it_ = current_in_edges_.end();
|
||||
current_out_edge_it_ = current_out_edges_.end();
|
||||
}
|
||||
|
||||
private:
|
||||
const Expand &self_;
|
||||
const UniqueCursorPtr input_cursor_;
|
||||
std::vector<EdgeAccessor> current_in_edges_;
|
||||
std::vector<EdgeAccessor> current_out_edges_;
|
||||
std::vector<EdgeAccessor>::iterator current_in_edge_it_;
|
||||
std::vector<EdgeAccessor>::iterator current_out_edge_it_;
|
||||
};
|
||||
|
||||
} // namespace memgraph::query::v2::plan
|
||||
|
@ -22,9 +22,9 @@
|
||||
#include <variant>
|
||||
#include <vector>
|
||||
|
||||
#include "expr/semantic/symbol.hpp"
|
||||
#include "query/v2/common.hpp"
|
||||
#include "query/v2/frontend/ast/ast.hpp"
|
||||
#include "expr/semantic/symbol.hpp"
|
||||
#include "query/v2/bindings/typed_value.hpp"
|
||||
#include "query/v2/bindings/frame.hpp"
|
||||
#include "query/v2/bindings/symbol_table.hpp"
|
||||
|
@ -31,12 +31,9 @@ class VertexCountCache {
|
||||
public:
|
||||
explicit VertexCountCache(TDbAccessor *shard_request_manager) : shard_request_manager_{shard_request_manager} {}
|
||||
|
||||
auto NameToLabel(const std::string &name) { return shard_request_manager_->LabelNameToLabelId(name); }
|
||||
auto NameToLabel(const std::string &name) { return shard_request_manager_->NameToLabel(name); }
|
||||
auto NameToProperty(const std::string &name) { return shard_request_manager_->NameToProperty(name); }
|
||||
auto NameToEdgeType(const std::string & /*name*/) {
|
||||
MG_ASSERT(false, "NameToEdgeType");
|
||||
return storage::v3::EdgeTypeId::FromInt(0);
|
||||
}
|
||||
auto NameToEdgeType(const std::string &name) { return shard_request_manager_->NameToEdgeType(name); }
|
||||
|
||||
int64_t VerticesCount() { return 1; }
|
||||
|
||||
@ -55,7 +52,8 @@ class VertexCountCache {
|
||||
return 1;
|
||||
}
|
||||
|
||||
bool LabelIndexExists(storage::v3::LabelId /*label*/) { return false; }
|
||||
// For now return true if label is primary label
|
||||
bool LabelIndexExists(storage::v3::LabelId label) { return shard_request_manager_->IsPrimaryLabel(label); }
|
||||
|
||||
bool LabelPropertyIndexExists(storage::v3::LabelId /*label*/, storage::v3::PropertyId /*property*/) { return false; }
|
||||
|
||||
|
@ -50,23 +50,24 @@ using PropertyId = memgraph::storage::v3::PropertyId;
|
||||
using EdgeTypeId = memgraph::storage::v3::EdgeTypeId;
|
||||
|
||||
struct EdgeType {
|
||||
uint64_t id;
|
||||
EdgeTypeId id;
|
||||
friend bool operator==(const EdgeType &lhs, const EdgeType &rhs) = default;
|
||||
};
|
||||
|
||||
struct EdgeId {
|
||||
Gid gid;
|
||||
|
||||
friend bool operator==(const EdgeId &lhs, const EdgeId &rhs) { return lhs.gid == rhs.gid; }
|
||||
friend bool operator<(const EdgeId &lhs, const EdgeId &rhs) { return lhs.gid < rhs.gid; }
|
||||
};
|
||||
|
||||
struct Edge {
|
||||
VertexId src;
|
||||
VertexId dst;
|
||||
std::optional<std::vector<std::pair<PropertyId, Value>>> properties;
|
||||
std::vector<std::pair<PropertyId, Value>> properties;
|
||||
EdgeId id;
|
||||
EdgeType type;
|
||||
friend bool operator==(const Edge &lhs, const Edge &rhs) {
|
||||
return (lhs.src == rhs.src) && (lhs.dst == rhs.dst) && (lhs.type == rhs.type);
|
||||
}
|
||||
friend bool operator==(const Edge &lhs, const Edge &rhs) { return lhs.id == rhs.id; }
|
||||
};
|
||||
|
||||
struct Vertex {
|
||||
@ -75,16 +76,6 @@ struct Vertex {
|
||||
friend bool operator==(const Vertex &lhs, const Vertex &rhs) { return lhs.id == rhs.id; }
|
||||
};
|
||||
|
||||
struct PathPart {
|
||||
Vertex dst;
|
||||
Gid edge;
|
||||
};
|
||||
|
||||
struct Path {
|
||||
Vertex src;
|
||||
std::vector<PathPart> parts;
|
||||
};
|
||||
|
||||
struct Null {};
|
||||
|
||||
struct Value {
|
||||
@ -134,13 +125,9 @@ struct Value {
|
||||
case Type::Map:
|
||||
std::destroy_at(&map_v);
|
||||
return;
|
||||
|
||||
case Type::Vertex:
|
||||
std::destroy_at(&vertex_v);
|
||||
return;
|
||||
case Type::Path:
|
||||
std::destroy_at(&path_v);
|
||||
return;
|
||||
case Type::Edge:
|
||||
std::destroy_at(&edge_v);
|
||||
}
|
||||
@ -174,9 +161,6 @@ struct Value {
|
||||
case Type::Edge:
|
||||
new (&edge_v) Edge(other.edge_v);
|
||||
return;
|
||||
case Type::Path:
|
||||
new (&path_v) Path(other.path_v);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@ -208,9 +192,6 @@ struct Value {
|
||||
case Type::Edge:
|
||||
new (&edge_v) Edge(std::move(other.edge_v));
|
||||
break;
|
||||
case Type::Path:
|
||||
new (&path_v) Path(std::move(other.path_v));
|
||||
break;
|
||||
}
|
||||
|
||||
other.DestroyValue();
|
||||
@ -250,9 +231,6 @@ struct Value {
|
||||
case Type::Edge:
|
||||
new (&edge_v) Edge(other.edge_v);
|
||||
break;
|
||||
case Type::Path:
|
||||
new (&path_v) Path(other.path_v);
|
||||
break;
|
||||
}
|
||||
|
||||
return *this;
|
||||
@ -291,9 +269,6 @@ struct Value {
|
||||
case Type::Edge:
|
||||
new (&edge_v) Edge(std::move(other.edge_v));
|
||||
break;
|
||||
case Type::Path:
|
||||
new (&path_v) Path(std::move(other.path_v));
|
||||
break;
|
||||
}
|
||||
|
||||
other.DestroyValue();
|
||||
@ -301,7 +276,7 @@ struct Value {
|
||||
|
||||
return *this;
|
||||
}
|
||||
enum class Type : uint8_t { Null, Bool, Int64, Double, String, List, Map, Vertex, Edge, Path };
|
||||
enum class Type : uint8_t { Null, Bool, Int64, Double, String, List, Map, Vertex, Edge };
|
||||
Type type{Type::Null};
|
||||
union {
|
||||
Null null_v;
|
||||
@ -313,7 +288,6 @@ struct Value {
|
||||
std::map<std::string, Value> map_v;
|
||||
Vertex vertex_v;
|
||||
Edge edge_v;
|
||||
Path path_v;
|
||||
};
|
||||
|
||||
friend bool operator==(const Value &lhs, const Value &rhs) {
|
||||
@ -339,26 +313,10 @@ struct Value {
|
||||
return lhs.vertex_v == rhs.vertex_v;
|
||||
case Value::Type::Edge:
|
||||
return lhs.edge_v == rhs.edge_v;
|
||||
case Value::Type::Path:
|
||||
return true;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
struct ValuesMap {
|
||||
std::unordered_map<PropertyId, Value> values_map;
|
||||
};
|
||||
|
||||
struct MappedValues {
|
||||
std::vector<ValuesMap> values_map;
|
||||
};
|
||||
|
||||
struct ListedValues {
|
||||
std::vector<std::vector<Value>> properties;
|
||||
};
|
||||
|
||||
using Values = std::variant<ListedValues, MappedValues>;
|
||||
|
||||
struct Expression {
|
||||
std::string expression;
|
||||
};
|
||||
@ -378,20 +336,28 @@ enum class StorageView { OLD = 0, NEW = 1 };
|
||||
|
||||
struct ScanVerticesRequest {
|
||||
Hlc transaction_id;
|
||||
// This should be optional
|
||||
VertexId start_id;
|
||||
// The empty optional means return all of the properties, while an empty list means do not return any properties
|
||||
std::optional<std::vector<PropertyId>> props_to_return;
|
||||
// expression that determines if vertex is returned or not
|
||||
std::vector<std::string> filter_expressions;
|
||||
// expression whose result is returned for every vertex
|
||||
std::vector<std::string> vertex_expressions;
|
||||
std::optional<size_t> batch_limit;
|
||||
std::vector<OrderBy> order_bys;
|
||||
StorageView storage_view{StorageView::NEW};
|
||||
|
||||
std::optional<Label> label;
|
||||
std::optional<std::pair<PropertyId, std::string>> property_expression_pair;
|
||||
std::optional<std::vector<std::string>> filter_expressions;
|
||||
};
|
||||
|
||||
struct ScanResultRow {
|
||||
Vertex vertex;
|
||||
// empty() is no properties returned
|
||||
// This should be changed to std::map<PropertyId, Value>
|
||||
std::vector<std::pair<PropertyId, Value>> props;
|
||||
std::vector<Value> evaluated_vertex_expressions;
|
||||
};
|
||||
|
||||
struct ScanVerticesResponse {
|
||||
@ -404,6 +370,7 @@ using VertexOrEdgeIds = std::variant<VertexId, EdgeId>;
|
||||
|
||||
struct GetPropertiesRequest {
|
||||
Hlc transaction_id;
|
||||
// Shouldn't contain mixed vertex and edge ids
|
||||
VertexOrEdgeIds vertex_or_edge_ids;
|
||||
std::vector<PropertyId> property_ids;
|
||||
std::vector<Expression> expressions;
|
||||
@ -415,44 +382,49 @@ struct GetPropertiesRequest {
|
||||
|
||||
struct GetPropertiesResponse {
|
||||
bool success;
|
||||
Values values;
|
||||
};
|
||||
|
||||
enum class EdgeDirection : uint8_t { OUT = 1, IN = 2, BOTH = 3 };
|
||||
|
||||
struct VertexEdgeId {
|
||||
VertexId vertex_id;
|
||||
std::optional<EdgeId> next_id;
|
||||
};
|
||||
|
||||
struct ExpandOneRequest {
|
||||
// TODO(antaljanosbenjamin): Filtering based on the id of the other end of the edge?
|
||||
Hlc transaction_id;
|
||||
std::vector<VertexId> src_vertices;
|
||||
// return types that type is in this list
|
||||
// empty means all the types
|
||||
std::vector<EdgeType> edge_types;
|
||||
EdgeDirection direction;
|
||||
EdgeDirection direction{EdgeDirection::OUT};
|
||||
// Wether to return multiple edges between the same neighbors
|
||||
bool only_unique_neighbor_rows = false;
|
||||
// The empty optional means return all of the properties, while an empty
|
||||
// list means do not return any properties
|
||||
// TODO(antaljanosbenjamin): All of the special values should be communicated through a single vertex object
|
||||
// after schema is implemented
|
||||
// Special values are accepted:
|
||||
// * __mg__labels
|
||||
// The empty optional means return all of the properties, while an empty list means do not return any properties
|
||||
std::optional<std::vector<PropertyId>> src_vertex_properties;
|
||||
// TODO(antaljanosbenjamin): All of the special values should be communicated through a single vertex object
|
||||
// after schema is implemented
|
||||
// Special values are accepted:
|
||||
// * __mg__dst_id (Vertex, but without labels)
|
||||
// * __mg__type (binary)
|
||||
// The empty optional means return all of the properties, while an empty list means do not return any properties
|
||||
std::optional<std::vector<PropertyId>> edge_properties;
|
||||
// QUESTION(antaljanosbenjamin): Maybe also add possibility to expressions evaluated on the source vertex?
|
||||
// List of expressions evaluated on edges
|
||||
std::vector<Expression> expressions;
|
||||
|
||||
std::vector<std::string> vertex_expressions;
|
||||
std::vector<std::string> edge_expressions;
|
||||
|
||||
std::optional<std::vector<OrderBy>> order_by;
|
||||
// Limit the edges or the vertices?
|
||||
std::optional<size_t> limit;
|
||||
std::optional<Filter> filter;
|
||||
std::vector<std::string> filters;
|
||||
};
|
||||
|
||||
struct ExpandOneResultRow {
|
||||
struct EdgeWithAllProperties {
|
||||
VertexId other_end;
|
||||
EdgeType type;
|
||||
Gid gid;
|
||||
std::map<PropertyId, Value> properties;
|
||||
};
|
||||
|
||||
struct EdgeWithSpecificProperties {
|
||||
VertexId other_end;
|
||||
EdgeType type;
|
||||
Gid gid;
|
||||
std::vector<Value> properties;
|
||||
};
|
||||
|
||||
// NOTE: This struct could be a single Values with columns something like this:
|
||||
// src_vertex(Vertex), vertex_prop1(Value), vertex_prop2(Value), edges(list<Value>)
|
||||
// where edges might be a list of:
|
||||
@ -461,15 +433,17 @@ struct ExpandOneResultRow {
|
||||
// The drawback of this is currently the key of the map is always interpreted as a string in Value, not as an
|
||||
// integer, which should be in case of mapped properties.
|
||||
Vertex src_vertex;
|
||||
std::optional<std::map<PropertyId, Value>> src_vertex_properties;
|
||||
std::map<PropertyId, Value> src_vertex_properties;
|
||||
|
||||
// NOTE: If the desired edges are specified in the request,
|
||||
// edges_with_specific_properties will have a value and it will
|
||||
// return the properties as a vector of property values. The order
|
||||
// of the values returned should be the same as the PropertyIds
|
||||
// were defined in the request.
|
||||
std::optional<std::vector<std::tuple<VertexId, Gid, std::map<PropertyId, Value>>>> edges_with_all_properties;
|
||||
std::optional<std::vector<std::tuple<VertexId, Gid, std::vector<Value>>>> edges_with_specific_properties;
|
||||
std::vector<EdgeWithAllProperties> in_edges_with_all_properties;
|
||||
std::vector<EdgeWithSpecificProperties> in_edges_with_specific_properties;
|
||||
std::vector<EdgeWithAllProperties> out_edges_with_all_properties;
|
||||
std::vector<EdgeWithSpecificProperties> out_edges_with_specific_properties;
|
||||
};
|
||||
|
||||
struct ExpandOneResponse {
|
||||
@ -479,6 +453,7 @@ struct ExpandOneResponse {
|
||||
|
||||
struct UpdateVertexProp {
|
||||
PrimaryKey primary_key;
|
||||
// This should be a map
|
||||
std::vector<std::pair<PropertyId, Value>> property_updates;
|
||||
};
|
||||
|
||||
@ -486,6 +461,7 @@ struct UpdateEdgeProp {
|
||||
EdgeId edge_id;
|
||||
VertexId src;
|
||||
VertexId dst;
|
||||
// This should be a map
|
||||
std::vector<std::pair<PropertyId, Value>> property_updates;
|
||||
};
|
||||
|
||||
@ -495,12 +471,7 @@ struct UpdateEdgeProp {
|
||||
struct NewVertex {
|
||||
std::vector<Label> label_ids;
|
||||
PrimaryKey primary_key;
|
||||
std::vector<std::pair<PropertyId, Value>> properties;
|
||||
};
|
||||
|
||||
struct NewVertexLabel {
|
||||
std::string label;
|
||||
PrimaryKey primary_key;
|
||||
// This should be a map
|
||||
std::vector<std::pair<PropertyId, Value>> properties;
|
||||
};
|
||||
|
||||
@ -536,12 +507,22 @@ struct UpdateVerticesResponse {
|
||||
/*
|
||||
* Edges
|
||||
*/
|
||||
struct CreateEdgesRequest {
|
||||
Hlc transaction_id;
|
||||
std::vector<Edge> edges;
|
||||
// No need for specifying direction since it has to be in one, and src and dest
|
||||
// vertices clearly communicate the direction
|
||||
struct NewExpand {
|
||||
EdgeId id;
|
||||
EdgeType type;
|
||||
VertexId src_vertex;
|
||||
VertexId dest_vertex;
|
||||
std::vector<std::pair<PropertyId, Value>> properties;
|
||||
};
|
||||
|
||||
struct CreateEdgesResponse {
|
||||
struct CreateExpandRequest {
|
||||
Hlc transaction_id;
|
||||
std::vector<NewExpand> new_expands;
|
||||
};
|
||||
|
||||
struct CreateExpandResponse {
|
||||
bool success;
|
||||
};
|
||||
|
||||
@ -576,8 +557,8 @@ using ReadRequests = std::variant<ExpandOneRequest, GetPropertiesRequest, ScanVe
|
||||
using ReadResponses = std::variant<ExpandOneResponse, GetPropertiesResponse, ScanVerticesResponse>;
|
||||
|
||||
using WriteRequests = std::variant<CreateVerticesRequest, DeleteVerticesRequest, UpdateVerticesRequest,
|
||||
CreateEdgesRequest, DeleteEdgesRequest, UpdateEdgesRequest, CommitRequest>;
|
||||
CreateExpandRequest, DeleteEdgesRequest, UpdateEdgesRequest, CommitRequest>;
|
||||
using WriteResponses = std::variant<CreateVerticesResponse, DeleteVerticesResponse, UpdateVerticesResponse,
|
||||
CreateEdgesResponse, DeleteEdgesResponse, UpdateEdgesResponse, CommitResponse>;
|
||||
CreateExpandResponse, DeleteEdgesResponse, UpdateEdgesResponse, CommitResponse>;
|
||||
|
||||
} // namespace memgraph::msgs
|
||||
|
@ -14,7 +14,9 @@
|
||||
#include <chrono>
|
||||
#include <deque>
|
||||
#include <iostream>
|
||||
#include <iterator>
|
||||
#include <map>
|
||||
#include <numeric>
|
||||
#include <optional>
|
||||
#include <random>
|
||||
#include <set>
|
||||
@ -54,18 +56,20 @@ class RsmStorageClientManager {
|
||||
RsmStorageClientManager &operator=(RsmStorageClientManager &&) = delete;
|
||||
~RsmStorageClientManager() = default;
|
||||
|
||||
void AddClient(const LabelId label_id, Shard key, TStorageClient client) {
|
||||
cli_cache_[label_id].insert({std::move(key), std::move(client)});
|
||||
}
|
||||
void AddClient(Shard key, TStorageClient client) { cli_cache_.emplace(std::move(key), std::move(client)); }
|
||||
|
||||
bool Exists(const LabelId label_id, const Shard &key) { return cli_cache_[label_id].contains(key); }
|
||||
bool Exists(const Shard &key) { return cli_cache_.contains(key); }
|
||||
|
||||
void PurgeCache() { cli_cache_.clear(); }
|
||||
|
||||
TStorageClient &GetClient(const LabelId label_id, const Shard &key) { return cli_cache_[label_id].find(key)->second; }
|
||||
TStorageClient &GetClient(const Shard &key) {
|
||||
auto it = cli_cache_.find(key);
|
||||
MG_ASSERT(it != cli_cache_.end(), "Non-existing shard client");
|
||||
return it->second;
|
||||
}
|
||||
|
||||
private:
|
||||
std::map<LabelId, std::map<Shard, TStorageClient>> cli_cache_;
|
||||
std::map<Shard, TStorageClient> cli_cache_;
|
||||
};
|
||||
|
||||
template <typename TRequest>
|
||||
@ -93,7 +97,7 @@ struct ExecutionState {
|
||||
// a partial response on a shard(if there is one) is finished and we can send off the request for the next batch.
|
||||
std::vector<Shard> shard_cache;
|
||||
// 1-1 mapping with `shard_cache`.
|
||||
// A vector that tracks request metatdata for each shard (For example, next_id for a ScanAll on Shard A)
|
||||
// A vector that tracks request metadata for each shard (For example, next_id for a ScanAll on Shard A)
|
||||
std::vector<TRequest> requests;
|
||||
State state = INITIALIZING;
|
||||
};
|
||||
@ -114,16 +118,19 @@ class ShardRequestManagerInterface {
|
||||
virtual std::vector<VertexAccessor> Request(ExecutionState<ScanVerticesRequest> &state) = 0;
|
||||
virtual std::vector<CreateVerticesResponse> Request(ExecutionState<CreateVerticesRequest> &state,
|
||||
std::vector<NewVertex> new_vertices) = 0;
|
||||
virtual std::vector<ExpandOneResponse> Request(ExecutionState<ExpandOneRequest> &state) = 0;
|
||||
// TODO(antaljanosbenjamin): unify the GetXXXId and NameToId functions to have consistent naming, return type and
|
||||
// implementation
|
||||
virtual std::vector<ExpandOneResultRow> Request(ExecutionState<ExpandOneRequest> &state,
|
||||
ExpandOneRequest request) = 0;
|
||||
virtual std::vector<CreateExpandResponse> Request(ExecutionState<CreateExpandRequest> &state,
|
||||
std::vector<NewExpand> new_edges) = 0;
|
||||
|
||||
virtual storage::v3::EdgeTypeId NameToEdgeType(const std::string &name) const = 0;
|
||||
virtual storage::v3::PropertyId NameToProperty(const std::string &name) const = 0;
|
||||
virtual storage::v3::LabelId LabelNameToLabelId(const std::string &name) const = 0;
|
||||
virtual storage::v3::LabelId NameToLabel(const std::string &name) const = 0;
|
||||
virtual const std::string &PropertyToName(memgraph::storage::v3::PropertyId prop) const = 0;
|
||||
virtual const std::string &LabelToName(memgraph::storage::v3::LabelId label) const = 0;
|
||||
virtual const std::string &EdgeTypeToName(memgraph::storage::v3::EdgeTypeId type) const = 0;
|
||||
virtual bool IsPrimaryKey(PropertyId name) const = 0;
|
||||
virtual bool IsPrimaryLabel(LabelId label) const = 0;
|
||||
virtual bool IsPrimaryKey(LabelId primary_label, PropertyId property) const = 0;
|
||||
};
|
||||
|
||||
// TODO(kostasrim)rename this class template
|
||||
@ -186,7 +193,7 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
|
||||
for (const auto &[label, space] : shards_map_.label_spaces) {
|
||||
for (const auto &[key, shard] : space.shards) {
|
||||
auto &storage_client = GetStorageClientForShard(shard, label);
|
||||
auto &storage_client = GetStorageClientForShard(shard);
|
||||
// TODO(kostasrim) Currently requests return the result directly. Adjust this when the API works MgFuture
|
||||
// instead.
|
||||
auto commit_response = storage_client.SendWriteRequest(commit_req);
|
||||
@ -204,36 +211,39 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
}
|
||||
}
|
||||
|
||||
storage::v3::EdgeTypeId NameToEdgeType(const std::string & /*name*/) const override {
|
||||
return memgraph::storage::v3::EdgeTypeId::FromUint(0);
|
||||
storage::v3::EdgeTypeId NameToEdgeType(const std::string &name) const override {
|
||||
return shards_map_.GetEdgeTypeId(name).value();
|
||||
}
|
||||
|
||||
storage::v3::PropertyId NameToProperty(const std::string &name) const override {
|
||||
return *shards_map_.GetPropertyId(name);
|
||||
return shards_map_.GetPropertyId(name).value();
|
||||
}
|
||||
|
||||
memgraph::storage::v3::LabelId LabelNameToLabelId(const std::string &name) const override {
|
||||
return shards_map_.GetLabelId(name);
|
||||
storage::v3::LabelId NameToLabel(const std::string &name) const override {
|
||||
return shards_map_.GetLabelId(name).value();
|
||||
}
|
||||
|
||||
const std::string &PropertyToName(memgraph::storage::v3::PropertyId /*prop*/) const override {
|
||||
static std::string str{"dummy__prop"};
|
||||
return str;
|
||||
const std::string &PropertyToName(memgraph::storage::v3::PropertyId prop) const override {
|
||||
return shards_map_.GetPropertyName(prop);
|
||||
}
|
||||
const std::string &LabelToName(memgraph::storage::v3::LabelId /*label*/) const override {
|
||||
static std::string str{"dummy__label"};
|
||||
return str;
|
||||
const std::string &LabelToName(memgraph::storage::v3::LabelId label) const override {
|
||||
return shards_map_.GetLabelName(label);
|
||||
}
|
||||
const std::string &EdgeTypeToName(memgraph::storage::v3::EdgeTypeId /*type*/) const override {
|
||||
static std::string str{"dummy__edgetype"};
|
||||
return str;
|
||||
const std::string &EdgeTypeToName(memgraph::storage::v3::EdgeTypeId type) const override {
|
||||
return shards_map_.GetEdgeTypeName(type);
|
||||
}
|
||||
|
||||
bool IsPrimaryKey(const PropertyId name) const override {
|
||||
return std::find_if(shards_map_.properties.begin(), shards_map_.properties.end(),
|
||||
[name](auto &pr) { return pr.second == name; }) != shards_map_.properties.end();
|
||||
bool IsPrimaryKey(LabelId primary_label, PropertyId property) const override {
|
||||
const auto schema_it = shards_map_.schemas.find(primary_label);
|
||||
MG_ASSERT(schema_it != shards_map_.schemas.end(), "Invalid primary label id: {}", primary_label.AsUint());
|
||||
|
||||
return std::find_if(schema_it->second.begin(), schema_it->second.end(), [property](const auto &schema_prop) {
|
||||
return schema_prop.property_id == property;
|
||||
}) != schema_it->second.end();
|
||||
}
|
||||
|
||||
bool IsPrimaryLabel(LabelId label) const override { return shards_map_.label_spaces.contains(label); }
|
||||
|
||||
// TODO(kostasrim) Simplify return result
|
||||
std::vector<VertexAccessor> Request(ExecutionState<ScanVerticesRequest> &state) override {
|
||||
MaybeInitializeExecutionState(state);
|
||||
@ -250,6 +260,7 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
for (const auto &shard : state.shard_cache) {
|
||||
paginated_response_tracker.insert(std::make_pair(shard, PaginatedResponseState::Pending));
|
||||
}
|
||||
|
||||
do {
|
||||
AwaitOnPaginatedRequests(state, responses, paginated_response_tracker);
|
||||
} while (!all_requests_gathered(paginated_response_tracker));
|
||||
@ -281,13 +292,41 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
return responses;
|
||||
}
|
||||
|
||||
std::vector<ExpandOneResponse> Request(ExecutionState<ExpandOneRequest> &state) override {
|
||||
std::vector<CreateExpandResponse> Request(ExecutionState<CreateExpandRequest> &state,
|
||||
std::vector<NewExpand> new_edges) override {
|
||||
MG_ASSERT(!new_edges.empty());
|
||||
MaybeInitializeExecutionState(state, new_edges);
|
||||
std::vector<CreateExpandResponse> responses;
|
||||
auto &shard_cache_ref = state.shard_cache;
|
||||
size_t id{0};
|
||||
for (auto shard_it = shard_cache_ref.begin(); shard_it != shard_cache_ref.end(); ++id) {
|
||||
auto &storage_client = GetStorageClientForShard(*shard_it);
|
||||
WriteRequests req = state.requests[id];
|
||||
auto write_response_result = storage_client.SendWriteRequest(std::move(req));
|
||||
if (write_response_result.HasError()) {
|
||||
throw std::runtime_error("CreateVertices request timedout");
|
||||
}
|
||||
WriteResponses response_variant = write_response_result.GetValue();
|
||||
CreateExpandResponse mapped_response = std::get<CreateExpandResponse>(response_variant);
|
||||
|
||||
if (!mapped_response.success) {
|
||||
throw std::runtime_error("CreateExpand request did not succeed");
|
||||
}
|
||||
responses.push_back(mapped_response);
|
||||
shard_it = shard_cache_ref.erase(shard_it);
|
||||
}
|
||||
// We are done with this state
|
||||
MaybeCompleteState(state);
|
||||
return responses;
|
||||
}
|
||||
|
||||
std::vector<ExpandOneResultRow> Request(ExecutionState<ExpandOneRequest> &state, ExpandOneRequest request) override {
|
||||
// TODO(kostasrim)Update to limit the batch size here
|
||||
// Expansions of the destination must be handled by the caller. For example
|
||||
// match (u:L1 { prop : 1 })-[:Friend]-(v:L1)
|
||||
// For each vertex U, the ExpandOne will result in <U, Edges>. The destination vertex and its properties
|
||||
// must be fetched again with an ExpandOne(Edges.dst)
|
||||
MaybeInitializeExecutionState(state);
|
||||
MaybeInitializeExecutionState(state, std::move(request));
|
||||
std::vector<ExpandOneResponse> responses;
|
||||
auto &shard_cache_ref = state.shard_cache;
|
||||
|
||||
@ -298,9 +337,18 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
do {
|
||||
AwaitOnResponses(state, responses);
|
||||
} while (!state.shard_cache.empty());
|
||||
std::vector<ExpandOneResultRow> result_rows;
|
||||
const auto total_row_count = std::accumulate(
|
||||
responses.begin(), responses.end(), 0,
|
||||
[](const int64_t partial_count, const ExpandOneResponse &resp) { return partial_count + resp.result.size(); });
|
||||
result_rows.reserve(total_row_count);
|
||||
|
||||
for (auto &response : responses) {
|
||||
result_rows.insert(result_rows.end(), std::make_move_iterator(response.result.begin()),
|
||||
std::make_move_iterator(response.result.end()));
|
||||
}
|
||||
MaybeCompleteState(state);
|
||||
return responses;
|
||||
return result_rows;
|
||||
}
|
||||
|
||||
private:
|
||||
@ -363,25 +411,74 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
state.state = ExecutionState<CreateVerticesRequest>::EXECUTING;
|
||||
}
|
||||
|
||||
void MaybeInitializeExecutionState(ExecutionState<ScanVerticesRequest> &state) {
|
||||
void MaybeInitializeExecutionState(ExecutionState<CreateExpandRequest> &state, std::vector<NewExpand> new_expands) {
|
||||
ThrowIfStateCompleted(state);
|
||||
if (ShallNotInitializeState(state)) {
|
||||
return;
|
||||
}
|
||||
state.transaction_id = transaction_id_;
|
||||
auto shards = shards_map_.GetShards(*state.label);
|
||||
for (auto &[key, shard] : shards) {
|
||||
MG_ASSERT(!shard.empty());
|
||||
state.shard_cache.push_back(std::move(shard));
|
||||
ScanVerticesRequest rqst;
|
||||
rqst.transaction_id = transaction_id_;
|
||||
rqst.start_id.second = storage::conversions::ConvertValueVector(key);
|
||||
state.requests.push_back(std::move(rqst));
|
||||
|
||||
std::map<Shard, CreateExpandRequest> per_shard_request_table;
|
||||
auto ensure_shard_exists_in_table = [&per_shard_request_table,
|
||||
transaction_id = transaction_id_](const Shard &shard) {
|
||||
if (!per_shard_request_table.contains(shard)) {
|
||||
CreateExpandRequest create_expand_request{.transaction_id = transaction_id};
|
||||
per_shard_request_table.insert({shard, std::move(create_expand_request)});
|
||||
}
|
||||
};
|
||||
|
||||
for (auto &new_expand : new_expands) {
|
||||
const auto shard_src_vertex = shards_map_.GetShardForKey(
|
||||
new_expand.src_vertex.first.id, storage::conversions::ConvertPropertyVector(new_expand.src_vertex.second));
|
||||
const auto shard_dest_vertex = shards_map_.GetShardForKey(
|
||||
new_expand.dest_vertex.first.id, storage::conversions::ConvertPropertyVector(new_expand.dest_vertex.second));
|
||||
|
||||
ensure_shard_exists_in_table(shard_src_vertex);
|
||||
|
||||
if (shard_src_vertex != shard_dest_vertex) {
|
||||
ensure_shard_exists_in_table(shard_dest_vertex);
|
||||
per_shard_request_table[shard_dest_vertex].new_expands.push_back(new_expand);
|
||||
}
|
||||
per_shard_request_table[shard_src_vertex].new_expands.push_back(std::move(new_expand));
|
||||
}
|
||||
|
||||
for (auto &[shard, request] : per_shard_request_table) {
|
||||
state.shard_cache.push_back(shard);
|
||||
state.requests.push_back(std::move(request));
|
||||
}
|
||||
state.state = ExecutionState<CreateExpandRequest>::EXECUTING;
|
||||
}
|
||||
|
||||
void MaybeInitializeExecutionState(ExecutionState<ScanVerticesRequest> &state) {
|
||||
ThrowIfStateCompleted(state);
|
||||
if (ShallNotInitializeState(state)) {
|
||||
return;
|
||||
}
|
||||
|
||||
std::vector<coordinator::Shards> multi_shards;
|
||||
state.transaction_id = transaction_id_;
|
||||
if (!state.label) {
|
||||
multi_shards = shards_map_.GetAllShards();
|
||||
} else {
|
||||
const auto label_id = shards_map_.GetLabelId(*state.label);
|
||||
MG_ASSERT(label_id);
|
||||
MG_ASSERT(IsPrimaryLabel(*label_id));
|
||||
multi_shards = {shards_map_.GetShardsForLabel(*state.label)};
|
||||
}
|
||||
for (auto &shards : multi_shards) {
|
||||
for (auto &[key, shard] : shards) {
|
||||
MG_ASSERT(!shard.empty());
|
||||
state.shard_cache.push_back(std::move(shard));
|
||||
ScanVerticesRequest rqst;
|
||||
rqst.transaction_id = transaction_id_;
|
||||
rqst.start_id.second = storage::conversions::ConvertValueVector(key);
|
||||
state.requests.push_back(std::move(rqst));
|
||||
}
|
||||
}
|
||||
state.state = ExecutionState<ScanVerticesRequest>::EXECUTING;
|
||||
}
|
||||
|
||||
void MaybeInitializeExecutionState(ExecutionState<ExpandOneRequest> &state) {
|
||||
void MaybeInitializeExecutionState(ExecutionState<ExpandOneRequest> &state, ExpandOneRequest request) {
|
||||
ThrowIfStateCompleted(state);
|
||||
if (ShallNotInitializeState(state)) {
|
||||
return;
|
||||
@ -389,24 +486,18 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
state.transaction_id = transaction_id_;
|
||||
|
||||
std::map<Shard, ExpandOneRequest> per_shard_request_table;
|
||||
MG_ASSERT(state.requests.size() == 1);
|
||||
auto top_level_rqst = std::move(*state.requests.begin());
|
||||
auto top_level_rqst_template = top_level_rqst;
|
||||
auto top_level_rqst_template = request;
|
||||
top_level_rqst_template.transaction_id = transaction_id_;
|
||||
top_level_rqst_template.src_vertices.clear();
|
||||
top_level_rqst_template.edge_types.clear();
|
||||
state.requests.clear();
|
||||
size_t id = 0;
|
||||
for (const auto &vertex : top_level_rqst.src_vertices) {
|
||||
for (auto &vertex : request.src_vertices) {
|
||||
auto shard =
|
||||
shards_map_.GetShardForKey(vertex.first.id, storage::conversions::ConvertPropertyVector(vertex.second));
|
||||
if (!per_shard_request_table.contains(shard)) {
|
||||
ExpandOneRequest expand_v_rqst = top_level_rqst_template;
|
||||
per_shard_request_table.insert(std::pair(shard, std::move(expand_v_rqst)));
|
||||
per_shard_request_table.insert(std::pair(shard, top_level_rqst_template));
|
||||
state.shard_cache.push_back(shard);
|
||||
}
|
||||
per_shard_request_table[shard].src_vertices.push_back(vertex);
|
||||
per_shard_request_table[shard].edge_types.push_back(top_level_rqst.edge_types[id]);
|
||||
++id;
|
||||
}
|
||||
|
||||
for (auto &[shard, rqst] : per_shard_request_table) {
|
||||
@ -415,20 +506,19 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
state.state = ExecutionState<ExpandOneRequest>::EXECUTING;
|
||||
}
|
||||
|
||||
StorageClient &GetStorageClientForShard(Shard shard, LabelId label_id) {
|
||||
if (!storage_cli_manager_.Exists(label_id, shard)) {
|
||||
AddStorageClientToManager(shard, label_id);
|
||||
StorageClient &GetStorageClientForShard(Shard shard) {
|
||||
if (!storage_cli_manager_.Exists(shard)) {
|
||||
AddStorageClientToManager(shard);
|
||||
}
|
||||
return storage_cli_manager_.GetClient(label_id, shard);
|
||||
return storage_cli_manager_.GetClient(shard);
|
||||
}
|
||||
|
||||
StorageClient &GetStorageClientForShard(const std::string &label, const CompoundKey &key) {
|
||||
auto shard = shards_map_.GetShardForKey(label, key);
|
||||
auto label_id = shards_map_.GetLabelId(label);
|
||||
return GetStorageClientForShard(std::move(shard), label_id);
|
||||
return GetStorageClientForShard(std::move(shard));
|
||||
}
|
||||
|
||||
void AddStorageClientToManager(Shard target_shard, const LabelId &label_id) {
|
||||
void AddStorageClientToManager(Shard target_shard) {
|
||||
MG_ASSERT(!target_shard.empty());
|
||||
auto leader_addr = target_shard.front();
|
||||
std::vector<Address> addresses;
|
||||
@ -437,15 +527,19 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
addresses.push_back(std::move(address.address));
|
||||
}
|
||||
auto cli = StorageClient(io_, std::move(leader_addr.address), std::move(addresses));
|
||||
storage_cli_manager_.AddClient(label_id, target_shard, std::move(cli));
|
||||
storage_cli_manager_.AddClient(target_shard, std::move(cli));
|
||||
}
|
||||
|
||||
void SendAllRequests(ExecutionState<ScanVerticesRequest> &state) {
|
||||
int64_t shard_idx = 0;
|
||||
for (const auto &request : state.requests) {
|
||||
auto &storage_client =
|
||||
GetStorageClientForShard(*state.label, storage::conversions::ConvertPropertyVector(request.start_id.second));
|
||||
const auto ¤t_shard = state.shard_cache[shard_idx];
|
||||
|
||||
auto &storage_client = GetStorageClientForShard(current_shard);
|
||||
ReadRequests req = request;
|
||||
storage_client.SendAsyncReadRequest(request);
|
||||
|
||||
++shard_idx;
|
||||
}
|
||||
}
|
||||
|
||||
@ -461,7 +555,7 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
new_vertex.label_ids.erase(new_vertex.label_ids.begin());
|
||||
}
|
||||
|
||||
auto &storage_client = GetStorageClientForShard(*shard_it, labels[0].id);
|
||||
auto &storage_client = GetStorageClientForShard(*shard_it);
|
||||
|
||||
WriteRequests req = req_deep_copy;
|
||||
storage_client.SendAsyncWriteRequest(req);
|
||||
@ -472,11 +566,11 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
void SendAllRequests(ExecutionState<ExpandOneRequest> &state,
|
||||
std::vector<memgraph::coordinator::Shard> &shard_cache_ref) {
|
||||
size_t id = 0;
|
||||
for (auto shard_it = shard_cache_ref.begin(); shard_it != shard_cache_ref.end(); ++id) {
|
||||
const Label primary_label = state.requests[id].src_vertices[0].first;
|
||||
auto &storage_client = GetStorageClientForShard(*shard_it, primary_label.id);
|
||||
for (auto shard_it = shard_cache_ref.begin(); shard_it != shard_cache_ref.end(); ++shard_it) {
|
||||
auto &storage_client = GetStorageClientForShard(*shard_it);
|
||||
ReadRequests req = state.requests[id];
|
||||
storage_client.SendAsyncReadRequest(req);
|
||||
++id;
|
||||
}
|
||||
}
|
||||
|
||||
@ -488,7 +582,7 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
// This is fine because all new_vertices of each request end up on the same shard
|
||||
const auto labels = state.requests[request_idx].new_vertices[0].label_ids;
|
||||
|
||||
auto &storage_client = GetStorageClientForShard(*shard_it, labels[0].id);
|
||||
auto &storage_client = GetStorageClientForShard(*shard_it);
|
||||
|
||||
auto poll_result = storage_client.AwaitAsyncWriteRequest();
|
||||
if (!poll_result) {
|
||||
@ -521,13 +615,13 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
auto &shard_cache_ref = state.shard_cache;
|
||||
int64_t request_idx = 0;
|
||||
|
||||
for (auto shard_it = shard_cache_ref.begin(); shard_it != shard_cache_ref.end(); ++request_idx) {
|
||||
auto &storage_client = GetStorageClientForShard(
|
||||
*state.label,
|
||||
storage::conversions::ConvertPropertyVector(state.requests[request_idx].src_vertices[0].second));
|
||||
for (auto shard_it = shard_cache_ref.begin(); shard_it != shard_cache_ref.end();) {
|
||||
auto &storage_client = GetStorageClientForShard(*shard_it);
|
||||
|
||||
auto poll_result = storage_client.PollAsyncReadRequest();
|
||||
if (!poll_result) {
|
||||
++shard_it;
|
||||
++request_idx;
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -550,7 +644,6 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
// Needed to maintain the 1-1 mapping between the ShardCache and the requests.
|
||||
auto it = state.requests.begin() + request_idx;
|
||||
state.requests.erase(it);
|
||||
--request_idx;
|
||||
}
|
||||
}
|
||||
|
||||
@ -568,8 +661,8 @@ class ShardRequestManager : public ShardRequestManagerInterface {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto &storage_client = GetStorageClientForShard(
|
||||
*state.label, storage::conversions::ConvertPropertyVector(state.requests[request_idx].start_id.second));
|
||||
auto &storage_client = GetStorageClientForShard(*shard_it);
|
||||
|
||||
auto await_result = storage_client.AwaitAsyncReadRequest();
|
||||
|
||||
if (!await_result) {
|
||||
|
@ -19,6 +19,8 @@ set(storage_v3_src_files
|
||||
storage.cpp
|
||||
shard_rsm.cpp
|
||||
bindings/typed_value.cpp
|
||||
expr.cpp
|
||||
request_helper.cpp
|
||||
storage.cpp)
|
||||
|
||||
# ######################
|
||||
|
@ -11,5 +11,9 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifdef MG_AST_INCLUDE_PATH
|
||||
#error You are probably trying to include some files of mg-expr from both the storage and query engines! You will have a rough time kid!
|
||||
#endif
|
||||
|
||||
#define MG_AST_INCLUDE_PATH "storage/v3/bindings/ast/ast.hpp" // NOLINT(cppcoreguidelines-macro-usage)
|
||||
#define MG_INJECTED_NAMESPACE_NAME memgraph::storage::v3 // NOLINT(cppcoreguidelines-macro-usage)
|
||||
|
@ -0,0 +1,19 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "storage/v3/bindings/bindings.hpp"
|
||||
|
||||
#include "expr/ast/pretty_print_ast_to_original_expression.hpp"
|
||||
#include "storage/v3/bindings/typed_value.hpp"
|
||||
|
||||
namespace memgraph::storage::v3 {} // namespace memgraph::storage::v3
|
@ -9,7 +9,7 @@
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "expr/typed_value.hpp"
|
||||
#include "expr/typed_value_exception.hpp"
|
||||
#include "query/v2/requests.hpp"
|
||||
#include "storage/v3/property_value.hpp"
|
||||
#include "utils/memory.hpp"
|
||||
|
210
src/storage/v3/expr.cpp
Normal file
210
src/storage/v3/expr.cpp
Normal file
@ -0,0 +1,210 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "storage/v3/expr.hpp"
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "db_accessor.hpp"
|
||||
#include "opencypher/parser.hpp"
|
||||
#include "query/v2/requests.hpp"
|
||||
#include "storage/v3/bindings/ast/ast.hpp"
|
||||
#include "storage/v3/bindings/bindings.hpp"
|
||||
#include "storage/v3/bindings/cypher_main_visitor.hpp"
|
||||
#include "storage/v3/bindings/db_accessor.hpp"
|
||||
#include "storage/v3/bindings/eval.hpp"
|
||||
#include "storage/v3/bindings/frame.hpp"
|
||||
#include "storage/v3/bindings/symbol_generator.hpp"
|
||||
#include "storage/v3/bindings/symbol_table.hpp"
|
||||
#include "storage/v3/bindings/typed_value.hpp"
|
||||
#include "storage/v3/value_conversions.hpp"
|
||||
|
||||
namespace memgraph::storage::v3 {
|
||||
|
||||
msgs::Value ConstructValueVertex(const VertexAccessor &acc, View view) {
|
||||
// Get the vertex id
|
||||
auto prim_label = acc.PrimaryLabel(view).GetValue();
|
||||
memgraph::msgs::Label value_label{.id = prim_label};
|
||||
|
||||
auto prim_key = conversions::ConvertValueVector(acc.PrimaryKey(view).GetValue());
|
||||
memgraph::msgs::VertexId vertex_id = std::make_pair(value_label, prim_key);
|
||||
|
||||
// Get the labels
|
||||
auto vertex_labels = acc.Labels(view).GetValue();
|
||||
std::vector<memgraph::msgs::Label> value_labels;
|
||||
value_labels.reserve(vertex_labels.size());
|
||||
|
||||
std::transform(vertex_labels.begin(), vertex_labels.end(), std::back_inserter(value_labels),
|
||||
[](const auto &label) { return msgs::Label{.id = label}; });
|
||||
|
||||
return msgs::Value({.id = vertex_id, .labels = value_labels});
|
||||
}
|
||||
|
||||
msgs::Value ConstructValueEdge(const EdgeAccessor &acc, View view) {
|
||||
msgs::EdgeType type = {.id = acc.EdgeType()};
|
||||
msgs::EdgeId gid = {.gid = acc.Gid().AsUint()};
|
||||
|
||||
msgs::Label src_prim_label = {.id = acc.FromVertex().primary_label};
|
||||
memgraph::msgs::VertexId src_vertex =
|
||||
std::make_pair(src_prim_label, conversions::ConvertValueVector(acc.FromVertex().primary_key));
|
||||
|
||||
msgs::Label dst_prim_label = {.id = acc.ToVertex().primary_label};
|
||||
msgs::VertexId dst_vertex =
|
||||
std::make_pair(dst_prim_label, conversions::ConvertValueVector(acc.ToVertex().primary_key));
|
||||
|
||||
auto properties = acc.Properties(view);
|
||||
|
||||
std::vector<std::pair<PropertyId, msgs::Value>> present_properties;
|
||||
if (properties.HasValue()) {
|
||||
auto props = properties.GetValue();
|
||||
std::vector<std::pair<PropertyId, msgs::Value>> present_properties;
|
||||
present_properties.reserve(props.size());
|
||||
|
||||
std::transform(props.begin(), props.end(), std::back_inserter(present_properties),
|
||||
[](std::pair<const PropertyId, PropertyValue> &prop) {
|
||||
return std::make_pair(prop.first, conversions::FromPropertyValueToValue(std::move(prop.second)));
|
||||
});
|
||||
}
|
||||
|
||||
return msgs::Value(msgs::Edge{.src = std::move(src_vertex),
|
||||
.dst = std::move(dst_vertex),
|
||||
.properties = std::move(present_properties),
|
||||
.id = gid,
|
||||
.type = type});
|
||||
}
|
||||
|
||||
msgs::Value FromTypedValueToValue(TypedValue &&tv) {
|
||||
switch (tv.type()) {
|
||||
case TypedValue::Type::Bool:
|
||||
return msgs::Value(tv.ValueBool());
|
||||
case TypedValue::Type::Double:
|
||||
return msgs::Value(tv.ValueDouble());
|
||||
case TypedValue::Type::Int:
|
||||
return msgs::Value(tv.ValueInt());
|
||||
case TypedValue::Type::List: {
|
||||
std::vector<msgs::Value> list;
|
||||
auto &tv_list = tv.ValueList();
|
||||
list.reserve(tv_list.size());
|
||||
std::transform(tv_list.begin(), tv_list.end(), std::back_inserter(list),
|
||||
[](auto &elem) { return FromTypedValueToValue(std::move(elem)); });
|
||||
return msgs::Value(list);
|
||||
}
|
||||
case TypedValue::Type::Map: {
|
||||
std::map<std::string, msgs::Value> map;
|
||||
for (auto &[key, val] : tv.ValueMap()) {
|
||||
map.emplace(key, FromTypedValueToValue(std::move(val)));
|
||||
}
|
||||
return msgs::Value(map);
|
||||
}
|
||||
case TypedValue::Type::Null:
|
||||
return {};
|
||||
case TypedValue::Type::String:
|
||||
return msgs::Value((std::string(tv.ValueString())));
|
||||
case TypedValue::Type::Vertex:
|
||||
return ConstructValueVertex(tv.ValueVertex(), View::OLD);
|
||||
case TypedValue::Type::Edge:
|
||||
return ConstructValueEdge(tv.ValueEdge(), View::OLD);
|
||||
|
||||
// TBD -> we need to specify temporal types, not a priority.
|
||||
case TypedValue::Type::Date:
|
||||
case TypedValue::Type::LocalTime:
|
||||
case TypedValue::Type::LocalDateTime:
|
||||
case TypedValue::Type::Duration:
|
||||
case TypedValue::Type::Path: {
|
||||
MG_ASSERT(false, "This conversion between TypedValue and Value is not implemented yet!");
|
||||
break;
|
||||
}
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
std::vector<msgs::Value> ConvertToValueVectorFromTypedValueVector(
|
||||
std::vector<memgraph::storage::v3::TypedValue> &&vec) {
|
||||
std::vector<msgs::Value> ret;
|
||||
ret.reserve(vec.size());
|
||||
|
||||
std::transform(vec.begin(), vec.end(), std::back_inserter(ret),
|
||||
[](auto &elem) { return FromTypedValueToValue(std::move(elem)); });
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::vector<PropertyId> NamesToProperties(const std::vector<std::string> &property_names, DbAccessor &dba) {
|
||||
std::vector<PropertyId> properties;
|
||||
properties.reserve(property_names.size());
|
||||
|
||||
for (const auto &name : property_names) {
|
||||
properties.push_back(dba.NameToProperty(name));
|
||||
}
|
||||
return properties;
|
||||
}
|
||||
|
||||
std::vector<memgraph::storage::v3::LabelId> NamesToLabels(const std::vector<std::string> &label_names,
|
||||
DbAccessor &dba) {
|
||||
std::vector<memgraph::storage::v3::LabelId> labels;
|
||||
labels.reserve(label_names.size());
|
||||
for (const auto &name : label_names) {
|
||||
labels.push_back(dba.NameToLabel(name));
|
||||
}
|
||||
return labels;
|
||||
}
|
||||
|
||||
std::any ParseExpression(const std::string &expr, memgraph::expr::AstStorage &storage) {
|
||||
memgraph::frontend::opencypher::Parser<memgraph::frontend::opencypher::ParserOpTag::EXPRESSION> parser(expr);
|
||||
ParsingContext pc;
|
||||
CypherMainVisitor visitor(pc, &storage);
|
||||
|
||||
auto *ast = parser.tree();
|
||||
return visitor.visit(ast);
|
||||
}
|
||||
|
||||
TypedValue ComputeExpression(DbAccessor &dba, const std::optional<memgraph::storage::v3::VertexAccessor> &v_acc,
|
||||
const std::optional<memgraph::storage::v3::EdgeAccessor> &e_acc,
|
||||
const std::string &expression, std::string_view node_name, std::string_view edge_name) {
|
||||
AstStorage storage;
|
||||
Frame frame{1 + 1}; // 1 for the node_identifier, 1 for the edge_identifier
|
||||
SymbolTable symbol_table;
|
||||
EvaluationContext ctx;
|
||||
|
||||
ExpressionEvaluator eval{&frame, symbol_table, ctx, &dba, View::OLD};
|
||||
auto expr = ParseExpression(expression, storage);
|
||||
|
||||
auto node_identifier = Identifier(std::string(node_name), false);
|
||||
auto edge_identifier = Identifier(std::string(edge_name), false);
|
||||
|
||||
std::vector<Identifier *> identifiers;
|
||||
identifiers.push_back(&node_identifier);
|
||||
identifiers.push_back(&edge_identifier);
|
||||
|
||||
expr::SymbolGenerator symbol_generator(&symbol_table, identifiers);
|
||||
(std::any_cast<Expression *>(expr))->Accept(symbol_generator);
|
||||
|
||||
if (node_identifier.symbol_pos_ != -1) {
|
||||
MG_ASSERT(std::find_if(symbol_table.table().begin(), symbol_table.table().end(),
|
||||
[&node_name](const std::pair<int32_t, Symbol> &position_symbol_pair) {
|
||||
return position_symbol_pair.second.name() == node_name;
|
||||
}) != symbol_table.table().end());
|
||||
|
||||
frame[symbol_table.at(node_identifier)] = *v_acc;
|
||||
}
|
||||
|
||||
if (edge_identifier.symbol_pos_ != -1) {
|
||||
MG_ASSERT(std::find_if(symbol_table.table().begin(), symbol_table.table().end(),
|
||||
[&edge_name](const std::pair<int32_t, Symbol> &position_symbol_pair) {
|
||||
return position_symbol_pair.second.name() == edge_name;
|
||||
}) != symbol_table.table().end());
|
||||
|
||||
frame[symbol_table.at(edge_identifier)] = *e_acc;
|
||||
}
|
||||
|
||||
return Eval(std::any_cast<Expression *>(expr), ctx, storage, eval, dba);
|
||||
}
|
||||
|
||||
} // namespace memgraph::storage::v3
|
55
src/storage/v3/expr.hpp
Normal file
55
src/storage/v3/expr.hpp
Normal file
@ -0,0 +1,55 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "db_accessor.hpp"
|
||||
#include "opencypher/parser.hpp"
|
||||
#include "query/v2/requests.hpp"
|
||||
#include "storage/v3/bindings/ast/ast.hpp"
|
||||
#include "storage/v3/bindings/bindings.hpp"
|
||||
#include "storage/v3/bindings/cypher_main_visitor.hpp"
|
||||
#include "storage/v3/bindings/db_accessor.hpp"
|
||||
#include "storage/v3/bindings/eval.hpp"
|
||||
#include "storage/v3/bindings/frame.hpp"
|
||||
#include "storage/v3/bindings/symbol_generator.hpp"
|
||||
#include "storage/v3/bindings/symbol_table.hpp"
|
||||
#include "storage/v3/bindings/typed_value.hpp"
|
||||
|
||||
namespace memgraph::storage::v3 {
|
||||
|
||||
memgraph::msgs::Value ConstructValueVertex(const memgraph::storage::v3::VertexAccessor &acc, View view);
|
||||
|
||||
msgs::Value ConstructValueEdge(const EdgeAccessor &acc, View view);
|
||||
|
||||
msgs::Value FromTypedValueToValue(TypedValue &&tv);
|
||||
|
||||
std::vector<msgs::Value> ConvertToValueVectorFromTypedValueVector(std::vector<TypedValue> &&vec);
|
||||
|
||||
std::vector<PropertyId> NamesToProperties(const std::vector<std::string> &property_names, DbAccessor &dba);
|
||||
|
||||
std::vector<LabelId> NamesToLabels(const std::vector<std::string> &label_names, DbAccessor &dba);
|
||||
|
||||
template <class TExpression>
|
||||
auto Eval(TExpression *expr, EvaluationContext &ctx, AstStorage &storage, ExpressionEvaluator &eval, DbAccessor &dba) {
|
||||
ctx.properties = NamesToProperties(storage.properties_, dba);
|
||||
ctx.labels = NamesToLabels(storage.labels_, dba);
|
||||
auto value = expr->Accept(eval);
|
||||
return value;
|
||||
}
|
||||
|
||||
std::any ParseExpression(const std::string &expr, AstStorage &storage);
|
||||
|
||||
TypedValue ComputeExpression(DbAccessor &dba, const std::optional<VertexAccessor> &v_acc,
|
||||
const std::optional<EdgeAccessor> &e_acc, const std::string &expression,
|
||||
std::string_view node_name, std::string_view edge_name);
|
||||
|
||||
} // namespace memgraph::storage::v3
|
@ -14,6 +14,7 @@
|
||||
#include <bit>
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <iostream>
|
||||
#include <type_traits>
|
||||
|
||||
#include "utils/cast.hpp"
|
||||
@ -35,6 +36,11 @@ namespace memgraph::storage::v3 {
|
||||
constexpr uint64_t AsUint() const { return id_; } \
|
||||
constexpr int64_t AsInt() const { return std::bit_cast<int64_t>(id_); } \
|
||||
\
|
||||
friend std::ostream &operator<<(std::ostream &in, const name &n) { \
|
||||
in << n.AsInt(); \
|
||||
return in; \
|
||||
} \
|
||||
\
|
||||
private: \
|
||||
uint64_t id_; \
|
||||
}; \
|
||||
|
82
src/storage/v3/request_helper.cpp
Normal file
82
src/storage/v3/request_helper.cpp
Normal file
@ -0,0 +1,82 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "storage/v3/request_helper.hpp"
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "pretty_print_ast_to_original_expression.hpp"
|
||||
#include "storage/v3/bindings/db_accessor.hpp"
|
||||
#include "storage/v3/expr.hpp"
|
||||
|
||||
namespace memgraph::storage::v3 {
|
||||
|
||||
std::vector<Element> OrderByElements(Shard::Accessor &acc, DbAccessor &dba, VerticesIterable &vertices_iterable,
|
||||
std::vector<msgs::OrderBy> &order_bys) {
|
||||
std::vector<Element> ordered;
|
||||
ordered.reserve(acc.ApproximateVertexCount());
|
||||
std::vector<Ordering> ordering;
|
||||
ordering.reserve(order_bys.size());
|
||||
for (const auto &order : order_bys) {
|
||||
switch (order.direction) {
|
||||
case memgraph::msgs::OrderingDirection::ASCENDING: {
|
||||
ordering.push_back(Ordering::ASC);
|
||||
break;
|
||||
}
|
||||
case memgraph::msgs::OrderingDirection::DESCENDING: {
|
||||
ordering.push_back(Ordering::DESC);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
auto compare_typed_values = TypedValueVectorCompare(ordering);
|
||||
for (auto it = vertices_iterable.begin(); it != vertices_iterable.end(); ++it) {
|
||||
std::vector<TypedValue> properties_order_by;
|
||||
properties_order_by.reserve(order_bys.size());
|
||||
|
||||
for (const auto &order_by : order_bys) {
|
||||
const auto val =
|
||||
ComputeExpression(dba, *it, std::nullopt, order_by.expression.expression, expr::identifier_node_symbol, "");
|
||||
properties_order_by.push_back(val);
|
||||
}
|
||||
ordered.push_back({std::move(properties_order_by), *it});
|
||||
}
|
||||
|
||||
std::sort(ordered.begin(), ordered.end(), [compare_typed_values](const auto &pair1, const auto &pair2) {
|
||||
return compare_typed_values(pair1.properties_order_by, pair2.properties_order_by);
|
||||
});
|
||||
return ordered;
|
||||
}
|
||||
|
||||
VerticesIterable::Iterator GetStartVertexIterator(VerticesIterable &vertex_iterable,
|
||||
const std::vector<PropertyValue> &start_ids, const View view) {
|
||||
auto it = vertex_iterable.begin();
|
||||
while (it != vertex_iterable.end()) {
|
||||
if (const auto &vertex = *it; start_ids <= vertex.PrimaryKey(view).GetValue()) {
|
||||
break;
|
||||
}
|
||||
++it;
|
||||
}
|
||||
return it;
|
||||
}
|
||||
|
||||
/// Returns the iterator of the first ordered element whose vertex primary key
/// compares >= `start_ids`, or `ordered_elements.end()` when none qualifies.
std::vector<Element>::const_iterator GetStartOrderedElementsIterator(const std::vector<Element> &ordered_elements,
                                                                     const std::vector<PropertyValue> &start_ids,
                                                                     const View view) {
  auto current = ordered_elements.begin();
  while (current != ordered_elements.end()) {
    if (start_ids <= current->vertex_acc.PrimaryKey(view).GetValue()) {
      break;
    }
    ++current;
  }
  return current;
}
|
||||
|
||||
} // namespace memgraph::storage::v3
|
116
src/storage/v3/request_helper.hpp
Normal file
116
src/storage/v3/request_helper.hpp
Normal file
@ -0,0 +1,116 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "ast/ast.hpp"
|
||||
#include "storage/v3/bindings/typed_value.hpp"
|
||||
#include "storage/v3/shard.hpp"
|
||||
#include "storage/v3/vertex_accessor.hpp"
|
||||
|
||||
namespace memgraph::storage::v3 {
|
||||
|
||||
/// "Less-than" comparison of two TypedValues used for ORDER BY evaluation.
///
/// Null ordering: Null sorts after every non-Null value, and Null is not less
/// than Null, so a Null `a` is never "less" while a non-Null `a` is always
/// less than a Null `b`.
///
/// @throws utils::BasicException when the values are of incomparable types
///         (anything other than identical types or an int/double mix), or
///         when the type has no defined ordering (List, Map, Vertex, Edge,
///         Path).
inline bool TypedValueCompare(const TypedValue &a, const TypedValue &b) {
  // in ordering null comes after everything else
  // at the same time Null is not less that null
  // first deal with Null < Whatever case
  if (a.IsNull()) return false;
  // now deal with NotNull < Null case
  if (b.IsNull()) return true;

  // comparisons are from this point legal only between values of
  // the same type, or int+float combinations
  if ((a.type() != b.type() && !(a.IsNumeric() && b.IsNumeric())))
    throw utils::BasicException("Can't compare value of type {} to value of type {}.", a.type(), b.type());

  switch (a.type()) {
    case TypedValue::Type::Bool:
      // false < true
      return !a.ValueBool() && b.ValueBool();
    case TypedValue::Type::Int:
      if (b.type() == TypedValue::Type::Double)
        // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
        return a.ValueInt() < b.ValueDouble();
      else
        return a.ValueInt() < b.ValueInt();
    case TypedValue::Type::Double:
      if (b.type() == TypedValue::Type::Int)
        // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
        return a.ValueDouble() < b.ValueInt();
      else
        return a.ValueDouble() < b.ValueDouble();
    case TypedValue::Type::String:
      // NOLINTNEXTLINE(modernize-use-nullptr)
      return a.ValueString() < b.ValueString();
    case TypedValue::Type::Date:
      // NOLINTNEXTLINE(modernize-use-nullptr)
      return a.ValueDate() < b.ValueDate();
    case TypedValue::Type::LocalTime:
      // NOLINTNEXTLINE(modernize-use-nullptr)
      return a.ValueLocalTime() < b.ValueLocalTime();
    case TypedValue::Type::LocalDateTime:
      // NOLINTNEXTLINE(modernize-use-nullptr)
      return a.ValueLocalDateTime() < b.ValueLocalDateTime();
    case TypedValue::Type::Duration:
      // NOLINTNEXTLINE(modernize-use-nullptr)
      return a.ValueDuration() < b.ValueDuration();
    case TypedValue::Type::List:
    case TypedValue::Type::Map:
    case TypedValue::Type::Vertex:
    case TypedValue::Type::Edge:
    case TypedValue::Type::Path:
      // No ordering is defined for composite/graph types.
      throw utils::BasicException("Comparison is not defined for values of type {}.", a.type());
    case TypedValue::Type::Null:
      // Unreachable: Nulls were handled before the type dispatch.
      LOG_FATAL("Invalid type");
  }
}
|
||||
|
||||
class TypedValueVectorCompare final {
|
||||
public:
|
||||
explicit TypedValueVectorCompare(const std::vector<Ordering> &ordering) : ordering_(ordering) {}
|
||||
|
||||
bool operator()(const std::vector<TypedValue> &c1, const std::vector<TypedValue> &c2) const {
|
||||
// ordering is invalid if there are more elements in the collections
|
||||
// then there are in the ordering_ vector
|
||||
MG_ASSERT(c1.size() <= ordering_.size() && c2.size() <= ordering_.size(),
|
||||
"Collections contain more elements then there are orderings");
|
||||
|
||||
auto c1_it = c1.begin();
|
||||
auto c2_it = c2.begin();
|
||||
auto ordering_it = ordering_.begin();
|
||||
for (; c1_it != c1.end() && c2_it != c2.end(); c1_it++, c2_it++, ordering_it++) {
|
||||
if (TypedValueCompare(*c1_it, *c2_it)) return *ordering_it == Ordering::ASC;
|
||||
if (TypedValueCompare(*c2_it, *c1_it)) return *ordering_it == Ordering::DESC;
|
||||
}
|
||||
|
||||
// at least one collection is exhausted
|
||||
// c1 is less then c2 iff c1 reached the end but c2 didn't
|
||||
return (c1_it == c1.end()) && (c2_it != c2.end());
|
||||
}
|
||||
|
||||
private:
|
||||
std::vector<Ordering> ordering_;
|
||||
};
|
||||
|
||||
/// One ORDER BY result row: the evaluated sort-key values paired with the
/// vertex accessor they were computed from.
struct Element {
  // Values of the ORDER BY expressions, in the same order as the orderings
  // used by TypedValueVectorCompare.
  std::vector<TypedValue> properties_order_by;
  VertexAccessor vertex_acc;
};
|
||||
|
||||
std::vector<Element> OrderByElements(Shard::Accessor &acc, DbAccessor &dba, VerticesIterable &vertices_iterable,
|
||||
std::vector<msgs::OrderBy> &order_bys);
|
||||
|
||||
VerticesIterable::Iterator GetStartVertexIterator(VerticesIterable &vertex_iterable,
|
||||
const std::vector<PropertyValue> &start_ids, View view);
|
||||
|
||||
std::vector<Element>::const_iterator GetStartOrderedElementsIterator(const std::vector<Element> &ordered_elements,
|
||||
const std::vector<PropertyValue> &start_ids,
|
||||
View view);
|
||||
} // namespace memgraph::storage::v3
|
@ -15,6 +15,7 @@
|
||||
#include <cstddef>
|
||||
#include <ranges>
|
||||
|
||||
#include "common/types.hpp"
|
||||
#include "storage/v3/schemas.hpp"
|
||||
|
||||
namespace memgraph::storage::v3 {
|
||||
@ -78,6 +79,39 @@ SchemaValidator::SchemaValidator(Schemas &schemas) : schemas_{schemas} {}
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
/// Validates a vertex-creation request against the schema of `primary_label`.
///
/// Checks, in order: a schema exists for the primary label; none of the
/// secondary `labels` has a schema of its own (i.e. none is itself a primary
/// label); the number of `primary_properties` matches the schema; and each
/// property's type matches the corresponding schema property type.
///
/// @return std::nullopt on success, otherwise the first SchemaViolation found.
[[nodiscard]] std::optional<SchemaViolation> SchemaValidator::ValidateVertexCreate(
    LabelId primary_label, const std::vector<LabelId> &labels,
    const std::vector<PropertyValue> &primary_properties) const {
  // Schema on primary label
  const auto *schema = schemas_.GetSchema(primary_label);
  if (schema == nullptr) {
    return SchemaViolation(SchemaViolation::ValidationStatus::NO_SCHEMA_DEFINED_FOR_LABEL, primary_label);
  }

  // Is there another primary label among secondary labels
  for (const auto &secondary_label : labels) {
    if (schemas_.GetSchema(secondary_label)) {
      return SchemaViolation(SchemaViolation::ValidationStatus::VERTEX_SECONDARY_LABEL_IS_PRIMARY, secondary_label);
    }
  }

  // Quick size check
  if (schema->second.size() != primary_properties.size()) {
    return SchemaViolation(SchemaViolation::ValidationStatus::VERTEX_PRIMARY_PROPERTIES_UNDEFINED, primary_label);
  }
  // Check only properties defined by schema
  for (size_t i{0}; i < schema->second.size(); ++i) {
    // Check schema property type
    // NOTE(review): when PropertyTypeToSchemaType yields nullopt the property
    // passes this check unchecked — confirm that is the intended behavior.
    if (auto property_schema_type = PropertyTypeToSchemaType(primary_properties[i]);
        property_schema_type && *property_schema_type != schema->second[i].type) {
      return SchemaViolation(SchemaViolation::ValidationStatus::VERTEX_PROPERTY_WRONG_TYPE, primary_label,
                             schema->second[i], primary_properties[i]);
    }
  }

  return std::nullopt;
}
|
||||
|
||||
[[nodiscard]] std::optional<SchemaViolation> SchemaValidator::ValidatePropertyUpdate(
|
||||
const LabelId primary_label, const PropertyId property_id) const {
|
||||
// Verify existence of schema on primary label
|
||||
@ -103,6 +137,8 @@ SchemaValidator::SchemaValidator(Schemas &schemas) : schemas_{schemas} {}
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
const Schemas::Schema *SchemaValidator::GetSchema(LabelId label) const { return schemas_.GetSchema(label); }
|
||||
|
||||
VertexValidator::VertexValidator(const SchemaValidator &schema_validator, const LabelId primary_label)
|
||||
: schema_validator{&schema_validator}, primary_label_{primary_label} {}
|
||||
|
||||
|
@ -29,6 +29,7 @@ struct SchemaViolation {
|
||||
VERTEX_UPDATE_PRIMARY_KEY,
|
||||
VERTEX_UPDATE_PRIMARY_LABEL,
|
||||
VERTEX_SECONDARY_LABEL_IS_PRIMARY,
|
||||
VERTEX_PRIMARY_PROPERTIES_UNDEFINED,
|
||||
};
|
||||
|
||||
SchemaViolation(ValidationStatus status, LabelId label);
|
||||
@ -50,15 +51,21 @@ class SchemaValidator {
|
||||
public:
|
||||
explicit SchemaValidator(Schemas &schemas);
|
||||
|
||||
[[nodiscard]] std::optional<SchemaViolation> ValidateVertexCreate(
|
||||
[[deprecated]] std::optional<SchemaViolation> ValidateVertexCreate(
|
||||
LabelId primary_label, const std::vector<LabelId> &labels,
|
||||
const std::vector<std::pair<PropertyId, PropertyValue>> &properties) const;
|
||||
|
||||
[[nodiscard]] std::optional<SchemaViolation> ValidateVertexCreate(
|
||||
LabelId primary_label, const std::vector<LabelId> &labels,
|
||||
const std::vector<PropertyValue> &primary_properties) const;
|
||||
|
||||
[[nodiscard]] std::optional<SchemaViolation> ValidatePropertyUpdate(LabelId primary_label,
|
||||
PropertyId property_id) const;
|
||||
|
||||
[[nodiscard]] std::optional<SchemaViolation> ValidateLabelUpdate(LabelId label) const;
|
||||
|
||||
const Schemas::Schema *GetSchema(LabelId label) const;
|
||||
|
||||
private:
|
||||
Schemas &schemas_;
|
||||
};
|
||||
|
@ -25,11 +25,22 @@
|
||||
|
||||
namespace memgraph::storage::v3 {
|
||||
|
||||
std::string SchemaTypeToString(common::SchemaType type);
|
||||
|
||||
struct SchemaProperty {
|
||||
PropertyId property_id;
|
||||
common::SchemaType type;
|
||||
|
||||
friend bool operator==(const SchemaProperty &lhs, const SchemaProperty &rhs);
|
||||
|
||||
friend std::ostream &operator<<(std::ostream &in, const SchemaProperty &schema_property) {
|
||||
in << "SchemaProperty { property_id: ", in << schema_property.property_id.AsUint();
|
||||
in << ", type: ";
|
||||
in << SchemaTypeToString(schema_property.type);
|
||||
in << " }";
|
||||
|
||||
return in;
|
||||
}
|
||||
};
|
||||
|
||||
/// Structure that represents a collection of schemas
|
||||
@ -69,6 +80,4 @@ class Schemas {
|
||||
|
||||
std::optional<common::SchemaType> PropertyTypeToSchemaType(const PropertyValue &property_value);
|
||||
|
||||
std::string SchemaTypeToString(common::SchemaType type);
|
||||
|
||||
} // namespace memgraph::storage::v3
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include "storage/v3/shard.hpp"
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <iterator>
|
||||
#include <memory>
|
||||
@ -392,15 +393,9 @@ ResultSchema<VertexAccessor> Shard::Accessor::CreateVertexAndValidate(
|
||||
const std::vector<std::pair<PropertyId, PropertyValue>> &properties) {
|
||||
OOMExceptionEnabler oom_exception;
|
||||
const auto schema = shard_->GetSchema(shard_->primary_label_)->second;
|
||||
std::vector<std::pair<PropertyId, PropertyValue>> primary_properties_ordered;
|
||||
// TODO(jbajic) Maybe react immediately and send Violation
|
||||
MG_ASSERT("PrimaryKey is invalid size");
|
||||
for (auto i{0}; i < schema.size(); ++i) {
|
||||
primary_properties_ordered.emplace_back(schema[i].property_id, primary_properties[i]);
|
||||
}
|
||||
|
||||
auto maybe_schema_violation =
|
||||
GetSchemaValidator().ValidateVertexCreate(shard_->primary_label_, labels, primary_properties_ordered);
|
||||
GetSchemaValidator().ValidateVertexCreate(shard_->primary_label_, labels, primary_properties);
|
||||
if (maybe_schema_violation) {
|
||||
return {std::move(*maybe_schema_violation)};
|
||||
}
|
||||
|
@ -342,6 +342,8 @@ class Shard final {
|
||||
|
||||
LabelId PrimaryLabel() const;
|
||||
|
||||
[[nodiscard]] bool IsVertexBelongToShard(const VertexId &vertex_id) const;
|
||||
|
||||
/// @throw std::bad_alloc
|
||||
bool CreateIndex(LabelId label, std::optional<uint64_t> desired_commit_timestamp = {});
|
||||
|
||||
@ -376,8 +378,6 @@ class Shard final {
|
||||
|
||||
uint64_t CommitTimestamp(std::optional<uint64_t> desired_commit_timestamp = {});
|
||||
|
||||
[[nodiscard]] bool IsVertexBelongToShard(const VertexId &vertex_id) const;
|
||||
|
||||
// Main object storage
|
||||
NameIdMapper name_id_mapper_;
|
||||
LabelId primary_label_;
|
||||
|
@ -62,8 +62,8 @@ template <typename IoImpl>
|
||||
using ShardRaft = Raft<IoImpl, ShardRsm, WriteRequests, WriteResponses, ReadRequests, ReadResponses>;
|
||||
|
||||
using namespace std::chrono_literals;
|
||||
static constexpr Duration kMinimumCronInterval = 1000ms;
|
||||
static constexpr Duration kMaximumCronInterval = 2000ms;
|
||||
static constexpr Duration kMinimumCronInterval = 100ms;
|
||||
static constexpr Duration kMaximumCronInterval = 200ms;
|
||||
static_assert(kMinimumCronInterval < kMaximumCronInterval,
|
||||
"The minimum cron interval has to be smaller than the maximum cron interval!");
|
||||
|
||||
@ -135,7 +135,7 @@ class ShardManager {
|
||||
io::Io<IoImpl> io_;
|
||||
std::map<uuid, ShardRaft<IoImpl>> rsm_map_;
|
||||
std::priority_queue<std::pair<Time, uuid>, std::vector<std::pair<Time, uuid>>, std::greater<>> cron_schedule_;
|
||||
Time next_cron_;
|
||||
Time next_cron_ = Time::min();
|
||||
Address coordinator_leader_;
|
||||
coordinator::ShardMap shard_map_;
|
||||
std::optional<ResponseFuture<WriteResponse<CoordinatorWriteResponses>>> heartbeat_res_;
|
||||
|
@ -9,58 +9,95 @@
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include <algorithm>
|
||||
#include <functional>
|
||||
#include <iterator>
|
||||
#include <unordered_set>
|
||||
#include <utility>
|
||||
|
||||
#include "parser/opencypher/parser.hpp"
|
||||
#include "query/v2/requests.hpp"
|
||||
#include "storage/v3/bindings/ast/ast.hpp"
|
||||
#include "storage/v3/bindings/cypher_main_visitor.hpp"
|
||||
#include "storage/v3/bindings/db_accessor.hpp"
|
||||
#include "storage/v3/bindings/eval.hpp"
|
||||
#include "storage/v3/bindings/frame.hpp"
|
||||
#include "storage/v3/bindings/pretty_print_ast_to_original_expression.hpp"
|
||||
#include "storage/v3/bindings/symbol_generator.hpp"
|
||||
#include "storage/v3/bindings/symbol_table.hpp"
|
||||
#include "storage/v3/bindings/typed_value.hpp"
|
||||
#include "storage/v3/expr.hpp"
|
||||
#include "storage/v3/id_types.hpp"
|
||||
#include "storage/v3/key_store.hpp"
|
||||
#include "storage/v3/property_value.hpp"
|
||||
#include "storage/v3/request_helper.hpp"
|
||||
#include "storage/v3/schemas.hpp"
|
||||
#include "storage/v3/shard.hpp"
|
||||
#include "storage/v3/shard_rsm.hpp"
|
||||
#include "storage/v3/storage.hpp"
|
||||
#include "storage/v3/value_conversions.hpp"
|
||||
#include "storage/v3/vertex_accessor.hpp"
|
||||
#include "storage/v3/vertex_id.hpp"
|
||||
#include "storage/v3/view.hpp"
|
||||
|
||||
using memgraph::msgs::Label;
|
||||
using memgraph::msgs::PropertyId;
|
||||
using memgraph::msgs::Value;
|
||||
using memgraph::msgs::Vertex;
|
||||
using memgraph::msgs::VertexId;
|
||||
namespace memgraph::storage::v3 {
|
||||
using msgs::Label;
|
||||
using msgs::PropertyId;
|
||||
using msgs::Value;
|
||||
|
||||
using memgraph::storage::conversions::ConvertPropertyVector;
|
||||
using memgraph::storage::conversions::ConvertValueVector;
|
||||
using memgraph::storage::conversions::ToPropertyValue;
|
||||
using memgraph::storage::conversions::ToValue;
|
||||
using conversions::ConvertPropertyVector;
|
||||
using conversions::ConvertValueVector;
|
||||
using conversions::FromPropertyValueToValue;
|
||||
using conversions::ToMsgsVertexId;
|
||||
using conversions::ToPropertyValue;
|
||||
|
||||
namespace {
|
||||
std::vector<std::pair<memgraph::storage::v3::PropertyId, memgraph::storage::v3::PropertyValue>> ConvertPropertyMap(
|
||||
namespace msgs = msgs;
|
||||
|
||||
using AllEdgePropertyDataSructure = std::map<PropertyId, msgs::Value>;
|
||||
using SpecificEdgePropertyDataSructure = std::vector<msgs::Value>;
|
||||
|
||||
using AllEdgeProperties = std::tuple<msgs::VertexId, msgs::Gid, AllEdgePropertyDataSructure>;
|
||||
using SpecificEdgeProperties = std::tuple<msgs::VertexId, msgs::Gid, SpecificEdgePropertyDataSructure>;
|
||||
|
||||
using SpecificEdgePropertiesVector = std::vector<SpecificEdgeProperties>;
|
||||
using AllEdgePropertiesVector = std::vector<AllEdgeProperties>;
|
||||
|
||||
using EdgeAccessors = std::vector<storage::v3::EdgeAccessor>;
|
||||
|
||||
using EdgeFiller = std::function<bool(const EdgeAccessor &edge, bool is_in_edge, msgs::ExpandOneResultRow &result_row)>;
|
||||
using EdgeUniqunessFunction = std::function<EdgeAccessors(EdgeAccessors &&, msgs::EdgeDirection)>;
|
||||
|
||||
struct VertexIdCmpr {
|
||||
bool operator()(const storage::v3::VertexId *lhs, const storage::v3::VertexId *rhs) const { return *lhs < *rhs; }
|
||||
};
|
||||
|
||||
std::vector<std::pair<PropertyId, PropertyValue>> ConvertPropertyMap(
|
||||
std::vector<std::pair<PropertyId, Value>> &&properties) {
|
||||
std::vector<std::pair<memgraph::storage::v3::PropertyId, memgraph::storage::v3::PropertyValue>> ret;
|
||||
std::vector<std::pair<PropertyId, PropertyValue>> ret;
|
||||
ret.reserve(properties.size());
|
||||
|
||||
for (auto &[key, value] : properties) {
|
||||
ret.emplace_back(std::make_pair(key, ToPropertyValue(std::move(value))));
|
||||
}
|
||||
std::transform(std::make_move_iterator(properties.begin()), std::make_move_iterator(properties.end()),
|
||||
std::back_inserter(ret), [](std::pair<PropertyId, Value> &&property) {
|
||||
return std::make_pair(property.first, ToPropertyValue(std::move(property.second)));
|
||||
});
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::vector<std::pair<memgraph::storage::v3::PropertyId, Value>> FromMap(
|
||||
const std::map<PropertyId, Value> &properties) {
|
||||
std::vector<std::pair<memgraph::storage::v3::PropertyId, Value>> ret;
|
||||
std::vector<std::pair<PropertyId, Value>> FromMap(const std::map<PropertyId, Value> &properties) {
|
||||
std::vector<std::pair<PropertyId, Value>> ret;
|
||||
ret.reserve(properties.size());
|
||||
|
||||
for (const auto &[key, value] : properties) {
|
||||
ret.emplace_back(std::make_pair(key, value));
|
||||
}
|
||||
std::transform(properties.begin(), properties.end(), std::back_inserter(ret),
|
||||
[](const auto &property) { return std::make_pair(property.first, property.second); });
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::optional<std::map<PropertyId, Value>> CollectSpecificPropertiesFromAccessor(
|
||||
const memgraph::storage::v3::VertexAccessor &acc, const std::vector<memgraph::storage::v3::PropertyId> &props,
|
||||
memgraph::storage::v3::View view) {
|
||||
std::optional<std::map<PropertyId, Value>> CollectSpecificPropertiesFromAccessor(const VertexAccessor &acc,
|
||||
const std::vector<PropertyId> &props,
|
||||
View view) {
|
||||
std::map<PropertyId, Value> ret;
|
||||
|
||||
for (const auto &prop : props) {
|
||||
@ -70,101 +107,104 @@ std::optional<std::map<PropertyId, Value>> CollectSpecificPropertiesFromAccessor
|
||||
return std::nullopt;
|
||||
}
|
||||
auto &value = result.GetValue();
|
||||
if (value.IsNull()) {
|
||||
spdlog::debug("The specified property does not exist but it should");
|
||||
return std::nullopt;
|
||||
}
|
||||
ret.emplace(std::make_pair(prop, ToValue(value)));
|
||||
ret.emplace(std::make_pair(prop, FromPropertyValueToValue(std::move(value))));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::optional<std::map<PropertyId, Value>> CollectAllPropertiesFromAccessor(
|
||||
const memgraph::storage::v3::VertexAccessor &acc, memgraph::storage::v3::View view,
|
||||
const memgraph::storage::v3::Schemas::Schema *schema) {
|
||||
std::optional<std::map<PropertyId, Value>> CollectAllPropertiesFromAccessor(const VertexAccessor &acc, View view,
|
||||
const Schemas::Schema *schema) {
|
||||
std::map<PropertyId, Value> ret;
|
||||
auto props = acc.Properties(view);
|
||||
if (props.HasError()) {
|
||||
spdlog::debug("Encountered an error while trying to get vertex properties.");
|
||||
return std::nullopt;
|
||||
}
|
||||
for (const auto &[prop_key, prop_val] : props.GetValue()) {
|
||||
ret.emplace(prop_key, ToValue(prop_val));
|
||||
}
|
||||
|
||||
auto &properties = props.GetValue();
|
||||
std::transform(properties.begin(), properties.end(), std::inserter(ret, ret.begin()),
|
||||
[](std::pair<const PropertyId, PropertyValue> &pair) {
|
||||
return std::make_pair(pair.first, FromPropertyValueToValue(std::move(pair.second)));
|
||||
});
|
||||
properties.clear();
|
||||
|
||||
// TODO(antaljanosbenjamin): Once the VertexAccessor::Properties returns also the primary keys, we can get rid of this
|
||||
// code.
|
||||
auto maybe_pk = acc.PrimaryKey(view);
|
||||
if (maybe_pk.HasError()) {
|
||||
spdlog::debug("Encountered an error while trying to get vertex primary key.");
|
||||
}
|
||||
|
||||
const auto pk = maybe_pk.GetValue();
|
||||
auto &pk = maybe_pk.GetValue();
|
||||
MG_ASSERT(schema->second.size() == pk.size(), "PrimaryKey size does not match schema!");
|
||||
for (size_t i{0}; i < schema->second.size(); ++i) {
|
||||
ret.emplace(schema->second[i].property_id, ToValue(pk[i]));
|
||||
ret.emplace(schema->second[i].property_id, FromPropertyValueToValue(std::move(pk[i])));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
Value ConstructValueVertex(const memgraph::storage::v3::VertexAccessor &acc, memgraph::storage::v3::View view) {
|
||||
// Get the vertex id
|
||||
auto prim_label = acc.PrimaryLabel(view).GetValue();
|
||||
Label value_label{.id = prim_label};
|
||||
|
||||
auto prim_key = ConvertValueVector(acc.PrimaryKey(view).GetValue());
|
||||
VertexId vertex_id = std::make_pair(value_label, prim_key);
|
||||
|
||||
// Get the labels
|
||||
auto vertex_labels = acc.Labels(view).GetValue();
|
||||
std::vector<Label> value_labels;
|
||||
for (const auto &label : vertex_labels) {
|
||||
Label l = {.id = label};
|
||||
value_labels.push_back(l);
|
||||
}
|
||||
|
||||
return Value({.id = vertex_id, .labels = value_labels});
|
||||
// Returns true iff every filter expression in `filters` evaluates to boolean
// true for the given vertex; a non-bool (or false) result rejects the vertex.
bool FilterOnVertex(DbAccessor &dba, const storage::v3::VertexAccessor &v_acc, const std::vector<std::string> &filters,
                    const std::string_view node_name) {
  for (const auto &filter_expr : filters) {
    auto evaluation = ComputeExpression(dba, v_acc, std::nullopt, filter_expr, node_name, "");
    if (!evaluation.IsBool() || !evaluation.ValueBool()) {
      return false;
    }
  }
  return true;
}
|
||||
|
||||
bool DoesEdgeTypeMatch(const memgraph::msgs::ExpandOneRequest &req, const memgraph::storage::v3::EdgeAccessor &edge) {
|
||||
// Evaluates each expression string against the vertex and returns the results
// in the same order as `expressions`.
std::vector<TypedValue> EvaluateVertexExpressions(DbAccessor &dba, const VertexAccessor &v_acc,
                                                  const std::vector<std::string> &expressions,
                                                  std::string_view node_name) {
  std::vector<TypedValue> results;
  results.reserve(expressions.size());

  for (const auto &expression : expressions) {
    results.push_back(ComputeExpression(dba, v_acc, std::nullopt, expression, node_name, ""));
  }

  return results;
}
|
||||
|
||||
bool DoesEdgeTypeMatch(const std::vector<msgs::EdgeType> &edge_types, const EdgeAccessor &edge) {
|
||||
// TODO(gvolfing) This should be checked only once and handled accordingly.
|
||||
if (req.edge_types.empty()) {
|
||||
if (edge_types.empty()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return std::ranges::any_of(req.edge_types.cbegin(), req.edge_types.cend(),
|
||||
[&edge](const memgraph::msgs::EdgeType &edge_type) {
|
||||
return memgraph::storage::v3::EdgeTypeId::FromUint(edge_type.id) == edge.EdgeType();
|
||||
});
|
||||
return std::ranges::any_of(edge_types.begin(), edge_types.end(),
|
||||
[&edge](const msgs::EdgeType &edge_type) { return edge_type.id == edge.EdgeType(); });
|
||||
}
|
||||
|
||||
struct LocalError {};
|
||||
|
||||
std::optional<memgraph::msgs::Vertex> FillUpSourceVertex(
|
||||
const std::optional<memgraph::storage::v3::VertexAccessor> &v_acc, memgraph::msgs::ExpandOneRequest &req,
|
||||
memgraph::msgs::VertexId src_vertex) {
|
||||
auto secondary_labels = v_acc->Labels(memgraph::storage::v3::View::OLD);
|
||||
std::optional<msgs::Vertex> FillUpSourceVertex(const std::optional<VertexAccessor> &v_acc,
|
||||
const msgs::ExpandOneRequest &req, msgs::VertexId src_vertex) {
|
||||
auto secondary_labels = v_acc->Labels(View::NEW);
|
||||
if (secondary_labels.HasError()) {
|
||||
spdlog::debug("Encountered an error while trying to get the secondary labels of a vertex. Transaction id: {}",
|
||||
req.transaction_id.logical_id);
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
memgraph::msgs::Vertex source_vertex;
|
||||
auto &sec_labels = secondary_labels.GetValue();
|
||||
msgs::Vertex source_vertex;
|
||||
source_vertex.id = src_vertex;
|
||||
source_vertex.labels.reserve(secondary_labels.GetValue().size());
|
||||
for (auto label_id : secondary_labels.GetValue()) {
|
||||
source_vertex.labels.emplace_back(memgraph::msgs::Label{.id = label_id});
|
||||
}
|
||||
source_vertex.labels.reserve(sec_labels.size());
|
||||
|
||||
std::transform(sec_labels.begin(), sec_labels.end(), std::back_inserter(source_vertex.labels),
|
||||
[](auto label_id) { return msgs::Label{.id = label_id}; });
|
||||
|
||||
return source_vertex;
|
||||
}
|
||||
|
||||
std::optional<std::map<PropertyId, Value>> FillUpSourceVertexProperties(
|
||||
const std::optional<memgraph::storage::v3::VertexAccessor> &v_acc, memgraph::msgs::ExpandOneRequest &req) {
|
||||
std::optional<std::map<PropertyId, Value>> FillUpSourceVertexProperties(const std::optional<VertexAccessor> &v_acc,
|
||||
const msgs::ExpandOneRequest &req) {
|
||||
std::map<PropertyId, Value> src_vertex_properties;
|
||||
|
||||
if (!req.src_vertex_properties) {
|
||||
auto props = v_acc->Properties(memgraph::storage::v3::View::OLD);
|
||||
auto props = v_acc->Properties(View::NEW);
|
||||
if (props.HasError()) {
|
||||
spdlog::debug("Encountered an error while trying to access vertex properties. Transaction id: {}",
|
||||
req.transaction_id.logical_id);
|
||||
@ -172,176 +212,111 @@ std::optional<std::map<PropertyId, Value>> FillUpSourceVertexProperties(
|
||||
}
|
||||
|
||||
for (auto &[key, val] : props.GetValue()) {
|
||||
src_vertex_properties.insert(std::make_pair(key, ToValue(val)));
|
||||
src_vertex_properties.insert(std::make_pair(key, FromPropertyValueToValue(std::move(val))));
|
||||
}
|
||||
|
||||
} else if (req.src_vertex_properties.value().empty()) {
|
||||
// NOOP
|
||||
} else {
|
||||
for (const auto &prop : req.src_vertex_properties.value()) {
|
||||
const auto &prop_val = v_acc->GetProperty(prop, memgraph::storage::v3::View::OLD);
|
||||
src_vertex_properties.insert(std::make_pair(prop, ToValue(prop_val.GetValue())));
|
||||
auto prop_val = v_acc->GetProperty(prop, View::OLD);
|
||||
if (prop_val.HasError()) {
|
||||
spdlog::debug("Encountered an error while trying to access vertex properties. Transaction id: {}",
|
||||
req.transaction_id.logical_id);
|
||||
return std::nullopt;
|
||||
}
|
||||
src_vertex_properties.insert(std::make_pair(prop, FromPropertyValueToValue(std::move(prop_val.GetValue()))));
|
||||
}
|
||||
}
|
||||
|
||||
return src_vertex_properties;
|
||||
}
|
||||
|
||||
std::optional<std::array<std::vector<memgraph::storage::v3::EdgeAccessor>, 2>> FillUpConnectingEdges(
|
||||
const std::optional<memgraph::storage::v3::VertexAccessor> &v_acc, memgraph::msgs::ExpandOneRequest &req) {
|
||||
std::vector<memgraph::storage::v3::EdgeAccessor> in_edges;
|
||||
std::vector<memgraph::storage::v3::EdgeAccessor> out_edges;
|
||||
std::optional<std::array<std::vector<EdgeAccessor>, 2>> FillUpConnectingEdges(
|
||||
const std::optional<VertexAccessor> &v_acc, const msgs::ExpandOneRequest &req,
|
||||
const EdgeUniqunessFunction &maybe_filter_based_on_edge_uniquness) {
|
||||
std::vector<EdgeAccessor> in_edges;
|
||||
std::vector<EdgeAccessor> out_edges;
|
||||
|
||||
switch (req.direction) {
|
||||
case memgraph::msgs::EdgeDirection::OUT: {
|
||||
auto out_edges_result = v_acc->OutEdges(memgraph::storage::v3::View::OLD);
|
||||
case msgs::EdgeDirection::OUT: {
|
||||
auto out_edges_result = v_acc->OutEdges(View::NEW);
|
||||
if (out_edges_result.HasError()) {
|
||||
spdlog::debug("Encountered an error while trying to get out-going EdgeAccessors. Transaction id: {}",
|
||||
req.transaction_id.logical_id);
|
||||
return std::nullopt;
|
||||
}
|
||||
out_edges = std::move(out_edges_result.GetValue());
|
||||
out_edges =
|
||||
maybe_filter_based_on_edge_uniquness(std::move(out_edges_result.GetValue()), msgs::EdgeDirection::OUT);
|
||||
break;
|
||||
}
|
||||
case memgraph::msgs::EdgeDirection::IN: {
|
||||
auto in_edges_result = v_acc->InEdges(memgraph::storage::v3::View::OLD);
|
||||
case msgs::EdgeDirection::IN: {
|
||||
auto in_edges_result = v_acc->InEdges(View::NEW);
|
||||
if (in_edges_result.HasError()) {
|
||||
spdlog::debug(
|
||||
"Encountered an error while trying to get in-going EdgeAccessors. Transaction id: {}"[req.transaction_id
|
||||
.logical_id]);
|
||||
return std::nullopt;
|
||||
}
|
||||
in_edges = std::move(in_edges_result.GetValue());
|
||||
in_edges = maybe_filter_based_on_edge_uniquness(std::move(in_edges_result.GetValue()), msgs::EdgeDirection::IN);
|
||||
break;
|
||||
}
|
||||
case memgraph::msgs::EdgeDirection::BOTH: {
|
||||
auto in_edges_result = v_acc->InEdges(memgraph::storage::v3::View::OLD);
|
||||
case msgs::EdgeDirection::BOTH: {
|
||||
auto in_edges_result = v_acc->InEdges(View::NEW);
|
||||
if (in_edges_result.HasError()) {
|
||||
spdlog::debug("Encountered an error while trying to get in-going EdgeAccessors. Transaction id: {}",
|
||||
req.transaction_id.logical_id);
|
||||
return std::nullopt;
|
||||
}
|
||||
in_edges = std::move(in_edges_result.GetValue());
|
||||
in_edges = maybe_filter_based_on_edge_uniquness(std::move(in_edges_result.GetValue()), msgs::EdgeDirection::IN);
|
||||
|
||||
auto out_edges_result = v_acc->OutEdges(memgraph::storage::v3::View::OLD);
|
||||
auto out_edges_result = v_acc->OutEdges(View::NEW);
|
||||
if (out_edges_result.HasError()) {
|
||||
spdlog::debug("Encountered an error while trying to get out-going EdgeAccessors. Transaction id: {}",
|
||||
req.transaction_id.logical_id);
|
||||
return std::nullopt;
|
||||
}
|
||||
out_edges = std::move(out_edges_result.GetValue());
|
||||
out_edges =
|
||||
maybe_filter_based_on_edge_uniquness(std::move(out_edges_result.GetValue()), msgs::EdgeDirection::OUT);
|
||||
break;
|
||||
}
|
||||
}
|
||||
return std::array<std::vector<memgraph::storage::v3::EdgeAccessor>, 2>{in_edges, out_edges};
|
||||
return std::array<std::vector<EdgeAccessor>, 2>{in_edges, out_edges};
|
||||
}
|
||||
|
||||
using AllEdgePropertyDataSructure = std::map<PropertyId, memgraph::msgs::Value>;
|
||||
using SpecificEdgePropertyDataSructure = std::vector<memgraph::msgs::Value>;
|
||||
using AllEdgePropertyDataSructure = std::map<PropertyId, msgs::Value>;
|
||||
using SpecificEdgePropertyDataSructure = std::vector<msgs::Value>;
|
||||
|
||||
using AllEdgeProperties = std::tuple<memgraph::msgs::VertexId, memgraph::msgs::Gid, AllEdgePropertyDataSructure>;
|
||||
using SpecificEdgeProperties =
|
||||
std::tuple<memgraph::msgs::VertexId, memgraph::msgs::Gid, SpecificEdgePropertyDataSructure>;
|
||||
using AllEdgeProperties = std::tuple<msgs::VertexId, msgs::Gid, AllEdgePropertyDataSructure>;
|
||||
using SpecificEdgeProperties = std::tuple<msgs::VertexId, msgs::Gid, SpecificEdgePropertyDataSructure>;
|
||||
|
||||
using SpecificEdgePropertiesVector = std::vector<SpecificEdgeProperties>;
|
||||
using AllEdgePropertiesVector = std::vector<AllEdgeProperties>;
|
||||
|
||||
template <typename ReturnType, typename EdgeProperties, typename EdgePropertyDataStructure, typename Functor>
|
||||
std::optional<ReturnType> GetEdgesWithProperties(const std::vector<memgraph::storage::v3::EdgeAccessor> &edges,
|
||||
const memgraph::msgs::ExpandOneRequest &req,
|
||||
Functor get_edge_properties) {
|
||||
ReturnType ret;
|
||||
ret.reserve(edges.size());
|
||||
using EdgeFiller = std::function<bool(const EdgeAccessor &edge, bool is_in_edge, msgs::ExpandOneResultRow &result_row)>;
|
||||
|
||||
template <bool are_in_edges>
|
||||
bool FillEdges(const std::vector<EdgeAccessor> &edges, const msgs::ExpandOneRequest &req, msgs::ExpandOneResultRow &row,
|
||||
const EdgeFiller &edge_filler) {
|
||||
for (const auto &edge : edges) {
|
||||
if (!DoesEdgeTypeMatch(req, edge)) {
|
||||
if (!DoesEdgeTypeMatch(req.edge_types, edge)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
EdgeProperties ret_tuple;
|
||||
|
||||
memgraph::msgs::Label label;
|
||||
label.id = edge.FromVertex().primary_label;
|
||||
memgraph::msgs::VertexId other_vertex = std::make_pair(label, ConvertValueVector(edge.FromVertex().primary_key));
|
||||
|
||||
const auto edge_props_var = get_edge_properties(edge);
|
||||
|
||||
if (std::get_if<LocalError>(&edge_props_var) != nullptr) {
|
||||
return std::nullopt;
|
||||
if (!edge_filler(edge, are_in_edges, row)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
auto edge_props = std::get<EdgePropertyDataStructure>(edge_props_var);
|
||||
memgraph::msgs::Gid gid = edge.Gid().AsUint();
|
||||
|
||||
ret.emplace_back(EdgeProperties{other_vertex, gid, edge_props});
|
||||
}
|
||||
|
||||
return ret;
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename TPropertyValue, typename TPropertyNullopt>
|
||||
void SetFinalEdgeProperties(std::optional<TPropertyValue> &properties_to_value,
|
||||
std::optional<TPropertyNullopt> &properties_to_nullopt, const TPropertyValue &ret_out,
|
||||
const TPropertyValue &ret_in, const memgraph::msgs::ExpandOneRequest &req) {
|
||||
switch (req.direction) {
|
||||
case memgraph::msgs::EdgeDirection::OUT: {
|
||||
properties_to_value = std::move(ret_out);
|
||||
break;
|
||||
}
|
||||
case memgraph::msgs::EdgeDirection::IN: {
|
||||
properties_to_value = std::move(ret_in);
|
||||
break;
|
||||
}
|
||||
case memgraph::msgs::EdgeDirection::BOTH: {
|
||||
TPropertyValue ret;
|
||||
ret.resize(ret_out.size() + ret_in.size());
|
||||
ret.insert(ret.end(), std::make_move_iterator(ret_in.begin()), std::make_move_iterator(ret_in.end()));
|
||||
ret.insert(ret.end(), std::make_move_iterator(ret_out.begin()), std::make_move_iterator(ret_out.end()));
|
||||
|
||||
properties_to_value = ret;
|
||||
break;
|
||||
}
|
||||
}
|
||||
properties_to_nullopt = {};
|
||||
}
|
||||
|
||||
std::optional<memgraph::msgs::ExpandOneResultRow> GetExpandOneResult(memgraph::storage::v3::Shard::Accessor &acc,
|
||||
memgraph::msgs::VertexId src_vertex,
|
||||
memgraph::msgs::ExpandOneRequest req) {
|
||||
using EdgeProperties =
|
||||
std::variant<LocalError, std::map<PropertyId, memgraph::msgs::Value>, std::vector<memgraph::msgs::Value>>;
|
||||
std::function<EdgeProperties(const memgraph::storage::v3::EdgeAccessor &)> get_edge_properties;
|
||||
|
||||
if (!req.edge_properties) {
|
||||
get_edge_properties = [&req](const memgraph::storage::v3::EdgeAccessor &edge) -> EdgeProperties {
|
||||
std::map<PropertyId, memgraph::msgs::Value> ret;
|
||||
auto property_results = edge.Properties(memgraph::storage::v3::View::OLD);
|
||||
if (property_results.HasError()) {
|
||||
spdlog::debug("Encountered an error while trying to get out-going EdgeAccessors. Transaction id: {}",
|
||||
req.transaction_id.logical_id);
|
||||
return LocalError{};
|
||||
}
|
||||
|
||||
for (const auto &[prop_key, prop_val] : property_results.GetValue()) {
|
||||
ret.insert(std::make_pair(prop_key, ToValue(prop_val)));
|
||||
}
|
||||
return ret;
|
||||
};
|
||||
} else {
|
||||
// TODO(gvolfing) - do we want to set the action_successful here?
|
||||
get_edge_properties = [&req](const memgraph::storage::v3::EdgeAccessor &edge) {
|
||||
std::vector<memgraph::msgs::Value> ret;
|
||||
ret.reserve(req.edge_properties.value().size());
|
||||
for (const auto &edge_prop : req.edge_properties.value()) {
|
||||
// TODO(gvolfing) maybe check for the absence of certain properties
|
||||
ret.emplace_back(ToValue(edge.GetProperty(edge_prop, memgraph::storage::v3::View::OLD).GetValue()));
|
||||
}
|
||||
return ret;
|
||||
};
|
||||
}
|
||||
|
||||
std::optional<msgs::ExpandOneResultRow> GetExpandOneResult(
|
||||
Shard::Accessor &acc, msgs::VertexId src_vertex, const msgs::ExpandOneRequest &req,
|
||||
const EdgeUniqunessFunction &maybe_filter_based_on_edge_uniquness, const EdgeFiller &edge_filler) {
|
||||
/// Fill up source vertex
|
||||
auto v_acc = acc.FindVertex(ConvertPropertyVector(std::move(src_vertex.second)), memgraph::storage::v3::View::OLD);
|
||||
const auto primary_key = ConvertPropertyVector(std::move(src_vertex.second));
|
||||
auto v_acc = acc.FindVertex(primary_key, View::NEW);
|
||||
|
||||
auto source_vertex = FillUpSourceVertex(v_acc, req, src_vertex);
|
||||
if (!source_vertex) {
|
||||
@ -355,68 +330,134 @@ std::optional<memgraph::msgs::ExpandOneResultRow> GetExpandOneResult(memgraph::s
|
||||
}
|
||||
|
||||
/// Fill up connecting edges
|
||||
auto fill_up_connecting_edges = FillUpConnectingEdges(v_acc, req);
|
||||
auto fill_up_connecting_edges = FillUpConnectingEdges(v_acc, req, maybe_filter_based_on_edge_uniquness);
|
||||
if (!fill_up_connecting_edges) {
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
auto [in_edges, out_edges] = fill_up_connecting_edges.value();
|
||||
|
||||
/// Assemble the edge properties
|
||||
std::optional<AllEdgePropertiesVector> edges_with_all_properties;
|
||||
std::optional<SpecificEdgePropertiesVector> edges_with_specific_properties;
|
||||
|
||||
if (!req.edge_properties) {
|
||||
auto ret_in_opt = GetEdgesWithProperties<AllEdgePropertiesVector, AllEdgeProperties, AllEdgePropertyDataSructure>(
|
||||
in_edges, req, get_edge_properties);
|
||||
if (!ret_in_opt) {
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
auto ret_out_opt = GetEdgesWithProperties<AllEdgePropertiesVector, AllEdgeProperties, AllEdgePropertyDataSructure>(
|
||||
out_edges, req, get_edge_properties);
|
||||
if (!ret_out_opt) {
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
auto &ret_in = *ret_in_opt;
|
||||
auto &ret_out = *ret_out_opt;
|
||||
|
||||
SetFinalEdgeProperties<AllEdgePropertiesVector, SpecificEdgePropertiesVector>(
|
||||
edges_with_all_properties, edges_with_specific_properties, ret_out, ret_in, req);
|
||||
} else {
|
||||
auto ret_in_opt =
|
||||
GetEdgesWithProperties<SpecificEdgePropertiesVector, SpecificEdgeProperties, SpecificEdgePropertyDataSructure>(
|
||||
in_edges, req, get_edge_properties);
|
||||
if (!ret_in_opt) {
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
auto ret_out_opt =
|
||||
GetEdgesWithProperties<SpecificEdgePropertiesVector, SpecificEdgeProperties, SpecificEdgePropertyDataSructure>(
|
||||
out_edges, req, get_edge_properties);
|
||||
if (!ret_out_opt) {
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
auto &ret_in = *ret_in_opt;
|
||||
auto &ret_out = *ret_out_opt;
|
||||
|
||||
SetFinalEdgeProperties<SpecificEdgePropertiesVector, AllEdgePropertiesVector>(
|
||||
edges_with_specific_properties, edges_with_all_properties, ret_out, ret_in, req);
|
||||
msgs::ExpandOneResultRow result_row;
|
||||
result_row.src_vertex = std::move(*source_vertex);
|
||||
result_row.src_vertex_properties = std::move(*src_vertex_properties);
|
||||
static constexpr bool kInEdges = true;
|
||||
static constexpr bool kOutEdges = false;
|
||||
if (!in_edges.empty() && !FillEdges<kInEdges>(in_edges, req, result_row, edge_filler)) {
|
||||
return std::nullopt;
|
||||
}
|
||||
if (!out_edges.empty() && !FillEdges<kOutEdges>(out_edges, req, result_row, edge_filler)) {
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
return memgraph::msgs::ExpandOneResultRow{
|
||||
.src_vertex = std::move(*source_vertex),
|
||||
.src_vertex_properties = std::move(src_vertex_properties),
|
||||
.edges_with_all_properties = std::move(edges_with_all_properties),
|
||||
.edges_with_specific_properties = std::move(edges_with_specific_properties)};
|
||||
return result_row;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
EdgeUniqunessFunction InitializeEdgeUniqunessFunction(bool only_unique_neighbor_rows) {
|
||||
// Functions to select connecting edges based on uniquness
|
||||
EdgeUniqunessFunction maybe_filter_based_on_edge_uniquness;
|
||||
|
||||
namespace memgraph::storage::v3 {
|
||||
if (only_unique_neighbor_rows) {
|
||||
maybe_filter_based_on_edge_uniquness = [](EdgeAccessors &&edges,
|
||||
msgs::EdgeDirection edge_direction) -> EdgeAccessors {
|
||||
std::function<bool(std::set<const storage::v3::VertexId *, VertexIdCmpr> &, const storage::v3::EdgeAccessor &)>
|
||||
is_edge_unique;
|
||||
switch (edge_direction) {
|
||||
case msgs::EdgeDirection::OUT: {
|
||||
is_edge_unique = [](std::set<const storage::v3::VertexId *, VertexIdCmpr> &other_vertex_set,
|
||||
const storage::v3::EdgeAccessor &edge_acc) {
|
||||
auto [it, insertion_happened] = other_vertex_set.insert(&edge_acc.ToVertex());
|
||||
return insertion_happened;
|
||||
};
|
||||
break;
|
||||
}
|
||||
case msgs::EdgeDirection::IN: {
|
||||
is_edge_unique = [](std::set<const storage::v3::VertexId *, VertexIdCmpr> &other_vertex_set,
|
||||
const storage::v3::EdgeAccessor &edge_acc) {
|
||||
auto [it, insertion_happened] = other_vertex_set.insert(&edge_acc.FromVertex());
|
||||
return insertion_happened;
|
||||
};
|
||||
break;
|
||||
}
|
||||
case msgs::EdgeDirection::BOTH:
|
||||
MG_ASSERT(false, "This is should never happen, msgs::EdgeDirection::BOTH should not be passed here.");
|
||||
}
|
||||
|
||||
EdgeAccessors ret;
|
||||
std::set<const storage::v3::VertexId *, VertexIdCmpr> other_vertex_set;
|
||||
|
||||
for (const auto &edge : edges) {
|
||||
if (is_edge_unique(other_vertex_set, edge)) {
|
||||
ret.emplace_back(edge);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
};
|
||||
} else {
|
||||
maybe_filter_based_on_edge_uniquness =
|
||||
[](EdgeAccessors &&edges, msgs::EdgeDirection /*edge_direction*/) -> EdgeAccessors { return std::move(edges); };
|
||||
}
|
||||
|
||||
return maybe_filter_based_on_edge_uniquness;
|
||||
}
|
||||
|
||||
EdgeFiller InitializeEdgeFillerFunction(const msgs::ExpandOneRequest &req) {
|
||||
EdgeFiller edge_filler;
|
||||
|
||||
if (!req.edge_properties) {
|
||||
edge_filler = [transaction_id = req.transaction_id.logical_id](const EdgeAccessor &edge, const bool is_in_edge,
|
||||
msgs::ExpandOneResultRow &result_row) -> bool {
|
||||
auto properties_results = edge.Properties(View::NEW);
|
||||
if (properties_results.HasError()) {
|
||||
spdlog::debug("Encountered an error while trying to get edge properties. Transaction id: {}", transaction_id);
|
||||
return false;
|
||||
}
|
||||
|
||||
std::map<PropertyId, msgs::Value> value_properties;
|
||||
for (auto &[prop_key, prop_val] : properties_results.GetValue()) {
|
||||
value_properties.insert(std::make_pair(prop_key, FromPropertyValueToValue(std::move(prop_val))));
|
||||
}
|
||||
using EdgeWithAllProperties = msgs::ExpandOneResultRow::EdgeWithAllProperties;
|
||||
EdgeWithAllProperties edges{ToMsgsVertexId(edge.FromVertex()), msgs::EdgeType{edge.EdgeType()},
|
||||
edge.Gid().AsUint(), std::move(value_properties)};
|
||||
if (is_in_edge) {
|
||||
result_row.in_edges_with_all_properties.push_back(std::move(edges));
|
||||
} else {
|
||||
result_row.out_edges_with_all_properties.push_back(std::move(edges));
|
||||
}
|
||||
return true;
|
||||
};
|
||||
} else {
|
||||
// TODO(gvolfing) - do we want to set the action_successful here?
|
||||
edge_filler = [&req](const EdgeAccessor &edge, const bool is_in_edge,
|
||||
msgs::ExpandOneResultRow &result_row) -> bool {
|
||||
std::vector<msgs::Value> value_properties;
|
||||
value_properties.reserve(req.edge_properties.value().size());
|
||||
for (const auto &edge_prop : req.edge_properties.value()) {
|
||||
auto property_result = edge.GetProperty(edge_prop, View::NEW);
|
||||
if (property_result.HasError()) {
|
||||
spdlog::debug("Encountered an error while trying to get edge properties. Transaction id: {}",
|
||||
req.transaction_id.logical_id);
|
||||
return false;
|
||||
}
|
||||
value_properties.emplace_back(FromPropertyValueToValue(std::move(property_result.GetValue())));
|
||||
}
|
||||
using EdgeWithSpecificProperties = msgs::ExpandOneResultRow::EdgeWithSpecificProperties;
|
||||
EdgeWithSpecificProperties edges{ToMsgsVertexId(edge.FromVertex()), msgs::EdgeType{edge.EdgeType()},
|
||||
edge.Gid().AsUint(), std::move(value_properties)};
|
||||
if (is_in_edge) {
|
||||
result_row.in_edges_with_specific_properties.push_back(std::move(edges));
|
||||
} else {
|
||||
result_row.out_edges_with_specific_properties.push_back(std::move(edges));
|
||||
}
|
||||
return true;
|
||||
};
|
||||
}
|
||||
|
||||
return edge_filler;
|
||||
}
|
||||
|
||||
}; // namespace
|
||||
msgs::WriteResponses ShardRsm::ApplyWrite(msgs::CreateVerticesRequest &&req) {
|
||||
auto acc = shard_->Access(req.transaction_id);
|
||||
|
||||
@ -433,11 +474,12 @@ msgs::WriteResponses ShardRsm::ApplyWrite(msgs::CreateVerticesRequest &&req) {
|
||||
auto converted_property_map = ConvertPropertyMap(std::move(new_vertex.properties));
|
||||
|
||||
// TODO(gvolfing) make sure if this conversion is actually needed.
|
||||
std::vector<memgraph::storage::v3::LabelId> converted_label_ids;
|
||||
std::vector<LabelId> converted_label_ids;
|
||||
converted_label_ids.reserve(new_vertex.label_ids.size());
|
||||
for (const auto &label_id : new_vertex.label_ids) {
|
||||
converted_label_ids.emplace_back(label_id.id);
|
||||
}
|
||||
|
||||
std::transform(new_vertex.label_ids.begin(), new_vertex.label_ids.end(), std::back_inserter(converted_label_ids),
|
||||
[](const auto &label_id) { return label_id.id; });
|
||||
|
||||
// TODO(jbajic) sending primary key as vector breaks validation on storage side
|
||||
// cannot map id -> value
|
||||
PrimaryKey transformed_pk;
|
||||
@ -466,7 +508,7 @@ msgs::WriteResponses ShardRsm::ApplyWrite(msgs::CreateVerticesRequest &&req) {
|
||||
}
|
||||
}
|
||||
|
||||
return memgraph::msgs::CreateVerticesResponse{.success = action_successful};
|
||||
return msgs::CreateVerticesResponse{.success = action_successful};
|
||||
}
|
||||
|
||||
msgs::WriteResponses ShardRsm::ApplyWrite(msgs::UpdateVerticesRequest &&req) {
|
||||
@ -515,7 +557,7 @@ msgs::WriteResponses ShardRsm::ApplyWrite(msgs::UpdateVerticesRequest &&req) {
|
||||
}
|
||||
}
|
||||
|
||||
return memgraph::msgs::UpdateVerticesResponse{.success = action_successful};
|
||||
return msgs::UpdateVerticesResponse{.success = action_successful};
|
||||
}
|
||||
|
||||
msgs::WriteResponses ShardRsm::ApplyWrite(msgs::DeleteVerticesRequest &&req) {
|
||||
@ -538,7 +580,7 @@ msgs::WriteResponses ShardRsm::ApplyWrite(msgs::DeleteVerticesRequest &&req) {
|
||||
// Since we will not have different kinds of deletion types in one transaction,
|
||||
// we dont have to enter the switch statement on every iteration. Optimize this.
|
||||
switch (req.deletion_type) {
|
||||
case memgraph::msgs::DeleteVerticesRequest::DeletionType::DELETE: {
|
||||
case msgs::DeleteVerticesRequest::DeletionType::DELETE: {
|
||||
auto result = acc.DeleteVertex(&vertex_acc.value());
|
||||
if (result.HasError() || !(result.GetValue().has_value())) {
|
||||
action_successful = false;
|
||||
@ -547,7 +589,7 @@ msgs::WriteResponses ShardRsm::ApplyWrite(msgs::DeleteVerticesRequest &&req) {
|
||||
|
||||
break;
|
||||
}
|
||||
case memgraph::msgs::DeleteVerticesRequest::DeletionType::DETACH_DELETE: {
|
||||
case msgs::DeleteVerticesRequest::DeletionType::DETACH_DELETE: {
|
||||
auto result = acc.DetachDeleteVertex(&vertex_acc.value());
|
||||
if (result.HasError() || !(result.GetValue().has_value())) {
|
||||
action_successful = false;
|
||||
@ -561,41 +603,52 @@ msgs::WriteResponses ShardRsm::ApplyWrite(msgs::DeleteVerticesRequest &&req) {
|
||||
}
|
||||
}
|
||||
|
||||
return memgraph::msgs::DeleteVerticesResponse{.success = action_successful};
|
||||
return msgs::DeleteVerticesResponse{.success = action_successful};
|
||||
}
|
||||
|
||||
msgs::WriteResponses ShardRsm::ApplyWrite(msgs::CreateEdgesRequest &&req) {
|
||||
msgs::WriteResponses ShardRsm::ApplyWrite(msgs::CreateExpandRequest &&req) {
|
||||
auto acc = shard_->Access(req.transaction_id);
|
||||
bool action_successful = true;
|
||||
|
||||
for (auto &edge : req.edges) {
|
||||
auto vertex_acc_from_primary_key = edge.src.second;
|
||||
auto vertex_from_acc = acc.FindVertex(ConvertPropertyVector(std::move(vertex_acc_from_primary_key)), View::OLD);
|
||||
for (auto &new_expand : req.new_expands) {
|
||||
const auto from_vertex_id =
|
||||
v3::VertexId{new_expand.src_vertex.first.id, ConvertPropertyVector(std::move(new_expand.src_vertex.second))};
|
||||
|
||||
auto vertex_acc_to_primary_key = edge.dst.second;
|
||||
auto vertex_to_acc = acc.FindVertex(ConvertPropertyVector(std::move(vertex_acc_to_primary_key)), View::OLD);
|
||||
const auto to_vertex_id =
|
||||
VertexId{new_expand.dest_vertex.first.id, ConvertPropertyVector(std::move(new_expand.dest_vertex.second))};
|
||||
|
||||
if (!vertex_from_acc || !vertex_to_acc) {
|
||||
if (!(shard_->IsVertexBelongToShard(from_vertex_id) || shard_->IsVertexBelongToShard(to_vertex_id))) {
|
||||
action_successful = false;
|
||||
spdlog::debug("Error while trying to insert edge, vertex does not exist. Transaction id: {}",
|
||||
spdlog::debug("Error while trying to insert edge, none of the vertices belong to this shard. Transaction id: {}",
|
||||
req.transaction_id.logical_id);
|
||||
break;
|
||||
}
|
||||
|
||||
auto from_vertex_id = VertexId(edge.src.first.id, ConvertPropertyVector(std::move(edge.src.second)));
|
||||
auto to_vertex_id = VertexId(edge.dst.first.id, ConvertPropertyVector(std::move(edge.dst.second)));
|
||||
auto edge_acc =
|
||||
acc.CreateEdge(from_vertex_id, to_vertex_id, EdgeTypeId::FromUint(edge.type.id), Gid::FromUint(edge.id.gid));
|
||||
|
||||
if (edge_acc.HasError()) {
|
||||
auto edge_acc = acc.CreateEdge(from_vertex_id, to_vertex_id, new_expand.type.id, Gid::FromUint(new_expand.id.gid));
|
||||
if (edge_acc.HasValue()) {
|
||||
auto edge = edge_acc.GetValue();
|
||||
if (!new_expand.properties.empty()) {
|
||||
for (const auto &[property, value] : new_expand.properties) {
|
||||
if (const auto maybe_error = edge.SetProperty(property, ToPropertyValue(value)); maybe_error.HasError()) {
|
||||
action_successful = false;
|
||||
spdlog::debug("Setting edge property was not successful. Transaction id: {}",
|
||||
req.transaction_id.logical_id);
|
||||
break;
|
||||
}
|
||||
if (!action_successful) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
action_successful = false;
|
||||
spdlog::debug("Creating edge was not successful. Transaction id: {}", req.transaction_id.logical_id);
|
||||
break;
|
||||
}
|
||||
|
||||
// Add properties to the edge if there is any
|
||||
if (edge.properties) {
|
||||
for (auto &[edge_prop_key, edge_prop_val] : edge.properties.value()) {
|
||||
if (!new_expand.properties.empty()) {
|
||||
for (auto &[edge_prop_key, edge_prop_val] : new_expand.properties) {
|
||||
auto set_result = edge_acc->SetProperty(edge_prop_key, ToPropertyValue(std::move(edge_prop_val)));
|
||||
if (set_result.HasError()) {
|
||||
action_successful = false;
|
||||
@ -607,7 +660,7 @@ msgs::WriteResponses ShardRsm::ApplyWrite(msgs::CreateEdgesRequest &&req) {
|
||||
}
|
||||
}
|
||||
|
||||
return memgraph::msgs::CreateEdgesResponse{.success = action_successful};
|
||||
return msgs::CreateExpandResponse{.success = action_successful};
|
||||
}
|
||||
|
||||
msgs::WriteResponses ShardRsm::ApplyWrite(msgs::DeleteEdgesRequest &&req) {
|
||||
@ -629,10 +682,11 @@ msgs::WriteResponses ShardRsm::ApplyWrite(msgs::DeleteEdgesRequest &&req) {
|
||||
}
|
||||
}
|
||||
|
||||
return memgraph::msgs::DeleteEdgesResponse{.success = action_successful};
|
||||
return msgs::DeleteEdgesResponse{.success = action_successful};
|
||||
}
|
||||
|
||||
msgs::WriteResponses ShardRsm::ApplyWrite(msgs::UpdateEdgesRequest &&req) {
|
||||
// TODO(antaljanosbenjamin): handle when the vertex is the destination vertex
|
||||
auto acc = shard_->Access(req.transaction_id);
|
||||
|
||||
bool action_successful = true;
|
||||
@ -687,50 +741,90 @@ msgs::WriteResponses ShardRsm::ApplyWrite(msgs::UpdateEdgesRequest &&req) {
|
||||
}
|
||||
}
|
||||
|
||||
return memgraph::msgs::UpdateEdgesResponse{.success = action_successful};
|
||||
return msgs::UpdateEdgesResponse{.success = action_successful};
|
||||
}
|
||||
|
||||
msgs::ReadResponses ShardRsm::HandleRead(msgs::ScanVerticesRequest &&req) {
|
||||
auto acc = shard_->Access(req.transaction_id);
|
||||
bool action_successful = true;
|
||||
|
||||
std::vector<memgraph::msgs::ScanResultRow> results;
|
||||
std::optional<memgraph::msgs::VertexId> next_start_id;
|
||||
std::vector<msgs::ScanResultRow> results;
|
||||
if (req.batch_limit) {
|
||||
results.reserve(*req.batch_limit);
|
||||
}
|
||||
std::optional<msgs::VertexId> next_start_id;
|
||||
|
||||
const auto view = View(req.storage_view);
|
||||
auto vertex_iterable = acc.Vertices(view);
|
||||
bool did_reach_starting_point = false;
|
||||
uint64_t sample_counter = 0;
|
||||
|
||||
const auto start_ids = ConvertPropertyVector(std::move(req.start_id.second));
|
||||
|
||||
for (auto it = vertex_iterable.begin(); it != vertex_iterable.end(); ++it) {
|
||||
const auto &vertex = *it;
|
||||
|
||||
if (start_ids <= vertex.PrimaryKey(View(req.storage_view)).GetValue()) {
|
||||
did_reach_starting_point = true;
|
||||
auto dba = DbAccessor{&acc};
|
||||
const auto emplace_scan_result = [&](const VertexAccessor &vertex) {
|
||||
std::vector<Value> expression_results;
|
||||
// TODO(gvolfing) it should be enough to check these only once.
|
||||
if (vertex.Properties(View(req.storage_view)).HasError()) {
|
||||
action_successful = false;
|
||||
spdlog::debug("Could not retrieve properties from VertexAccessor. Transaction id: {}",
|
||||
req.transaction_id.logical_id);
|
||||
}
|
||||
if (!req.filter_expressions.empty()) {
|
||||
// NOTE - DbAccessor might get removed in the future.
|
||||
const bool eval = FilterOnVertex(dba, vertex, req.filter_expressions, expr::identifier_node_symbol);
|
||||
if (!eval) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (!req.vertex_expressions.empty()) {
|
||||
// NOTE - DbAccessor might get removed in the future.
|
||||
expression_results = ConvertToValueVectorFromTypedValueVector(
|
||||
EvaluateVertexExpressions(dba, vertex, req.vertex_expressions, expr::identifier_node_symbol));
|
||||
}
|
||||
|
||||
if (did_reach_starting_point) {
|
||||
std::optional<std::map<PropertyId, Value>> found_props;
|
||||
std::optional<std::map<PropertyId, Value>> found_props;
|
||||
|
||||
if (req.props_to_return) {
|
||||
found_props = CollectSpecificPropertiesFromAccessor(vertex, req.props_to_return.value(), view);
|
||||
} else {
|
||||
const auto *schema = shard_->GetSchema(shard_->PrimaryLabel());
|
||||
if (req.props_to_return) {
|
||||
found_props = CollectSpecificPropertiesFromAccessor(vertex, req.props_to_return.value(), view);
|
||||
} else {
|
||||
found_props = CollectAllPropertiesFromAccessor(vertex, view, schema);
|
||||
}
|
||||
found_props = CollectAllPropertiesFromAccessor(vertex, view, schema);
|
||||
}
|
||||
|
||||
// TODO(gvolfing) -VERIFY-
|
||||
// Vertex is separated from the properties in the response.
|
||||
// Is it useful to return just a vertex without the properties?
|
||||
if (!found_props) {
|
||||
action_successful = false;
|
||||
}
|
||||
|
||||
results.emplace_back(msgs::ScanResultRow{.vertex = ConstructValueVertex(vertex, view).vertex_v,
|
||||
.props = FromMap(found_props.value()),
|
||||
.evaluated_vertex_expressions = std::move(expression_results)});
|
||||
};
|
||||
|
||||
const auto start_id = ConvertPropertyVector(std::move(req.start_id.second));
|
||||
uint64_t sample_counter{0};
|
||||
auto vertex_iterable = acc.Vertices(view);
|
||||
if (!req.order_bys.empty()) {
|
||||
const auto ordered = OrderByElements(acc, dba, vertex_iterable, req.order_bys);
|
||||
// we are traversing Elements
|
||||
auto it = GetStartOrderedElementsIterator(ordered, start_id, View(req.storage_view));
|
||||
for (; it != ordered.end(); ++it) {
|
||||
emplace_scan_result(it->vertex_acc);
|
||||
++sample_counter;
|
||||
if (req.batch_limit && sample_counter == req.batch_limit) {
|
||||
// Reached the maximum specified batch size.
|
||||
// Get the next element before exiting.
|
||||
++it;
|
||||
if (it != ordered.end()) {
|
||||
const auto &next_vertex = it->vertex_acc;
|
||||
next_start_id = ConstructValueVertex(next_vertex, view).vertex_v.id;
|
||||
}
|
||||
|
||||
// TODO(gvolfing) -VERIFY-
|
||||
// Vertex is seperated from the properties in the response.
|
||||
// Is it useful to return just a vertex without the properties?
|
||||
if (!found_props) {
|
||||
action_successful = false;
|
||||
break;
|
||||
}
|
||||
|
||||
results.emplace_back(msgs::ScanResultRow{.vertex = ConstructValueVertex(vertex, view).vertex_v,
|
||||
.props = FromMap(found_props.value())});
|
||||
}
|
||||
} else {
|
||||
// We are going through VerticesIterable::Iterator
|
||||
auto it = GetStartVertexIterator(vertex_iterable, start_id, View(req.storage_view));
|
||||
for (; it != vertex_iterable.end(); ++it) {
|
||||
emplace_scan_result(*it);
|
||||
|
||||
++sample_counter;
|
||||
if (req.batch_limit && sample_counter == req.batch_limit) {
|
||||
@ -744,7 +838,7 @@ msgs::ReadResponses ShardRsm::HandleRead(msgs::ScanVerticesRequest &&req) {
|
||||
}
|
||||
}
|
||||
|
||||
memgraph::msgs::ScanVerticesResponse resp{};
|
||||
msgs::ScanVerticesResponse resp{};
|
||||
resp.success = action_successful;
|
||||
|
||||
if (action_successful) {
|
||||
@ -759,10 +853,30 @@ msgs::ReadResponses ShardRsm::HandleRead(msgs::ExpandOneRequest &&req) {
|
||||
auto acc = shard_->Access(req.transaction_id);
|
||||
bool action_successful = true;
|
||||
|
||||
std::vector<memgraph::msgs::ExpandOneResultRow> results;
|
||||
std::vector<msgs::ExpandOneResultRow> results;
|
||||
|
||||
auto maybe_filter_based_on_edge_uniquness = InitializeEdgeUniqunessFunction(req.only_unique_neighbor_rows);
|
||||
auto edge_filler = InitializeEdgeFillerFunction(req);
|
||||
|
||||
for (auto &src_vertex : req.src_vertices) {
|
||||
auto result = GetExpandOneResult(acc, src_vertex, req);
|
||||
// Get Vertex acc
|
||||
auto src_vertex_acc_opt = acc.FindVertex(ConvertPropertyVector((src_vertex.second)), View::NEW);
|
||||
if (!src_vertex_acc_opt) {
|
||||
action_successful = false;
|
||||
spdlog::debug("Encountered an error while trying to obtain VertexAccessor. Transaction id: {}",
|
||||
req.transaction_id.logical_id);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!req.filters.empty()) {
|
||||
// NOTE - DbAccessor might get removed in the future.
|
||||
auto dba = DbAccessor{&acc};
|
||||
const bool eval = FilterOnVertex(dba, src_vertex_acc_opt.value(), req.filters, expr::identifier_node_symbol);
|
||||
if (!eval) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
auto result = GetExpandOneResult(acc, src_vertex, req, maybe_filter_based_on_edge_uniquness, edge_filler);
|
||||
|
||||
if (!result) {
|
||||
action_successful = false;
|
||||
@ -772,7 +886,8 @@ msgs::ReadResponses ShardRsm::HandleRead(msgs::ExpandOneRequest &&req) {
|
||||
results.emplace_back(result.value());
|
||||
}
|
||||
|
||||
memgraph::msgs::ExpandOneResponse resp{};
|
||||
msgs::ExpandOneResponse resp{};
|
||||
resp.success = action_successful;
|
||||
if (action_successful) {
|
||||
resp.result = std::move(results);
|
||||
}
|
||||
@ -782,12 +897,12 @@ msgs::ReadResponses ShardRsm::HandleRead(msgs::ExpandOneRequest &&req) {
|
||||
|
||||
msgs::WriteResponses ShardRsm::ApplyWrite(msgs::CommitRequest &&req) {
|
||||
shard_->Access(req.transaction_id).Commit(req.commit_timestamp);
|
||||
return memgraph::msgs::CommitResponse{true};
|
||||
return msgs::CommitResponse{true};
|
||||
};
|
||||
|
||||
// NOLINTNEXTLINE(readability-convert-member-functions-to-static)
|
||||
msgs::ReadResponses ShardRsm::HandleRead(msgs::GetPropertiesRequest && /*req*/) {
|
||||
return memgraph::msgs::GetPropertiesResponse{};
|
||||
return msgs::GetPropertiesResponse{};
|
||||
}
|
||||
|
||||
} // namespace memgraph::storage::v3
|
||||
} // namespace memgraph::storage::v3
|
||||
|
@ -35,7 +35,7 @@ class ShardRsm {
|
||||
msgs::WriteResponses ApplyWrite(msgs::DeleteVerticesRequest &&req);
|
||||
msgs::WriteResponses ApplyWrite(msgs::UpdateVerticesRequest &&req);
|
||||
|
||||
msgs::WriteResponses ApplyWrite(msgs::CreateEdgesRequest &&req);
|
||||
msgs::WriteResponses ApplyWrite(msgs::CreateExpandRequest &&req);
|
||||
msgs::WriteResponses ApplyWrite(msgs::DeleteEdgesRequest &&req);
|
||||
msgs::WriteResponses ApplyWrite(msgs::UpdateEdgesRequest &&req);
|
||||
|
||||
|
@ -10,6 +10,7 @@
|
||||
// licenses/APL.txt.
|
||||
#include "query/v2/requests.hpp"
|
||||
#include "storage/v3/property_value.hpp"
|
||||
#include "storage/v3/vertex_id.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
|
||||
#include <map>
|
||||
@ -29,8 +30,8 @@ using memgraph::msgs::Value;
|
||||
using memgraph::msgs::VertexId;
|
||||
|
||||
// TODO(gvolfing use come algorithm instead of explicit for loops)
|
||||
inline memgraph::storage::v3::PropertyValue ToPropertyValue(Value value) {
|
||||
using PV = memgraph::storage::v3::PropertyValue;
|
||||
inline v3::PropertyValue ToPropertyValue(Value value) {
|
||||
using PV = v3::PropertyValue;
|
||||
PV ret;
|
||||
switch (value.type) {
|
||||
case Value::Type::Null:
|
||||
@ -60,13 +61,12 @@ inline memgraph::storage::v3::PropertyValue ToPropertyValue(Value value) {
|
||||
// These are not PropertyValues
|
||||
case Value::Type::Vertex:
|
||||
case Value::Type::Edge:
|
||||
case Value::Type::Path:
|
||||
MG_ASSERT(false, "Not PropertyValue");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
inline Value ToValue(const memgraph::storage::v3::PropertyValue &pv) {
|
||||
inline Value FromPropertyValueToValue(memgraph::storage::v3::PropertyValue &&pv) {
|
||||
using memgraph::storage::v3::PropertyValue;
|
||||
|
||||
switch (pv.type()) {
|
||||
@ -79,17 +79,17 @@ inline Value ToValue(const memgraph::storage::v3::PropertyValue &pv) {
|
||||
case PropertyValue::Type::List: {
|
||||
std::vector<Value> list;
|
||||
list.reserve(pv.ValueList().size());
|
||||
for (const auto &elem : pv.ValueList()) {
|
||||
list.emplace_back(ToValue(elem));
|
||||
for (auto &elem : pv.ValueList()) {
|
||||
list.emplace_back(FromPropertyValueToValue(std::move(elem)));
|
||||
}
|
||||
|
||||
return Value(list);
|
||||
}
|
||||
case PropertyValue::Type::Map: {
|
||||
std::map<std::string, Value> map;
|
||||
for (const auto &[key, val] : pv.ValueMap()) {
|
||||
for (auto &[key, val] : pv.ValueMap()) {
|
||||
// maybe use std::make_pair once the && issue is resolved.
|
||||
map.emplace(key, ToValue(val));
|
||||
map.emplace(key, FromPropertyValueToValue(std::move(val)));
|
||||
}
|
||||
|
||||
return Value(map);
|
||||
@ -97,7 +97,7 @@ inline Value ToValue(const memgraph::storage::v3::PropertyValue &pv) {
|
||||
case PropertyValue::Type::Null:
|
||||
return Value{};
|
||||
case PropertyValue::Type::String:
|
||||
return Value(pv.ValueString());
|
||||
return Value(std::move(pv.ValueString()));
|
||||
case PropertyValue::Type::TemporalData: {
|
||||
// TBD -> we need to specify this in the messages, not a priority.
|
||||
MG_ASSERT(false, "Temporal datatypes are not yet implemented on Value!");
|
||||
@ -106,8 +106,8 @@ inline Value ToValue(const memgraph::storage::v3::PropertyValue &pv) {
|
||||
}
|
||||
}
|
||||
|
||||
inline std::vector<memgraph::storage::v3::PropertyValue> ConvertPropertyVector(std::vector<Value> vec) {
|
||||
std::vector<memgraph::storage::v3::PropertyValue> ret;
|
||||
inline std::vector<v3::PropertyValue> ConvertPropertyVector(std::vector<Value> vec) {
|
||||
std::vector<v3::PropertyValue> ret;
|
||||
ret.reserve(vec.size());
|
||||
|
||||
for (auto &elem : vec) {
|
||||
@ -117,15 +117,18 @@ inline std::vector<memgraph::storage::v3::PropertyValue> ConvertPropertyVector(s
|
||||
return ret;
|
||||
}
|
||||
|
||||
inline std::vector<Value> ConvertValueVector(const std::vector<memgraph::storage::v3::PropertyValue> &vec) {
|
||||
inline std::vector<Value> ConvertValueVector(const std::vector<v3::PropertyValue> &vec) {
|
||||
std::vector<Value> ret;
|
||||
ret.reserve(vec.size());
|
||||
|
||||
for (const auto &elem : vec) {
|
||||
ret.push_back(ToValue(elem));
|
||||
ret.push_back(FromPropertyValueToValue(v3::PropertyValue{elem}));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
inline msgs::VertexId ToMsgsVertexId(const v3::VertexId &vertex_id) {
|
||||
return {msgs::Label{vertex_id.primary_label}, ConvertValueVector(vertex_id.primary_key)};
|
||||
}
|
||||
} // namespace memgraph::storage::conversions
|
||||
|
@ -11,6 +11,7 @@
|
||||
|
||||
#include "storage/v3/vertex_accessor.hpp"
|
||||
|
||||
#include <cstddef>
|
||||
#include <memory>
|
||||
|
||||
#include "storage/v3/conversions.hpp"
|
||||
@ -21,6 +22,7 @@
|
||||
#include "storage/v3/mvcc.hpp"
|
||||
#include "storage/v3/property_value.hpp"
|
||||
#include "storage/v3/schema_validator.hpp"
|
||||
#include "storage/v3/shard.hpp"
|
||||
#include "storage/v3/vertex.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
#include "utils/memory_tracker.hpp"
|
||||
@ -378,6 +380,32 @@ Result<PropertyValue> VertexAccessor::GetProperty(View view, PropertyId property
|
||||
return GetProperty(property, view).GetValue();
|
||||
}
|
||||
|
||||
PropertyValue VertexAccessor::GetPropertyValue(PropertyId property, View view) const {
|
||||
PropertyValue value;
|
||||
|
||||
const auto primary_label = PrimaryLabel(view);
|
||||
if (primary_label.HasError()) {
|
||||
return value;
|
||||
}
|
||||
const auto *schema = vertex_validator_->schema_validator->GetSchema(*primary_label);
|
||||
if (!schema) {
|
||||
return value;
|
||||
}
|
||||
// Find PropertyId index in keystore
|
||||
size_t property_index{0};
|
||||
for (; property_index < schema->second.size(); ++property_index) {
|
||||
if (schema->second[property_index].property_id == property) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
value = vertex_->keys.GetKey(property_index);
|
||||
if (value.IsNull()) {
|
||||
value = vertex_->properties.GetProperty(property);
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
Result<PropertyValue> VertexAccessor::GetProperty(PropertyId property, View view) const {
|
||||
bool exists = true;
|
||||
bool deleted = false;
|
||||
@ -385,7 +413,7 @@ Result<PropertyValue> VertexAccessor::GetProperty(PropertyId property, View view
|
||||
Delta *delta = nullptr;
|
||||
{
|
||||
deleted = vertex_->deleted;
|
||||
value = vertex_->properties.GetProperty(property);
|
||||
value = GetPropertyValue(property, view);
|
||||
delta = vertex_->delta;
|
||||
}
|
||||
ApplyDeltasForRead(transaction_, delta, view, [&exists, &deleted, &value, property](const Delta &delta) {
|
||||
@ -425,6 +453,7 @@ Result<std::map<PropertyId, PropertyValue>> VertexAccessor::Properties(View view
|
||||
Delta *delta = nullptr;
|
||||
{
|
||||
deleted = vertex_->deleted;
|
||||
// TODO(antaljanosbenjamin): This should also return the primary key
|
||||
properties = vertex_->properties.Properties();
|
||||
delta = vertex_->delta;
|
||||
}
|
||||
|
@ -133,6 +133,8 @@ class VertexAccessor final {
|
||||
/// @throw std::bad_alloc
|
||||
Result<PropertyValue> SetProperty(PropertyId property, const PropertyValue &value);
|
||||
|
||||
PropertyValue GetPropertyValue(PropertyId property, View view) const;
|
||||
|
||||
Result<void> CheckVertexExistence(View view) const;
|
||||
|
||||
Vertex *vertex_;
|
||||
|
@ -11,6 +11,8 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <tuple>
|
||||
|
||||
#include "storage/v3/id_types.hpp"
|
||||
#include "storage/v3/key_store.hpp"
|
||||
|
||||
@ -29,4 +31,9 @@ struct VertexId {
|
||||
inline bool operator==(const VertexId &lhs, const VertexId &rhs) {
|
||||
return lhs.primary_label == rhs.primary_label && lhs.primary_key == rhs.primary_key;
|
||||
}
|
||||
|
||||
inline bool operator<(const VertexId &lhs, const VertexId &rhs) {
|
||||
return std::tie(lhs.primary_label, lhs.primary_key) < std::tie(rhs.primary_label, rhs.primary_key);
|
||||
}
|
||||
|
||||
} // namespace memgraph::storage::v3
|
||||
|
63
src/utils/print_helpers.hpp
Normal file
63
src/utils/print_helpers.hpp
Normal file
@ -0,0 +1,63 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
namespace memgraph::utils::print_helpers {
|
||||
|
||||
template <typename T>
|
||||
std::ostream &operator<<(std::ostream &in, const std::vector<T> &vector) {
|
||||
in << "[";
|
||||
bool first = true;
|
||||
for (const auto &item : vector) {
|
||||
if (!first) {
|
||||
in << ", ";
|
||||
}
|
||||
first = false;
|
||||
in << item;
|
||||
}
|
||||
in << "]";
|
||||
return in;
|
||||
}
|
||||
|
||||
template <typename K, typename V>
|
||||
std::ostream &operator<<(std::ostream &in, const std::map<K, V> &map) {
|
||||
in << "{";
|
||||
bool first = true;
|
||||
for (const auto &[a, b] : map) {
|
||||
if (!first) {
|
||||
in << ", ";
|
||||
}
|
||||
first = false;
|
||||
in << a;
|
||||
in << ": ";
|
||||
in << b;
|
||||
}
|
||||
in << "}";
|
||||
return in;
|
||||
}
|
||||
|
||||
template <typename K, typename V>
|
||||
std::ostream &operator<<(std::ostream &in, const std::pair<K, V> &pair) {
|
||||
const auto &[a, b] = pair;
|
||||
in << "(";
|
||||
in << a;
|
||||
in << ", ";
|
||||
in << b;
|
||||
in << ")";
|
||||
return in;
|
||||
}
|
||||
|
||||
} // namespace memgraph::utils::print_helpers
|
@ -48,15 +48,28 @@ def test_vertex_creation_and_scanall(connection):
|
||||
wait_for_shard_manager_to_initialize()
|
||||
cursor = connection.cursor()
|
||||
|
||||
assert has_n_result_row(cursor, "CREATE (n :label {property:1, asd:2})", 0)
|
||||
assert has_n_result_row(cursor, "CREATE (n :label {property:2, asd:2})", 0)
|
||||
assert has_n_result_row(cursor, "CREATE (n :label {property:3, asd:2})", 0)
|
||||
assert has_n_result_row(cursor, "CREATE (n :label {property:4, asd:2})", 0)
|
||||
assert has_n_result_row(cursor, "CREATE (n :label {property:5, asd:2})", 0)
|
||||
assert has_n_result_row(cursor, "CREATE (n :label {property:1})", 0)
|
||||
assert has_n_result_row(cursor, "CREATE (n :label {property:2})", 0)
|
||||
assert has_n_result_row(cursor, "CREATE (n :label {property:3})", 0)
|
||||
|
||||
assert has_n_result_row(cursor, "MATCH (n) RETURN n", 5)
|
||||
assert has_n_result_row(cursor, "MATCH (n) RETURN *", 5)
|
||||
assert has_n_result_row(cursor, "MATCH (n :label) RETURN *", 5)
|
||||
assert has_n_result_row(cursor, "MATCH (n) RETURN n", 3)
|
||||
assert has_n_result_row(cursor, "MATCH (n) RETURN *", 3)
|
||||
assert has_n_result_row(cursor, "MATCH (n :label) RETURN *", 3)
|
||||
|
||||
assert has_n_result_row(cursor, "MATCH (n), (m) CREATE (n)-[:TO]->(m)", 0)
|
||||
|
||||
results = execute_and_fetch_all(cursor, "MATCH (n)-[r]->(m) RETURN n,r,m")
|
||||
assert len(results) == 9
|
||||
for (n, r, m) in results:
|
||||
n_props = n.properties
|
||||
assert len(n_props) == 0, "n is not expected to have properties, update the test!"
|
||||
assert len(n.labels) == 0, "n is not expected to have labels, update the test!"
|
||||
|
||||
assert r.type == "TO"
|
||||
|
||||
m_props = m.properties
|
||||
assert m_props["property"] <= 3 and m_props["property"] >= 0, "Wrong key"
|
||||
assert len(m.labels) == 0, "m is not expected to have labels, update the test!"
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
@ -35,12 +35,18 @@ target_link_libraries(${test_prefix}kvstore_console mg-kvstore gflags mg-utils)
|
||||
add_manual_test(query_hash.cpp)
|
||||
target_link_libraries(${test_prefix}query_hash mg-query)
|
||||
|
||||
add_manual_test(query_planner.cpp interactive_planning.cpp)
|
||||
add_manual_test(query_planner.cpp interactive/planning.cpp)
|
||||
target_link_libraries(${test_prefix}query_planner mg-query)
|
||||
if (READLINE_FOUND)
|
||||
target_link_libraries(${test_prefix}query_planner readline)
|
||||
endif()
|
||||
|
||||
add_manual_test(query_execution_dummy.cpp)
|
||||
target_link_libraries(${test_prefix}query_execution_dummy mg-query)
|
||||
if (READLINE_FOUND)
|
||||
target_link_libraries(${test_prefix}query_execution_dummy readline)
|
||||
endif()
|
||||
|
||||
add_manual_test(expression_pretty_printer.cpp)
|
||||
target_link_libraries(${test_prefix}expression_pretty_printer mg-query)
|
||||
|
||||
|
@ -9,82 +9,13 @@
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "interactive_planning.hpp"
|
||||
#pragma once
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdlib>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
|
||||
#include <gflags/gflags.h>
|
||||
|
||||
#include "query/context.hpp"
|
||||
#include "query/db_accessor.hpp"
|
||||
#include "query/frontend/ast/cypher_main_visitor.hpp"
|
||||
#include "query/frontend/opencypher/parser.hpp"
|
||||
#include "query/frontend/semantic/symbol_generator.hpp"
|
||||
#include "query/plan/operator.hpp"
|
||||
#include "query/plan/planner.hpp"
|
||||
#include "query/plan/pretty_print.hpp"
|
||||
#include "query/typed_value.hpp"
|
||||
#include "storage/v2/property_value.hpp"
|
||||
#include "utils/string.hpp"
|
||||
#include "readline.hpp"
|
||||
#include "timer.hpp"
|
||||
|
||||
DEFINE_string(save_mock_db_file, "", "File where the mock database should be saved (on exit)");
|
||||
|
||||
DEFINE_string(load_mock_db_file, "", "File from which the mock database should be loaded");
|
||||
|
||||
#ifdef HAS_READLINE
|
||||
// TODO: This should probably be moved to some utils file.
|
||||
|
||||
#include "readline/history.h"
|
||||
#include "readline/readline.h"
|
||||
|
||||
/**
|
||||
* Helper function that reads a line from the
|
||||
* standard input using the 'readline' lib.
|
||||
* Adds support for history and reverse-search.
|
||||
*
|
||||
* @param prompt The prompt to display.
|
||||
* @return A single command the user entered, or nullopt on EOF.
|
||||
*/
|
||||
std::optional<std::string> ReadLine(const std::string &prompt) {
|
||||
char *line = readline(prompt.c_str());
|
||||
if (!line) return std::nullopt;
|
||||
|
||||
if (*line) add_history(line);
|
||||
std::string r_val(line);
|
||||
free(line);
|
||||
return r_val;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
std::optional<std::string> ReadLine(const std::string &prompt) {
|
||||
std::cout << prompt;
|
||||
std::string line;
|
||||
std::getline(std::cin, line);
|
||||
if (std::cin.eof()) return std::nullopt;
|
||||
return line;
|
||||
}
|
||||
|
||||
#endif // HAS_READLINE
|
||||
|
||||
// Repeats the prompt untile the user inputs an integer.
|
||||
int64_t ReadInt(const std::string &prompt) {
|
||||
int64_t val = 0;
|
||||
std::stringstream ss;
|
||||
do {
|
||||
auto line = ReadLine(prompt);
|
||||
if (!line) continue;
|
||||
ss.str(*line);
|
||||
ss.clear();
|
||||
ss >> val;
|
||||
} while (ss.fail() || !ss.eof());
|
||||
return val;
|
||||
}
|
||||
|
||||
bool AskYesNo(const std::string &prompt) {
|
||||
inline bool AskYesNo(const std::string &prompt) {
|
||||
while (auto line = ReadLine(prompt + " (y/n) ")) {
|
||||
if (*line == "y" || *line == "Y") return true;
|
||||
if (*line == "n" || *line == "N") return false;
|
||||
@ -92,48 +23,6 @@ bool AskYesNo(const std::string &prompt) {
|
||||
return false;
|
||||
}
|
||||
|
||||
class Timer {
|
||||
public:
|
||||
void Start() {
|
||||
duration_ = duration_.zero();
|
||||
start_time_ = std::chrono::steady_clock::now();
|
||||
}
|
||||
|
||||
void Pause() {
|
||||
if (pause_ == 0) {
|
||||
duration_ += std::chrono::steady_clock::now() - start_time_;
|
||||
}
|
||||
++pause_;
|
||||
}
|
||||
|
||||
void Resume() {
|
||||
if (pause_ == 1) {
|
||||
start_time_ = std::chrono::steady_clock::now();
|
||||
}
|
||||
pause_ = std::max(0, pause_ - 1);
|
||||
}
|
||||
|
||||
template <class TFun>
|
||||
auto WithPause(const TFun &fun) {
|
||||
Pause();
|
||||
auto ret = fun();
|
||||
Resume();
|
||||
return std::move(ret);
|
||||
}
|
||||
|
||||
std::chrono::duration<double> Elapsed() {
|
||||
if (pause_ == 0) {
|
||||
return duration_ + (std::chrono::steady_clock::now() - start_time_);
|
||||
}
|
||||
return duration_;
|
||||
}
|
||||
|
||||
private:
|
||||
std::chrono::duration<double> duration_;
|
||||
std::chrono::time_point<std::chrono::steady_clock> start_time_;
|
||||
int pause_ = 0;
|
||||
};
|
||||
|
||||
// Dummy DbAccessor which forwards user input for various vertex counts.
|
||||
class InteractiveDbAccessor {
|
||||
public:
|
||||
@ -343,164 +232,3 @@ class InteractiveDbAccessor {
|
||||
return memgraph::storage::PropertyValue(val);
|
||||
}
|
||||
};
|
||||
|
||||
DEFCOMMAND(Top) {
|
||||
int64_t n_plans = 0;
|
||||
std::stringstream ss(args[0]);
|
||||
ss >> n_plans;
|
||||
if (ss.fail() || !ss.eof()) return;
|
||||
n_plans = std::min(static_cast<int64_t>(plans.size()), n_plans);
|
||||
for (int64_t i = 0; i < n_plans; ++i) {
|
||||
std::cout << "---- Plan #" << i << " ---- " << std::endl;
|
||||
std::cout << "cost: " << plans[i].cost << std::endl;
|
||||
memgraph::query::plan::PrettyPrint(dba, plans[i].final_plan.get());
|
||||
std::cout << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
DEFCOMMAND(Show) {
|
||||
int64_t plan_ix = 0;
|
||||
std::stringstream ss(args[0]);
|
||||
ss >> plan_ix;
|
||||
if (ss.fail() || !ss.eof() || plan_ix >= plans.size()) return;
|
||||
const auto &plan = plans[plan_ix].final_plan;
|
||||
auto cost = plans[plan_ix].cost;
|
||||
std::cout << "Plan cost: " << cost << std::endl;
|
||||
memgraph::query::plan::PrettyPrint(dba, plan.get());
|
||||
}
|
||||
|
||||
DEFCOMMAND(ShowUnoptimized) {
|
||||
int64_t plan_ix = 0;
|
||||
std::stringstream ss(args[0]);
|
||||
ss >> plan_ix;
|
||||
if (ss.fail() || !ss.eof() || plan_ix >= plans.size()) return;
|
||||
const auto &plan = plans[plan_ix].unoptimized_plan;
|
||||
memgraph::query::plan::PrettyPrint(dba, plan.get());
|
||||
}
|
||||
|
||||
DEFCOMMAND(Help);
|
||||
|
||||
std::map<std::string, Command> commands = {
|
||||
{"top", {TopCommand, 1, "Show top N plans"}},
|
||||
{"show", {ShowCommand, 1, "Show the Nth plan"}},
|
||||
{"show-unoptimized", {ShowUnoptimizedCommand, 1, "Show the Nth plan in its original, unoptimized form"}},
|
||||
{"help", {HelpCommand, 0, "Show available commands"}},
|
||||
};
|
||||
|
||||
void AddCommand(const std::string &name, const Command &command) { commands[name] = command; }
|
||||
|
||||
DEFCOMMAND(Help) {
|
||||
std::cout << "Available commands:" << std::endl;
|
||||
for (const auto &command : commands) {
|
||||
std::cout << command.first;
|
||||
for (int i = 1; i <= command.second.arg_count; ++i) {
|
||||
std::cout << " arg" << i;
|
||||
}
|
||||
std::cout << " -- " << command.second.documentation << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
void ExaminePlans(memgraph::query::DbAccessor *dba, const memgraph::query::SymbolTable &symbol_table,
|
||||
std::vector<InteractivePlan> &plans, const memgraph::query::AstStorage &ast) {
|
||||
while (true) {
|
||||
auto line = ReadLine("plan? ");
|
||||
if (!line || *line == "quit") break;
|
||||
auto words = memgraph::utils::Split(memgraph::utils::ToLowerCase(*line));
|
||||
if (words.empty()) continue;
|
||||
auto command_name = words[0];
|
||||
std::vector<std::string> args(words.begin() + 1, words.end());
|
||||
auto command_it = commands.find(command_name);
|
||||
if (command_it == commands.end()) {
|
||||
std::cout << "Undefined command: '" << command_name << "'. Try 'help'." << std::endl;
|
||||
continue;
|
||||
}
|
||||
const auto &command = command_it->second;
|
||||
if (args.size() < command.arg_count) {
|
||||
std::cout << command_name << " expects " << command.arg_count << " arguments" << std::endl;
|
||||
continue;
|
||||
}
|
||||
command.function(*dba, symbol_table, plans, args, ast);
|
||||
}
|
||||
}
|
||||
|
||||
memgraph::query::Query *MakeAst(const std::string &query, memgraph::query::AstStorage *storage) {
|
||||
memgraph::query::frontend::ParsingContext parsing_context;
|
||||
parsing_context.is_query_cached = false;
|
||||
// query -> AST
|
||||
auto parser = std::make_unique<memgraph::query::frontend::opencypher::Parser>(query);
|
||||
// AST -> high level tree
|
||||
memgraph::query::frontend::CypherMainVisitor visitor(parsing_context, storage);
|
||||
visitor.visit(parser->tree());
|
||||
return visitor.query();
|
||||
}
|
||||
|
||||
// Returns a list of InteractivePlan instances, sorted in the ascending order by
|
||||
// cost.
|
||||
auto MakeLogicalPlans(memgraph::query::CypherQuery *query, memgraph::query::AstStorage &ast,
|
||||
memgraph::query::SymbolTable &symbol_table, InteractiveDbAccessor *dba) {
|
||||
auto query_parts = memgraph::query::plan::CollectQueryParts(symbol_table, ast, query);
|
||||
std::vector<InteractivePlan> interactive_plans;
|
||||
auto ctx = memgraph::query::plan::MakePlanningContext(&ast, &symbol_table, query, dba);
|
||||
if (query_parts.query_parts.size() <= 0) {
|
||||
std::cerr << "Failed to extract query parts" << std::endl;
|
||||
std::exit(EXIT_FAILURE);
|
||||
}
|
||||
memgraph::query::Parameters parameters;
|
||||
memgraph::query::plan::PostProcessor post_process(parameters);
|
||||
auto plans = memgraph::query::plan::MakeLogicalPlanForSingleQuery<memgraph::query::plan::VariableStartPlanner>(
|
||||
query_parts.query_parts.at(0).single_query_parts, &ctx);
|
||||
for (auto plan : plans) {
|
||||
memgraph::query::AstStorage ast_copy;
|
||||
auto unoptimized_plan = plan->Clone(&ast_copy);
|
||||
auto rewritten_plan = post_process.Rewrite(std::move(plan), &ctx);
|
||||
double cost = post_process.EstimatePlanCost(rewritten_plan, dba);
|
||||
interactive_plans.push_back(
|
||||
InteractivePlan{std::move(unoptimized_plan), std::move(ast_copy), std::move(rewritten_plan), cost});
|
||||
}
|
||||
std::stable_sort(interactive_plans.begin(), interactive_plans.end(),
|
||||
[](const auto &a, const auto &b) { return a.cost < b.cost; });
|
||||
return interactive_plans;
|
||||
}
|
||||
|
||||
void RunInteractivePlanning(memgraph::query::DbAccessor *dba) {
|
||||
std::string in_db_filename(memgraph::utils::Trim(FLAGS_load_mock_db_file));
|
||||
if (!in_db_filename.empty() && !std::filesystem::exists(in_db_filename)) {
|
||||
std::cerr << "File '" << in_db_filename << "' does not exist!" << std::endl;
|
||||
std::exit(EXIT_FAILURE);
|
||||
}
|
||||
Timer planning_timer;
|
||||
InteractiveDbAccessor interactive_db(dba, in_db_filename.empty() ? ReadInt("Vertices in DB: ") : 0, planning_timer);
|
||||
if (!in_db_filename.empty()) {
|
||||
std::ifstream db_file(in_db_filename);
|
||||
interactive_db.Load(db_file);
|
||||
}
|
||||
while (true) {
|
||||
auto line = ReadLine("query? ");
|
||||
if (!line || *line == "quit") break;
|
||||
if (line->empty()) continue;
|
||||
try {
|
||||
memgraph::query::AstStorage ast;
|
||||
auto *query = dynamic_cast<memgraph::query::CypherQuery *>(MakeAst(*line, &ast));
|
||||
if (!query) {
|
||||
throw memgraph::utils::BasicException(
|
||||
"Interactive planning is only avaialable for regular openCypher "
|
||||
"queries.");
|
||||
}
|
||||
auto symbol_table = memgraph::query::MakeSymbolTable(query);
|
||||
planning_timer.Start();
|
||||
auto plans = MakeLogicalPlans(query, ast, symbol_table, &interactive_db);
|
||||
auto planning_time = planning_timer.Elapsed();
|
||||
std::cout << "Planning took " << std::chrono::duration<double, std::milli>(planning_time).count() << "ms"
|
||||
<< std::endl;
|
||||
std::cout << "Generated " << plans.size() << " plans" << std::endl;
|
||||
ExaminePlans(dba, symbol_table, plans, ast);
|
||||
} catch (const memgraph::utils::BasicException &e) {
|
||||
std::cout << "Error: " << e.what() << std::endl;
|
||||
}
|
||||
}
|
||||
std::string db_filename(memgraph::utils::Trim(FLAGS_save_mock_db_file));
|
||||
if (!db_filename.empty()) {
|
||||
std::ofstream db_file(db_filename);
|
||||
interactive_db.Save(db_file);
|
||||
}
|
||||
}
|
70
tests/manual/interactive/plan.hpp
Normal file
70
tests/manual/interactive/plan.hpp
Normal file
@ -0,0 +1,70 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "query/frontend/ast/cypher_main_visitor.hpp"
|
||||
#include "query/frontend/opencypher/parser.hpp"
|
||||
#include "query/plan/planner.hpp"
|
||||
|
||||
#include "db_accessor.hpp"
|
||||
|
||||
struct InteractivePlan {
|
||||
// Original plan after going only through the RuleBasedPlanner.
|
||||
std::unique_ptr<memgraph::query::plan::LogicalOperator> unoptimized_plan;
|
||||
// Storage for the AST used in unoptimized_plan
|
||||
memgraph::query::AstStorage ast_storage;
|
||||
// Final plan after being rewritten and optimized.
|
||||
std::unique_ptr<memgraph::query::plan::LogicalOperator> final_plan;
|
||||
// Cost of the final plan.
|
||||
double cost;
|
||||
};
|
||||
|
||||
inline memgraph::query::Query *MakeAst(const std::string &query, memgraph::query::AstStorage *storage) {
|
||||
memgraph::query::frontend::ParsingContext parsing_context;
|
||||
parsing_context.is_query_cached = false;
|
||||
// query -> AST
|
||||
auto parser = std::make_unique<memgraph::query::frontend::opencypher::Parser>(query);
|
||||
// AST -> high level tree
|
||||
memgraph::query::frontend::CypherMainVisitor visitor(parsing_context, storage);
|
||||
visitor.visit(parser->tree());
|
||||
return visitor.query();
|
||||
}
|
||||
|
||||
// Returns a list of InteractivePlan instances, sorted in the ascending order by
|
||||
// cost.
|
||||
inline auto MakeLogicalPlans(memgraph::query::CypherQuery *query, memgraph::query::AstStorage &ast,
|
||||
memgraph::query::SymbolTable &symbol_table, InteractiveDbAccessor *dba) {
|
||||
auto query_parts = memgraph::query::plan::CollectQueryParts(symbol_table, ast, query);
|
||||
std::vector<InteractivePlan> interactive_plans;
|
||||
auto ctx = memgraph::query::plan::MakePlanningContext(&ast, &symbol_table, query, dba);
|
||||
if (query_parts.query_parts.size() <= 0) {
|
||||
std::cerr << "Failed to extract query parts" << std::endl;
|
||||
std::exit(EXIT_FAILURE);
|
||||
}
|
||||
memgraph::query::Parameters parameters;
|
||||
memgraph::query::plan::PostProcessor post_process(parameters);
|
||||
auto plans = memgraph::query::plan::MakeLogicalPlanForSingleQuery<memgraph::query::plan::VariableStartPlanner>(
|
||||
query_parts.query_parts.at(0).single_query_parts, &ctx);
|
||||
for (auto plan : plans) {
|
||||
memgraph::query::AstStorage ast_copy;
|
||||
auto unoptimized_plan = plan->Clone(&ast_copy);
|
||||
auto rewritten_plan = post_process.Rewrite(std::move(plan), &ctx);
|
||||
double cost = post_process.EstimatePlanCost(rewritten_plan, dba);
|
||||
interactive_plans.push_back(
|
||||
InteractivePlan{std::move(unoptimized_plan), std::move(ast_copy), std::move(rewritten_plan), cost});
|
||||
}
|
||||
std::stable_sort(interactive_plans.begin(), interactive_plans.end(),
|
||||
[](const auto &a, const auto &b) { return a.cost < b.cost; });
|
||||
return interactive_plans;
|
||||
}
|
158
tests/manual/interactive/planning.cpp
Normal file
158
tests/manual/interactive/planning.cpp
Normal file
@ -0,0 +1,158 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "planning.hpp"
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdlib>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
|
||||
#include <gflags/gflags.h>
|
||||
|
||||
#include "db_accessor.hpp"
|
||||
#include "plan.hpp"
|
||||
#include "query/context.hpp"
|
||||
#include "query/db_accessor.hpp"
|
||||
#include "query/frontend/ast/cypher_main_visitor.hpp"
|
||||
#include "query/frontend/opencypher/parser.hpp"
|
||||
#include "query/frontend/semantic/symbol_generator.hpp"
|
||||
#include "query/plan/operator.hpp"
|
||||
#include "query/plan/planner.hpp"
|
||||
#include "query/plan/pretty_print.hpp"
|
||||
#include "query/typed_value.hpp"
|
||||
#include "storage/v2/property_value.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
||||
DEFINE_string(save_mock_db_file, "", "File where the mock database should be saved (on exit)");
|
||||
DEFINE_string(load_mock_db_file, "", "File from which the mock database should be loaded");
|
||||
|
||||
DEFCOMMAND(Top) {
|
||||
int64_t n_plans = 0;
|
||||
std::stringstream ss(args[0]);
|
||||
ss >> n_plans;
|
||||
if (ss.fail() || !ss.eof()) return;
|
||||
n_plans = std::min(static_cast<int64_t>(plans.size()), n_plans);
|
||||
for (int64_t i = 0; i < n_plans; ++i) {
|
||||
std::cout << "---- Plan #" << i << " ---- " << std::endl;
|
||||
std::cout << "cost: " << plans[i].cost << std::endl;
|
||||
memgraph::query::plan::PrettyPrint(dba, plans[i].final_plan.get());
|
||||
std::cout << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
DEFCOMMAND(Show) {
|
||||
int64_t plan_ix = 0;
|
||||
std::stringstream ss(args[0]);
|
||||
ss >> plan_ix;
|
||||
if (ss.fail() || !ss.eof() || plan_ix >= plans.size()) return;
|
||||
const auto &plan = plans[plan_ix].final_plan;
|
||||
auto cost = plans[plan_ix].cost;
|
||||
std::cout << "Plan cost: " << cost << std::endl;
|
||||
memgraph::query::plan::PrettyPrint(dba, plan.get());
|
||||
}
|
||||
|
||||
DEFCOMMAND(ShowUnoptimized) {
|
||||
int64_t plan_ix = 0;
|
||||
std::stringstream ss(args[0]);
|
||||
ss >> plan_ix;
|
||||
if (ss.fail() || !ss.eof() || plan_ix >= plans.size()) return;
|
||||
const auto &plan = plans[plan_ix].unoptimized_plan;
|
||||
memgraph::query::plan::PrettyPrint(dba, plan.get());
|
||||
}
|
||||
|
||||
DEFCOMMAND(Help);
|
||||
|
||||
std::map<std::string, Command> commands = {
|
||||
{"top", {TopCommand, 1, "Show top N plans"}},
|
||||
{"show", {ShowCommand, 1, "Show the Nth plan"}},
|
||||
{"show-unoptimized", {ShowUnoptimizedCommand, 1, "Show the Nth plan in its original, unoptimized form"}},
|
||||
{"help", {HelpCommand, 0, "Show available commands"}},
|
||||
};
|
||||
|
||||
void AddCommand(const std::string &name, const Command &command) { commands[name] = command; }
|
||||
|
||||
DEFCOMMAND(Help) {
|
||||
std::cout << "Available commands:" << std::endl;
|
||||
for (const auto &command : commands) {
|
||||
std::cout << command.first;
|
||||
for (int i = 1; i <= command.second.arg_count; ++i) {
|
||||
std::cout << " arg" << i;
|
||||
}
|
||||
std::cout << " -- " << command.second.documentation << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
void ExaminePlans(memgraph::query::DbAccessor *dba, const memgraph::query::SymbolTable &symbol_table,
|
||||
std::vector<InteractivePlan> &plans, const memgraph::query::AstStorage &ast) {
|
||||
while (true) {
|
||||
auto line = ReadLine("plan? ");
|
||||
if (!line || *line == "quit") break;
|
||||
auto words = memgraph::utils::Split(memgraph::utils::ToLowerCase(*line));
|
||||
if (words.empty()) continue;
|
||||
auto command_name = words[0];
|
||||
std::vector<std::string> args(words.begin() + 1, words.end());
|
||||
auto command_it = commands.find(command_name);
|
||||
if (command_it == commands.end()) {
|
||||
std::cout << "Undefined command: '" << command_name << "'. Try 'help'." << std::endl;
|
||||
continue;
|
||||
}
|
||||
const auto &command = command_it->second;
|
||||
if (args.size() < command.arg_count) {
|
||||
std::cout << command_name << " expects " << command.arg_count << " arguments" << std::endl;
|
||||
continue;
|
||||
}
|
||||
command.function(*dba, symbol_table, plans, args, ast);
|
||||
}
|
||||
}
|
||||
|
||||
void RunInteractivePlanning(memgraph::query::DbAccessor *dba) {
|
||||
std::string in_db_filename(memgraph::utils::Trim(FLAGS_load_mock_db_file));
|
||||
if (!in_db_filename.empty() && !std::filesystem::exists(in_db_filename)) {
|
||||
std::cerr << "File '" << in_db_filename << "' does not exist!" << std::endl;
|
||||
std::exit(EXIT_FAILURE);
|
||||
}
|
||||
Timer planning_timer;
|
||||
InteractiveDbAccessor interactive_db(dba, in_db_filename.empty() ? ReadInt("Vertices in DB: ") : 0, planning_timer);
|
||||
if (!in_db_filename.empty()) {
|
||||
std::ifstream db_file(in_db_filename);
|
||||
interactive_db.Load(db_file);
|
||||
}
|
||||
while (true) {
|
||||
auto line = ReadLine("query? ");
|
||||
if (!line || *line == "quit") break;
|
||||
if (line->empty()) continue;
|
||||
try {
|
||||
memgraph::query::AstStorage ast;
|
||||
auto *query = dynamic_cast<memgraph::query::CypherQuery *>(MakeAst(*line, &ast));
|
||||
if (!query) {
|
||||
throw memgraph::utils::BasicException(
|
||||
"Interactive planning is only avaialable for regular openCypher "
|
||||
"queries.");
|
||||
}
|
||||
auto symbol_table = memgraph::query::MakeSymbolTable(query);
|
||||
planning_timer.Start();
|
||||
auto plans = MakeLogicalPlans(query, ast, symbol_table, &interactive_db);
|
||||
auto planning_time = planning_timer.Elapsed();
|
||||
std::cout << "Planning took " << std::chrono::duration<double, std::milli>(planning_time).count() << "ms"
|
||||
<< std::endl;
|
||||
std::cout << "Generated " << plans.size() << " plans" << std::endl;
|
||||
ExaminePlans(dba, symbol_table, plans, ast);
|
||||
} catch (const memgraph::utils::BasicException &e) {
|
||||
std::cout << "Error: " << e.what() << std::endl;
|
||||
}
|
||||
}
|
||||
std::string db_filename(memgraph::utils::Trim(FLAGS_save_mock_db_file));
|
||||
if (!db_filename.empty()) {
|
||||
std::ofstream db_file(db_filename);
|
||||
interactive_db.Save(db_file);
|
||||
}
|
||||
}
|
@ -21,21 +21,12 @@
|
||||
#include "query/frontend/semantic/symbol_table.hpp"
|
||||
#include "query/plan/operator.hpp"
|
||||
|
||||
#include "plan.hpp"
|
||||
|
||||
namespace database {
|
||||
class GraphDbAccessor;
|
||||
}
|
||||
|
||||
struct InteractivePlan {
|
||||
// Original plan after going only through the RuleBasedPlanner.
|
||||
std::unique_ptr<memgraph::query::plan::LogicalOperator> unoptimized_plan;
|
||||
// Storage for the AST used in unoptimized_plan
|
||||
memgraph::query::AstStorage ast_storage;
|
||||
// Final plan after being rewritten and optimized.
|
||||
std::unique_ptr<memgraph::query::plan::LogicalOperator> final_plan;
|
||||
// Cost of the final plan.
|
||||
double cost;
|
||||
};
|
||||
|
||||
typedef std::vector<InteractivePlan> PlansWithCost;
|
||||
|
||||
// Encapsulates a consoles command function.
|
65
tests/manual/interactive/readline.hpp
Normal file
65
tests/manual/interactive/readline.hpp
Normal file
@ -0,0 +1,65 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <optional>
|
||||
#include <string>
|
||||
|
||||
#ifdef HAS_READLINE
|
||||
// TODO: This should probably be moved to some utils file.
|
||||
|
||||
#include "readline/history.h"
|
||||
#include "readline/readline.h"
|
||||
|
||||
/**
|
||||
* Helper function that reads a line from the
|
||||
* standard input using the 'readline' lib.
|
||||
* Adds support for history and reverse-search.
|
||||
*
|
||||
* @param prompt The prompt to display.
|
||||
* @return A single command the user entered, or nullopt on EOF.
|
||||
*/
|
||||
inline std::optional<std::string> ReadLine(const std::string &prompt) {
|
||||
char *line = readline(prompt.c_str());
|
||||
if (!line) return std::nullopt;
|
||||
|
||||
if (*line) add_history(line);
|
||||
std::string r_val(line);
|
||||
free(line);
|
||||
return r_val;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
inline std::optional<std::string> ReadLine(const std::string &prompt) {
|
||||
std::cout << prompt;
|
||||
std::string line;
|
||||
std::getline(std::cin, line);
|
||||
if (std::cin.eof()) return std::nullopt;
|
||||
return line;
|
||||
}
|
||||
|
||||
#endif // HAS_READLINE
|
||||
|
||||
// Repeats the prompt untile the user inputs an integer.
|
||||
inline int64_t ReadInt(const std::string &prompt) {
|
||||
int64_t val = 0;
|
||||
std::stringstream ss;
|
||||
do {
|
||||
auto line = ReadLine(prompt);
|
||||
if (!line) continue;
|
||||
ss.str(*line);
|
||||
ss.clear();
|
||||
ss >> val;
|
||||
} while (ss.fail() || !ss.eof());
|
||||
return val;
|
||||
}
|
57
tests/manual/interactive/timer.hpp
Normal file
57
tests/manual/interactive/timer.hpp
Normal file
@ -0,0 +1,57 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <algorithm>
|
||||
#include <chrono>
|
||||
|
||||
class Timer {
|
||||
public:
|
||||
void Start() {
|
||||
duration_ = duration_.zero();
|
||||
start_time_ = std::chrono::steady_clock::now();
|
||||
}
|
||||
|
||||
void Pause() {
|
||||
if (pause_ == 0) {
|
||||
duration_ += std::chrono::steady_clock::now() - start_time_;
|
||||
}
|
||||
++pause_;
|
||||
}
|
||||
|
||||
void Resume() {
|
||||
if (pause_ == 1) {
|
||||
start_time_ = std::chrono::steady_clock::now();
|
||||
}
|
||||
pause_ = std::max(0, pause_ - 1);
|
||||
}
|
||||
|
||||
template <class TFun>
|
||||
auto WithPause(const TFun &fun) {
|
||||
Pause();
|
||||
auto ret = fun();
|
||||
Resume();
|
||||
return std::move(ret);
|
||||
}
|
||||
|
||||
std::chrono::duration<double> Elapsed() {
|
||||
if (pause_ == 0) {
|
||||
return duration_ + (std::chrono::steady_clock::now() - start_time_);
|
||||
}
|
||||
return duration_;
|
||||
}
|
||||
|
||||
private:
|
||||
std::chrono::duration<double> duration_;
|
||||
std::chrono::time_point<std::chrono::steady_clock> start_time_;
|
||||
int pause_ = 0;
|
||||
};
|
183
tests/manual/query_execution_dummy.cpp
Normal file
183
tests/manual/query_execution_dummy.cpp
Normal file
@ -0,0 +1,183 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "interactive/planning.hpp"
|
||||
|
||||
#include <chrono>
|
||||
#include <thread>
|
||||
|
||||
#include <gflags/gflags.h>
|
||||
|
||||
#include "interactive/db_accessor.hpp"
|
||||
#include "interactive/plan.hpp"
|
||||
#include "query/frontend/semantic/symbol_generator.hpp"
|
||||
#include "storage/v2/storage.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
||||
namespace memgraph::query::plan {
|
||||
|
||||
class TestLogicalOperatorVisitor final : public HierarchicalLogicalOperatorVisitor {
|
||||
public:
|
||||
TestLogicalOperatorVisitor() {}
|
||||
|
||||
using HierarchicalLogicalOperatorVisitor::PostVisit;
|
||||
using HierarchicalLogicalOperatorVisitor::PreVisit;
|
||||
using HierarchicalLogicalOperatorVisitor::Visit;
|
||||
|
||||
void Start() {}
|
||||
|
||||
bool IsDone() { return true; }
|
||||
|
||||
bool Visit(Once &) override {
|
||||
std::cout << "Visit Once" << std::endl;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PreVisit(Filter &op) override { return true; }
|
||||
bool PostVisit(Filter &op) override { return true; }
|
||||
|
||||
bool PreVisit(ScanAll &op) override {
|
||||
std::cout << "PreVisit ScanAll, output " << op.output_symbol_.name_ << std::endl;
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(ScanAll &scan) override { return true; }
|
||||
|
||||
bool PreVisit(Expand &op) override { return true; }
|
||||
bool PostVisit(Expand &expand) override { return true; }
|
||||
|
||||
bool PreVisit(ExpandVariable &op) override { return true; }
|
||||
bool PostVisit(ExpandVariable &expand) override { return true; }
|
||||
|
||||
bool PreVisit(Merge &op) override { return false; }
|
||||
bool PostVisit(Merge &) override { return true; }
|
||||
|
||||
bool PreVisit(Optional &op) override { return false; }
|
||||
bool PostVisit(Optional &) override { return true; }
|
||||
|
||||
bool PreVisit(Cartesian &op) override { return true; }
|
||||
bool PostVisit(Cartesian &) override { return true; }
|
||||
|
||||
bool PreVisit(Union &op) override { return false; }
|
||||
bool PostVisit(Union &) override { return true; }
|
||||
|
||||
bool PreVisit(CreateNode &op) override { return true; }
|
||||
bool PostVisit(CreateNode &) override { return true; }
|
||||
|
||||
bool PreVisit(CreateExpand &op) override { return true; }
|
||||
bool PostVisit(CreateExpand &) override { return true; }
|
||||
|
||||
bool PreVisit(ScanAllByLabel &op) override { return true; }
|
||||
bool PostVisit(ScanAllByLabel &) override { return true; }
|
||||
|
||||
bool PreVisit(ScanAllByLabelPropertyRange &op) override { return true; }
|
||||
bool PostVisit(ScanAllByLabelPropertyRange &) override { return true; }
|
||||
|
||||
bool PreVisit(ScanAllByLabelPropertyValue &op) override { return true; }
|
||||
bool PostVisit(ScanAllByLabelPropertyValue &) override { return true; }
|
||||
|
||||
bool PreVisit(ScanAllByLabelProperty &op) override { return true; }
|
||||
bool PostVisit(ScanAllByLabelProperty &) override { return true; }
|
||||
|
||||
bool PreVisit(ScanAllById &op) override { return true; }
|
||||
bool PostVisit(ScanAllById &) override { return true; }
|
||||
|
||||
bool PreVisit(ConstructNamedPath &op) override { return true; }
|
||||
bool PostVisit(ConstructNamedPath &) override { return true; }
|
||||
|
||||
bool PreVisit(Produce &op) override {
|
||||
std::cout << "PreVisit Produce, named expressions: ";
|
||||
for (const auto &name_expr : op.named_expressions_) {
|
||||
std::cout << name_expr->name_ << " ";
|
||||
}
|
||||
std::cout << std::endl;
|
||||
return true;
|
||||
}
|
||||
bool PostVisit(Produce &) override { return true; }
|
||||
|
||||
bool PreVisit(Delete &op) override { return true; }
|
||||
bool PostVisit(Delete &) override { return true; }
|
||||
|
||||
bool PreVisit(SetProperty &op) override { return true; }
|
||||
bool PostVisit(SetProperty &) override { return true; }
|
||||
|
||||
bool PreVisit(SetProperties &op) override { return true; }
|
||||
bool PostVisit(SetProperties &) override { return true; }
|
||||
|
||||
bool PreVisit(SetLabels &op) override { return true; }
|
||||
bool PostVisit(SetLabels &) override { return true; }
|
||||
|
||||
bool PreVisit(RemoveProperty &op) override { return true; }
|
||||
bool PostVisit(RemoveProperty &) override { return true; }
|
||||
|
||||
bool PreVisit(RemoveLabels &op) override { return true; }
|
||||
bool PostVisit(RemoveLabels &) override { return true; }
|
||||
|
||||
bool PreVisit(EdgeUniquenessFilter &op) override { return true; }
|
||||
bool PostVisit(EdgeUniquenessFilter &) override { return true; }
|
||||
|
||||
bool PreVisit(Accumulate &op) override { return true; }
|
||||
bool PostVisit(Accumulate &) override { return true; }
|
||||
|
||||
bool PreVisit(Aggregate &op) override { return true; }
|
||||
bool PostVisit(Aggregate &) override { return true; }
|
||||
|
||||
bool PreVisit(Skip &op) override { return true; }
|
||||
bool PostVisit(Skip &) override { return true; }
|
||||
|
||||
bool PreVisit(Limit &op) override { return true; }
|
||||
bool PostVisit(Limit &) override { return true; }
|
||||
|
||||
bool PreVisit(OrderBy &op) override { return true; }
|
||||
bool PostVisit(OrderBy &) override { return true; }
|
||||
|
||||
bool PreVisit(Unwind &op) override { return true; }
|
||||
bool PostVisit(Unwind &) override { return true; }
|
||||
|
||||
bool PreVisit(Distinct &op) override { return true; }
|
||||
bool PostVisit(Distinct &) override { return true; }
|
||||
|
||||
bool PreVisit(CallProcedure &op) override { return true; }
|
||||
bool PostVisit(CallProcedure &) override { return true; }
|
||||
};
|
||||
} // namespace memgraph::query::plan
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
gflags::ParseCommandLineFlags(&argc, &argv, true);
|
||||
spdlog::set_level(spdlog::level::info);
|
||||
|
||||
memgraph::storage::Storage db;
|
||||
auto storage_dba = db.Access();
|
||||
memgraph::query::DbAccessor dba(&storage_dba);
|
||||
|
||||
Timer planning_timer;
|
||||
InteractiveDbAccessor interactive_db(&dba, 10, planning_timer);
|
||||
std::string input_query = "MATCH (n) RETURN n;";
|
||||
memgraph::query::AstStorage ast;
|
||||
auto *query = dynamic_cast<memgraph::query::CypherQuery *>(MakeAst(input_query, &ast));
|
||||
if (!query) {
|
||||
throw memgraph::utils::BasicException("Create CypherQuery failed");
|
||||
}
|
||||
auto symbol_table = memgraph::query::MakeSymbolTable(query);
|
||||
planning_timer.Start();
|
||||
auto plans = MakeLogicalPlans(query, ast, symbol_table, &interactive_db);
|
||||
if (plans.size() == 0) {
|
||||
throw memgraph::utils::BasicException("No plans");
|
||||
}
|
||||
|
||||
memgraph::query::plan::TestLogicalOperatorVisitor executor;
|
||||
plans[0].unoptimized_plan->Accept(executor);
|
||||
executor.Start();
|
||||
while (!executor.IsDone()) {
|
||||
std::cout << "Executor NOT done yet" << std::endl;
|
||||
}
|
||||
std::cout << "Executor done" << std::endl;
|
||||
return 0;
|
||||
}
|
@ -9,7 +9,7 @@
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "interactive_planning.hpp"
|
||||
#include "interactive/planning.hpp"
|
||||
|
||||
#include <gflags/gflags.h>
|
||||
|
||||
|
@ -17,20 +17,18 @@ function(add_simulation_test test_cpp)
|
||||
# requires unique logical target names
|
||||
set_target_properties(${target_name} PROPERTIES OUTPUT_NAME ${exec_name})
|
||||
|
||||
# sanitize
|
||||
target_compile_options(${target_name} PRIVATE -fsanitize=${san})
|
||||
target_link_options(${target_name} PRIVATE -fsanitize=${san})
|
||||
|
||||
target_link_libraries(${target_name} mg-storage-v3 mg-communication gtest gmock mg-utils mg-io mg-io-simulator mg-coordinator Boost::headers mg-query-v2)
|
||||
target_link_libraries(${target_name} mg-storage-v3 mg-communication mg-utils mg-io mg-io-simulator mg-coordinator mg-query-v2)
|
||||
target_link_libraries(${target_name} Boost::headers)
|
||||
target_link_libraries(${target_name} gtest gtest_main gmock rapidcheck rapidcheck_gtest)
|
||||
|
||||
# register test
|
||||
add_test(${target_name} ${exec_name})
|
||||
add_dependencies(memgraph__simulation ${target_name})
|
||||
endfunction(add_simulation_test)
|
||||
|
||||
add_simulation_test(basic_request.cpp address)
|
||||
add_simulation_test(raft.cpp address)
|
||||
add_simulation_test(trial_query_storage/query_storage_test.cpp address)
|
||||
add_simulation_test(sharded_map.cpp address)
|
||||
add_simulation_test(shard_request_manager.cpp address)
|
||||
add_simulation_test(basic_request.cpp)
|
||||
add_simulation_test(raft.cpp)
|
||||
add_simulation_test(trial_query_storage/query_storage_test.cpp)
|
||||
add_simulation_test(sharded_map.cpp)
|
||||
add_simulation_test(shard_rsm.cpp)
|
||||
add_simulation_test(cluster_property_test.cpp)
|
||||
|
51
tests/simulation/cluster_config.hpp
Normal file
51
tests/simulation/cluster_config.hpp
Normal file
@ -0,0 +1,51 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <rapidcheck.h>
|
||||
|
||||
#include "testing_constants.hpp"
|
||||
|
||||
namespace memgraph::tests::simulation {
|
||||
|
||||
struct ClusterConfig {
|
||||
int servers;
|
||||
int replication_factor;
|
||||
int shards;
|
||||
|
||||
friend std::ostream &operator<<(std::ostream &in, const ClusterConfig &cluster) {
|
||||
in << "ClusterConfig { servers: " << cluster.servers << ", replication_factor: " << cluster.replication_factor
|
||||
<< ", shards: " << cluster.shards << " }";
|
||||
return in;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace memgraph::tests::simulation
|
||||
|
||||
// Required namespace for rapidcheck generator
|
||||
namespace rc {
|
||||
|
||||
using memgraph::tests::simulation::ClusterConfig;
|
||||
|
||||
template <>
|
||||
struct Arbitrary<ClusterConfig> {
|
||||
static Gen<ClusterConfig> arbitrary() {
|
||||
return gen::build<ClusterConfig>(
|
||||
// gen::inRange is [inclusive min, exclusive max)
|
||||
gen::set(&ClusterConfig::servers, gen::inRange(kMinimumServers, kMaximumServers)),
|
||||
gen::set(&ClusterConfig::replication_factor,
|
||||
gen::inRange(kMinimumReplicationFactor, kMaximumReplicationFactor)),
|
||||
gen::set(&ClusterConfig::shards, gen::inRange(kMinimumShards, kMaximumShards)));
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace rc
|
50
tests/simulation/cluster_property_test.cpp
Normal file
50
tests/simulation/cluster_property_test.cpp
Normal file
@ -0,0 +1,50 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
// This test serves as an example of a property-based model test.
|
||||
// It generates a cluster configuration and a set of operations to
|
||||
// apply against both the real system and a greatly simplified model.
|
||||
|
||||
#include <chrono>
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include <rapidcheck.h>
|
||||
#include <rapidcheck/gtest.h>
|
||||
|
||||
#include "generated_operations.hpp"
|
||||
#include "io/simulator/simulator_config.hpp"
|
||||
#include "io/time.hpp"
|
||||
#include "storage/v3/shard_manager.hpp"
|
||||
#include "test_cluster.hpp"
|
||||
|
||||
namespace memgraph::tests::simulation {
|
||||
|
||||
using io::Duration;
|
||||
using io::Time;
|
||||
using io::simulator::SimulatorConfig;
|
||||
using storage::v3::kMaximumCronInterval;
|
||||
|
||||
RC_GTEST_PROP(RandomClusterConfig, HappyPath, (ClusterConfig cluster_config, NonEmptyOpVec ops)) {
|
||||
// TODO(tyler) set abort_time to something more restrictive than Time::max()
|
||||
|
||||
SimulatorConfig sim_config{
|
||||
.drop_percent = 0,
|
||||
.perform_timeouts = false,
|
||||
.scramble_messages = true,
|
||||
.rng_seed = 0,
|
||||
.start_time = Time::min(),
|
||||
.abort_time = Time::max(),
|
||||
};
|
||||
|
||||
RunClusterSimulation(sim_config, cluster_config, ops.ops);
|
||||
}
|
||||
|
||||
} // namespace memgraph::tests::simulation
|
@ -43,26 +43,18 @@
|
||||
#include "storage/v3/value_conversions.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
|
||||
using memgraph::coordinator::Hlc;
|
||||
using memgraph::io::rsm::StorageWriteRequest;
|
||||
using memgraph::io::rsm::StorageWriteResponse;
|
||||
using memgraph::io::simulator::Simulator;
|
||||
using memgraph::io::simulator::SimulatorConfig;
|
||||
using memgraph::io::simulator::SimulatorStats;
|
||||
using memgraph::io::simulator::SimulatorTransport;
|
||||
using memgraph::msgs::CreateVerticesRequest;
|
||||
using memgraph::msgs::CreateVerticesResponse;
|
||||
using memgraph::msgs::ExpandOneRequest;
|
||||
using memgraph::msgs::ExpandOneResponse;
|
||||
using memgraph::msgs::ListedValues;
|
||||
using memgraph::msgs::ScanVerticesRequest;
|
||||
using memgraph::msgs::ScanVerticesResponse;
|
||||
using memgraph::msgs::Value;
|
||||
using memgraph::msgs::VertexId;
|
||||
using memgraph::storage::v3::LabelId;
|
||||
using memgraph::storage::v3::PropertyValue;
|
||||
namespace memgraph::storage::v3::tests {
|
||||
using coordinator::Hlc;
|
||||
using io::rsm::StorageWriteRequest;
|
||||
using io::rsm::StorageWriteResponse;
|
||||
using io::simulator::Simulator;
|
||||
using io::simulator::SimulatorConfig;
|
||||
using io::simulator::SimulatorStats;
|
||||
using io::simulator::SimulatorTransport;
|
||||
using storage::v3::LabelId;
|
||||
using storage::v3::PropertyValue;
|
||||
|
||||
using ShardRsmKey = std::vector<memgraph::storage::v3::PropertyValue>;
|
||||
using ShardRsmKey = std::vector<storage::v3::PropertyValue>;
|
||||
|
||||
class MockedShardRsm {
|
||||
std::map<ShardRsmKey, int> state_;
|
||||
@ -79,32 +71,37 @@ class MockedShardRsm {
|
||||
}
|
||||
|
||||
public:
|
||||
using ReadRequests = msgs::ReadRequests;
|
||||
using ReadResponses = msgs::ReadResponses;
|
||||
using WriteRequests = msgs::WriteRequests;
|
||||
using WriteResponses = msgs::WriteResponses;
|
||||
|
||||
// ExpandOneResponse Read(ExpandOneRequest rqst);
|
||||
// GetPropertiesResponse Read(GetPropertiesRequest rqst);
|
||||
ScanVerticesResponse ReadImpl(ScanVerticesRequest rqst) {
|
||||
ScanVerticesResponse ret;
|
||||
auto as_prop_val = memgraph::storage::conversions::ConvertPropertyVector(rqst.start_id.second);
|
||||
msgs::ScanVerticesResponse ReadImpl(msgs::ScanVerticesRequest rqst) {
|
||||
msgs::ScanVerticesResponse ret;
|
||||
auto as_prop_val = storage::conversions::ConvertPropertyVector(rqst.start_id.second);
|
||||
if (!IsKeyInRange(as_prop_val)) {
|
||||
ret.success = false;
|
||||
} else if (as_prop_val == ShardRsmKey{PropertyValue(0), PropertyValue(0)}) {
|
||||
Value val(int64_t(0));
|
||||
ret.next_start_id = std::make_optional<VertexId>();
|
||||
msgs::Value val(int64_t(0));
|
||||
ret.next_start_id = std::make_optional<msgs::VertexId>();
|
||||
ret.next_start_id->second =
|
||||
memgraph::storage::conversions::ConvertValueVector(ShardRsmKey{PropertyValue(1), PropertyValue(0)});
|
||||
memgraph::msgs::ScanResultRow result;
|
||||
result.props.push_back(std::make_pair(memgraph::msgs::PropertyId::FromUint(0), val));
|
||||
storage::conversions::ConvertValueVector(ShardRsmKey{PropertyValue(1), PropertyValue(0)});
|
||||
msgs::ScanResultRow result;
|
||||
result.props.push_back(std::make_pair(msgs::PropertyId::FromUint(0), val));
|
||||
ret.results.push_back(std::move(result));
|
||||
ret.success = true;
|
||||
} else if (as_prop_val == ShardRsmKey{PropertyValue(1), PropertyValue(0)}) {
|
||||
memgraph::msgs::ScanResultRow result;
|
||||
Value val(int64_t(1));
|
||||
result.props.push_back(std::make_pair(memgraph::msgs::PropertyId::FromUint(0), val));
|
||||
msgs::ScanResultRow result;
|
||||
msgs::Value val(int64_t(1));
|
||||
result.props.push_back(std::make_pair(msgs::PropertyId::FromUint(0), val));
|
||||
ret.results.push_back(std::move(result));
|
||||
ret.success = true;
|
||||
} else if (as_prop_val == ShardRsmKey{PropertyValue(12), PropertyValue(13)}) {
|
||||
memgraph::msgs::ScanResultRow result;
|
||||
Value val(int64_t(444));
|
||||
result.props.push_back(std::make_pair(memgraph::msgs::PropertyId::FromUint(0), val));
|
||||
msgs::ScanResultRow result;
|
||||
msgs::Value val(int64_t(444));
|
||||
result.props.push_back(std::make_pair(msgs::PropertyId::FromUint(0), val));
|
||||
ret.results.push_back(std::move(result));
|
||||
ret.success = true;
|
||||
} else {
|
||||
@ -113,14 +110,25 @@ class MockedShardRsm {
|
||||
return ret;
|
||||
}
|
||||
|
||||
ExpandOneResponse ReadImpl(ExpandOneRequest rqst) { return {}; }
|
||||
using ReadRequests = std::variant<ScanVerticesRequest, ExpandOneRequest>;
|
||||
using ReadResponses = std::variant<ScanVerticesResponse, ExpandOneResponse>;
|
||||
msgs::ExpandOneResponse ReadImpl(msgs::ExpandOneRequest rqst) { return {}; }
|
||||
msgs::ExpandOneResponse ReadImpl(msgs::GetPropertiesRequest rqst) { return {}; }
|
||||
|
||||
ReadResponses Read(ReadRequests read_requests) {
|
||||
return {std::visit([this](auto &&request) { return ReadResponses{ReadImpl(std::move(request))}; },
|
||||
return {std::visit([this]<typename T>(T &&request) { return ReadResponses{ReadImpl(std::forward<T>(request))}; },
|
||||
std::move(read_requests))};
|
||||
}
|
||||
|
||||
CreateVerticesResponse Apply(CreateVerticesRequest request) { return CreateVerticesResponse{.success = true}; }
|
||||
msgs::CreateVerticesResponse ApplyImpl(msgs::CreateVerticesRequest rqst) { return {.success = true}; }
|
||||
msgs::DeleteVerticesResponse ApplyImpl(msgs::DeleteVerticesRequest rqst) { return {}; }
|
||||
msgs::UpdateVerticesResponse ApplyImpl(msgs::UpdateVerticesRequest rqst) { return {}; }
|
||||
msgs::CreateExpandResponse ApplyImpl(msgs::CreateExpandRequest rqst) { return {.success = true}; }
|
||||
msgs::DeleteEdgesResponse ApplyImpl(msgs::DeleteEdgesRequest rqst) { return {}; }
|
||||
msgs::UpdateEdgesResponse ApplyImpl(msgs::UpdateEdgesRequest rqst) { return {}; }
|
||||
msgs::CommitResponse ApplyImpl(msgs::CommitRequest rqst) { return {}; }
|
||||
|
||||
WriteResponses Apply(WriteRequests write_requests) {
|
||||
return {std::visit([this]<typename T>(T &&request) { return WriteResponses{ApplyImpl(std::forward<T>(request))}; },
|
||||
std::move(write_requests))};
|
||||
}
|
||||
};
|
||||
} // namespace memgraph::storage::v3::tests
|
||||
|
114
tests/simulation/generated_operations.hpp
Normal file
114
tests/simulation/generated_operations.hpp
Normal file
@ -0,0 +1,114 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <map>
|
||||
#include <optional>
|
||||
#include <variant>
|
||||
|
||||
#include <rapidcheck.h>
|
||||
#include <rapidcheck/gtest.h>
|
||||
|
||||
#include "storage/v2/storage.hpp"
|
||||
#include "testing_constants.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
|
||||
namespace memgraph::tests::simulation {
|
||||
|
||||
struct CreateVertex {
|
||||
int first;
|
||||
int second;
|
||||
|
||||
friend std::ostream &operator<<(std::ostream &in, const CreateVertex &add) {
|
||||
in << "CreateVertex { first: " << add.first << ", second: " << add.second << " }";
|
||||
return in;
|
||||
}
|
||||
};
|
||||
|
||||
struct ScanAll {
|
||||
friend std::ostream &operator<<(std::ostream &in, const ScanAll &get) {
|
||||
in << "ScanAll {}";
|
||||
return in;
|
||||
}
|
||||
};
|
||||
|
||||
using OpVariant = std::variant<CreateVertex, ScanAll>;
|
||||
|
||||
struct Op {
|
||||
OpVariant inner;
|
||||
|
||||
friend std::ostream &operator<<(std::ostream &in, const Op &op) {
|
||||
std::visit([&](const auto &x) { in << x; }, op.inner);
|
||||
return in;
|
||||
}
|
||||
};
|
||||
|
||||
struct NonEmptyOpVec {
|
||||
std::vector<memgraph::tests::simulation::Op> ops;
|
||||
|
||||
friend std::ostream &operator<<(std::ostream &in, const NonEmptyOpVec &op) {
|
||||
in << "[";
|
||||
bool first = true;
|
||||
for (const auto &op : op.ops) {
|
||||
if (!first) {
|
||||
in << ", ";
|
||||
}
|
||||
in << op;
|
||||
first = false;
|
||||
}
|
||||
in << "]";
|
||||
|
||||
return in;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace memgraph::tests::simulation
|
||||
|
||||
// Required namespace for rapidcheck generators
|
||||
namespace rc {
|
||||
|
||||
using namespace memgraph::tests::simulation;
|
||||
|
||||
template <>
|
||||
struct Arbitrary<CreateVertex> {
|
||||
static Gen<CreateVertex> arbitrary() {
|
||||
return gen::build<CreateVertex>(gen::set(&CreateVertex::first, gen::inRange(0, kMaximumShards + 1)),
|
||||
gen::set(&CreateVertex::second, gen::inRange(0, kMaximumShards + 1)));
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct Arbitrary<ScanAll> {
|
||||
static Gen<ScanAll> arbitrary() { return gen::just(ScanAll{}); }
|
||||
};
|
||||
|
||||
OpVariant opHoist(ScanAll op) { return op; }
|
||||
OpVariant opHoist(CreateVertex op) { return op; }
|
||||
|
||||
template <>
|
||||
struct ::rc::Arbitrary<Op> {
|
||||
static Gen<Op> arbitrary() {
|
||||
return gen::build<Op>(gen::set(
|
||||
&Op::inner, gen::oneOf(gen::map(gen::arbitrary<CreateVertex>(), [](CreateVertex op) { return opHoist(op); }),
|
||||
gen::map(gen::arbitrary<ScanAll>(), [](ScanAll op) { return opHoist(op); }))));
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct Arbitrary<NonEmptyOpVec> {
|
||||
static Gen<NonEmptyOpVec> arbitrary() {
|
||||
return gen::build<NonEmptyOpVec>(
|
||||
gen::set(&NonEmptyOpVec::ops, gen::nonEmpty<std::vector<memgraph::tests::simulation::Op>>()));
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace rc
|
@ -130,7 +130,7 @@ void RunSimulation() {
|
||||
.scramble_messages = true,
|
||||
.rng_seed = 0,
|
||||
.start_time = Time::min() + std::chrono::microseconds{256 * 1024},
|
||||
.abort_time = Time::min() + std::chrono::microseconds{8 * 1024 * 128},
|
||||
.abort_time = Time::max(),
|
||||
};
|
||||
|
||||
auto simulator = Simulator(config);
|
||||
|
@ -36,51 +36,51 @@
|
||||
#include "storage/v3/property_value.hpp"
|
||||
#include "utils/result.hpp"
|
||||
|
||||
using memgraph::coordinator::AddressAndStatus;
|
||||
using CompoundKey = memgraph::coordinator::PrimaryKey;
|
||||
using memgraph::coordinator::Coordinator;
|
||||
using memgraph::coordinator::CoordinatorClient;
|
||||
using memgraph::coordinator::CoordinatorRsm;
|
||||
using memgraph::coordinator::HlcRequest;
|
||||
using memgraph::coordinator::HlcResponse;
|
||||
using memgraph::coordinator::Shard;
|
||||
using memgraph::coordinator::ShardMap;
|
||||
using memgraph::coordinator::Shards;
|
||||
using memgraph::coordinator::Status;
|
||||
using memgraph::io::Address;
|
||||
using memgraph::io::Io;
|
||||
using memgraph::io::ResponseEnvelope;
|
||||
using memgraph::io::ResponseFuture;
|
||||
using memgraph::io::Time;
|
||||
using memgraph::io::TimedOut;
|
||||
using memgraph::io::rsm::Raft;
|
||||
using memgraph::io::rsm::ReadRequest;
|
||||
using memgraph::io::rsm::ReadResponse;
|
||||
using memgraph::io::rsm::StorageReadRequest;
|
||||
using memgraph::io::rsm::StorageReadResponse;
|
||||
using memgraph::io::rsm::StorageWriteRequest;
|
||||
using memgraph::io::rsm::StorageWriteResponse;
|
||||
using memgraph::io::rsm::WriteRequest;
|
||||
using memgraph::io::rsm::WriteResponse;
|
||||
using memgraph::io::simulator::Simulator;
|
||||
using memgraph::io::simulator::SimulatorConfig;
|
||||
using memgraph::io::simulator::SimulatorStats;
|
||||
using memgraph::io::simulator::SimulatorTransport;
|
||||
using memgraph::msgs::CreateVerticesRequest;
|
||||
using memgraph::msgs::CreateVerticesResponse;
|
||||
using memgraph::msgs::ListedValues;
|
||||
using memgraph::msgs::NewVertexLabel;
|
||||
using memgraph::msgs::ScanVerticesRequest;
|
||||
using memgraph::msgs::ScanVerticesResponse;
|
||||
using memgraph::storage::v3::LabelId;
|
||||
using memgraph::storage::v3::SchemaProperty;
|
||||
using memgraph::utils::BasicResult;
|
||||
namespace memgraph::query::v2::tests {
|
||||
using coordinator::AddressAndStatus;
|
||||
using CompoundKey = coordinator::PrimaryKey;
|
||||
using coordinator::Coordinator;
|
||||
using coordinator::CoordinatorClient;
|
||||
using coordinator::CoordinatorRsm;
|
||||
using coordinator::HlcRequest;
|
||||
using coordinator::HlcResponse;
|
||||
using coordinator::Shard;
|
||||
using coordinator::ShardMap;
|
||||
using coordinator::Shards;
|
||||
using coordinator::Status;
|
||||
using io::Address;
|
||||
using io::Io;
|
||||
using io::ResponseEnvelope;
|
||||
using io::ResponseFuture;
|
||||
using io::Time;
|
||||
using io::TimedOut;
|
||||
using io::rsm::Raft;
|
||||
using io::rsm::ReadRequest;
|
||||
using io::rsm::ReadResponse;
|
||||
using io::rsm::StorageReadRequest;
|
||||
using io::rsm::StorageReadResponse;
|
||||
using io::rsm::StorageWriteRequest;
|
||||
using io::rsm::StorageWriteResponse;
|
||||
using io::rsm::WriteRequest;
|
||||
using io::rsm::WriteResponse;
|
||||
using io::simulator::Simulator;
|
||||
using io::simulator::SimulatorConfig;
|
||||
using io::simulator::SimulatorStats;
|
||||
using io::simulator::SimulatorTransport;
|
||||
using msgs::CreateVerticesRequest;
|
||||
using msgs::CreateVerticesResponse;
|
||||
using msgs::ScanVerticesRequest;
|
||||
using msgs::ScanVerticesResponse;
|
||||
using msgs::VertexId;
|
||||
using storage::v3::LabelId;
|
||||
using storage::v3::SchemaProperty;
|
||||
using storage::v3::tests::MockedShardRsm;
|
||||
using utils::BasicResult;
|
||||
|
||||
namespace {
|
||||
|
||||
ShardMap CreateDummyShardmap(memgraph::coordinator::Address a_io_1, memgraph::coordinator::Address a_io_2,
|
||||
memgraph::coordinator::Address a_io_3, memgraph::coordinator::Address b_io_1,
|
||||
memgraph::coordinator::Address b_io_2, memgraph::coordinator::Address b_io_3) {
|
||||
ShardMap CreateDummyShardmap(coordinator::Address a_io_1, coordinator::Address a_io_2, coordinator::Address a_io_3,
|
||||
coordinator::Address b_io_1, coordinator::Address b_io_2, coordinator::Address b_io_3) {
|
||||
static const std::string label_name = std::string("test_label");
|
||||
ShardMap sm;
|
||||
|
||||
@ -89,8 +89,8 @@ ShardMap CreateDummyShardmap(memgraph::coordinator::Address a_io_1, memgraph::co
|
||||
const auto properties = sm.AllocatePropertyIds(property_names);
|
||||
const auto property_id_1 = properties.at("property_1");
|
||||
const auto property_id_2 = properties.at("property_2");
|
||||
const auto type_1 = memgraph::common::SchemaType::INT;
|
||||
const auto type_2 = memgraph::common::SchemaType::INT;
|
||||
const auto type_1 = common::SchemaType::INT;
|
||||
const auto type_2 = common::SchemaType::INT;
|
||||
|
||||
// register new label space
|
||||
std::vector<SchemaProperty> schema = {
|
||||
@ -113,8 +113,8 @@ ShardMap CreateDummyShardmap(memgraph::coordinator::Address a_io_1, memgraph::co
|
||||
|
||||
Shard shard1 = {aas1_1, aas1_2, aas1_3};
|
||||
|
||||
auto key1 = memgraph::storage::v3::PropertyValue(0);
|
||||
auto key2 = memgraph::storage::v3::PropertyValue(0);
|
||||
auto key1 = storage::v3::PropertyValue(0);
|
||||
auto key2 = storage::v3::PropertyValue(0);
|
||||
CompoundKey compound_key_1 = {key1, key2};
|
||||
shards_for_label[compound_key_1] = shard1;
|
||||
|
||||
@ -125,20 +125,22 @@ ShardMap CreateDummyShardmap(memgraph::coordinator::Address a_io_1, memgraph::co
|
||||
|
||||
Shard shard2 = {aas2_1, aas2_2, aas2_3};
|
||||
|
||||
auto key3 = memgraph::storage::v3::PropertyValue(12);
|
||||
auto key4 = memgraph::storage::v3::PropertyValue(13);
|
||||
auto key3 = storage::v3::PropertyValue(12);
|
||||
auto key4 = storage::v3::PropertyValue(13);
|
||||
CompoundKey compound_key_2 = {key3, key4};
|
||||
shards_for_label[compound_key_2] = shard2;
|
||||
|
||||
sm.AllocateEdgeTypeIds(std::vector<coordinator::EdgeTypeName>{"edge_type"});
|
||||
|
||||
return sm;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
using WriteRequests = CreateVerticesRequest;
|
||||
using WriteResponses = CreateVerticesResponse;
|
||||
using ReadRequests = std::variant<ScanVerticesRequest, ExpandOneRequest>;
|
||||
using ReadResponses = std::variant<ScanVerticesResponse, ExpandOneResponse>;
|
||||
using WriteRequests = msgs::WriteRequests;
|
||||
using WriteResponses = msgs::WriteResponses;
|
||||
using ReadRequests = msgs::ReadRequests;
|
||||
using ReadResponses = msgs::ReadResponses;
|
||||
|
||||
using ConcreteCoordinatorRsm = CoordinatorRsm<SimulatorTransport>;
|
||||
using ConcreteStorageRsm =
|
||||
@ -149,40 +151,34 @@ void RunStorageRaft(Raft<IoImpl, MockedShardRsm, WriteRequests, WriteResponses,
|
||||
server.Run();
|
||||
}
|
||||
|
||||
template <typename ShardRequestManager>
|
||||
void TestScanAll(ShardRequestManager &io) {
|
||||
memgraph::msgs::ExecutionState<ScanVerticesRequest> state{.label = "test_label"};
|
||||
void TestScanVertices(msgs::ShardRequestManagerInterface &io) {
|
||||
msgs::ExecutionState<ScanVerticesRequest> state{.label = "test_label"};
|
||||
|
||||
auto result = io.Request(state);
|
||||
MG_ASSERT(result.size() == 2);
|
||||
{
|
||||
auto prop = result[0].GetProperty(memgraph::msgs::PropertyId::FromUint(0));
|
||||
auto prop = result[0].GetProperty(msgs::PropertyId::FromUint(0));
|
||||
MG_ASSERT(prop.int_v == 0);
|
||||
prop = result[1].GetProperty(memgraph::msgs::PropertyId::FromUint(0));
|
||||
prop = result[1].GetProperty(msgs::PropertyId::FromUint(0));
|
||||
MG_ASSERT(prop.int_v == 444);
|
||||
}
|
||||
|
||||
result = io.Request(state);
|
||||
{
|
||||
MG_ASSERT(result.size() == 1);
|
||||
auto prop = result[0].GetProperty(memgraph::msgs::PropertyId::FromUint(0));
|
||||
auto prop = result[0].GetProperty(msgs::PropertyId::FromUint(0));
|
||||
MG_ASSERT(prop.int_v == 1);
|
||||
}
|
||||
|
||||
// Exhaust it, request should be empty
|
||||
result = io.Request(state);
|
||||
MG_ASSERT(result.size() == 0);
|
||||
}
|
||||
|
||||
template <typename ShardRequestManager>
|
||||
void TestCreateVertices(ShardRequestManager &io) {
|
||||
using PropVal = memgraph::msgs::Value;
|
||||
memgraph::msgs::ExecutionState<CreateVerticesRequest> state;
|
||||
std::vector<memgraph::msgs::NewVertex> new_vertices;
|
||||
auto label_id = io.LabelNameToLabelId("test_label");
|
||||
memgraph::msgs::NewVertex a1{.primary_key = {PropVal(int64_t(1)), PropVal(int64_t(0))}};
|
||||
void TestCreateVertices(msgs::ShardRequestManagerInterface &io) {
|
||||
using PropVal = msgs::Value;
|
||||
msgs::ExecutionState<CreateVerticesRequest> state;
|
||||
std::vector<msgs::NewVertex> new_vertices;
|
||||
auto label_id = io.NameToLabel("test_label");
|
||||
msgs::NewVertex a1{.primary_key = {PropVal(int64_t(1)), PropVal(int64_t(0))}};
|
||||
a1.label_ids.push_back({label_id});
|
||||
memgraph::msgs::NewVertex a2{.primary_key = {PropVal(int64_t(13)), PropVal(int64_t(13))}};
|
||||
msgs::NewVertex a2{.primary_key = {PropVal(int64_t(13)), PropVal(int64_t(13))}};
|
||||
a2.label_ids.push_back({label_id});
|
||||
new_vertices.push_back(std::move(a1));
|
||||
new_vertices.push_back(std::move(a2));
|
||||
@ -191,131 +187,176 @@ void TestCreateVertices(ShardRequestManager &io) {
|
||||
MG_ASSERT(result.size() == 2);
|
||||
}
|
||||
|
||||
template <typename ShardRequestManager>
|
||||
void TestExpand(ShardRequestManager &io) {}
|
||||
void TestCreateExpand(msgs::ShardRequestManagerInterface &io) {
|
||||
using PropVal = msgs::Value;
|
||||
msgs::ExecutionState<msgs::CreateExpandRequest> state;
|
||||
std::vector<msgs::NewExpand> new_expands;
|
||||
|
||||
const auto edge_type_id = io.NameToEdgeType("edge_type");
|
||||
const auto label = msgs::Label{io.NameToLabel("test_label")};
|
||||
const msgs::VertexId vertex_id_1{label, {PropVal(int64_t(0)), PropVal(int64_t(0))}};
|
||||
const msgs::VertexId vertex_id_2{label, {PropVal(int64_t(13)), PropVal(int64_t(13))}};
|
||||
msgs::NewExpand expand_1{
|
||||
.id = {.gid = 0}, .type = {edge_type_id}, .src_vertex = vertex_id_1, .dest_vertex = vertex_id_2};
|
||||
msgs::NewExpand expand_2{
|
||||
.id = {.gid = 1}, .type = {edge_type_id}, .src_vertex = vertex_id_2, .dest_vertex = vertex_id_1};
|
||||
new_expands.push_back(std::move(expand_1));
|
||||
new_expands.push_back(std::move(expand_2));
|
||||
|
||||
auto responses = io.Request(state, std::move(new_expands));
|
||||
MG_ASSERT(responses.size() == 2);
|
||||
MG_ASSERT(responses[0].success);
|
||||
MG_ASSERT(responses[1].success);
|
||||
}
|
||||
|
||||
void TestExpandOne(msgs::ShardRequestManagerInterface &shard_request_manager) {
|
||||
msgs::ExecutionState<msgs::ExpandOneRequest> state{};
|
||||
msgs::ExpandOneRequest request;
|
||||
const auto edge_type_id = shard_request_manager.NameToEdgeType("edge_type");
|
||||
const auto label = msgs::Label{shard_request_manager.NameToLabel("test_label")};
|
||||
request.src_vertices.push_back(msgs::VertexId{label, {msgs::Value(int64_t(0)), msgs::Value(int64_t(0))}});
|
||||
request.edge_types.push_back(msgs::EdgeType{edge_type_id});
|
||||
request.direction = msgs::EdgeDirection::BOTH;
|
||||
auto result_rows = shard_request_manager.Request(state, std::move(request));
|
||||
MG_ASSERT(result_rows.size() == 2);
|
||||
}
|
||||
|
||||
template <typename ShardRequestManager>
|
||||
void TestAggregate(ShardRequestManager &io) {}
|
||||
|
||||
int main() {
|
||||
// SimulatorConfig config{
|
||||
// .drop_percent = 0,
|
||||
// .perform_timeouts = false,
|
||||
// .scramble_messages = false,
|
||||
// .rng_seed = 0,
|
||||
// .start_time = Time::min() + std::chrono::microseconds{256 * 1024},
|
||||
// .abort_time = Time::min() + std::chrono::microseconds{2 * 8 * 1024 * 1024},
|
||||
// };
|
||||
void DoTest() {
|
||||
SimulatorConfig config{
|
||||
.drop_percent = 0,
|
||||
.perform_timeouts = false,
|
||||
.scramble_messages = false,
|
||||
.rng_seed = 0,
|
||||
.start_time = Time::min() + std::chrono::microseconds{256 * 1024},
|
||||
.abort_time = Time::min() + std::chrono::microseconds{2 * 8 * 1024 * 1024},
|
||||
};
|
||||
|
||||
// auto simulator = Simulator(config);
|
||||
// const auto one_second = std::chrono::seconds(1);
|
||||
auto simulator = Simulator(config);
|
||||
const auto one_second = std::chrono::seconds(1);
|
||||
|
||||
// Io<SimulatorTransport> cli_io = simulator.RegisterNew();
|
||||
// cli_io.SetDefaultTimeout(one_second);
|
||||
Io<SimulatorTransport> cli_io = simulator.RegisterNew();
|
||||
cli_io.SetDefaultTimeout(one_second);
|
||||
|
||||
// // Register
|
||||
// Io<SimulatorTransport> a_io_1 = simulator.RegisterNew();
|
||||
// a_io_1.SetDefaultTimeout(one_second);
|
||||
// Io<SimulatorTransport> a_io_2 = simulator.RegisterNew();
|
||||
// a_io_2.SetDefaultTimeout(one_second);
|
||||
// Io<SimulatorTransport> a_io_3 = simulator.RegisterNew();
|
||||
// a_io_3.SetDefaultTimeout(one_second);
|
||||
// Register
|
||||
Io<SimulatorTransport> a_io_1 = simulator.RegisterNew();
|
||||
a_io_1.SetDefaultTimeout(one_second);
|
||||
Io<SimulatorTransport> a_io_2 = simulator.RegisterNew();
|
||||
a_io_2.SetDefaultTimeout(one_second);
|
||||
Io<SimulatorTransport> a_io_3 = simulator.RegisterNew();
|
||||
a_io_3.SetDefaultTimeout(one_second);
|
||||
|
||||
// Io<SimulatorTransport> b_io_1 = simulator.RegisterNew();
|
||||
// b_io_1.SetDefaultTimeout(one_second);
|
||||
// Io<SimulatorTransport> b_io_2 = simulator.RegisterNew();
|
||||
// b_io_2.SetDefaultTimeout(one_second);
|
||||
// Io<SimulatorTransport> b_io_3 = simulator.RegisterNew();
|
||||
// b_io_3.SetDefaultTimeout(one_second);
|
||||
Io<SimulatorTransport> b_io_1 = simulator.RegisterNew();
|
||||
b_io_1.SetDefaultTimeout(one_second);
|
||||
Io<SimulatorTransport> b_io_2 = simulator.RegisterNew();
|
||||
b_io_2.SetDefaultTimeout(one_second);
|
||||
Io<SimulatorTransport> b_io_3 = simulator.RegisterNew();
|
||||
b_io_3.SetDefaultTimeout(one_second);
|
||||
|
||||
// // Preconfigure coordinator with kv shard 'A' and 'B'
|
||||
// auto sm1 = CreateDummyShardmap(a_io_1.GetAddress(), a_io_2.GetAddress(), a_io_3.GetAddress(), b_io_1.GetAddress(),
|
||||
// b_io_2.GetAddress(), b_io_3.GetAddress());
|
||||
// auto sm2 = CreateDummyShardmap(a_io_1.GetAddress(), a_io_2.GetAddress(), a_io_3.GetAddress(), b_io_1.GetAddress(),
|
||||
// b_io_2.GetAddress(), b_io_3.GetAddress());
|
||||
// auto sm3 = CreateDummyShardmap(a_io_1.GetAddress(), a_io_2.GetAddress(), a_io_3.GetAddress(), b_io_1.GetAddress(),
|
||||
// b_io_2.GetAddress(), b_io_3.GetAddress());
|
||||
// Preconfigure coordinator with kv shard 'A' and 'B'
|
||||
auto sm1 = CreateDummyShardmap(a_io_1.GetAddress(), a_io_2.GetAddress(), a_io_3.GetAddress(), b_io_1.GetAddress(),
|
||||
b_io_2.GetAddress(), b_io_3.GetAddress());
|
||||
auto sm2 = CreateDummyShardmap(a_io_1.GetAddress(), a_io_2.GetAddress(), a_io_3.GetAddress(), b_io_1.GetAddress(),
|
||||
b_io_2.GetAddress(), b_io_3.GetAddress());
|
||||
auto sm3 = CreateDummyShardmap(a_io_1.GetAddress(), a_io_2.GetAddress(), a_io_3.GetAddress(), b_io_1.GetAddress(),
|
||||
b_io_2.GetAddress(), b_io_3.GetAddress());
|
||||
|
||||
// // Spin up shard A
|
||||
// std::vector<Address> a_addrs = {a_io_1.GetAddress(), a_io_2.GetAddress(), a_io_3.GetAddress()};
|
||||
// Spin up shard A
|
||||
std::vector<Address> a_addrs = {a_io_1.GetAddress(), a_io_2.GetAddress(), a_io_3.GetAddress()};
|
||||
|
||||
// std::vector<Address> a_1_peers = {a_addrs[1], a_addrs[2]};
|
||||
// std::vector<Address> a_2_peers = {a_addrs[0], a_addrs[2]};
|
||||
// std::vector<Address> a_3_peers = {a_addrs[0], a_addrs[1]};
|
||||
std::vector<Address> a_1_peers = {a_addrs[1], a_addrs[2]};
|
||||
std::vector<Address> a_2_peers = {a_addrs[0], a_addrs[2]};
|
||||
std::vector<Address> a_3_peers = {a_addrs[0], a_addrs[1]};
|
||||
|
||||
// ConcreteStorageRsm a_1{std::move(a_io_1), a_1_peers, MockedShardRsm{}};
|
||||
// ConcreteStorageRsm a_2{std::move(a_io_2), a_2_peers, MockedShardRsm{}};
|
||||
// ConcreteStorageRsm a_3{std::move(a_io_3), a_3_peers, MockedShardRsm{}};
|
||||
ConcreteStorageRsm a_1{std::move(a_io_1), a_1_peers, MockedShardRsm{}};
|
||||
ConcreteStorageRsm a_2{std::move(a_io_2), a_2_peers, MockedShardRsm{}};
|
||||
ConcreteStorageRsm a_3{std::move(a_io_3), a_3_peers, MockedShardRsm{}};
|
||||
|
||||
// auto a_thread_1 = std::jthread(RunStorageRaft<SimulatorTransport>, std::move(a_1));
|
||||
// simulator.IncrementServerCountAndWaitForQuiescentState(a_addrs[0]);
|
||||
auto a_thread_1 = std::jthread(RunStorageRaft<SimulatorTransport>, std::move(a_1));
|
||||
simulator.IncrementServerCountAndWaitForQuiescentState(a_addrs[0]);
|
||||
|
||||
// auto a_thread_2 = std::jthread(RunStorageRaft<SimulatorTransport>, std::move(a_2));
|
||||
// simulator.IncrementServerCountAndWaitForQuiescentState(a_addrs[1]);
|
||||
auto a_thread_2 = std::jthread(RunStorageRaft<SimulatorTransport>, std::move(a_2));
|
||||
simulator.IncrementServerCountAndWaitForQuiescentState(a_addrs[1]);
|
||||
|
||||
// auto a_thread_3 = std::jthread(RunStorageRaft<SimulatorTransport>, std::move(a_3));
|
||||
// simulator.IncrementServerCountAndWaitForQuiescentState(a_addrs[2]);
|
||||
auto a_thread_3 = std::jthread(RunStorageRaft<SimulatorTransport>, std::move(a_3));
|
||||
simulator.IncrementServerCountAndWaitForQuiescentState(a_addrs[2]);
|
||||
|
||||
// // Spin up shard B
|
||||
// std::vector<Address> b_addrs = {b_io_1.GetAddress(), b_io_2.GetAddress(), b_io_3.GetAddress()};
|
||||
// Spin up shard B
|
||||
std::vector<Address> b_addrs = {b_io_1.GetAddress(), b_io_2.GetAddress(), b_io_3.GetAddress()};
|
||||
|
||||
// std::vector<Address> b_1_peers = {b_addrs[1], b_addrs[2]};
|
||||
// std::vector<Address> b_2_peers = {b_addrs[0], b_addrs[2]};
|
||||
// std::vector<Address> b_3_peers = {b_addrs[0], b_addrs[1]};
|
||||
std::vector<Address> b_1_peers = {b_addrs[1], b_addrs[2]};
|
||||
std::vector<Address> b_2_peers = {b_addrs[0], b_addrs[2]};
|
||||
std::vector<Address> b_3_peers = {b_addrs[0], b_addrs[1]};
|
||||
|
||||
// ConcreteStorageRsm b_1{std::move(b_io_1), b_1_peers, MockedShardRsm{}};
|
||||
// ConcreteStorageRsm b_2{std::move(b_io_2), b_2_peers, MockedShardRsm{}};
|
||||
// ConcreteStorageRsm b_3{std::move(b_io_3), b_3_peers, MockedShardRsm{}};
|
||||
ConcreteStorageRsm b_1{std::move(b_io_1), b_1_peers, MockedShardRsm{}};
|
||||
ConcreteStorageRsm b_2{std::move(b_io_2), b_2_peers, MockedShardRsm{}};
|
||||
ConcreteStorageRsm b_3{std::move(b_io_3), b_3_peers, MockedShardRsm{}};
|
||||
|
||||
// auto b_thread_1 = std::jthread(RunStorageRaft<SimulatorTransport>, std::move(b_1));
|
||||
// simulator.IncrementServerCountAndWaitForQuiescentState(b_addrs[0]);
|
||||
auto b_thread_1 = std::jthread(RunStorageRaft<SimulatorTransport>, std::move(b_1));
|
||||
simulator.IncrementServerCountAndWaitForQuiescentState(b_addrs[0]);
|
||||
|
||||
// auto b_thread_2 = std::jthread(RunStorageRaft<SimulatorTransport>, std::move(b_2));
|
||||
// simulator.IncrementServerCountAndWaitForQuiescentState(b_addrs[1]);
|
||||
auto b_thread_2 = std::jthread(RunStorageRaft<SimulatorTransport>, std::move(b_2));
|
||||
simulator.IncrementServerCountAndWaitForQuiescentState(b_addrs[1]);
|
||||
|
||||
// auto b_thread_3 = std::jthread(RunStorageRaft<SimulatorTransport>, std::move(b_3));
|
||||
// simulator.IncrementServerCountAndWaitForQuiescentState(b_addrs[2]);
|
||||
auto b_thread_3 = std::jthread(RunStorageRaft<SimulatorTransport>, std::move(b_3));
|
||||
simulator.IncrementServerCountAndWaitForQuiescentState(b_addrs[2]);
|
||||
|
||||
// // Spin up coordinators
|
||||
// Spin up coordinators
|
||||
|
||||
// Io<SimulatorTransport> c_io_1 = simulator.RegisterNew();
|
||||
// c_io_1.SetDefaultTimeout(one_second);
|
||||
// Io<SimulatorTransport> c_io_2 = simulator.RegisterNew();
|
||||
// c_io_2.SetDefaultTimeout(one_second);
|
||||
// Io<SimulatorTransport> c_io_3 = simulator.RegisterNew();
|
||||
// c_io_3.SetDefaultTimeout(one_second);
|
||||
Io<SimulatorTransport> c_io_1 = simulator.RegisterNew();
|
||||
c_io_1.SetDefaultTimeout(one_second);
|
||||
Io<SimulatorTransport> c_io_2 = simulator.RegisterNew();
|
||||
c_io_2.SetDefaultTimeout(one_second);
|
||||
Io<SimulatorTransport> c_io_3 = simulator.RegisterNew();
|
||||
c_io_3.SetDefaultTimeout(one_second);
|
||||
|
||||
// std::vector<Address> c_addrs = {c_io_1.GetAddress(), c_io_2.GetAddress(), c_io_3.GetAddress()};
|
||||
std::vector<Address> c_addrs = {c_io_1.GetAddress(), c_io_2.GetAddress(), c_io_3.GetAddress()};
|
||||
|
||||
// std::vector<Address> c_1_peers = {c_addrs[1], c_addrs[2]};
|
||||
// std::vector<Address> c_2_peers = {c_addrs[0], c_addrs[2]};
|
||||
// std::vector<Address> c_3_peers = {c_addrs[0], c_addrs[1]};
|
||||
std::vector<Address> c_1_peers = {c_addrs[1], c_addrs[2]};
|
||||
std::vector<Address> c_2_peers = {c_addrs[0], c_addrs[2]};
|
||||
std::vector<Address> c_3_peers = {c_addrs[0], c_addrs[1]};
|
||||
|
||||
// ConcreteCoordinatorRsm c_1{std::move(c_io_1), c_1_peers, Coordinator{(sm1)}};
|
||||
// ConcreteCoordinatorRsm c_2{std::move(c_io_2), c_2_peers, Coordinator{(sm2)}};
|
||||
// ConcreteCoordinatorRsm c_3{std::move(c_io_3), c_3_peers, Coordinator{(sm3)}};
|
||||
ConcreteCoordinatorRsm c_1{std::move(c_io_1), c_1_peers, Coordinator{(sm1)}};
|
||||
ConcreteCoordinatorRsm c_2{std::move(c_io_2), c_2_peers, Coordinator{(sm2)}};
|
||||
ConcreteCoordinatorRsm c_3{std::move(c_io_3), c_3_peers, Coordinator{(sm3)}};
|
||||
|
||||
// auto c_thread_1 = std::jthread([c_1]() mutable { c_1.Run(); });
|
||||
// simulator.IncrementServerCountAndWaitForQuiescentState(c_addrs[0]);
|
||||
auto c_thread_1 = std::jthread([c_1]() mutable { c_1.Run(); });
|
||||
simulator.IncrementServerCountAndWaitForQuiescentState(c_addrs[0]);
|
||||
|
||||
// auto c_thread_2 = std::jthread([c_2]() mutable { c_2.Run(); });
|
||||
// simulator.IncrementServerCountAndWaitForQuiescentState(c_addrs[1]);
|
||||
auto c_thread_2 = std::jthread([c_2]() mutable { c_2.Run(); });
|
||||
simulator.IncrementServerCountAndWaitForQuiescentState(c_addrs[1]);
|
||||
|
||||
// auto c_thread_3 = std::jthread([c_3]() mutable { c_3.Run(); });
|
||||
// simulator.IncrementServerCountAndWaitForQuiescentState(c_addrs[2]);
|
||||
auto c_thread_3 = std::jthread([c_3]() mutable { c_3.Run(); });
|
||||
simulator.IncrementServerCountAndWaitForQuiescentState(c_addrs[2]);
|
||||
|
||||
// std::cout << "beginning test after servers have become quiescent" << std::endl;
|
||||
std::cout << "beginning test after servers have become quiescent" << std::endl;
|
||||
|
||||
// // Have client contact coordinator RSM for a new transaction ID and
|
||||
// // also get the current shard map
|
||||
// CoordinatorClient<SimulatorTransport> coordinator_client(cli_io, c_addrs[0], c_addrs);
|
||||
// Have client contact coordinator RSM for a new transaction ID and
|
||||
// also get the current shard map
|
||||
CoordinatorClient<SimulatorTransport> coordinator_client(cli_io, c_addrs[0], c_addrs);
|
||||
|
||||
// memgraph::msgs::ShardRequestManager<SimulatorTransport> io(std::move(coordinator_client), std::move(cli_io));
|
||||
msgs::ShardRequestManager<SimulatorTransport> io(std::move(coordinator_client), std::move(cli_io));
|
||||
|
||||
// io.StartTransaction();
|
||||
// TestScanAll(io);
|
||||
// TestCreateVertices(io);
|
||||
io.StartTransaction();
|
||||
TestScanVertices(io);
|
||||
TestCreateVertices(io);
|
||||
TestCreateExpand(io);
|
||||
|
||||
// simulator.ShutDown();
|
||||
return 0;
|
||||
simulator.ShutDown();
|
||||
|
||||
SimulatorStats stats = simulator.Stats();
|
||||
|
||||
std::cout << "total messages: " << stats.total_messages << std::endl;
|
||||
std::cout << "dropped messages: " << stats.dropped_messages << std::endl;
|
||||
std::cout << "timed out requests: " << stats.timed_out_requests << std::endl;
|
||||
std::cout << "total requests: " << stats.total_requests << std::endl;
|
||||
std::cout << "total responses: " << stats.total_responses << std::endl;
|
||||
std::cout << "simulator ticks: " << stats.simulator_ticks << std::endl;
|
||||
|
||||
std::cout << "========================== SUCCESS :) ==========================" << std::endl;
|
||||
}
|
||||
} // namespace memgraph::query::v2::tests
|
||||
|
||||
int main() { memgraph::query::v2::tests::DoTest(); }
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include "io/simulator/simulator_transport.hpp"
|
||||
#include "query/v2/requests.hpp"
|
||||
#include "storage/v3/id_types.hpp"
|
||||
#include "storage/v3/key_store.hpp"
|
||||
#include "storage/v3/property_value.hpp"
|
||||
#include "storage/v3/shard.hpp"
|
||||
#include "storage/v3/shard_rsm.hpp"
|
||||
@ -78,10 +79,10 @@ uint64_t GetUniqueInteger() {
|
||||
return prop_val_val++;
|
||||
}
|
||||
|
||||
LabelId get_primary_label() { return LabelId::FromUint(0); }
|
||||
constexpr LabelId get_primary_label() { return LabelId::FromUint(1); }
|
||||
|
||||
SchemaProperty get_schema_property() {
|
||||
return {.property_id = PropertyId::FromUint(0), .type = common::SchemaType::INT};
|
||||
constexpr SchemaProperty get_schema_property() {
|
||||
return {.property_id = PropertyId::FromUint(2), .type = common::SchemaType::INT};
|
||||
}
|
||||
|
||||
msgs::PrimaryKey GetPrimaryKey(int64_t value) {
|
||||
@ -92,7 +93,7 @@ msgs::PrimaryKey GetPrimaryKey(int64_t value) {
|
||||
|
||||
msgs::NewVertex GetNewVertex(int64_t value) {
|
||||
// Specify Labels.
|
||||
msgs::Label label1 = {.id = LabelId::FromUint(1)};
|
||||
msgs::Label label1 = {.id = LabelId::FromUint(3)};
|
||||
std::vector<msgs::Label> label_ids = {label1};
|
||||
|
||||
// Specify primary key.
|
||||
@ -100,14 +101,14 @@ msgs::NewVertex GetNewVertex(int64_t value) {
|
||||
|
||||
// Specify properties
|
||||
auto val1 = msgs::Value(static_cast<int64_t>(value));
|
||||
auto prop1 = std::make_pair(PropertyId::FromUint(1), val1);
|
||||
auto prop1 = std::make_pair(PropertyId::FromUint(4), val1);
|
||||
|
||||
auto val3 = msgs::Value(static_cast<int64_t>(value));
|
||||
auto prop3 = std::make_pair(PropertyId::FromUint(2), val3);
|
||||
auto prop3 = std::make_pair(PropertyId::FromUint(5), val3);
|
||||
|
||||
//(VERIFY) does the schema has to be specified with the properties or the primarykey?
|
||||
auto val2 = msgs::Value(static_cast<int64_t>(value));
|
||||
auto prop2 = std::make_pair(PropertyId::FromUint(0), val2);
|
||||
auto prop2 = std::make_pair(PropertyId::FromUint(6), val2);
|
||||
|
||||
std::vector<std::pair<PropertyId, msgs::Value>> properties{prop1, prop2, prop3};
|
||||
|
||||
@ -136,6 +137,7 @@ void Commit(ShardClient &client, const coordinator::Hlc &transaction_timestamp)
|
||||
|
||||
auto write_response_result = write_res.GetValue();
|
||||
auto write_response = std::get<msgs::CommitResponse>(write_response_result);
|
||||
MG_ASSERT(write_response.success, "Commit expected to be successful, but it is failed");
|
||||
|
||||
break;
|
||||
}
|
||||
@ -185,7 +187,7 @@ bool AttemptToUpdateVertex(ShardClient &client, int64_t value) {
|
||||
auto vertex_id = GetValuePrimaryKeysWithValue(value)[0];
|
||||
|
||||
std::vector<std::pair<PropertyId, msgs::Value>> property_updates;
|
||||
auto property_update = std::make_pair(PropertyId::FromUint(2), msgs::Value(static_cast<int64_t>(10000)));
|
||||
auto property_update = std::make_pair(PropertyId::FromUint(5), msgs::Value(static_cast<int64_t>(10000)));
|
||||
|
||||
auto vertex_prop = msgs::UpdateVertexProp{};
|
||||
vertex_prop.primary_key = vertex_id;
|
||||
@ -210,7 +212,7 @@ bool AttemptToUpdateVertex(ShardClient &client, int64_t value) {
|
||||
}
|
||||
|
||||
bool AttemptToAddEdge(ShardClient &client, int64_t value_of_vertex_1, int64_t value_of_vertex_2, int64_t edge_gid,
|
||||
int64_t edge_type_id) {
|
||||
EdgeTypeId edge_type_id) {
|
||||
auto id = msgs::EdgeId{};
|
||||
msgs::Label label = {.id = get_primary_label()};
|
||||
|
||||
@ -221,15 +223,14 @@ bool AttemptToAddEdge(ShardClient &client, int64_t value_of_vertex_1, int64_t va
|
||||
auto type = msgs::EdgeType{};
|
||||
type.id = edge_type_id;
|
||||
|
||||
auto edge = msgs::Edge{};
|
||||
msgs::NewExpand edge;
|
||||
edge.id = id;
|
||||
edge.type = type;
|
||||
edge.src = src;
|
||||
edge.dst = dst;
|
||||
edge.properties = std::nullopt;
|
||||
edge.src_vertex = src;
|
||||
edge.dest_vertex = dst;
|
||||
|
||||
msgs::CreateEdgesRequest create_req{};
|
||||
create_req.edges = {edge};
|
||||
msgs::CreateExpandRequest create_req{};
|
||||
create_req.new_expands = {edge};
|
||||
create_req.transaction_id.logical_id = GetTransactionId();
|
||||
|
||||
while (true) {
|
||||
@ -239,18 +240,19 @@ bool AttemptToAddEdge(ShardClient &client, int64_t value_of_vertex_1, int64_t va
|
||||
}
|
||||
|
||||
auto write_response_result = write_res.GetValue();
|
||||
auto write_response = std::get<msgs::CreateEdgesResponse>(write_response_result);
|
||||
auto write_response = std::get<msgs::CreateExpandResponse>(write_response_result);
|
||||
|
||||
Commit(client, create_req.transaction_id);
|
||||
|
||||
return write_response.success;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool AttemptToAddEdgeWithProperties(ShardClient &client, int64_t value_of_vertex_1, int64_t value_of_vertex_2,
|
||||
int64_t edge_gid, uint64_t edge_prop_id, int64_t edge_prop_val,
|
||||
const std::vector<uint64_t> &edge_type_id) {
|
||||
auto id1 = msgs::EdgeId{};
|
||||
const std::vector<EdgeTypeId> &edge_type_id) {
|
||||
msgs::EdgeId id1;
|
||||
msgs::Label label = {.id = get_primary_label()};
|
||||
|
||||
auto src = std::make_pair(label, GetPrimaryKey(value_of_vertex_1));
|
||||
@ -262,19 +264,19 @@ bool AttemptToAddEdgeWithProperties(ShardClient &client, int64_t value_of_vertex
|
||||
|
||||
auto edge_prop = std::make_pair(PropertyId::FromUint(edge_prop_id), msgs::Value(edge_prop_val));
|
||||
|
||||
auto edge = msgs::Edge{};
|
||||
edge.id = id1;
|
||||
edge.type = type1;
|
||||
edge.src = src;
|
||||
edge.dst = dst;
|
||||
edge.properties = {edge_prop};
|
||||
auto expand = msgs::NewExpand{};
|
||||
expand.id = id1;
|
||||
expand.type = type1;
|
||||
expand.src_vertex = src;
|
||||
expand.dest_vertex = dst;
|
||||
expand.properties = {edge_prop};
|
||||
|
||||
msgs::CreateEdgesRequest create_req{};
|
||||
create_req.edges = {edge};
|
||||
msgs::CreateExpandRequest create_req{};
|
||||
create_req.new_expands = {expand};
|
||||
create_req.transaction_id.logical_id = GetTransactionId();
|
||||
|
||||
auto write_res = client.SendWriteRequest(create_req);
|
||||
MG_ASSERT(write_res.HasValue() && std::get<msgs::CreateEdgesResponse>(write_res.GetValue()).success,
|
||||
MG_ASSERT(write_res.HasValue() && std::get<msgs::CreateExpandResponse>(write_res.GetValue()).success,
|
||||
"Unexpected failure");
|
||||
|
||||
Commit(client, create_req.transaction_id);
|
||||
@ -282,7 +284,7 @@ bool AttemptToAddEdgeWithProperties(ShardClient &client, int64_t value_of_vertex
|
||||
}
|
||||
|
||||
bool AttemptToDeleteEdge(ShardClient &client, int64_t value_of_vertex_1, int64_t value_of_vertex_2, int64_t edge_gid,
|
||||
int64_t edge_type_id) {
|
||||
EdgeTypeId edge_type_id) {
|
||||
auto id = msgs::EdgeId{};
|
||||
msgs::Label label = {.id = get_primary_label()};
|
||||
|
||||
@ -319,7 +321,7 @@ bool AttemptToDeleteEdge(ShardClient &client, int64_t value_of_vertex_1, int64_t
|
||||
}
|
||||
|
||||
bool AttemptToUpdateEdge(ShardClient &client, int64_t value_of_vertex_1, int64_t value_of_vertex_2, int64_t edge_gid,
|
||||
int64_t edge_type_id, uint64_t edge_prop_id, int64_t edge_prop_val) {
|
||||
EdgeTypeId edge_type_id, uint64_t edge_prop_id, int64_t edge_prop_val) {
|
||||
auto id = msgs::EdgeId{};
|
||||
msgs::Label label = {.id = get_primary_label()};
|
||||
|
||||
@ -338,7 +340,7 @@ bool AttemptToUpdateEdge(ShardClient &client, int64_t value_of_vertex_1, int64_t
|
||||
auto edge_prop = std::vector<std::pair<PropertyId, msgs::Value>>{
|
||||
std::make_pair(PropertyId::FromUint(edge_prop_id), msgs::Value(edge_prop_val))};
|
||||
|
||||
msgs::UpdateEdgeProp update_props{.src = src, .dst = dst, .edge_id = id, .property_updates = edge_prop};
|
||||
msgs::UpdateEdgeProp update_props{.edge_id = id, .src = src, .dst = dst, .property_updates = edge_prop};
|
||||
|
||||
msgs::UpdateEdgesRequest update_req{};
|
||||
update_req.transaction_id.logical_id = GetTransactionId();
|
||||
@ -362,7 +364,7 @@ std::tuple<size_t, std::optional<msgs::VertexId>> AttemptToScanAllWithoutBatchLi
|
||||
msgs::VertexId start_id) {
|
||||
msgs::ScanVerticesRequest scan_req{};
|
||||
scan_req.batch_limit = {};
|
||||
scan_req.filter_expressions = std::nullopt;
|
||||
scan_req.filter_expressions.clear();
|
||||
scan_req.props_to_return = std::nullopt;
|
||||
scan_req.start_id = start_id;
|
||||
scan_req.storage_view = msgs::StorageView::OLD;
|
||||
@ -388,7 +390,7 @@ std::tuple<size_t, std::optional<msgs::VertexId>> AttemptToScanAllWithBatchLimit
|
||||
uint64_t batch_limit) {
|
||||
msgs::ScanVerticesRequest scan_req{};
|
||||
scan_req.batch_limit = batch_limit;
|
||||
scan_req.filter_expressions = std::nullopt;
|
||||
scan_req.filter_expressions.clear();
|
||||
scan_req.props_to_return = std::nullopt;
|
||||
scan_req.start_id = start_id;
|
||||
scan_req.storage_view = msgs::StorageView::OLD;
|
||||
@ -409,61 +411,108 @@ std::tuple<size_t, std::optional<msgs::VertexId>> AttemptToScanAllWithBatchLimit
|
||||
}
|
||||
}
|
||||
|
||||
void AttemptToExpandOneWithWrongEdgeType(ShardClient &client, uint64_t src_vertex_val, uint64_t edge_type_id) {
|
||||
// Source vertex
|
||||
msgs::Label label = {.id = get_primary_label()};
|
||||
auto src_vertex = std::make_pair(label, GetPrimaryKey(src_vertex_val));
|
||||
std::tuple<size_t, std::optional<msgs::VertexId>> AttemptToScanAllWithExpression(ShardClient &client,
|
||||
msgs::VertexId start_id,
|
||||
uint64_t batch_limit,
|
||||
uint64_t prop_val_to_check_against) {
|
||||
std::string filter_expr1 = "MG_SYMBOL_NODE.prop1 = " + std::to_string(prop_val_to_check_against);
|
||||
std::vector<std::string> filter_expressions = {filter_expr1};
|
||||
|
||||
// Edge type
|
||||
auto edge_type = msgs::EdgeType{};
|
||||
edge_type.id = edge_type_id + 1;
|
||||
std::string regular_expr1 = "2+2";
|
||||
std::vector<std::string> vertex_expressions = {regular_expr1};
|
||||
|
||||
// Edge direction
|
||||
auto edge_direction = msgs::EdgeDirection::OUT;
|
||||
|
||||
// Source Vertex properties to look for
|
||||
std::optional<std::vector<PropertyId>> src_vertex_properties = {};
|
||||
|
||||
// Edge properties to look for
|
||||
std::optional<std::vector<PropertyId>> edge_properties = {};
|
||||
|
||||
std::vector<msgs::Expression> expressions;
|
||||
std::optional<std::vector<msgs::OrderBy>> order_by = {};
|
||||
std::optional<size_t> limit = {};
|
||||
std::optional<msgs::Filter> filter = {};
|
||||
|
||||
msgs::ExpandOneRequest expand_one_req{};
|
||||
|
||||
expand_one_req.direction = edge_direction;
|
||||
expand_one_req.edge_properties = edge_properties;
|
||||
expand_one_req.edge_types = {edge_type};
|
||||
expand_one_req.expressions = expressions;
|
||||
expand_one_req.filter = filter;
|
||||
expand_one_req.limit = limit;
|
||||
expand_one_req.order_by = order_by;
|
||||
expand_one_req.src_vertex_properties = src_vertex_properties;
|
||||
expand_one_req.src_vertices = {src_vertex};
|
||||
expand_one_req.transaction_id.logical_id = GetTransactionId();
|
||||
msgs::ScanVerticesRequest scan_req{};
|
||||
scan_req.batch_limit = batch_limit;
|
||||
scan_req.filter_expressions = filter_expressions;
|
||||
scan_req.vertex_expressions = vertex_expressions;
|
||||
scan_req.props_to_return = std::nullopt;
|
||||
scan_req.start_id = start_id;
|
||||
scan_req.storage_view = msgs::StorageView::NEW;
|
||||
scan_req.transaction_id.logical_id = GetTransactionId();
|
||||
|
||||
while (true) {
|
||||
auto read_res = client.SendReadRequest(expand_one_req);
|
||||
auto read_res = client.SendReadRequest(scan_req);
|
||||
if (read_res.HasError()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto write_response_result = read_res.GetValue();
|
||||
auto write_response = std::get<msgs::ExpandOneResponse>(write_response_result);
|
||||
MG_ASSERT(write_response.result.size() == 1);
|
||||
auto write_response = std::get<msgs::ScanVerticesResponse>(write_response_result);
|
||||
|
||||
MG_ASSERT(write_response.result[0].edges_with_all_properties);
|
||||
MG_ASSERT(write_response.result[0].edges_with_all_properties->size() == 0);
|
||||
MG_ASSERT(!write_response.result[0].edges_with_specific_properties);
|
||||
MG_ASSERT(write_response.success);
|
||||
MG_ASSERT(!write_response.results.empty(), "There are no results!");
|
||||
MG_ASSERT(write_response.results[0].evaluated_vertex_expressions[0].int_v == 4);
|
||||
return {write_response.results.size(), write_response.next_start_id};
|
||||
}
|
||||
}
|
||||
|
||||
void AttemptToScanAllWithOrderByOnPrimaryProperty(ShardClient &client, msgs::VertexId start_id, uint64_t batch_limit) {
|
||||
msgs::ScanVerticesRequest scan_req;
|
||||
scan_req.batch_limit = batch_limit;
|
||||
scan_req.order_bys = {{msgs::Expression{"MG_SYMBOL_NODE.prop1"}, msgs::OrderingDirection::DESCENDING}};
|
||||
scan_req.props_to_return = std::nullopt;
|
||||
scan_req.start_id = start_id;
|
||||
scan_req.storage_view = msgs::StorageView::NEW;
|
||||
scan_req.transaction_id.logical_id = GetTransactionId();
|
||||
|
||||
while (true) {
|
||||
auto read_res = client.SendReadRequest(scan_req);
|
||||
if (read_res.HasError()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto write_response_result = read_res.GetValue();
|
||||
auto write_response = std::get<msgs::ScanVerticesResponse>(write_response_result);
|
||||
|
||||
MG_ASSERT(write_response.success);
|
||||
MG_ASSERT(write_response.results.size() == 5, "Expecting 5 results!");
|
||||
for (int64_t i{0}; i < 5; ++i) {
|
||||
const auto expected_primary_key = std::vector{msgs::Value(1023 - i)};
|
||||
const auto actual_primary_key = write_response.results[i].vertex.id.second;
|
||||
MG_ASSERT(expected_primary_key == actual_primary_key, "The order of vertices is not correct");
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void AttemptToExpandOneSimple(ShardClient &client, uint64_t src_vertex_val, uint64_t edge_type_id) {
|
||||
void AttemptToScanAllWithOrderByOnSecondaryProperty(ShardClient &client, msgs::VertexId start_id,
|
||||
uint64_t batch_limit) {
|
||||
msgs::ScanVerticesRequest scan_req;
|
||||
scan_req.batch_limit = batch_limit;
|
||||
scan_req.order_bys = {{msgs::Expression{"MG_SYMBOL_NODE.prop4"}, msgs::OrderingDirection::DESCENDING}};
|
||||
scan_req.props_to_return = std::nullopt;
|
||||
scan_req.start_id = start_id;
|
||||
scan_req.storage_view = msgs::StorageView::NEW;
|
||||
scan_req.transaction_id.logical_id = GetTransactionId();
|
||||
|
||||
while (true) {
|
||||
auto read_res = client.SendReadRequest(scan_req);
|
||||
if (read_res.HasError()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto write_response_result = read_res.GetValue();
|
||||
auto write_response = std::get<msgs::ScanVerticesResponse>(write_response_result);
|
||||
|
||||
MG_ASSERT(write_response.success);
|
||||
MG_ASSERT(write_response.results.size() == 5, "Expecting 5 results!");
|
||||
for (int64_t i{0}; i < 5; ++i) {
|
||||
const auto expected_prop4 = std::vector{msgs::Value(1023 - i)};
|
||||
const auto actual_prop4 = std::invoke([&write_response, i]() {
|
||||
const auto res = std::ranges::find_if(write_response.results[i].props, [](const auto &id_value_prop_pair) {
|
||||
return id_value_prop_pair.first.AsInt() == 4;
|
||||
});
|
||||
MG_ASSERT(res != write_response.results[i].props.end(), "Property does not exist!");
|
||||
return std::vector{res->second};
|
||||
});
|
||||
|
||||
MG_ASSERT(expected_prop4 == actual_prop4, "The order of vertices is not correct");
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void AttemptToExpandOneWithWrongEdgeType(ShardClient &client, uint64_t src_vertex_val, EdgeTypeId edge_type_id) {
|
||||
// Source vertex
|
||||
msgs::Label label = {.id = get_primary_label()};
|
||||
auto src_vertex = std::make_pair(label, GetPrimaryKey(src_vertex_val));
|
||||
@ -481,18 +530,18 @@ void AttemptToExpandOneSimple(ShardClient &client, uint64_t src_vertex_val, uint
|
||||
// Edge properties to look for
|
||||
std::optional<std::vector<PropertyId>> edge_properties = {};
|
||||
|
||||
std::vector<msgs::Expression> expressions;
|
||||
std::vector<std::string> expressions;
|
||||
std::optional<std::vector<msgs::OrderBy>> order_by = {};
|
||||
std::optional<size_t> limit = {};
|
||||
std::optional<msgs::Filter> filter = {};
|
||||
std::vector<std::string> filter = {};
|
||||
|
||||
msgs::ExpandOneRequest expand_one_req{};
|
||||
|
||||
expand_one_req.direction = edge_direction;
|
||||
expand_one_req.edge_properties = edge_properties;
|
||||
expand_one_req.edge_types = {edge_type};
|
||||
expand_one_req.expressions = expressions;
|
||||
expand_one_req.filter = filter;
|
||||
expand_one_req.vertex_expressions = expressions;
|
||||
expand_one_req.filters = filter;
|
||||
expand_one_req.limit = limit;
|
||||
expand_one_req.order_by = order_by;
|
||||
expand_one_req.src_vertex_properties = src_vertex_properties;
|
||||
@ -508,17 +557,131 @@ void AttemptToExpandOneSimple(ShardClient &client, uint64_t src_vertex_val, uint
|
||||
auto write_response_result = read_res.GetValue();
|
||||
auto write_response = std::get<msgs::ExpandOneResponse>(write_response_result);
|
||||
MG_ASSERT(write_response.result.size() == 1);
|
||||
MG_ASSERT(write_response.result[0].edges_with_all_properties->size() == 2);
|
||||
auto number_of_properties_on_edge =
|
||||
(std::get<std::map<PropertyId, msgs::Value>>(write_response.result[0].edges_with_all_properties.value()[0]))
|
||||
.size();
|
||||
|
||||
MG_ASSERT(write_response.result[0].in_edges_with_all_properties.empty());
|
||||
MG_ASSERT(write_response.result[0].out_edges_with_all_properties.empty());
|
||||
MG_ASSERT(write_response.result[0].in_edges_with_specific_properties.empty());
|
||||
MG_ASSERT(write_response.result[0].out_edges_with_specific_properties.empty());
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void AttemptToExpandOneSimple(ShardClient &client, uint64_t src_vertex_val, EdgeTypeId edge_type_id) {
|
||||
// Source vertex
|
||||
msgs::Label label = {.id = get_primary_label()};
|
||||
auto src_vertex = std::make_pair(label, GetPrimaryKey(src_vertex_val));
|
||||
|
||||
// Edge type
|
||||
auto edge_type = msgs::EdgeType{};
|
||||
edge_type.id = edge_type_id;
|
||||
|
||||
// Edge direction
|
||||
auto edge_direction = msgs::EdgeDirection::OUT;
|
||||
|
||||
// Source Vertex properties to look for
|
||||
std::optional<std::vector<PropertyId>> src_vertex_properties = {};
|
||||
|
||||
// Edge properties to look for
|
||||
std::optional<std::vector<PropertyId>> edge_properties = {};
|
||||
|
||||
std::vector<std::string> expressions;
|
||||
std::optional<std::vector<msgs::OrderBy>> order_by = {};
|
||||
std::optional<size_t> limit = {};
|
||||
std::vector<std::string> filter = {};
|
||||
|
||||
msgs::ExpandOneRequest expand_one_req{};
|
||||
|
||||
expand_one_req.direction = edge_direction;
|
||||
expand_one_req.edge_properties = edge_properties;
|
||||
expand_one_req.edge_types = {edge_type};
|
||||
expand_one_req.vertex_expressions = expressions;
|
||||
expand_one_req.filters = filter;
|
||||
expand_one_req.limit = limit;
|
||||
expand_one_req.order_by = order_by;
|
||||
expand_one_req.src_vertex_properties = src_vertex_properties;
|
||||
expand_one_req.src_vertices = {src_vertex};
|
||||
expand_one_req.transaction_id.logical_id = GetTransactionId();
|
||||
|
||||
while (true) {
|
||||
auto read_res = client.SendReadRequest(expand_one_req);
|
||||
if (read_res.HasError()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto write_response_result = read_res.GetValue();
|
||||
auto write_response = std::get<msgs::ExpandOneResponse>(write_response_result);
|
||||
MG_ASSERT(write_response.result.size() == 1);
|
||||
MG_ASSERT(write_response.result[0].out_edges_with_all_properties.size() == 2);
|
||||
MG_ASSERT(write_response.result[0].in_edges_with_all_properties.empty());
|
||||
MG_ASSERT(write_response.result[0].in_edges_with_specific_properties.empty());
|
||||
MG_ASSERT(write_response.result[0].out_edges_with_specific_properties.empty());
|
||||
const auto number_of_properties_on_edge =
|
||||
(write_response.result[0].out_edges_with_all_properties[0]).properties.size();
|
||||
MG_ASSERT(number_of_properties_on_edge == 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void AttemptToExpandOneWithUniqueEdges(ShardClient &client, uint64_t src_vertex_val, EdgeTypeId edge_type_id) {
|
||||
// Source vertex
|
||||
msgs::Label label = {.id = get_primary_label()};
|
||||
auto src_vertex = std::make_pair(label, GetPrimaryKey(src_vertex_val));
|
||||
|
||||
// Edge type
|
||||
auto edge_type = msgs::EdgeType{};
|
||||
edge_type.id = edge_type_id;
|
||||
|
||||
// Edge direction
|
||||
auto edge_direction = msgs::EdgeDirection::OUT;
|
||||
|
||||
// Source Vertex properties to look for
|
||||
std::optional<std::vector<PropertyId>> src_vertex_properties = {};
|
||||
|
||||
// Edge properties to look for
|
||||
std::optional<std::vector<PropertyId>> edge_properties = {};
|
||||
|
||||
std::vector<std::string> expressions;
|
||||
std::optional<std::vector<msgs::OrderBy>> order_by = {};
|
||||
std::optional<size_t> limit = {};
|
||||
std::vector<std::string> filter = {};
|
||||
|
||||
msgs::ExpandOneRequest expand_one_req{};
|
||||
|
||||
expand_one_req.direction = edge_direction;
|
||||
expand_one_req.edge_properties = edge_properties;
|
||||
expand_one_req.edge_types = {edge_type};
|
||||
expand_one_req.vertex_expressions = expressions;
|
||||
expand_one_req.filters = filter;
|
||||
expand_one_req.limit = limit;
|
||||
expand_one_req.order_by = order_by;
|
||||
expand_one_req.src_vertex_properties = src_vertex_properties;
|
||||
expand_one_req.src_vertices = {src_vertex};
|
||||
expand_one_req.only_unique_neighbor_rows = true;
|
||||
expand_one_req.transaction_id.logical_id = GetTransactionId();
|
||||
|
||||
while (true) {
|
||||
auto read_res = client.SendReadRequest(expand_one_req);
|
||||
if (read_res.HasError()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto write_response_result = read_res.GetValue();
|
||||
auto write_response = std::get<msgs::ExpandOneResponse>(write_response_result);
|
||||
MG_ASSERT(write_response.result.size() == 1);
|
||||
MG_ASSERT(write_response.result[0].out_edges_with_all_properties.size() == 1);
|
||||
MG_ASSERT(write_response.result[0].in_edges_with_all_properties.empty());
|
||||
MG_ASSERT(write_response.result[0].in_edges_with_specific_properties.empty());
|
||||
MG_ASSERT(write_response.result[0].out_edges_with_specific_properties.empty());
|
||||
const auto number_of_properties_on_edge =
|
||||
(write_response.result[0].out_edges_with_all_properties[0]).properties.size();
|
||||
MG_ASSERT(number_of_properties_on_edge == 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void AttemptToExpandOneWithSpecifiedSrcVertexProperties(ShardClient &client, uint64_t src_vertex_val,
|
||||
uint64_t edge_type_id) {
|
||||
EdgeTypeId edge_type_id) {
|
||||
// Source vertex
|
||||
msgs::Label label = {.id = get_primary_label()};
|
||||
auto src_vertex = std::make_pair(label, GetPrimaryKey(src_vertex_val));
|
||||
@ -537,18 +700,18 @@ void AttemptToExpandOneWithSpecifiedSrcVertexProperties(ShardClient &client, uin
|
||||
// Edge properties to look for
|
||||
std::optional<std::vector<PropertyId>> edge_properties = {};
|
||||
|
||||
std::vector<msgs::Expression> expressions;
|
||||
std::vector<std::string> expressions;
|
||||
std::optional<std::vector<msgs::OrderBy>> order_by = {};
|
||||
std::optional<size_t> limit = {};
|
||||
std::optional<msgs::Filter> filter = {};
|
||||
std::vector<std::string> filter = {};
|
||||
|
||||
msgs::ExpandOneRequest expand_one_req{};
|
||||
|
||||
expand_one_req.direction = edge_direction;
|
||||
expand_one_req.edge_properties = edge_properties;
|
||||
expand_one_req.edge_types = {edge_type};
|
||||
expand_one_req.expressions = expressions;
|
||||
expand_one_req.filter = filter;
|
||||
expand_one_req.vertex_expressions = expressions;
|
||||
expand_one_req.filters = filter;
|
||||
expand_one_req.limit = limit;
|
||||
expand_one_req.order_by = order_by;
|
||||
expand_one_req.src_vertex_properties = src_vertex_properties;
|
||||
@ -564,19 +727,21 @@ void AttemptToExpandOneWithSpecifiedSrcVertexProperties(ShardClient &client, uin
|
||||
auto write_response_result = read_res.GetValue();
|
||||
auto write_response = std::get<msgs::ExpandOneResponse>(write_response_result);
|
||||
MG_ASSERT(write_response.result.size() == 1);
|
||||
auto src_vertex_props_size = write_response.result[0].src_vertex_properties->size();
|
||||
auto src_vertex_props_size = write_response.result[0].src_vertex_properties.size();
|
||||
MG_ASSERT(src_vertex_props_size == 1);
|
||||
MG_ASSERT(write_response.result[0].edges_with_all_properties->size() == 2);
|
||||
auto number_of_properties_on_edge =
|
||||
(std::get<std::map<PropertyId, msgs::Value>>(write_response.result[0].edges_with_all_properties.value()[0]))
|
||||
.size();
|
||||
MG_ASSERT(write_response.result[0].out_edges_with_all_properties.size() == 2);
|
||||
MG_ASSERT(write_response.result[0].in_edges_with_all_properties.empty());
|
||||
MG_ASSERT(write_response.result[0].in_edges_with_specific_properties.empty());
|
||||
MG_ASSERT(write_response.result[0].out_edges_with_specific_properties.empty());
|
||||
const auto number_of_properties_on_edge =
|
||||
(write_response.result[0].out_edges_with_all_properties[0]).properties.size();
|
||||
MG_ASSERT(number_of_properties_on_edge == 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void AttemptToExpandOneWithSpecifiedEdgeProperties(ShardClient &client, uint64_t src_vertex_val, uint64_t edge_type_id,
|
||||
uint64_t edge_prop_id) {
|
||||
void AttemptToExpandOneWithSpecifiedEdgeProperties(ShardClient &client, uint64_t src_vertex_val,
|
||||
EdgeTypeId edge_type_id, uint64_t edge_prop_id) {
|
||||
// Source vertex
|
||||
msgs::Label label = {.id = get_primary_label()};
|
||||
auto src_vertex = std::make_pair(label, GetPrimaryKey(src_vertex_val));
|
||||
@ -595,18 +760,18 @@ void AttemptToExpandOneWithSpecifiedEdgeProperties(ShardClient &client, uint64_t
|
||||
std::vector<PropertyId> specified_edge_prop{PropertyId::FromUint(edge_prop_id)};
|
||||
std::optional<std::vector<PropertyId>> edge_properties = {specified_edge_prop};
|
||||
|
||||
std::vector<msgs::Expression> expressions;
|
||||
std::vector<std::string> expressions;
|
||||
std::optional<std::vector<msgs::OrderBy>> order_by = {};
|
||||
std::optional<size_t> limit = {};
|
||||
std::optional<msgs::Filter> filter = {};
|
||||
std::vector<std::string> filter = {};
|
||||
|
||||
msgs::ExpandOneRequest expand_one_req{};
|
||||
|
||||
expand_one_req.direction = edge_direction;
|
||||
expand_one_req.edge_properties = edge_properties;
|
||||
expand_one_req.edge_types = {edge_type};
|
||||
expand_one_req.expressions = expressions;
|
||||
expand_one_req.filter = filter;
|
||||
expand_one_req.vertex_expressions = expressions;
|
||||
expand_one_req.filters = filter;
|
||||
expand_one_req.limit = limit;
|
||||
expand_one_req.order_by = order_by;
|
||||
expand_one_req.src_vertex_properties = src_vertex_properties;
|
||||
@ -622,9 +787,69 @@ void AttemptToExpandOneWithSpecifiedEdgeProperties(ShardClient &client, uint64_t
|
||||
auto write_response_result = read_res.GetValue();
|
||||
auto write_response = std::get<msgs::ExpandOneResponse>(write_response_result);
|
||||
MG_ASSERT(write_response.result.size() == 1);
|
||||
auto specific_properties_size =
|
||||
(std::get<std::vector<msgs::Value>>(write_response.result[0].edges_with_specific_properties.value()[0]));
|
||||
MG_ASSERT(specific_properties_size.size() == 1);
|
||||
MG_ASSERT(write_response.result[0].out_edges_with_specific_properties.size() == 2);
|
||||
MG_ASSERT(write_response.result[0].in_edges_with_specific_properties.empty());
|
||||
MG_ASSERT(write_response.result[0].in_edges_with_all_properties.empty());
|
||||
MG_ASSERT(write_response.result[0].out_edges_with_all_properties.empty());
|
||||
const auto specific_properties_size =
|
||||
(write_response.result[0].out_edges_with_specific_properties[0]).properties.size();
|
||||
MG_ASSERT(specific_properties_size == 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void AttemptToExpandOneWithFilters(ShardClient &client, uint64_t src_vertex_val, EdgeTypeId edge_type_id,
|
||||
uint64_t edge_prop_id, uint64_t prop_val_to_check_against) {
|
||||
std::string filter_expr1 = "MG_SYMBOL_NODE.prop1 = " + std::to_string(prop_val_to_check_against);
|
||||
|
||||
// Source vertex
|
||||
msgs::Label label = {.id = get_primary_label()};
|
||||
auto src_vertex = std::make_pair(label, GetPrimaryKey(src_vertex_val));
|
||||
|
||||
// Edge type
|
||||
auto edge_type = msgs::EdgeType{};
|
||||
edge_type.id = edge_type_id;
|
||||
|
||||
// Edge direction
|
||||
auto edge_direction = msgs::EdgeDirection::OUT;
|
||||
|
||||
// Source Vertex properties to look for
|
||||
std::optional<std::vector<PropertyId>> src_vertex_properties = {};
|
||||
|
||||
// Edge properties to look for
|
||||
std::optional<std::vector<PropertyId>> edge_properties = {};
|
||||
|
||||
std::vector<std::string> expressions;
|
||||
std::optional<std::vector<msgs::OrderBy>> order_by = {};
|
||||
std::optional<size_t> limit = {};
|
||||
std::vector<std::string> filter = {};
|
||||
|
||||
msgs::ExpandOneRequest expand_one_req{};
|
||||
|
||||
expand_one_req.direction = edge_direction;
|
||||
expand_one_req.edge_properties = edge_properties;
|
||||
expand_one_req.edge_types = {edge_type};
|
||||
expand_one_req.vertex_expressions = expressions;
|
||||
expand_one_req.filters = {filter_expr1};
|
||||
expand_one_req.limit = limit;
|
||||
expand_one_req.order_by = order_by;
|
||||
expand_one_req.src_vertex_properties = src_vertex_properties;
|
||||
expand_one_req.src_vertices = {src_vertex};
|
||||
expand_one_req.transaction_id.logical_id = GetTransactionId();
|
||||
|
||||
while (true) {
|
||||
auto read_res = client.SendReadRequest(expand_one_req);
|
||||
if (read_res.HasError()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto write_response_result = read_res.GetValue();
|
||||
auto write_response = std::get<msgs::ExpandOneResponse>(write_response_result);
|
||||
MG_ASSERT(write_response.result.size() == 1);
|
||||
MG_ASSERT(write_response.result[0].out_edges_with_specific_properties.empty());
|
||||
MG_ASSERT(write_response.result[0].in_edges_with_specific_properties.empty());
|
||||
MG_ASSERT(write_response.result[0].in_edges_with_all_properties.empty());
|
||||
MG_ASSERT(write_response.result[0].out_edges_with_all_properties.size() == 2);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -658,7 +883,7 @@ void TestCreateEdge(ShardClient &client) {
|
||||
MG_ASSERT(AttemptToCreateVertex(client, unique_prop_val_2));
|
||||
|
||||
auto edge_gid = GetUniqueInteger();
|
||||
auto edge_type_id = GetUniqueInteger();
|
||||
auto edge_type_id = EdgeTypeId::FromUint(GetUniqueInteger());
|
||||
|
||||
MG_ASSERT(AttemptToAddEdge(client, unique_prop_val_1, unique_prop_val_2, edge_gid, edge_type_id));
|
||||
}
|
||||
@ -672,7 +897,7 @@ void TestCreateAndDeleteEdge(ShardClient &client) {
|
||||
MG_ASSERT(AttemptToCreateVertex(client, unique_prop_val_2));
|
||||
|
||||
auto edge_gid = GetUniqueInteger();
|
||||
auto edge_type_id = GetUniqueInteger();
|
||||
auto edge_type_id = EdgeTypeId::FromUint(GetUniqueInteger());
|
||||
|
||||
MG_ASSERT(AttemptToAddEdge(client, unique_prop_val_1, unique_prop_val_2, edge_gid, edge_type_id));
|
||||
|
||||
@ -689,7 +914,7 @@ void TestUpdateEdge(ShardClient &client) {
|
||||
MG_ASSERT(AttemptToCreateVertex(client, unique_prop_val_2));
|
||||
|
||||
auto edge_gid = GetUniqueInteger();
|
||||
auto edge_type_id = GetUniqueInteger();
|
||||
auto edge_type_id = EdgeTypeId::FromUint(GetUniqueInteger());
|
||||
|
||||
auto edge_prop_id = GetUniqueInteger();
|
||||
auto edge_prop_val_old = GetUniqueInteger();
|
||||
@ -721,6 +946,12 @@ void TestScanAllOneGo(ShardClient &client) {
|
||||
|
||||
msgs::VertexId v_id = {prim_label, prim_key};
|
||||
|
||||
auto [result_size_2, next_id_2] = AttemptToScanAllWithExpression(client, v_id, 5, unique_prop_val_2);
|
||||
MG_ASSERT(result_size_2 == 1);
|
||||
|
||||
AttemptToScanAllWithOrderByOnPrimaryProperty(client, v_id, 5);
|
||||
AttemptToScanAllWithOrderByOnSecondaryProperty(client, v_id, 5);
|
||||
|
||||
auto [result_size_with_batch, next_id_with_batch] = AttemptToScanAllWithBatchLimit(client, v_id, 5);
|
||||
auto [result_size_without_batch, next_id_without_batch] = AttemptToScanAllWithoutBatchLimit(client, v_id);
|
||||
|
||||
@ -770,7 +1001,7 @@ void TestScanAllWithSmallBatchSize(ShardClient &client) {
|
||||
MG_ASSERT(!next_id4);
|
||||
}
|
||||
|
||||
void TestExpandOne(ShardClient &client) {
|
||||
void TestExpandOneGraphOne(ShardClient &client) {
|
||||
{
|
||||
// ExpandOneSimple
|
||||
auto unique_prop_val_1 = GetUniqueInteger();
|
||||
@ -781,7 +1012,8 @@ void TestExpandOne(ShardClient &client) {
|
||||
MG_ASSERT(AttemptToCreateVertex(client, unique_prop_val_2));
|
||||
MG_ASSERT(AttemptToCreateVertex(client, unique_prop_val_3));
|
||||
|
||||
auto edge_type_id = GetUniqueInteger();
|
||||
auto edge_type_id = EdgeTypeId::FromUint(GetUniqueInteger());
|
||||
auto wrong_edge_type_id = EdgeTypeId::FromUint(GetUniqueInteger());
|
||||
|
||||
auto edge_gid_1 = GetUniqueInteger();
|
||||
auto edge_gid_2 = GetUniqueInteger();
|
||||
@ -797,9 +1029,38 @@ void TestExpandOne(ShardClient &client) {
|
||||
edge_prop_val, {edge_type_id}));
|
||||
|
||||
AttemptToExpandOneSimple(client, unique_prop_val_1, edge_type_id);
|
||||
AttemptToExpandOneWithWrongEdgeType(client, unique_prop_val_1, edge_type_id);
|
||||
AttemptToExpandOneWithWrongEdgeType(client, unique_prop_val_1, wrong_edge_type_id);
|
||||
AttemptToExpandOneWithSpecifiedSrcVertexProperties(client, unique_prop_val_1, edge_type_id);
|
||||
AttemptToExpandOneWithSpecifiedEdgeProperties(client, unique_prop_val_1, edge_type_id, edge_prop_id);
|
||||
AttemptToExpandOneWithFilters(client, unique_prop_val_1, edge_type_id, edge_prop_id, unique_prop_val_1);
|
||||
}
|
||||
}
|
||||
|
||||
void TestExpandOneGraphTwo(ShardClient &client) {
|
||||
{
|
||||
// ExpandOneSimple
|
||||
auto unique_prop_val_1 = GetUniqueInteger();
|
||||
auto unique_prop_val_2 = GetUniqueInteger();
|
||||
|
||||
MG_ASSERT(AttemptToCreateVertex(client, unique_prop_val_1));
|
||||
MG_ASSERT(AttemptToCreateVertex(client, unique_prop_val_2));
|
||||
|
||||
auto edge_type_id = EdgeTypeId::FromUint(GetUniqueInteger());
|
||||
|
||||
auto edge_gid_1 = GetUniqueInteger();
|
||||
auto edge_gid_2 = GetUniqueInteger();
|
||||
|
||||
auto edge_prop_id = GetUniqueInteger();
|
||||
auto edge_prop_val = GetUniqueInteger();
|
||||
|
||||
// (V1)-[edge_type_id]->(V2)
|
||||
MG_ASSERT(AttemptToAddEdgeWithProperties(client, unique_prop_val_1, unique_prop_val_2, edge_gid_1, edge_prop_id,
|
||||
edge_prop_val, {edge_type_id}));
|
||||
// (V1)-[edge_type_id]->(V3)
|
||||
MG_ASSERT(AttemptToAddEdgeWithProperties(client, unique_prop_val_1, unique_prop_val_2, edge_gid_2, edge_prop_id,
|
||||
edge_prop_val, {edge_type_id}));
|
||||
// AttemptToExpandOneSimple(client, unique_prop_val_1, edge_type_id);
|
||||
AttemptToExpandOneWithUniqueEdges(client, unique_prop_val_1, edge_type_id);
|
||||
}
|
||||
}
|
||||
|
||||
@ -812,7 +1073,7 @@ int TestMessages() {
|
||||
.scramble_messages = false,
|
||||
.rng_seed = 0,
|
||||
.start_time = Time::min() + std::chrono::microseconds{256 * 1024},
|
||||
.abort_time = Time::min() + std::chrono::microseconds{4 * 8 * 1024 * 1024},
|
||||
.abort_time = Time::max(),
|
||||
};
|
||||
|
||||
auto simulator = Simulator(config);
|
||||
@ -835,14 +1096,15 @@ int TestMessages() {
|
||||
PropertyValue max_pk(static_cast<int64_t>(10000000));
|
||||
std::vector<PropertyValue> max_prim_key = {max_pk};
|
||||
|
||||
std::vector<SchemaProperty> schema = {get_schema_property()};
|
||||
auto shard_ptr1 = std::make_unique<Shard>(get_primary_label(), min_prim_key, max_prim_key, schema);
|
||||
auto shard_ptr2 = std::make_unique<Shard>(get_primary_label(), min_prim_key, max_prim_key, schema);
|
||||
auto shard_ptr3 = std::make_unique<Shard>(get_primary_label(), min_prim_key, max_prim_key, schema);
|
||||
std::vector<SchemaProperty> schema_prop = {get_schema_property()};
|
||||
|
||||
shard_ptr1->CreateSchema(get_primary_label(), schema);
|
||||
shard_ptr2->CreateSchema(get_primary_label(), schema);
|
||||
shard_ptr3->CreateSchema(get_primary_label(), schema);
|
||||
auto shard_ptr1 = std::make_unique<Shard>(get_primary_label(), min_prim_key, max_prim_key, schema_prop);
|
||||
auto shard_ptr2 = std::make_unique<Shard>(get_primary_label(), min_prim_key, max_prim_key, schema_prop);
|
||||
auto shard_ptr3 = std::make_unique<Shard>(get_primary_label(), min_prim_key, max_prim_key, schema_prop);
|
||||
|
||||
shard_ptr1->StoreMapping({{1, "label"}, {2, "prop1"}, {3, "label1"}, {4, "prop2"}, {5, "prop3"}, {6, "prop4"}});
|
||||
shard_ptr2->StoreMapping({{1, "label"}, {2, "prop1"}, {3, "label1"}, {4, "prop2"}, {5, "prop3"}, {6, "prop4"}});
|
||||
shard_ptr3->StoreMapping({{1, "label"}, {2, "prop1"}, {3, "label1"}, {4, "prop2"}, {5, "prop3"}, {6, "prop4"}});
|
||||
|
||||
std::vector<Address> address_for_1{shard_server_2_address, shard_server_3_address};
|
||||
std::vector<Address> address_for_2{shard_server_1_address, shard_server_3_address};
|
||||
@ -880,7 +1142,8 @@ int TestMessages() {
|
||||
TestScanAllWithSmallBatchSize(client);
|
||||
|
||||
// ExpandOne tests
|
||||
TestExpandOne(client);
|
||||
TestExpandOneGraphOne(client);
|
||||
TestExpandOneGraphTwo(client);
|
||||
|
||||
simulator.ShutDown();
|
||||
|
||||
|
251
tests/simulation/test_cluster.hpp
Normal file
251
tests/simulation/test_cluster.hpp
Normal file
@ -0,0 +1,251 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include <chrono>
|
||||
#include <iostream>
|
||||
#include <limits>
|
||||
#include <memory>
|
||||
#include <set>
|
||||
#include <thread>
|
||||
|
||||
#include <rapidcheck.h>
|
||||
|
||||
#include "cluster_config.hpp"
|
||||
#include "coordinator/coordinator_client.hpp"
|
||||
#include "coordinator/coordinator_rsm.hpp"
|
||||
#include "coordinator/shard_map.hpp"
|
||||
#include "generated_operations.hpp"
|
||||
#include "io/address.hpp"
|
||||
#include "io/simulator/simulator.hpp"
|
||||
#include "io/simulator/simulator_config.hpp"
|
||||
#include "io/simulator/simulator_transport.hpp"
|
||||
#include "machine_manager/machine_config.hpp"
|
||||
#include "machine_manager/machine_manager.hpp"
|
||||
#include "query/v2/requests.hpp"
|
||||
#include "query/v2/shard_request_manager.hpp"
|
||||
#include "testing_constants.hpp"
|
||||
#include "utils/variant_helpers.hpp"
|
||||
|
||||
namespace memgraph::tests::simulation {
|
||||
|
||||
using coordinator::Coordinator;
|
||||
using coordinator::CoordinatorClient;
|
||||
using coordinator::CoordinatorReadRequests;
|
||||
using coordinator::CoordinatorWriteRequests;
|
||||
using coordinator::CoordinatorWriteResponses;
|
||||
using coordinator::GetShardMapRequest;
|
||||
using coordinator::GetShardMapResponse;
|
||||
using coordinator::Hlc;
|
||||
using coordinator::HlcResponse;
|
||||
using coordinator::Shard;
|
||||
using coordinator::ShardMap;
|
||||
using io::Address;
|
||||
using io::Io;
|
||||
using io::rsm::RsmClient;
|
||||
using io::simulator::Simulator;
|
||||
using io::simulator::SimulatorConfig;
|
||||
using io::simulator::SimulatorStats;
|
||||
using io::simulator::SimulatorTransport;
|
||||
using machine_manager::MachineConfig;
|
||||
using machine_manager::MachineManager;
|
||||
using msgs::ReadRequests;
|
||||
using msgs::ReadResponses;
|
||||
using msgs::WriteRequests;
|
||||
using msgs::WriteResponses;
|
||||
using storage::v3::LabelId;
|
||||
using storage::v3::SchemaProperty;
|
||||
|
||||
using CompoundKey = std::pair<int, int>;
|
||||
using ShardClient = RsmClient<SimulatorTransport, WriteRequests, WriteResponses, ReadRequests, ReadResponses>;
|
||||
|
||||
MachineManager<SimulatorTransport> MkMm(Simulator &simulator, std::vector<Address> coordinator_addresses, Address addr,
|
||||
ShardMap shard_map) {
|
||||
MachineConfig config{
|
||||
.coordinator_addresses = coordinator_addresses,
|
||||
.is_storage = true,
|
||||
.is_coordinator = true,
|
||||
.listen_ip = addr.last_known_ip,
|
||||
.listen_port = addr.last_known_port,
|
||||
};
|
||||
|
||||
Io<SimulatorTransport> io = simulator.Register(addr);
|
||||
|
||||
Coordinator coordinator{shard_map};
|
||||
|
||||
return MachineManager{io, config, coordinator, shard_map};
|
||||
}
|
||||
|
||||
void RunMachine(MachineManager<SimulatorTransport> mm) { mm.Run(); }
|
||||
|
||||
void WaitForShardsToInitialize(CoordinatorClient<SimulatorTransport> &coordinator_client) {
|
||||
// Call coordinator client's read method for GetShardMap and keep
|
||||
// reading it until the shard map contains proper replicas for
|
||||
// each shard in the label space.
|
||||
|
||||
while (true) {
|
||||
GetShardMapRequest req{};
|
||||
CoordinatorReadRequests read_req = req;
|
||||
auto read_res = coordinator_client.SendReadRequest(read_req);
|
||||
if (read_res.HasError()) {
|
||||
// timed out
|
||||
continue;
|
||||
}
|
||||
auto response_result = read_res.GetValue();
|
||||
auto response = std::get<GetShardMapResponse>(response_result);
|
||||
auto shard_map = response.shard_map;
|
||||
|
||||
if (shard_map.ClusterInitialized()) {
|
||||
spdlog::info("cluster stabilized - beginning workload");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ShardMap TestShardMap(int n_splits, int replication_factor) {
|
||||
ShardMap sm{};
|
||||
|
||||
const std::string label_name = std::string("test_label");
|
||||
|
||||
// register new properties
|
||||
const std::vector<std::string> property_names = {"property_1", "property_2"};
|
||||
const auto properties = sm.AllocatePropertyIds(property_names);
|
||||
const auto property_id_1 = properties.at("property_1");
|
||||
const auto property_id_2 = properties.at("property_2");
|
||||
const auto type_1 = memgraph::common::SchemaType::INT;
|
||||
const auto type_2 = memgraph::common::SchemaType::INT;
|
||||
|
||||
// register new label space
|
||||
std::vector<SchemaProperty> schema = {
|
||||
SchemaProperty{.property_id = property_id_1, .type = type_1},
|
||||
SchemaProperty{.property_id = property_id_2, .type = type_2},
|
||||
};
|
||||
|
||||
std::optional<LabelId> label_id = sm.InitializeNewLabel(label_name, schema, replication_factor, sm.shard_map_version);
|
||||
RC_ASSERT(label_id.has_value());
|
||||
|
||||
// split the shard at N split points
|
||||
for (int64_t i = 1; i < n_splits; ++i) {
|
||||
const auto key1 = memgraph::storage::v3::PropertyValue(i);
|
||||
const auto key2 = memgraph::storage::v3::PropertyValue(0);
|
||||
|
||||
const auto split_point = {key1, key2};
|
||||
|
||||
const bool split_success = sm.SplitShard(sm.shard_map_version, label_id.value(), split_point);
|
||||
|
||||
RC_ASSERT(split_success);
|
||||
}
|
||||
|
||||
return sm;
|
||||
}
|
||||
|
||||
void ExecuteOp(msgs::ShardRequestManager<SimulatorTransport> &shard_request_manager,
|
||||
std::set<CompoundKey> &correctness_model, CreateVertex create_vertex) {
|
||||
const auto key1 = memgraph::storage::v3::PropertyValue(create_vertex.first);
|
||||
const auto key2 = memgraph::storage::v3::PropertyValue(create_vertex.second);
|
||||
|
||||
std::vector<msgs::Value> primary_key = {msgs::Value(int64_t(create_vertex.first)),
|
||||
msgs::Value(int64_t(create_vertex.second))};
|
||||
|
||||
if (correctness_model.contains(std::make_pair(create_vertex.first, create_vertex.second))) {
|
||||
// TODO(tyler) remove this early-return when we have properly handled setting non-unique vertexes
|
||||
return;
|
||||
}
|
||||
|
||||
msgs::ExecutionState<msgs::CreateVerticesRequest> state;
|
||||
|
||||
auto label_id = shard_request_manager.NameToLabel("test_label");
|
||||
|
||||
msgs::NewVertex nv{.primary_key = primary_key};
|
||||
nv.label_ids.push_back({label_id});
|
||||
|
||||
std::vector<msgs::NewVertex> new_vertices;
|
||||
new_vertices.push_back(std::move(nv));
|
||||
|
||||
auto result = shard_request_manager.Request(state, std::move(new_vertices));
|
||||
|
||||
RC_ASSERT(result.size() == 1);
|
||||
RC_ASSERT(result[0].success);
|
||||
|
||||
correctness_model.emplace(std::make_pair(create_vertex.first, create_vertex.second));
|
||||
}
|
||||
|
||||
void ExecuteOp(msgs::ShardRequestManager<SimulatorTransport> &shard_request_manager,
|
||||
std::set<CompoundKey> &correctness_model, ScanAll scan_all) {
|
||||
msgs::ExecutionState<msgs::ScanVerticesRequest> request{.label = "test_label"};
|
||||
|
||||
auto results = shard_request_manager.Request(request);
|
||||
|
||||
RC_ASSERT(results.size() == correctness_model.size());
|
||||
|
||||
for (const auto &vertex_accessor : results) {
|
||||
const auto properties = vertex_accessor.Properties();
|
||||
const auto primary_key = vertex_accessor.Id().second;
|
||||
const CompoundKey model_key = std::make_pair(primary_key[0].int_v, primary_key[1].int_v);
|
||||
RC_ASSERT(correctness_model.contains(model_key));
|
||||
}
|
||||
}
|
||||
|
||||
void RunClusterSimulation(const SimulatorConfig &sim_config, const ClusterConfig &cluster_config,
|
||||
const std::vector<Op> &ops) {
|
||||
spdlog::info("========================== NEW SIMULATION ==========================");
|
||||
|
||||
auto simulator = Simulator(sim_config);
|
||||
|
||||
auto cli_addr = Address::TestAddress(1);
|
||||
auto machine_1_addr = cli_addr.ForkUniqueAddress();
|
||||
|
||||
Io<SimulatorTransport> cli_io = simulator.Register(cli_addr);
|
||||
|
||||
auto coordinator_addresses = std::vector{
|
||||
machine_1_addr,
|
||||
};
|
||||
|
||||
ShardMap initialization_sm = TestShardMap(cluster_config.shards - 1, cluster_config.replication_factor);
|
||||
|
||||
auto mm_1 = MkMm(simulator, coordinator_addresses, machine_1_addr, initialization_sm);
|
||||
Address coordinator_address = mm_1.CoordinatorAddress();
|
||||
|
||||
auto mm_thread_1 = std::jthread(RunMachine, std::move(mm_1));
|
||||
|
||||
// Need to detach this thread so that the destructor does not
|
||||
// block before we can propagate assertion failures.
|
||||
mm_thread_1.detach();
|
||||
|
||||
// TODO(tyler) clarify addresses of coordinator etc... as it's a mess
|
||||
|
||||
CoordinatorClient<SimulatorTransport> coordinator_client(cli_io, coordinator_address, {coordinator_address});
|
||||
WaitForShardsToInitialize(coordinator_client);
|
||||
|
||||
msgs::ShardRequestManager<SimulatorTransport> shard_request_manager(std::move(coordinator_client), std::move(cli_io));
|
||||
|
||||
shard_request_manager.StartTransaction();
|
||||
|
||||
auto correctness_model = std::set<CompoundKey>{};
|
||||
|
||||
for (const Op &op : ops) {
|
||||
std::visit([&](auto &o) { ExecuteOp(shard_request_manager, correctness_model, o); }, op.inner);
|
||||
}
|
||||
|
||||
simulator.ShutDown();
|
||||
|
||||
SimulatorStats stats = simulator.Stats();
|
||||
|
||||
spdlog::info("total messages: {}", stats.total_messages);
|
||||
spdlog::info("dropped messages: {}", stats.dropped_messages);
|
||||
spdlog::info("timed out requests: {}", stats.timed_out_requests);
|
||||
spdlog::info("total requests: {}", stats.total_requests);
|
||||
spdlog::info("total responses: {}", stats.total_responses);
|
||||
spdlog::info("simulator ticks: {}", stats.simulator_ticks);
|
||||
|
||||
spdlog::info("========================== SUCCESS :) ==========================");
|
||||
}
|
||||
|
||||
} // namespace memgraph::tests::simulation
|
28
tests/simulation/testing_constants.hpp
Normal file
28
tests/simulation/testing_constants.hpp
Normal file
@ -0,0 +1,28 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
namespace memgraph::tests::simulation {
|
||||
|
||||
// TODO(tyler) increase this when we start standing up multiple machines in cluster tests
|
||||
static constexpr auto kMinimumShards = 1;
|
||||
static constexpr auto kMaximumShards = kMinimumShards + 10;
|
||||
|
||||
// TODO(tyler) increase this when we start standing up multiple machines in cluster tests
|
||||
static constexpr auto kMinimumServers = 1;
|
||||
static constexpr auto kMaximumServers = kMinimumServers + 1;
|
||||
|
||||
// TODO(tyler) increase this when we start standing up multiple machines in cluster tests
|
||||
static constexpr auto kMinimumReplicationFactor = 1;
|
||||
static constexpr auto kMaximumReplicationFactor = kMinimumReplicationFactor + 1;
|
||||
|
||||
} // namespace memgraph::tests::simulation
|
@ -20,8 +20,8 @@
|
||||
#include "messages.hpp"
|
||||
|
||||
namespace memgraph::tests::simulation {
|
||||
using memgraph::io::Io;
|
||||
using memgraph::io::simulator::SimulatorTransport;
|
||||
using io::Io;
|
||||
using io::simulator::SimulatorTransport;
|
||||
|
||||
void run_server(Io<SimulatorTransport> io) {
|
||||
while (!io.ShouldShutDown()) {
|
||||
|
@ -333,36 +333,35 @@ target_link_libraries(${test_prefix}storage_v3_schema mg-storage-v3)
|
||||
|
||||
# Test mg-query-v2
|
||||
# These are commented out because of the new TypedValue in the query engine
|
||||
#add_unit_test(query_v2_interpreter.cpp ${CMAKE_SOURCE_DIR}/src/glue/v2/communication.cpp)
|
||||
#target_link_libraries(${test_prefix}query_v2_interpreter mg-storage-v3 mg-query-v2 mg-communication)
|
||||
# add_unit_test(query_v2_interpreter.cpp ${CMAKE_SOURCE_DIR}/src/glue/v2/communication.cpp)
|
||||
# target_link_libraries(${test_prefix}query_v2_interpreter mg-storage-v3 mg-query-v2 mg-communication)
|
||||
#
|
||||
#add_unit_test(query_v2_query_plan_accumulate_aggregate.cpp)
|
||||
#target_link_libraries(${test_prefix}query_v2_query_plan_accumulate_aggregate mg-query-v2)
|
||||
# add_unit_test(query_v2_query_plan_accumulate_aggregate.cpp)
|
||||
# target_link_libraries(${test_prefix}query_v2_query_plan_accumulate_aggregate mg-query-v2)
|
||||
#
|
||||
#add_unit_test(query_v2_query_plan_create_set_remove_delete.cpp)
|
||||
#target_link_libraries(${test_prefix}query_v2_query_plan_create_set_remove_delete mg-query-v2 mg-expr)
|
||||
# add_unit_test(query_v2_query_plan_create_set_remove_delete.cpp)
|
||||
# target_link_libraries(${test_prefix}query_v2_query_plan_create_set_remove_delete mg-query-v2 mg-expr)
|
||||
#
|
||||
#add_unit_test(query_v2_query_plan_bag_semantics.cpp)
|
||||
#target_link_libraries(${test_prefix}query_v2_query_plan_bag_semantics mg-query-v2)
|
||||
# add_unit_test(query_v2_query_plan_bag_semantics.cpp)
|
||||
# target_link_libraries(${test_prefix}query_v2_query_plan_bag_semantics mg-query-v2)
|
||||
#
|
||||
#add_unit_test(query_v2_query_plan_edge_cases.cpp ${CMAKE_SOURCE_DIR}/src/glue/v2/communication.cpp)
|
||||
#target_link_libraries(${test_prefix}query_v2_query_plan_edge_cases mg-communication mg-query-v2)
|
||||
# add_unit_test(query_v2_query_plan_edge_cases.cpp ${CMAKE_SOURCE_DIR}/src/glue/v2/communication.cpp)
|
||||
# target_link_libraries(${test_prefix}query_v2_query_plan_edge_cases mg-communication mg-query-v2)
|
||||
#
|
||||
#add_unit_test(query_v2_query_plan_v2_create_set_remove_delete.cpp)
|
||||
#target_link_libraries(${test_prefix}query_v2_query_plan_v2_create_set_remove_delete mg-query-v2)
|
||||
# add_unit_test(query_v2_query_plan_v2_create_set_remove_delete.cpp)
|
||||
# target_link_libraries(${test_prefix}query_v2_query_plan_v2_create_set_remove_delete mg-query-v2)
|
||||
#
|
||||
#add_unit_test(query_v2_query_plan_match_filter_return.cpp)
|
||||
#target_link_libraries(${test_prefix}query_v2_query_plan_match_filter_return mg-query-v2)
|
||||
# add_unit_test(query_v2_query_plan_match_filter_return.cpp)
|
||||
# target_link_libraries(${test_prefix}query_v2_query_plan_match_filter_return mg-query-v2)
|
||||
#
|
||||
#add_unit_test(query_v2_cypher_main_visitor.cpp)
|
||||
#target_link_libraries(${test_prefix}query_v2_cypher_main_visitor mg-query-v2)
|
||||
# add_unit_test(query_v2_cypher_main_visitor.cpp)
|
||||
# target_link_libraries(${test_prefix}query_v2_cypher_main_visitor mg-query-v2)
|
||||
#
|
||||
#add_unit_test(query_v2_query_required_privileges.cpp)
|
||||
#target_link_libraries(${test_prefix}query_v2_query_required_privileges mg-query-v2)
|
||||
# add_unit_test(query_v2_query_required_privileges.cpp)
|
||||
# target_link_libraries(${test_prefix}query_v2_query_required_privileges mg-query-v2)
|
||||
#
|
||||
#add_unit_test(replication_persistence_helper.cpp)
|
||||
#target_link_libraries(${test_prefix}replication_persistence_helper mg-storage-v2)
|
||||
|
||||
# add_unit_test(replication_persistence_helper.cpp)
|
||||
# target_link_libraries(${test_prefix}replication_persistence_helper mg-storage-v2)
|
||||
add_unit_test(query_v2_dummy_test.cpp)
|
||||
target_link_libraries(${test_prefix}query_v2_dummy_test mg-query-v2)
|
||||
|
||||
@ -436,3 +435,7 @@ target_link_libraries(${test_prefix}machine_manager mg-io mg-coordinator mg-stor
|
||||
|
||||
add_unit_test(pretty_print_ast_to_original_expression_test.cpp)
|
||||
target_link_libraries(${test_prefix}pretty_print_ast_to_original_expression_test mg-io mg-expr mg-query-v2)
|
||||
|
||||
# Tests for mg-coordinator
|
||||
add_unit_test(coordinator_shard_map.cpp)
|
||||
target_link_libraries(${test_prefix}coordinator_shard_map mg-coordinator)
|
||||
|
104
tests/unit/coordinator_shard_map.cpp
Normal file
104
tests/unit/coordinator_shard_map.cpp
Normal file
@ -0,0 +1,104 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
|
||||
#include "common/types.hpp"
|
||||
#include "coordinator/shard_map.hpp"
|
||||
#include "gtest/gtest.h"
|
||||
#include "storage/v3/id_types.hpp"
|
||||
#include "storage/v3/property_value.hpp"
|
||||
#include "storage/v3/schemas.hpp"
|
||||
|
||||
namespace memgraph::coordinator::tests {
|
||||
TEST(ShardMap, Parse) {
|
||||
std::string input = R"(4
|
||||
property_1
|
||||
property_2
|
||||
property_3
|
||||
property_4
|
||||
3
|
||||
edge_type_1
|
||||
edge_type_2
|
||||
edge_type_3
|
||||
2
|
||||
label_1
|
||||
1
|
||||
primary_property_name_1
|
||||
string
|
||||
4
|
||||
[asdasd]
|
||||
[qweqwe]
|
||||
[bnm]
|
||||
[tryuryturtyur]
|
||||
label_2
|
||||
3
|
||||
property_1
|
||||
string
|
||||
property_2
|
||||
int
|
||||
primary_property_name_2
|
||||
InT
|
||||
2
|
||||
[first,1 ,2]
|
||||
[ second ,-1, -9223372036854775808]
|
||||
)";
|
||||
|
||||
std::stringstream stream(input);
|
||||
auto shard_map = ShardMap::Parse(stream);
|
||||
EXPECT_EQ(shard_map.properties.size(), 6);
|
||||
EXPECT_EQ(shard_map.edge_types.size(), 3);
|
||||
EXPECT_EQ(shard_map.label_spaces.size(), 2);
|
||||
EXPECT_EQ(shard_map.schemas.size(), 2);
|
||||
|
||||
auto check_label = [&shard_map](const std::string &label_name, const std::vector<SchemaProperty> &expected_schema,
|
||||
const std::vector<PrimaryKey> &expected_split_points) {
|
||||
ASSERT_TRUE(shard_map.labels.contains(label_name));
|
||||
const auto label_id = shard_map.labels.at(label_name);
|
||||
const auto &schema = shard_map.schemas.at(label_id);
|
||||
ASSERT_EQ(schema.size(), expected_schema.size());
|
||||
for (auto pp_index = 0; pp_index < schema.size(); ++pp_index) {
|
||||
EXPECT_EQ(schema[pp_index].property_id, expected_schema[pp_index].property_id);
|
||||
EXPECT_EQ(schema[pp_index].type, expected_schema[pp_index].type);
|
||||
}
|
||||
|
||||
const auto &label_space = shard_map.label_spaces.at(label_id);
|
||||
|
||||
ASSERT_EQ(label_space.shards.size(), expected_split_points.size());
|
||||
for (const auto &split_point : expected_split_points) {
|
||||
EXPECT_TRUE(label_space.shards.contains(split_point)) << split_point[0];
|
||||
}
|
||||
};
|
||||
|
||||
check_label("label_1",
|
||||
{SchemaProperty{shard_map.properties.at("primary_property_name_1"), common::SchemaType::STRING}},
|
||||
std::vector<PrimaryKey>{
|
||||
PrimaryKey{PropertyValue{""}},
|
||||
PrimaryKey{PropertyValue{"asdasd"}},
|
||||
PrimaryKey{PropertyValue{"qweqwe"}},
|
||||
PrimaryKey{PropertyValue{"bnm"}},
|
||||
PrimaryKey{PropertyValue{"tryuryturtyur"}},
|
||||
});
|
||||
|
||||
static constexpr int64_t kMinInt = std::numeric_limits<int64_t>::min();
|
||||
check_label("label_2",
|
||||
{SchemaProperty{shard_map.properties.at("property_1"), common::SchemaType::STRING},
|
||||
SchemaProperty{shard_map.properties.at("property_2"), common::SchemaType::INT},
|
||||
SchemaProperty{shard_map.properties.at("primary_property_name_2"), common::SchemaType::INT}},
|
||||
std::vector<PrimaryKey>{
|
||||
PrimaryKey{PropertyValue{""}, PropertyValue{kMinInt}, PropertyValue{kMinInt}},
|
||||
PrimaryKey{PropertyValue{"first"}, PropertyValue{1}, PropertyValue{2}},
|
||||
PrimaryKey{PropertyValue{" second "}, PropertyValue{-1},
|
||||
PropertyValue{int64_t{-9223372036854775807LL - 1LL}}},
|
||||
});
|
||||
}
|
||||
} // namespace memgraph::coordinator::tests
|
@ -86,6 +86,7 @@ ShardMap TestShardMap() {
|
||||
const auto label_id = sm.InitializeNewLabel(kLabelName, schema, replication_factor, sm.shard_map_version);
|
||||
EXPECT_TRUE(label_id.has_value());
|
||||
|
||||
sm.AllocateEdgeTypeIds(std::vector<std::string>{"edge_type"});
|
||||
// split the shard at N split points
|
||||
// NB: this is the logic that should be provided by the "split file"
|
||||
// TODO(tyler) split points should account for signedness
|
||||
@ -116,12 +117,11 @@ void TestScanAll(ShardRequestManager &shard_request_manager) {
|
||||
EXPECT_EQ(result.size(), 2);
|
||||
}
|
||||
|
||||
template <typename ShardRequestManager>
|
||||
void TestCreateVertices(ShardRequestManager &shard_request_manager) {
|
||||
void TestCreateVertices(msgs::ShardRequestManagerInterface &shard_request_manager) {
|
||||
using PropVal = msgs::Value;
|
||||
msgs::ExecutionState<msgs::CreateVerticesRequest> state;
|
||||
std::vector<msgs::NewVertex> new_vertices;
|
||||
auto label_id = shard_request_manager.LabelNameToLabelId(kLabelName);
|
||||
auto label_id = shard_request_manager.NameToLabel(kLabelName);
|
||||
msgs::NewVertex a1{.primary_key = {PropVal(int64_t(0)), PropVal(int64_t(0))}};
|
||||
a1.label_ids.push_back({label_id});
|
||||
msgs::NewVertex a2{.primary_key = {PropVal(int64_t(13)), PropVal(int64_t(13))}};
|
||||
@ -133,8 +133,40 @@ void TestCreateVertices(ShardRequestManager &shard_request_manager) {
|
||||
EXPECT_EQ(result.size(), 1);
|
||||
}
|
||||
|
||||
template <typename ShardRequestManager>
|
||||
void TestExpand(ShardRequestManager &shard_request_manager) {}
|
||||
void TestCreateExpand(msgs::ShardRequestManagerInterface &shard_request_manager) {
|
||||
using PropVal = msgs::Value;
|
||||
msgs::ExecutionState<msgs::CreateExpandRequest> state;
|
||||
std::vector<msgs::NewExpand> new_expands;
|
||||
|
||||
const auto edge_type_id = shard_request_manager.NameToEdgeType("edge_type");
|
||||
const auto label = msgs::Label{shard_request_manager.NameToLabel("test_label")};
|
||||
const msgs::VertexId vertex_id_1{label, {PropVal(int64_t(0)), PropVal(int64_t(0))}};
|
||||
const msgs::VertexId vertex_id_2{label, {PropVal(int64_t(13)), PropVal(int64_t(13))}};
|
||||
msgs::NewExpand expand_1{
|
||||
.id = {.gid = 0}, .type = {edge_type_id}, .src_vertex = vertex_id_1, .dest_vertex = vertex_id_2};
|
||||
msgs::NewExpand expand_2{
|
||||
.id = {.gid = 1}, .type = {edge_type_id}, .src_vertex = vertex_id_2, .dest_vertex = vertex_id_1};
|
||||
new_expands.push_back(std::move(expand_1));
|
||||
new_expands.push_back(std::move(expand_2));
|
||||
|
||||
auto responses = shard_request_manager.Request(state, std::move(new_expands));
|
||||
MG_ASSERT(responses.size() == 1);
|
||||
MG_ASSERT(responses[0].success);
|
||||
}
|
||||
|
||||
void TestExpandOne(msgs::ShardRequestManagerInterface &shard_request_manager) {
|
||||
msgs::ExecutionState<msgs::ExpandOneRequest> state{};
|
||||
msgs::ExpandOneRequest request;
|
||||
const auto edge_type_id = shard_request_manager.NameToEdgeType("edge_type");
|
||||
const auto label = msgs::Label{shard_request_manager.NameToLabel("test_label")};
|
||||
request.src_vertices.push_back(msgs::VertexId{label, {msgs::Value(int64_t(0)), msgs::Value(int64_t(0))}});
|
||||
request.edge_types.push_back(msgs::EdgeType{edge_type_id});
|
||||
request.direction = msgs::EdgeDirection::BOTH;
|
||||
auto result_rows = shard_request_manager.Request(state, std::move(request));
|
||||
MG_ASSERT(result_rows.size() == 1);
|
||||
MG_ASSERT(result_rows[0].in_edges_with_all_properties.size() == 1);
|
||||
MG_ASSERT(result_rows[0].out_edges_with_all_properties.size() == 1);
|
||||
}
|
||||
|
||||
template <typename ShardRequestManager>
|
||||
void TestAggregate(ShardRequestManager &shard_request_manager) {}
|
||||
@ -198,6 +230,8 @@ TEST(MachineManager, BasicFunctionality) {
|
||||
shard_request_manager.StartTransaction();
|
||||
TestCreateVertices(shard_request_manager);
|
||||
TestScanAll(shard_request_manager);
|
||||
TestCreateExpand(shard_request_manager);
|
||||
TestExpandOne(shard_request_manager);
|
||||
local_system.ShutDown();
|
||||
};
|
||||
|
||||
|
@ -10,11 +10,13 @@
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include <spdlog/cfg/env.h>
|
||||
#include <utils/logging.hpp>
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
::testing::InitGoogleTest(&argc, argv);
|
||||
memgraph::logging::RedirectToStderr();
|
||||
spdlog::set_level(spdlog::level::trace);
|
||||
spdlog::cfg::load_env_levels();
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
|
@ -192,11 +192,18 @@ TEST_F(SchemaValidatorTest, TestSchemaValidateVertexCreate) {
|
||||
label1, schema_prop_string));
|
||||
}
|
||||
{
|
||||
const auto schema_violation = schema_validator.ValidateVertexCreate(label2, {}, {});
|
||||
const auto schema_violation =
|
||||
schema_validator.ValidateVertexCreate(label2, {}, std::vector<std::pair<PropertyId, PropertyValue>>{});
|
||||
ASSERT_NE(schema_violation, std::nullopt);
|
||||
EXPECT_EQ(*schema_violation, SchemaViolation(SchemaViolation::ValidationStatus::VERTEX_HAS_NO_PRIMARY_PROPERTY,
|
||||
label2, schema_prop_string));
|
||||
}
|
||||
{
|
||||
const auto schema_violation = schema_validator.ValidateVertexCreate(label2, {}, std::vector<PropertyValue>{});
|
||||
ASSERT_NE(schema_violation, std::nullopt);
|
||||
EXPECT_EQ(*schema_violation,
|
||||
SchemaViolation(SchemaViolation::ValidationStatus::VERTEX_PRIMARY_PROPERTIES_UNDEFINED, label2));
|
||||
}
|
||||
// Validate wrong secondary label
|
||||
{
|
||||
const auto schema_violation =
|
||||
|
@ -3,43 +3,187 @@ import gdb.printing
|
||||
|
||||
|
||||
def build_memgraph_pretty_printers():
|
||||
'''Instantiate and return all memgraph pretty printer classes.'''
|
||||
pp = gdb.printing.RegexpCollectionPrettyPrinter('memgraph')
|
||||
pp.add_printer('memgraph::query::TypedValue', '^memgraph::query::TypedValue$', TypedValuePrinter)
|
||||
"""Instantiate and return all memgraph pretty printer classes."""
|
||||
pp = gdb.printing.RegexpCollectionPrettyPrinter("memgraph")
|
||||
pp.add_printer("memgraph::query::TypedValue", "^memgraph::query::TypedValue$", TypedValuePrinter)
|
||||
pp.add_printer("memgraph::query::v2::TypedValue", "^memgraph::query::v2::TypedValue$", TypedValuePrinter2)
|
||||
pp.add_printer("memgraph::storage::v3::TypedValue", "^memgraph::storage::v3::TypedValue$", TypedValuePrinter3)
|
||||
pp.add_printer(
|
||||
"memgraph::expr::TypedValueT<memgraph::storage::v3::VertexAccessor, memgraph::storage::v3::EdgeAccessor, memgraph::storage::v3::Path>",
|
||||
"^memgraph::expr::TypedValueT<memgraph::storage::v3::VertexAccessor, memgraph::storage::v3::EdgeAccessor, memgraph::storage::v3::Path>$",
|
||||
TypedValuePrinter4,
|
||||
)
|
||||
return pp
|
||||
|
||||
|
||||
class TypedValuePrinter(gdb.printing.PrettyPrinter):
|
||||
'''Pretty printer for memgraph::query::TypedValue'''
|
||||
"""Pretty printer for memgraph::query::TypedValue"""
|
||||
|
||||
def __init__(self, val):
|
||||
super(TypedValuePrinter, self).__init__('TypedValue')
|
||||
super(TypedValuePrinter, self).__init__("TypedValue")
|
||||
self.val = val
|
||||
|
||||
def to_string(self):
|
||||
def _to_str(val):
|
||||
return '{%s %s}' % (value_type, self.val[val])
|
||||
value_type = str(self.val['type_'])
|
||||
if value_type == 'memgraph::query::TypedValue::Type::Null':
|
||||
return '{%s}' % value_type
|
||||
elif value_type == 'memgraph::query::TypedValue::Type::Bool':
|
||||
return _to_str('bool_v')
|
||||
elif value_type == 'memgraph::query::TypedValue::Type::Int':
|
||||
return _to_str('int_v')
|
||||
elif value_type == 'memgraph::query::TypedValue::Type::Double':
|
||||
return _to_str('double_v')
|
||||
elif value_type == 'memgraph::query::TypedValue::Type::String':
|
||||
return _to_str('string_v')
|
||||
elif value_type == 'memgraph::query::TypedValue::Type::List':
|
||||
return _to_str('list_v')
|
||||
elif value_type == 'memgraph::query::TypedValue::Type::Map':
|
||||
return _to_str('map_v')
|
||||
elif value_type == 'memgraph::query::TypedValue::Type::Vertex':
|
||||
return _to_str('vertex_v')
|
||||
elif value_type == 'memgraph::query::TypedValue::Type::Edge':
|
||||
return _to_str('edge_v')
|
||||
elif value_type == 'memgraph::query::TypedValue::Type::Path':
|
||||
return _to_str('path_v')
|
||||
return '{%s}' % value_type
|
||||
return "{%s %s}" % (value_type, self.val[val])
|
||||
|
||||
gdb.printing.register_pretty_printer(None, build_memgraph_pretty_printers(),
|
||||
replace=True)
|
||||
value_type = str(self.val["type_"])
|
||||
if value_type == "memgraph::query::TypedValue::Type::Null":
|
||||
return "{%s}" % value_type
|
||||
elif value_type == "memgraph::query::TypedValue::Type::Bool":
|
||||
return _to_str("bool_v")
|
||||
elif value_type == "memgraph::query::TypedValue::Type::Int":
|
||||
return _to_str("int_v")
|
||||
elif value_type == "memgraph::query::TypedValue::Type::Double":
|
||||
return _to_str("double_v")
|
||||
elif value_type == "memgraph::query::TypedValue::Type::String":
|
||||
return _to_str("string_v")
|
||||
elif value_type == "memgraph::query::TypedValue::Type::List":
|
||||
return _to_str("list_v")
|
||||
elif value_type == "memgraph::query::TypedValue::Type::Map":
|
||||
return _to_str("map_v")
|
||||
elif value_type == "memgraph::query::TypedValue::Type::Vertex":
|
||||
return _to_str("vertex_v")
|
||||
elif value_type == "memgraph::query::TypedValue::Type::Edge":
|
||||
return _to_str("edge_v")
|
||||
elif value_type == "memgraph::query::TypedValue::Type::Path":
|
||||
return _to_str("path_v")
|
||||
return "{%s}" % value_type
|
||||
|
||||
|
||||
class TypedValuePrinter2(gdb.printing.PrettyPrinter):
|
||||
"""Pretty printer for memgraph::query::TypedValue"""
|
||||
|
||||
def __init__(self, val):
|
||||
super(TypedValuePrinter2, self).__init__("TypedValue2")
|
||||
self.val = val
|
||||
|
||||
def to_string(self):
|
||||
def _to_str(val):
|
||||
return "{%s %s}" % (value_type, self.val[val])
|
||||
|
||||
value_type = str(self.val["type_"])
|
||||
if value_type == "memgraph::query::v2::TypedValue::Type::Null":
|
||||
return "{%s}" % value_type
|
||||
elif value_type == "memgraph::query::v2::TypedValue::Type::Bool":
|
||||
return _to_str("bool_v")
|
||||
elif value_type == "memgraph::query::v2::TypedValue::Type::Int":
|
||||
return _to_str("int_v")
|
||||
elif value_type == "memgraph::query::v2::TypedValue::Type::Double":
|
||||
return _to_str("double_v")
|
||||
elif value_type == "memgraph::query::v2::TypedValue::Type::String":
|
||||
return _to_str("string_v")
|
||||
elif value_type == "memgraph::query::v2::TypedValue::Type::List":
|
||||
return _to_str("list_v")
|
||||
elif value_type == "memgraph::query::v2::TypedValue::Type::Map":
|
||||
return _to_str("map_v")
|
||||
elif value_type == "memgraph::query::v2::TypedValue::Type::Vertex":
|
||||
return _to_str("vertex_v")
|
||||
elif value_type == "memgraph::query::v2::TypedValue::Type::Edge":
|
||||
return _to_str("edge_v")
|
||||
elif value_type == "memgraph::query::v2::TypedValue::Type::Path":
|
||||
return _to_str("path_v")
|
||||
return "{%s}" % value_type
|
||||
|
||||
|
||||
class TypedValuePrinter3(gdb.printing.PrettyPrinter):
|
||||
"""Pretty printer for memgraph::query::TypedValue"""
|
||||
|
||||
def __init__(self, val):
|
||||
super(TypedValuePrinter3, self).__init__("TypedValue3")
|
||||
self.val = val
|
||||
|
||||
def to_string(self):
|
||||
def _to_str(val):
|
||||
return "{%s %s}" % (value_type, self.val[val])
|
||||
|
||||
value_type = str(self.val["type_"])
|
||||
if value_type == "memgraph::storage::v3::TypedValue::Type::Null":
|
||||
return "{%s}" % value_type
|
||||
elif value_type == "memgraph::storage::v3::TypedValue::Type::Bool":
|
||||
return _to_str("bool_v")
|
||||
elif value_type == "memgraph::storage::v3::TypedValue::Type::Int":
|
||||
return _to_str("int_v")
|
||||
elif value_type == "memgraph::storage::v3::TypedValue::Type::Double":
|
||||
return _to_str("double_v")
|
||||
elif value_type == "memgraph::storage::v3::TypedValue::Type::String":
|
||||
return _to_str("string_v")
|
||||
elif value_type == "memgraph::storage::v3::TypedValue::Type::List":
|
||||
return _to_str("list_v")
|
||||
elif value_type == "memgraph::storage::v3::TypedValue::Type::Map":
|
||||
return _to_str("map_v")
|
||||
elif value_type == "memgraph::storage::v3::TypedValue::Type::Vertex":
|
||||
return _to_str("vertex_v")
|
||||
elif value_type == "memgraph::storage::v3::TypedValue::Type::Edge":
|
||||
return _to_str("edge_v")
|
||||
elif value_type == "memgraph::storage::v3::TypedValue::Type::Path":
|
||||
return _to_str("path_v")
|
||||
return "{%s}" % value_type
|
||||
|
||||
|
||||
class TypedValuePrinter4(gdb.printing.PrettyPrinter):
|
||||
"""Pretty printer for memgraph::query::TypedValue"""
|
||||
|
||||
def __init__(self, val):
|
||||
super(TypedValuePrinter4, self).__init__("TypedValue4")
|
||||
self.val = val
|
||||
|
||||
def to_string(self):
|
||||
def _to_str(val):
|
||||
return "{%s %s}" % (value_type, self.val[val])
|
||||
|
||||
value_type = str(self.val["type_"])
|
||||
if (
|
||||
value_type
|
||||
== "memgraph::expr::TypedValueT<memgraph::storage::v3::VertexAccessor, memgraph::storage::v3::EdgeAccessor, memgraph::storage::v3::Path>::Type::Null"
|
||||
):
|
||||
return "{%s}" % value_type
|
||||
elif (
|
||||
value_type
|
||||
== "memgraph::expr::TypedValueT<memgraph::storage::v3::VertexAccessor, memgraph::storage::v3::EdgeAccessor, memgraph::storage::v3::Path>::Type::Bool"
|
||||
):
|
||||
return _to_str("bool_v")
|
||||
elif (
|
||||
value_type
|
||||
== "memgraph::expr::TypedValueT<memgraph::storage::v3::VertexAccessor, memgraph::storage::v3::EdgeAccessor, memgraph::storage::v3::Path>::Type::Int"
|
||||
):
|
||||
return _to_str("int_v")
|
||||
elif (
|
||||
value_type
|
||||
== "memgraph::expr::TypedValueT<memgraph::storage::v3::VertexAccessor, memgraph::storage::v3::EdgeAccessor, memgraph::storage::v3::Path>::Type::Double"
|
||||
):
|
||||
return _to_str("double_v")
|
||||
elif (
|
||||
value_type
|
||||
== "memgraph::expr::TypedValueT<memgraph::storage::v3::VertexAccessor, memgraph::storage::v3::EdgeAccessor, memgraph::storage::v3::Path>::Type::String"
|
||||
):
|
||||
return _to_str("string_v")
|
||||
elif (
|
||||
value_type
|
||||
== "memgraph::expr::TypedValueT<memgraph::storage::v3::VertexAccessor, memgraph::storage::v3::EdgeAccessor, memgraph::storage::v3::Path>::Type::List"
|
||||
):
|
||||
return _to_str("list_v")
|
||||
elif (
|
||||
value_type
|
||||
== "memgraph::expr::TypedValueT<memgraph::storage::v3::VertexAccessor, memgraph::storage::v3::EdgeAccessor, memgraph::storage::v3::Path>::Type::Map"
|
||||
):
|
||||
return _to_str("map_v")
|
||||
elif (
|
||||
value_type
|
||||
== "memgraph::expr::TypedValueT<memgraph::storage::v3::VertexAccessor, memgraph::storage::v3::EdgeAccessor, memgraph::storage::v3::Path>::Type::Vertex"
|
||||
):
|
||||
return _to_str("vertex_v")
|
||||
elif (
|
||||
value_type
|
||||
== "memgraph::expr::TypedValueT<memgraph::storage::v3::VertexAccessor, memgraph::storage::v3::EdgeAccessor, memgraph::storage::v3::Path>::Type::Edge"
|
||||
):
|
||||
return _to_str("edge_v")
|
||||
elif (
|
||||
value_type
|
||||
== "memgraph::expr::TypedValueT<memgraph::storage::v3::VertexAccessor, memgraph::storage::v3::EdgeAccessor, memgraph::storage::v3::Path>::Type::Path"
|
||||
):
|
||||
return _to_str("path_v")
|
||||
return "{%s}" % value_type
|
||||
|
||||
|
||||
gdb.printing.register_pretty_printer(None, build_memgraph_pretty_printers(), replace=True)
|
||||
|
Loading…
Reference in New Issue
Block a user