From f3574012c50b2ab1b87019d63c99a67031831255 Mon Sep 17 00:00:00 2001 From: Andi Date: Mon, 19 Feb 2024 11:36:51 +0100 Subject: [PATCH 1/5] Add cpp23 support (#1726) --- CMakeLists.txt | 2 +- tests/unit/integrations_kafka_consumer.cpp | 3 +-- tests/unit/kafka_mock.cpp | 6 +----- tests/unit/kafka_mock.hpp | 3 +-- 4 files changed, 4 insertions(+), 10 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 62c5a6fcf..3812cc86d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -189,7 +189,7 @@ add_custom_target(clean_all # is easier debugging of compilation and linker flags. set(CMAKE_EXPORT_COMPILE_COMMANDS ON) -set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD 23) set(CMAKE_CXX_STANDARD_REQUIRED ON) # c99-designator is disabled because of required mixture of designated and # non-designated initializers in Python Query Module code (`py_module.cpp`). diff --git a/tests/unit/integrations_kafka_consumer.cpp b/tests/unit/integrations_kafka_consumer.cpp index 3d5feb80b..2265aa310 100644 --- a/tests/unit/integrations_kafka_consumer.cpp +++ b/tests/unit/integrations_kafka_consumer.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -113,7 +113,6 @@ struct ConsumerTest : public ::testing::Test { void SeedTopicWithInt(const std::string &topic_name, int value) { std::array int_as_char{}; std::memcpy(int_as_char.data(), &value, int_as_char.size()); - cluster.SeedTopic(topic_name, int_as_char); } diff --git a/tests/unit/kafka_mock.cpp b/tests/unit/kafka_mock.cpp index 7cf788479..0ea9bcac4 100644 --- a/tests/unit/kafka_mock.cpp +++ b/tests/unit/kafka_mock.cpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -78,10 +78,6 @@ void KafkaClusterMock::CreateTopic(const std::string &topic_name) { } } -void KafkaClusterMock::SeedTopic(const std::string &topic_name, std::string_view message) { - SeedTopic(topic_name, std::span{message.data(), message.size()}); -} - void KafkaClusterMock::SeedTopic(const std::string &topic_name, std::span message) { char errstr[256] = {'\0'}; std::string bootstraps_servers = Bootstraps(); diff --git a/tests/unit/kafka_mock.hpp b/tests/unit/kafka_mock.hpp index fce563fda..5905d281f 100644 --- a/tests/unit/kafka_mock.hpp +++ b/tests/unit/kafka_mock.hpp @@ -1,4 +1,4 @@ -// Copyright 2021 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -41,7 +41,6 @@ class KafkaClusterMock { std::string Bootstraps() const; void CreateTopic(const std::string &topic_name); void SeedTopic(const std::string &topic_name, std::span message); - void SeedTopic(const std::string &topic_name, std::string_view message); private: RdKafkaUniquePtr rk_{nullptr}; From bae3e8a6d3b3209dded322efc60248b7d89eebe9 Mon Sep 17 00:00:00 2001 From: Josipmrden Date: Mon, 19 Feb 2024 13:56:01 +0100 Subject: [PATCH 2/5] Add function for property sizes (#1557) Add function for property sizes --- src/query/db_accessor.hpp | 16 ++ .../interpret/awesome_memgraph_functions.cpp | 24 ++ src/storage/v2/edge_accessor.cpp | 22 ++ src/storage/v2/edge_accessor.hpp | 3 + src/storage/v2/name_id_mapper.hpp | 12 + src/storage/v2/property_store.cpp | 263 +++++++++++++---- src/storage/v2/property_store.hpp | 5 + src/storage/v2/storage.hpp | 12 + src/storage/v2/vertex_accessor.cpp | 22 +- src/storage/v2/vertex_accessor.hpp | 5 +- tests/e2e/CMakeLists.txt | 1 + tests/e2e/awesome_functions/CMakeLists.txt | 6 + .../awesome_functions/awesome_functions.py | 269 ++++++++++++++++++ tests/e2e/awesome_functions/common.py | 29 ++ tests/e2e/awesome_functions/workloads.yaml | 14 + 15 files changed, 642 insertions(+), 61 deletions(-) create mode 100644 tests/e2e/awesome_functions/CMakeLists.txt create mode 100644 tests/e2e/awesome_functions/awesome_functions.py create mode 100644 tests/e2e/awesome_functions/common.py create mode 100644 tests/e2e/awesome_functions/workloads.yaml diff --git a/src/query/db_accessor.hpp b/src/query/db_accessor.hpp index 71b997d9e..e10102ee5 100644 --- a/src/query/db_accessor.hpp +++ b/src/query/db_accessor.hpp @@ -54,6 +54,10 @@ class EdgeAccessor final { return impl_.GetProperty(key, view); } + storage::Result GetPropertySize(storage::PropertyId key, storage::View view) const { + return impl_.GetPropertySize(key, view); + } + storage::Result SetProperty(storage::PropertyId key, const storage::PropertyValue &value) { return impl_.SetProperty(key, value); } @@ -129,6 +133,10 @@ class VertexAccessor final { return impl_.GetProperty(key, view); } + storage::Result GetPropertySize(storage::PropertyId key, storage::View view) const { + return impl_.GetPropertySize(key, view); + } + storage::Result SetProperty(storage::PropertyId key, const storage::PropertyValue &value) { return impl_.SetProperty(key, value); } @@ -268,6 +276,10 @@ class SubgraphVertexAccessor final { return impl_.GetProperty(view, key); } + storage::Result GetPropertySize(storage::PropertyId key, storage::View view) const { + return impl_.GetPropertySize(key, view); + } + storage::Gid Gid() const noexcept { return impl_.Gid(); } storage::Result InDegree(storage::View view) const { return impl_.InDegree(view); } @@ -529,6 +541,10 @@ class DbAccessor final { storage::PropertyId NameToProperty(const std::string_view name) { return accessor_->NameToProperty(name); } + std::optional NameToPropertyIfExists(std::string_view name) const { + return accessor_->NameToPropertyIfExists(name); + } + storage::LabelId NameToLabel(const std::string_view name) { return accessor_->NameToLabel(name); } storage::EdgeTypeId NameToEdgeType(const std::string_view name) { return accessor_->NameToEdgeType(name); } diff --git a/src/query/interpret/awesome_memgraph_functions.cpp b/src/query/interpret/awesome_memgraph_functions.cpp index 
ece0aec78..6be8c4837 100644 --- a/src/query/interpret/awesome_memgraph_functions.cpp +++ b/src/query/interpret/awesome_memgraph_functions.cpp @@ -442,6 +442,29 @@ TypedValue Size(const TypedValue *args, int64_t nargs, const FunctionContext &ct } } +TypedValue PropertySize(const TypedValue *args, int64_t nargs, const FunctionContext &ctx) { + FType, Or>("propertySize", args, nargs); + + auto *dba = ctx.db_accessor; + + const auto &property_name = args[1].ValueString(); + const auto maybe_property_id = dba->NameToPropertyIfExists(property_name); + + if (!maybe_property_id) { + return TypedValue(0, ctx.memory); + } + + uint64_t property_size = 0; + const auto &graph_entity = args[0]; + if (graph_entity.IsVertex()) { + property_size = graph_entity.ValueVertex().GetPropertySize(*maybe_property_id, ctx.view).GetValue(); + } else if (graph_entity.IsEdge()) { + property_size = graph_entity.ValueEdge().GetPropertySize(*maybe_property_id, ctx.view).GetValue(); + } + + return TypedValue(static_cast(property_size), ctx.memory); +} + TypedValue StartNode(const TypedValue *args, int64_t nargs, const FunctionContext &ctx) { FType>("startNode", args, nargs); if (args[0].IsNull()) return TypedValue(ctx.memory); @@ -1325,6 +1348,7 @@ std::function EdgeAccessor::GetProperty(PropertyId property, View view) return *std::move(value); } +Result EdgeAccessor::GetPropertySize(PropertyId property, View view) const { + if (!storage_->config_.salient.items.properties_on_edges) return 0; + + auto guard = std::shared_lock{edge_.ptr->lock}; + Delta *delta = edge_.ptr->delta; + if (!delta) { + return edge_.ptr->properties.PropertySize(property); + } + + auto property_result = this->GetProperty(property, view); + + if (property_result.HasError()) { + return property_result.GetError(); + } + + auto property_store = storage::PropertyStore(); + property_store.SetProperty(property, *property_result); + + return property_store.PropertySize(property); +}; + Result> EdgeAccessor::Properties(View view) const { if (!storage_->config_.salient.items.properties_on_edges) return std::map{}; bool exists = true; diff --git a/src/storage/v2/edge_accessor.hpp b/src/storage/v2/edge_accessor.hpp index 83a3e549d..6b76ddbe8 100644 --- a/src/storage/v2/edge_accessor.hpp +++ b/src/storage/v2/edge_accessor.hpp @@ -82,6 +82,9 @@ class EdgeAccessor final { /// @throw std::bad_alloc Result GetProperty(PropertyId property, View view) const; + /// Returns the size of the encoded edge property in bytes. + Result GetPropertySize(PropertyId property, View view) const; + /// @throw std::bad_alloc Result> Properties(View view) const; diff --git a/src/storage/v2/name_id_mapper.hpp b/src/storage/v2/name_id_mapper.hpp index bb91e3647..d1e8293f9 100644 --- a/src/storage/v2/name_id_mapper.hpp +++ b/src/storage/v2/name_id_mapper.hpp @@ -83,6 +83,18 @@ class NameIdMapper { return id; } + /// This method unlike NameToId does not insert the new property id if not found + /// but just returns either std::nullopt or the value of the property id if it + /// finds it. + virtual std::optional NameToIdIfExists(const std::string_view name) { + auto name_to_id_acc = name_to_id_.access(); + auto found = name_to_id_acc.find(name); + if (found == name_to_id_acc.end()) { + return std::nullopt; + } + return found->id; + } + // NOTE: Currently this function returns a `const std::string &` instead of a // `std::string` to avoid making unnecessary copies of the string. 
// Usually, this wouldn't be correct because the accessor to the diff --git a/src/storage/v2/property_store.cpp b/src/storage/v2/property_store.cpp index 427998fbe..e6e4dbbaf 100644 --- a/src/storage/v2/property_store.cpp +++ b/src/storage/v2/property_store.cpp @@ -93,6 +93,19 @@ enum class Size : uint8_t { INT64 = 0x03, }; +uint64_t SizeToByteSize(Size size) { + switch (size) { + case Size::INT8: + return 1; + case Size::INT16: + return 2; + case Size::INT32: + return 4; + case Size::INT64: + return 8; + } +} + // All of these values must have the lowest 4 bits set to zero because they are // used to store two `Size` values as described in the comment above. enum class Type : uint8_t { @@ -486,6 +499,27 @@ std::optional DecodeTemporalData(Reader &reader) { return TemporalData{static_cast(*type_value), *microseconds_value}; } +std::optional DecodeTemporalDataSize(Reader &reader) { + uint64_t temporal_data_size = 0; + + auto metadata = reader.ReadMetadata(); + if (!metadata || metadata->type != Type::TEMPORAL_DATA) return std::nullopt; + + temporal_data_size += 1; + + auto type_value = reader.ReadUint(metadata->id_size); + if (!type_value) return std::nullopt; + + temporal_data_size += SizeToByteSize(metadata->id_size); + + auto microseconds_value = reader.ReadInt(metadata->payload_size); + if (!microseconds_value) return std::nullopt; + + temporal_data_size += SizeToByteSize(metadata->payload_size); + + return temporal_data_size; +} + } // namespace // Function used to decode a PropertyValue from a byte stream. @@ -572,6 +606,92 @@ std::optional DecodeTemporalData(Reader &reader) { } } +[[nodiscard]] bool DecodePropertyValueSize(Reader *reader, Type type, Size payload_size, uint64_t &property_size) { + switch (type) { + case Type::EMPTY: { + return false; + } + case Type::NONE: + case Type::BOOL: { + return true; + } + case Type::INT: { + reader->ReadInt(payload_size); + property_size += SizeToByteSize(payload_size); + return true; + } + case Type::DOUBLE: { + reader->ReadDouble(payload_size); + property_size += SizeToByteSize(payload_size); + return true; + } + case Type::STRING: { + auto size = reader->ReadUint(payload_size); + if (!size) return false; + property_size += SizeToByteSize(payload_size); + + std::string str_v(*size, '\0'); + if (!reader->SkipBytes(*size)) return false; + property_size += *size; + + return true; + } + case Type::LIST: { + auto size = reader->ReadUint(payload_size); + if (!size) return false; + + uint64_t list_property_size = SizeToByteSize(payload_size); + + for (uint64_t i = 0; i < *size; ++i) { + auto metadata = reader->ReadMetadata(); + if (!metadata) return false; + + list_property_size += 1; + if (!DecodePropertyValueSize(reader, metadata->type, metadata->payload_size, list_property_size)) return false; + } + + property_size += list_property_size; + return true; + } + case Type::MAP: { + auto size = reader->ReadUint(payload_size); + if (!size) return false; + + uint64_t map_property_size = SizeToByteSize(payload_size); + + for (uint64_t i = 0; i < *size; ++i) { + auto metadata = reader->ReadMetadata(); + if (!metadata) return false; + + map_property_size += 1; + + auto key_size = reader->ReadUint(metadata->id_size); + if (!key_size) return false; + + map_property_size += SizeToByteSize(metadata->id_size); + + std::string key(*key_size, '\0'); + if (!reader->ReadBytes(key.data(), *key_size)) return false; + + map_property_size += *key_size; + + if (!DecodePropertyValueSize(reader, metadata->type, metadata->payload_size, map_property_size)) return false; + 
} + + property_size += map_property_size; + return true; + } + + case Type::TEMPORAL_DATA: { + const auto maybe_temporal_data_size = DecodeTemporalDataSize(*reader); + if (!maybe_temporal_data_size) return false; + + property_size += *maybe_temporal_data_size; + return true; + } + } +} + // Function used to skip a PropertyValue from a byte stream. // // @sa ComparePropertyValue @@ -788,6 +908,27 @@ enum class ExpectedPropertyStatus { : ExpectedPropertyStatus::GREATER; } +[[nodiscard]] ExpectedPropertyStatus DecodeExpectedPropertySize(Reader *reader, PropertyId expected_property, + uint64_t &size) { + auto metadata = reader->ReadMetadata(); + if (!metadata) return ExpectedPropertyStatus::MISSING_DATA; + + auto property_id = reader->ReadUint(metadata->id_size); + if (!property_id) return ExpectedPropertyStatus::MISSING_DATA; + + if (*property_id == expected_property.AsUint()) { + // Add one byte for reading metadata + add the number of bytes for the property key + size += (1 + SizeToByteSize(metadata->id_size)); + if (!DecodePropertyValueSize(reader, metadata->type, metadata->payload_size, size)) + return ExpectedPropertyStatus::MISSING_DATA; + return ExpectedPropertyStatus::EQUAL; + } + // Don't load the value if this isn't the expected property. + if (!SkipPropertyValue(reader, metadata->type, metadata->payload_size)) return ExpectedPropertyStatus::MISSING_DATA; + return (*property_id < expected_property.AsUint()) ? ExpectedPropertyStatus::SMALLER + : ExpectedPropertyStatus::GREATER; +} + // Function used to check a property exists (PropertyId) from a byte stream. // It will skip the encoded PropertyValue. // @@ -875,6 +1016,13 @@ enum class ExpectedPropertyStatus { } } +[[nodiscard]] ExpectedPropertyStatus FindSpecificPropertySize(Reader *reader, PropertyId property, uint64_t &size) { + ExpectedPropertyStatus ret = ExpectedPropertyStatus::SMALLER; + while ((ret = DecodeExpectedPropertySize(reader, property, size)) == ExpectedPropertyStatus::SMALLER) { + } + return ret; +} + // Function used to find if property is set. It relies on the fact that the properties // are sorted (by ID) in the buffer. // @@ -983,6 +1131,31 @@ std::pair GetSizeData(const uint8_t *buffer) { return {size, data}; } +struct BufferInfo { + uint64_t size; + uint8_t *data{nullptr}; + bool in_local_buffer; +}; + +template +BufferInfo GetBufferInfo(const uint8_t (&buffer)[N]) { + uint64_t size = 0; + const uint8_t *data = nullptr; + bool in_local_buffer = false; + std::tie(size, data) = GetSizeData(buffer); + if (size % 8 != 0) { + // We are storing the data in the local buffer. + size = sizeof(buffer) - 1; + data = &buffer[1]; + in_local_buffer = true; + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) + auto *non_const_data = const_cast(data); + + return {size, non_const_data, in_local_buffer}; +} + void SetSizeData(uint8_t *buffer, uint64_t size, uint8_t *data) { memcpy(buffer, &size, sizeof(uint64_t)); memcpy(buffer + sizeof(uint64_t), &data, sizeof(uint8_t *)); @@ -1023,30 +1196,27 @@ PropertyStore::~PropertyStore() { } PropertyValue PropertyStore::GetProperty(PropertyId property) const { - uint64_t size; - const uint8_t *data; - std::tie(size, data) = GetSizeData(buffer_); - if (size % 8 != 0) { - // We are storing the data in the local buffer. 
- size = sizeof(buffer_) - 1; - data = &buffer_[1]; - } - Reader reader(data, size); + BufferInfo buffer_info = GetBufferInfo(buffer_); + Reader reader(buffer_info.data, buffer_info.size); + PropertyValue value; if (FindSpecificProperty(&reader, property, value) != ExpectedPropertyStatus::EQUAL) return {}; return value; } +uint64_t PropertyStore::PropertySize(PropertyId property) const { + auto data_size_localbuffer = GetBufferInfo(buffer_); + Reader reader(data_size_localbuffer.data, data_size_localbuffer.size); + + uint64_t property_size = 0; + if (FindSpecificPropertySize(&reader, property, property_size) != ExpectedPropertyStatus::EQUAL) return 0; + return property_size; +} + bool PropertyStore::HasProperty(PropertyId property) const { - uint64_t size; - const uint8_t *data; - std::tie(size, data) = GetSizeData(buffer_); - if (size % 8 != 0) { - // We are storing the data in the local buffer. - size = sizeof(buffer_) - 1; - data = &buffer_[1]; - } - Reader reader(data, size); + BufferInfo buffer_info = GetBufferInfo(buffer_); + Reader reader(buffer_info.data, buffer_info.size); + return ExistsSpecificProperty(&reader, property) == ExpectedPropertyStatus::EQUAL; } @@ -1081,32 +1251,20 @@ std::optional> PropertyStore::ExtractPropertyValues( } bool PropertyStore::IsPropertyEqual(PropertyId property, const PropertyValue &value) const { - uint64_t size; - const uint8_t *data; - std::tie(size, data) = GetSizeData(buffer_); - if (size % 8 != 0) { - // We are storing the data in the local buffer. - size = sizeof(buffer_) - 1; - data = &buffer_[1]; - } - Reader reader(data, size); + BufferInfo buffer_info = GetBufferInfo(buffer_); + Reader reader(buffer_info.data, buffer_info.size); + auto info = FindSpecificPropertyAndBufferInfo(&reader, property); if (info.property_size == 0) return value.IsNull(); - Reader prop_reader(data + info.property_begin, info.property_size); + Reader prop_reader(buffer_info.data + info.property_begin, info.property_size); if (!CompareExpectedProperty(&prop_reader, property, value)) return false; return prop_reader.GetPosition() == info.property_size; } std::map PropertyStore::Properties() const { - uint64_t size; - const uint8_t *data; - std::tie(size, data) = GetSizeData(buffer_); - if (size % 8 != 0) { - // We are storing the data in the local buffer. - size = sizeof(buffer_) - 1; - data = &buffer_[1]; - } - Reader reader(data, size); + BufferInfo buffer_info = GetBufferInfo(buffer_); + Reader reader(buffer_info.data, buffer_info.size); + std::map props; while (true) { PropertyValue value; @@ -1340,33 +1498,20 @@ bool PropertyStore::InitProperties(std::vector(data[i]); + BufferInfo buffer_info = GetBufferInfo(buffer_); + + std::string arr(buffer_info.size, ' '); + for (uint i = 0; i < buffer_info.size; ++i) { + arr[i] = static_cast(buffer_info.data[i]); } return arr; } diff --git a/src/storage/v2/property_store.hpp b/src/storage/v2/property_store.hpp index c217cbd81..eee83f5df 100644 --- a/src/storage/v2/property_store.hpp +++ b/src/storage/v2/property_store.hpp @@ -45,6 +45,11 @@ class PropertyStore { /// @throw std::bad_alloc PropertyValue GetProperty(PropertyId property) const; + /// Returns the size of the encoded property in bytes. + /// Returns 0 if the property does not exist. + /// The time complexity of this function is O(n). + uint64_t PropertySize(PropertyId property) const; + /// Checks whether the property `property` exists in the store. The time /// complexity of this function is O(n). 
bool HasProperty(PropertyId property) const; diff --git a/src/storage/v2/storage.hpp b/src/storage/v2/storage.hpp index a096f27fd..722867f74 100644 --- a/src/storage/v2/storage.hpp +++ b/src/storage/v2/storage.hpp @@ -250,6 +250,10 @@ class Storage { PropertyId NameToProperty(std::string_view name) { return storage_->NameToProperty(name); } + std::optional NameToPropertyIfExists(std::string_view name) const { + return storage_->NameToPropertyIfExists(name); + } + EdgeTypeId NameToEdgeType(std::string_view name) { return storage_->NameToEdgeType(name); } StorageMode GetCreationStorageMode() const noexcept; @@ -318,6 +322,14 @@ class Storage { return PropertyId::FromUint(name_id_mapper_->NameToId(name)); } + std::optional NameToPropertyIfExists(std::string_view name) const { + const auto id = name_id_mapper_->NameToIdIfExists(name); + if (!id) { + return std::nullopt; + } + return PropertyId::FromUint(*id); + } + EdgeTypeId NameToEdgeType(const std::string_view name) const { return EdgeTypeId::FromUint(name_id_mapper_->NameToId(name)); } diff --git a/src/storage/v2/vertex_accessor.cpp b/src/storage/v2/vertex_accessor.cpp index ff5062444..ef0a6ab3e 100644 --- a/src/storage/v2/vertex_accessor.cpp +++ b/src/storage/v2/vertex_accessor.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -438,6 +438,26 @@ Result VertexAccessor::GetProperty(PropertyId property, View view return std::move(value); } +Result VertexAccessor::GetPropertySize(PropertyId property, View view) const { + { + auto guard = std::shared_lock{vertex_->lock}; + Delta *delta = vertex_->delta; + if (!delta) { + return vertex_->properties.PropertySize(property); + } + } + + auto property_result = this->GetProperty(property, view); + if (property_result.HasError()) { + return property_result.GetError(); + } + + auto property_store = storage::PropertyStore(); + property_store.SetProperty(property, *property_result); + + return property_store.PropertySize(property); +}; + Result> VertexAccessor::Properties(View view) const { bool exists = true; bool deleted = false; diff --git a/src/storage/v2/vertex_accessor.hpp b/src/storage/v2/vertex_accessor.hpp index 0e5972d14..18fad3dcc 100644 --- a/src/storage/v2/vertex_accessor.hpp +++ b/src/storage/v2/vertex_accessor.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -80,6 +80,9 @@ class VertexAccessor final { /// @throw std::bad_alloc Result GetProperty(PropertyId property, View view) const; + /// Returns the size of the encoded vertex property in bytes. 
+ Result GetPropertySize(PropertyId property, View view) const; + /// @throw std::bad_alloc Result> Properties(View view) const; diff --git a/tests/e2e/CMakeLists.txt b/tests/e2e/CMakeLists.txt index 7e555398e..b8fee9940 100644 --- a/tests/e2e/CMakeLists.txt +++ b/tests/e2e/CMakeLists.txt @@ -76,6 +76,7 @@ add_subdirectory(queries) add_subdirectory(query_modules_storage_modes) add_subdirectory(garbage_collection) add_subdirectory(query_planning) +add_subdirectory(awesome_functions) if (MG_EXPERIMENTAL_HIGH_AVAILABILITY) add_subdirectory(high_availability_experimental) diff --git a/tests/e2e/awesome_functions/CMakeLists.txt b/tests/e2e/awesome_functions/CMakeLists.txt new file mode 100644 index 000000000..9d6e0143b --- /dev/null +++ b/tests/e2e/awesome_functions/CMakeLists.txt @@ -0,0 +1,6 @@ +function(copy_awesome_functions_e2e_python_files FILE_NAME) + copy_e2e_python_files(awesome_functions ${FILE_NAME}) +endfunction() + +copy_awesome_functions_e2e_python_files(common.py) +copy_awesome_functions_e2e_python_files(awesome_functions.py) diff --git a/tests/e2e/awesome_functions/awesome_functions.py b/tests/e2e/awesome_functions/awesome_functions.py new file mode 100644 index 000000000..9761708ed --- /dev/null +++ b/tests/e2e/awesome_functions/awesome_functions.py @@ -0,0 +1,269 @@ +# Copyright 2023 Memgraph Ltd. +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +# License, and you may not use this file except in compliance with the Business Source License. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. 
+ +import sys + +import pytest +from common import get_bytes, memgraph + + +def test_property_size_on_null_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node) + SET n.null_prop = null; + """ + ) + + null_bytes = get_bytes(memgraph, "null_prop") + + # No property stored, no bytes allocated + assert null_bytes == 0 + + +def test_property_size_on_bool_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node) + SET n.bool_prop = True; + """ + ) + + bool_bytes = get_bytes(memgraph, "bool_prop") + + # 1 byte metadata, 1 byte prop id, but value is encoded in the metadata + assert bool_bytes == 2 + + +def test_property_size_on_one_byte_int_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node) + SET n.S_int_prop = 4; + """ + ) + + s_int_bytes = get_bytes(memgraph, "S_int_prop") + + # 1 byte metadata, 1 byte prop id + payload size 1 byte to store the int + assert s_int_bytes == 3 + + +def test_property_size_on_two_byte_int_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node) + SET n.M_int_prop = 500; + """ + ) + + m_int_bytes = get_bytes(memgraph, "M_int_prop") + + # 1 byte metadata, 1 byte prop id + payload size 2 bytes to store the int + assert m_int_bytes == 4 + + +def test_property_size_on_four_byte_int_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node) + SET n.L_int_prop = 1000000000; + """ + ) + + l_int_bytes = get_bytes(memgraph, "L_int_prop") + + # 1 byte metadata, 1 byte prop id + payload size 4 bytes to store the int + assert l_int_bytes == 6 + + +def test_property_size_on_eight_byte_int_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node) + SET n.XL_int_prop = 1000000000000; + """ + ) + + xl_int_bytes = get_bytes(memgraph, "XL_int_prop") + + # 1 byte metadata, 1 byte prop id + payload size 8 bytes to store the int + assert xl_int_bytes == 10 + + +def test_property_size_on_float_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node) + SET n.float_prop = 4.0; + """ + ) + + float_bytes = get_bytes(memgraph, "float_prop") + + # 1 byte metadata, 1 byte prop id + payload size 8 bytes to store the float + assert float_bytes == 10 + + +def test_property_size_on_string_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node) + SET n.str_prop = 'str_value'; + """ + ) + + str_bytes = get_bytes(memgraph, "str_prop") + + # 1 byte metadata + # 1 byte prop id + # - the payload size contains the amount of bytes stored for the size in the next sequence + # X bytes for the length of the string (1, 2, 4 or 8 bytes) -> "str_value" has 1 byte for the length of 9 + # Y bytes for the string content -> 9 bytes for "str_value" + assert str_bytes == 12 + + +def test_property_size_on_list_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node) + SET n.list_prop = [1, 2, 3]; + """ + ) + + list_bytes = get_bytes(memgraph, "list_prop") + + # 1 byte metadata + # 1 byte prop id + # - the payload size contains the amount of bytes stored for the size of the list + # X bytes for the size of the list (1, 2, 4 or 8 bytes) + # for each list element: + # - 1 byte for the metadata + # - the amount of bytes for the payload of the type (a small int is 1 additional byte) + # in this case 1 + 1 + 3 * (1 + 1) + assert list_bytes == 9 + + +def test_property_size_on_map_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node) + SET n.map_prop = {key1: 'value', key2: 4}; + """ + ) + + map_bytes = get_bytes(memgraph, "map_prop") + + # 1 byte metadata + # 1 byte prop id + # - the payload size contains the amount of bytes stored for the size of the map + # X bytes for the size of the 
map (1, 2, 4 or 8 bytes - in this case 1) + # for every map element: + # - 1 byte for metadata + # - 1, 2, 4 or 8 bytes for the key length (read from the metadata payload) -> this case 1 + # - Y bytes for the key content -> this case 4 + # - Z amount of bytes for the type + # - for 'value' -> 1 byte for size and 5 for length + # - for 4 -> 1 byte for content read from payload + # total: 1 + 1 + (1 + 1 + 4 + (1 + 5)) + (1 + 1 + 4 + (1)) + assert map_bytes == 22 + + +def test_property_size_on_date_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node) + SET n.date_prop = date('2023-01-01'); + """ + ) + + date_bytes = get_bytes(memgraph, "date_prop") + + # 1 byte metadata (to see that it's temporal data) + # 1 byte prop id + # 1 byte metadata + # - type is again the same + # - id field contains the length of the specific temporal type (1, 2, 4 or 8 bytes) -> probably always 1 + # - payload field contains the length of the microseconds (1, 2, 4, or 8 bytes) -> probably always 8 + assert date_bytes == 12 + + +def test_property_size_on_local_time_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node) + SET n.localtime_prop = localtime('23:00:00'); + """ + ) + + local_time_bytes = get_bytes(memgraph, "localtime_prop") + + # 1 byte metadata (to see that it's temporal data) + # 1 byte prop id + # 1 byte metadata + # - type is again the same + # - id field contains the length of the specific temporal type (1, 2, 4 or 8 bytes) -> probably always 1 + # - payload field contains the length of the microseconds (1, 2, 4, or 8 bytes) -> probably always 8 + assert local_time_bytes == 12 + + +def test_property_size_on_local_date_time_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node) + SET n.localdatetime_prop = localdatetime('2022-01-01T23:00:00'); + """ + ) + + local_date_time_bytes = get_bytes(memgraph, "localdatetime_prop") + + # 1 byte metadata (to see that it's temporal data) + # 1 byte prop id + # 1 byte metadata + # - type is again the same + # - id field contains the length of the specific temporal type (1, 2, 4 or 8 bytes) -> probably always 1 + # - payload field contains the length of the microseconds (1, 2, 4, or 8 bytes) -> probably always 8 + assert local_date_time_bytes == 12 + + +def test_property_size_on_duration_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node) + SET n.duration_prop = duration('P5DT2M2.33S'); + """ + ) + + duration_bytes = get_bytes(memgraph, "duration_prop") + + # 1 byte metadata (to see that it's temporal data) + # 1 byte prop id + # 1 byte metadata + # - type is again the same + # - id field contains the length of the specific temporal type (1, 2, 4 or 8 bytes) -> probably always 1 + # - payload field contains the length of the microseconds (1, 2, 4, or 8 bytes) -> probably always 8 + assert duration_bytes == 12 + + +def test_property_size_on_nonexistent_prop(memgraph): + memgraph.execute( + """ + CREATE (n:Node); + """ + ) + + nonexistent_bytes = get_bytes(memgraph, "nonexistent_prop") + + assert nonexistent_bytes == 0 + + +if __name__ == "__main__": + sys.exit(pytest.main([__file__, "-rA"])) diff --git a/tests/e2e/awesome_functions/common.py b/tests/e2e/awesome_functions/common.py new file mode 100644 index 000000000..14f272c23 --- /dev/null +++ b/tests/e2e/awesome_functions/common.py @@ -0,0 +1,29 @@ +# Copyright 2023 Memgraph Ltd. 
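The byte counts asserted in the tests above all come from the same accounting: one metadata byte plus one property-id byte per stored property, followed by a type-dependent payload whose width (1, 2, 4 or 8 bytes) is recorded in the metadata nibbles. A rough Python model of that accounting, written only to make the expected values above reproducible — the exact integer-width cutoffs and the key/length encoding details are assumptions read off the test comments, not the PropertyStore implementation — could look like this:

def int_payload(v: int) -> int:
    """Assumed number of bytes needed to store an integer (1, 2, 4 or 8)."""
    if -2**7 <= v < 2**7:
        return 1
    if -2**15 <= v < 2**15:
        return 2
    if -2**31 <= v < 2**31:
        return 4
    return 8

def value_payload(v) -> int:
    """Payload bytes for one value; the caller counts the leading metadata byte."""
    if v is None or isinstance(v, bool):
        return 0  # encoded entirely in the metadata byte
    if isinstance(v, int):
        return int_payload(v)
    if isinstance(v, float):
        return 8
    if isinstance(v, str):
        data = v.encode()
        return int_payload(len(data)) + len(data)  # length field + characters
    if isinstance(v, list):
        return int_payload(len(v)) + sum(1 + value_payload(e) for e in v)
    if isinstance(v, dict):
        return int_payload(len(v)) + sum(
            1 + int_payload(len(k)) + len(k) + value_payload(e) for k, e in v.items()
        )
    raise TypeError(f"unsupported type: {type(v)}")

def expected_property_size(v) -> int:
    """1 metadata byte + 1 byte for a small property id + the payload."""
    if v is None:
        return 0  # null properties are simply not stored
    return 1 + 1 + value_payload(v)

assert expected_property_size(None) == 0
assert expected_property_size(True) == 2
assert expected_property_size(4) == 3
assert expected_property_size(500) == 4
assert expected_property_size(1_000_000_000_000) == 10
assert expected_property_size(4.0) == 10
assert expected_property_size("str_value") == 12
assert expected_property_size([1, 2, 3]) == 9
assert expected_property_size({"key1": "value", "key2": 4}) == 22

Temporal values (date, localtime, localdatetime, duration) are not modelled above; per the test comments they always land on 12 bytes: two header bytes, one inner metadata byte, a 1-byte temporal type and 8 bytes of microseconds — which is exactly what get_bytes() below reports via propertySize(n, '<name>').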
+# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +# License, and you may not use this file except in compliance with the Business Source License. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +import pytest +from gqlalchemy import Memgraph + + +@pytest.fixture +def memgraph(**kwargs) -> Memgraph: + memgraph = Memgraph() + + yield memgraph + + memgraph.drop_indexes() + memgraph.ensure_constraints([]) + memgraph.drop_database() + + +def get_bytes(memgraph, prop_name): + res = list(memgraph.execute_and_fetch(f"MATCH (n) RETURN propertySize(n, '{prop_name}') AS size")) + return res[0]["size"] diff --git a/tests/e2e/awesome_functions/workloads.yaml b/tests/e2e/awesome_functions/workloads.yaml new file mode 100644 index 000000000..37e5e8813 --- /dev/null +++ b/tests/e2e/awesome_functions/workloads.yaml @@ -0,0 +1,14 @@ +awesome_functions_cluster: &awesome_functions_cluster + cluster: + main: + args: ["--bolt-port", "7687", "--log-level=TRACE"] + log_file: "awesome_functions.log" + setup_queries: [] + validation_queries: [] + + +workloads: + - name: "Awesome Functions" + binary: "tests/e2e/pytest_runner.sh" + args: ["awesome_functions/awesome_functions.py"] + <<: *awesome_functions_cluster From f098a9d5e33d417f44f791fb5c99bd73b5063084 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20Budiseli=C4=87?= Date: Mon, 19 Feb 2024 14:50:37 +0100 Subject: [PATCH 3/5] Patch NuRaft for clang-17 compilation (#1733) --- libs/.gitignore | 1 + libs/nuraft2.1.0.patch | 24 ++++++++++++++++++++++++ libs/setup.sh | 1 + 3 files changed, 26 insertions(+) create mode 100644 libs/nuraft2.1.0.patch diff --git a/libs/.gitignore b/libs/.gitignore index 1d149f2f0..6eb8fabc0 100644 --- a/libs/.gitignore +++ b/libs/.gitignore @@ -7,3 +7,4 @@ !pulsar.patch !antlr4.10.1.patch !rocksdb8.1.1.patch +!nuraft2.1.0.patch diff --git a/libs/nuraft2.1.0.patch b/libs/nuraft2.1.0.patch new file mode 100644 index 000000000..574978872 --- /dev/null +++ b/libs/nuraft2.1.0.patch @@ -0,0 +1,24 @@ +diff --git a/include/libnuraft/asio_service_options.hxx b/include/libnuraft/asio_service_options.hxx +index 8fe1ec9..9497355 100644 +--- a/include/libnuraft/asio_service_options.hxx ++++ b/include/libnuraft/asio_service_options.hxx +@@ -17,6 +17,7 @@ limitations under the License. + + #pragma once + ++#include + #include + #include + #include +diff --git a/include/libnuraft/callback.hxx b/include/libnuraft/callback.hxx +index 7b71624..d48c1e2 100644 +--- a/include/libnuraft/callback.hxx ++++ b/include/libnuraft/callback.hxx +@@ -18,6 +18,7 @@ limitations under the License. 
+ #ifndef _CALLBACK_H_ + #define _CALLBACK_H_ + ++#include + #include + #include + diff --git a/libs/setup.sh b/libs/setup.sh index 74291911e..ebf20e830 100755 --- a/libs/setup.sh +++ b/libs/setup.sh @@ -286,5 +286,6 @@ repo_clone_try_double "${primary_urls[range-v3]}" "${secondary_urls[range-v3]}" nuraft_tag="v2.1.0" repo_clone_try_double "${primary_urls[nuraft]}" "${secondary_urls[nuraft]}" "nuraft" "$nuraft_tag" true pushd nuraft +git apply ../nuraft2.1.0.patch ./prepare.sh popd From 7ec648b4cebdd4d3c2e5e305b3e6406d1efdb31f Mon Sep 17 00:00:00 2001 From: Andi Date: Mon, 19 Feb 2024 17:28:15 +0100 Subject: [PATCH 4/5] Add --experimental-enabled=high-availability (#1720) --- .github/workflows/diff.yaml | 47 --- CMakeLists.txt | 12 - src/coordination/CMakeLists.txt | 1 - src/coordination/coordinator_handlers.cpp | 2 +- src/coordination/coordinator_instance.cpp | 8 +- .../include/coordination/constants.hpp | 22 -- src/flags/experimental.cpp | 5 +- src/flags/experimental.hpp | 1 + src/query/interpreter.cpp | 57 +--- src/query/replication_query_handler.hpp | 4 +- .../replication_handler.hpp | 13 +- .../replication_handler.cpp | 14 +- src/storage/v2/name_id_mapper.hpp | 2 +- src/utils/functional.hpp | 11 +- tests/e2e/CMakeLists.txt | 6 +- tests/e2e/configuration/default_config.py | 2 +- tests/e2e/high_availability/CMakeLists.txt | 15 + .../common.py | 0 .../coord_cluster_registration.py | 6 + .../coordinator.py | 0 .../disable_writing_on_main_after_restart.py | 6 + .../distributed_coords.py | 6 + .../manual_setting_replicas.py | 17 +- .../not_replicate_from_old_main.py | 63 ++-- .../single_coordinator.py | 17 +- .../workloads.yaml | 22 +- .../CMakeLists.txt | 15 - tests/e2e/interactive_mg_runner.py | 6 + .../show_while_creating_invalid_state.py | 5 +- tests/unit/storage_v2_replication.cpp | 284 ++++++++---------- 30 files changed, 275 insertions(+), 394 deletions(-) delete mode 100644 src/coordination/include/coordination/constants.hpp create mode 100644 tests/e2e/high_availability/CMakeLists.txt rename tests/e2e/{high_availability_experimental => high_availability}/common.py (100%) rename tests/e2e/{high_availability_experimental => high_availability}/coord_cluster_registration.py (97%) rename tests/e2e/{high_availability_experimental => high_availability}/coordinator.py (100%) rename tests/e2e/{high_availability_experimental => high_availability}/disable_writing_on_main_after_restart.py (94%) rename tests/e2e/{high_availability_experimental => high_availability}/distributed_coords.py (94%) rename tests/e2e/{high_availability_experimental => high_availability}/manual_setting_replicas.py (84%) rename tests/e2e/{high_availability_experimental => high_availability}/not_replicate_from_old_main.py (79%) rename tests/e2e/{high_availability_experimental => high_availability}/single_coordinator.py (97%) rename tests/e2e/{high_availability_experimental => high_availability}/workloads.yaml (57%) delete mode 100644 tests/e2e/high_availability_experimental/CMakeLists.txt diff --git a/.github/workflows/diff.yaml b/.github/workflows/diff.yaml index 143ac102f..a2dc0aef2 100644 --- a/.github/workflows/diff.yaml +++ b/.github/workflows/diff.yaml @@ -336,53 +336,6 @@ jobs: # multiple paths could be defined build/logs - experimental_build_ha: - name: "High availability build" - runs-on: [self-hosted, Linux, X64, Diff] - env: - THREADS: 24 - MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} - MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} - - steps: - - name: Set up 
repository - uses: actions/checkout@v4 - with: - # Number of commits to fetch. `0` indicates all history for all - # branches and tags. (default: 1) - fetch-depth: 0 - - - name: Build release binaries - run: | - source /opt/toolchain-v4/activate - ./init - cd build - cmake -DCMAKE_BUILD_TYPE=Release -DMG_EXPERIMENTAL_HIGH_AVAILABILITY=ON .. - make -j$THREADS - - name: Run unit tests - run: | - source /opt/toolchain-v4/activate - cd build - ctest -R memgraph__unit --output-on-failure -j$THREADS - - name: Run e2e tests - if: false - run: | - cd tests - ./setup.sh /opt/toolchain-v4/activate - source ve3/bin/activate_e2e - cd e2e - ./run.sh "Coordinator" - ./run.sh "Client initiated failover" - ./run.sh "Uninitialized cluster" - - name: Save test data - uses: actions/upload-artifact@v4 - if: always() - with: - name: "Test data(High availability build)" - path: | - # multiple paths could be defined - build/logs - release_jepsen_test: name: "Release Jepsen Test" runs-on: [self-hosted, Linux, X64, Debian10, JepsenControl] diff --git a/CMakeLists.txt b/CMakeLists.txt index 3812cc86d..85e2b085c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -271,18 +271,6 @@ endif() set(libs_dir ${CMAKE_SOURCE_DIR}/libs) add_subdirectory(libs EXCLUDE_FROM_ALL) -option(MG_EXPERIMENTAL_HIGH_AVAILABILITY "Feature flag for experimental high availability" OFF) - -if (NOT MG_ENTERPRISE AND MG_EXPERIMENTAL_HIGH_AVAILABILITY) - set(MG_EXPERIMENTAL_HIGH_AVAILABILITY OFF) - message(FATAL_ERROR "MG_EXPERIMENTAL_HIGH_AVAILABILITY can only be used with enterpise version of the code.") -endif () - -if (MG_EXPERIMENTAL_HIGH_AVAILABILITY) - add_compile_definitions(MG_EXPERIMENTAL_HIGH_AVAILABILITY) -endif () - -# Optional subproject configuration ------------------------------------------- option(TEST_COVERAGE "Generate coverage reports from running memgraph" OFF) option(TOOLS "Build tools binaries" ON) option(QUERY_MODULES "Build query modules containing custom procedures" ON) diff --git a/src/coordination/CMakeLists.txt b/src/coordination/CMakeLists.txt index 3150a6c02..936d7a5c2 100644 --- a/src/coordination/CMakeLists.txt +++ b/src/coordination/CMakeLists.txt @@ -11,7 +11,6 @@ target_sources(mg-coordination include/coordination/coordinator_slk.hpp include/coordination/coordinator_instance.hpp include/coordination/coordinator_handlers.hpp - include/coordination/constants.hpp include/coordination/instance_status.hpp include/coordination/replication_instance.hpp include/coordination/raft_state.hpp diff --git a/src/coordination/coordinator_handlers.cpp b/src/coordination/coordinator_handlers.cpp index ff534b549..f605069fe 100644 --- a/src/coordination/coordinator_handlers.cpp +++ b/src/coordination/coordinator_handlers.cpp @@ -132,7 +132,7 @@ void CoordinatorHandlers::PromoteReplicaToMainHandler(replication::ReplicationHa // registering replicas for (auto const &config : req.replication_clients_info | ranges::views::transform(converter)) { - auto instance_client = replication_handler.RegisterReplica(config, false); + auto instance_client = replication_handler.RegisterReplica(config); if (instance_client.HasError()) { using enum memgraph::replication::RegisterReplicaError; switch (instance_client.GetError()) { diff --git a/src/coordination/coordinator_instance.cpp b/src/coordination/coordinator_instance.cpp index 1bbcf4f8f..166b1e8b8 100644 --- a/src/coordination/coordinator_instance.cpp +++ b/src/coordination/coordinator_instance.cpp @@ -186,11 +186,9 @@ auto CoordinatorInstance::TryFailover() -> void { } } - // TODO: (andi) 
fmap compliant - ReplicationClientsInfo repl_clients_info; - repl_clients_info.reserve(repl_instances_.size() - 1); - std::ranges::transform(repl_instances_ | ranges::views::filter(is_not_new_main), - std::back_inserter(repl_clients_info), &ReplicationInstance::ReplicationClientInfo); + auto repl_clients_info = repl_instances_ | ranges::views::filter(is_not_new_main) | + ranges::views::transform(&ReplicationInstance::ReplicationClientInfo) | + ranges::to(); if (!new_main->PromoteToMain(new_main_uuid, std::move(repl_clients_info), main_succ_cb_, main_fail_cb_)) { spdlog::warn("Failover failed since promoting replica to main failed!"); diff --git a/src/coordination/include/coordination/constants.hpp b/src/coordination/include/coordination/constants.hpp deleted file mode 100644 index 819b9fa05..000000000 --- a/src/coordination/include/coordination/constants.hpp +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2024 Memgraph Ltd. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source -// License, and you may not use this file except in compliance with the Business Source License. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -#pragma once - -namespace memgraph::coordination { - -#ifdef MG_EXPERIMENTAL_HIGH_AVAILABILITY -constexpr bool allow_ha = true; -#else -constexpr bool allow_ha = false; -#endif - -} // namespace memgraph::coordination diff --git a/src/flags/experimental.cpp b/src/flags/experimental.cpp index 7bd26a837..123903c96 100644 --- a/src/flags/experimental.cpp +++ b/src/flags/experimental.cpp @@ -19,13 +19,14 @@ // Bolt server flags. // NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables) DEFINE_string(experimental_enabled, "", - "Experimental features to be used, comma seperated. Options [system-replication]"); + "Experimental features to be used, comma seperated. 
Options [system-replication, high-availability]"); using namespace std::string_view_literals; namespace memgraph::flags { -auto const mapping = std::map{std::pair{"system-replication"sv, Experiments::SYSTEM_REPLICATION}}; +auto const mapping = std::map{std::pair{"system-replication"sv, Experiments::SYSTEM_REPLICATION}, + std::pair{"high-availability"sv, Experiments::HIGH_AVAILABILITY}}; auto ExperimentsInstance() -> Experiments & { static auto instance = Experiments{}; diff --git a/src/flags/experimental.hpp b/src/flags/experimental.hpp index ec4db2037..5a19889fe 100644 --- a/src/flags/experimental.hpp +++ b/src/flags/experimental.hpp @@ -23,6 +23,7 @@ namespace memgraph::flags { // old experiments can be reused once code cleanup has happened enum class Experiments : uint8_t { SYSTEM_REPLICATION = 1 << 0, + HIGH_AVAILABILITY = 1 << 1, }; bool AreExperimentsEnabled(Experiments experiments); diff --git a/src/query/interpreter.cpp b/src/query/interpreter.cpp index c7ccbb1ef..9fed81d7e 100644 --- a/src/query/interpreter.cpp +++ b/src/query/interpreter.cpp @@ -109,7 +109,6 @@ #include "utils/variant_helpers.hpp" #ifdef MG_ENTERPRISE -#include "coordination/constants.hpp" #include "flags/experimental.hpp" #endif @@ -370,7 +369,7 @@ class ReplQueryHandler { .replica_check_frequency = replica_check_frequency, .ssl = std::nullopt}; - const auto error = handler_->TryRegisterReplica(replication_config, true).HasError(); + const auto error = handler_->TryRegisterReplica(replication_config).HasError(); if (error) { throw QueryRuntimeException(fmt::format("Couldn't register replica '{}'!", name)); @@ -1131,17 +1130,21 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters & Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Parameters ¶meters, coordination::CoordinatorState *coordinator_state, const query::InterpreterConfig &config, std::vector *notifications) { + using enum memgraph::flags::Experiments; + + if (!license::global_license_checker.IsEnterpriseValidFast()) { + throw QueryRuntimeException("High availability is only available in Memgraph Enterprise."); + } + + if (!flags::AreExperimentsEnabled(HIGH_AVAILABILITY)) { + throw QueryRuntimeException( + "High availability is experimental feature. If you want to use it, add high-availability option to the " + "--experimental-enabled flag."); + } + Callback callback; switch (coordinator_query->action_) { case CoordinatorQuery::Action::ADD_COORDINATOR_INSTANCE: { - if (!license::global_license_checker.IsEnterpriseValidFast()) { - throw QueryException("Trying to use enterprise feature without a valid license."); - } - if constexpr (!coordination::allow_ha) { - throw QueryRuntimeException( - "High availability is experimental feature. Please set MG_EXPERIMENTAL_HIGH_AVAILABILITY compile flag to " - "be able to use this functionality."); - } if (!FLAGS_raft_server_id) { throw QueryRuntimeException("Only coordinator can add coordinator instance!"); } @@ -1165,15 +1168,6 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param return callback; } case CoordinatorQuery::Action::REGISTER_INSTANCE: { - if (!license::global_license_checker.IsEnterpriseValidFast()) { - throw QueryException("Trying to use enterprise feature without a valid license."); - } - - if constexpr (!coordination::allow_ha) { - throw QueryRuntimeException( - "High availability is experimental feature. 
Please set MG_EXPERIMENTAL_HIGH_AVAILABILITY compile flag to " - "be able to use this functionality."); - } if (!FLAGS_raft_server_id) { throw QueryRuntimeException("Only coordinator can register coordinator server!"); } @@ -1205,15 +1199,6 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param return callback; } case CoordinatorQuery::Action::UNREGISTER_INSTANCE: - if (!license::global_license_checker.IsEnterpriseValidFast()) { - throw QueryException("Trying to use enterprise feature without a valid license."); - } - - if constexpr (!coordination::allow_ha) { - throw QueryRuntimeException( - "High availability is experimental feature. Please set MG_EXPERIMENTAL_HIGH_AVAILABILITY compile flag to " - "be able to use this functionality."); - } if (!FLAGS_raft_server_id) { throw QueryRuntimeException("Only coordinator can register coordinator server!"); } @@ -1229,14 +1214,6 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param return callback; case CoordinatorQuery::Action::SET_INSTANCE_TO_MAIN: { - if (!license::global_license_checker.IsEnterpriseValidFast()) { - throw QueryException("Trying to use enterprise feature without a valid license."); - } - if constexpr (!coordination::allow_ha) { - throw QueryRuntimeException( - "High availability is experimental feature. Please set MG_EXPERIMENTAL_HIGH_AVAILABILITY compile flag to " - "be able to use this functionality."); - } if (!FLAGS_raft_server_id) { throw QueryRuntimeException("Only coordinator can register coordinator server!"); } @@ -1254,14 +1231,6 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param return callback; } case CoordinatorQuery::Action::SHOW_INSTANCES: { - if (!license::global_license_checker.IsEnterpriseValidFast()) { - throw QueryException("Trying to use enterprise feature without a valid license."); - } - if constexpr (!coordination::allow_ha) { - throw QueryRuntimeException( - "High availability is experimental feature. 
Please set MG_EXPERIMENTAL_HIGH_AVAILABILITY compile flag to " - "be able to use this functionality."); - } if (!FLAGS_raft_server_id) { throw QueryRuntimeException("Only coordinator can run SHOW INSTANCES."); } diff --git a/src/query/replication_query_handler.hpp b/src/query/replication_query_handler.hpp index 011548bd4..f2e0f8b19 100644 --- a/src/query/replication_query_handler.hpp +++ b/src/query/replication_query_handler.hpp @@ -53,10 +53,10 @@ struct ReplicationQueryHandler { const std::optional &main_uuid) = 0; // as MAIN, define and connect to REPLICAs - virtual auto TryRegisterReplica(const memgraph::replication::ReplicationClientConfig &config, bool send_swap_uuid) + virtual auto TryRegisterReplica(const memgraph::replication::ReplicationClientConfig &config) -> utils::BasicResult = 0; - virtual auto RegisterReplica(const memgraph::replication::ReplicationClientConfig &config, bool send_swap_uuid) + virtual auto RegisterReplica(const memgraph::replication::ReplicationClientConfig &config) -> utils::BasicResult = 0; // as MAIN, remove a REPLICA connection diff --git a/src/replication_handler/include/replication_handler/replication_handler.hpp b/src/replication_handler/include/replication_handler/replication_handler.hpp index 3e8e21265..0e1d15148 100644 --- a/src/replication_handler/include/replication_handler/replication_handler.hpp +++ b/src/replication_handler/include/replication_handler/replication_handler.hpp @@ -123,10 +123,10 @@ struct ReplicationHandler : public memgraph::query::ReplicationQueryHandler { const std::optional &main_uuid) override; // as MAIN, define and connect to REPLICAs - auto TryRegisterReplica(const memgraph::replication::ReplicationClientConfig &config, bool send_swap_uuid) + auto TryRegisterReplica(const memgraph::replication::ReplicationClientConfig &config) -> memgraph::utils::BasicResult override; - auto RegisterReplica(const memgraph::replication::ReplicationClientConfig &config, bool send_swap_uuid) + auto RegisterReplica(const memgraph::replication::ReplicationClientConfig &config) -> memgraph::utils::BasicResult override; // as MAIN, remove a REPLICA connection @@ -145,8 +145,8 @@ struct ReplicationHandler : public memgraph::query::ReplicationQueryHandler { auto GetReplicaUUID() -> std::optional; private: - template - auto RegisterReplica_(const memgraph::replication::ReplicationClientConfig &config, bool send_swap_uuid) + template + auto RegisterReplica_(const memgraph::replication::ReplicationClientConfig &config) -> memgraph::utils::BasicResult { MG_ASSERT(repl_state_.IsMain(), "Only main instance can register a replica!"); auto maybe_client = repl_state_.RegisterReplica(config); @@ -172,7 +172,7 @@ struct ReplicationHandler : public memgraph::query::ReplicationQueryHandler { } const auto main_uuid = std::get(dbms_handler_.ReplicationState().ReplicationData()).uuid_; - if (send_swap_uuid) { + if constexpr (SendSwapUUID) { if (!memgraph::replication_coordination_glue::SendSwapMainUUIDRpc(maybe_client.GetValue()->rpc_client_, main_uuid)) { return memgraph::query::RegisterReplicaError::ERROR_ACCEPTING_MAIN; @@ -205,9 +205,6 @@ struct ReplicationHandler : public memgraph::query::ReplicationQueryHandler { if (state == storage::replication::ReplicaState::DIVERGED_FROM_MAIN) { return false; } - if (state == storage::replication::ReplicaState::MAYBE_BEHIND) { - return AllowRPCFailure; - } return true; }); diff --git a/src/replication_handler/replication_handler.cpp b/src/replication_handler/replication_handler.cpp index 747f327e4..0d95cbd51 100644 --- 
a/src/replication_handler/replication_handler.cpp +++ b/src/replication_handler/replication_handler.cpp @@ -192,12 +192,12 @@ bool ReplicationHandler::SetReplicationRoleMain() { bool ReplicationHandler::SetReplicationRoleReplica(const memgraph::replication::ReplicationServerConfig &config, const std::optional &main_uuid) { - return SetReplicationRoleReplica_(config, main_uuid); + return SetReplicationRoleReplica_(config, main_uuid); } bool ReplicationHandler::TrySetReplicationRoleReplica(const memgraph::replication::ReplicationServerConfig &config, const std::optional &main_uuid) { - return SetReplicationRoleReplica_(config, main_uuid); + return SetReplicationRoleReplica_(config, main_uuid); } bool ReplicationHandler::DoReplicaToMainPromotion(const utils::UUID &main_uuid) { @@ -226,16 +226,14 @@ bool ReplicationHandler::DoReplicaToMainPromotion(const utils::UUID &main_uuid) }; // as MAIN, define and connect to REPLICAs -auto ReplicationHandler::TryRegisterReplica(const memgraph::replication::ReplicationClientConfig &config, - bool send_swap_uuid) +auto ReplicationHandler::TryRegisterReplica(const memgraph::replication::ReplicationClientConfig &config) -> memgraph::utils::BasicResult { - return RegisterReplica_(config, send_swap_uuid); + return RegisterReplica_(config); } -auto ReplicationHandler::RegisterReplica(const memgraph::replication::ReplicationClientConfig &config, - bool send_swap_uuid) +auto ReplicationHandler::RegisterReplica(const memgraph::replication::ReplicationClientConfig &config) -> memgraph::utils::BasicResult { - return RegisterReplica_(config, send_swap_uuid); + return RegisterReplica_(config); } auto ReplicationHandler::UnregisterReplica(std::string_view name) -> memgraph::query::UnregisterReplicaResult { diff --git a/src/storage/v2/name_id_mapper.hpp b/src/storage/v2/name_id_mapper.hpp index d1e8293f9..2c5aee352 100644 --- a/src/storage/v2/name_id_mapper.hpp +++ b/src/storage/v2/name_id_mapper.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source diff --git a/src/utils/functional.hpp b/src/utils/functional.hpp index 299e49612..e0714de2a 100644 --- a/src/utils/functional.hpp +++ b/src/utils/functional.hpp @@ -14,14 +14,13 @@ #include #include +#include + namespace memgraph::utils { -template ::type, class V = std::vector> -V fmap(F &&f, const std::vector &v) { - V r; - r.reserve(v.size()); - std::ranges::transform(v, std::back_inserter(r), std::forward(f)); - return r; +template ::type> +auto fmap(F &&f, std::vector const &v) -> std::vector { + return v | ranges::views::transform(std::forward(f)) | ranges::to>(); } } // namespace memgraph::utils diff --git a/tests/e2e/CMakeLists.txt b/tests/e2e/CMakeLists.txt index b8fee9940..1876074ee 100644 --- a/tests/e2e/CMakeLists.txt +++ b/tests/e2e/CMakeLists.txt @@ -56,7 +56,6 @@ add_subdirectory(python_query_modules_reloading) add_subdirectory(analyze_graph) add_subdirectory(transaction_queue) add_subdirectory(mock_api) -#add_subdirectory(graphql) add_subdirectory(disk_storage) add_subdirectory(load_csv) add_subdirectory(init_file_flags) @@ -77,10 +76,7 @@ add_subdirectory(query_modules_storage_modes) add_subdirectory(garbage_collection) add_subdirectory(query_planning) add_subdirectory(awesome_functions) - -if (MG_EXPERIMENTAL_HIGH_AVAILABILITY) - add_subdirectory(high_availability_experimental) -endif () +add_subdirectory(high_availability) add_subdirectory(replication_experimental) diff --git a/tests/e2e/configuration/default_config.py b/tests/e2e/configuration/default_config.py index de05a5617..558cb63f5 100644 --- a/tests/e2e/configuration/default_config.py +++ b/tests/e2e/configuration/default_config.py @@ -228,6 +228,6 @@ startup_config_dict = { "experimental_enabled": ( "", "", - "Experimental features to be used, comma seperated. Options [system-replication]", + "Experimental features to be used, comma seperated. Options [system-replication, high-availability]", ), } diff --git a/tests/e2e/high_availability/CMakeLists.txt b/tests/e2e/high_availability/CMakeLists.txt new file mode 100644 index 000000000..47a1781aa --- /dev/null +++ b/tests/e2e/high_availability/CMakeLists.txt @@ -0,0 +1,15 @@ +find_package(gflags REQUIRED) + +copy_e2e_python_files(high_availability coordinator.py) +copy_e2e_python_files(high_availability single_coordinator.py) +copy_e2e_python_files(high_availability coord_cluster_registration.py) +copy_e2e_python_files(high_availability distributed_coords.py) +copy_e2e_python_files(high_availability disable_writing_on_main_after_restart.py) +copy_e2e_python_files(high_availability manual_setting_replicas.py) +copy_e2e_python_files(high_availability not_replicate_from_old_main.py) +copy_e2e_python_files(high_availability common.py) +copy_e2e_python_files(high_availability workloads.yaml) + +copy_e2e_python_files_from_parent_folder(high_availability ".." memgraph.py) +copy_e2e_python_files_from_parent_folder(high_availability ".." interactive_mg_runner.py) +copy_e2e_python_files_from_parent_folder(high_availability ".." 
mg_utils.py) diff --git a/tests/e2e/high_availability_experimental/common.py b/tests/e2e/high_availability/common.py similarity index 100% rename from tests/e2e/high_availability_experimental/common.py rename to tests/e2e/high_availability/common.py diff --git a/tests/e2e/high_availability_experimental/coord_cluster_registration.py b/tests/e2e/high_availability/coord_cluster_registration.py similarity index 97% rename from tests/e2e/high_availability_experimental/coord_cluster_registration.py rename to tests/e2e/high_availability/coord_cluster_registration.py index cccaec5ad..8b093b5c4 100644 --- a/tests/e2e/high_availability_experimental/coord_cluster_registration.py +++ b/tests/e2e/high_availability/coord_cluster_registration.py @@ -31,6 +31,7 @@ TEMP_DIR = tempfile.TemporaryDirectory().name MEMGRAPH_INSTANCES_DESCRIPTION = { "instance_1": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7687", "--log-level", @@ -44,6 +45,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "instance_2": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7688", "--log-level", @@ -57,6 +59,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "instance_3": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7689", "--log-level", @@ -70,6 +73,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "coordinator_1": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7690", "--log-level=TRACE", @@ -81,6 +85,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "coordinator_2": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7691", "--log-level=TRACE", @@ -92,6 +97,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "coordinator_3": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7692", "--log-level=TRACE", diff --git a/tests/e2e/high_availability_experimental/coordinator.py b/tests/e2e/high_availability/coordinator.py similarity index 100% rename from tests/e2e/high_availability_experimental/coordinator.py rename to tests/e2e/high_availability/coordinator.py diff --git a/tests/e2e/high_availability_experimental/disable_writing_on_main_after_restart.py b/tests/e2e/high_availability/disable_writing_on_main_after_restart.py similarity index 94% rename from tests/e2e/high_availability_experimental/disable_writing_on_main_after_restart.py rename to tests/e2e/high_availability/disable_writing_on_main_after_restart.py index ad3d95828..53d570a6d 100644 --- a/tests/e2e/high_availability_experimental/disable_writing_on_main_after_restart.py +++ b/tests/e2e/high_availability/disable_writing_on_main_after_restart.py @@ -31,6 +31,7 @@ TEMP_DIR = tempfile.TemporaryDirectory().name MEMGRAPH_INSTANCES_DESCRIPTION = { "instance_1": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7687", "--log-level", @@ -49,6 +50,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "instance_2": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7688", "--log-level", @@ -67,6 +69,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "instance_3": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7689", "--log-level", @@ -85,6 +88,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "coordinator_1": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7690", "--log-level=TRACE", @@ -96,6 +100,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "coordinator_2": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7691", "--log-level=TRACE", @@ -107,6 +112,7 @@ 
MEMGRAPH_INSTANCES_DESCRIPTION = { }, "coordinator_3": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7692", "--log-level=TRACE", diff --git a/tests/e2e/high_availability_experimental/distributed_coords.py b/tests/e2e/high_availability/distributed_coords.py similarity index 94% rename from tests/e2e/high_availability_experimental/distributed_coords.py rename to tests/e2e/high_availability/distributed_coords.py index 052cb6dba..fde8889a5 100644 --- a/tests/e2e/high_availability_experimental/distributed_coords.py +++ b/tests/e2e/high_availability/distributed_coords.py @@ -31,6 +31,7 @@ TEMP_DIR = tempfile.TemporaryDirectory().name MEMGRAPH_INSTANCES_DESCRIPTION = { "instance_1": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7687", "--log-level", @@ -44,6 +45,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "instance_2": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7688", "--log-level", @@ -57,6 +59,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "instance_3": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7689", "--log-level", @@ -70,6 +73,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "coordinator_1": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7690", "--log-level=TRACE", @@ -81,6 +85,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "coordinator_2": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7691", "--log-level=TRACE", @@ -92,6 +97,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "coordinator_3": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7692", "--log-level=TRACE", diff --git a/tests/e2e/high_availability_experimental/manual_setting_replicas.py b/tests/e2e/high_availability/manual_setting_replicas.py similarity index 84% rename from tests/e2e/high_availability_experimental/manual_setting_replicas.py rename to tests/e2e/high_availability/manual_setting_replicas.py index f2d48ffd7..b0b0965bc 100644 --- a/tests/e2e/high_availability_experimental/manual_setting_replicas.py +++ b/tests/e2e/high_availability/manual_setting_replicas.py @@ -14,8 +14,7 @@ import sys import interactive_mg_runner import pytest -from common import execute_and_fetch_all -from mg_utils import mg_sleep_and_assert +from common import connect, execute_and_fetch_all interactive_mg_runner.SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) interactive_mg_runner.PROJECT_DIR = os.path.normpath( @@ -26,20 +25,28 @@ interactive_mg_runner.MEMGRAPH_BINARY = os.path.normpath(os.path.join(interactiv MEMGRAPH_INSTANCES_DESCRIPTION = { "instance_3": { - "args": ["--bolt-port", "7687", "--log-level", "TRACE", "--coordinator-server-port", "10013"], + "args": [ + "--experimental-enabled=high-availability", + "--bolt-port", + "7687", + "--log-level", + "TRACE", + "--coordinator-server-port", + "10013", + ], "log_file": "main.log", "setup_queries": [], }, } -def test_no_manual_setup_on_main(connection): +def test_no_manual_setup_on_main(): # Goal of this test is to check that all manual registration actions are disabled on instances with coordiantor server port # 1 interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) - any_main = connection(7687, "instance_3").cursor() + any_main = connect(host="localhost", port=7687).cursor() with pytest.raises(Exception) as e: execute_and_fetch_all(any_main, "REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:10001';") assert str(e.value) == "Can't register replica manually on instance with coordinator server port." 
diff --git a/tests/e2e/high_availability_experimental/not_replicate_from_old_main.py b/tests/e2e/high_availability/not_replicate_from_old_main.py similarity index 79% rename from tests/e2e/high_availability_experimental/not_replicate_from_old_main.py rename to tests/e2e/high_availability/not_replicate_from_old_main.py index b859cae84..201ff7afa 100644 --- a/tests/e2e/high_availability_experimental/not_replicate_from_old_main.py +++ b/tests/e2e/high_availability/not_replicate_from_old_main.py @@ -16,7 +16,7 @@ import tempfile import interactive_mg_runner import pytest -from common import execute_and_fetch_all, safe_execute +from common import connect, execute_and_fetch_all from mg_utils import mg_sleep_and_assert interactive_mg_runner.SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) @@ -28,12 +28,12 @@ interactive_mg_runner.MEMGRAPH_BINARY = os.path.normpath(os.path.join(interactiv MEMGRAPH_FIRST_CLUSTER_DESCRIPTION = { "shared_replica": { - "args": ["--bolt-port", "7688", "--log-level", "TRACE"], + "args": ["--experimental-enabled=high-availability", "--bolt-port", "7688", "--log-level", "TRACE"], "log_file": "replica2.log", "setup_queries": ["SET REPLICATION ROLE TO REPLICA WITH PORT 10001;"], }, "main1": { - "args": ["--bolt-port", "7687", "--log-level", "TRACE"], + "args": ["--experimental-enabled=high-availability", "--bolt-port", "7687", "--log-level", "TRACE"], "log_file": "main.log", "setup_queries": ["REGISTER REPLICA shared_replica SYNC TO '127.0.0.1:10001' ;"], }, @@ -42,12 +42,12 @@ MEMGRAPH_FIRST_CLUSTER_DESCRIPTION = { MEMGRAPH_SECOND_CLUSTER_DESCRIPTION = { "replica": { - "args": ["--bolt-port", "7689", "--log-level", "TRACE"], + "args": ["--experimental-enabled=high-availability", "--bolt-port", "7689", "--log-level", "TRACE"], "log_file": "replica.log", "setup_queries": ["SET REPLICATION ROLE TO REPLICA WITH PORT 10002;"], }, "main_2": { - "args": ["--bolt-port", "7690", "--log-level", "TRACE"], + "args": ["--experimental-enabled=high-availability", "--bolt-port", "7690", "--log-level", "TRACE"], "log_file": "main_2.log", "setup_queries": [ "REGISTER REPLICA shared_replica SYNC TO '127.0.0.1:10001' ;", @@ -57,7 +57,7 @@ MEMGRAPH_SECOND_CLUSTER_DESCRIPTION = { } -def test_replication_works_on_failover(connection): +def test_replication_works_on_failover(): # Goal of this test is to check that after changing `shared_replica` # to be part of new cluster, `main` (old cluster) can't write any more to it @@ -65,7 +65,7 @@ def test_replication_works_on_failover(connection): interactive_mg_runner.start_all_keep_others(MEMGRAPH_FIRST_CLUSTER_DESCRIPTION) # 2 - main_cursor = connection(7687, "main1").cursor() + main_cursor = connect(host="localhost", port=7687).cursor() expected_data_on_main = [ ("shared_replica", "127.0.0.1:10001", "sync", 0, 0, "ready"), ] @@ -76,7 +76,7 @@ def test_replication_works_on_failover(connection): interactive_mg_runner.start_all_keep_others(MEMGRAPH_SECOND_CLUSTER_DESCRIPTION) # 4 - new_main_cursor = connection(7690, "main_2").cursor() + new_main_cursor = connect(host="localhost", port=7690).cursor() def retrieve_data_show_replicas(): return sorted(list(execute_and_fetch_all(new_main_cursor, "SHOW REPLICAS;"))) @@ -88,14 +88,11 @@ def test_replication_works_on_failover(connection): mg_sleep_and_assert(expected_data_on_new_main, retrieve_data_show_replicas) # 5 - shared_replica_cursor = connection(7688, "shared_replica").cursor() + shared_replica_cursor = connect(host="localhost", port=7688).cursor() with pytest.raises(Exception) as e: 
execute_and_fetch_all(main_cursor, "CREATE ();") - assert ( - str(e.value) - == "Replication Exception: At least one SYNC replica has not confirmed committing last transaction. Check the status of the replicas using 'SHOW REPLICAS' query." - ) + assert "At least one SYNC replica has not confirmed committing last transaction." in str(e.value) res = execute_and_fetch_all(main_cursor, "MATCH (n) RETURN count(n) as count;")[0][0] assert res == 1, "Vertex should be created" @@ -115,7 +112,7 @@ def test_replication_works_on_failover(connection): interactive_mg_runner.stop_all() -def test_not_replicate_old_main_register_new_cluster(connection): +def test_not_replicate_old_main_register_new_cluster(): # Goal of this test is to check that although replica is registered in one cluster # it can be re-registered to new cluster # This flow checks if Registering replica is idempotent and that old main cannot talk to replica @@ -130,6 +127,7 @@ def test_not_replicate_old_main_register_new_cluster(connection): MEMGRAPH_FISRT_COORD_CLUSTER_DESCRIPTION = { "shared_instance": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7688", "--log-level", @@ -143,6 +141,7 @@ def test_not_replicate_old_main_register_new_cluster(connection): }, "instance_2": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7689", "--log-level", @@ -155,7 +154,14 @@ def test_not_replicate_old_main_register_new_cluster(connection): "setup_queries": [], }, "coordinator_1": { - "args": ["--bolt-port", "7690", "--log-level=TRACE", "--raft-server-id=1", "--raft-server-port=10111"], + "args": [ + "--experimental-enabled=high-availability", + "--bolt-port", + "7690", + "--log-level=TRACE", + "--raft-server-id=1", + "--raft-server-port=10111", + ], "log_file": "coordinator.log", "setup_queries": [ "REGISTER INSTANCE shared_instance ON '127.0.0.1:10011' WITH '127.0.0.1:10001';", @@ -170,7 +176,7 @@ def test_not_replicate_old_main_register_new_cluster(connection): # 2 - first_cluster_coord_cursor = connection(7690, "coord_1").cursor() + first_cluster_coord_cursor = connect(host="localhost", port=7690).cursor() def show_repl_cluster(): return sorted(list(execute_and_fetch_all(first_cluster_coord_cursor, "SHOW INSTANCES;"))) @@ -188,6 +194,7 @@ def test_not_replicate_old_main_register_new_cluster(connection): MEMGRAPH_SECOND_COORD_CLUSTER_DESCRIPTION = { "instance_3": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7687", "--log-level", @@ -200,14 +207,21 @@ def test_not_replicate_old_main_register_new_cluster(connection): "setup_queries": [], }, "coordinator_2": { - "args": ["--bolt-port", "7691", "--log-level=TRACE", "--raft-server-id=1", "--raft-server-port=10112"], + "args": [ + "--experimental-enabled=high-availability", + "--bolt-port", + "7691", + "--log-level=TRACE", + "--raft-server-id=1", + "--raft-server-port=10112", + ], "log_file": "coordinator.log", "setup_queries": [], }, } interactive_mg_runner.start_all_keep_others(MEMGRAPH_SECOND_COORD_CLUSTER_DESCRIPTION) - second_cluster_coord_cursor = connection(7691, "coord_2").cursor() + second_cluster_coord_cursor = connect(host="localhost", port=7691).cursor() execute_and_fetch_all( second_cluster_coord_cursor, "REGISTER INSTANCE shared_instance ON '127.0.0.1:10011' WITH '127.0.0.1:10001';" ) @@ -230,24 +244,21 @@ def test_not_replicate_old_main_register_new_cluster(connection): mg_sleep_and_assert(expected_data_up_second_cluster, show_repl_cluster) # 5 - main_1_cursor = connection(7689, "main_1").cursor() + 
main_1_cursor = connect(host="localhost", port=7689).cursor() with pytest.raises(Exception) as e: execute_and_fetch_all(main_1_cursor, "CREATE ();") - assert ( - str(e.value) - == "Replication Exception: At least one SYNC replica has not confirmed committing last transaction. Check the status of the replicas using 'SHOW REPLICAS' query." - ) + assert "At least one SYNC replica has not confirmed committing last transaction." in str(e.value) - shared_replica_cursor = connection(7688, "shared_replica").cursor() + shared_replica_cursor = connect(host="localhost", port=7688).cursor() res = execute_and_fetch_all(shared_replica_cursor, "MATCH (n) RETURN count(n);")[0][0] assert res == 0, "Old main should not replicate to 'shared' replica" # 6 - main_2_cursor = connection(7687, "main_2").cursor() + main_2_cursor = connect(host="localhost", port=7687).cursor() execute_and_fetch_all(main_2_cursor, "CREATE ();") - shared_replica_cursor = connection(7688, "shared_replica").cursor() + shared_replica_cursor = connect(host="localhost", port=7688).cursor() res = execute_and_fetch_all(shared_replica_cursor, "MATCH (n) RETURN count(n);")[0][0] assert res == 1, "New main should replicate to 'shared' replica" diff --git a/tests/e2e/high_availability_experimental/single_coordinator.py b/tests/e2e/high_availability/single_coordinator.py similarity index 97% rename from tests/e2e/high_availability_experimental/single_coordinator.py rename to tests/e2e/high_availability/single_coordinator.py index 8e620e7e4..ba6dee3b6 100644 --- a/tests/e2e/high_availability_experimental/single_coordinator.py +++ b/tests/e2e/high_availability/single_coordinator.py @@ -30,6 +30,7 @@ TEMP_DIR = tempfile.TemporaryDirectory().name MEMGRAPH_INSTANCES_DESCRIPTION = { "instance_1": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7688", "--log-level", @@ -43,6 +44,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "instance_2": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7689", "--log-level", @@ -56,6 +58,7 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { }, "instance_3": { "args": [ + "--experimental-enabled=high-availability", "--bolt-port", "7687", "--log-level", @@ -68,7 +71,14 @@ MEMGRAPH_INSTANCES_DESCRIPTION = { "setup_queries": [], }, "coordinator": { - "args": ["--bolt-port", "7690", "--log-level=TRACE", "--raft-server-id=1", "--raft-server-port=10111"], + "args": [ + "--experimental-enabled=high-availability", + "--bolt-port", + "7690", + "--log-level=TRACE", + "--raft-server-id=1", + "--raft-server-port=10111", + ], "log_file": "coordinator.log", "setup_queries": [ "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001';", @@ -198,10 +208,7 @@ def test_replication_works_on_replica_instance_restart(): instance_1_cursor = connect(host="localhost", port=7688).cursor() with pytest.raises(Exception) as e: execute_and_fetch_all(main_cursor, "CREATE ();") - assert ( - str(e.value) - == "Replication Exception: At least one SYNC replica has not confirmed committing last transaction. Check the status of the replicas using 'SHOW REPLICAS' query." - ) + assert "At least one SYNC replica has not confirmed committing last transaction." 
in str(e.value) res_instance_1 = execute_and_fetch_all(instance_1_cursor, "MATCH (n) RETURN count(n)")[0][0] assert res_instance_1 == 1 diff --git a/tests/e2e/high_availability_experimental/workloads.yaml b/tests/e2e/high_availability/workloads.yaml similarity index 57% rename from tests/e2e/high_availability_experimental/workloads.yaml rename to tests/e2e/high_availability/workloads.yaml index 2159c374c..75f17b2f7 100644 --- a/tests/e2e/high_availability_experimental/workloads.yaml +++ b/tests/e2e/high_availability/workloads.yaml @@ -1,19 +1,19 @@ ha_cluster: &ha_cluster cluster: replica_1: - args: ["--bolt-port", "7688", "--log-level=TRACE", "--coordinator-server-port=10011"] + args: ["--experimental-enabled=high-availability", "--bolt-port", "7688", "--log-level=TRACE", "--coordinator-server-port=10011"] log_file: "replication-e2e-replica1.log" setup_queries: [] replica_2: - args: ["--bolt-port", "7689", "--log-level=TRACE", "--coordinator-server-port=10012"] + args: ["--experimental-enabled=high-availability", "--bolt-port", "7689", "--log-level=TRACE", "--coordinator-server-port=10012"] log_file: "replication-e2e-replica2.log" setup_queries: [] main: - args: ["--bolt-port", "7687", "--log-level=TRACE", "--coordinator-server-port=10013"] + args: ["--experimental-enabled=high-availability", "--bolt-port", "7687", "--log-level=TRACE", "--coordinator-server-port=10013"] log_file: "replication-e2e-main.log" setup_queries: [] coordinator: - args: ["--bolt-port", "7690", "--log-level=TRACE", "--raft-server-id=1", "--raft-server-port=10111"] + args: ["--experimental-enabled=high-availability", "--bolt-port", "7690", "--log-level=TRACE", "--raft-server-id=1", "--raft-server-port=10111"] log_file: "replication-e2e-coordinator.log" setup_queries: [ "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001';", @@ -25,29 +25,29 @@ ha_cluster: &ha_cluster workloads: - name: "Coordinator" binary: "tests/e2e/pytest_runner.sh" - args: ["high_availability_experimental/coordinator.py"] + args: ["high_availability/coordinator.py"] <<: *ha_cluster - name: "Single coordinator" binary: "tests/e2e/pytest_runner.sh" - args: ["high_availability_experimental/single_coordinator.py"] + args: ["high_availability/single_coordinator.py"] - name: "Disabled manual setting of replication cluster" binary: "tests/e2e/pytest_runner.sh" - args: ["high_availability_experimental/manual_setting_replicas.py"] + args: ["high_availability/manual_setting_replicas.py"] - name: "Coordinator cluster registration" binary: "tests/e2e/pytest_runner.sh" - args: ["high_availability_experimental/coord_cluster_registration.py"] + args: ["high_availability/coord_cluster_registration.py"] - name: "Not replicate from old main" binary: "tests/e2e/pytest_runner.sh" - args: ["high_availability_experimental/not_replicate_from_old_main.py"] + args: ["high_availability/not_replicate_from_old_main.py"] - name: "Disable writing on main after restart" binary: "tests/e2e/pytest_runner.sh" - args: ["high_availability_experimental/disable_writing_on_main_after_restart.py"] + args: ["high_availability/disable_writing_on_main_after_restart.py"] - name: "Distributed coordinators" binary: "tests/e2e/pytest_runner.sh" - args: ["high_availability_experimental/distributed_coords.py"] + args: ["high_availability/distributed_coords.py"] diff --git a/tests/e2e/high_availability_experimental/CMakeLists.txt b/tests/e2e/high_availability_experimental/CMakeLists.txt deleted file mode 100644 index bbef1ebc7..000000000 --- 
a/tests/e2e/high_availability_experimental/CMakeLists.txt +++ /dev/null @@ -1,15 +0,0 @@ -find_package(gflags REQUIRED) - -copy_e2e_python_files(ha_experimental coordinator.py) -copy_e2e_python_files(ha_experimental single_coordinator.py) -copy_e2e_python_files(ha_experimental coord_cluster_registration.py) -copy_e2e_python_files(ha_experimental distributed_coords.py) -copy_e2e_python_files(ha_experimental manual_setting_replicas.py) -copy_e2e_python_files(ha_experimental not_replicate_from_old_main.py) -copy_e2e_python_files(ha_experimental disable_writing_on_main_after_restart.py) -copy_e2e_python_files(ha_experimental common.py) -copy_e2e_python_files(ha_experimental workloads.yaml) - -copy_e2e_python_files_from_parent_folder(ha_experimental ".." memgraph.py) -copy_e2e_python_files_from_parent_folder(ha_experimental ".." interactive_mg_runner.py) -copy_e2e_python_files_from_parent_folder(ha_experimental ".." mg_utils.py) diff --git a/tests/e2e/interactive_mg_runner.py b/tests/e2e/interactive_mg_runner.py index 06908747e..efa4dc3d5 100755 --- a/tests/e2e/interactive_mg_runner.py +++ b/tests/e2e/interactive_mg_runner.py @@ -160,6 +160,12 @@ def kill(context, name, keep_directories=True): MEMGRAPH_INSTANCES.pop(name) +def kill_all(context, keep_directories=True): + for key in MEMGRAPH_INSTANCES.keys(): + MEMGRAPH_INSTANCES[key].kill(keep_directories) + MEMGRAPH_INSTANCES.clear() + + def cleanup_directories_on_exit(value=True): CLEANUP_DIRECTORIES_ON_EXIT = value diff --git a/tests/e2e/replication/show_while_creating_invalid_state.py b/tests/e2e/replication/show_while_creating_invalid_state.py index abd5b5f48..2ddd466d9 100644 --- a/tests/e2e/replication/show_while_creating_invalid_state.py +++ b/tests/e2e/replication/show_while_creating_invalid_state.py @@ -795,7 +795,7 @@ def test_async_replication_when_main_is_killed(): "data_directory": f"{data_directory_main.name}", }, } - + interactive_mg_runner.kill_all(CONFIGURATION) interactive_mg_runner.start_all(CONFIGURATION) # 1/ @@ -878,7 +878,7 @@ def test_sync_replication_when_main_is_killed(): "data_directory": f"{data_directory_main.name}", }, } - + interactive_mg_runner.kill_all(CONFIGURATION) interactive_mg_runner.start_all(CONFIGURATION) # 1/ @@ -1990,5 +1990,4 @@ def test_replication_not_messed_up_by_ShowIndexInfo(connection): if __name__ == "__main__": - sys.exit(pytest.main([__file__, "-k", "test_basic_recovery"])) sys.exit(pytest.main([__file__, "-rA"])) diff --git a/tests/unit/storage_v2_replication.cpp b/tests/unit/storage_v2_replication.cpp index 64366f331..4ae2101cb 100644 --- a/tests/unit/storage_v2_replication.cpp +++ b/tests/unit/storage_v2_replication.cpp @@ -149,14 +149,12 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { }, std::nullopt); - const auto ® = main.repl_handler.TryRegisterReplica( - ReplicationClientConfig{ - .name = "REPLICA", - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = ports[0], - }, - true); + const auto ® = main.repl_handler.TryRegisterReplica(ReplicationClientConfig{ + .name = "REPLICA", + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = ports[0], + }); ASSERT_FALSE(reg.HasError()) << (int)reg.GetError(); // vertex create @@ -453,24 +451,20 @@ TEST_F(ReplicationTest, MultipleSynchronousReplicationTest) { std::nullopt); ASSERT_FALSE(main.repl_handler - .TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[0], - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = ports[0], - }, - true) + 
.TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[0], + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = ports[0], + }) .HasError()); ASSERT_FALSE(main.repl_handler - .TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[1], - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = ports[1], - }, - true) + .TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[1], + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = ports[1], + }) .HasError()); const auto *vertex_label = "label"; @@ -604,14 +598,12 @@ TEST_F(ReplicationTest, RecoveryProcess) { }, std::nullopt); ASSERT_FALSE(main.repl_handler - .TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[0], - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = ports[0], - }, - true) + .TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[0], + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = ports[0], + }) .HasError()); ASSERT_EQ(main.db.storage()->GetReplicaState(replicas[0]), ReplicaState::RECOVERY); @@ -684,14 +676,12 @@ TEST_F(ReplicationTest, BasicAsynchronousReplicationTest) { std::nullopt); ASSERT_FALSE(main.repl_handler - .TryRegisterReplica( - ReplicationClientConfig{ - .name = "REPLICA_ASYNC", - .mode = ReplicationMode::ASYNC, - .ip_address = local_host, - .port = ports[1], - }, - true) + .TryRegisterReplica(ReplicationClientConfig{ + .name = "REPLICA_ASYNC", + .mode = ReplicationMode::ASYNC, + .ip_address = local_host, + .port = ports[1], + }) .HasError()); static constexpr size_t vertices_create_num = 10; @@ -742,25 +732,21 @@ TEST_F(ReplicationTest, EpochTest) { std::nullopt); ASSERT_FALSE(main.repl_handler - .TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[0], - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = ports[0], - }, - true) + .TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[0], + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = ports[0], + }) .HasError()); ASSERT_FALSE(main.repl_handler - .TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[1], - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = 10001, - }, - true) + .TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[1], + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = 10001, + }) .HasError()); std::optional vertex_gid; @@ -789,15 +775,12 @@ TEST_F(ReplicationTest, EpochTest) { ASSERT_TRUE(replica1.repl_handler.SetReplicationRoleMain()); ASSERT_FALSE(replica1.repl_handler - .TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[1], - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = 10001, - }, - true) - + .TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[1], + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = 10001, + }) .HasError()); { @@ -826,15 +809,12 @@ TEST_F(ReplicationTest, EpochTest) { }, std::nullopt); ASSERT_TRUE(main.repl_handler - .TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[0], - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = ports[0], - }, - true) - + .TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[0], + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = ports[0], + }) .HasError()); { @@ -875,27 +855,21 @@ TEST_F(ReplicationTest, ReplicationInformation) { std::nullopt); ASSERT_FALSE(main.repl_handler - .TryRegisterReplica( - 
ReplicationClientConfig{ - .name = replicas[0], - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = replica1_port, - }, - true) - + .TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[0], + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = replica1_port, + }) .HasError()); ASSERT_FALSE(main.repl_handler - .TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[1], - .mode = ReplicationMode::ASYNC, - .ip_address = local_host, - .port = replica2_port, - }, - true) - + .TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[1], + .mode = ReplicationMode::ASYNC, + .ip_address = local_host, + .port = replica2_port, + }) .HasError()); ASSERT_TRUE(main.repl_state.IsMain()); @@ -939,25 +913,21 @@ TEST_F(ReplicationTest, ReplicationReplicaWithExistingName) { }, std::nullopt); ASSERT_FALSE(main.repl_handler - .TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[0], - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = replica1_port, - }, - true) + .TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[0], + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = replica1_port, + }) .HasError()); ASSERT_TRUE(main.repl_handler - .TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[0], - .mode = ReplicationMode::ASYNC, - .ip_address = local_host, - .port = replica2_port, - }, - true) + .TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[0], + .mode = ReplicationMode::ASYNC, + .ip_address = local_host, + .port = replica2_port, + }) .GetError() == RegisterReplicaError::NAME_EXISTS); } @@ -982,25 +952,21 @@ TEST_F(ReplicationTest, ReplicationReplicaWithExistingEndPoint) { std::nullopt); ASSERT_FALSE(main.repl_handler - .TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[0], - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = common_port, - }, - true) + .TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[0], + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = common_port, + }) .HasError()); ASSERT_TRUE(main.repl_handler - .TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[1], - .mode = ReplicationMode::ASYNC, - .ip_address = local_host, - .port = common_port, - }, - true) + .TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[1], + .mode = ReplicationMode::ASYNC, + .ip_address = local_host, + .port = common_port, + }) .GetError() == RegisterReplicaError::ENDPOINT_EXISTS); } @@ -1038,23 +1004,19 @@ TEST_F(ReplicationTest, RestoringReplicationAtStartupAfterDroppingReplica) { }, std::nullopt); - auto res = main->repl_handler.TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[0], - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = ports[0], - }, - true); + auto res = main->repl_handler.TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[0], + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = ports[0], + }); ASSERT_FALSE(res.HasError()) << (int)res.GetError(); - res = main->repl_handler.TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[1], - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = ports[1], - }, - true); + res = main->repl_handler.TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[1], + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = ports[1], + }); ASSERT_FALSE(res.HasError()) << (int)res.GetError(); auto replica_infos = 
main->db.storage()->ReplicasInfo(); @@ -1103,23 +1065,19 @@ TEST_F(ReplicationTest, RestoringReplicationAtStartup) { .port = ports[1], }, std::nullopt); - auto res = main->repl_handler.TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[0], - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = ports[0], - }, - true); + auto res = main->repl_handler.TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[0], + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = ports[0], + }); ASSERT_FALSE(res.HasError()); - res = main->repl_handler.TryRegisterReplica( - ReplicationClientConfig{ - .name = replicas[1], - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = ports[1], - }, - true); + res = main->repl_handler.TryRegisterReplica(ReplicationClientConfig{ + .name = replicas[1], + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = ports[1], + }); ASSERT_FALSE(res.HasError()); auto replica_infos = main->db.storage()->ReplicasInfo(); @@ -1157,13 +1115,11 @@ TEST_F(ReplicationTest, AddingInvalidReplica) { MinMemgraph main(main_conf); ASSERT_TRUE(main.repl_handler - .TryRegisterReplica( - ReplicationClientConfig{ - .name = "REPLICA", - .mode = ReplicationMode::SYNC, - .ip_address = local_host, - .port = ports[0], - }, - true) + .TryRegisterReplica(ReplicationClientConfig{ + .name = "REPLICA", + .mode = ReplicationMode::SYNC, + .ip_address = local_host, + .port = ports[0], + }) .GetError() == RegisterReplicaError::ERROR_ACCEPTING_MAIN); } From 61b9bb0f597e5d6f1a0e6a0dd32f6fc4a1eec700 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20Budiseli=C4=87?= Date: Mon, 19 Feb 2024 21:09:54 +0100 Subject: [PATCH 5/5] Add toolchain-v5 compatibility Revert to C++20 (#587) * Upgrade cppitertools, spdlog, fmt, rapidcheck * Make compilation work on both v4 and v5 toolchains --- CMakeLists.txt | 11 +- include/_mgp.hpp | 4 +- libs/CMakeLists.txt | 2 +- libs/librdtsc.patch | 16 ++- libs/rocksdb.patch | 21 --- libs/setup.sh | 7 +- src/auth/crypto.hpp | 4 +- src/communication/bolt/client.cpp | 5 +- src/communication/bolt/v1/fmt.hpp | 27 ++++ src/communication/fmt.hpp | 20 +++ src/communication/http/listener.hpp | 5 +- src/communication/listener.hpp | 3 +- src/communication/server.hpp | 3 +- src/communication/v2/server.hpp | 5 +- src/communication/v2/session.hpp | 24 ++-- src/communication/websocket/listener.cpp | 5 +- src/coordination/coordinator_instance.cpp | 1 + src/coordination/fmt.hpp | 60 ++++++++ src/dbms/inmemory/replication_handlers.cpp | 1 + src/dbms/utils.hpp | 0 .../include/distributed/lamport_clock.hpp | 1 + src/integrations/kafka/consumer.cpp | 3 +- src/integrations/kafka/fmt.hpp | 25 ++++ src/integrations/pulsar/consumer.cpp | 4 +- src/integrations/pulsar/fmt.hpp | 21 +++ src/io/network/fmt.hpp | 21 +++ src/io/network/stream_buffer.hpp | 3 +- src/kvstore/kvstore.hpp | 13 +- src/mg_import_csv.cpp | 5 + src/py/py.hpp | 7 +- src/query/common.hpp | 1 + src/query/fmt.hpp | 23 +++ src/query/plan/preprocess.cpp | 30 +++- src/query/plan/preprocess.hpp | 134 +++++++++++------- src/query/procedure/fmt.hpp | 82 +++++++++++ src/query/procedure/mg_procedure_helpers.cpp | 3 +- src/query/procedure/mg_procedure_helpers.hpp | 3 +- src/query/procedure/mg_procedure_impl.cpp | 1 + src/query/typed_value.cpp | 16 ++- .../include/replication/messages.hpp | 0 src/replication/messages.cpp | 0 src/replication/replication_client.cpp | 3 +- src/rpc/client.hpp | 2 + src/storage/v2/disk/storage.cpp | 14 +- src/storage/v2/disk/storage.hpp | 2 +- 
src/storage/v2/durability/snapshot.cpp | 1 + src/storage/v2/fmt.hpp | 23 +++ .../v2/replication/replication_client.cpp | 4 +- src/storage/v2/replication/serialization.cpp | 2 +- src/utils/async_timer.cpp | 9 +- src/utils/logging.hpp | 7 +- src/utils/message.hpp | 7 +- src/utils/stat.hpp | 19 +-- tests/e2e/replication/constraints.cpp | 3 +- tests/e2e/replication/indices.cpp | 3 +- .../e2e/replication/read_write_benchmark.cpp | 3 +- tests/e2e/triggers/privilige_check.cpp | 3 +- .../macro_benchmark/clients/pokec_client.cpp | 6 +- tests/manual/interactive_planning.cpp | 1 + tests/manual/query_hash.cpp | 3 +- tests/unit/query_plan_operator_to_string.cpp | 13 +- 61 files changed, 576 insertions(+), 172 deletions(-) delete mode 100644 libs/rocksdb.patch create mode 100644 src/communication/bolt/v1/fmt.hpp create mode 100644 src/communication/fmt.hpp create mode 100644 src/coordination/fmt.hpp delete mode 100644 src/dbms/utils.hpp create mode 100644 src/integrations/kafka/fmt.hpp create mode 100644 src/integrations/pulsar/fmt.hpp create mode 100644 src/io/network/fmt.hpp create mode 100644 src/query/fmt.hpp create mode 100644 src/query/procedure/fmt.hpp delete mode 100644 src/replication/include/replication/messages.hpp delete mode 100644 src/replication/messages.cpp create mode 100644 src/storage/v2/fmt.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 85e2b085c..028406447 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -189,7 +189,7 @@ add_custom_target(clean_all # is easier debugging of compilation and linker flags. set(CMAKE_EXPORT_COMPILE_COMMANDS ON) -set(CMAKE_CXX_STANDARD 23) +set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) # c99-designator is disabled because of required mixture of designated and # non-designated initializers in Python Query Module code (`py_module.cpp`). @@ -211,8 +211,13 @@ set(CMAKE_CXX_FLAGS_RELWITHDEBINFO # ** Static linking is allowed only for executables! ** set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libgcc -static-libstdc++") -# Use lld linker to speedup build -add_link_options(-fuse-ld=lld) # TODO: use mold linker +# Use lld linker to speedup build and use less memory. +add_link_options(-fuse-ld=lld) +# NOTE: Moving to latest Clang (probably starting from 15), lld stopped to work +# without explicit link_directories call. +string(REPLACE ":" " " LD_LIBS $ENV{LD_LIBRARY_PATH}) +separate_arguments(LD_LIBS) +link_directories(${LD_LIBS}) # release flags set(CMAKE_CXX_FLAGS_RELEASE "-O2 -DNDEBUG") diff --git a/include/_mgp.hpp b/include/_mgp.hpp index 4f6797739..8b67bc36a 100644 --- a/include/_mgp.hpp +++ b/include/_mgp.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -283,7 +283,7 @@ inline mgp_list *list_all_unique_constraints(mgp_graph *graph, mgp_memory *memor } // mgp_graph - + inline bool graph_is_transactional(mgp_graph *graph) { return MgInvoke(mgp_graph_is_transactional, graph); } inline bool graph_is_mutable(mgp_graph *graph) { return MgInvoke(mgp_graph_is_mutable, graph); } diff --git a/libs/CMakeLists.txt b/libs/CMakeLists.txt index fd6823ee5..7d568d548 100644 --- a/libs/CMakeLists.txt +++ b/libs/CMakeLists.txt @@ -16,7 +16,7 @@ set(GFLAGS_NOTHREADS OFF) # NOTE: config/generate.py depends on the gflags help XML format. 
find_package(gflags REQUIRED) -find_package(fmt 8.0.1) +find_package(fmt 8.0.1 REQUIRED) find_package(ZLIB 1.2.11 REQUIRED) set(LIB_DIR ${CMAKE_CURRENT_SOURCE_DIR}) diff --git a/libs/librdtsc.patch b/libs/librdtsc.patch index 70a98c94a..c6022adac 100644 --- a/libs/librdtsc.patch +++ b/libs/librdtsc.patch @@ -5,7 +5,7 @@ index ee9b58c..31359a9 100644 @@ -48,7 +48,7 @@ option(LIBRDTSC_USE_PMU "Enables PMU usage on ARM platforms" OFF) # | Library Build and Install Properties | # +--------------------------------------------------------+ - + -add_library(rdtsc SHARED +add_library(rdtsc src/cycles.c @@ -14,7 +14,7 @@ index ee9b58c..31359a9 100644 @@ -72,15 +72,6 @@ target_include_directories(rdtsc PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include ) - + -# Install directory changes depending on build mode -if (CMAKE_BUILD_TYPE MATCHES "^[Dd]ebug") - # During debug, the library will be installed into a local directory @@ -27,3 +27,15 @@ index ee9b58c..31359a9 100644 # Specifying what to export when installing (GNUInstallDirs required) install(TARGETS rdtsc EXPORT librstsc-config +diff --git a/include/librdtsc/common_timer.h b/include/librdtsc/common_timer.h +index a6922d8..080dc77 100644 +--- a/include/librdtsc/common_timer.h ++++ b/include/librdtsc/common_timer.h +@@ -2,6 +2,7 @@ + #define LIBRDTSC_COMMON_TIMER_H + + #include ++#include + + extern uint64_t rdtsc_get_tsc_freq_arch(); + extern uint64_t rdtsc_get_tsc_freq(); diff --git a/libs/rocksdb.patch b/libs/rocksdb.patch deleted file mode 100644 index 297e509fb..000000000 --- a/libs/rocksdb.patch +++ /dev/null @@ -1,21 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index 6761929..6a369af 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -220,6 +220,7 @@ else() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -momit-leaf-frame-pointer") - endif() - endif() -+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-copy -Wno-unused-but-set-variable") - endif() - - include(CheckCCompilerFlag) -@@ -997,7 +998,7 @@ if(NOT WIN32 OR ROCKSDB_INSTALL_ON_WINDOWS) - - if(ROCKSDB_BUILD_SHARED) - install( -- TARGETS ${ROCKSDB_SHARED_LIB} -+ TARGETS ${ROCKSDB_SHARED_LIB} OPTIONAL - EXPORT RocksDBTargets - COMPONENT runtime - ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" diff --git a/libs/setup.sh b/libs/setup.sh index ebf20e830..9c2a38c47 100755 --- a/libs/setup.sh +++ b/libs/setup.sh @@ -168,12 +168,11 @@ pushd antlr4 git apply ../antlr4.10.1.patch popd -# cppitertools v2.0 2019-12-23 -cppitertools_ref="cb3635456bdb531121b82b4d2e3afc7ae1f56d47" +cppitertools_ref="v2.1" # 2021-01-15 repo_clone_try_double "${primary_urls[cppitertools]}" "${secondary_urls[cppitertools]}" "cppitertools" "$cppitertools_ref" # rapidcheck -rapidcheck_tag="7bc7d302191a4f3d0bf005692677126136e02f60" # (2020-05-04) +rapidcheck_tag="1c91f40e64d87869250cfb610376c629307bf77d" # (2023-08-15) repo_clone_try_double "${primary_urls[rapidcheck]}" "${secondary_urls[rapidcheck]}" "rapidcheck" "$rapidcheck_tag" # google benchmark @@ -221,7 +220,7 @@ repo_clone_try_double "${primary_urls[pymgclient]}" "${secondary_urls[pymgclient mgconsole_tag="v1.4.0" # (2023-05-21) repo_clone_try_double "${primary_urls[mgconsole]}" "${secondary_urls[mgconsole]}" "mgconsole" "$mgconsole_tag" true -spdlog_tag="v1.9.2" # (2021-08-12) +spdlog_tag="v1.12.0" # (2022-11-02) repo_clone_try_double "${primary_urls[spdlog]}" "${secondary_urls[spdlog]}" "spdlog" "$spdlog_tag" true # librdkafka diff --git a/src/auth/crypto.hpp b/src/auth/crypto.hpp index c5dfc1c05..a0458a067 100644 --- a/src/auth/crypto.hpp +++ 
b/src/auth/crypto.hpp @@ -8,10 +8,12 @@ #pragma once -#include +#include #include #include +#include + namespace memgraph::auth { /// Need to be stable, auth durability depends on this enum class PasswordHashAlgorithm : uint8_t { BCRYPT = 0, SHA256 = 1, SHA256_MULTIPLE = 2 }; diff --git a/src/communication/bolt/client.cpp b/src/communication/bolt/client.cpp index 39cd24730..29f7d237a 100644 --- a/src/communication/bolt/client.cpp +++ b/src/communication/bolt/client.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -15,6 +15,9 @@ #include "communication/bolt/v1/value.hpp" #include "utils/logging.hpp" +#include "communication/bolt/v1/fmt.hpp" +#include "io/network/fmt.hpp" + namespace { constexpr uint8_t kBoltV43Version[4] = {0x00, 0x00, 0x03, 0x04}; constexpr uint8_t kEmptyBoltVersion[4] = {0x00, 0x00, 0x00, 0x00}; diff --git a/src/communication/bolt/v1/fmt.hpp b/src/communication/bolt/v1/fmt.hpp new file mode 100644 index 000000000..0a6808643 --- /dev/null +++ b/src/communication/bolt/v1/fmt.hpp @@ -0,0 +1,27 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#if FMT_VERSION > 90000 +#include + +#include "communication/bolt/v1/value.hpp" + +template <> +class fmt::formatter : public fmt::ostream_formatter {}; + +template <> +class fmt::formatter> : public fmt::ostream_formatter {}; + +template <> +class fmt::formatter> : public fmt::ostream_formatter {}; +#endif diff --git a/src/communication/fmt.hpp b/src/communication/fmt.hpp new file mode 100644 index 000000000..ab65066b2 --- /dev/null +++ b/src/communication/fmt.hpp @@ -0,0 +1,20 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#if FMT_VERSION > 90000 +#include +#include + +template <> +class fmt::formatter : public fmt::ostream_formatter {}; +#endif diff --git a/src/communication/http/listener.hpp b/src/communication/http/listener.hpp index fac4cfaf3..aa3e7e2f5 100644 --- a/src/communication/http/listener.hpp +++ b/src/communication/http/listener.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -21,6 +21,7 @@ #include #include "communication/context.hpp" +#include "communication/fmt.hpp" #include "communication/http/session.hpp" #include "utils/spin_lock.hpp" #include "utils/synchronized.hpp" @@ -82,7 +83,7 @@ class Listener final : public std::enable_shared_from_this #include "communication/context.hpp" +#include "communication/fmt.hpp" #include "communication/init.hpp" #include "communication/v2/listener.hpp" #include "communication/v2/pool.hpp" @@ -129,7 +130,7 @@ bool Server::Start() { listener_->Start(); spdlog::info("{} server is fully armed and operational", service_name_); - spdlog::info("{} listening on {}", service_name_, endpoint_.address()); + spdlog::info("{} listening on {}", service_name_, endpoint_); context_thread_pool_.Run(); return true; diff --git a/src/communication/v2/session.hpp b/src/communication/v2/session.hpp index b54607729..0b23d9301 100644 --- a/src/communication/v2/session.hpp +++ b/src/communication/v2/session.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -47,6 +47,7 @@ #include "communication/buffer.hpp" #include "communication/context.hpp" #include "communication/exceptions.hpp" +#include "communication/fmt.hpp" #include "dbms/global.hpp" #include "utils/event_counter.hpp" #include "utils/logging.hpp" @@ -212,14 +213,11 @@ class WebsocketSession : public std::enable_shared_from_this 90000 +#include +#include + +#include +#include "utils/logging.hpp" + +inline std::string ToString(const nuraft::cmd_result_code &code) { + switch (code) { + case nuraft::cmd_result_code::OK: + return "OK"; + case nuraft::cmd_result_code::FAILED: + return "FAILED"; + case nuraft::cmd_result_code::RESULT_NOT_EXIST_YET: + return "RESULT_NOT_EXIST_YET"; + case nuraft::cmd_result_code::TERM_MISMATCH: + return "TERM_MISMATCH"; + case nuraft::cmd_result_code::SERVER_IS_LEAVING: + return "SERVER_IS_LEAVING"; + case nuraft::cmd_result_code::CANNOT_REMOVE_LEADER: + return "CANNOT_REMOVE_LEADER"; + case nuraft::cmd_result_code::SERVER_NOT_FOUND: + return "SERVER_NOT_FOUND"; + case nuraft::cmd_result_code::SERVER_IS_JOINING: + return "SERVER_IS_JOINING"; + case nuraft::cmd_result_code::CONFIG_CHANGING: + return "CONFIG_CHANGING"; + case nuraft::cmd_result_code::SERVER_ALREADY_EXISTS: + return "SERVER_ALREADY_EXISTS"; + case nuraft::cmd_result_code::BAD_REQUEST: + return "BAD_REQUEST"; + case nuraft::cmd_result_code::NOT_LEADER: + return "NOT_LEADER"; + case nuraft::cmd_result_code::TIMEOUT: + return "TIMEOUT"; + case nuraft::cmd_result_code::CANCELLED: + return "CANCELLED"; + } + LOG_FATAL("ToString of a nuraft::cmd_result_code -> check missing switch case"); +} +inline std::ostream &operator<<(std::ostream &os, const nuraft::cmd_result_code &code) { + os << ToString(code); + return os; +} +template <> +class fmt::formatter : public fmt::ostream_formatter {}; +#endif diff --git a/src/dbms/inmemory/replication_handlers.cpp b/src/dbms/inmemory/replication_handlers.cpp index b7b2146f4..3fc174d3c 100644 --- a/src/dbms/inmemory/replication_handlers.cpp +++ b/src/dbms/inmemory/replication_handlers.cpp @@ -19,6 +19,7 @@ #include "storage/v2/durability/durability.hpp" 
#include "storage/v2/durability/snapshot.hpp" #include "storage/v2/durability/version.hpp" +#include "storage/v2/fmt.hpp" #include "storage/v2/indices/label_index_stats.hpp" #include "storage/v2/inmemory/storage.hpp" #include "storage/v2/inmemory/unique_constraints.hpp" diff --git a/src/dbms/utils.hpp b/src/dbms/utils.hpp deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/distributed/include/distributed/lamport_clock.hpp b/src/distributed/include/distributed/lamport_clock.hpp index f3e91e47a..2bbc0b447 100644 --- a/src/distributed/include/distributed/lamport_clock.hpp +++ b/src/distributed/include/distributed/lamport_clock.hpp @@ -10,6 +10,7 @@ // licenses/APL.txt. #pragma once +#include #include #include #include diff --git a/src/integrations/kafka/consumer.cpp b/src/integrations/kafka/consumer.cpp index 9889fe46b..c5604e85a 100644 --- a/src/integrations/kafka/consumer.cpp +++ b/src/integrations/kafka/consumer.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -22,6 +22,7 @@ #include "integrations/constants.hpp" #include "integrations/kafka/exceptions.hpp" +#include "integrations/kafka/fmt.hpp" #include "utils/exceptions.hpp" #include "utils/logging.hpp" #include "utils/on_scope_exit.hpp" diff --git a/src/integrations/kafka/fmt.hpp b/src/integrations/kafka/fmt.hpp new file mode 100644 index 000000000..f85f74b49 --- /dev/null +++ b/src/integrations/kafka/fmt.hpp @@ -0,0 +1,25 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#if FMT_VERSION > 90000 +#include + +#include + +inline std::ostream &operator<<(std::ostream &os, const RdKafka::ErrorCode &code) { + os << RdKafka::err2str(code); + return os; +} +template <> +class fmt::formatter : public fmt::ostream_formatter {}; +#endif diff --git a/src/integrations/pulsar/consumer.cpp b/src/integrations/pulsar/consumer.cpp index f004cf6dc..1cfd8159c 100644 --- a/src/integrations/pulsar/consumer.cpp +++ b/src/integrations/pulsar/consumer.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -15,12 +15,12 @@ #include #include -#include #include #include #include "integrations/constants.hpp" #include "integrations/pulsar/exceptions.hpp" +#include "integrations/pulsar/fmt.hpp" #include "utils/concepts.hpp" #include "utils/logging.hpp" #include "utils/on_scope_exit.hpp" diff --git a/src/integrations/pulsar/fmt.hpp b/src/integrations/pulsar/fmt.hpp new file mode 100644 index 000000000..7585d87c7 --- /dev/null +++ b/src/integrations/pulsar/fmt.hpp @@ -0,0 +1,21 @@ +// Copyright 2024 Memgraph Ltd. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#if FMT_VERSION > 90000 +#include + +#include "integrations/pulsar/consumer.hpp" + +template <> +class fmt::formatter : public fmt::ostream_formatter {}; +#endif diff --git a/src/io/network/fmt.hpp b/src/io/network/fmt.hpp new file mode 100644 index 000000000..014de5353 --- /dev/null +++ b/src/io/network/fmt.hpp @@ -0,0 +1,21 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#if FMT_VERSION > 90000 +#include + +#include "io/network/endpoint.hpp" + +template <> +class fmt::formatter : public fmt::ostream_formatter {}; +#endif diff --git a/src/io/network/stream_buffer.hpp b/src/io/network/stream_buffer.hpp index 5ed7fc69e..5a9f01bf7 100644 --- a/src/io/network/stream_buffer.hpp +++ b/src/io/network/stream_buffer.hpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2023 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -11,6 +11,7 @@ #pragma once +#include #include namespace memgraph::io::network { diff --git a/src/kvstore/kvstore.hpp b/src/kvstore/kvstore.hpp index b9675d75b..84aa27009 100644 --- a/src/kvstore/kvstore.hpp +++ b/src/kvstore/kvstore.hpp @@ -160,13 +160,14 @@ class KVStore final { * and behaves as if all of those pairs are stored in a single iterable * collection of std::pair. 
*/ - class iterator final : public std::iterator, // value_type - long, // difference_type - const std::pair *, // pointer - const std::pair & // reference - > { + class iterator final { public: + using iterator_concept [[maybe_unused]] = std::input_iterator_tag; + using value_type = std::pair; + using difference_type = long; + using pointer = const std::pair *; + using reference = const std::pair &; + explicit iterator(const KVStore *kvstore, const std::string &prefix = "", bool at_end = false); iterator(const iterator &other) = delete; diff --git a/src/mg_import_csv.cpp b/src/mg_import_csv.cpp index cbfb905aa..2d77c2db2 100644 --- a/src/mg_import_csv.cpp +++ b/src/mg_import_csv.cpp @@ -139,6 +139,11 @@ struct NodeId { std::string id_space; }; +#if FMT_VERSION > 90000 +template <> +class fmt::formatter : public fmt::ostream_formatter {}; +#endif + bool operator==(const NodeId &a, const NodeId &b) { return a.id == b.id && a.id_space == b.id_space; } std::ostream &operator<<(std::ostream &stream, const NodeId &node_id) { diff --git a/src/py/py.hpp b/src/py/py.hpp index 14d54d657..7b25b595e 100644 --- a/src/py/py.hpp +++ b/src/py/py.hpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2023 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -274,3 +274,8 @@ inline void RestoreError(ExceptionInfo exc_info) { } } // namespace memgraph::py + +#if FMT_VERSION > 90000 +template <> +class fmt::formatter : public fmt::ostream_formatter {}; +#endif diff --git a/src/query/common.hpp b/src/query/common.hpp index 054714164..36ba07791 100644 --- a/src/query/common.hpp +++ b/src/query/common.hpp @@ -19,6 +19,7 @@ #include "query/db_accessor.hpp" #include "query/exceptions.hpp" +#include "query/fmt.hpp" #include "query/frontend/ast/ast.hpp" #include "query/frontend/semantic/symbol.hpp" #include "query/typed_value.hpp" diff --git a/src/query/fmt.hpp b/src/query/fmt.hpp new file mode 100644 index 000000000..50a915715 --- /dev/null +++ b/src/query/fmt.hpp @@ -0,0 +1,23 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#if FMT_VERSION > 90000 +#include + +#include "query/typed_value.hpp" + +template <> +class fmt::formatter : public fmt::ostream_formatter {}; +template <> +class fmt::formatter : public fmt::ostream_formatter {}; +#endif diff --git a/src/query/plan/preprocess.cpp b/src/query/plan/preprocess.cpp index cf8ad9c97..c3bfdf462 100644 --- a/src/query/plan/preprocess.cpp +++ b/src/query/plan/preprocess.cpp @@ -313,7 +313,7 @@ void Filters::CollectPatternFilters(Pattern &pattern, SymbolTable &symbol_table, auto *property_lookup = storage.Create(atom->filter_lambda_.inner_edge, prop_pair.first); auto *prop_equal = storage.Create(property_lookup, prop_pair.second); // Currently, variable expand has no gains if we set PropertyFilter. 
- all_filters_.emplace_back(FilterInfo{FilterInfo::Type::Generic, prop_equal, collector.symbols_}); + all_filters_.emplace_back(FilterInfo::Type::Generic, prop_equal, collector.symbols_); } { collector.symbols_.clear(); @@ -328,9 +328,9 @@ void Filters::CollectPatternFilters(Pattern &pattern, SymbolTable &symbol_table, auto *prop_equal = storage.Create(property_lookup, prop_pair.second); // Currently, variable expand has no gains if we set PropertyFilter. all_filters_.emplace_back( - FilterInfo{FilterInfo::Type::Generic, - storage.Create(identifier, atom->identifier_, storage.Create(prop_equal)), - collector.symbols_}); + FilterInfo::Type::Generic, + storage.Create(identifier, atom->identifier_, storage.Create(prop_equal)), + collector.symbols_); } } return; @@ -639,6 +639,12 @@ void AddMatching(const Match &match, SymbolTable &symbol_table, AstStorage &stor } } +PatternFilterVisitor::PatternFilterVisitor(SymbolTable &symbol_table, AstStorage &storage) + : symbol_table_(symbol_table), storage_(storage) {} +PatternFilterVisitor::PatternFilterVisitor(const PatternFilterVisitor &) = default; +PatternFilterVisitor::PatternFilterVisitor(PatternFilterVisitor &&) noexcept = default; +PatternFilterVisitor::~PatternFilterVisitor() = default; + void PatternFilterVisitor::Visit(Exists &op) { std::vector patterns; patterns.push_back(op.pattern_); @@ -652,6 +658,8 @@ void PatternFilterVisitor::Visit(Exists &op) { matchings_.push_back(std::move(filter_matching)); } +std::vector PatternFilterVisitor::getMatchings() { return matchings_; } + static void ParseForeach(query::Foreach &foreach, SingleQueryPart &query_part, AstStorage &storage, SymbolTable &symbol_table) { for (auto *clause : foreach.clauses_) { @@ -723,4 +731,18 @@ QueryParts CollectQueryParts(SymbolTable &symbol_table, AstStorage &storage, Cyp return QueryParts{query_parts, distinct}; } +FilterInfo::FilterInfo(Type type, Expression *expression, std::unordered_set used_symbols, + std::optional property_filter, std::optional id_filter) + : type(type), + expression(expression), + used_symbols(std::move(used_symbols)), + property_filter(std::move(property_filter)), + id_filter(std::move(id_filter)), + matchings({}) {} +FilterInfo::FilterInfo(const FilterInfo &) = default; +FilterInfo &FilterInfo::operator=(const FilterInfo &) = default; +FilterInfo::FilterInfo(FilterInfo &&) noexcept = default; +FilterInfo &FilterInfo::operator=(FilterInfo &&) noexcept = default; +FilterInfo::~FilterInfo() = default; + } // namespace memgraph::query::plan diff --git a/src/query/plan/preprocess.hpp b/src/query/plan/preprocess.hpp index 2b53fb7b0..01b10ebaf 100644 --- a/src/query/plan/preprocess.hpp +++ b/src/query/plan/preprocess.hpp @@ -19,6 +19,7 @@ #include #include "query/frontend/ast/ast.hpp" +#include "query/frontend/ast/ast_visitor.hpp" #include "query/frontend/semantic/symbol_table.hpp" namespace memgraph::query::plan { @@ -159,8 +160,12 @@ enum class PatternFilterType { EXISTS }; /// Collects matchings from filters that include patterns class PatternFilterVisitor : public ExpressionVisitor { public: - explicit PatternFilterVisitor(SymbolTable &symbol_table, AstStorage &storage) - : symbol_table_(symbol_table), storage_(storage) {} + explicit PatternFilterVisitor(SymbolTable &symbol_table, AstStorage &storage); + PatternFilterVisitor(const PatternFilterVisitor &); + PatternFilterVisitor &operator=(const PatternFilterVisitor &) = delete; + PatternFilterVisitor(PatternFilterVisitor &&) noexcept; + PatternFilterVisitor &operator=(PatternFilterVisitor &&) 
noexcept = delete; + ~PatternFilterVisitor() override; using ExpressionVisitor::Visit; @@ -232,7 +237,7 @@ class PatternFilterVisitor : public ExpressionVisitor { void Visit(RegexMatch &op) override{}; void Visit(PatternComprehension &op) override{}; - std::vector getMatchings() { return matchings_; } + std::vector getMatchings(); SymbolTable &symbol_table_; AstStorage &storage_; @@ -298,9 +303,23 @@ struct FilterInfo { /// elements. enum class Type { Generic, Label, Property, Id, Pattern }; - Type type; + // FilterInfo is tricky because FilterMatching is not yet defined: + // * if no declared constructor -> FilterInfo is std::__is_complete_or_unbounded + // * if any user-declared constructor -> non-aggregate type -> no designated initializers are possible + // * IMPORTANT: Matchings will always be initialized to an empty container. + explicit FilterInfo(Type type = Type::Generic, Expression *expression = nullptr, + std::unordered_set used_symbols = {}, std::optional property_filter = {}, + std::optional id_filter = {}); + // All other constructors are also defined in the cpp file because this struct is incomplete here. + FilterInfo(const FilterInfo &); + FilterInfo &operator=(const FilterInfo &); + FilterInfo(FilterInfo &&) noexcept; + FilterInfo &operator=(FilterInfo &&) noexcept; + ~FilterInfo(); + + Type type{Type::Generic}; /// The original filter expression which must be satisfied. - Expression *expression; + Expression *expression{nullptr}; /// Set of used symbols by the filter @c expression. std::unordered_set used_symbols{}; /// Labels for Type::Label filtering. @@ -310,7 +329,8 @@ struct FilterInfo { /// Information for Type::Id filtering. std::optional id_filter{}; /// Matchings for filters that include patterns - std::vector matchings{}; + /// NOTE: The vector is not defined here because FilterMatching is forward declared above. + std::vector matchings; }; /// Stores information on filters used inside the @c Matching of a @c QueryPart. 
@@ -329,34 +349,15 @@ class Filters final { auto empty() const { return all_filters_.empty(); } - auto erase(iterator pos) { return all_filters_.erase(pos); } - auto erase(const_iterator pos) { return all_filters_.erase(pos); } - auto erase(iterator first, iterator last) { return all_filters_.erase(first, last); } - auto erase(const_iterator first, const_iterator last) { return all_filters_.erase(first, last); } + auto erase(iterator pos) -> iterator; + auto erase(const_iterator pos) -> iterator; + auto erase(iterator first, iterator last) -> iterator; + auto erase(const_iterator first, const_iterator last) -> iterator; void SetFilters(std::vector &&all_filters) { all_filters_ = std::move(all_filters); } - auto FilteredLabels(const Symbol &symbol) const { - std::unordered_set labels; - for (const auto &filter : all_filters_) { - if (filter.type == FilterInfo::Type::Label && utils::Contains(filter.used_symbols, symbol)) { - MG_ASSERT(filter.used_symbols.size() == 1U, "Expected a single used symbol for label filter"); - labels.insert(filter.labels.begin(), filter.labels.end()); - } - } - return labels; - } - - auto FilteredProperties(const Symbol &symbol) const -> std::unordered_set { - std::unordered_set properties; - - for (const auto &filter : all_filters_) { - if (filter.type == FilterInfo::Type::Property && filter.property_filter->symbol_ == symbol) { - properties.insert(filter.property_filter->property_); - } - } - return properties; - } + auto FilteredLabels(const Symbol &symbol) const -> std::unordered_set; + auto FilteredProperties(const Symbol &symbol) const -> std::unordered_set; /// Remove a filter; may invalidate iterators. /// Removal is done by comparing only the expression, so that multiple @@ -370,26 +371,10 @@ class Filters final { std::vector *removed_filters = nullptr); /// Returns a vector of FilterInfo for properties. - auto PropertyFilters(const Symbol &symbol) const { - std::vector filters; - for (const auto &filter : all_filters_) { - if (filter.type == FilterInfo::Type::Property && filter.property_filter->symbol_ == symbol) { - filters.push_back(filter); - } - } - return filters; - } + auto PropertyFilters(const Symbol &symbol) const -> std::vector; /// Return a vector of FilterInfo for ID equality filtering. - auto IdFilters(const Symbol &symbol) const { - std::vector filters; - for (const auto &filter : all_filters_) { - if (filter.type == FilterInfo::Type::Id && filter.id_filter->symbol_ == symbol) { - filters.push_back(filter); - } - } - return filters; - } + auto IdFilters(const Symbol &symbol) const -> std::vector; /// Collects filtering information from a pattern. 
/// @@ -459,6 +444,57 @@ struct FilterMatching : Matching { std::optional symbol; }; +inline auto Filters::erase(Filters::iterator pos) -> iterator { return all_filters_.erase(pos); } +inline auto Filters::erase(Filters::const_iterator pos) -> iterator { return all_filters_.erase(pos); } +inline auto Filters::erase(Filters::iterator first, Filters::iterator last) -> iterator { + return all_filters_.erase(first, last); +} +inline auto Filters::erase(Filters::const_iterator first, Filters::const_iterator last) -> iterator { + return all_filters_.erase(first, last); +} + +inline auto Filters::FilteredLabels(const Symbol &symbol) const -> std::unordered_set { + std::unordered_set labels; + for (const auto &filter : all_filters_) { + if (filter.type == FilterInfo::Type::Label && utils::Contains(filter.used_symbols, symbol)) { + MG_ASSERT(filter.used_symbols.size() == 1U, "Expected a single used symbol for label filter"); + labels.insert(filter.labels.begin(), filter.labels.end()); + } + } + return labels; +} + +inline auto Filters::FilteredProperties(const Symbol &symbol) const -> std::unordered_set { + std::unordered_set properties; + + for (const auto &filter : all_filters_) { + if (filter.type == FilterInfo::Type::Property && filter.property_filter->symbol_ == symbol) { + properties.insert(filter.property_filter->property_); + } + } + return properties; +} + +inline auto Filters::PropertyFilters(const Symbol &symbol) const -> std::vector { + std::vector filters; + for (const auto &filter : all_filters_) { + if (filter.type == FilterInfo::Type::Property && filter.property_filter->symbol_ == symbol) { + filters.push_back(filter); + } + } + return filters; +} + +inline auto Filters::IdFilters(const Symbol &symbol) const -> std::vector { + std::vector filters; + for (const auto &filter : all_filters_) { + if (filter.type == FilterInfo::Type::Id && filter.id_filter->symbol_ == symbol) { + filters.push_back(filter); + } + } + return filters; +} + /// @brief Represents a read (+ write) part of a query. Parts are split on /// `WITH` clauses. /// diff --git a/src/query/procedure/fmt.hpp b/src/query/procedure/fmt.hpp new file mode 100644 index 000000000..85775da46 --- /dev/null +++ b/src/query/procedure/fmt.hpp @@ -0,0 +1,82 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+
+#pragma once
+
+#if FMT_VERSION > 90000
+#include
+#include
+
+#include "mg_procedure.h"
+#include "utils/logging.hpp"
+
+inline std::string ToString(const mgp_log_level &log_level) {
+  switch (log_level) {
+    case mgp_log_level::MGP_LOG_LEVEL_CRITICAL:
+      return "CRITICAL";
+    case mgp_log_level::MGP_LOG_LEVEL_ERROR:
+      return "ERROR";
+    case mgp_log_level::MGP_LOG_LEVEL_WARN:
+      return "WARN";
+    case mgp_log_level::MGP_LOG_LEVEL_INFO:
+      return "INFO";
+    case mgp_log_level::MGP_LOG_LEVEL_DEBUG:
+      return "DEBUG";
+    case mgp_log_level::MGP_LOG_LEVEL_TRACE:
+      return "TRACE";
+  }
+  LOG_FATAL("ToString of a wrong mgp_log_level -> check missing switch case");
+}
+inline std::ostream &operator<<(std::ostream &os, const mgp_log_level &log_level) {
+  os << ToString(log_level);
+  return os;
+}
+template <>
+class fmt::formatter<mgp_log_level> : public fmt::ostream_formatter {};
+
+inline std::string ToString(const mgp_error &error) {
+  switch (error) {
+    case mgp_error::MGP_ERROR_NO_ERROR:
+      return "NO ERROR";
+    case mgp_error::MGP_ERROR_UNKNOWN_ERROR:
+      return "UNKNOWN ERROR";
+    case mgp_error::MGP_ERROR_UNABLE_TO_ALLOCATE:
+      return "UNABLE TO ALLOCATE ERROR";
+    case mgp_error::MGP_ERROR_INSUFFICIENT_BUFFER:
+      return "INSUFFICIENT BUFFER ERROR";
+    case mgp_error::MGP_ERROR_OUT_OF_RANGE:
+      return "OUT OF RANGE ERROR";
+    case mgp_error::MGP_ERROR_LOGIC_ERROR:
+      return "LOGIC ERROR";
+    case mgp_error::MGP_ERROR_DELETED_OBJECT:
+      return "DELETED OBJECT ERROR";
+    case mgp_error::MGP_ERROR_INVALID_ARGUMENT:
+      return "INVALID ARGUMENT ERROR";
+    case mgp_error::MGP_ERROR_KEY_ALREADY_EXISTS:
+      return "KEY ALREADY EXISTS ERROR";
+    case mgp_error::MGP_ERROR_IMMUTABLE_OBJECT:
+      return "IMMUTABLE OBJECT ERROR";
+    case mgp_error::MGP_ERROR_VALUE_CONVERSION:
+      return "VALUE CONVERSION ERROR";
+    case mgp_error::MGP_ERROR_SERIALIZATION_ERROR:
+      return "SERIALIZATION ERROR";
+    case mgp_error::MGP_ERROR_AUTHORIZATION_ERROR:
+      return "AUTHORIZATION ERROR";
+  }
+  LOG_FATAL("ToString of a wrong mgp_error -> check missing switch case");
+}
+inline std::ostream &operator<<(std::ostream &os, const mgp_error &error) {
+  os << ToString(error);
+  return os;
+}
+template <>
+class fmt::formatter<mgp_error> : public fmt::ostream_formatter {};
+#endif
diff --git a/src/query/procedure/mg_procedure_helpers.cpp b/src/query/procedure/mg_procedure_helpers.cpp
index 6b206e7dc..a6590a287 100644
--- a/src/query/procedure/mg_procedure_helpers.cpp
+++ b/src/query/procedure/mg_procedure_helpers.cpp
@@ -1,4 +1,4 @@
-// Copyright 2022 Memgraph Ltd.
+// Copyright 2023 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -10,6 +10,7 @@
 // licenses/APL.txt.
 
 #include "query/procedure/mg_procedure_helpers.hpp"
+#include "query/procedure/fmt.hpp"
 
 namespace memgraph::query::procedure {
 MgpUniquePtr GetStringValueOrSetError(const char *string, mgp_memory *memory, mgp_result *result) {
diff --git a/src/query/procedure/mg_procedure_helpers.hpp b/src/query/procedure/mg_procedure_helpers.hpp
index cb8bd55db..d0032c521 100644
--- a/src/query/procedure/mg_procedure_helpers.hpp
+++ b/src/query/procedure/mg_procedure_helpers.hpp
@@ -1,4 +1,4 @@
-// Copyright 2022 Memgraph Ltd.
+// Copyright 2023 Memgraph Ltd.
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -18,6 +18,7 @@ #include #include "mg_procedure.h" +#include "query/procedure/fmt.hpp" namespace memgraph::query::procedure { template diff --git a/src/query/procedure/mg_procedure_impl.cpp b/src/query/procedure/mg_procedure_impl.cpp index c7faf15f7..647f3e14d 100644 --- a/src/query/procedure/mg_procedure_impl.cpp +++ b/src/query/procedure/mg_procedure_impl.cpp @@ -29,6 +29,7 @@ #include "query/db_accessor.hpp" #include "query/frontend/ast/ast.hpp" #include "query/procedure/cypher_types.hpp" +#include "query/procedure/fmt.hpp" #include "query/procedure/mg_procedure_helpers.hpp" #include "query/stream/common.hpp" #include "storage/v2/property_value.hpp" diff --git a/src/query/typed_value.cpp b/src/query/typed_value.cpp index 4cb79508e..86d25f01b 100644 --- a/src/query/typed_value.cpp +++ b/src/query/typed_value.cpp @@ -19,6 +19,7 @@ #include #include +#include "query/fmt.hpp" #include "storage/v2/temporal.hpp" #include "utils/exceptions.hpp" #include "utils/fnv.hpp" @@ -326,13 +327,11 @@ TypedValue::operator storage::PropertyValue() const { throw TypedValueException("TypedValue is of type '{}', not '{}'", type_, Type::type_enum); \ return field; \ } \ - \ const type_param &TypedValue::Value##type_enum() const { \ if (type_ != Type::type_enum) [[unlikely]] \ throw TypedValueException("TypedValue is of type '{}', not '{}'", type_, Type::type_enum); \ return field; \ } \ - \ bool TypedValue::Is##type_enum() const { return type_ == Type::type_enum; } DEFINE_VALUE_AND_TYPE_GETTERS(bool, Bool, bool_v) @@ -783,10 +782,13 @@ TypedValue operator<(const TypedValue &a, const TypedValue &b) { return false; } }; - if (!is_legal(a.type()) || !is_legal(b.type())) + if (!is_legal(a.type()) || !is_legal(b.type())) { throw TypedValueException("Invalid 'less' operand types({} + {})", a.type(), b.type()); + } - if (a.IsNull() || b.IsNull()) return TypedValue(a.GetMemoryResource()); + if (a.IsNull() || b.IsNull()) { + return TypedValue(a.GetMemoryResource()); + } if (a.IsString() || b.IsString()) { if (a.type() != b.type()) { @@ -956,8 +958,9 @@ inline void EnsureArithmeticallyOk(const TypedValue &a, const TypedValue &b, boo // checked here because they are handled before this check is performed in // arithmetic op implementations. 
- if (!is_legal(a) || !is_legal(b)) + if (!is_legal(a) || !is_legal(b)) { throw TypedValueException("Invalid {} operand types {}, {}", op_name, a.type(), b.type()); + } } namespace { @@ -1107,8 +1110,9 @@ TypedValue operator%(const TypedValue &a, const TypedValue &b) { } inline void EnsureLogicallyOk(const TypedValue &a, const TypedValue &b, const std::string &op_name) { - if (!((a.IsBool() || a.IsNull()) && (b.IsBool() || b.IsNull()))) + if (!((a.IsBool() || a.IsNull()) && (b.IsBool() || b.IsNull()))) { throw TypedValueException("Invalid {} operand types({} && {})", op_name, a.type(), b.type()); + } } TypedValue operator&&(const TypedValue &a, const TypedValue &b) { diff --git a/src/replication/include/replication/messages.hpp b/src/replication/include/replication/messages.hpp deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/replication/messages.cpp b/src/replication/messages.cpp deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/replication/replication_client.cpp b/src/replication/replication_client.cpp index ed46ea471..262d698bf 100644 --- a/src/replication/replication_client.cpp +++ b/src/replication/replication_client.cpp @@ -10,6 +10,7 @@ // licenses/APL.txt. #include "replication/replication_client.hpp" +#include "io/network/fmt.hpp" namespace memgraph::replication { @@ -30,7 +31,7 @@ ReplicationClient::ReplicationClient(const memgraph::replication::ReplicationCli ReplicationClient::~ReplicationClient() { try { auto const &endpoint = rpc_client_.Endpoint(); - spdlog::trace("Closing replication client on {}:{}", endpoint.address, endpoint.port); + spdlog::trace("Closing replication client on {}", endpoint); } catch (...) { // Logging can throw. Not a big deal, just ignore. } diff --git a/src/rpc/client.hpp b/src/rpc/client.hpp index 3a2fefd57..d14746313 100644 --- a/src/rpc/client.hpp +++ b/src/rpc/client.hpp @@ -27,6 +27,8 @@ #include "utils/on_scope_exit.hpp" #include "utils/typeinfo.hpp" +#include "io/network/fmt.hpp" + namespace memgraph::rpc { /// Client is thread safe, but it is recommended to use thread_local clients. diff --git a/src/storage/v2/disk/storage.cpp b/src/storage/v2/disk/storage.cpp index adc0e92f4..fa9f93ccb 100644 --- a/src/storage/v2/disk/storage.cpp +++ b/src/storage/v2/disk/storage.cpp @@ -1278,7 +1278,7 @@ bool DiskStorage::DeleteEdgeFromConnectivityIndex(Transaction *transaction, cons /// std::map /// Here we also do flushing of too many things, we don't need to serialize edges in read-only txn, check that... [[nodiscard]] utils::BasicResult DiskStorage::FlushModifiedEdges( - Transaction *transaction, const auto &edge_acc) { + Transaction *transaction, const auto &edges_acc) { for (const auto &modified_edge : transaction->modified_edges_) { const std::string edge_gid = modified_edge.first.ToString(); const Delta::Action root_action = modified_edge.second.delta_action; @@ -1304,8 +1304,8 @@ bool DiskStorage::DeleteEdgeFromConnectivityIndex(Transaction *transaction, cons return StorageManipulationError{SerializationError{}}; } - const auto &edge = edge_acc.find(modified_edge.first); - MG_ASSERT(edge != edge_acc.end(), + const auto &edge = edges_acc.find(modified_edge.first); + MG_ASSERT(edge != edges_acc.end(), "Database in invalid state, commit not possible! Please restart your DB and start the import again."); /// TODO: (andi) I think this is not wrong but it would be better to use AtomicWrites across column families. 
@@ -1693,9 +1693,8 @@ utils::BasicResult DiskStorage::DiskAccessor::Co transaction_.commit_timestamp->store(*commit_timestamp_, std::memory_order_release); if (edge_import_mode_active) { - if (auto res = - disk_storage->FlushModifiedEdges(&transaction_, disk_storage->edge_import_mode_cache_->AccessToEdges()); - res.HasError()) { + auto edges_acc = disk_storage->edge_import_mode_cache_->AccessToEdges(); + if (auto res = disk_storage->FlushModifiedEdges(&transaction_, edges_acc); res.HasError()) { Abort(); return res; } @@ -1717,7 +1716,8 @@ utils::BasicResult DiskStorage::DiskAccessor::Co return del_vertices_res.GetError(); } - if (auto modified_edges_res = disk_storage->FlushModifiedEdges(&transaction_, transaction_.edges_->access()); + auto tx_edges_acc = transaction_.edges_->access(); + if (auto modified_edges_res = disk_storage->FlushModifiedEdges(&transaction_, tx_edges_acc); modified_edges_res.HasError()) { Abort(); return modified_edges_res.GetError(); diff --git a/src/storage/v2/disk/storage.hpp b/src/storage/v2/disk/storage.hpp index 293e102b1..20b1645be 100644 --- a/src/storage/v2/disk/storage.hpp +++ b/src/storage/v2/disk/storage.hpp @@ -195,7 +195,7 @@ class DiskStorage final : public Storage { [[nodiscard]] utils::BasicResult FlushDeletedVertices(Transaction *transaction); [[nodiscard]] utils::BasicResult FlushDeletedEdges(Transaction *transaction); [[nodiscard]] utils::BasicResult FlushModifiedEdges(Transaction *transaction, - const auto &edge_acc); + const auto &edges_acc); [[nodiscard]] utils::BasicResult ClearDanglingVertices(Transaction *transaction); /// Writing methods diff --git a/src/storage/v2/durability/snapshot.cpp b/src/storage/v2/durability/snapshot.cpp index 0d434fadf..eee099870 100644 --- a/src/storage/v2/durability/snapshot.cpp +++ b/src/storage/v2/durability/snapshot.cpp @@ -22,6 +22,7 @@ #include "storage/v2/edge.hpp" #include "storage/v2/edge_accessor.hpp" #include "storage/v2/edge_ref.hpp" +#include "storage/v2/fmt.hpp" #include "storage/v2/id_types.hpp" #include "storage/v2/indices/label_index_stats.hpp" #include "storage/v2/indices/label_property_index_stats.hpp" diff --git a/src/storage/v2/fmt.hpp b/src/storage/v2/fmt.hpp new file mode 100644 index 000000000..e200d7299 --- /dev/null +++ b/src/storage/v2/fmt.hpp @@ -0,0 +1,23 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#if FMT_VERSION > 90000 +#include + +#include "storage/v2/property_value.hpp" + +template <> +class fmt::formatter : public fmt::ostream_formatter {}; +template <> +class fmt::formatter : public fmt::ostream_formatter {}; +#endif diff --git a/src/storage/v2/replication/replication_client.cpp b/src/storage/v2/replication/replication_client.cpp index 16247de57..a581201c1 100644 --- a/src/storage/v2/replication/replication_client.cpp +++ b/src/storage/v2/replication/replication_client.cpp @@ -9,6 +9,8 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. 
+#include + #include "replication/replication_client.hpp" #include "storage/v2/inmemory/storage.hpp" #include "storage/v2/storage.hpp" @@ -17,7 +19,7 @@ #include "utils/uuid.hpp" #include "utils/variant_helpers.hpp" -#include +#include "io/network/fmt.hpp" namespace { template diff --git a/src/storage/v2/replication/serialization.cpp b/src/storage/v2/replication/serialization.cpp index 6651b8999..d0ba2e8ac 100644 --- a/src/storage/v2/replication/serialization.cpp +++ b/src/storage/v2/replication/serialization.cpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2023 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source diff --git a/src/utils/async_timer.cpp b/src/utils/async_timer.cpp index dd5789172..b72be4d45 100644 --- a/src/utils/async_timer.cpp +++ b/src/utils/async_timer.cpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -56,12 +56,11 @@ void EraseFlag(uint64_t flag_id) { expiration_flags.access().remove(flag_id); } std::weak_ptr> GetFlag(uint64_t flag_id) { const auto flag_accessor = expiration_flags.access(); - const auto it = flag_accessor.find(flag_id); - if (it == flag_accessor.end()) { + const auto iter = flag_accessor.find(flag_id); + if (iter == flag_accessor.end()) { return {}; } - - return it->flag; + return iter->flag; } void MarkDone(const uint64_t flag_id) { diff --git a/src/utils/logging.hpp b/src/utils/logging.hpp index 02389beab..adc5db51a 100644 --- a/src/utils/logging.hpp +++ b/src/utils/logging.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -23,6 +23,11 @@ #include #include +// NOTE: fmt 9+ introduced fmt/std.h, it's important because of, e.g., std::path formatting. toolchain-v4 has fmt 8, +// the guard is here because of fmt 8 compatibility. +#if FMT_VERSION > 90000 +#include +#endif #include #include #include diff --git a/src/utils/message.hpp b/src/utils/message.hpp index c301b3878..009bea032 100644 --- a/src/utils/message.hpp +++ b/src/utils/message.hpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2023 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -17,8 +17,13 @@ namespace memgraph::utils { template std::string MessageWithLink(fmt::format_string fmt, Args &&...args) { +#if FMT_VERSION > 90000 + return fmt::format(fmt::runtime(fmt::format(fmt::runtime("{} For more details, visit {{}}."), fmt.get())), + std::forward(args)...); +#else return fmt::format(fmt::runtime(fmt::format(fmt::runtime("{} For more details, visit {{}}."), fmt)), std::forward(args)...); +#endif } } // namespace memgraph::utils diff --git a/src/utils/stat.hpp b/src/utils/stat.hpp index 4c2eec6d6..de806f853 100644 --- a/src/utils/stat.hpp +++ b/src/utils/stat.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -18,6 +18,7 @@ #include #include "utils/file.hpp" +#include "utils/logging.hpp" #include "utils/string.hpp" namespace memgraph::utils { @@ -39,19 +40,19 @@ inline uint64_t GetDirDiskUsage(const std::filesystem::path &path) { return 0; } uint64_t size = 0; - for (const auto &p : std::filesystem::directory_iterator(path)) { - if (IgnoreSymlink && std::filesystem::is_symlink(p)) continue; - if (std::filesystem::is_directory(p)) { - size += GetDirDiskUsage(p); - } else if (std::filesystem::is_regular_file(p)) { - if (!utils::HasReadAccess(p)) { + for (const auto &dir_entry : std::filesystem::directory_iterator(path)) { + if (IgnoreSymlink && std::filesystem::is_symlink(dir_entry)) continue; + if (std::filesystem::is_directory(dir_entry)) { + size += GetDirDiskUsage(dir_entry); + } else if (std::filesystem::is_regular_file(dir_entry)) { + if (!utils::HasReadAccess(dir_entry)) { spdlog::warn( "Skipping file path on collecting directory disk usage '{}' because it is not readable, check file " "ownership and read permissions!", - p); + dir_entry.path()); continue; } - size += std::filesystem::file_size(p); + size += std::filesystem::file_size(dir_entry); } } diff --git a/tests/e2e/replication/constraints.cpp b/tests/e2e/replication/constraints.cpp index 6f7e2991a..de090007f 100644 --- a/tests/e2e/replication/constraints.cpp +++ b/tests/e2e/replication/constraints.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -18,6 +18,7 @@ #include #include "common.hpp" +#include "io/network/fmt.hpp" #include "utils/logging.hpp" #include "utils/thread.hpp" #include "utils/timer.hpp" diff --git a/tests/e2e/replication/indices.cpp b/tests/e2e/replication/indices.cpp index c0eee23a7..d4b5397f1 100644 --- a/tests/e2e/replication/indices.cpp +++ b/tests/e2e/replication/indices.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -18,6 +18,7 @@ #include #include "common.hpp" +#include "io/network/fmt.hpp" #include "utils/logging.hpp" #include "utils/thread.hpp" #include "utils/timer.hpp" diff --git a/tests/e2e/replication/read_write_benchmark.cpp b/tests/e2e/replication/read_write_benchmark.cpp index 243aab2a8..b7719faf0 100644 --- a/tests/e2e/replication/read_write_benchmark.cpp +++ b/tests/e2e/replication/read_write_benchmark.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -19,6 +19,7 @@ #include #include "common.hpp" +#include "io/network/fmt.hpp" #include "utils/logging.hpp" #include "utils/thread.hpp" #include "utils/timer.hpp" diff --git a/tests/e2e/triggers/privilige_check.cpp b/tests/e2e/triggers/privilige_check.cpp index f2cad40d4..2d7ac0f1c 100644 --- a/tests/e2e/triggers/privilige_check.cpp +++ b/tests/e2e/triggers/privilige_check.cpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2023 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -13,7 +13,6 @@ #include #include -#include #include #include "common.hpp" #include "utils/logging.hpp" diff --git a/tests/macro_benchmark/clients/pokec_client.cpp b/tests/macro_benchmark/clients/pokec_client.cpp index ba6f96941..40854707e 100644 --- a/tests/macro_benchmark/clients/pokec_client.cpp +++ b/tests/macro_benchmark/clients/pokec_client.cpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -24,11 +24,13 @@ #include #include +#include "communication/bolt/v1/value.hpp" #include "io/network/utils.hpp" +#include "long_running_common.hpp" #include "utils/algorithm.hpp" #include "utils/timer.hpp" -#include "long_running_common.hpp" +#include "communication/bolt/v1/fmt.hpp" using memgraph::communication::bolt::Edge; using memgraph::communication::bolt::Value; diff --git a/tests/manual/interactive_planning.cpp b/tests/manual/interactive_planning.cpp index f550b9724..3f64c4f37 100644 --- a/tests/manual/interactive_planning.cpp +++ b/tests/manual/interactive_planning.cpp @@ -27,6 +27,7 @@ #include "query/plan/planner.hpp" #include "query/plan/pretty_print.hpp" #include "query/typed_value.hpp" +#include "storage/v2/fmt.hpp" #include "storage/v2/property_value.hpp" #include "utils/string.hpp" diff --git a/tests/manual/query_hash.cpp b/tests/manual/query_hash.cpp index 8688da351..fb16f6db5 100644 --- a/tests/manual/query_hash.cpp +++ b/tests/manual/query_hash.cpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -15,6 +15,7 @@
 #include
 
 #include "query/frontend/stripped.hpp"
+#include "storage/v2/fmt.hpp"
 
 DEFINE_string(q, "CREATE (n) RETURN n", "Query");
 
diff --git a/tests/unit/query_plan_operator_to_string.cpp b/tests/unit/query_plan_operator_to_string.cpp
index 694552cf0..9696050f2 100644
--- a/tests/unit/query_plan_operator_to_string.cpp
+++ b/tests/unit/query_plan_operator_to_string.cpp
@@ -214,23 +214,22 @@ TYPED_TEST(OperatorToStringTest, Filter) {
   auto node_ident = IDENT("person");
   auto property = this->dba.NameToProperty("name");
   auto property_ix = this->storage.GetPropertyIx("name");
-
-  FilterInfo generic_filter_info = {.type = FilterInfo::Type::Generic, .used_symbols = {node}};
+  auto generic_filter_info = FilterInfo{FilterInfo::Type::Generic, nullptr, {node}};
 
   auto id_filter = IdFilter(this->symbol_table, node, LITERAL(42));
-  FilterInfo id_filter_info = {.type = FilterInfo::Type::Id, .id_filter = id_filter};
+  auto id_filter_info = FilterInfo{FilterInfo::Type::Id, nullptr, {}, {}, id_filter};
 
   std::vector labels{this->storage.GetLabelIx("Customer"), this->storage.GetLabelIx("Visitor")};
   auto labels_test = LABELS_TEST(node_ident, labels);
-  FilterInfo label_filter_info = {.type = FilterInfo::Type::Label, .expression = labels_test};
+  auto label_filter_info = FilterInfo{FilterInfo::Type::Label, labels_test};
 
   auto labels_test_2 = LABELS_TEST(PROPERTY_LOOKUP(this->dba, "person", property), labels);
-  FilterInfo label_filter_2_info = {.type = FilterInfo::Type::Label, .expression = labels_test_2};
+  auto label_filter_2_info = FilterInfo{FilterInfo::Type::Label, labels_test_2};
 
   auto property_filter = PropertyFilter(node, property_ix, PropertyFilter::Type::EQUAL);
-  FilterInfo property_filter_info = {.type = FilterInfo::Type::Property, .property_filter = property_filter};
+  auto property_filter_info = FilterInfo{FilterInfo::Type::Property, nullptr, {}, property_filter};
 
-  FilterInfo pattern_filter_info = {.type = FilterInfo::Type::Pattern};
+  auto pattern_filter_info = FilterInfo{FilterInfo::Type::Pattern};
 
   Filters filters;
   filters.SetFilters({generic_filter_info, id_filter_info, label_filter_info, label_filter_2_info, property_filter_info,