diff --git a/.github/workflows/diff.yaml b/.github/workflows/diff.yaml index ba00941b8..77c0f070f 100644 --- a/.github/workflows/diff.yaml +++ b/.github/workflows/diff.yaml @@ -337,6 +337,70 @@ jobs: # multiple paths could be defined build/logs + experimental_build: + name: "MultiTenancy replication build" + runs-on: [self-hosted, Linux, X64, Diff] + env: + THREADS: 24 + MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} + MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} + + steps: + - name: Set up repository + uses: actions/checkout@v3 + with: + # Number of commits to fetch. `0` indicates all history for all + # branches and tags. (default: 1) + fetch-depth: 0 + + + - name: Build release binaries + run: | + # Activate toolchain. + source /opt/toolchain-v4/activate + + # Initialize dependencies. + ./init + + # Build MT replication experimental binaries. + cd build + cmake -DCMAKE_BUILD_TYPE=Release -D MG_EXPERIMENTAL_REPLICATION_MULTITENANCY=ON .. + make -j$THREADS + + - name: Run unit tests + run: | + # Activate toolchain. + source /opt/toolchain-v4/activate + + # Run unit tests. + cd build + ctest -R memgraph__unit --output-on-failure -j$THREADS + + - name: Run e2e tests + run: | + cd tests + ./setup.sh /opt/toolchain-v4/activate + source ve3/bin/activate_e2e + cd e2e + + # Just the replication based e2e tests + ./run.sh "Replicate multitenancy" + ./run.sh "Show" + ./run.sh "Show while creating invalid state" + ./run.sh "Delete edge replication" + ./run.sh "Read-write benchmark" + ./run.sh "Index replication" + ./run.sh "Constraints" + + - name: Save test data + uses: actions/upload-artifact@v3 + if: always() + with: + name: "Test data" + path: | + # multiple paths could be defined + build/logs + release_jepsen_test: name: "Release Jepsen Test" runs-on: [self-hosted, Linux, X64, Debian10, JepsenControl] diff --git a/CMakeLists.txt b/CMakeLists.txt index a5ad2612a..8751cfa16 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -279,6 +279,18 @@ option(ASAN "Build with Address Sanitizer. To get a reasonable performance optio option(TSAN "Build with Thread Sanitizer. 
To get a reasonable performance option should be used only in Release or RelWithDebInfo build " OFF) option(UBSAN "Build with Undefined Behaviour Sanitizer" OFF) +# Build feature flags +option(MG_EXPERIMENTAL_REPLICATION_MULTITENANCY "Feature flag for experimental replication of multi-tenancy" OFF) + +if (NOT MG_ENTERPRISE AND MG_EXPERIMENTAL_REPLICATION_MULTITENANCY) + set(MG_EXPERIMENTAL_REPLICATION_MULTITENANCY OFF) + message(FATAL_ERROR "MG_EXPERIMENTAL_REPLICATION_MULTITENANCY is not supported in community edition builds") +endif () + +if (MG_EXPERIMENTAL_REPLICATION_MULTITENANCY) + add_compile_definitions(MG_EXPERIMENTAL_REPLICATION_MULTITENANCY) +endif () + if (TEST_COVERAGE) string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type) if (NOT lower_build_type STREQUAL "debug") diff --git a/src/auth/auth.cpp b/src/auth/auth.cpp index cfe9dbdbe..16f6607b7 100644 --- a/src/auth/auth.cpp +++ b/src/auth/auth.cpp @@ -255,10 +255,8 @@ void Auth::SaveRole(const Role &role) { } std::optional Auth::AddRole(const std::string &rolename) { - auto existing_role = GetRole(rolename); - if (existing_role) return std::nullopt; - auto existing_user = GetUser(rolename); - if (existing_user) return std::nullopt; + if (auto existing_role = GetRole(rolename)) return std::nullopt; + if (auto existing_user = GetUser(rolename)) return std::nullopt; auto new_role = Role(rolename); SaveRole(new_role); return new_role; @@ -285,8 +283,7 @@ std::vector Auth::AllRoles() const { for (auto it = storage_.begin(kRolePrefix); it != storage_.end(kRolePrefix); ++it) { auto rolename = it->first.substr(kRolePrefix.size()); if (rolename != utils::ToLowerCase(rolename)) continue; - auto role = GetRole(rolename); - if (role) { + if (auto role = GetRole(rolename)) { ret.push_back(*role); } else { throw AuthException("Couldn't load role '{}'!", rolename); @@ -296,15 +293,14 @@ std::vector Auth::AllRoles() const { } std::vector Auth::AllUsersForRole(const std::string &rolename_orig) const { - auto rolename = utils::ToLowerCase(rolename_orig); + const auto rolename = utils::ToLowerCase(rolename_orig); std::vector ret; for (auto it = storage_.begin(kLinkPrefix); it != storage_.end(kLinkPrefix); ++it) { auto username = it->first.substr(kLinkPrefix.size()); if (username != utils::ToLowerCase(username)) continue; if (it->second != utils::ToLowerCase(it->second)) continue; if (it->second == rolename) { - auto user = GetUser(username); - if (user) { + if (auto user = GetUser(username)) { ret.push_back(std::move(*user)); } else { throw AuthException("Couldn't load user '{}'!", username); @@ -316,8 +312,7 @@ std::vector Auth::AllUsersForRole(const std::string &rolename_orig) #ifdef MG_ENTERPRISE bool Auth::GrantDatabaseToUser(const std::string &db, const std::string &name) { - auto user = GetUser(name); - if (user) { + if (auto user = GetUser(name)) { if (db == kAllDatabases) { user->db_access().GrantAll(); } else { @@ -330,8 +325,7 @@ bool Auth::GrantDatabaseToUser(const std::string &db, const std::string &name) { } bool Auth::RevokeDatabaseFromUser(const std::string &db, const std::string &name) { - auto user = GetUser(name); - if (user) { + if (auto user = GetUser(name)) { if (db == kAllDatabases) { user->db_access().DenyAll(); } else { @@ -346,17 +340,15 @@ bool Auth::RevokeDatabaseFromUser(const std::string &db, const std::string &name void Auth::DeleteDatabase(const std::string &db) { for (auto it = storage_.begin(kUserPrefix); it != storage_.end(kUserPrefix); ++it) { auto username = it->first.substr(kUserPrefix.size()); - auto user = 
GetUser(username); - if (user) { + if (auto user = GetUser(username)) { user->db_access().Delete(db); SaveUser(*user); } } } -bool Auth::SetMainDatabase(const std::string &db, const std::string &name) { - auto user = GetUser(name); - if (user) { +bool Auth::SetMainDatabase(std::string_view db, const std::string &name) { + if (auto user = GetUser(name)) { if (!user->db_access().SetDefault(db)) { throw AuthException("Couldn't set default database '{}' for user '{}'!", db, name); } diff --git a/src/auth/auth.hpp b/src/auth/auth.hpp index 8d2a9d91c..b9568c311 100644 --- a/src/auth/auth.hpp +++ b/src/auth/auth.hpp @@ -195,7 +195,7 @@ class Auth final { * @return true on success * @throw AuthException if unable to find or update the user */ - bool SetMainDatabase(const std::string &db, const std::string &name); + bool SetMainDatabase(std::string_view db, const std::string &name); #endif private: diff --git a/src/auth/models.cpp b/src/auth/models.cpp index 5415dc08d..e03948578 100644 --- a/src/auth/models.cpp +++ b/src/auth/models.cpp @@ -486,13 +486,13 @@ bool operator==(const Role &first, const Role &second) { } #ifdef MG_ENTERPRISE -void Databases::Add(const std::string &db) { +void Databases::Add(std::string_view db) { if (allow_all_) { grants_dbs_.clear(); allow_all_ = false; } grants_dbs_.emplace(db); - denies_dbs_.erase(db); + denies_dbs_.erase(std::string{db}); // TODO: C++23 use transparent key compare } void Databases::Remove(const std::string &db) { @@ -523,13 +523,13 @@ void Databases::DenyAll() { denies_dbs_.clear(); } -bool Databases::SetDefault(const std::string &db) { +bool Databases::SetDefault(std::string_view db) { if (!Contains(db)) return false; default_db_ = db; return true; } -[[nodiscard]] bool Databases::Contains(const std::string &db) const { +[[nodiscard]] bool Databases::Contains(std::string_view db) const { return !denies_dbs_.contains(db) && (allow_all_ || grants_dbs_.contains(db)); } diff --git a/src/auth/models.hpp b/src/auth/models.hpp index 9f66d3119..a8a85908b 100644 --- a/src/auth/models.hpp +++ b/src/auth/models.hpp @@ -246,7 +246,7 @@ bool operator==(const Role &first, const Role &second); #ifdef MG_ENTERPRISE class Databases final { public: - Databases() : grants_dbs_({dbms::kDefaultDB}), allow_all_(false), default_db_(dbms::kDefaultDB) {} + Databases() : grants_dbs_{std::string{dbms::kDefaultDB}}, allow_all_(false), default_db_(dbms::kDefaultDB) {} Databases(const Databases &) = default; Databases &operator=(const Databases &) = default; @@ -259,7 +259,7 @@ class Databases final { * * @param db name of the database to grant access to */ - void Add(const std::string &db); + void Add(std::string_view db); /** * @brief Remove database to the list of granted access. @@ -291,7 +291,7 @@ class Databases final { /** * @brief Set the default database. */ - bool SetDefault(const std::string &db); + bool SetDefault(std::string_view db); /** * @brief Checks if access is grated to the database. 
@@ -299,7 +299,7 @@ class Databases final { * @param db name of the database * @return true if allow_all and not denied or granted */ - bool Contains(const std::string &db) const; + bool Contains(std::string_view db) const; bool GetAllowAll() const { return allow_all_; } const std::set> &GetGrants() const { return grants_dbs_; } @@ -312,7 +312,7 @@ class Databases final { private: Databases(bool allow_all, std::set> grant, std::set> deny, - std::string default_db = dbms::kDefaultDB) + std::string default_db = std::string{dbms::kDefaultDB}) : grants_dbs_(std::move(grant)), denies_dbs_(std::move(deny)), allow_all_(allow_all), diff --git a/src/communication/bolt/v1/states/handlers.hpp b/src/communication/bolt/v1/states/handlers.hpp index 3b5a67b17..3ffcb6f55 100644 --- a/src/communication/bolt/v1/states/handlers.hpp +++ b/src/communication/bolt/v1/states/handlers.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -170,6 +170,7 @@ inline State HandleFailure(TSession &session, const std::exception &e) { spdlog::trace("Error trace: {}", p->trace()); } session.encoder_buffer_.Clear(); + auto code_message = ExceptionToErrorMessage(e); bool fail_sent = session.encoder_.MessageFailure({{"code", code_message.first}, {"message", code_message.second}}); if (!fail_sent) { diff --git a/src/communication/result_stream_faker.hpp b/src/communication/result_stream_faker.hpp index f8786dd43..779d039cc 100644 --- a/src/communication/result_stream_faker.hpp +++ b/src/communication/result_stream_faker.hpp @@ -44,7 +44,7 @@ class ResultStreamFaker { std::vector bvalues; bvalues.reserve(values.size()); for (const auto &value : values) { - auto maybe_value = memgraph::glue::ToBoltValue(value, *store_, memgraph::storage::View::NEW); + auto maybe_value = memgraph::glue::ToBoltValue(value, store_, memgraph::storage::View::NEW); MG_ASSERT(maybe_value.HasValue()); bvalues.push_back(std::move(*maybe_value)); } @@ -56,7 +56,7 @@ class ResultStreamFaker { void Summary(const std::map &summary) { std::map bsummary; for (const auto &item : summary) { - auto maybe_value = memgraph::glue::ToBoltValue(item.second, *store_, memgraph::storage::View::NEW); + auto maybe_value = memgraph::glue::ToBoltValue(item.second, store_, memgraph::storage::View::NEW); MG_ASSERT(maybe_value.HasValue()); bsummary.insert({item.first, std::move(*maybe_value)}); } diff --git a/src/dbms/constants.hpp b/src/dbms/constants.hpp index e7ea9987b..a0e9f6f22 100644 --- a/src/dbms/constants.hpp +++ b/src/dbms/constants.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -13,7 +13,8 @@ namespace memgraph::dbms { -constexpr static const char *kDefaultDB = "memgraph"; //!< Name of the default database +constexpr std::string_view kDefaultDB = "memgraph"; //!< Name of the default database +constexpr std::string_view kMultiTenantDir = "databases"; //!< Name of the multi-tenant directory #ifdef MG_EXPERIMENTAL_REPLICATION_MULTITENANCY constexpr bool allow_mt_repl = true; diff --git a/src/dbms/database.cpp b/src/dbms/database.cpp index 74ee13892..9a56d400a 100644 --- a/src/dbms/database.cpp +++ b/src/dbms/database.cpp @@ -26,7 +26,7 @@ Database::Database(storage::Config config, replication::ReplicationState &repl_s streams_{config.durability.storage_directory / "streams"}, plan_cache_{FLAGS_query_plan_cache_max_size}, repl_state_(&repl_state) { - if (config.storage_mode == memgraph::storage::StorageMode::ON_DISK_TRANSACTIONAL || config.force_on_disk || + if (config.salient.storage_mode == memgraph::storage::StorageMode::ON_DISK_TRANSACTIONAL || config.force_on_disk || utils::DirExists(config.disk.main_storage_directory)) { storage_ = std::make_unique(std::move(config)); } else { diff --git a/src/dbms/database.hpp b/src/dbms/database.hpp index 955c66998..0d87165b1 100644 --- a/src/dbms/database.hpp +++ b/src/dbms/database.hpp @@ -81,7 +81,14 @@ class Database { * * @return const std::string& */ - const std::string &id() const { return storage_->id(); } + const std::string &name() const { return storage_->name(); } + + /** + * @brief Unique storage identifier (UUID) + * + * @return const utils::UUID& + */ + const utils::UUID &uuid() const { return storage_->uuid(); } /** * @brief Returns the storage configuration diff --git a/src/dbms/database_handler.hpp b/src/dbms/database_handler.hpp index 617e614c3..de5f813ba 100644 --- a/src/dbms/database_handler.hpp +++ b/src/dbms/database_handler.hpp @@ -51,7 +51,7 @@ class DatabaseHandler : public Handler { * @param config Storage configuration * @return HandlerT::NewResult */ - HandlerT::NewResult New(std::string_view name, storage::Config config, replication::ReplicationState &repl_state) { + HandlerT::NewResult New(storage::Config config, replication::ReplicationState &repl_state) { // Control that no one is using the same data directory if (std::any_of(begin(), end(), [&](auto &elem) { auto db_acc = elem.second.access(); @@ -61,8 +61,7 @@ class DatabaseHandler : public Handler { spdlog::info("Tried to generate new storage using a claimed directory."); return NewError::EXISTS; } - config.name = name; // Set storage id via config - return HandlerT::New(std::piecewise_construct, name, config, repl_state); + return HandlerT::New(std::piecewise_construct, config.salient.name, config, repl_state); } /** diff --git a/src/dbms/dbms_handler.cpp b/src/dbms/dbms_handler.cpp index 0af9364bf..df929331e 100644 --- a/src/dbms/dbms_handler.cpp +++ b/src/dbms/dbms_handler.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -11,56 +11,202 @@ #include "dbms/dbms_handler.hpp" +#include +#include + +#include "dbms/constants.hpp" +#include "dbms/global.hpp" +#include "dbms/replication_client.hpp" +#include "spdlog/spdlog.h" +#include "utils/exceptions.hpp" +#include "utils/logging.hpp" +#include "utils/uuid.hpp" + namespace memgraph::dbms { + #ifdef MG_ENTERPRISE + +namespace { +constexpr std::string_view kDBPrefix = "database:"; // Key prefix for database durability +constexpr std::string_view kLastCommitedSystemTsKey = "last_commited_system_ts"; // Key for timestamp durability +} // namespace + +struct Durability { + enum class DurabilityVersion : uint8_t { + V0 = 0, + V1, + }; + + struct VersionException : public utils::BasicException { + VersionException() : utils::BasicException("Unsupported durability version!") {} + }; + + struct UnknownVersionException : public utils::BasicException { + UnknownVersionException() : utils::BasicException("Unable to parse the durability version!") {} + }; + + struct MigrationException : public utils::BasicException { + MigrationException() : utils::BasicException("Failed to migrate to the current durability version!") {} + }; + + static DurabilityVersion VersionCheck(std::optional val) { + if (!val) { + return DurabilityVersion::V0; + } + if (val == "V1") { + return DurabilityVersion::V1; + } + throw UnknownVersionException(); + }; + + static auto GenKey(std::string_view name) -> std::string { return fmt::format("{}{}", kDBPrefix, name); } + + static auto GenVal(utils::UUID uuid, std::filesystem::path rel_dir) { + nlohmann::json json; + json["uuid"] = uuid; + json["rel_dir"] = rel_dir; + // TODO: Serialize the configuration + return json.dump(); + } + + static void Migrate(kvstore::KVStore *durability, const std::filesystem::path &root) { + const auto ver_val = durability->Get("version"); + const auto ver = VersionCheck(ver_val); + + std::map to_put; + std::vector to_delete; + + // Update from V0 to V1 + if (ver == DurabilityVersion::V0) { + for (const auto &[key, val] : *durability) { + if (key == "version") continue; // Reserved key + // Generate a UUID + auto const uuid = utils::UUID(); + // New json values + auto new_key = GenKey(key); + auto path = root; + if (key != kDefaultDB) { // Special case for non-default DBs + // Move directory to new UUID dir + path = root / kMultiTenantDir / std::string{uuid}; + std::filesystem::path old_dir(root / kMultiTenantDir / key); + std::error_code ec; + std::filesystem::rename(old_dir, path, ec); + MG_ASSERT(!ec, "Failed to upgrade durability: cannot move default directory."); + } + // Generate json and update value + auto new_data = GenVal(uuid, std::filesystem::relative(path, root)); + to_put.emplace(std::move(new_key), std::move(new_data)); + to_delete.emplace_back(key); + } + } + + // Set version + durability->Put("version", "V1"); + // Update to the new key-value pairs + if (!durability->PutAndDeleteMultiple(to_put, to_delete)) { + throw MigrationException(); + } + } +}; + DbmsHandler::DbmsHandler( storage::Config config, memgraph::utils::Synchronized *auth, - bool recovery_on_startup, bool delete_on_drop) - : default_config_{std::move(config)}, - delete_on_drop_(delete_on_drop), - repl_state_{ReplicationStateRootPath(default_config_)} { + bool recovery_on_startup) + : default_config_{std::move(config)}, 
repl_state_{ReplicationStateRootPath(default_config_)} { // TODO: Decouple storage config from dbms config // TODO: Save individual db configs inside the kvstore and restore from there - storage::UpdatePaths(default_config_, default_config_.durability.storage_directory / "databases"); - const auto &db_dir = default_config_.durability.storage_directory; + + /* + * FILESYSTEM MANIPULATION + */ + const auto &root = default_config_.durability.storage_directory; + storage::UpdatePaths(default_config_, root); + const auto &db_dir = default_config_.durability.storage_directory / kMultiTenantDir; + // TODO: Unify durability and wal const auto durability_dir = db_dir / ".durability"; utils::EnsureDirOrDie(db_dir); utils::EnsureDirOrDie(durability_dir); durability_ = std::make_unique(durability_dir); - // Generate the default database - MG_ASSERT(!NewDefault_().HasError(), "Failed while creating the default DB."); + /* + * DURABILITY + */ + // Migrate durability + Durability::Migrate(durability_.get(), root); + auto directories = std::set{std::string{kDefaultDB}}; // Recover previous databases if (recovery_on_startup) { - for (const auto &[name, _] : *durability_) { - if (name == kDefaultDB) continue; // Already set - spdlog::info("Restoring database {}.", name); - MG_ASSERT(!New_(name).HasError(), "Failed while creating database {}.", name); + auto it = durability_->begin(std::string(kDBPrefix)); + auto end = durability_->end(std::string(kDBPrefix)); + for (; it != end; ++it) { + const auto &[key, config_json] = *it; + const auto name = key.substr(kDBPrefix.size()); + auto json = nlohmann::json::parse(config_json); + const auto uuid = json.at("uuid").get(); + const auto rel_dir = json.at("rel_dir").get(); + spdlog::info("Restoring database {} at {}.", name, rel_dir); + auto new_db = New_(name, uuid, rel_dir); + MG_ASSERT(!new_db.HasError(), "Failed while creating database {}.", name); + directories.emplace(rel_dir.filename()); spdlog::info("Database {} restored.", name); } + // Read the last timestamp + auto lcst = durability_->Get(kLastCommitedSystemTsKey); + if (lcst) { + last_commited_system_timestamp_ = std::stoul(*lcst); + system_timestamp_ = last_commited_system_timestamp_; + } } else { // Clear databases from the durability list and auth auto locked_auth = auth->Lock(); - for (const auto &[name, _] : *durability_) { + auto it = durability_->begin(std::string{kDBPrefix}); + auto end = durability_->end(std::string{kDBPrefix}); + for (; it != end; ++it) { + const auto &[key, _] = *it; + const auto name = key.substr(kDBPrefix.size()); if (name == kDefaultDB) continue; locked_auth->DeleteDatabase(name); - durability_->Delete(name); + durability_->Delete(key); + } + // Delete the last timestamp + durability_->Delete(kLastCommitedSystemTsKey); + } + + /* + * DATABASES CLEAN UP + */ + // Clean the unused directories + for (const auto &entry : std::filesystem::directory_iterator(db_dir)) { + const auto &name = entry.path().filename().string(); + if (entry.is_directory() && !name.empty() && name.front() != '.') { + auto itr = directories.find(name); + if (itr == directories.end()) { + std::error_code dummy; + std::filesystem::remove_all(entry, dummy); + } else { + directories.erase(itr); + } } } + /* + * DEFAULT DB SETUP + */ + // Setup the default DB + SetupDefault_(); + + /* + * REPLICATION RECOVERY AND STARTUP + */ // Startup replication state (if recovered at startup) - auto replica = [this](replication::RoleReplicaData const &data) { - // Register handlers - 
InMemoryReplicationHandlers::Register(this, *data.server); - if (!data.server->Start()) { - spdlog::error("Unable to start the replication server."); - return false; - } - return true; - }; - // Replication frequent check start + auto replica = [this](replication::RoleReplicaData const &data) { return StartRpcServer(*this, data); }; + // Replication recovery and frequent check start auto main = [this](replication::RoleMainData &data) { + for (auto &client : data.registered_replicas_) { + SystemRestore(client); + } + ForEach([this](DatabaseAccess db) { RecoverReplication(db); }); for (auto &client : data.registered_replicas_) { StartReplicaClient(*this, client); } @@ -69,7 +215,226 @@ DbmsHandler::DbmsHandler( // Startup proccess for main/replica MG_ASSERT(std::visit(memgraph::utils::Overloaded{replica, main}, repl_state_.ReplicationData()), "Replica recovery failure!"); -} -#endif + // Warning + if (default_config_.durability.snapshot_wal_mode == storage::Config::Durability::SnapshotWalMode::DISABLED && + repl_state_.IsMain()) { + spdlog::warn( + "The instance has the MAIN replication role, but durability logs and snapshots are disabled. Please " + "consider " + "enabling durability by using --storage-snapshot-interval-sec and --storage-wal-enabled flags because " + "without write-ahead logs this instance is not replicating any data."); + } +} + +DbmsHandler::DeleteResult DbmsHandler::TryDelete(std::string_view db_name) { + std::lock_guard wr(lock_); + if (db_name == kDefaultDB) { + // MSG cannot delete the default db + return DeleteError::DEFAULT_DB; + } + + // Get DB config for the UUID and disk clean up + const auto conf = db_handler_.GetConfig(db_name); + if (!conf) { + return DeleteError::NON_EXISTENT; + } + const auto &storage_path = conf->durability.storage_directory; + const auto &uuid = conf->salient.uuid; + + // Check if db exists + try { + // Low level handlers + if (!db_handler_.TryDelete(db_name)) { + return DeleteError::USING; + } + } catch (utils::BasicException &) { + return DeleteError::NON_EXISTENT; + } + + // Remove from durability list + if (durability_) durability_->Delete(Durability::GenKey(db_name)); + + // Delete disk storage + std::error_code ec; + (void)std::filesystem::remove_all(storage_path, ec); + if (ec) { + spdlog::error(R"(Failed to clean disk while deleting database "{}" stored in {})", db_name, storage_path); + } + + // Success + // Save delta + if (system_transaction_) { + system_transaction_->delta.emplace(SystemTransaction::Delta::drop_database, uuid); + } + return {}; +} + +DbmsHandler::DeleteResult DbmsHandler::Delete(std::string_view db_name) { + auto wr = std::lock_guard(lock_); + return Delete_(db_name); +} + +DbmsHandler::DeleteResult DbmsHandler::Delete(utils::UUID uuid) { + auto wr = std::lock_guard(lock_); + std::string db_name; + try { + const auto db = Get_(uuid); + db_name = db->name(); + } catch (const UnknownDatabaseException &) { + return DeleteError::NON_EXISTENT; + } + return Delete_(db_name); +} + +DbmsHandler::NewResultT DbmsHandler::New_(storage::Config storage_config) { + auto new_db = db_handler_.New(storage_config, repl_state_); + + if (new_db.HasValue()) { // Success + // Save delta + if (system_transaction_) { + system_transaction_->delta.emplace(SystemTransaction::Delta::create_database, storage_config.salient); + } + UpdateDurability(storage_config); + return new_db.GetValue(); + } + return new_db.GetError(); +} + +DbmsHandler::DeleteResult DbmsHandler::Delete_(std::string_view db_name) { + if (db_name == kDefaultDB) { + // 
MSG cannot delete the default db + return DeleteError::DEFAULT_DB; + } + + const auto storage_path = StorageDir_(db_name); + if (!storage_path) return DeleteError::NON_EXISTENT; + + { + auto db = db_handler_.Get(db_name); + if (!db) return DeleteError::NON_EXISTENT; + // TODO: ATM we assume REPLICA won't have streams, + // this is a best effort approach just in case they do; + // there is still a subtle data race: stream manipulation + // can occur while we are dropping the database + db->prepare_for_deletion(); + auto &database = *db->get(); + database.streams()->StopAll(); + database.streams()->DropAll(); + database.thread_pool()->Shutdown(); + } + + // Remove from durability list + if (durability_) durability_->Delete(Durability::GenKey(db_name)); + + // Check if db exists + // Low level handlers + db_handler_.DeferDelete(db_name, [storage_path = *storage_path, db_name = std::string{db_name}]() { + // Delete disk storage + std::error_code ec; + (void)std::filesystem::remove_all(storage_path, ec); + if (ec) { + spdlog::error(R"(Failed to clean disk while deleting database "{}" stored in {})", db_name, storage_path); + } + }); + + return {}; // Success +} + +void DbmsHandler::UpdateDurability(const storage::Config &config, std::optional rel_dir) { + if (!durability_) return; + // Save database in a list of active databases + const auto &key = Durability::GenKey(config.salient.name); + if (rel_dir == std::nullopt) + rel_dir = + std::filesystem::relative(config.durability.storage_directory, default_config_.durability.storage_directory); + const auto &val = Durability::GenVal(config.salient.uuid, *rel_dir); + durability_->Put(key, val); +} + +AllSyncReplicaStatus DbmsHandler::Commit() { + if (system_transaction_ == std::nullopt || system_transaction_->delta == std::nullopt) + return AllSyncReplicaStatus::AllCommitsConfirmed; // Nothing to commit + const auto &delta = *system_transaction_->delta; + + auto sync_status = AllSyncReplicaStatus::AllCommitsConfirmed; + // TODO Create a system client that can handle all of this automatically + switch (delta.action) { + using enum SystemTransaction::Delta::Action; + case CREATE_DATABASE: { + // Replication + auto main_handler = [&](memgraph::replication::RoleMainData &main_data) { + // TODO: data race issue? registered_replicas_ access not protected + // This is sync in any case, as this is the startup + for (auto &client : main_data.registered_replicas_) { + bool completed = SteamAndFinalizeDelta( + client, + [](const storage::replication::CreateDatabaseRes &response) { + return response.result != storage::replication::CreateDatabaseRes::Result::FAILURE; + }, + std::string(main_data.epoch_.id()), last_commited_system_timestamp_, + system_transaction_->system_timestamp, delta.config); + // TODO: reduce duplicate code + if (!completed && client.mode_ == replication::ReplicationMode::SYNC) { + sync_status = AllSyncReplicaStatus::SomeCommitsUnconfirmed; + } + } + // Sync database with REPLICAs + RecoverReplication(Get_(delta.config.name)); + }; + auto replica_handler = [](memgraph::replication::RoleReplicaData &) { /* Nothing to do */ }; + std::visit(utils::Overloaded{main_handler, replica_handler}, repl_state_.ReplicationData()); + } break; + case DROP_DATABASE: { + // Replication + auto main_handler = [&](memgraph::replication::RoleMainData &main_data) { + // TODO: data race issue? 
registered_replicas_ access not protected + // This is sync in any case, as this is the startup + for (auto &client : main_data.registered_replicas_) { + bool completed = SteamAndFinalizeDelta( + client, + [](const storage::replication::DropDatabaseRes &response) { + return response.result != storage::replication::DropDatabaseRes::Result::FAILURE; + }, + std::string(main_data.epoch_.id()), last_commited_system_timestamp_, + system_transaction_->system_timestamp, delta.uuid); + // TODO: reduce duplicate code + if (!completed && client.mode_ == replication::ReplicationMode::SYNC) { + sync_status = AllSyncReplicaStatus::SomeCommitsUnconfirmed; + } + } + }; + auto replica_handler = [](memgraph::replication::RoleReplicaData &) { /* Nothing to do */ }; + std::visit(utils::Overloaded{main_handler, replica_handler}, repl_state_.ReplicationData()); + } break; + } + + durability_->Put(kLastCommitedSystemTsKey, std::to_string(system_transaction_->system_timestamp)); + last_commited_system_timestamp_ = system_transaction_->system_timestamp; + ResetSystemTransaction(); + return sync_status; +} + +#else // not MG_ENTERPRISE + +AllSyncReplicaStatus DbmsHandler::Commit() { + if (system_transaction_ == std::nullopt || system_transaction_->delta == std::nullopt) { + return AllSyncReplicaStatus::AllCommitsConfirmed; // Nothing to commit + } + const auto &delta = *system_transaction_->delta; + + switch (delta.action) { + using enum SystemTransaction::Delta::Action; + case CREATE_DATABASE: + case DROP_DATABASE: + /* Community edition doesn't support multi-tenant replication */ + break; + } + + last_commited_system_timestamp_ = system_transaction_->system_timestamp; + ResetSystemTransaction(); + return AllSyncReplicaStatus::AllCommitsConfirmed; +} + +#endif } // namespace memgraph::dbms diff --git a/src/dbms/dbms_handler.hpp b/src/dbms/dbms_handler.hpp index f9aa621dc..6520370e0 100644 --- a/src/dbms/dbms_handler.hpp +++ b/src/dbms/dbms_handler.hpp @@ -12,34 +12,36 @@ #pragma once #include -#include +#include #include #include #include #include #include -#include -#include #include -#include +#include #include "auth/auth.hpp" #include "constants.hpp" #include "dbms/database.hpp" #include "dbms/inmemory/replication_handlers.hpp" +#include "dbms/replication_handler.hpp" +#include "kvstore/kvstore.hpp" +#include "replication/replication_client.hpp" +#include "storage/v2/config.hpp" +#include "storage/v2/replication/enums.hpp" +#include "storage/v2/replication/rpc.hpp" +#include "storage/v2/transaction.hpp" +#include "utils/thread_pool.hpp" #ifdef MG_ENTERPRISE #include "dbms/database_handler.hpp" #endif -#include "dbms/replication_client.hpp" +#include "dbms/transaction.hpp" #include "global.hpp" #include "query/config.hpp" #include "query/interpreter_context.hpp" #include "spdlog/spdlog.h" -#include "storage/v2/durability/durability.hpp" -#include "storage/v2/durability/paths.hpp" #include "storage/v2/isolation_level.hpp" -#include "utils/exceptions.hpp" -#include "utils/file.hpp" #include "utils/logging.hpp" #include "utils/result.hpp" #include "utils/rw_lock.hpp" @@ -48,6 +50,11 @@ namespace memgraph::dbms { +enum class AllSyncReplicaStatus { + AllCommitsConfirmed, + SomeCommitsUnconfirmed, +}; + struct Statistics { uint64_t num_vertex; //!< Sum of vertexes in every database uint64_t num_edges; //!< Sum of edges in every database @@ -102,11 +109,10 @@ class DbmsHandler { * @param configs storage configuration * @param auth pointer to the global authenticator * @param recovery_on_startup restore databases (and 
its content) and authentication data - * @param delete_on_drop when dropping delete any associated directories on disk */ DbmsHandler(storage::Config config, memgraph::utils::Synchronized *auth, - bool recovery_on_startup, bool delete_on_drop); // TODO If more arguments are added use a config strut + bool recovery_on_startup); // TODO If more arguments are added use a config struct #else /** * @brief Initialize the handler. A single database is supported in community edition. @@ -116,10 +122,12 @@ class DbmsHandler { DbmsHandler(storage::Config config) : repl_state_{ReplicationStateRootPath(config)}, db_gatekeeper_{[&] { - config.name = kDefaultDB; + config.salient.name = kDefaultDB; return std::move(config); }(), - repl_state_} {} + repl_state_} { + RecoverReplication(Get()); + } #endif #ifdef MG_ENTERPRISE @@ -131,9 +139,56 @@ class DbmsHandler { */ NewResultT New(const std::string &name) { std::lock_guard wr(lock_); - return New_(name, name); + const auto uuid = utils::UUID{}; + return New_(name, uuid); } + /** + * @brief Create new if name/uuid do not match any database. Drop and recreate if database already present. + * @note Default database is not dropped, only its UUID is updated and only if the database is clean. + * + * @param config desired salient config + * @return NewResultT context on success, error on failure + */ + NewResultT Update(const storage::SalientConfig &config) { + std::lock_guard wr(lock_); + auto new_db = New_(config); + if (new_db.HasValue() || new_db.GetError() != NewError::EXISTS) { + // NOTE: If db already exists we retry below + return new_db; + } + + spdlog::debug("Trying to create db '{}' on replica which already exists.", config.name); + + auto db = Get_(config.name); + if (db->uuid() == config.uuid) { // Same db + return db; + } + + spdlog::debug("Different UUIDs"); + + // TODO: Fix this hack + if (config.name == kDefaultDB) { + if (db->storage()->repl_storage_state_.last_commit_timestamp_ != storage::kTimestampInitialId) { + spdlog::debug("Default storage is not clean, cannot update UUID..."); + return NewError::GENERIC; // Update error + } + spdlog::debug("Update default db's UUID"); + // Default db cannot be deleted and remade, have to just update the UUID + db->storage()->config_.salient.uuid = config.uuid; + UpdateDurability(db->storage()->config_, "."); + return db; + } + + spdlog::debug("Drop database and recreate with the correct UUID"); + // Defer drop + (void)Delete_(db->name()); + // Second attempt + return New_(config); + } + + void UpdateDurability(const storage::Config &config, std::optional rel_dir = {}); + /** * @brief Get the context associated with the "name" database * @@ -145,6 +200,19 @@ class DbmsHandler { std::shared_lock rd(lock_); return Get_(name); } + + /** + * @brief Get the context associated with the UUID database + * + * @param uuid + * @return DatabaseAccess + * @throw UnknownDatabaseException if database not found + */ + DatabaseAccess Get(const utils::UUID &uuid) { + std::shared_lock rd(lock_); + return Get_(uuid); + } + #else /** * @brief Get the context associated with the default database @@ -160,50 +228,28 @@ class DbmsHandler { #ifdef MG_ENTERPRISE /** - * @brief Delete database. + * @brief Attempt to delete database. 
* * @param db_name database name * @return DeleteResult error on failure */ - DeleteResult Delete(const std::string &db_name) { - std::lock_guard wr(lock_); - if (db_name == kDefaultDB) { - // MSG cannot delete the default db - return DeleteError::DEFAULT_DB; - } + DeleteResult TryDelete(std::string_view db_name); - const auto storage_path = StorageDir_(db_name); - if (!storage_path) return DeleteError::NON_EXISTENT; + /** + * @brief Delete or defer deletion of database. + * + * @param db_name database name + * @return DeleteResult error on failure + */ + DeleteResult Delete(std::string_view db_name); - // Check if db exists - try { - // Low level handlers - if (!db_handler_.Delete(db_name)) { - return DeleteError::USING; - } - } catch (utils::BasicException &) { - return DeleteError::NON_EXISTENT; - } - - // Remove from durability list - if (durability_) durability_->Delete(db_name); - - // Delete disk storage - if (delete_on_drop_) { - std::error_code ec; - (void)std::filesystem::remove_all(*storage_path, ec); - if (ec) { - spdlog::error("Failed to clean disk while deleting database \"{}\".", db_name); - defunct_dbs_.emplace(db_name); - return DeleteError::DISK_FAIL; - } - } - - // Delete from defunct_dbs_ (in case a second delete call was successful) - defunct_dbs_.erase(db_name); - - return {}; // Success - } + /** + * @brief Delete or defer deletion of database. + * + * @param uuid database UUID + * @return DeleteResult error on failure + */ + DeleteResult Delete(utils::UUID uuid); #endif /** @@ -216,7 +262,7 @@ class DbmsHandler { std::shared_lock rd(lock_); return db_handler_.All(); #else - return {db_gatekeeper_.access()->get()->id()}; + return {db_gatekeeper_.access()->get()->name()}; #endif } @@ -305,7 +351,7 @@ class DbmsHandler { auto db_acc_opt = db_gk.access(); if (db_acc_opt) { auto &db_acc = *db_acc_opt; - spdlog::debug("Restoring trigger for database \"{}\"", db_acc->id()); + spdlog::debug("Restoring trigger for database \"{}\"", db_acc->name()); auto storage_accessor = db_acc->Access(); auto dba = memgraph::query::DbAccessor{storage_accessor.get()}; db_acc->trigger_store()->RestoreTriggers(&ic->ast_cache, &dba, ic->config.query, ic->auth_checker); @@ -330,7 +376,7 @@ class DbmsHandler { auto db_acc = db_gk.access(); if (db_acc) { auto *db = db_acc->get(); - spdlog::debug("Restoring streams for database \"{}\"", db->id()); + spdlog::debug("Restoring streams for database \"{}\"", db->name()); db->streams()->RestoreStreams(*db_acc, ic); } } @@ -341,7 +387,7 @@ class DbmsHandler { * * @param f */ - void ForEach(auto f) { + void ForEach(std::invocable auto f) { #ifdef MG_ENTERPRISE std::shared_lock rd(lock_); for (auto &[_, db_gk] : db_handler_) { @@ -351,33 +397,103 @@ class DbmsHandler { #endif auto db_acc = db_gk.access(); if (db_acc) { // This isn't an error, just a defunct db - f(db_acc->get()); + f(*db_acc); } } } - /** - * @brief todo - * - * @param f - */ - void ForOne(auto f) { + void NewSystemTransaction() { + DMG_ASSERT(!system_transaction_, "Already running a system transaction"); + system_transaction_.emplace(++system_timestamp_); + } + + void ResetSystemTransaction() { system_transaction_.reset(); } + + //! \tparam RPC An rpc::RequestResponse + //! \tparam Args the args type + //! \param client the client to use for rpc communication + //! \param check predicate to check response is ok + //! \param args arguments to forward to the rpc request + //! 
\return If replica stream is completed or enqueued + template + bool SteamAndFinalizeDelta(auto &client, auto &&check, Args &&...args) { + try { + auto stream = client.rpc_client_.template Stream(std::forward(args)...); + auto task = [&client, check = std::forward(check), stream = std::move(stream)]() mutable { + if (stream.IsDefunct()) { + client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; }); + return false; + } + try { + if (check(stream.AwaitResponse())) { + return true; + } + } catch (memgraph::rpc::GenericRpcFailedException const &e) { + // swallow error, fallthrough to error handling + } + // This replica needs SYSTEM recovery + client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; }); + return false; + }; + + if (client.mode_ == memgraph::replication::ReplicationMode::ASYNC) { + client.thread_pool_.AddTask([task = utils::CopyMovableFunctionWrapper{std::move(task)}]() mutable { task(); }); + return true; + } + + return task(); + } catch (memgraph::rpc::GenericRpcFailedException const &e) { + // This replica needs SYSTEM recovery + client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; }); + return false; + } + }; + + AllSyncReplicaStatus Commit(); + + auto LastCommitedTS() const -> uint64_t { return last_commited_system_timestamp_; } + void SetLastCommitedTS(uint64_t new_ts) { last_commited_system_timestamp_.store(new_ts); } + #ifdef MG_ENTERPRISE - std::shared_lock rd(lock_); - for (auto &[_, db_gk] : db_handler_) { - auto db_acc = db_gk.access(); - if (db_acc) { // This isn't an error, just a defunct db - if (f(db_acc->get())) break; // Run until the first successful one + // When being called by intepreter no need to gain lock, it should already be under a system transaction + // But concurrently the FrequentCheck is running and will need to lock before reading last_commited_system_timestamp_ + template + void SystemRestore(replication::ReplicationClient &client) { + // Check if system is up to date + if (client.state_.WithLock( + [](auto &state) { return state == memgraph::replication::ReplicationClient::State::READY; })) + return; + + // Try to recover... 
+ { + auto [database_configs, last_commited_system_timestamp] = std::invoke([&] { + auto sys_guard = + std::unique_lock{system_lock_, std::defer_lock}; // ensure no other system transaction in progress + if constexpr (REQUIRE_LOCK) { + sys_guard.lock(); + } + auto configs = std::vector{}; + ForEach([&configs](DatabaseAccess acc) { configs.emplace_back(acc->config().salient); }); + return std::pair{configs, last_commited_system_timestamp_.load()}; + }); + try { + auto stream = client.rpc_client_.Stream(last_commited_system_timestamp, + std::move(database_configs)); + const auto response = stream.AwaitResponse(); + if (response.result == storage::replication::SystemRecoveryRes::Result::FAILURE) { + client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; }); + return; + } + } catch (memgraph::rpc::GenericRpcFailedException const &e) { + client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; }); + return; } } -#else - { - auto db_acc = db_gatekeeper_.access(); - MG_ASSERT(db_acc, "Should always have the database"); - f(db_acc->get()); - } -#endif + + // Successfully recovered + client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::READY; }); } +#endif private: #ifdef MG_ENTERPRISE @@ -387,7 +503,7 @@ class DbmsHandler { * @param name Database name * @return std::optional */ - std::optional StorageDir_(const std::string &name) { + std::optional StorageDir_(std::string_view name) { const auto conf = db_handler_.GetConfig(name); if (conf) { return conf->durability.storage_directory; @@ -400,105 +516,108 @@ class DbmsHandler { * @brief Create a new Database associated with the "name" database * * @param name name of the database + * @param uuid undelying RocksDB directory * @return NewResultT context on success, error on failure */ - NewResultT New_(const std::string &name) { return New_(name, name); } + NewResultT New_(std::string_view name, utils::UUID uuid, std::optional rel_dir = {}) { + auto config_copy = default_config_; + config_copy.salient.name = name; + config_copy.salient.uuid = uuid; + spdlog::debug("Creating database '{}' - '{}'", name, std::string{uuid}); + if (rel_dir) { + storage::UpdatePaths(config_copy, default_config_.durability.storage_directory / *rel_dir); + } else { + storage::UpdatePaths(config_copy, + default_config_.durability.storage_directory / kMultiTenantDir / std::string{uuid}); + } + return New_(std::move(config_copy)); + } /** - * @brief Create a new Database associated with the "name" database + * @brief Create a new Database using the passed configuration * - * @param name name of the database - * @param storage_subdir undelying RocksDB directory + * @param config configuration to be used * @return NewResultT context on success, error on failure */ - NewResultT New_(const std::string &name, std::filesystem::path storage_subdir) { + NewResultT New_(const storage::SalientConfig &config) { auto config_copy = default_config_; - storage::UpdatePaths(config_copy, default_config_.durability.storage_directory / storage_subdir); - return New_(name, config_copy); + config_copy.salient = config; // name, uuid, mode, etc + UpdatePaths(config_copy, config_copy.durability.storage_directory / kMultiTenantDir / std::string{config.uuid}); + return New_(std::move(config_copy)); } /** * @brief Create a new Database associated with the "name" database * - * @param name name of the database * @param storage_config storage configuration * 
@return NewResultT context on success, error on failure */ - NewResultT New_(const std::string &name, storage::Config &storage_config) { - if (defunct_dbs_.contains(name)) { - spdlog::warn("Failed to generate database due to the unknown state of the previously defunct database \"{}\".", - name); - return NewError::DEFUNCT; - } + NewResultT New_(storage::Config storage_config); - auto new_db = db_handler_.New(name, storage_config, repl_state_); - if (new_db.HasValue()) { - // Success - if (durability_) durability_->Put(name, "ok"); // TODO: Serialize the configuration? - return new_db.GetValue(); - } - return new_db.GetError(); - } + // TODO: new overload of Delete_ with DatabaseAccess + DeleteResult Delete_(std::string_view db_name); /** * @brief Create a new Database associated with the default database * * @return NewResultT context on success, error on failure */ - NewResultT NewDefault_() { - // Create the default DB in the root (this is how it was done pre multi-tenancy) - auto res = New_(kDefaultDB, ".."); - if (res.HasValue()) { - // For back-compatibility... - // Recreate the dbms layout for the default db and symlink to the root - const auto dir = StorageDir_(kDefaultDB); - MG_ASSERT(dir, "Failed to find storage path."); - const auto main_dir = *dir / "databases" / kDefaultDB; + void SetupDefault_() { + try { + Get(kDefaultDB); + } catch (const UnknownDatabaseException &) { + // No default DB restored, create it + MG_ASSERT(New_(kDefaultDB, {/* random UUID */}, ".").HasValue(), "Failed while creating the default database"); + } - if (!std::filesystem::exists(main_dir)) { - std::filesystem::create_directory(main_dir); - } + // For back-compatibility... + // Recreate the dbms layout for the default db and symlink to the root + const auto dir = StorageDir_(kDefaultDB); + MG_ASSERT(dir, "Failed to find storage path."); + const auto main_dir = *dir / kMultiTenantDir / kDefaultDB; - // Force link on-disk directories - const auto conf = db_handler_.GetConfig(kDefaultDB); - MG_ASSERT(conf, "No configuration for the default database."); - const auto &tmp_conf = conf->disk; - std::vector to_link{ - tmp_conf.main_storage_directory, tmp_conf.label_index_directory, - tmp_conf.label_property_index_directory, tmp_conf.unique_constraints_directory, - tmp_conf.name_id_mapper_directory, tmp_conf.id_name_mapper_directory, - tmp_conf.durability_directory, tmp_conf.wal_directory, - }; + if (!std::filesystem::exists(main_dir)) { + std::filesystem::create_directory(main_dir); + } - // Add in-memory paths - // Some directories are redundant (skip those) - const std::vector skip{".lock", "audit_log", "auth", "databases", "internal_modules", "settings"}; - for (auto const &item : std::filesystem::directory_iterator{*dir}) { - const auto dir_name = std::filesystem::relative(item.path(), item.path().parent_path()); - if (std::find(skip.begin(), skip.end(), dir_name) != skip.end()) continue; - to_link.push_back(item.path()); - } + // Force link on-disk directories + const auto conf = db_handler_.GetConfig(kDefaultDB); + MG_ASSERT(conf, "No configuration for the default database."); + const auto &tmp_conf = conf->disk; + std::vector to_link{ + tmp_conf.main_storage_directory, tmp_conf.label_index_directory, + tmp_conf.label_property_index_directory, tmp_conf.unique_constraints_directory, + tmp_conf.name_id_mapper_directory, tmp_conf.id_name_mapper_directory, + tmp_conf.durability_directory, tmp_conf.wal_directory, + }; - // Symlink to root dir - for (auto const &item : to_link) { - const auto dir_name = 
std::filesystem::relative(item, item.parent_path()); - const auto link = main_dir / dir_name; - const auto to = std::filesystem::relative(item, main_dir); - if (!std::filesystem::is_symlink(link) && !std::filesystem::exists(link)) { - std::filesystem::create_directory_symlink(to, link); - } else { // Check existing link - std::error_code ec; - const auto test_link = std::filesystem::read_symlink(link, ec); - if (ec || test_link != to) { - MG_ASSERT(false, - "Memgraph storage directory incompatible with new version.\n" - "Please use a clean directory or remove \"{}\" and try again.", - link.string()); - } + // Add in-memory paths + // Some directories are redundant (skip those) + const std::vector skip{".lock", "audit_log", "auth", "databases", "internal_modules", "settings"}; + for (auto const &item : std::filesystem::directory_iterator{*dir}) { + const auto dir_name = std::filesystem::relative(item.path(), item.path().parent_path()); + if (std::find(skip.begin(), skip.end(), dir_name) != skip.end()) continue; + to_link.push_back(item.path()); + } + + // Symlink to root dir + for (auto const &item : to_link) { + const auto dir_name = std::filesystem::relative(item, item.parent_path()); + const auto link = main_dir / dir_name; + const auto to = std::filesystem::relative(item, main_dir); + if (!std::filesystem::is_symlink(link) && !std::filesystem::exists(link)) { + std::filesystem::create_directory_symlink(to, link); + } else { // Check existing link + std::error_code ec; + const auto test_link = std::filesystem::read_symlink(link, ec); + if (ec || test_link != to) { + MG_ASSERT(false, + "Memgraph storage directory incompatible with new version.\n" + "Please use a clean directory or remove \"{}\" and try again.", + link.string()); } } } - return res; } /** @@ -516,17 +635,56 @@ class DbmsHandler { throw UnknownDatabaseException("Tried to retrieve an unknown database \"{}\".", name); } + /** + * @brief Get the context associated with the UUID database + * + * @param uuid + * @return DatabaseAccess + * @throw UnknownDatabaseException if database not found + */ + DatabaseAccess Get_(const utils::UUID &uuid) { + // TODO Speed up + for (auto &[_, db_gk] : db_handler_) { + auto acc = db_gk.access(); + if (acc->get()->uuid() == uuid) { + return std::move(*acc); + } + } + throw UnknownDatabaseException("Tried to retrieve an unknown database with UUID \"{}\".", std::string{uuid}); + } +#endif + + void RecoverReplication(DatabaseAccess db_acc) { + if (allow_mt_repl || db_acc->name() == dbms::kDefaultDB) { + // Handle global replication state + spdlog::info("Replication configuration will be stored and will be automatically restored in case of a crash."); + // RECOVER REPLICA CONNECTIONS + memgraph::dbms::RestoreReplication(repl_state_, std::move(db_acc)); + } else if (const ::memgraph::replication::RoleMainData *data = + std::get_if<::memgraph::replication::RoleMainData>(&repl_state_.ReplicationData()); + data && !data->registered_replicas_.empty()) { + spdlog::warn("Multi-tenant replication is currently not supported!"); + } + } + +#ifdef MG_ENTERPRISE mutable LockT lock_{utils::RWLock::Priority::READ}; //!< protective lock storage::Config default_config_; //!< Storage configuration used when creating new databases DatabaseHandler db_handler_; //!< multi-tenancy storage handler std::unique_ptr durability_; //!< list of active dbs (pointer so we can postpone its creation) - bool delete_on_drop_; //!< Flag defining if dropping storage also deletes its directory - std::set defunct_dbs_; //!< Databases 
that are in an unknown state due to various failures #endif + // TODO: Make an api + public: + utils::ResourceLock system_lock_{}; //!> Ensure exclusive access for system queries + private: + std::optional system_transaction_; //!< Current system transaction (only one at a time) + uint64_t system_timestamp_{storage::kTimestampInitialId}; //!< System timestamp + std::atomic_uint64_t last_commited_system_timestamp_{ + storage::kTimestampInitialId}; //!< Last commited system timestamp replication::ReplicationState repl_state_; //!< Global replication state #ifndef MG_ENTERPRISE mutable utils::Gatekeeper db_gatekeeper_; //!< Single databases gatekeeper #endif -}; +}; // namespace memgraph::dbms } // namespace memgraph::dbms diff --git a/src/dbms/handler.hpp b/src/dbms/handler.hpp index 568b2fc7c..53724dabe 100644 --- a/src/dbms/handler.hpp +++ b/src/dbms/handler.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -21,6 +21,7 @@ #include "utils/exceptions.hpp" #include "utils/gatekeeper.hpp" #include "utils/result.hpp" +#include "utils/thread_pool.hpp" namespace memgraph::dbms { @@ -82,7 +83,7 @@ class Handler { * @return true on success * @throw BasicException */ - bool Delete(const std::string &name) { + bool TryDelete(std::string_view name) { if (auto itr = items_.find(name); itr != items_.end()) { auto db_acc = itr->second.access(); if (db_acc && db_acc->try_delete()) { @@ -92,9 +93,42 @@ class Handler { } return false; } + // TODO: Change to return enum throw utils::BasicException("Unknown item \"{}\".", name); } + /** + * @brief Delete or defunct the context associated with the name. + * + * @param name Name associated with the context to delete + * @param post_delete_func What to do after deletion has happened + */ + template + void DeferDelete(std::string_view name, Func &&post_delete_func) { + auto itr = items_.find(name); + if (itr == items_.end()) return; + + auto db_acc = itr->second.access(); + if (!db_acc) return; + + if (db_acc->try_delete()) { + // Delete the database now + db_acc->reset(); + post_delete_func(); + } else { + // Defer deletion + db_acc->reset(); + // TODO: Make sure this shuts down correctly + auto task = [gk = std::move(itr->second), post_delete_func = std::forward(post_delete_func)]() mutable { + gk.~Gatekeeper(); + post_delete_func(); + }; + defer_pool_.AddTask(utils::CopyMovableFunctionWrapper{std::move(task)}); + } + // In any case remove from handled map + items_.erase(itr); + } + /** * @brief Check if a name is already used. * @@ -120,6 +154,7 @@ class Handler { private: std::unordered_map, string_hash, std::equal_to<>> items_; //!< map to all active items + utils::ThreadPool defer_pool_{1}; }; } // namespace memgraph::dbms diff --git a/src/dbms/inmemory/replication_handlers.cpp b/src/dbms/inmemory/replication_handlers.cpp index 2b8d6a86b..24ddcfd02 100644 --- a/src/dbms/inmemory/replication_handlers.cpp +++ b/src/dbms/inmemory/replication_handlers.cpp @@ -10,6 +10,7 @@ // licenses/APL.txt. 
#include "dbms/inmemory/replication_handlers.hpp" +#include #include #include "dbms/constants.hpp" #include "dbms/dbms_handler.hpp" @@ -49,29 +50,29 @@ std::pair ReadDelta(storage::durability::BaseDecoder *de } }; -std::optional GetDatabaseAccessor(dbms::DbmsHandler *dbms_handler, std::string_view db_name) { +std::optional GetDatabaseAccessor(dbms::DbmsHandler *dbms_handler, const utils::UUID &uuid) { try { #ifdef MG_ENTERPRISE - auto acc = dbms_handler->Get(db_name); -#else - if (db_name != dbms::kDefaultDB) { - spdlog::warn("Trying to replicate a non-default database on a community replica."); - return std::nullopt; - } - auto acc = dbms_handler->Get(); -#endif + auto acc = dbms_handler->Get(uuid); if (!acc) { - spdlog::error("Failed to get access to ", db_name); + spdlog::error("Failed to get access to UUID ", std::string{uuid}); return std::nullopt; } +#else + auto acc = dbms_handler->Get(); + if (!acc) { + spdlog::warn("Failed to get access to the default db."); + return std::nullopt; + } +#endif auto *inmem_storage = dynamic_cast(acc.get()->storage()); if (!inmem_storage || inmem_storage->storage_mode_ != storage::StorageMode::IN_MEMORY_TRANSACTIONAL) { - spdlog::error("Database \"{}\" is not IN_MEMORY_TRANSACTIONAL.", db_name); + spdlog::error("Database is not IN_MEMORY_TRANSACTIONAL."); return std::nullopt; } return std::optional{std::move(acc)}; } catch (const dbms::UnknownDatabaseException &e) { - spdlog::warn("No database \"{}\" on replica!", db_name); + spdlog::warn("No database with UUID \"{}\" on replica!", std::string{uuid}); return std::nullopt; } } @@ -109,13 +110,16 @@ void InMemoryReplicationHandlers::HeartbeatHandler(dbms::DbmsHandler *dbms_handl slk::Builder *res_builder) { storage::replication::HeartbeatReq req; slk::Load(&req, req_reader); - auto const db_acc = GetDatabaseAccessor(dbms_handler, req.db_name); - if (!db_acc) return; + auto const db_acc = GetDatabaseAccessor(dbms_handler, req.uuid); + if (!db_acc) { + storage::replication::HeartbeatRes res{false, 0, ""}; + slk::Save(res, res_builder); + return; + } // TODO: this handler is agnostic of InMemory, move to be reused by on-disk auto const *storage = db_acc->get()->storage(); - storage::replication::HeartbeatRes res{storage->id(), true, - storage->repl_storage_state_.last_commit_timestamp_.load(), + storage::replication::HeartbeatRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load(), std::string{storage->repl_storage_state_.epoch_.id()}}; slk::Save(res, res_builder); } @@ -124,8 +128,12 @@ void InMemoryReplicationHandlers::AppendDeltasHandler(dbms::DbmsHandler *dbms_ha slk::Builder *res_builder) { storage::replication::AppendDeltasReq req; slk::Load(&req, req_reader); - auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name); - if (!db_acc) return; + auto db_acc = GetDatabaseAccessor(dbms_handler, req.uuid); + if (!db_acc) { + storage::replication::AppendDeltasRes res{false, 0}; + slk::Save(res, res_builder); + return; + } storage::replication::Decoder decoder(req_reader); @@ -165,7 +173,7 @@ void InMemoryReplicationHandlers::AppendDeltasHandler(dbms::DbmsHandler *dbms_ha storage::durability::kVersion); // TODO: Check if we are always using the latest version when replicating } - storage::replication::AppendDeltasRes res{storage->id(), false, repl_storage_state.last_commit_timestamp_.load()}; + storage::replication::AppendDeltasRes res{false, repl_storage_state.last_commit_timestamp_.load()}; slk::Save(res, res_builder); return; } @@ -174,7 +182,7 @@ void 
InMemoryReplicationHandlers::AppendDeltasHandler(dbms::DbmsHandler *dbms_ha storage, &decoder, storage::durability::kVersion); // TODO: Check if we are always using the latest version when replicating - storage::replication::AppendDeltasRes res{storage->id(), true, repl_storage_state.last_commit_timestamp_.load()}; + storage::replication::AppendDeltasRes res{true, repl_storage_state.last_commit_timestamp_.load()}; slk::Save(res, res_builder); spdlog::debug("Replication recovery from append deltas finished, replica is now up to date!"); } @@ -183,8 +191,12 @@ void InMemoryReplicationHandlers::SnapshotHandler(dbms::DbmsHandler *dbms_handle slk::Builder *res_builder) { storage::replication::SnapshotReq req; slk::Load(&req, req_reader); - auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name); - if (!db_acc) return; + auto db_acc = GetDatabaseAccessor(dbms_handler, req.uuid); + if (!db_acc) { + storage::replication::SnapshotRes res{false, 0}; + slk::Save(res, res_builder); + return; + } storage::replication::Decoder decoder(req_reader); @@ -232,8 +244,7 @@ void InMemoryReplicationHandlers::SnapshotHandler(dbms::DbmsHandler *dbms_handle } storage_guard.unlock(); - storage::replication::SnapshotRes res{storage->id(), true, - storage->repl_storage_state_.last_commit_timestamp_.load()}; + storage::replication::SnapshotRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()}; slk::Save(res, res_builder); spdlog::trace("Deleting old snapshot files due to snapshot recovery."); @@ -263,8 +274,12 @@ void InMemoryReplicationHandlers::WalFilesHandler(dbms::DbmsHandler *dbms_handle slk::Builder *res_builder) { storage::replication::WalFilesReq req; slk::Load(&req, req_reader); - auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name); - if (!db_acc) return; + auto db_acc = GetDatabaseAccessor(dbms_handler, req.uuid); + if (!db_acc) { + storage::replication::WalFilesRes res{false, 0}; + slk::Save(res, res_builder); + return; + } const auto wal_file_number = req.file_number; spdlog::debug("Received WAL files: {}", wal_file_number); @@ -278,8 +293,7 @@ void InMemoryReplicationHandlers::WalFilesHandler(dbms::DbmsHandler *dbms_handle LoadWal(storage, &decoder); } - storage::replication::WalFilesRes res{storage->id(), true, - storage->repl_storage_state_.last_commit_timestamp_.load()}; + storage::replication::WalFilesRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()}; slk::Save(res, res_builder); spdlog::debug("Replication recovery from WAL files ended successfully, replica is now up to date!"); } @@ -288,8 +302,12 @@ void InMemoryReplicationHandlers::CurrentWalHandler(dbms::DbmsHandler *dbms_hand slk::Builder *res_builder) { storage::replication::CurrentWalReq req; slk::Load(&req, req_reader); - auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name); - if (!db_acc) return; + auto db_acc = GetDatabaseAccessor(dbms_handler, req.uuid); + if (!db_acc) { + storage::replication::CurrentWalRes res{false, 0}; + slk::Save(res, res_builder); + return; + } storage::replication::Decoder decoder(req_reader); @@ -298,8 +316,7 @@ void InMemoryReplicationHandlers::CurrentWalHandler(dbms::DbmsHandler *dbms_hand LoadWal(storage, &decoder); - storage::replication::CurrentWalRes res{storage->id(), true, - storage->repl_storage_state_.last_commit_timestamp_.load()}; + storage::replication::CurrentWalRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()}; slk::Save(res, res_builder); spdlog::debug("Replication recovery from current WAL ended successfully, 
replica is now up to date!"); } @@ -318,6 +335,8 @@ void InMemoryReplicationHandlers::LoadWal(storage::InMemoryStorage *storage, sto } auto &replica_epoch = storage->repl_storage_state_.epoch_; if (wal_info.epoch_id != replica_epoch.id()) { + // questionable behaviour, we trust that any change in epoch implies change in who is MAIN + // when we use high availability, this assumption need to be checked. auto prev_epoch = replica_epoch.SetEpoch(wal_info.epoch_id); storage->repl_storage_state_.AddEpochToHistoryForce(prev_epoch); } @@ -355,13 +374,16 @@ void InMemoryReplicationHandlers::TimestampHandler(dbms::DbmsHandler *dbms_handl slk::Builder *res_builder) { storage::replication::TimestampReq req; slk::Load(&req, req_reader); - auto const db_acc = GetDatabaseAccessor(dbms_handler, req.db_name); - if (!db_acc) return; + auto const db_acc = GetDatabaseAccessor(dbms_handler, req.uuid); + if (!db_acc) { + storage::replication::TimestampRes res{false, 0}; + slk::Save(res, res_builder); + return; + } // TODO: this handler is agnostic of InMemory, move to be reused by on-disk auto const *storage = db_acc->get()->storage(); - storage::replication::TimestampRes res{storage->id(), true, - storage->repl_storage_state_.last_commit_timestamp_.load()}; + storage::replication::TimestampRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()}; slk::Save(res, res_builder); } @@ -505,7 +527,7 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage case WalDeltaData::Type::EDGE_SET_PROPERTY: { spdlog::trace(" Edge {} set property {} to {}", delta.vertex_edge_set_property.gid.AsUint(), delta.vertex_edge_set_property.property, delta.vertex_edge_set_property.value); - if (!storage->config_.items.properties_on_edges) + if (!storage->config_.salient.items.properties_on_edges) throw utils::BasicException( "Can't set properties on edges because properties on edges " "are disabled!"); @@ -572,8 +594,8 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage spdlog::trace(" Transaction end"); if (!commit_timestamp_and_accessor || commit_timestamp_and_accessor->first != timestamp) throw utils::BasicException("Invalid commit data!"); - auto ret = - commit_timestamp_and_accessor->second.Commit(commit_timestamp_and_accessor->first, false /* not main */); + auto ret = commit_timestamp_and_accessor->second.Commit( + {.desired_commit_timestamp = commit_timestamp_and_accessor->first, .is_main = false}); if (ret.HasError()) throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__); commit_timestamp_and_accessor = std::nullopt; diff --git a/src/dbms/inmemory/replication_handlers.hpp b/src/dbms/inmemory/replication_handlers.hpp index fc76d2b3a..4f6523747 100644 --- a/src/dbms/inmemory/replication_handlers.hpp +++ b/src/dbms/inmemory/replication_handlers.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -12,7 +12,6 @@ #pragma once #include "replication/replication_server.hpp" -#include "replication/state.hpp" #include "storage/v2/replication/serialization.hpp" namespace memgraph::storage { diff --git a/src/dbms/inmemory/storage_helper.hpp b/src/dbms/inmemory/storage_helper.hpp index 8e38f0a0f..fa1b9646a 100644 --- a/src/dbms/inmemory/storage_helper.hpp +++ b/src/dbms/inmemory/storage_helper.hpp @@ -24,8 +24,7 @@ namespace memgraph::dbms { inline std::unique_ptr CreateInMemoryStorage(storage::Config config, ::memgraph::replication::ReplicationState &repl_state) { - const auto wal_mode = config.durability.snapshot_wal_mode; - const auto name = config.name; + const auto name = config.salient.name; auto storage = std::make_unique(std::move(config)); // Connect replication state and storage @@ -34,24 +33,6 @@ inline std::unique_ptr CreateInMemoryStorage(storage::Config c return storage->CreateSnapshot(repl_state.GetRole()); }); - if (allow_mt_repl || name == dbms::kDefaultDB) { - // Handle global replication state - spdlog::info("Replication configuration will be stored and will be automatically restored in case of a crash."); - // RECOVER REPLICA CONNECTIONS - memgraph::dbms::RestoreReplication(repl_state, *storage); - } else if (const ::memgraph::replication::RoleMainData *data = - std::get_if<::memgraph::replication::RoleMainData>(&repl_state.ReplicationData()); - data && !data->registered_replicas_.empty()) { - spdlog::warn("Multi-tenant replication is currently not supported!"); - } - - if (wal_mode == storage::Config::Durability::SnapshotWalMode::DISABLED && repl_state.IsMain()) { - spdlog::warn( - "The instance has the MAIN replication role, but durability logs and snapshots are disabled. Please consider " - "enabling durability by using --storage-snapshot-interval-sec and --storage-wal-enabled flags because " - "without write-ahead logs this instance is not replicating any data."); - } - return std::move(storage); } diff --git a/src/dbms/replication_client.cpp b/src/dbms/replication_client.cpp index bfa4c622f..fa0c30daa 100644 --- a/src/dbms/replication_client.cpp +++ b/src/dbms/replication_client.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -10,6 +10,7 @@ // licenses/APL.txt. 
#include "dbms/replication_client.hpp" +#include "replication/replication_client.hpp" namespace memgraph::dbms { @@ -17,18 +18,26 @@ void StartReplicaClient(DbmsHandler &dbms_handler, replication::ReplicationClien // No client error, start instance level client auto const &endpoint = client.rpc_client_.Endpoint(); spdlog::trace("Replication client started at: {}:{}", endpoint.address, endpoint.port); - client.StartFrequentCheck([&dbms_handler](std::string_view name) { - // Working connection, check if any database has been left behind - dbms_handler.ForEach([name](dbms::Database *db) { + client.StartFrequentCheck([&dbms_handler](bool reconnect, replication::ReplicationClient &client) { + // Working connection + // Check if system needs restoration + if (reconnect) { + client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; }); + } +#ifdef MG_ENTERPRISE + dbms_handler.SystemRestore(client); +#endif + // Check if any database has been left behind + dbms_handler.ForEach([&name = client.name_, reconnect](dbms::DatabaseAccess db_acc) { // Specific database <-> replica client - db->storage()->repl_storage_state_.WithClient(name, [&](storage::ReplicationStorageClient *client) { - if (client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) { + db_acc->storage()->repl_storage_state_.WithClient(name, [&](storage::ReplicationStorageClient *client) { + if (reconnect || client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) { // Database <-> replica might be behind, check and recover - client->TryCheckReplicaStateAsync(db->storage()); + client->TryCheckReplicaStateAsync(db_acc->storage(), db_acc); } }); }); }); -} +} // namespace memgraph::dbms } // namespace memgraph::dbms diff --git a/src/dbms/replication_handler.cpp b/src/dbms/replication_handler.cpp index 2cbe2c432..ee16dc7b2 100644 --- a/src/dbms/replication_handler.cpp +++ b/src/dbms/replication_handler.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -11,12 +11,18 @@ #include "dbms/replication_handler.hpp" +#include + #include "dbms/constants.hpp" #include "dbms/dbms_handler.hpp" +#include "dbms/global.hpp" #include "dbms/inmemory/replication_handlers.hpp" -#include "dbms/inmemory/storage_helper.hpp" #include "dbms/replication_client.hpp" #include "replication/state.hpp" +#include "spdlog/spdlog.h" +#include "storage/v2/config.hpp" +#include "storage/v2/replication/rpc.hpp" +#include "utils/on_scope_exit.hpp" using memgraph::replication::ReplicationClientConfig; using memgraph::replication::ReplicationState; @@ -51,8 +57,8 @@ bool ReplicationHandler::SetReplicationRoleMain() { }; auto const replica_handler = [this](RoleReplicaData const &) { // STEP 1) bring down all REPLICA servers - dbms_handler_.ForEach([](Database *db) { - auto *storage = db->storage(); + dbms_handler_.ForEach([](DatabaseAccess db_acc) { + auto *storage = db_acc->storage(); // Remember old epoch + storage timestamp association storage->PrepareForNewEpoch(); }); @@ -67,8 +73,8 @@ bool ReplicationHandler::SetReplicationRoleMain() { // STEP 3) We are now MAIN, update storage local epoch const auto &epoch = std::get(std::as_const(dbms_handler_.ReplicationState()).ReplicationData()).epoch_; - dbms_handler_.ForEach([&](Database *db) { - auto *storage = db->storage(); + dbms_handler_.ForEach([&](DatabaseAccess db_acc) { + auto *storage = db_acc->storage(); storage->repl_storage_state_.epoch_ = epoch; }); @@ -89,8 +95,8 @@ bool ReplicationHandler::SetReplicationRoleReplica(const memgraph::replication:: // TODO StorageState needs to be synched. Could have a dangling reference if someone adds a database as we are // deleting the replica. // Remove database specific clients - dbms_handler_.ForEach([&](Database *db) { - auto *storage = db->storage(); + dbms_handler_.ForEach([&](DatabaseAccess db_acc) { + auto *storage = db_acc->storage(); storage->repl_storage_state_.replication_clients_.WithLock([](auto &clients) { clients.clear(); }); }); // Remove instance level clients @@ -105,15 +111,7 @@ bool ReplicationHandler::SetReplicationRoleReplica(const memgraph::replication:: // ASSERT return false; }, - [this](RoleReplicaData const &data) { - // Register handlers - InMemoryReplicationHandlers::Register(&dbms_handler_, *data.server); - if (!data.server->Start()) { - spdlog::error("Unable to start the replication server."); - return false; - } - return true; - }}, + [this](RoleReplicaData const &data) { return StartRpcServer(dbms_handler_, data); }}, dbms_handler_.ReplicationState().ReplicationData()); // TODO Handle error (restore to main?) 
return success; @@ -124,7 +122,8 @@ auto ReplicationHandler::RegisterReplica(const memgraph::replication::Replicatio MG_ASSERT(dbms_handler_.ReplicationState().IsMain(), "Only main instance can register a replica!"); auto instance_client = dbms_handler_.ReplicationState().RegisterReplica(config); - if (instance_client.HasError()) switch (instance_client.GetError()) { + if (instance_client.HasError()) { + switch (instance_client.GetError()) { case memgraph::replication::RegisterReplicaError::NOT_MAIN: MG_ASSERT(false, "Only main instance can register a replica!"); return {}; @@ -137,29 +136,36 @@ auto ReplicationHandler::RegisterReplica(const memgraph::replication::Replicatio case memgraph::replication::RegisterReplicaError::SUCCESS: break; } + } if (!allow_mt_repl && dbms_handler_.All().size() > 1) { spdlog::warn("Multi-tenant replication is currently not supported!"); } +#ifdef MG_ENTERPRISE + // Update system before enabling individual storage <-> replica clients + dbms_handler_.SystemRestore(*instance_client.GetValue()); +#endif + bool all_clients_good = true; // Add database specific clients (NOTE Currently all databases are connected to each replica) - dbms_handler_.ForEach([&](Database *db) { - auto *storage = db->storage(); - if (!allow_mt_repl && storage->id() != kDefaultDB) { + dbms_handler_.ForEach([&](DatabaseAccess db_acc) { + auto *storage = db_acc->storage(); + if (!allow_mt_repl && storage->name() != kDefaultDB) { return; } // TODO: ATM only IN_MEMORY_TRANSACTIONAL, fix other modes if (storage->storage_mode_ != storage::StorageMode::IN_MEMORY_TRANSACTIONAL) return; - all_clients_good &= - storage->repl_storage_state_.replication_clients_.WithLock([storage, &instance_client](auto &storage_clients) { + all_clients_good &= storage->repl_storage_state_.replication_clients_.WithLock( + [storage, &instance_client, db_acc = std::move(db_acc)](auto &storage_clients) mutable { // NOLINT auto client = std::make_unique(*instance_client.GetValue()); - client->Start(storage); + // All good, start replica client + client->Start(storage, std::move(db_acc)); // After start the storage <-> replica state should be READY or RECOVERING (if correctly started) // MAYBE_BEHIND isn't a statement of the current state, this is the default value - // Failed to start due to branching of MAIN and REPLICA + // Failed to start due to an error like branching of MAIN and REPLICA if (client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) { return false; } @@ -170,7 +176,7 @@ auto ReplicationHandler::RegisterReplica(const memgraph::replication::Replicatio // NOTE Currently if any databases fails, we revert back if (!all_clients_good) { - spdlog::error("Failed to register all databases to the REPLICA \"{}\"", config.name); + spdlog::error("Failed to register all databases on the REPLICA \"{}\"", config.name); UnregisterReplica(config.name); return RegisterReplicaError::CONNECTION_FAILED; } @@ -189,8 +195,8 @@ auto ReplicationHandler::UnregisterReplica(std::string_view name) -> UnregisterR return UnregisterReplicaResult::COULD_NOT_BE_PERSISTED; } // Remove database specific clients - dbms_handler_.ForEach([name](Database *db) { - db->storage()->repl_storage_state_.replication_clients_.WithLock([&name](auto &clients) { + dbms_handler_.ForEach([name](DatabaseAccess db_acc) { + db_acc->storage()->repl_storage_state_.replication_clients_.WithLock([&name](auto &clients) { std::erase_if(clients, [name](const auto &client) { return client->Name() == name; }); }); }); @@ -214,20 +220,20 @@ bool
ReplicationHandler::IsReplica() const { return dbms_handler_.ReplicationSta // Per storage // NOTE Storage will connect to all replicas. Future work might change this -void RestoreReplication(replication::ReplicationState &repl_state, storage::Storage &storage) { +void RestoreReplication(replication::ReplicationState &repl_state, DatabaseAccess db_acc) { spdlog::info("Restoring replication role."); /// MAIN - auto const recover_main = [&storage](RoleMainData &mainData) { + auto const recover_main = [db_acc = std::move(db_acc)](RoleMainData &mainData) mutable { // NOLINT // Each individual client has already been restored and started. Here we just go through each database and start its // client for (auto &instance_client : mainData.registered_replicas_) { - spdlog::info("Replica {} restoration started for {}.", instance_client.name_, storage.id()); - - const auto &ret = storage.repl_storage_state_.replication_clients_.WithLock( - [&](auto &storage_clients) -> utils::BasicResult { + spdlog::info("Replica {} restoration started for {}.", instance_client.name_, db_acc->name()); + const auto &ret = db_acc->storage()->repl_storage_state_.replication_clients_.WithLock( + [&, db_acc](auto &storage_clients) mutable -> utils::BasicResult { auto client = std::make_unique(instance_client); - client->Start(&storage); + auto *storage = db_acc->storage(); + client->Start(storage, std::move(db_acc)); // After start the storage <-> replica state should be READY or RECOVERING (if correctly started) // MAYBE_BEHIND isn't a statement of the current state, this is the default value // Failed to start due to branching of MAIN and REPLICA @@ -244,7 +250,7 @@ void RestoreReplication(replication::ReplicationState &repl_state, storage::Stor LOG_FATAL("Failure when restoring replica {}: {}.", instance_client.name_, RegisterReplicaErrorToString(ret.GetError())); } - spdlog::info("Replica {} restored for {}.", instance_client.name_, storage.id()); + spdlog::info("Replica {} restored for {}.", instance_client.name_, db_acc->name()); } spdlog::info("Replication role restored to MAIN."); }; @@ -259,4 +265,177 @@ void RestoreReplication(replication::ReplicationState &repl_state, storage::Stor }, repl_state.ReplicationData()); } + +namespace system_replication { +#ifdef MG_ENTERPRISE +void SystemHeartbeatHandler(const uint64_t ts, slk::Reader *req_reader, slk::Builder *res_builder) { + replication::SystemHeartbeatReq req; + replication::SystemHeartbeatReq::Load(&req, req_reader); + + replication::SystemHeartbeatRes res(ts); + memgraph::slk::Save(res, res_builder); +} + +void CreateDatabaseHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder) { + memgraph::storage::replication::CreateDatabaseReq req; + memgraph::slk::Load(&req, req_reader); + + using memgraph::storage::replication::CreateDatabaseRes; + CreateDatabaseRes res(CreateDatabaseRes::Result::FAILURE); + + // Note: No need to check epoch, recovery mechanism is done by a full uptodate snapshot + // of the set of databases. Hence no history exists to maintain regarding epoch change. + // If MAIN has changed we need to check this new group_timestamp is consistent with + // what we have so far. 
+ + if (req.expected_group_timestamp != dbms_handler.LastCommitedTS()) { + spdlog::debug("CreateDatabaseHandler: bad expected timestamp {},{}", req.expected_group_timestamp, + dbms_handler.LastCommitedTS()); + memgraph::slk::Save(res, res_builder); + return; + } + + try { + // Create new + auto new_db = dbms_handler.Update(req.config); + if (new_db.HasValue()) { + // Successfully created the db + dbms_handler.SetLastCommitedTS(req.new_group_timestamp); + res = CreateDatabaseRes(CreateDatabaseRes::Result::SUCCESS); + spdlog::debug("CreateDatabaseHandler: SUCCESS updated LCTS to {}", req.new_group_timestamp); + } + } catch (...) { + // Failure + } + + memgraph::slk::Save(res, res_builder); +} + +void DropDatabaseHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder) { + memgraph::storage::replication::DropDatabaseReq req; + memgraph::slk::Load(&req, req_reader); + + using memgraph::storage::replication::DropDatabaseRes; + DropDatabaseRes res(DropDatabaseRes::Result::FAILURE); + + // Note: No need to check the epoch; recovery is done via a full up-to-date snapshot + // of the set of databases. Hence there is no history to maintain regarding epoch changes. + // If MAIN has changed, we need to check that this new group_timestamp is consistent with + // what we have so far. + + if (req.expected_group_timestamp != dbms_handler.LastCommitedTS()) { + spdlog::debug("DropDatabaseHandler: bad expected timestamp {},{}", req.expected_group_timestamp, + dbms_handler.LastCommitedTS()); + memgraph::slk::Save(res, res_builder); + return; + } + + try { + // NOTE: Only a single communication channel can exist at a time, so no other database can be deleted/created at the moment. + auto new_db = dbms_handler.Delete(req.uuid); + if (new_db.HasError()) { + if (new_db.GetError() == DeleteError::NON_EXISTENT) { + // Nothing to drop + dbms_handler.SetLastCommitedTS(req.new_group_timestamp); + res = DropDatabaseRes(DropDatabaseRes::Result::NO_NEED); + } + } else { + // Successfully dropped the db + dbms_handler.SetLastCommitedTS(req.new_group_timestamp); + res = DropDatabaseRes(DropDatabaseRes::Result::SUCCESS); + spdlog::debug("DropDatabaseHandler: SUCCESS updated LCTS to {}", req.new_group_timestamp); + } + } catch (...)
{ + // Failure + } + + memgraph::slk::Save(res, res_builder); +} + +void SystemRecoveryHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder) { + // TODO Speed up + memgraph::storage::replication::SystemRecoveryReq req; + memgraph::slk::Load(&req, req_reader); + + using memgraph::storage::replication::SystemRecoveryRes; + SystemRecoveryRes res(SystemRecoveryRes::Result::FAILURE); + + utils::OnScopeExit send_on_exit([&]() { memgraph::slk::Save(res, res_builder); }); + + // Get all current dbs + auto old = dbms_handler.All(); + + // Check/create the incoming dbs + for (const auto &config : req.database_configs) { + // Missing db + try { + if (dbms_handler.Update(config).HasError()) { + spdlog::debug("SystemRecoveryHandler: Failed to update database \"{}\".", config.name); + return; // Send failure on exit + } + } catch (const UnknownDatabaseException &) { + spdlog::debug("SystemRecoveryHandler: UnknownDatabaseException"); + return; // Send failure on exit + } + const auto it = std::find(old.begin(), old.end(), config.name); + if (it != old.end()) old.erase(it); + } + + // Delete all the leftover old dbs + for (const auto &remove_db : old) { + const auto del = dbms_handler.Delete(remove_db); + if (del.HasError()) { + // Some errors are not terminal + if (del.GetError() == DeleteError::DEFAULT_DB || del.GetError() == DeleteError::NON_EXISTENT) { + spdlog::debug("SystemRecoveryHandler: Dropped database \"{}\".", remove_db); + continue; + } + spdlog::debug("SystemRecoveryHandler: Failed to drop database \"{}\".", remove_db); + return; // Send failure on exit + } + } + // Successfully recovered + dbms_handler.SetLastCommitedTS(req.forced_group_timestamp); + spdlog::debug("SystemRecoveryHandler: SUCCESS updated LCTS to {}", req.forced_group_timestamp); + res = SystemRecoveryRes(SystemRecoveryRes::Result::SUCCESS); +} +#endif + +void Register(replication::RoleReplicaData const &data, dbms::DbmsHandler &dbms_handler) { +#ifdef MG_ENTERPRISE + data.server->rpc_server_.Register( + [&dbms_handler](auto *req_reader, auto *res_builder) { + spdlog::debug("Received SystemHeartbeatRpc"); + SystemHeartbeatHandler(dbms_handler.LastCommitedTS(), req_reader, res_builder); + }); + data.server->rpc_server_.Register( + [&dbms_handler](auto *req_reader, auto *res_builder) { + spdlog::debug("Received CreateDatabaseRpc"); + CreateDatabaseHandler(dbms_handler, req_reader, res_builder); + }); + data.server->rpc_server_.Register( + [&dbms_handler](auto *req_reader, auto *res_builder) { + spdlog::debug("Received DropDatabaseRpc"); + DropDatabaseHandler(dbms_handler, req_reader, res_builder); + }); + data.server->rpc_server_.Register( + [&dbms_handler](auto *req_reader, auto *res_builder) { + spdlog::debug("Received SystemRecoveryRpc"); + SystemRecoveryHandler(dbms_handler, req_reader, res_builder); + }); +#endif +} +} // namespace system_replication + +bool StartRpcServer(DbmsHandler &dbms_handler, const replication::RoleReplicaData &data) { + // Register handlers + InMemoryReplicationHandlers::Register(&dbms_handler, *data.server); + system_replication::Register(data, dbms_handler); + // Start server + if (!data.server->Start()) { + spdlog::error("Unable to start the replication server."); + return false; + } + return true; +} } // namespace memgraph::dbms diff --git a/src/dbms/replication_handler.hpp b/src/dbms/replication_handler.hpp index dc95407b1..179d14015 100644 --- a/src/dbms/replication_handler.hpp +++ b/src/dbms/replication_handler.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph 
Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -11,8 +11,8 @@ #pragma once +#include "dbms/database.hpp" #include "replication/role.hpp" -#include "storage/v2/storage.hpp" #include "utils/result.hpp" // BEGIN fwd declares @@ -62,6 +62,20 @@ struct ReplicationHandler { /// A handler type that keep in sync current ReplicationState and the MAIN/REPLICA-ness of Storage /// TODO: extend to do multiple storages -void RestoreReplication(replication::ReplicationState &repl_state, storage::Storage &storage); +void RestoreReplication(replication::ReplicationState &repl_state, DatabaseAccess db_acc); + +namespace system_replication { +// System handlers +#ifdef MG_ENTERPRISE +void CreateDatabaseHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder); +void SystemHeartbeatHandler(uint64_t ts, slk::Reader *req_reader, slk::Builder *res_builder); +void SystemRecoveryHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder); +#endif + +/// Register all DBMS level RPC handlers +void Register(replication::RoleReplicaData const &data, DbmsHandler &dbms_handler); +} // namespace system_replication + +bool StartRpcServer(DbmsHandler &dbms_handler, const replication::RoleReplicaData &data); } // namespace memgraph::dbms diff --git a/src/dbms/transaction.hpp b/src/dbms/transaction.hpp new file mode 100644 index 000000000..7167d9ec5 --- /dev/null +++ b/src/dbms/transaction.hpp @@ -0,0 +1,64 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +#pragma once + +#include +#include "storage/v2/config.hpp" + +namespace memgraph::dbms { +struct SystemTransaction { + struct Delta { + enum class Action { + CREATE_DATABASE, + DROP_DATABASE, + }; + + static constexpr struct CreateDatabase { + } create_database; + static constexpr struct DropDatabase { + } drop_database; + + Delta(CreateDatabase /*tag*/, storage::SalientConfig config) + : action(Action::CREATE_DATABASE), config(std::move(config)) {} + Delta(DropDatabase /*tag*/, const utils::UUID &uuid) : action(Action::DROP_DATABASE), uuid(uuid) {} + + Delta(const Delta &) = delete; + Delta(Delta &&) = delete; + Delta &operator=(const Delta &) = delete; + Delta &operator=(Delta &&) = delete; + + ~Delta() { + switch (action) { + case Action::CREATE_DATABASE: + std::destroy_at(&config); + break; + case Action::DROP_DATABASE: + break; + // Some deltas might have special destructor handling + } + } + + Action action; + union { + storage::SalientConfig config; + utils::UUID uuid; + }; + }; + + explicit SystemTransaction(uint64_t timestamp) : system_timestamp(timestamp) {} + + // Currently system transactions support a single delta + std::optional delta{}; + uint64_t system_timestamp; +}; + +} // namespace memgraph::dbms diff --git a/src/flags/general.cpp b/src/flags/general.cpp index 88973fc80..bc77d0043 100644 --- a/src/flags/general.cpp +++ b/src/flags/general.cpp @@ -131,12 +131,6 @@ DEFINE_uint64(storage_recovery_thread_count, DEFINE_bool(storage_enable_schema_metadata, false, "Controls whether metadata should be collected about the resident labels and edge types."); -#ifdef MG_ENTERPRISE -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -DEFINE_bool(storage_delete_on_drop, true, - "If set to true the query 'DROP DATABASE x' will delete the underlying storage as well."); -#endif - // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) DEFINE_bool(telemetry_enabled, false, "Set to true to enable telemetry. We collect information about the " diff --git a/src/flags/general.hpp b/src/flags/general.hpp index b6072250b..0a2a71988 100644 --- a/src/flags/general.hpp +++ b/src/flags/general.hpp @@ -84,10 +84,6 @@ DECLARE_bool(storage_parallel_schema_recovery); DECLARE_uint64(storage_recovery_thread_count); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) DECLARE_bool(storage_enable_schema_metadata); -#ifdef MG_ENTERPRISE -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -DECLARE_bool(storage_delete_on_drop); -#endif // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) DECLARE_bool(telemetry_enabled); diff --git a/src/flags/storage_mode.cpp b/src/flags/storage_mode.cpp index b342719dd..63e9948fd 100644 --- a/src/flags/storage_mode.cpp +++ b/src/flags/storage_mode.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd.
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -17,20 +17,14 @@ #include "gflags/gflags.h" -#include - -inline constexpr std::array storage_mode_mappings{ - std::pair{std::string_view{"IN_MEMORY_TRANSACTIONAL"}, memgraph::storage::StorageMode::IN_MEMORY_TRANSACTIONAL}, - std::pair{std::string_view{"IN_MEMORY_ANALYTICAL"}, memgraph::storage::StorageMode::IN_MEMORY_ANALYTICAL}, - std::pair{std::string_view{"ON_DISK_TRANSACTIONAL"}, memgraph::storage::StorageMode::ON_DISK_TRANSACTIONAL}}; - const std::string storage_mode_help_string = fmt::format("Default storage mode Memgraph uses. Allowed values: {}", - memgraph::utils::GetAllowedEnumValuesString(storage_mode_mappings)); + memgraph::utils::GetAllowedEnumValuesString(memgraph::storage::storage_mode_mappings)); // NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables) DEFINE_VALIDATED_string(storage_mode, "IN_MEMORY_TRANSACTIONAL", storage_mode_help_string.c_str(), { - if (const auto result = memgraph::utils::IsValidEnumValueString(value, storage_mode_mappings); result.HasError()) { + if (const auto result = memgraph::utils::IsValidEnumValueString(value, memgraph::storage::storage_mode_mappings); + result.HasError()) { switch (result.GetError()) { case memgraph::utils::ValidationError::EmptyValue: { std::cout << "Storage mode cannot be empty." << std::endl; @@ -38,7 +32,7 @@ DEFINE_VALIDATED_string(storage_mode, "IN_MEMORY_TRANSACTIONAL", storage_mode_he } case memgraph::utils::ValidationError::InvalidValue: { std::cout << "Invalid value for storage mode. Allowed values: " - << memgraph::utils::GetAllowedEnumValuesString(storage_mode_mappings) << std::endl; + << memgraph::utils::GetAllowedEnumValuesString(memgraph::storage::storage_mode_mappings) << std::endl; break; } } @@ -48,8 +42,8 @@ DEFINE_VALIDATED_string(storage_mode, "IN_MEMORY_TRANSACTIONAL", storage_mode_he }); memgraph::storage::StorageMode memgraph::flags::ParseStorageMode() { - const auto storage_mode = - memgraph::utils::StringToEnum(FLAGS_storage_mode, storage_mode_mappings); + const auto storage_mode = memgraph::utils::StringToEnum( + FLAGS_storage_mode, memgraph::storage::storage_mode_mappings); MG_ASSERT(storage_mode, "Invalid storage mode"); return *storage_mode; } diff --git a/src/glue/SessionHL.cpp b/src/glue/SessionHL.cpp index bff12d188..c27d6a3bf 100644 --- a/src/glue/SessionHL.cpp +++ b/src/glue/SessionHL.cpp @@ -80,7 +80,7 @@ std::vector TypedValueResultStreamBase::De std::vector decoded_values; decoded_values.reserve(values.size()); for (const auto &v : values) { - auto maybe_value = memgraph::glue::ToBoltValue(v, *storage_, memgraph::storage::View::NEW); + auto maybe_value = memgraph::glue::ToBoltValue(v, storage_, memgraph::storage::View::NEW); if (maybe_value.HasError()) { switch (maybe_value.GetError()) { case memgraph::storage::Error::DELETED_OBJECT: @@ -112,14 +112,14 @@ std::string SessionHL::GetDefaultDB() { if (user_.has_value()) { return user_->db_access().GetDefault(); } - return memgraph::dbms::kDefaultDB; + return std::string{memgraph::dbms::kDefaultDB}; } #endif std::string SessionHL::GetCurrentDB() const { if (!interpreter_.current_db_.db_acc_) return ""; const auto *db = interpreter_.current_db_.db_acc_->get(); - return db->id(); + return db->name(); } std::optional SessionHL::GetServerNameForInit() { @@ -167,10 +167,10 @@ std::map SessionHL::Discard(s std::map 
SessionHL::Pull(SessionHL::TEncoder *encoder, std::optional n, std::optional qid) { - // TODO: Update once interpreter can handle non-database queries (db_acc will be nullopt) - auto *db = interpreter_.current_db_.db_acc_->get(); try { - TypedValueResultStream stream(encoder, db->storage()); + auto &db = interpreter_.current_db_.db_acc_; + auto *storage = db ? db->get()->storage() : nullptr; + TypedValueResultStream stream(encoder, storage); return DecodeSummary(interpreter_.Pull(&stream, n, qid)); } catch (const memgraph::query::QueryException &e) { // Count the number of specific exceptions thrown @@ -199,11 +199,10 @@ std::pair, std::optional> SessionHL::Interpret( } #ifdef MG_ENTERPRISE - // TODO: Update once interpreter can handle non-database queries (db_acc will be nullopt) - auto *db = interpreter_.current_db_.db_acc_->get(); if (memgraph::license::global_license_checker.IsEnterpriseValidFast()) { + auto &db = interpreter_.current_db_.db_acc_; audit_log_->Record(endpoint_.address().to_string(), user_ ? *username : "", query, - memgraph::storage::PropertyValue(params_pv), db->id()); + memgraph::storage::PropertyValue(params_pv), db ? db->get()->name() : "no known database"); } #endif try { @@ -351,11 +350,11 @@ SessionHL::~SessionHL() { std::map SessionHL::DecodeSummary( const std::map &summary) { - // TODO: Update once interpreter can handle non-database queries (db_acc will be nullopt) - auto *db = interpreter_.current_db_.db_acc_->get(); + auto &db_acc = interpreter_.current_db_.db_acc_; + auto *storage = db_acc ? db_acc->get()->storage() : nullptr; std::map decoded_summary; for (const auto &kv : summary) { - auto maybe_value = ToBoltValue(kv.second, *db->storage(), memgraph::storage::View::NEW); + auto maybe_value = ToBoltValue(kv.second, storage, memgraph::storage::View::NEW); if (maybe_value.HasError()) { switch (maybe_value.GetError()) { case memgraph::storage::Error::DELETED_OBJECT: diff --git a/src/glue/auth_handler.cpp b/src/glue/auth_handler.cpp index b4ebfcd2a..a86dc5f48 100644 --- a/src/glue/auth_handler.cpp +++ b/src/glue/auth_handler.cpp @@ -294,7 +294,7 @@ bool AuthQueryHandler::CreateUser(const std::string &username, const std::option ); #ifdef MG_ENTERPRISE GrantDatabaseToUser(auth::kAllDatabases, username); - SetMainDatabase(username, dbms::kDefaultDB); + SetMainDatabase(dbms::kDefaultDB, username); #endif } @@ -393,7 +393,7 @@ std::vector> AuthQueryHandler::GetDatab } } -bool AuthQueryHandler::SetMainDatabase(const std::string &db, const std::string &username) { +bool AuthQueryHandler::SetMainDatabase(std::string_view db, const std::string &username) { if (!std::regex_match(username, name_regex_)) { throw memgraph::query::QueryRuntimeException("Invalid user name."); } diff --git a/src/glue/auth_handler.hpp b/src/glue/auth_handler.hpp index 8798c150a..e6b8724d4 100644 --- a/src/glue/auth_handler.hpp +++ b/src/glue/auth_handler.hpp @@ -44,7 +44,7 @@ class AuthQueryHandler final : public memgraph::query::AuthQueryHandler { std::vector> GetDatabasePrivileges(const std::string &username) override; - bool SetMainDatabase(const std::string &db, const std::string &username) override; + bool SetMainDatabase(std::string_view db, const std::string &username) override; void DeleteDatabase(std::string_view db) override; #endif diff --git a/src/glue/communication.cpp b/src/glue/communication.cpp index 60181e877..2c71e37c7 100644 --- a/src/glue/communication.cpp +++ b/src/glue/communication.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -73,8 +73,14 @@ storage::Result ToBoltEdge(const query::EdgeAccessor return ToBoltEdge(edge.impl_, db, view); } -storage::Result ToBoltValue(const query::TypedValue &value, const storage::Storage &db, storage::View view) { +storage::Result ToBoltValue(const query::TypedValue &value, const storage::Storage *db, storage::View view) { + auto check_db = [db]() { + if (db == nullptr) [[unlikely]] + throw communication::bolt::ValueException("Database needed for TypeValue conversion."); + }; + switch (value.type()) { + // No database needed case query::TypedValue::Type::Null: return Value(); case query::TypedValue::Type::Bool: @@ -85,16 +91,16 @@ storage::Result ToBoltValue(const query::TypedValue &value, const storage return Value(value.ValueDouble()); case query::TypedValue::Type::String: return Value(std::string(value.ValueString())); - case query::TypedValue::Type::List: { - std::vector values; - values.reserve(value.ValueList().size()); - for (const auto &v : value.ValueList()) { - auto maybe_value = ToBoltValue(v, db, view); - if (maybe_value.HasError()) return maybe_value.GetError(); - values.emplace_back(std::move(*maybe_value)); - } - return Value(std::move(values)); - } + case query::TypedValue::Type::Date: + return Value(value.ValueDate()); + case query::TypedValue::Type::LocalTime: + return Value(value.ValueLocalTime()); + case query::TypedValue::Type::LocalDateTime: + return Value(value.ValueLocalDateTime()); + case query::TypedValue::Type::Duration: + return Value(value.ValueDuration()); + + // Database potentially not required case query::TypedValue::Type::Map: { std::map map; for (const auto &kv : value.ValueMap()) { @@ -104,35 +110,48 @@ storage::Result ToBoltValue(const query::TypedValue &value, const storage } return Value(std::move(map)); } + + // Database is required + case query::TypedValue::Type::List: { + check_db(); + std::vector values; + values.reserve(value.ValueList().size()); + for (const auto &v : value.ValueList()) { + auto maybe_value = ToBoltValue(v, db, view); + if (maybe_value.HasError()) return maybe_value.GetError(); + values.emplace_back(std::move(*maybe_value)); + } + return Value(std::move(values)); + } case query::TypedValue::Type::Vertex: { - auto maybe_vertex = ToBoltVertex(value.ValueVertex(), db, view); + check_db(); + auto maybe_vertex = ToBoltVertex(value.ValueVertex(), *db, view); if (maybe_vertex.HasError()) return maybe_vertex.GetError(); return Value(std::move(*maybe_vertex)); } case query::TypedValue::Type::Edge: { - auto maybe_edge = ToBoltEdge(value.ValueEdge(), db, view); + check_db(); + auto maybe_edge = ToBoltEdge(value.ValueEdge(), *db, view); if (maybe_edge.HasError()) return maybe_edge.GetError(); return Value(std::move(*maybe_edge)); } case query::TypedValue::Type::Path: { - auto maybe_path = ToBoltPath(value.ValuePath(), db, view); + check_db(); + auto maybe_path = ToBoltPath(value.ValuePath(), *db, view); if (maybe_path.HasError()) return maybe_path.GetError(); return Value(std::move(*maybe_path)); } - case query::TypedValue::Type::Date: - return Value(value.ValueDate()); - case query::TypedValue::Type::LocalTime: - return Value(value.ValueLocalTime()); - case query::TypedValue::Type::LocalDateTime: - return Value(value.ValueLocalDateTime()); - case query::TypedValue::Type::Duration: - return Value(value.ValueDuration()); - case 
query::TypedValue::Type::Function: - throw communication::bolt::ValueException("Unsupported conversion from TypedValue::Function to Value"); - case query::TypedValue::Type::Graph: - auto maybe_graph = ToBoltGraph(value.ValueGraph(), db, view); + case query::TypedValue::Type::Graph: { + check_db(); + auto maybe_graph = ToBoltGraph(value.ValueGraph(), *db, view); if (maybe_graph.HasError()) return maybe_graph.GetError(); return Value(std::move(*maybe_graph)); + } + + // Unsupported conversions + case query::TypedValue::Type::Function: { + throw communication::bolt::ValueException("Unsupported conversion from TypedValue::Function to Value"); + } } } diff --git a/src/glue/communication.hpp b/src/glue/communication.hpp index 0e3b39f4d..737f32db2 100644 --- a/src/glue/communication.hpp +++ b/src/glue/communication.hpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2023 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -65,7 +65,7 @@ storage::Result> ToBoltGraph(c /// @param storage::View for ToBoltVertex and ToBoltEdge. /// /// @throw std::bad_alloc -storage::Result ToBoltValue(const query::TypedValue &value, const storage::Storage &db, +storage::Result ToBoltValue(const query::TypedValue &value, const storage::Storage *db, storage::View view); query::TypedValue ToTypedValue(const communication::bolt::Value &value); diff --git a/src/kvstore/kvstore.cpp b/src/kvstore/kvstore.cpp index 877d6f9bd..1219b8527 100644 --- a/src/kvstore/kvstore.cpp +++ b/src/kvstore/kvstore.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -51,7 +51,7 @@ KVStore &KVStore::operator=(KVStore &&other) { return *this; } -bool KVStore::Put(const std::string &key, const std::string &value) { +bool KVStore::Put(std::string_view key, std::string_view value) { auto s = pimpl_->db->Put(rocksdb::WriteOptions(), key, value); return s.ok(); } @@ -65,7 +65,7 @@ bool KVStore::PutMultiple(const std::map &items) { return s.ok(); } -std::optional KVStore::Get(const std::string &key) const noexcept { +std::optional KVStore::Get(std::string_view key) const noexcept { std::string value; auto s = pimpl_->db->Get(rocksdb::ReadOptions(), key, &value); if (!s.ok()) return std::nullopt; diff --git a/src/kvstore/kvstore.hpp b/src/kvstore/kvstore.hpp index a67d01c8c..b9675d75b 100644 --- a/src/kvstore/kvstore.hpp +++ b/src/kvstore/kvstore.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -61,7 +61,7 @@ class KVStore final { * @return true if the value has been successfully stored. * In case of any error false is going to be returned. */ - bool Put(const std::string &key, const std::string &value); + bool Put(std::string_view key, std::string_view value); /** * Store values under the given keys. @@ -81,7 +81,7 @@ class KVStore final { * @return Value for the given key. std::nullopt in case of any error * OR the value doesn't exist. 
*/ - std::optional Get(const std::string &key) const noexcept; + std::optional Get(std::string_view key) const noexcept; /** * Deletes the key and corresponding value from storage. diff --git a/src/memgraph.cpp b/src/memgraph.cpp index cf954560d..860a77f37 100644 --- a/src/memgraph.cpp +++ b/src/memgraph.cpp @@ -11,7 +11,6 @@ #include #include "audit/log.hpp" -#include "communication/metrics.hpp" #include "communication/websocket/auth.hpp" #include "communication/websocket/server.hpp" #include "dbms/constants.hpp" @@ -33,9 +32,9 @@ #include "query/procedure/module.hpp" #include "query/procedure/py_module.hpp" #include "requests/requests.hpp" +#include "storage/v2/durability/durability.hpp" #include "telemetry/telemetry.hpp" #include "utils/signals.hpp" -#include "utils/skip_list.hpp" #include "utils/sysinfo/memory.hpp" #include "utils/system_info.hpp" #include "utils/terminate_handler.hpp" @@ -73,7 +72,7 @@ void InitFromCypherlFile(memgraph::query::InterpreterContext &ctx, memgraph::dbm spdlog::warn("{} The rest of the init-file will be run.", e.what()); } if (audit_log) { - audit_log->Record("", "", line, {}, memgraph::dbms::kDefaultDB); + audit_log->Record("", "", line, {}, std::string{memgraph::dbms::kDefaultDB}); } } } @@ -300,8 +299,7 @@ int main(int argc, char **argv) { memgraph::storage::Config db_config{ .gc = {.type = memgraph::storage::Config::Gc::Type::PERIODIC, .interval = std::chrono::seconds(FLAGS_storage_gc_cycle_sec)}, - .items = {.properties_on_edges = FLAGS_storage_properties_on_edges, - .enable_schema_metadata = FLAGS_storage_enable_schema_metadata}, + .durability = {.storage_directory = FLAGS_data_directory, .recover_on_startup = FLAGS_storage_recover_on_startup || FLAGS_data_recovery_on_startup, .snapshot_retention_count = FLAGS_storage_snapshot_retention_count, @@ -323,7 +321,9 @@ int main(int argc, char **argv) { .id_name_mapper_directory = FLAGS_data_directory + "/rocksdb_id_name_mapper", .durability_directory = FLAGS_data_directory + "/rocksdb_durability", .wal_directory = FLAGS_data_directory + "/rocksdb_wal"}, - .storage_mode = memgraph::flags::ParseStorageMode()}; + .salient.items = {.properties_on_edges = FLAGS_storage_properties_on_edges, + .enable_schema_metadata = FLAGS_storage_enable_schema_metadata}, + .salient.storage_mode = memgraph::flags::ParseStorageMode()}; memgraph::utils::Scheduler jemalloc_purge_scheduler; jemalloc_purge_scheduler.Run("Jemalloc purge", std::chrono::seconds(FLAGS_storage_gc_cycle_sec), @@ -388,7 +388,7 @@ int main(int argc, char **argv) { memgraph::dbms::DbmsHandler dbms_handler(db_config #ifdef MG_ENTERPRISE , - &auth_, FLAGS_data_recovery_on_startup, FLAGS_storage_delete_on_drop + &auth_, FLAGS_data_recovery_on_startup #endif ); auto db_acc = dbms_handler.Get(); diff --git a/src/mg_import_csv.cpp b/src/mg_import_csv.cpp index e8212b5f4..abf289fa3 100644 --- a/src/mg_import_csv.cpp +++ b/src/mg_import_csv.cpp @@ -707,12 +707,11 @@ int main(int argc, char *argv[]) { std::unordered_map node_id_map; memgraph::storage::Config config{ - - .items = {.properties_on_edges = FLAGS_storage_properties_on_edges}, .durability = {.storage_directory = FLAGS_data_directory, .recover_on_startup = false, .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::DISABLED, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = FLAGS_storage_properties_on_edges}}, }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; auto store = 
memgraph::dbms::CreateInMemoryStorage(config, repl_state); diff --git a/src/query/auth_query_handler.hpp b/src/query/auth_query_handler.hpp index 908dd3ebc..693103354 100644 --- a/src/query/auth_query_handler.hpp +++ b/src/query/auth_query_handler.hpp @@ -57,7 +57,7 @@ class AuthQueryHandler { /// Return true if main database set successfully /// @throw QueryRuntimeException if an error ocurred. - virtual bool SetMainDatabase(const std::string &db, const std::string &username) = 0; + virtual bool SetMainDatabase(std::string_view db, const std::string &username) = 0; /// Delete database from all users /// @throw QueryRuntimeException if an error ocurred. diff --git a/src/query/context.hpp b/src/query/context.hpp index 3040d6e10..f1522053c 100644 --- a/src/query/context.hpp +++ b/src/query/context.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source diff --git a/src/query/db_accessor.hpp b/src/query/db_accessor.hpp index ed7dde409..71b997d9e 100644 --- a/src/query/db_accessor.hpp +++ b/src/query/db_accessor.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -541,7 +541,10 @@ class DbAccessor final { void AdvanceCommand() { accessor_->AdvanceCommand(); } - utils::BasicResult Commit() { return accessor_->Commit(); } + utils::BasicResult Commit(storage::CommitReplArgs reparg = {}, + storage::DatabaseAccessProtector db_acc = {}) { + return accessor_->Commit(std::move(reparg), std::move(db_acc)); + } void Abort() { accessor_->Abort(); } diff --git a/src/query/exceptions.hpp b/src/query/exceptions.hpp index ac8cc8fe8..8a2a46bc6 100644 --- a/src/query/exceptions.hpp +++ b/src/query/exceptions.hpp @@ -195,6 +195,12 @@ class DatabaseContextRequiredException : public QueryRuntimeException { SPECIALIZE_GET_EXCEPTION_NAME(DatabaseContextRequiredException) }; +class ConcurrentSystemQueriesException : public QueryRuntimeException { + public: + using QueryRuntimeException::QueryRuntimeException; + SPECIALIZE_GET_EXCEPTION_NAME(ConcurrentSystemQueriesException) +}; + class WriteVertexOperationInEdgeImportModeException : public QueryException { public: WriteVertexOperationInEdgeImportModeException() diff --git a/src/query/frontend/ast/ast.hpp b/src/query/frontend/ast/ast.hpp index b5e058491..ed40109b6 100644 --- a/src/query/frontend/ast/ast.hpp +++ b/src/query/frontend/ast/ast.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -3612,7 +3612,7 @@ class MultiDatabaseQuery : public memgraph::query::Query { DEFVISITABLE(QueryVisitor); - enum class Action { CREATE, USE, DROP }; + enum class Action { CREATE, USE, DROP, SHOW }; memgraph::query::MultiDatabaseQuery::Action action_; std::string db_name_; diff --git a/src/query/frontend/ast/cypher_main_visitor.cpp b/src/query/frontend/ast/cypher_main_visitor.cpp index 2d93fd757..c9ca477c9 100644 --- a/src/query/frontend/ast/cypher_main_visitor.cpp +++ b/src/query/frontend/ast/cypher_main_visitor.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -2888,6 +2888,14 @@ antlrcpp::Any CypherMainVisitor::visitDropDatabase(MemgraphCypher::DropDatabaseC return mdb_query; } +antlrcpp::Any CypherMainVisitor::visitShowDatabase(MemgraphCypher::ShowDatabaseContext * /*ctx*/) { + auto *mdb_query = storage_->Create(); + mdb_query->db_name_ = ""; + mdb_query->action_ = MultiDatabaseQuery::Action::SHOW; + query_ = mdb_query; + return mdb_query; +} + antlrcpp::Any CypherMainVisitor::visitShowDatabases(MemgraphCypher::ShowDatabasesContext * /*ctx*/) { query_ = storage_->Create(); return query_; diff --git a/src/query/frontend/ast/cypher_main_visitor.hpp b/src/query/frontend/ast/cypher_main_visitor.hpp index 1aa887ad7..809ca421b 100644 --- a/src/query/frontend/ast/cypher_main_visitor.hpp +++ b/src/query/frontend/ast/cypher_main_visitor.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -997,6 +997,11 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor { */ antlrcpp::Any visitDropDatabase(MemgraphCypher::DropDatabaseContext *ctx) override; + /** + * @return MultiDatabaseQuery* + */ + antlrcpp::Any visitShowDatabase(MemgraphCypher::ShowDatabaseContext *ctx) override; + /** * @return ShowDatabasesQuery* */ diff --git a/src/query/frontend/opencypher/grammar/MemgraphCypher.g4 b/src/query/frontend/opencypher/grammar/MemgraphCypher.g4 index d585acbb1..c3a35414a 100644 --- a/src/query/frontend/opencypher/grammar/MemgraphCypher.g4 +++ b/src/query/frontend/opencypher/grammar/MemgraphCypher.g4 @@ -480,6 +480,7 @@ transactionId : literal ; multiDatabaseQuery : createDatabase | useDatabase | dropDatabase + | showDatabase ; createDatabase : CREATE DATABASE databaseName ; @@ -488,6 +489,8 @@ useDatabase : USE DATABASE databaseName ; dropDatabase : DROP DATABASE databaseName ; +showDatabase : SHOW DATABASE ; + showDatabases : SHOW DATABASES ; edgeImportModeQuery : EDGE IMPORT MODE ( ACTIVE | INACTIVE ) ; diff --git a/src/query/frontend/semantic/required_privileges.cpp b/src/query/frontend/semantic/required_privileges.cpp index 04772cded..48fe10cd5 100644 --- a/src/query/frontend/semantic/required_privileges.cpp +++ b/src/query/frontend/semantic/required_privileges.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -106,6 +106,7 @@ class PrivilegeExtractor : public QueryVisitor, public HierarchicalTreeVis AddPrivilege(AuthQuery::Privilege::MULTI_DATABASE_EDIT); break; case MultiDatabaseQuery::Action::USE: + case MultiDatabaseQuery::Action::SHOW: AddPrivilege(AuthQuery::Privilege::MULTI_DATABASE_USE); break; } diff --git a/src/query/interpreter.cpp b/src/query/interpreter.cpp index c629683f3..cc16a0763 100644 --- a/src/query/interpreter.cpp +++ b/src/query/interpreter.cpp @@ -148,6 +148,7 @@ void memgraph::query::CurrentDB::CleanupDBTransaction(bool abort) { namespace memgraph::query { constexpr std::string_view kSchemaAssert = "SCHEMA.ASSERT"; +constexpr int kSystemTxTryMS = 100; //!< Duration of the unique try_lock_for template constexpr auto kAlwaysFalse = false; @@ -272,12 +273,22 @@ inline auto convertToReplicationMode(const ReplicationQuery::SyncMode &sync_mode return replication::ReplicationMode::ASYNC; } -class ReplQueryHandler final : public query::ReplicationQueryHandler { +class ReplQueryHandler { public: - explicit ReplQueryHandler(dbms::DbmsHandler *dbms_handler) : dbms_handler_(dbms_handler), handler_{*dbms_handler} {} + struct ReplicaInfo { + std::string name; + std::string socket_address; + ReplicationQuery::SyncMode sync_mode; + std::optional timeout; + uint64_t current_timestamp_of_replica; + uint64_t current_number_of_timestamp_behind_master; + ReplicationQuery::ReplicaState state; + }; + + explicit ReplQueryHandler(dbms::DbmsHandler *dbms_handler) : handler_{*dbms_handler} {} /// @throw QueryRuntimeException if an error ocurred. - void SetReplicationRole(ReplicationQuery::ReplicationRole replication_role, std::optional port) override { + void SetReplicationRole(ReplicationQuery::ReplicationRole replication_role, std::optional port) { if (replication_role == ReplicationQuery::ReplicationRole::MAIN) { if (!handler_.SetReplicationRoleMain()) { throw QueryRuntimeException("Couldn't set role to main!"); @@ -299,7 +310,7 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler { } /// @throw QueryRuntimeException if an error ocurred. - ReplicationQuery::ReplicationRole ShowReplicationRole() const override { + ReplicationQuery::ReplicationRole ShowReplicationRole() const { switch (handler_.GetRole()) { case memgraph::replication::ReplicationRole::MAIN: return ReplicationQuery::ReplicationRole::MAIN; @@ -311,8 +322,7 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler { /// @throw QueryRuntimeException if an error ocurred. void RegisterReplica(const std::string &name, const std::string &socket_address, - const ReplicationQuery::SyncMode sync_mode, - const std::chrono::seconds replica_check_frequency) override { + const ReplicationQuery::SyncMode sync_mode, const std::chrono::seconds replica_check_frequency) { if (handler_.IsReplica()) { // replica can't register another replica throw QueryRuntimeException("Replica can't register another replica!"); @@ -340,7 +350,7 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler { } /// @throw QueryRuntimeException if an error occurred. 
- void DropReplica(std::string_view replica_name) override { + void DropReplica(std::string_view replica_name) { auto const result = handler_.UnregisterReplica(replica_name); switch (result) { using enum memgraph::dbms::UnregisterReplicaResult; @@ -355,8 +365,7 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler { } } - using Replica = ReplicationQueryHandler::Replica; - std::vector ShowReplicas() const override { + std::vector ShowReplicas(const dbms::Database &db) const { if (handler_.IsReplica()) { // replica can't show registered replicas (it shouldn't have any) throw QueryRuntimeException("Replica can't show registered replicas (it shouldn't have any)!"); @@ -364,20 +373,12 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler { // TODO: Combine results? Have a single place with clients??? // Also authentication checks (replica + database visibility) - std::vector repl_infos{}; - dbms_handler_->ForOne([&repl_infos](dbms::Database *db) -> bool { - auto infos = db->storage()->ReplicasInfo(); - if (!infos.empty()) { - repl_infos = std::move(infos); - return true; - } - return false; - }); - std::vector replicas; + const auto repl_infos = db.storage()->ReplicasInfo(); + std::vector replicas; replicas.reserve(repl_infos.size()); - const auto from_info = [](const auto &repl_info) -> Replica { - Replica replica; + const auto from_info = [](const auto &repl_info) -> ReplicaInfo { + ReplicaInfo replica; replica.name = repl_info.name; replica.socket_address = repl_info.endpoint.SocketAddress(); switch (repl_info.mode) { @@ -416,7 +417,6 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler { } private: - dbms::DbmsHandler *dbms_handler_; dbms::ReplicationHandler handler_; }; @@ -711,8 +711,8 @@ Callback HandleAuthQuery(AuthQuery *auth_query, InterpreterContext *interpreter_ } // namespace Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &parameters, - dbms::DbmsHandler *dbms_handler, const query::InterpreterConfig &config, - std::vector *notifications) { + dbms::DbmsHandler *dbms_handler, CurrentDB &current_db, + const query::InterpreterConfig &config, std::vector *notifications) { // TODO: MemoryResource for EvaluationContext, it should probably be passed as // the argument to Callback.
EvaluationContext evaluation_context; @@ -786,8 +786,9 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters & callback.header = { "name", "socket_address", "sync_mode", "current_timestamp_of_replica", "number_of_timestamp_behind_master", "state"}; - callback.fn = [handler = ReplQueryHandler{dbms_handler}, replica_nfields = callback.header.size()] { - const auto &replicas = handler.ShowReplicas(); + callback.fn = [handler = ReplQueryHandler{dbms_handler}, replica_nfields = callback.header.size(), + db_acc = current_db.db_acc_] { + const auto &replicas = handler.ShowReplicas(*db_acc->get()); auto typed_replicas = std::vector>{}; typed_replicas.reserve(replicas.size()); for (const auto &replica : replicas) { @@ -963,12 +964,19 @@ Callback HandleStreamQuery(StreamQuery *stream_query, const Parameters &paramete throw utils::BasicException("Parameter BATCH_LIMIT cannot hold negative value"); } - callback.fn = [streams = db_acc->streams(), stream_name = stream_query->stream_name_, batch_limit, timeout]() { + callback.fn = [db_acc, streams = db_acc->streams(), stream_name = stream_query->stream_name_, batch_limit, + timeout]() { + if (db_acc.is_deleting()) { + throw QueryException("Can not start stream while database is being dropped."); + } streams->StartWithLimit(stream_name, static_cast(batch_limit.value()), timeout); return std::vector>{}; }; } else { - callback.fn = [streams = db_acc->streams(), stream_name = stream_query->stream_name_]() { + callback.fn = [db_acc, streams = db_acc->streams(), stream_name = stream_query->stream_name_]() { + if (db_acc.is_deleting()) { + throw QueryException("Can not start stream while database is being dropped."); + } streams->Start(stream_name); return std::vector>{}; }; @@ -1457,8 +1465,7 @@ PreparedQuery Interpreter::PrepareTransactionQuery(std::string_view query_upper, std::function handler; if (query_upper == "BEGIN") { - query_executions_.clear(); - transaction_queries_->clear(); + ResetInterpreter(); // TODO: Evaluate doing move(extras). Currently the extras is very small, but this will be important if it ever // becomes large. handler = [this, extras = extras] { @@ -2260,14 +2267,14 @@ PreparedQuery PrepareAuthQuery(ParsedQuery parsed_query, bool in_explicit_transa PreparedQuery PrepareReplicationQuery(ParsedQuery parsed_query, bool in_explicit_transaction, std::vector *notifications, dbms::DbmsHandler &dbms_handler, - const InterpreterConfig &config) { + CurrentDB &current_db, const InterpreterConfig &config) { if (in_explicit_transaction) { throw ReplicationModificationInMulticommandTxException(); } auto *replication_query = utils::Downcast(parsed_query.query); - auto callback = - HandleReplicationQuery(replication_query, parsed_query.parameters, &dbms_handler, config, notifications); + auto callback = HandleReplicationQuery(replication_query, parsed_query.parameters, &dbms_handler, current_db, config, + notifications); return PreparedQuery{callback.header, std::move(parsed_query.required_privileges), [callback_fn = std::move(callback.fn), pull_plan = std::shared_ptr{nullptr}]( @@ -2865,7 +2872,7 @@ auto ShowTransactions(const std::unordered_set &interpreters, con auto get_interpreter_db_name = [&]() -> std::string const & { static std::string all; - return interpreter->current_db_.db_acc_ ? interpreter->current_db_.db_acc_->get()->id() : all; + return interpreter->current_db_.db_acc_ ?
interpreter->current_db_.db_acc_->get()->name() : all; }; if (transaction_id.has_value() && (interpreter->username_ == username || privilege_checker(get_interpreter_db_name()))) { @@ -3047,7 +3054,7 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici case DatabaseInfoQuery::InfoType::EDGE_TYPES: { header = {"edge types"}; handler = [storage = current_db.db_acc_->get()->storage(), dba] { - if (!storage->config_.items.enable_schema_metadata) { + if (!storage->config_.salient.items.enable_schema_metadata) { throw QueryRuntimeException( "The metadata collection for edge-types is disabled. To enable it, restart your instance and set the " "storage-enable-schema-metadata flag to True."); @@ -3067,7 +3074,7 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici case DatabaseInfoQuery::InfoType::NODE_LABELS: { header = {"node labels"}; handler = [storage = current_db.db_acc_->get()->storage(), dba] { - if (!storage->config_.items.enable_schema_metadata) { + if (!storage->config_.salient.items.enable_schema_metadata) { throw QueryRuntimeException( "The metadata collection for node-labels is disabled. To enable it, restart your instance and set the " "storage-enable-schema-metadata flag to True."); @@ -3126,7 +3133,7 @@ PreparedQuery PrepareSystemInfoQuery(ParsedQuery parsed_query, bool in_explicit_ const int64_t vm_max_map_count_storage_info = vm_max_map_count.has_value() ? vm_max_map_count.value() : memgraph::utils::VM_MAX_MAP_COUNT_DEFAULT; std::vector> results{ - {TypedValue("name"), TypedValue(storage->id())}, + {TypedValue("name"), TypedValue(storage->name())}, {TypedValue("vertex_count"), TypedValue(static_cast(info.vertex_count))}, {TypedValue("edge_count"), TypedValue(static_cast(info.edge_count))}, {TypedValue("average_degree"), TypedValue(info.average_degree)}, @@ -3392,8 +3399,6 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur if (!license::global_license_checker.IsEnterpriseValidFast()) { throw QueryException("Trying to use enterprise feature without a valid license."); } - // TODO: Remove once replicas support multi-tenant replication - if (!current_db.db_acc_) throw DatabaseContextRequiredException("Multi database queries require a defined database."); auto *query = utils::Downcast(parsed_query.query); auto *db_handler = interpreter_context->dbms_handler; @@ -3401,7 +3406,7 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur const bool is_replica = interpreter_context->repl_state->IsReplica(); switch (query->action_) { - case MultiDatabaseQuery::Action::CREATE: + case MultiDatabaseQuery::Action::CREATE: { if (is_replica) { throw QueryException("Query forbidden on the replica!"); } @@ -3442,8 +3447,8 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur RWType::W, "" // No target DB possible }; - - case MultiDatabaseQuery::Action::USE: + } + case MultiDatabaseQuery::Action::USE: { if (current_db.in_explicit_db_) { throw QueryException("Database switching is prohibited if session explicitly defines the used database"); } @@ -3458,7 +3463,7 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur std::string res; try { - if (current_db.db_acc_ && db_name == current_db.db_acc_->get()->id()) { + if (current_db.db_acc_ && db_name == current_db.db_acc_->get()->name()) { res = "Already using " + db_name; } else { auto tmp = db_handler->Get(db_name); @@ -3479,11 +3484,12 @@ PreparedQuery 
PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur }, RWType::NONE, query->db_name_}; - - case MultiDatabaseQuery::Action::DROP: + } + case MultiDatabaseQuery::Action::DROP: { if (is_replica) { throw QueryException("Query forbidden on the replica!"); } + return PreparedQuery{ {"STATUS"}, std::move(parsed_query.required_privileges), @@ -3493,10 +3499,10 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur try { // Remove database - auto success = db_handler->Delete(db_name); + auto success = db_handler->TryDelete(db_name); if (!success.HasError()) { // Remove from auth - auth->DeleteDatabase(db_name); + if (auth) auth->DeleteDatabase(db_name); } else { switch (success.GetError()) { case dbms::DeleteError::DEFAULT_DB: @@ -3524,48 +3530,56 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur }, RWType::W, query->db_name_}; - } + } + case MultiDatabaseQuery::Action::SHOW: { + return PreparedQuery{ + {"Current"}, + std::move(parsed_query.required_privileges), + [db_acc = current_db.db_acc_, pull_plan = std::shared_ptr(nullptr)]( + AnyStream *stream, std::optional n) mutable -> std::optional { + if (!pull_plan) { + std::vector> results; + auto db_name = db_acc ? TypedValue{db_acc->get()->storage()->name()} : TypedValue{}; + results.push_back({std::move(db_name)}); + pull_plan = std::make_shared(std::move(results)); + } + + if (pull_plan->Pull(stream, n)) { + return QueryHandlerResult::NOTHING; + } + return std::nullopt; + }, + RWType::NONE, + "" // No target DB + }; + } + }; #else throw QueryException("Query not supported."); #endif } -PreparedQuery PrepareShowDatabasesQuery(ParsedQuery parsed_query, CurrentDB &current_db, - InterpreterContext *interpreter_context, +PreparedQuery PrepareShowDatabasesQuery(ParsedQuery parsed_query, InterpreterContext *interpreter_context, const std::optional &username) { #ifdef MG_ENTERPRISE - - // TODO: split query into two, Databases (no need for current_db), & Current database (uses current_db) - MG_ASSERT(current_db.db_acc_, "Show Database Level query expects a current DB"); - storage::Storage *storage = current_db.db_acc_->get()->storage(); - if (!license::global_license_checker.IsEnterpriseValidFast()) { throw QueryException("Trying to use enterprise feature without a valid license."); } - // TODO pick directly from ic auto *db_handler = interpreter_context->dbms_handler; AuthQueryHandler *auth = interpreter_context->auth; Callback callback; - callback.header = {"Name", "Current"}; - callback.fn = [auth, storage, db_handler, username]() mutable -> std::vector> { + callback.header = {"Name"}; + callback.fn = [auth, db_handler, username]() mutable -> std::vector> { std::vector> status; - const auto &in_use = storage->id(); - bool found_current = false; - auto gen_status = [&](T all, K denied) { Sort(all); Sort(denied); status.reserve(all.size()); for (const auto &name : all) { - TypedValue use(""); - if (!found_current && Same(name, in_use)) { - use = TypedValue("*"); - found_current = true; - } - status.push_back({TypedValue(name), std::move(use)}); + status.push_back({TypedValue(name)}); } // No denied databases (no need to filter them out) @@ -3595,7 +3609,6 @@ PreparedQuery PrepareShowDatabasesQuery(ParsedQuery parsed_query, CurrentDB &cur } } - if (!found_current) throw QueryRuntimeException("Missing current database!"); return status; }; @@ -3631,15 +3644,13 @@ void Interpreter::BeginTransaction(QueryExtras const &extras) { void Interpreter::CommitTransaction() { const auto
prepared_query = PrepareTransactionQuery("COMMIT"); prepared_query.query_handler(nullptr, {}); - query_executions_.clear(); - transaction_queries_->clear(); + ResetInterpreter(); } void Interpreter::RollbackTransaction() { const auto prepared_query = PrepareTransactionQuery("ROLLBACK"); prepared_query.query_handler(nullptr, {}); - query_executions_.clear(); - transaction_queries_->clear(); + ResetInterpreter(); } #if MG_ENTERPRISE @@ -3655,11 +3666,6 @@ void Interpreter::SetCurrentDB(std::string_view db_name, bool in_explicit_db) { Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, const std::map &params, QueryExtras const &extras) { - // TODO: Remove once the interpreter is storage/tx independent and could run without an associated database - if (!current_db_.db_acc_) { - throw DatabaseContextRequiredException("Database required for the query."); - } - // Handle transaction control queries. const auto upper_case_query = utils::ToUpperCase(query_string); const auto trimmed_query = utils::Trim(upper_case_query); @@ -3673,18 +3679,16 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, return {query_execution->prepared_query->header, query_execution->prepared_query->privileges, qid, {}}; } - if (!in_explicit_transaction_) { - transaction_queries_->clear(); - } - // Don't save BEGIN, COMMIT or ROLLBACK - transaction_queries_->push_back(query_string); + // NOTE: query_string is not BEGIN, COMMIT or ROLLBACK // All queries other than transaction control queries advance the command in // an explicit transaction block. if (in_explicit_transaction_) { + transaction_queries_->push_back(query_string); AdvanceCommand(); } else { - query_executions_.clear(); + ResetInterpreter(); + transaction_queries_->push_back(query_string); if (current_db_.db_transactional_accessor_ /* && !in_explicit_transaction_*/) { // If we're not in an explicit transaction block and we have an open // transaction, abort it since we're about to prepare a new query. @@ -3742,6 +3746,37 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, // field with an improved estimate.
query_execution->summary["cost_estimate"] = 0.0; + // System queries require strict ordering; since there is no MVCC-like thing, we allow single queries + bool system_queries = utils::Downcast(parsed_query.query) || + utils::Downcast(parsed_query.query) || + utils::Downcast(parsed_query.query) || + utils::Downcast(parsed_query.query); + + // TODO Split SHOW REPLICAS (which needs the db) and other replication queries + auto system_transaction_guard = std::invoke([&]() -> std::optional { + if (system_queries) { + // TODO: Ordering between system and data queries + // Start a system transaction + auto system_unique = std::unique_lock{interpreter_context_->dbms_handler->system_lock_, std::defer_lock}; + if (!system_unique.try_lock_for(std::chrono::milliseconds(kSystemTxTryMS))) { + throw ConcurrentSystemQueriesException("Multiple concurrent system queries are not supported."); + } + return std::optional{std::in_place, std::move(system_unique), + *interpreter_context_->dbms_handler}; + } + return std::nullopt; + }); + + // Some queries do not require a database to be executed (current_db_ won't be passed on to the Prepare*; special + // case for use database which overwrites the current database) + bool no_db_required = system_queries || utils::Downcast(parsed_query.query) || + utils::Downcast(parsed_query.query) || + utils::Downcast(parsed_query.query) || + utils::Downcast(parsed_query.query); + if (!no_db_required && !current_db_.db_acc_) { + throw DatabaseContextRequiredException("Database required for the query."); + } + // Some queries require an active transaction in order to be prepared. // TODO: make a better analysis visitor over the `parsed_query.query` bool requires_db_transaction = @@ -3760,11 +3795,6 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, SetupDatabaseTransaction(could_commit, unique); } - // TODO: none database transaction (assuming mutually exclusive from DB transactions) - // if (!requires_db_transaction) { - // /* something */ - // } - utils::Timer planning_timer; PreparedQuery prepared_query; utils::MemoryResource *memory_resource = @@ -3806,7 +3836,7 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, /// TODO: make replication DB agnostic prepared_query = PrepareReplicationQuery(std::move(parsed_query), in_explicit_transaction_, &query_execution->notifications, - *interpreter_context_->dbms_handler, interpreter_context_->config); + *interpreter_context_->dbms_handler, current_db_, interpreter_context_->config); } else if (utils::Downcast(parsed_query.query)) { prepared_query = PrepareLockPathQuery(std::move(parsed_query), in_explicit_transaction_, current_db_); } else if (utils::Downcast(parsed_query.query)) { @@ -3848,12 +3878,11 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, throw MultiDatabaseQueryInMulticommandTxException(); } /// SYSTEM (Replication) + INTERPRETER - prepared_query = - PrepareMultiDatabaseQuery(std::move(parsed_query), current_db_, interpreter_context_, on_change_); + // DMG_ASSERT(system_guard); + prepared_query = PrepareMultiDatabaseQuery(std::move(parsed_query), current_db_, interpreter_context_, on_change_ + /*, *system_guard*/); } else if (utils::Downcast(parsed_query.query)) { - /// SYSTEM PURE ("SHOW DATABASES") - /// INTERPRETER (TODO: "SHOW DATABASE") - prepared_query = PrepareShowDatabasesQuery(std::move(parsed_query), current_db_, interpreter_context_, username_); + prepared_query = 
PrepareShowDatabasesQuery(std::move(parsed_query), interpreter_context_, username_); } else if (utils::Downcast(parsed_query.query)) { if (in_explicit_transaction_) { throw EdgeImportModeModificationInMulticommandTxException(); } @@ -3878,10 +3907,12 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, // Set the target db to the current db (some queries have different target from the current db) if (!query_execution->prepared_query->db) { - query_execution->prepared_query->db = current_db_.db_acc_->get()->id(); + query_execution->prepared_query->db = current_db_.db_acc_->get()->name(); } query_execution->summary["db"] = *query_execution->prepared_query->db; + // prepare is done, move system txn guard to be owned by interpreter + system_transaction_guard_ = std::move(system_transaction_guard); return {query_execution->prepared_query->header, query_execution->prepared_query->privileges, qid, query_execution->prepared_query->db}; } catch (const utils::BasicException &) { @@ -3893,9 +3924,11 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, throw; } } + void Interpreter::SetupDatabaseTransaction(bool couldCommit, bool unique) { current_db_.SetupDatabaseTransaction(GetIsolationLevelOverride(), couldCommit, unique); } + void Interpreter::SetupInterpreterTransaction(const QueryExtras &extras) { metrics::IncrementCounter(metrics::ActiveTransactions); transaction_status_.store(TransactionStatus::ACTIVE, std::memory_order_release); @@ -3974,7 +4007,9 @@ void RunTriggersAfterCommit(dbms::DatabaseAccess db_acc, InterpreterContext *int continue; } - auto maybe_commit_error = db_accessor.Commit(); + bool is_main = interpreter_context->repl_state->IsMain(); + auto maybe_commit_error = db_accessor.Commit({.is_main = is_main}, db_acc); + if (maybe_commit_error.HasError()) { const auto &error = maybe_commit_error.GetError(); @@ -4025,10 +4060,35 @@ void Interpreter::Commit() { // We should document clearly that all results should be pulled to complete // a query. current_transaction_.reset(); - if (!current_db_.db_transactional_accessor_) return; + if (!current_db_.db_transactional_accessor_ || !current_db_.db_acc_) { + // No database nor db transaction; check for system transaction + if (!system_transaction_guard_) return; - // TODO: Better (or removed) check - if (!current_db_.db_acc_) return; + // TODO Distinguish between data and system transaction state + // Think about updating the status to a struct with bitfield + // Clean transaction status on exit + utils::OnScopeExit clean_status([this]() { + system_transaction_guard_.reset(); + // System transactions are not terminable + // Durability has happened at time of PULL + // Commit is doing replication and timestamp update + // The DBMS does not support MVCC, so doing durability here doesn't change the overall logic; we cannot abort! + // What we are trying to do is set the transaction back to IDLE + // We cannot simply put it to IDLE, since the status is used as a synchronization method and we have to follow + // its logic. There are 2 states when we could update to IDLE (ACTIVE and TERMINATED).
+ auto expected = TransactionStatus::ACTIVE; + while (!transaction_status_.compare_exchange_weak(expected, TransactionStatus::IDLE)) { + if (expected == TransactionStatus::TERMINATED) { + continue; + } + expected = TransactionStatus::ACTIVE; + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + } + }); + + system_transaction_guard_->Commit(); + return; + } auto *db = current_db_.db_acc_->get(); /* @@ -4098,19 +4158,19 @@ void Interpreter::Commit() { }; utils::OnScopeExit members_reseter(reset_necessary_members); - auto commit_confirmed_by_all_sync_repplicas = true; + auto commit_confirmed_by_all_sync_replicas = true; - auto maybe_commit_error = - current_db_.db_transactional_accessor_->Commit(std::nullopt, interpreter_context_->repl_state->IsMain()); + bool is_main = interpreter_context_->repl_state->IsMain(); + auto maybe_commit_error = current_db_.db_transactional_accessor_->Commit({.is_main = is_main}, current_db_.db_acc_); if (maybe_commit_error.HasError()) { const auto &error = maybe_commit_error.GetError(); std::visit( [&execution_db_accessor = current_db_.execution_db_accessor_, - &commit_confirmed_by_all_sync_repplicas](T &&arg) { + &commit_confirmed_by_all_sync_replicas](const T &arg) { using ErrorType = std::remove_cvref_t; if constexpr (std::is_same_v) { - commit_confirmed_by_all_sync_repplicas = false; + commit_confirmed_by_all_sync_replicas = false; } else if constexpr (std::is_same_v) { const auto &constraint_violation = arg; auto &label_name = execution_db_accessor->LabelToName(constraint_violation.label); @@ -4150,7 +4210,6 @@ void Interpreter::Commit() { if (trigger_context && db->trigger_store()->AfterCommitTriggers().size() > 0) { db->AddTask([this, trigger_context = std::move(*trigger_context), user_transaction = std::shared_ptr(std::move(current_db_.db_transactional_accessor_))]() mutable { - // TODO: Should this take the db_ and not Access()? RunTriggersAfterCommit(*current_db_.db_acc_, interpreter_context_, std::move(trigger_context), &this->transaction_status_); user_transaction->FinalizeTransaction(); @@ -4159,7 +4218,7 @@ void Interpreter::Commit() { } SPDLOG_DEBUG("Finished committing the transaction"); - if (!commit_confirmed_by_all_sync_repplicas) { + if (!commit_confirmed_by_all_sync_replicas) { throw ReplicationException("At least one SYNC replica has not confirmed committing last transaction."); } } diff --git a/src/query/interpreter.hpp b/src/query/interpreter.hpp index 5cb73cb07..7754fb406 100644 --- a/src/query/interpreter.hpp +++ b/src/query/interpreter.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -16,6 +16,7 @@ #include #include "dbms/database.hpp" +#include "dbms/dbms_handler.hpp" #include "memory/query_memory_control.hpp" #include "query/auth_checker.hpp" #include "query/auth_query_handler.hpp" @@ -67,45 +68,6 @@ inline constexpr size_t kExecutionPoolMaxBlockSize = 1024UL; // 2 ^ 10 enum class QueryHandlerResult { COMMIT, ABORT, NOTHING }; -class ReplicationQueryHandler { - public: - ReplicationQueryHandler() = default; - virtual ~ReplicationQueryHandler() = default; - - ReplicationQueryHandler(const ReplicationQueryHandler &) = default; - ReplicationQueryHandler &operator=(const ReplicationQueryHandler &) = default; - - ReplicationQueryHandler(ReplicationQueryHandler &&) = default; - ReplicationQueryHandler &operator=(ReplicationQueryHandler &&) = default; - - struct Replica { - std::string name; - std::string socket_address; - ReplicationQuery::SyncMode sync_mode; - std::optional timeout; - uint64_t current_timestamp_of_replica; - uint64_t current_number_of_timestamp_behind_master; - ReplicationQuery::ReplicaState state; - }; - - /// @throw QueryRuntimeException if an error ocurred. - virtual void SetReplicationRole(ReplicationQuery::ReplicationRole replication_role, std::optional port) = 0; - - /// @throw QueryRuntimeException if an error ocurred. - virtual ReplicationQuery::ReplicationRole ShowReplicationRole() const = 0; - - /// @throw QueryRuntimeException if an error ocurred. - virtual void RegisterReplica(const std::string &name, const std::string &socket_address, - ReplicationQuery::SyncMode sync_mode, - const std::chrono::seconds replica_check_frequency) = 0; - - /// @throw QueryRuntimeException if an error ocurred. - virtual void DropReplica(std::string_view replica_name) = 0; - - /// @throw QueryRuntimeException if an error ocurred. 
- virtual std::vector ShowReplicas() const = 0; -}; - class AnalyzeGraphQueryHandler { public: AnalyzeGraphQueryHandler() = default; @@ -281,7 +243,38 @@ class Interpreter final { void SetUser(std::string_view username); + struct SystemTransactionGuard { + explicit SystemTransactionGuard(std::unique_lock guard, dbms::DbmsHandler &dbms_handler) + : system_guard_(std::move(guard)), dbms_handler_{&dbms_handler} { + dbms_handler_->NewSystemTransaction(); + } + SystemTransactionGuard &operator=(SystemTransactionGuard &&) = default; + SystemTransactionGuard(SystemTransactionGuard &&) = default; + + ~SystemTransactionGuard() { + if (system_guard_.owns_lock()) dbms_handler_->ResetSystemTransaction(); + } + + dbms::AllSyncReplicaStatus Commit() { return dbms_handler_->Commit(); } + + private: + std::unique_lock system_guard_; + dbms::DbmsHandler *dbms_handler_; + }; + + std::optional system_transaction_guard_{}; + private: + void ResetInterpreter() { + query_executions_.clear(); + system_guard.reset(); + system_transaction_guard_.reset(); + transaction_queries_->clear(); + if (current_db_.db_acc_ && current_db_.db_acc_->is_deleting()) { + current_db_.db_acc_.reset(); + } + } + struct QueryExecution { std::variant execution_memory; utils::ResourceWithOutOfMemoryException execution_memory_with_exception; @@ -340,6 +333,9 @@ class Interpreter final { // TODO Figure out how this would work for multi-database // Exists only during a single transaction (for now should be okay as is) std::vector> query_executions_; + // TODO: our upgradable lock guard for system + std::optional system_guard; + // all queries that are run as part of the current transaction utils::Synchronized, utils::SpinLock> transaction_queries_; @@ -435,8 +431,7 @@ std::map Interpreter::Pull(TStream *result_stream, std: // NOTE: we cannot clear query_execution inside the Abort and Commit // methods as we will delete summary contained in them which we need // after our query finished executing. - query_executions_.clear(); - transaction_queries_->clear(); + ResetInterpreter(); } else { // We can only clear this execution as some of the queries // in the transaction can be in unfinished state diff --git a/src/query/interpreter_context.cpp b/src/query/interpreter_context.cpp index 75d734645..cace25ec6 100644 --- a/src/query/interpreter_context.cpp +++ b/src/query/interpreter_context.cpp @@ -56,7 +56,7 @@ std::vector> InterpreterContext::TerminateTransactions( std::iter_swap(it, not_found_midpoint); auto get_interpreter_db_name = [&]() -> std::string const & { static std::string all; - return interpreter->current_db_.db_acc_ ? interpreter->current_db_.db_acc_->get()->id() : all; + return interpreter->current_db_.db_acc_ ? interpreter->current_db_.db_acc_->get()->name() : all; }; if (interpreter->username_ == username || privilege_checker(get_interpreter_db_name())) { killed = true; // Note: this is used by the above `clean_status` (OnScopeExit) diff --git a/src/query/interpreter_context.hpp b/src/query/interpreter_context.hpp index af8648376..9b54dbd3a 100644 --- a/src/query/interpreter_context.hpp +++ b/src/query/interpreter_context.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -22,6 +22,8 @@ #include "query/cypher_query_interpreter.hpp" #include "query/typed_value.hpp" #include "replication/state.hpp" +#include "storage/v2/config.hpp" +#include "storage/v2/transaction.hpp" #include "utils/gatekeeper.hpp" #include "utils/skip_list.hpp" #include "utils/spin_lock.hpp" @@ -57,6 +59,7 @@ struct InterpreterContext { // GLOBAL memgraph::replication::ReplicationState *repl_state; + AuthQueryHandler *auth; AuthChecker *auth_checker; diff --git a/src/query/plan/operator.hpp b/src/query/plan/operator.hpp index 8fa3d3a7c..516ef2e38 100644 --- a/src/query/plan/operator.hpp +++ b/src/query/plan/operator.hpp @@ -916,11 +916,11 @@ struct ExpansionLambda { /// Currently expanded node symbol. Symbol inner_node_symbol; /// Expression used in lambda during expansion. - Expression *expression; + Expression *expression = nullptr; /// Currently expanded accumulated path symbol. - std::optional accumulated_path_symbol; + std::optional accumulated_path_symbol = std::nullopt; /// Currently expanded accumulated weight symbol. - std::optional accumulated_weight_symbol; + std::optional accumulated_weight_symbol = std::nullopt; ExpansionLambda Clone(AstStorage *storage) const { ExpansionLambda object; diff --git a/src/query/stream/streams.cpp b/src/query/stream/streams.cpp index 57011aecb..101ca592c 100644 --- a/src/query/stream/streams.cpp +++ b/src/query/stream/streams.cpp @@ -644,6 +644,25 @@ void Streams::Drop(const std::string &stream_name) { // TODO(antaljanosbenjamin) Release the transformation } +void Streams::DropAll() { + streams_.WithLock([this](StreamsMap &streams) { + bool durability_ok = true; + for (auto &[name, stream] : streams) { + // streams_ is write locked, which means there is no access to it outside of this function, thus only the Test + // function can be executing with the consumer, nothing else. + // By acquiring the write lock here for the consumer, we make sure there is + // no running Test function for this consumer, therefore it can be erased. + std::visit([&](const auto &stream_data) { stream_data.stream_source->Lock(); }, stream); + if (!storage_.Delete(name)) { + durability_ok = false; + } + } + + streams.clear(); + return durability_ok; // TODO: do we need special case for this cleanup if false + }); +} + void Streams::Start(const std::string &stream_name) { auto locked_streams = streams_.Lock(); auto it = GetStream(*locked_streams, stream_name); diff --git a/src/query/stream/streams.hpp b/src/query/stream/streams.hpp index 2c89341d1..bad1f8c98 100644 --- a/src/query/stream/streams.hpp +++ b/src/query/stream/streams.hpp @@ -110,6 +110,11 @@ class Streams final { /// @throws StreamsException if the stream doesn't exist or if the persisted metadata can't be deleted. void Drop(const std::string &stream_name); + /// Deletes all existing streams and all the data that was persisted. + /// + /// @throws StreamsException if the persisted metadata can't be deleted. + void DropAll(); + /// Start consuming from a stream. 
/// /// @param stream_name name of the stream that needs to be started diff --git a/src/replication/include/replication/messages.hpp b/src/replication/include/replication/messages.hpp index 57cf29351..21b2ffeee 100644 --- a/src/replication/include/replication/messages.hpp +++ b/src/replication/include/replication/messages.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -32,13 +32,44 @@ struct FrequentHeartbeatRes { static void Load(FrequentHeartbeatRes *self, memgraph::slk::Reader *reader); static void Save(const FrequentHeartbeatRes &self, memgraph::slk::Builder *builder); FrequentHeartbeatRes() = default; - explicit FrequentHeartbeatRes(bool success) : success(success) {} - - bool success; }; using FrequentHeartbeatRpc = rpc::RequestResponse; +struct SystemHeartbeatReq { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + static void Load(SystemHeartbeatReq *self, memgraph::slk::Reader *reader); + static void Save(const SystemHeartbeatReq &self, memgraph::slk::Builder *builder); + SystemHeartbeatReq() = default; +}; + +struct SystemHeartbeatRes { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + static void Load(SystemHeartbeatRes *self, memgraph::slk::Reader *reader); + static void Save(const SystemHeartbeatRes &self, memgraph::slk::Builder *builder); + SystemHeartbeatRes() = default; + explicit SystemHeartbeatRes(uint64_t system_timestamp) : system_timestamp(system_timestamp) {} + + uint64_t system_timestamp; +}; + +using SystemHeartbeatRpc = rpc::RequestResponse; + void FrequentHeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder); } // namespace memgraph::replication + +namespace memgraph::slk { +void Save(const memgraph::replication::FrequentHeartbeatRes &self, memgraph::slk::Builder *builder); +void Load(memgraph::replication::FrequentHeartbeatRes *self, memgraph::slk::Reader *reader); +void Save(const memgraph::replication::FrequentHeartbeatReq & /*self*/, memgraph::slk::Builder * /*builder*/); +void Load(memgraph::replication::FrequentHeartbeatReq * /*self*/, memgraph::slk::Reader * /*reader*/); +void Save(const memgraph::replication::SystemHeartbeatRes &self, memgraph::slk::Builder *builder); +void Load(memgraph::replication::SystemHeartbeatRes *self, memgraph::slk::Reader *reader); +void Save(const memgraph::replication::SystemHeartbeatReq & /*self*/, memgraph::slk::Builder * /*builder*/); +void Load(memgraph::replication::SystemHeartbeatReq * /*self*/, memgraph::slk::Reader * /*reader*/); +} // namespace memgraph::slk diff --git a/src/replication/include/replication/replication_client.hpp b/src/replication/include/replication/replication_client.hpp index 16e1010bf..a228c6a7a 100644 --- a/src/replication/include/replication/replication_client.hpp +++ b/src/replication/include/replication/replication_client.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -15,6 +15,7 @@ #include "replication/messages.hpp" #include "rpc/client.hpp" #include "utils/scheduler.hpp" +#include "utils/synchronized.hpp" #include "utils/thread_pool.hpp" #include @@ -22,8 +23,10 @@ namespace memgraph::replication { +struct ReplicationClient; + template -concept InvocableWithStringView = std::invocable; +concept FrequentCheckCB = std::invocable; struct ReplicationClient { explicit ReplicationClient(const memgraph::replication::ReplicationClientConfig &config); @@ -34,24 +37,27 @@ struct ReplicationClient { ReplicationClient(ReplicationClient &&) noexcept = delete; ReplicationClient &operator=(ReplicationClient &&) noexcept = delete; - template + template void StartFrequentCheck(F &&callback) { // Help the user to get the most accurate replica state possible. if (replica_check_frequency_ > std::chrono::seconds(0)) { - replica_checker_.Run("Replica Checker", replica_check_frequency_, [this, cb = std::forward(callback)] { - try { - bool success = false; - { - auto stream{rpc_client_.Stream()}; - success = stream.AwaitResponse().success; - } - if (success) { - cb(name_); - } - } catch (const rpc::RpcFailedException &) { - // Nothing to do...wait for a reconnect - } - }); + replica_checker_.Run("Replica Checker", replica_check_frequency_, + [this, cb = std::forward(callback), reconnect = false]() mutable { + try { + { + auto stream{rpc_client_.Stream()}; + stream.AwaitResponse(); + } + cb(reconnect, *this); + reconnect = false; + } catch (const rpc::RpcFailedException &) { + // Nothing to do...wait for a reconnect + // NOTE: Here we are communicating with the instance connection. 
+ // We don't have access to the underlying client, so the only thing we can do is + // tell the callback that this is a reconnection and to check the state + reconnect = true; + } + }); } } @@ -60,6 +66,13 @@ struct ReplicationClient { rpc::Client rpc_client_; std::chrono::seconds replica_check_frequency_; + // TODO: Better, this was the easiest place to put this + enum class State { + BEHIND, + READY, + }; + utils::Synchronized state_{State::BEHIND}; + memgraph::replication::ReplicationMode mode_{memgraph::replication::ReplicationMode::SYNC}; // This thread pool is used for background tasks so we don't // block the main storage thread diff --git a/src/replication/include/replication/state.hpp b/src/replication/include/replication/state.hpp index 76aec1053..ecd2b8397 100644 --- a/src/replication/include/replication/state.hpp +++ b/src/replication/include/replication/state.hpp @@ -45,7 +45,7 @@ struct RoleMainData { RoleMainData &operator=(RoleMainData &&) = default; ReplicationEpoch epoch_; - std::list registered_replicas_{}; + std::list registered_replicas_{}; // TODO: data race issues }; struct RoleReplicaData { @@ -79,7 +79,8 @@ struct ReplicationState { bool IsMain() const { return GetRole() == ReplicationRole::MAIN; } bool IsReplica() const { return GetRole() == ReplicationRole::REPLICA; } - bool ShouldPersist() const { return nullptr != durability_; } + bool HasDurability() const { return nullptr != durability_; } + bool TryPersistRoleMain(std::string new_epoch); bool TryPersistRoleReplica(const ReplicationServerConfig &config); bool TryPersistUnregisterReplica(std::string_view name); diff --git a/src/replication/messages.cpp b/src/replication/messages.cpp index 4503e9df2..1b1942e7f 100644 --- a/src/replication/messages.cpp +++ b/src/replication/messages.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd.
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -16,12 +16,8 @@ namespace memgraph::slk { // Serialize code for FrequentHeartbeatRes -void Save(const memgraph::replication::FrequentHeartbeatRes &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.success, builder); -} -void Load(memgraph::replication::FrequentHeartbeatRes *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->success, reader); -} +void Save(const memgraph::replication::FrequentHeartbeatRes &self, memgraph::slk::Builder *builder) {} +void Load(memgraph::replication::FrequentHeartbeatRes *self, memgraph::slk::Reader *reader) {} // Serialize code for FrequentHeartbeatReq void Save(const memgraph::replication::FrequentHeartbeatReq & /*self*/, memgraph::slk::Builder * /*builder*/) { @@ -31,6 +27,22 @@ void Load(memgraph::replication::FrequentHeartbeatReq * /*self*/, memgraph::slk: /* Nothing to serialize */ } +// Serialize code for SystemHeartbeatRes +void Save(const memgraph::replication::SystemHeartbeatRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self.system_timestamp, builder); +} +void Load(memgraph::replication::SystemHeartbeatRes *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(&self->system_timestamp, reader); +} + +// Serialize code for SystemHeartbeatReq +void Save(const memgraph::replication::SystemHeartbeatReq & /*self*/, memgraph::slk::Builder * /*builder*/) { + /* Nothing to serialize */ +} +void Load(memgraph::replication::SystemHeartbeatReq * /*self*/, memgraph::slk::Reader * /*reader*/) { + /* Nothing to serialize */ +} + } // namespace memgraph::slk namespace memgraph::replication { @@ -41,6 +53,12 @@ constexpr utils::TypeInfo FrequentHeartbeatReq::kType{utils::TypeId::REP_FREQUEN constexpr utils::TypeInfo FrequentHeartbeatRes::kType{utils::TypeId::REP_FREQUENT_HEARTBEAT_RES, "FrequentHeartbeatRes", nullptr}; +constexpr utils::TypeInfo SystemHeartbeatReq::kType{utils::TypeId::REP_SYSTEM_HEARTBEAT_REQ, "SystemHeartbeatReq", + nullptr}; + +constexpr utils::TypeInfo SystemHeartbeatRes::kType{utils::TypeId::REP_SYSTEM_HEARTBEAT_RES, "SystemHeartbeatRes", + nullptr}; + void FrequentHeartbeatReq::Save(const FrequentHeartbeatReq &self, memgraph::slk::Builder *builder) { memgraph::slk::Save(self, builder); } @@ -54,11 +72,24 @@ void FrequentHeartbeatRes::Load(FrequentHeartbeatRes *self, memgraph::slk::Reade memgraph::slk::Load(self, reader); } +void SystemHeartbeatReq::Save(const SystemHeartbeatReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void SystemHeartbeatReq::Load(SystemHeartbeatReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} +void SystemHeartbeatRes::Save(const SystemHeartbeatRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void SystemHeartbeatRes::Load(SystemHeartbeatRes *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} + void FrequentHeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder) { FrequentHeartbeatReq req; FrequentHeartbeatReq::Load(&req, req_reader); memgraph::slk::Load(&req, req_reader); - FrequentHeartbeatRes res{true}; + FrequentHeartbeatRes res{}; memgraph::slk::Save(res, res_builder); } diff --git a/src/replication/state.cpp b/src/replication/state.cpp index 60c390e17..8097bb6f4 100644 --- a/src/replication/state.cpp +++ 
b/src/replication/state.cpp @@ -34,6 +34,7 @@ ReplicationState::ReplicationState(std::optional durabili repl_dir /= kReplicationDirectory; utils::EnsureDirOrDie(repl_dir); durability_ = std::make_unique(std::move(repl_dir)); + spdlog::info("Replication configuration will be stored and will be automatically restored in case of a crash."); auto replicationData = FetchReplicationData(); if (replicationData.HasError()) { @@ -54,7 +55,7 @@ ReplicationState::ReplicationState(std::optional durabili } bool ReplicationState::TryPersistRoleReplica(const ReplicationServerConfig &config) { - if (!ShouldPersist()) return true; + if (!HasDurability()) return true; auto data = durability::ReplicationRoleEntry{.role = durability::ReplicaRole{ .config = config, @@ -78,7 +79,7 @@ bool ReplicationState::TryPersistRoleReplica(const ReplicationServerConfig &conf } bool ReplicationState::TryPersistRoleMain(std::string new_epoch) { - if (!ShouldPersist()) return true; + if (!HasDurability()) return true; auto data = durability::ReplicationRoleEntry{.role = durability::MainRole{.epoch = ReplicationEpoch{std::move(new_epoch)}}}; @@ -92,7 +93,7 @@ bool ReplicationState::TryPersistRoleMain(std::string new_epoch) { } bool ReplicationState::TryPersistUnregisterReplica(std::string_view name) { - if (!ShouldPersist()) return true; + if (!HasDurability()) return true; auto key = BuildReplicaKey(name); @@ -104,7 +105,7 @@ bool ReplicationState::TryPersistUnregisterReplica(std::string_view name) { // TODO: FetchEpochData (agnostic of FetchReplicationData, but should be done before) auto ReplicationState::FetchReplicationData() -> FetchReplicationResult_t { - if (!ShouldPersist()) return FetchReplicationError::NOTHING_FETCHED; + if (!HasDurability()) return FetchReplicationError::NOTHING_FETCHED; const auto replication_data = durability_->Get(durability::kReplicationRoleName); if (!replication_data.has_value()) { return FetchReplicationError::NOTHING_FETCHED; @@ -199,7 +200,7 @@ bool ReplicationState::HandleVersionMigration(durability::ReplicationRoleEntry & } bool ReplicationState::TryPersistRegisteredReplica(const ReplicationClientConfig &config) { - if (!ShouldPersist()) return true; + if (!HasDurability()) return true; // If any replicas are persisted then Role must be persisted if (role_persisted != RolePersisted::YES) { diff --git a/src/rpc/client.hpp b/src/rpc/client.hpp index 1fd3fff8d..a9ae7202d 100644 --- a/src/rpc/client.hpp +++ b/src/rpc/client.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -105,11 +105,15 @@ class Client { utils::OnScopeExit res_cleanup([&, response_data_size] { self_->client_->ShiftData(response_data_size); }); utils::TypeId res_id{utils::TypeId::UNKNOWN}; - slk::Load(&res_id, &res_reader); - // NOLINTNEXTLINE(cppcoreguidelines-init-variables) rpc::Version version; - slk::Load(&version, &res_reader); + + try { + slk::Load(&res_id, &res_reader); + slk::Load(&version, &res_reader); + } catch (const slk::SlkReaderException &) { + throw SlkRpcFailedException(); + } if (version != rpc::current_version) { // V1 we introduced versioning with, absolutely no backwards compatibility, diff --git a/src/rpc/exceptions.hpp b/src/rpc/exceptions.hpp index 346c53a9a..f278b2414 100644 --- a/src/rpc/exceptions.hpp +++ b/src/rpc/exceptions.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -45,4 +45,12 @@ class GenericRpcFailedException : public RpcFailedException { SPECIALIZE_GET_EXCEPTION_NAME(GenericRpcFailedException); }; +class SlkRpcFailedException : public RpcFailedException { + public: + SlkRpcFailedException() + : RpcFailedException("Received malformed message from cluster. Please raise an issue on Memgraph GitHub issues.") {} + + SPECIALIZE_GET_EXCEPTION_NAME(SlkRpcFailedException); +}; + } // namespace memgraph::rpc diff --git a/src/rpc/protocol.cpp b/src/rpc/protocol.cpp index 8bc77579b..2a9c8ea72 100644 --- a/src/rpc/protocol.cpp +++ b/src/rpc/protocol.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -13,7 +13,7 @@ #include -#include "rpc/messages.hpp" +#include "rpc/exceptions.hpp" #include "rpc/server.hpp" #include "rpc/version.hpp" #include "slk/serialization.hpp" @@ -46,10 +46,14 @@ void Session::Execute() { // Load the request ID. 
utils::TypeId req_id{utils::TypeId::UNKNOWN}; - slk::Load(&req_id, &req_reader); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) rpc::Version version; - slk::Load(&version, &req_reader); + try { + slk::Load(&req_id, &req_reader); + slk::Load(&version, &req_reader); + } catch (const slk::SlkReaderException &) { + throw rpc::SlkRpcFailedException(); + } if (version != rpc::current_version) { // V1 we introduced versioning with, absolutely no backwards compatibility, @@ -76,12 +80,20 @@ void Session::Execute() { SPDLOG_TRACE("[RpcServer] received {}", extended_it->second.req_type.name); slk::Save(extended_it->second.res_type.id, &res_builder); slk::Save(rpc::current_version, &res_builder); - extended_it->second.callback(endpoint_, &req_reader, &res_builder); + try { + extended_it->second.callback(endpoint_, &req_reader, &res_builder); + } catch (const slk::SlkReaderException &) { + throw rpc::SlkRpcFailedException(); + } } else { SPDLOG_TRACE("[RpcServer] received {}", it->second.req_type.name); slk::Save(it->second.res_type.id, &res_builder); slk::Save(rpc::current_version, &res_builder); - it->second.callback(&req_reader, &res_builder); + try { + it->second.callback(&req_reader, &res_builder); + } catch (const slk::SlkReaderException &) { + throw rpc::SlkRpcFailedException(); + } } // Finalize the SLK streams. diff --git a/src/rpc/version.hpp b/src/rpc/version.hpp index 29e7f8d3a..b234a3ccc 100644 --- a/src/rpc/version.hpp +++ b/src/rpc/version.hpp @@ -22,6 +22,12 @@ using Version = uint64_t; // probability of accidental match/conformance with pre 2.13 versions constexpr auto v1 = Version{2023'10'30'0'2'13}; -constexpr auto current_version = v1; +// TypeId has been changed, they were not stable +// Added stable numbering for replication types to be in +// 2000-2999 range. We shouldn't need to version bump again +// for any TypeIds that get added. 
+constexpr auto v2 = Version{2023'12'07'0'2'14}; + +constexpr auto current_version = v2; } // namespace memgraph::rpc diff --git a/src/slk/serialization.hpp b/src/slk/serialization.hpp index 9ca99527d..791f52e8c 100644 --- a/src/slk/serialization.hpp +++ b/src/slk/serialization.hpp @@ -60,6 +60,11 @@ void Save(const std::vector &obj, Builder *builder); template void Load(std::vector *obj, Reader *reader); +template +void Save(const std::array &obj, Builder *builder); +template +void Load(std::array *obj, Reader *reader); + template void Save(const std::set &obj, Builder *builder); template @@ -201,6 +206,24 @@ inline void Load(std::vector *obj, Reader *reader) { } } +template +inline void Save(const std::array &obj, Builder *builder) { + uint64_t size = obj.size(); + Save(size, builder); + for (const auto &item : obj) { + Save(item, builder); + } +} + +template +inline void Load(std::array *obj, Reader *reader) { + uint64_t size = 0; + Load(&size, reader); + for (uint64_t i = 0; i < size; ++i) { + Load(&(*obj)[i], reader); + } +} + template inline void Save(const std::set &obj, Builder *builder) { uint64_t size = obj.size(); diff --git a/src/storage/v2/config.hpp b/src/storage/v2/config.hpp index dee2afe87..3533594ce 100644 --- a/src/storage/v2/config.hpp +++ b/src/storage/v2/config.hpp @@ -14,10 +14,12 @@ #include #include #include + #include "storage/v2/isolation_level.hpp" #include "storage/v2/storage_mode.hpp" #include "utils/exceptions.hpp" #include "utils/logging.hpp" +#include "utils/uuid.hpp" namespace memgraph::storage { @@ -27,6 +29,41 @@ class StorageConfigException : public utils::BasicException { SPECIALIZE_GET_EXCEPTION_NAME(StorageConfigException) }; +struct SalientConfig { + std::string name; + utils::UUID uuid; + StorageMode storage_mode{StorageMode::IN_MEMORY_TRANSACTIONAL}; + struct Items { + bool properties_on_edges{true}; + bool enable_schema_metadata{false}; + friend bool operator==(const Items &lrh, const Items &rhs) = default; + } items; + + friend bool operator==(const SalientConfig &, const SalientConfig &) = default; +}; + +inline void to_json(nlohmann::json &data, SalientConfig::Items const &items) { + data = nlohmann::json{{"properties_on_edges", items.properties_on_edges}, + {"enable_schema_metadata", items.enable_schema_metadata}}; +} + +inline void from_json(const nlohmann::json &data, SalientConfig::Items &items) { + data.at("properties_on_edges").get_to(items.properties_on_edges); + data.at("enable_schema_metadata").get_to(items.enable_schema_metadata); +} + +inline void to_json(nlohmann::json &data, SalientConfig const &config) { + data = nlohmann::json{ + {"items", config.items}, {"name", config.name}, {"uuid", config.uuid}, {"storage_mode", config.storage_mode}}; +} + +inline void from_json(const nlohmann::json &data, SalientConfig &config) { + data.at("items").get_to(config.items); + data.at("name").get_to(config.name); + data.at("uuid").get_to(config.uuid); + data.at("storage_mode").get_to(config.storage_mode); +} + /// Pass this class to the \ref Storage constructor to change the behavior of /// the storage. This class also defines the default behavior. 
struct Config { @@ -36,46 +73,40 @@ struct Config { Type type{Type::PERIODIC}; std::chrono::milliseconds interval{std::chrono::milliseconds(1000)}; friend bool operator==(const Gc &lrh, const Gc &rhs) = default; - } gc; - - struct Items { - bool properties_on_edges{true}; - bool enable_schema_metadata{false}; - friend bool operator==(const Items &lrh, const Items &rhs) = default; - } items; + } gc; // SYSTEM FLAG struct Durability { enum class SnapshotWalMode { DISABLED, PERIODIC_SNAPSHOT, PERIODIC_SNAPSHOT_WITH_WAL }; - std::filesystem::path storage_directory{"storage"}; + std::filesystem::path storage_directory{"storage"}; // PER INSTANCE SYSTEM FLAG-> root folder...ish - bool recover_on_startup{false}; + bool recover_on_startup{false}; // PER INSTANCE SYSTEM FLAG - SnapshotWalMode snapshot_wal_mode{SnapshotWalMode::DISABLED}; + SnapshotWalMode snapshot_wal_mode{SnapshotWalMode::DISABLED}; // PER DATABASE - std::chrono::milliseconds snapshot_interval{std::chrono::minutes(2)}; - uint64_t snapshot_retention_count{3}; + std::chrono::milliseconds snapshot_interval{std::chrono::minutes(2)}; // PER DATABASE + uint64_t snapshot_retention_count{3}; // PER DATABASE - uint64_t wal_file_size_kibibytes{20 * 1024}; - uint64_t wal_file_flush_every_n_tx{100000}; + uint64_t wal_file_size_kibibytes{20 * 1024}; // PER DATABASE + uint64_t wal_file_flush_every_n_tx{100000}; // PER DATABASE - bool snapshot_on_exit{false}; - bool restore_replication_state_on_startup{false}; + bool snapshot_on_exit{false}; // PER DATABASE + bool restore_replication_state_on_startup{false}; // PER INSTANCE - uint64_t items_per_batch{1'000'000}; - uint64_t recovery_thread_count{8}; + uint64_t items_per_batch{1'000'000}; // PER DATABASE + uint64_t recovery_thread_count{8}; // PER INSTANCE SYSTEM FLAG // deprecated - bool allow_parallel_index_creation{false}; + bool allow_parallel_index_creation{false}; // KILL - bool allow_parallel_schema_creation{false}; + bool allow_parallel_schema_creation{false}; // PER DATABASE friend bool operator==(const Durability &lrh, const Durability &rhs) = default; } durability; struct Transaction { IsolationLevel isolation_level{IsolationLevel::SNAPSHOT_ISOLATION}; friend bool operator==(const Transaction &lrh, const Transaction &rhs) = default; - } transaction; + } transaction; // PER DATABASE struct DiskConfig { std::filesystem::path main_storage_directory{"storage/rocksdb_main_storage"}; @@ -89,9 +120,9 @@ struct Config { friend bool operator==(const DiskConfig &lrh, const DiskConfig &rhs) = default; } disk; - std::string name; - bool force_on_disk{false}; - StorageMode storage_mode{StorageMode::IN_MEMORY_TRANSACTIONAL}; + SalientConfig salient; + + bool force_on_disk{false}; // TODO: cleanup.... remove + make the default storage_mode ON_DISK_TRANSACTIONAL if true friend bool operator==(const Config &lrh, const Config &rhs) = default; }; diff --git a/src/storage/v2/database_access.hpp b/src/storage/v2/database_access.hpp new file mode 100644 index 000000000..de7a1d7d4 --- /dev/null +++ b/src/storage/v2/database_access.hpp @@ -0,0 +1,25 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#include + +namespace memgraph::storage { + +/** + * @brief We need to protect the database using a DatabaseAccess, and we need to keep the replication/storage/dbms + * untied. To achieve that we are using std::any, but beware to pass in the correct type using DatabaseAccess = + * memgraph::utils::Gatekeeper::Accessor; + */ +using DatabaseAccessProtector = std::any; + +} // namespace memgraph::storage diff --git a/src/storage/v2/disk/storage.cpp b/src/storage/v2/disk/storage.cpp index ef182b439..c62125b70 100644 --- a/src/storage/v2/disk/storage.cpp +++ b/src/storage/v2/disk/storage.cpp @@ -951,7 +951,7 @@ Result DiskStorage::DiskAccessor::CreateEdge(VertexAccessor *from, EdgeRef edge(gid); bool edge_import_mode_active = disk_storage->edge_import_status_ == EdgeImportMode::ACTIVE; - if (storage_->config_.items.properties_on_edges) { + if (storage_->config_.salient.items.properties_on_edges) { auto acc = edge_import_mode_active ? disk_storage->edge_import_mode_cache_->AccessToEdges() : transaction_.edges_->access(); auto *delta = CreateDeleteObjectDelta(&transaction_); @@ -975,7 +975,7 @@ Result DiskStorage::DiskAccessor::CreateEdge(VertexAccessor *from, transaction_.manyDeltasCache.Invalidate(from_vertex, edge_type, EdgeDirection::OUT); transaction_.manyDeltasCache.Invalidate(to_vertex, edge_type, EdgeDirection::IN); - if (storage_->config_.items.enable_schema_metadata) { + if (storage_->config_.salient.items.enable_schema_metadata) { storage_->stored_edge_types_.try_insert(edge_type); } storage_->edge_count_.fetch_add(1, std::memory_order_acq_rel); @@ -1283,7 +1283,7 @@ bool DiskStorage::DeleteEdgeFromConnectivityIndex(Transaction *transaction, cons const auto src_vertex_gid = modified_edge.second.src_vertex_gid.ToString(); const auto dst_vertex_gid = modified_edge.second.dest_vertex_gid.ToString(); - if (!config_.items.properties_on_edges) { + if (!config_.salient.items.properties_on_edges) { /// If the object was created then flush it, otherwise since properties on edges are false /// edge wasn't modified for sure. if (root_action == Delta::Action::DELETE_OBJECT && @@ -1400,7 +1400,7 @@ std::optional DiskStorage::CreateEdgeFromDisk(const VertexAccessor } EdgeRef edge(gid); - if (config_.items.properties_on_edges) { + if (config_.salient.items.properties_on_edges) { auto acc = edge_import_mode_active ? edge_import_mode_cache_->AccessToEdges() : transaction->edges_->access(); auto *delta = CreateDeleteDeserializedObjectDelta(transaction, old_disk_key, std::move(read_ts)); auto [it, inserted] = acc.insert(Edge(gid, delta)); @@ -1458,7 +1458,8 @@ std::vector DiskStorage::OutEdges(const VertexAccessor *src_vertex if (!edge_types.empty() && !utils::Contains(edge_types, edge_type_id)) continue; auto edge_gid = Gid::FromString(edge_gid_str); - auto properties_str = config_.items.properties_on_edges ? utils::GetPropertiesFromEdgeValue(edge_val_str) : ""; + auto properties_str = + config_.salient.items.properties_on_edges ? 
utils::GetPropertiesFromEdgeValue(edge_val_str) : ""; const auto edge = std::invoke([this, destination, &edge_val_str, transaction, view, src_vertex, edge_type_id, edge_gid, &properties_str, &edge_gid_str]() { @@ -1599,7 +1600,7 @@ DiskStorage::CheckExistingVerticesBeforeCreatingUniqueConstraint(LabelId label, // NOLINTNEXTLINE(google-default-arguments) utils::BasicResult DiskStorage::DiskAccessor::Commit( - const std::optional desired_commit_timestamp, bool /*is_main*/) { + CommitReplArgs reparg, DatabaseAccessProtector /*db_acc*/) { MG_ASSERT(is_transaction_active_, "The transaction is already terminated!"); MG_ASSERT(!transaction_.must_abort, "The transaction can't be committed!"); @@ -1610,7 +1611,7 @@ utils::BasicResult DiskStorage::DiskAccessor::Co // This is usually done by the MVCC, but it does not handle the metadata deltas transaction_.EnsureCommitTimestampExists(); std::unique_lock engine_guard(storage_->engine_lock_); - commit_timestamp_.emplace(disk_storage->CommitTimestamp(desired_commit_timestamp)); + commit_timestamp_.emplace(disk_storage->CommitTimestamp(reparg.desired_commit_timestamp)); transaction_.commit_timestamp->store(*commit_timestamp_, std::memory_order_release); for (const auto &md_delta : transaction_.md_deltas) { @@ -1686,7 +1687,7 @@ utils::BasicResult DiskStorage::DiskAccessor::Co }))) { } else { std::unique_lock engine_guard(storage_->engine_lock_); - commit_timestamp_.emplace(disk_storage->CommitTimestamp(desired_commit_timestamp)); + commit_timestamp_.emplace(disk_storage->CommitTimestamp(reparg.desired_commit_timestamp)); transaction_.commit_timestamp->store(*commit_timestamp_, std::memory_order_release); if (edge_import_mode_active) { diff --git a/src/storage/v2/disk/storage.hpp b/src/storage/v2/disk/storage.hpp index 219cb8272..54cf81496 100644 --- a/src/storage/v2/disk/storage.hpp +++ b/src/storage/v2/disk/storage.hpp @@ -145,8 +145,8 @@ class DiskStorage final : public Storage { ConstraintsInfo ListAllConstraints() const override; // NOLINTNEXTLINE(google-default-arguments) - utils::BasicResult Commit(std::optional desired_commit_timestamp = {}, - bool is_main = true) override; + utils::BasicResult Commit(CommitReplArgs reparg = {}, + DatabaseAccessProtector db_acc = {}) override; void UpdateObjectsCountOnAbort(); diff --git a/src/storage/v2/durability/durability.cpp b/src/storage/v2/durability/durability.cpp index 6a89b7b5a..92c4d11e8 100644 --- a/src/storage/v2/durability/durability.cpp +++ b/src/storage/v2/durability/durability.cpp @@ -428,7 +428,7 @@ std::optional Recovery::RecoverData(std::string *uuid, Replication } try { auto info = LoadWal(wal_file.path, &indices_constraints, last_loaded_timestamp, vertices, edges, name_id_mapper, - edge_count, config.items); + edge_count, config.salient.items); recovery_info.next_vertex_id = std::max(recovery_info.next_vertex_id, info.next_vertex_id); recovery_info.next_edge_id = std::max(recovery_info.next_edge_id, info.next_edge_id); recovery_info.next_timestamp = std::max(recovery_info.next_timestamp, info.next_timestamp); diff --git a/src/storage/v2/durability/snapshot.cpp b/src/storage/v2/durability/snapshot.cpp index 52872222b..12b260496 100644 --- a/src/storage/v2/durability/snapshot.cpp +++ b/src/storage/v2/durability/snapshot.cpp @@ -222,7 +222,7 @@ std::vector ReadBatchInfos(Decoder &snapshot) { template void LoadPartialEdges(const std::filesystem::path &path, utils::SkipList &edges, const uint64_t from_offset, - const uint64_t edges_count, const Config::Items items, TFunc get_property_from_id) { + 
const uint64_t edges_count, const SalientConfig::Items items, TFunc get_property_from_id) { Decoder snapshot; snapshot.Initialize(path, kSnapshotMagic); @@ -420,7 +420,7 @@ template LoadPartialConnectivityResult LoadPartialConnectivity(const std::filesystem::path &path, utils::SkipList &vertices, utils::SkipList &edges, const uint64_t from_offset, const uint64_t vertices_count, - const Config::Items items, const bool snapshot_has_edges, + const SalientConfig::Items items, const bool snapshot_has_edges, TEdgeTypeFromIdFunc get_edge_type_from_id) { Decoder snapshot; snapshot.Initialize(path, kSnapshotMagic); @@ -621,7 +621,7 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils utils::SkipList *edges, std::deque> *epoch_history, NameIdMapper *name_id_mapper, std::atomic *edge_count, - Config::Items items) { + SalientConfig::Items items) { RecoveryInfo ret; RecoveredIndicesAndConstraints indices_constraints; @@ -1177,8 +1177,8 @@ RecoveredSnapshot LoadSnapshotVersion15(const std::filesystem::path &path, utils RecoverOnMultipleThreads( config.durability.recovery_thread_count, - [path, edges, items = config.items, &get_property_from_id](const size_t /*batch_index*/, - const BatchInfo &batch) { + [path, edges, items = config.salient.items, &get_property_from_id](const size_t /*batch_index*/, + const BatchInfo &batch) { LoadPartialEdges(path, *edges, batch.offset, batch.count, items, get_property_from_id); }, edge_batches); @@ -1218,7 +1218,7 @@ RecoveredSnapshot LoadSnapshotVersion15(const std::filesystem::path &path, utils RecoverOnMultipleThreads( config.durability.recovery_thread_count, - [path, vertices, edges, edge_count, items = config.items, snapshot_has_edges, &get_edge_type_from_id, + [path, vertices, edges, edge_count, items = config.salient.items, snapshot_has_edges, &get_edge_type_from_id, &highest_edge_gid, &recovery_info](const size_t batch_index, const BatchInfo &batch) { const auto result = LoadPartialConnectivity(path, *vertices, *edges, batch.offset, batch.count, items, snapshot_has_edges, get_edge_type_from_id); @@ -1391,7 +1391,8 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis if (!IsVersionSupported(*version)) throw RecoveryFailure(fmt::format("Invalid snapshot version {}", *version)); if (*version == 14U) { - return LoadSnapshotVersion14(path, vertices, edges, epoch_history, name_id_mapper, edge_count, config.items); + return LoadSnapshotVersion14(path, vertices, edges, epoch_history, name_id_mapper, edge_count, + config.salient.items); } if (*version == 15U) { return LoadSnapshotVersion15(path, vertices, edges, epoch_history, name_id_mapper, edge_count, config); @@ -1470,8 +1471,8 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis RecoverOnMultipleThreads( config.durability.recovery_thread_count, - [path, edges, items = config.items, &get_property_from_id](const size_t /*batch_index*/, - const BatchInfo &batch) { + [path, edges, items = config.salient.items, &get_property_from_id](const size_t /*batch_index*/, + const BatchInfo &batch) { LoadPartialEdges(path, *edges, batch.offset, batch.count, items, get_property_from_id); }, edge_batches); @@ -1511,7 +1512,7 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis RecoverOnMultipleThreads( config.durability.recovery_thread_count, - [path, vertices, edges, edge_count, items = config.items, snapshot_has_edges, &get_edge_type_from_id, + [path, vertices, edges, edge_count, items = 
config.salient.items, snapshot_has_edges, &get_edge_type_from_id, &highest_edge_gid, &recovery_info](const size_t batch_index, const BatchInfo &batch) { const auto result = LoadPartialConnectivity(path, *vertices, *edges, batch.offset, batch.count, items, snapshot_has_edges, get_edge_type_from_id); @@ -1868,7 +1869,7 @@ void CreateSnapshot(Storage *storage, Transaction *transaction, const std::files auto items_in_current_batch{0UL}; auto batch_start_offset{0UL}; // Store all edges. - if (storage->config_.items.properties_on_edges) { + if (storage->config_.salient.items.properties_on_edges) { offset_edges = snapshot.GetPosition(); batch_start_offset = offset_edges; auto acc = edges->access(); diff --git a/src/storage/v2/durability/wal.cpp b/src/storage/v2/durability/wal.cpp index 8c28a6b6d..e808f01a3 100644 --- a/src/storage/v2/durability/wal.cpp +++ b/src/storage/v2/durability/wal.cpp @@ -548,7 +548,7 @@ WalDeltaData::Type SkipWalDeltaData(BaseDecoder *decoder) { return delta.type; } -void EncodeDelta(BaseEncoder *encoder, NameIdMapper *name_id_mapper, Config::Items items, const Delta &delta, +void EncodeDelta(BaseEncoder *encoder, NameIdMapper *name_id_mapper, SalientConfig::Items items, const Delta &delta, const Vertex &vertex, uint64_t timestamp) { // When converting a Delta to a WAL delta the logic is inverted. That is // because the Delta's represent undo actions and we want to store redo @@ -709,7 +709,7 @@ void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, Storage RecoveryInfo LoadWal(const std::filesystem::path &path, RecoveredIndicesAndConstraints *indices_constraints, const std::optional last_loaded_timestamp, utils::SkipList *vertices, utils::SkipList *edges, NameIdMapper *name_id_mapper, std::atomic *edge_count, - Config::Items items) { + SalientConfig::Items items) { spdlog::info("Trying to load WAL file {}.", path); RecoveryInfo ret; @@ -983,8 +983,8 @@ RecoveryInfo LoadWal(const std::filesystem::path &path, RecoveredIndicesAndConst } WalFile::WalFile(const std::filesystem::path &wal_directory, const std::string_view uuid, - const std::string_view epoch_id, Config::Items items, NameIdMapper *name_id_mapper, uint64_t seq_num, - utils::FileRetainer *file_retainer) + const std::string_view epoch_id, SalientConfig::Items items, NameIdMapper *name_id_mapper, + uint64_t seq_num, utils::FileRetainer *file_retainer) : items_(items), name_id_mapper_(name_id_mapper), path_(wal_directory / MakeWalName()), @@ -1026,7 +1026,7 @@ WalFile::WalFile(const std::filesystem::path &wal_directory, const std::string_v wal_.Sync(); } -WalFile::WalFile(std::filesystem::path current_wal_path, Config::Items items, NameIdMapper *name_id_mapper, +WalFile::WalFile(std::filesystem::path current_wal_path, SalientConfig::Items items, NameIdMapper *name_id_mapper, uint64_t seq_num, uint64_t from_timestamp, uint64_t to_timestamp, uint64_t count, utils::FileRetainer *file_retainer) : items_(items), diff --git a/src/storage/v2/durability/wal.hpp b/src/storage/v2/durability/wal.hpp index 8f6492ac7..20d88b040 100644 --- a/src/storage/v2/durability/wal.hpp +++ b/src/storage/v2/durability/wal.hpp @@ -193,7 +193,7 @@ WalDeltaData ReadWalDeltaData(BaseDecoder *decoder); WalDeltaData::Type SkipWalDeltaData(BaseDecoder *decoder); /// Function used to encode a `Delta` that originated from a `Vertex`. 
-void EncodeDelta(BaseEncoder *encoder, NameIdMapper *name_id_mapper, Config::Items items, const Delta &delta, +void EncodeDelta(BaseEncoder *encoder, NameIdMapper *name_id_mapper, SalientConfig::Items items, const Delta &delta, const Vertex &vertex, uint64_t timestamp); /// Function used to encode a `Delta` that originated from an `Edge`. @@ -213,15 +213,17 @@ void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, Storage RecoveryInfo LoadWal(const std::filesystem::path &path, RecoveredIndicesAndConstraints *indices_constraints, std::optional last_loaded_timestamp, utils::SkipList *vertices, utils::SkipList *edges, NameIdMapper *name_id_mapper, std::atomic *edge_count, - Config::Items items); + SalientConfig::Items items); /// WalFile class used to append deltas and operations to the WAL file. class WalFile { public: WalFile(const std::filesystem::path &wal_directory, std::string_view uuid, std::string_view epoch_id, - Config::Items items, NameIdMapper *name_id_mapper, uint64_t seq_num, utils::FileRetainer *file_retainer); - WalFile(std::filesystem::path current_wal_path, Config::Items items, NameIdMapper *name_id_mapper, uint64_t seq_num, - uint64_t from_timestamp, uint64_t to_timestamp, uint64_t count, utils::FileRetainer *file_retainer); + SalientConfig::Items items, NameIdMapper *name_id_mapper, uint64_t seq_num, + utils::FileRetainer *file_retainer); + WalFile(std::filesystem::path current_wal_path, SalientConfig::Items items, NameIdMapper *name_id_mapper, + uint64_t seq_num, uint64_t from_timestamp, uint64_t to_timestamp, uint64_t count, + utils::FileRetainer *file_retainer); WalFile(const WalFile &) = delete; WalFile(WalFile &&) = delete; @@ -268,7 +270,7 @@ class WalFile { private: void UpdateStats(uint64_t timestamp); - Config::Items items_; + SalientConfig::Items items_; NameIdMapper *name_id_mapper_; Encoder wal_; std::filesystem::path path_; diff --git a/src/storage/v2/edge_accessor.cpp b/src/storage/v2/edge_accessor.cpp index 4d9cb423b..5d653fbcc 100644 --- a/src/storage/v2/edge_accessor.cpp +++ b/src/storage/v2/edge_accessor.cpp @@ -27,7 +27,7 @@ namespace memgraph::storage { bool EdgeAccessor::IsDeleted() const { - if (!storage_->config_.items.properties_on_edges) { + if (!storage_->config_.salient.items.properties_on_edges) { return false; } return edge_.ptr->deleted; @@ -38,7 +38,7 @@ bool EdgeAccessor::IsVisible(const View view) const { bool deleted = true; // When edges don't have properties, their isolation level is still dictated by MVCC -> // iterate over the deltas of the from_vertex_ and see which deltas can be applied on edges. 
- if (!storage_->config_.items.properties_on_edges) { + if (!storage_->config_.salient.items.properties_on_edges) { Delta *delta = nullptr; { auto guard = std::shared_lock{from_vertex_->lock}; @@ -120,7 +120,7 @@ VertexAccessor EdgeAccessor::DeletedEdgeToVertex() const { Result EdgeAccessor::SetProperty(PropertyId property, const PropertyValue &value) { utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception; - if (!storage_->config_.items.properties_on_edges) return Error::PROPERTIES_DISABLED; + if (!storage_->config_.salient.items.properties_on_edges) return Error::PROPERTIES_DISABLED; auto guard = std::unique_lock{edge_.ptr->lock}; @@ -153,7 +153,7 @@ Result EdgeAccessor::SetProperty(PropertyId property, co Result EdgeAccessor::InitProperties(const std::map &properties) { utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception; - if (!storage_->config_.items.properties_on_edges) return Error::PROPERTIES_DISABLED; + if (!storage_->config_.salient.items.properties_on_edges) return Error::PROPERTIES_DISABLED; auto guard = std::unique_lock{edge_.ptr->lock}; @@ -175,7 +175,7 @@ Result EdgeAccessor::InitProperties(const std::map>> EdgeAccessor::UpdateProperties( std::map &properties) const { utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception; - if (!storage_->config_.items.properties_on_edges) return Error::PROPERTIES_DISABLED; + if (!storage_->config_.salient.items.properties_on_edges) return Error::PROPERTIES_DISABLED; auto guard = std::unique_lock{edge_.ptr->lock}; @@ -198,7 +198,7 @@ Result>> EdgeAc } Result> EdgeAccessor::ClearProperties() { - if (!storage_->config_.items.properties_on_edges) return Error::PROPERTIES_DISABLED; + if (!storage_->config_.salient.items.properties_on_edges) return Error::PROPERTIES_DISABLED; auto guard = std::unique_lock{edge_.ptr->lock}; @@ -222,7 +222,7 @@ Result> EdgeAccessor::ClearProperties() { } Result EdgeAccessor::GetProperty(PropertyId property, View view) const { - if (!storage_->config_.items.properties_on_edges) return PropertyValue(); + if (!storage_->config_.salient.items.properties_on_edges) return PropertyValue(); bool exists = true; bool deleted = false; PropertyValue value; @@ -265,7 +265,7 @@ Result EdgeAccessor::GetProperty(PropertyId property, View view) } Result> EdgeAccessor::Properties(View view) const { - if (!storage_->config_.items.properties_on_edges) return std::map{}; + if (!storage_->config_.salient.items.properties_on_edges) return std::map{}; bool exists = true; bool deleted = false; std::map properties; @@ -317,7 +317,7 @@ Result> EdgeAccessor::Properties(View view) } Gid EdgeAccessor::Gid() const noexcept { - if (storage_->config_.items.properties_on_edges) { + if (storage_->config_.salient.items.properties_on_edges) { return edge_.ptr->gid; } return edge_.gid; diff --git a/src/storage/v2/inmemory/replication/recovery.cpp b/src/storage/v2/inmemory/replication/recovery.cpp index 536c7c8fc..d6f2b464c 100644 --- a/src/storage/v2/inmemory/replication/recovery.cpp +++ b/src/storage/v2/inmemory/replication/recovery.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -44,7 +44,7 @@ class InMemoryCurrentWalHandler { ////// CurrentWalHandler ////// InMemoryCurrentWalHandler::InMemoryCurrentWalHandler(InMemoryStorage const *storage, rpc::Client &rpc_client) - : stream_(rpc_client.Stream(storage->id())) {} + : stream_(rpc_client.Stream(storage->uuid())) {} void InMemoryCurrentWalHandler::AppendFilename(const std::string &filename) { replication::Encoder encoder(stream_.GetBuilder()); @@ -69,10 +69,10 @@ void InMemoryCurrentWalHandler::AppendBufferData(const uint8_t *buffer, const si replication::CurrentWalRes InMemoryCurrentWalHandler::Finalize() { return stream_.AwaitResponse(); } ////// ReplicationClient Helpers ////// -replication::WalFilesRes TransferWalFiles(std::string db_name, rpc::Client &client, +replication::WalFilesRes TransferWalFiles(const utils::UUID &uuid, rpc::Client &client, const std::vector &wal_files) { MG_ASSERT(!wal_files.empty(), "Wal files list is empty!"); - auto stream = client.Stream(std::move(db_name), wal_files.size()); + auto stream = client.Stream(uuid, wal_files.size()); replication::Encoder encoder(stream.GetBuilder()); for (const auto &wal : wal_files) { spdlog::debug("Sending wal file: {}", wal); @@ -81,8 +81,9 @@ replication::WalFilesRes TransferWalFiles(std::string db_name, rpc::Client &clie return stream.AwaitResponse(); } -replication::SnapshotRes TransferSnapshot(std::string db_name, rpc::Client &client, const std::filesystem::path &path) { - auto stream = client.Stream(std::move(db_name)); +replication::SnapshotRes TransferSnapshot(const utils::UUID &uuid, rpc::Client &client, + const std::filesystem::path &path) { + auto stream = client.Stream(uuid); replication::Encoder encoder(stream.GetBuilder()); encoder.WriteFile(path); return stream.AwaitResponse(); diff --git a/src/storage/v2/inmemory/replication/recovery.hpp b/src/storage/v2/inmemory/replication/recovery.hpp index 2025800ab..730822a62 100644 --- a/src/storage/v2/inmemory/replication/recovery.hpp +++ b/src/storage/v2/inmemory/replication/recovery.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -19,10 +19,11 @@ class InMemoryStorage; ////// ReplicationClient Helpers ////// -replication::WalFilesRes TransferWalFiles(std::string db_name, rpc::Client &client, +replication::WalFilesRes TransferWalFiles(const utils::UUID &uuid, rpc::Client &client, const std::vector &wal_files); -replication::SnapshotRes TransferSnapshot(std::string db_name, rpc::Client &client, const std::filesystem::path &path); +replication::SnapshotRes TransferSnapshot(const utils::UUID &uuid, rpc::Client &client, + const std::filesystem::path &path); uint64_t ReplicateCurrentWal(const InMemoryStorage *storage, rpc::Client &client, durability::WalFile const &wal_file); diff --git a/src/storage/v2/inmemory/storage.cpp b/src/storage/v2/inmemory/storage.cpp index 6eeefb3ce..44d5dab2b 100644 --- a/src/storage/v2/inmemory/storage.cpp +++ b/src/storage/v2/inmemory/storage.cpp @@ -66,13 +66,13 @@ auto FindEdges(const View view, EdgeTypeId edge_type, const VertexAccessor *from using OOMExceptionEnabler = utils::MemoryTracker::OutOfMemoryExceptionEnabler; InMemoryStorage::InMemoryStorage(Config config) - : Storage(config, config.storage_mode), + : Storage(config, config.salient.storage_mode), recovery_{config.durability.storage_directory / durability::kSnapshotDirectory, config.durability.storage_directory / durability::kWalDirectory}, lock_file_path_(config.durability.storage_directory / durability::kLockFile), uuid_(utils::GenerateUUID()), global_locker_(file_retainer_.AddLocker()) { - MG_ASSERT(config.storage_mode != StorageMode::ON_DISK_TRANSACTIONAL, + MG_ASSERT(config.salient.storage_mode != StorageMode::ON_DISK_TRANSACTIONAL, "Invalid storage mode sent to InMemoryStorage constructor!"); if (config_.durability.snapshot_wal_mode != Config::Durability::SnapshotWalMode::DISABLED || config_.durability.snapshot_on_exit || config_.durability.recover_on_startup) { @@ -177,7 +177,8 @@ InMemoryStorage::~InMemoryStorage() { InMemoryStorage::InMemoryAccessor::InMemoryAccessor(auto tag, InMemoryStorage *storage, IsolationLevel isolation_level, StorageMode storage_mode, memgraph::replication::ReplicationRole replication_role) - : Accessor(tag, storage, isolation_level, storage_mode, replication_role), config_(storage->config_.items) {} + : Accessor(tag, storage, isolation_level, storage_mode, replication_role), + config_(storage->config_.salient.items) {} InMemoryStorage::InMemoryAccessor::InMemoryAccessor(InMemoryAccessor &&other) noexcept : Accessor(std::move(other)), config_(other.config_) {} @@ -319,7 +320,7 @@ Result InMemoryStorage::InMemoryAccessor::CreateEdge(VertexAccesso if (to_vertex->deleted) return Error::DELETED_OBJECT; } - if (storage_->config_.items.enable_schema_metadata) { + if (storage_->config_.salient.items.enable_schema_metadata) { storage_->stored_edge_types_.try_insert(edge_type); } auto *mem_storage = static_cast(storage_); @@ -408,7 +409,7 @@ Result InMemoryStorage::InMemoryAccessor::CreateEdgeEx(VertexAcces if (to_vertex->deleted) return Error::DELETED_OBJECT; } - if (storage_->config_.items.enable_schema_metadata) { + if (storage_->config_.salient.items.enable_schema_metadata) { storage_->stored_edge_types_.try_insert(edge_type); } @@ -745,7 +746,7 @@ Result InMemoryStorage::InMemoryAccessor::EdgeChangeType(EdgeAcces // NOLINTNEXTLINE(google-default-arguments) utils::BasicResult 
InMemoryStorage::InMemoryAccessor::Commit( - const std::optional desired_commit_timestamp, bool is_main) { + CommitReplArgs reparg, DatabaseAccessProtector db_acc) { MG_ASSERT(is_transaction_active_, "The transaction is already terminated!"); MG_ASSERT(!transaction_.must_abort, "The transaction can't be committed!"); @@ -754,47 +755,14 @@ utils::BasicResult InMemoryStorage::InMemoryAcce auto *mem_storage = static_cast(storage_); // TODO: duplicated transaction finalisation in md_deltas and deltas processing cases - if (!transaction_.md_deltas.empty()) { - // This is usually done by the MVCC, but it does not handle the metadata deltas - transaction_.EnsureCommitTimestampExists(); - - // Save these so we can mark them used in the commit log. - uint64_t start_timestamp = transaction_.start_timestamp; - - std::unique_lock engine_guard(storage_->engine_lock_); - commit_timestamp_.emplace(mem_storage->CommitTimestamp(desired_commit_timestamp)); - - // Write transaction to WAL while holding the engine lock to make sure - // that committed transactions are sorted by the commit timestamp in the - // WAL files. We supply the new commit timestamp to the function so that - // it knows what will be the final commit timestamp. The WAL must be - // written before actually committing the transaction (before setting - // the commit timestamp) so that no other transaction can see the - // modifications before they are written to disk. - // Replica can log only the write transaction received from Main - // so the Wal files are consistent - if (is_main || desired_commit_timestamp.has_value()) { - could_replicate_all_sync_replicas = - mem_storage->AppendToWalDataDefinition(transaction_, *commit_timestamp_); // protected by engine_guard - // TODO: release lock, and update all deltas to have a local copy of the commit timestamp - transaction_.commit_timestamp->store(*commit_timestamp_, - std::memory_order_release); // protected by engine_guard - // Replica can only update the last commit timestamp with - // the commits received from main. - if (is_main || desired_commit_timestamp.has_value()) { - // Update the last commit timestamp - mem_storage->repl_storage_state_.last_commit_timestamp_.store(*commit_timestamp_); // protected by engine_guard - } - // Release engine lock because we don't have to hold it anymore - engine_guard.unlock(); - - mem_storage->commit_log_->MarkFinished(start_timestamp); - } - } else if (transaction_.deltas.use().empty()) { + if (transaction_.deltas.use().empty() && transaction_.md_deltas.empty()) { // We don't have to update the commit timestamp here because no one reads // it. 
mem_storage->commit_log_->MarkFinished(transaction_.start_timestamp); } else { + // This is usually done by the MVCC, but it does not handle the metadata deltas + transaction_.EnsureCommitTimestampExists(); + if (transaction_.constraint_verification_info.NeedsExistenceConstraintVerification()) { const auto vertices_to_update = transaction_.constraint_verification_info.GetVerticesForExistenceConstraintChecking(); @@ -822,7 +790,7 @@ utils::BasicResult InMemoryStorage::InMemoryAcce std::unique_lock engine_guard(storage_->engine_lock_); auto *mem_unique_constraints = static_cast(storage_->constraints_.unique_constraints_.get()); - commit_timestamp_.emplace(mem_storage->CommitTimestamp(desired_commit_timestamp)); + commit_timestamp_.emplace(mem_storage->CommitTimestamp(reparg.desired_commit_timestamp)); if (transaction_.constraint_verification_info.NeedsUniqueConstraintVerification()) { // Before committing and validating vertices against unique constraints, @@ -846,6 +814,16 @@ utils::BasicResult InMemoryStorage::InMemoryAcce } if (!unique_constraint_violation) { + [[maybe_unused]] bool const is_main_or_replica_write = + reparg.IsMain() || reparg.desired_commit_timestamp.has_value(); + + // TODO Figure out if we can assert this + // DMG_ASSERT(is_main_or_replica_write, "Should only get here on writes"); + // Currently there are queries that write to some subsystem that are allowed on a replica + // ex. analyze graph stats + // There are probably others. We not to check all of them and figure out if they are allowed and what are + // they even doing here... + // Write transaction to WAL while holding the engine lock to make sure // that committed transactions are sorted by the commit timestamp in the // WAL files. We supply the new commit timestamp to the function so that @@ -855,18 +833,16 @@ utils::BasicResult InMemoryStorage::InMemoryAcce // modifications before they are written to disk. // Replica can log only the write transaction received from Main // so the Wal files are consistent - if (is_main || desired_commit_timestamp.has_value()) { - could_replicate_all_sync_replicas = - mem_storage->AppendToWalDataManipulation(transaction_, *commit_timestamp_); // protected by engine_guard - } + if (is_main_or_replica_write) { + could_replicate_all_sync_replicas = mem_storage->AppendToWal(transaction_, *commit_timestamp_, + std::move(db_acc)); // protected by engine_guard - // TODO: release lock, and update all deltas to have a local copy of the commit timestamp - MG_ASSERT(transaction_.commit_timestamp != nullptr, "Invalid database state!"); - transaction_.commit_timestamp->store(*commit_timestamp_, - std::memory_order_release); // protected by engine_guard - // Replica can only update the last commit timestamp with - // the commits received from main. - if (is_main || desired_commit_timestamp.has_value()) { + // TODO: release lock, and update all deltas to have a local copy of the commit timestamp + MG_ASSERT(transaction_.commit_timestamp != nullptr, "Invalid database state!"); + transaction_.commit_timestamp->store(*commit_timestamp_, + std::memory_order_release); // protected by engine_guard + // Replica can only update the last commit timestamp with + // the commits received from main. 
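The is_main_or_replica_write flag introduced in the hunk above is the whole decision about when a commit gets written to the WAL and replicated. A minimal self-contained sketch of that decision, using only the two CommitReplArgs members this hunk relies on (IsMain() and desired_commit_timestamp); the helper name is illustrative and not part of the patch:

#include <cstdint>
#include <optional>

// MAIN always logs and replicates its own writes; a REPLICA only logs transactions it
// received from MAIN, which arrive with a desired commit timestamp already assigned.
bool ShouldWriteWal(bool is_main, std::optional<uint64_t> desired_commit_timestamp) {
  return is_main || desired_commit_timestamp.has_value();
}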
// Update the last commit timestamp mem_storage->repl_storage_state_.last_commit_timestamp_.store( *commit_timestamp_); // protected by engine_guard @@ -1692,7 +1668,7 @@ StorageInfo InMemoryStorage::GetBaseInfo(bool force_directory) { --it; if (it != end && *it != "databases") { // Default DB points to the root (for back-compatibility); update to the "database" dir - return dir / "databases" / dbms::kDefaultDB; + return dir / dbms::kMultiTenantDir / dbms::kDefaultDB; } } } @@ -1727,8 +1703,8 @@ bool InMemoryStorage::InitializeWalFile(memgraph::replication::ReplicationEpoch if (config_.durability.snapshot_wal_mode != Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL) return false; if (!wal_file_) { - wal_file_.emplace(recovery_.wal_directory_, uuid_, epoch.id(), config_.items, name_id_mapper_.get(), wal_seq_num_++, - &file_retainer_); + wal_file_.emplace(recovery_.wal_directory_, uuid_, epoch.id(), config_.salient.items, name_id_mapper_.get(), + wal_seq_num_++, &file_retainer_); } return true; } @@ -1752,7 +1728,8 @@ void InMemoryStorage::FinalizeWalFile() { } } -bool InMemoryStorage::AppendToWalDataManipulation(const Transaction &transaction, uint64_t final_commit_timestamp) { +bool InMemoryStorage::AppendToWal(const Transaction &transaction, uint64_t final_commit_timestamp, + DatabaseAccessProtector db_acc) { if (!InitializeWalFile(repl_storage_state_.epoch_)) { return true; } @@ -1760,7 +1737,7 @@ bool InMemoryStorage::AppendToWalDataManipulation(const Transaction &transaction // A single transaction will always be contained in a single WAL file. auto current_commit_timestamp = transaction.commit_timestamp->load(std::memory_order_acquire); - repl_storage_state_.InitializeTransaction(wal_file_->SequenceNumber(), this); + repl_storage_state_.InitializeTransaction(wal_file_->SequenceNumber(), this, db_acc); auto append_deltas = [&](auto callback) { // Helper lambda that traverses the delta chain on order to find the first @@ -1909,26 +1886,15 @@ bool InMemoryStorage::AppendToWalDataManipulation(const Transaction &transaction } }; - append_deltas([&](const Delta &delta, const auto &parent, uint64_t timestamp) { - wal_file_->AppendDelta(delta, parent, timestamp); - repl_storage_state_.AppendDelta(delta, parent, timestamp); - }); - - // Add a delta that indicates that the transaction is fully written to the WAL - // file.replication_clients_.WithLock - wal_file_->AppendTransactionEnd(final_commit_timestamp); - FinalizeWalFile(); - - return repl_storage_state_.FinalizeTransaction(final_commit_timestamp, this); -} - -bool InMemoryStorage::AppendToWalDataDefinition(const Transaction &transaction, uint64_t final_commit_timestamp) { - if (!InitializeWalFile(repl_storage_state_.epoch_)) { - return true; + // Handle MVCC deltas + if (!transaction.deltas.use().empty()) { + append_deltas([&](const Delta &delta, const auto &parent, uint64_t timestamp) { + wal_file_->AppendDelta(delta, parent, timestamp); + repl_storage_state_.AppendDelta(delta, parent, timestamp); + }); } - repl_storage_state_.InitializeTransaction(wal_file_->SequenceNumber(), this); - + // Handle metadata deltas for (const auto &md_delta : transaction.md_deltas) { switch (md_delta.action) { case MetadataDelta::Action::LABEL_INDEX_CREATE: { @@ -1998,7 +1964,7 @@ bool InMemoryStorage::AppendToWalDataDefinition(const Transaction &transaction, wal_file_->AppendTransactionEnd(final_commit_timestamp); FinalizeWalFile(); - return repl_storage_state_.FinalizeTransaction(final_commit_timestamp, this); + return 
repl_storage_state_.FinalizeTransaction(final_commit_timestamp, this, std::move(db_acc)); } void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label, diff --git a/src/storage/v2/inmemory/storage.hpp b/src/storage/v2/inmemory/storage.hpp index 45d0f270f..49e52f4b9 100644 --- a/src/storage/v2/inmemory/storage.hpp +++ b/src/storage/v2/inmemory/storage.hpp @@ -214,8 +214,8 @@ class InMemoryStorage final : public Storage { /// case the transaction is automatically aborted. /// @throw std::bad_alloc // NOLINTNEXTLINE(google-default-arguments) - utils::BasicResult Commit(std::optional desired_commit_timestamp = {}, - bool is_main = true) override; + utils::BasicResult Commit(CommitReplArgs reparg = {}, + DatabaseAccessProtector db_acc = {}) override; /// @throw std::bad_alloc void Abort() override; @@ -301,7 +301,7 @@ class InMemoryStorage final : public Storage { /// @throw std::bad_alloc Result CreateEdgeEx(VertexAccessor *from, VertexAccessor *to, EdgeTypeId edge_type, storage::Gid gid); - Config::Items config_; + SalientConfig::Items config_; }; class ReplicationAccessor final : public InMemoryAccessor { @@ -368,9 +368,8 @@ class InMemoryStorage final : public Storage { StorageInfo GetInfo(bool force_directory, memgraph::replication::ReplicationRole replication_role) override; /// Return true in all cases excepted if any sync replicas have not sent confirmation. - [[nodiscard]] bool AppendToWalDataManipulation(const Transaction &transaction, uint64_t final_commit_timestamp); - /// Return true in all cases excepted if any sync replicas have not sent confirmation. - [[nodiscard]] bool AppendToWalDataDefinition(const Transaction &transaction, uint64_t final_commit_timestamp); + [[nodiscard]] bool AppendToWal(const Transaction &transaction, uint64_t final_commit_timestamp, + DatabaseAccessProtector db_acc); /// Return true in all cases excepted if any sync replicas have not sent confirmation. void AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label, uint64_t final_commit_timestamp); diff --git a/src/storage/v2/replication/replication_client.cpp b/src/storage/v2/replication/replication_client.cpp index 3bc1b3d32..a1470dcce 100644 --- a/src/storage/v2/replication/replication_client.cpp +++ b/src/storage/v2/replication/replication_client.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -10,14 +10,13 @@ // licenses/APL.txt. 
#include "replication/replication_client.hpp" -#include "storage/v2/durability/durability.hpp" #include "storage/v2/inmemory/storage.hpp" #include "storage/v2/storage.hpp" #include "utils/exceptions.hpp" +#include "utils/on_scope_exit.hpp" #include "utils/variant_helpers.hpp" #include -#include namespace { template @@ -29,14 +28,26 @@ namespace memgraph::storage { ReplicationStorageClient::ReplicationStorageClient(::memgraph::replication::ReplicationClient &client) : client_{client} {} -void ReplicationStorageClient::CheckReplicaState(Storage *storage) { +void ReplicationStorageClient::UpdateReplicaState(Storage *storage, DatabaseAccessProtector db_acc) { uint64_t current_commit_timestamp{kTimestampInitialId}; auto &replStorageState = storage->repl_storage_state_; - auto stream{client_.rpc_client_.Stream( - storage->id(), replStorageState.last_commit_timestamp_, std::string{replStorageState.epoch_.id()})}; - const auto replica = stream.AwaitResponse(); + auto hb_stream{client_.rpc_client_.Stream( + storage->uuid(), replStorageState.last_commit_timestamp_, std::string{replStorageState.epoch_.id()})}; + const auto replica = hb_stream.AwaitResponse(); + +#ifdef MG_ENTERPRISE // Multi-tenancy is only supported in enterprise + if (!replica.success) { // Replica is missing the current database + client_.state_.WithLock([&](auto &state) { + spdlog::debug("Replica '{}' missing database '{}' - '{}'", client_.name_, storage->name(), + std::string{storage->uuid()}); + state = memgraph::replication::ReplicationClient::State::BEHIND; + }); + return; + } +#endif + std::optional branching_point; if (replica.epoch_id != replStorageState.epoch_.id() && replica.current_commit_timestamp != kTimestampInitialId) { auto const &history = replStorageState.history; @@ -70,8 +81,9 @@ void ReplicationStorageClient::CheckReplicaState(Storage *storage) { } else { spdlog::debug("Replica '{}' is behind", client_.name_); state = replication::ReplicaState::RECOVERY; - client_.thread_pool_.AddTask( - [storage, current_commit_timestamp, this] { this->RecoverReplica(current_commit_timestamp, storage); }); + client_.thread_pool_.AddTask([storage, current_commit_timestamp, gk = std::move(db_acc), this] { + this->RecoverReplica(current_commit_timestamp, storage); + }); } }); } @@ -82,16 +94,18 @@ TimestampInfo ReplicationStorageClient::GetTimestampInfo(Storage const *storage) info.current_number_of_timestamp_behind_master = 0; try { - auto stream{client_.rpc_client_.Stream(storage->id())}; + auto stream{client_.rpc_client_.Stream(storage->uuid())}; const auto response = stream.AwaitResponse(); const auto is_success = response.success; - if (!is_success) { - replica_state_.WithLock([](auto &val) { val = replication::ReplicaState::MAYBE_BEHIND; }); - LogRpcFailure(); - } + auto main_time_stamp = storage->repl_storage_state_.last_commit_timestamp_.load(); info.current_timestamp_of_replica = response.current_commit_timestamp; info.current_number_of_timestamp_behind_master = response.current_commit_timestamp - main_time_stamp; + + if (!is_success || info.current_number_of_timestamp_behind_master != 0) { + replica_state_.WithLock([](auto &val) { val = replication::ReplicaState::MAYBE_BEHIND; }); + LogRpcFailure(); + } } catch (const rpc::RpcFailedException &) { replica_state_.WithLock([](auto &val) { val = replication::ReplicaState::MAYBE_BEHIND; }); LogRpcFailure(); // mutex already unlocked, if the new enqueued task dispatches immediately it probably @@ -106,13 +120,15 @@ void ReplicationStorageClient::LogRpcFailure() { 
utils::MessageWithLink("Couldn't replicate data to {}.", client_.name_, "https://memgr.ph/replication")); } -void ReplicationStorageClient::TryCheckReplicaStateAsync(Storage *storage) { - client_.thread_pool_.AddTask([storage, this] { this->TryCheckReplicaStateSync(storage); }); +void ReplicationStorageClient::TryCheckReplicaStateAsync(Storage *storage, DatabaseAccessProtector db_acc) { + client_.thread_pool_.AddTask([storage, db_acc = std::move(db_acc), this]() mutable { + this->TryCheckReplicaStateSync(storage, std::move(db_acc)); + }); } -void ReplicationStorageClient::TryCheckReplicaStateSync(Storage *storage) { +void ReplicationStorageClient::TryCheckReplicaStateSync(Storage *storage, DatabaseAccessProtector db_acc) { try { - CheckReplicaState(storage); + UpdateReplicaState(storage, std::move(db_acc)); } catch (const rpc::VersionMismatchRpcFailedException &) { replica_state_.WithLock([](auto &val) { val = replication::ReplicaState::MAYBE_BEHIND; }); spdlog::error( @@ -126,7 +142,8 @@ void ReplicationStorageClient::TryCheckReplicaStateSync(Storage *storage) { } } -void ReplicationStorageClient::StartTransactionReplication(const uint64_t current_wal_seq_num, Storage *storage) { +void ReplicationStorageClient::StartTransactionReplication(const uint64_t current_wal_seq_num, Storage *storage, + DatabaseAccessProtector db_acc) { auto locked_state = replica_state_.Lock(); switch (*locked_state) { using enum replication::ReplicaState; @@ -150,7 +167,7 @@ void ReplicationStorageClient::StartTransactionReplication(const uint64_t curren case MAYBE_BEHIND: spdlog::error( utils::MessageWithLink("Couldn't replicate data to {}.", client_.name_, "https://memgr.ph/replication")); - TryCheckReplicaStateAsync(storage); + TryCheckReplicaStateAsync(storage, std::move(db_acc)); return; case READY: MG_ASSERT(!replica_stream_); @@ -165,7 +182,7 @@ void ReplicationStorageClient::StartTransactionReplication(const uint64_t curren } } -bool ReplicationStorageClient::FinalizeTransactionReplication(Storage *storage) { +bool ReplicationStorageClient::FinalizeTransactionReplication(Storage *storage, DatabaseAccessProtector db_acc) { // We can only check the state because it guarantees to be only // valid during a single transaction replication (if the assumption // that this and other transaction replication functions can only be @@ -174,18 +191,26 @@ bool ReplicationStorageClient::FinalizeTransactionReplication(Storage *storage) return false; } - if (replica_stream_->IsDefunct()) return false; + if (!replica_stream_ || replica_stream_->IsDefunct()) { + replica_state_.WithLock([this](auto &state) { + replica_stream_.reset(); + state = replication::ReplicaState::MAYBE_BEHIND; + }); + LogRpcFailure(); + return false; + } - auto task = [storage, this]() { + auto task = [storage, db_acc = std::move(db_acc), this]() mutable { MG_ASSERT(replica_stream_, "Missing stream for transaction deltas"); try { auto response = replica_stream_->Finalize(); - return replica_state_.WithLock([storage, &response, this](auto &state) { + return replica_state_.WithLock([storage, &response, db_acc = std::move(db_acc), this](auto &state) mutable { replica_stream_.reset(); if (!response.success || state == replication::ReplicaState::RECOVERY) { state = replication::ReplicaState::RECOVERY; - client_.thread_pool_.AddTask( - [storage, &response, this] { this->RecoverReplica(response.current_commit_timestamp, storage); }); + client_.thread_pool_.AddTask([storage, &response, db_acc = std::move(db_acc), this] { + 
this->RecoverReplica(response.current_commit_timestamp, storage); + }); return false; } state = replication::ReplicaState::READY; @@ -202,16 +227,16 @@ bool ReplicationStorageClient::FinalizeTransactionReplication(Storage *storage) }; if (client_.mode_ == memgraph::replication::ReplicationMode::ASYNC) { - client_.thread_pool_.AddTask([task = std::move(task)] { (void)task(); }); + client_.thread_pool_.AddTask([task = std::move(task)]() mutable { (void)task(); }); return true; } return task(); } -void ReplicationStorageClient::Start(Storage *storage) { - spdlog::trace("Replication client started for database \"{}\"", storage->id()); - TryCheckReplicaStateSync(storage); +void ReplicationStorageClient::Start(Storage *storage, DatabaseAccessProtector db_acc) { + spdlog::trace("Replication client started for database \"{}\"", storage->name()); + TryCheckReplicaStateSync(storage, std::move(db_acc)); } void ReplicationStorageClient::RecoverReplica(uint64_t replica_commit, memgraph::storage::Storage *storage) { @@ -233,12 +258,12 @@ void ReplicationStorageClient::RecoverReplica(uint64_t replica_commit, memgraph: std::visit(utils::Overloaded{ [&replica_commit, mem_storage, &rpcClient](RecoverySnapshot const &snapshot) { spdlog::debug("Sending the latest snapshot file: {}", snapshot); - auto response = TransferSnapshot(mem_storage->id(), rpcClient, snapshot); + auto response = TransferSnapshot(mem_storage->uuid(), rpcClient, snapshot); replica_commit = response.current_commit_timestamp; }, [&replica_commit, mem_storage, &rpcClient](RecoveryWals const &wals) { spdlog::debug("Sending the latest wal files"); - auto response = TransferWalFiles(mem_storage->id(), rpcClient, wals); + auto response = TransferWalFiles(mem_storage->uuid(), rpcClient, wals); replica_commit = response.current_commit_timestamp; spdlog::debug("Wal files successfully transferred."); }, @@ -246,11 +271,11 @@ void ReplicationStorageClient::RecoverReplica(uint64_t replica_commit, memgraph: std::unique_lock transaction_guard(mem_storage->engine_lock_); if (mem_storage->wal_file_ && mem_storage->wal_file_->SequenceNumber() == current_wal.current_wal_seq_num) { + utils::OnScopeExit on_exit([mem_storage]() { mem_storage->wal_file_->EnableFlushing(); }); mem_storage->wal_file_->DisableFlushing(); transaction_guard.unlock(); spdlog::debug("Sending current wal file"); replica_commit = ReplicateCurrentWal(mem_storage, rpcClient, *mem_storage->wal_file_); - mem_storage->wal_file_->EnableFlushing(); } else { spdlog::debug("Cannot recover using current wal file"); } @@ -291,14 +316,14 @@ void ReplicationStorageClient::RecoverReplica(uint64_t replica_commit, memgraph: ReplicaStream::ReplicaStream(Storage *storage, rpc::Client &rpc_client, const uint64_t current_seq_num) : storage_{storage}, stream_(rpc_client.Stream( - storage->id(), storage->repl_storage_state_.last_commit_timestamp_.load(), current_seq_num)) { + storage->uuid(), storage->repl_storage_state_.last_commit_timestamp_.load(), current_seq_num)) { replication::Encoder encoder{stream_.GetBuilder()}; encoder.WriteString(storage->repl_storage_state_.epoch_.id()); } void ReplicaStream::AppendDelta(const Delta &delta, const Vertex &vertex, uint64_t final_commit_timestamp) { replication::Encoder encoder(stream_.GetBuilder()); - EncodeDelta(&encoder, storage_->name_id_mapper_.get(), storage_->config_.items, delta, vertex, + EncodeDelta(&encoder, storage_->name_id_mapper_.get(), storage_->config_.salient.items, delta, vertex, final_commit_timestamp); } diff --git 
a/src/storage/v2/replication/replication_client.hpp b/src/storage/v2/replication/replication_client.hpp index 4ef00f623..05f92ef0a 100644 --- a/src/storage/v2/replication/replication_client.hpp +++ b/src/storage/v2/replication/replication_client.hpp @@ -16,6 +16,7 @@ #include "replication/messages.hpp" #include "replication/replication_client.hpp" #include "rpc/client.hpp" +#include "storage/v2/database_access.hpp" #include "storage/v2/durability/storage_global_operation.hpp" #include "storage/v2/id_types.hpp" #include "storage/v2/indices/label_index_stats.hpp" @@ -100,13 +101,27 @@ class ReplicationStorageClient { auto State() const -> replication::ReplicaState { return replica_state_.WithLock(std::identity()); } auto GetTimestampInfo(Storage const *storage) -> TimestampInfo; - void Start(Storage *storage); - void StartTransactionReplication(uint64_t current_wal_seq_num, Storage *storage); + /** + * @brief Check the replica state + * + * @param storage pointer to the storage associated with the client + * @param gk gatekeeper access that protects the database; std::any to have separation between dbms and storage + */ + void Start(Storage *storage, DatabaseAccessProtector db_acc); + + /** + * @brief Start a new transaction replication (open up a stream) + * + * @param current_wal_seq_num + * @param storage pointer to the storage associated with the client + * @param gk gatekeeper access that protects the database; std::any to have separation between dbms and storage + */ + void StartTransactionReplication(uint64_t current_wal_seq_num, Storage *storage, DatabaseAccessProtector db_acc); // Replication clients can be removed at any point // so to avoid any complexity of checking if the client was removed whenever // we want to send part of transaction and to avoid adding some GC logic this - // function will run a callback if, after previously callling + // function will run a callback if, after previously calling // StartTransactionReplication, stream is created. template void IfStreamingTransaction(F &&callback) { @@ -118,7 +133,11 @@ class ReplicationStorageClient { return; } if (!replica_stream_ || replica_stream_->IsDefunct()) { - replica_state_.WithLock([](auto &state) { state = replication::ReplicaState::MAYBE_BEHIND; }); + replica_state_.WithLock([this](auto &state) { + replica_stream_.reset(); + state = replication::ReplicaState::MAYBE_BEHIND; + }); + LogRpcFailure(); return; } try { @@ -126,20 +145,56 @@ class ReplicationStorageClient { } catch (const rpc::RpcFailedException &) { replica_state_.WithLock([](auto &state) { state = replication::ReplicaState::MAYBE_BEHIND; }); LogRpcFailure(); + return; } } - // Return whether the transaction could be finalized on the replication client or not. - [[nodiscard]] bool FinalizeTransactionReplication(Storage *storage); + /** + * @brief Return whether the transaction could be finalized on the replication client or not. 
+ * + * @param storage pointer to the storage associated with the client + * @param gk gatekeeper access that protects the database; std::any to have separation between dbms and storage + * @return true + * @return false + */ + [[nodiscard]] bool FinalizeTransactionReplication(Storage *storage, DatabaseAccessProtector db_acc); + + /** + * @brief Asynchronously try to check the replica state and start a recovery thread if necessary + * + * @param storage pointer to the storage associated with the client + * @param gk gatekeeper access that protects the database; std::any to have separation between dbms and storage + */ + void TryCheckReplicaStateAsync(Storage *storage, DatabaseAccessProtector db_acc); // TODO Move back to private + + auto &Client() { return client_; } - void TryCheckReplicaStateAsync(Storage *storage); // TODO Move back to private private: + /** + * @brief Get necessary recovery steps and execute them. + * + * @param replica_commit the commit up to which we should recover to + * @param gk gatekeeper access that protects the database; std::any to have separation between dbms and storage + */ void RecoverReplica(uint64_t replica_commit, memgraph::storage::Storage *storage); - void CheckReplicaState(Storage *storage); + /** + * @brief Check replica state + * + * @param storage pointer to the storage associated with the client + * @param gk gatekeeper access that protects the database; std::any to have separation between dbms and storage + */ + void UpdateReplicaState(Storage *storage, DatabaseAccessProtector db_acc); + void LogRpcFailure(); - void TryCheckReplicaStateSync(Storage *storage); - void FrequentCheck(Storage *storage); + + /** + * @brief Synchronously try to check the replica state and start a recovery thread if necessary + * + * @param storage pointer to the storage associated with the client + * @param gk gatekeeper access that protects the database; std::any to have separation between dbms and storage + */ + void TryCheckReplicaStateSync(Storage *storage, DatabaseAccessProtector db_acc); ::memgraph::replication::ReplicationClient &client_; // TODO Do not store the stream, make is a local variable diff --git a/src/storage/v2/replication/replication_storage_state.cpp b/src/storage/v2/replication/replication_storage_state.cpp index a443c7171..08a941dca 100644 --- a/src/storage/v2/replication/replication_storage_state.cpp +++ b/src/storage/v2/replication/replication_storage_state.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
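The db_acc (DatabaseAccessProtector) argument documented in the comments above is a type-erased std::any whose only purpose is to keep the owning database alive while replication work runs on background threads. A minimal sketch of that ownership pattern, assuming nothing beyond std::any itself; ScheduleOnPool is an illustrative stand-in for the client_.thread_pool_.AddTask calls in this diff:

#include <any>
#include <functional>
#include <utility>

void ScheduleOnPool(std::function<void()> work, std::any db_acc /* DatabaseAccessProtector */) {
  // The protector is moved into the task: the database it guards cannot be dropped until
  // the task (and its copy of db_acc) is destroyed, yet storage never needs to know the
  // concrete dbms accessor type hidden behind the std::any.
  auto task = [work = std::move(work), db_acc = std::move(db_acc)]() mutable { work(); };
  task();  // a real caller would enqueue this on a thread pool instead of running it inline
}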
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -16,10 +16,11 @@ namespace memgraph::storage { -void ReplicationStorageState::InitializeTransaction(uint64_t seq_num, Storage *storage) { - replication_clients_.WithLock([=](auto &clients) { +void ReplicationStorageState::InitializeTransaction(uint64_t seq_num, Storage *storage, + DatabaseAccessProtector db_acc) { + replication_clients_.WithLock([=, db_acc = std::move(db_acc)](auto &clients) mutable { for (auto &client : clients) { - client->StartTransactionReplication(seq_num, storage); + client->StartTransactionReplication(seq_num, storage, std::move(db_acc)); } }); } @@ -52,12 +53,16 @@ void ReplicationStorageState::AppendOperation(durability::StorageMetadataOperati }); } -bool ReplicationStorageState::FinalizeTransaction(uint64_t timestamp, Storage *storage) { - return replication_clients_.WithLock([=](auto &clients) { +bool ReplicationStorageState::FinalizeTransaction(uint64_t timestamp, Storage *storage, + DatabaseAccessProtector db_acc) { + return replication_clients_.WithLock([=, db_acc = std::move(db_acc)](auto &clients) mutable { bool finalized_on_all_replicas = true; + MG_ASSERT(clients.empty() || db_acc.has_value(), + "Any clients assumes we are MAIN, we should have gatekeeper_access_wrapper so we can correctly " + "handle ASYNC tasks"); for (ReplicationClientPtr &client : clients) { client->IfStreamingTransaction([&](auto &stream) { stream.AppendTransactionEnd(timestamp); }); - const auto finalized = client->FinalizeTransactionReplication(storage); + const auto finalized = client->FinalizeTransactionReplication(storage, std::move(db_acc)); if (client->Mode() == memgraph::replication::ReplicationMode::SYNC) { finalized_on_all_replicas = finalized && finalized_on_all_replicas; @@ -83,7 +88,8 @@ std::vector ReplicationStorageState::ReplicasInfo(const Storage *st std::vector replica_infos; replica_infos.reserve(clients.size()); auto const asReplicaInfo = [storage](ReplicationClientPtr const &client) -> ReplicaInfo { - return {client->Name(), client->Mode(), client->Endpoint(), client->State(), client->GetTimestampInfo(storage)}; + const auto ts = client->GetTimestampInfo(storage); + return {client->Name(), client->Mode(), client->Endpoint(), client->State(), ts}; }; std::transform(clients.begin(), clients.end(), std::back_inserter(replica_infos), asReplicaInfo); return replica_infos; diff --git a/src/storage/v2/replication/replication_storage_state.hpp b/src/storage/v2/replication/replication_storage_state.hpp index e3d6b94a0..adbf87aa9 100644 --- a/src/storage/v2/replication/replication_storage_state.hpp +++ b/src/storage/v2/replication/replication_storage_state.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -25,6 +25,7 @@ #include "replication/config.hpp" #include "replication/epoch.hpp" #include "replication/state.hpp" +#include "storage/v2/database_access.hpp" #include "storage/v2/replication/enums.hpp" #include "storage/v2/replication/global.hpp" #include "storage/v2/replication/rpc.hpp" @@ -39,13 +40,13 @@ class ReplicationStorageClient; struct ReplicationStorageState { // Only MAIN can send - void InitializeTransaction(uint64_t seq_num, Storage *storage); + void InitializeTransaction(uint64_t seq_num, Storage *storage, DatabaseAccessProtector db_acc); void AppendDelta(const Delta &delta, const Vertex &vertex, uint64_t timestamp); void AppendDelta(const Delta &delta, const Edge &edge, uint64_t timestamp); void AppendOperation(durability::StorageMetadataOperation operation, LabelId label, const std::set &properties, const LabelIndexStats &stats, const LabelPropertyIndexStats &property_stats, uint64_t final_commit_timestamp); - bool FinalizeTransaction(uint64_t timestamp, Storage *storage); + bool FinalizeTransaction(uint64_t timestamp, Storage *storage, DatabaseAccessProtector db_acc); // Getters auto GetReplicaState(std::string_view name) const -> std::optional; diff --git a/src/storage/v2/replication/rpc.cpp b/src/storage/v2/replication/rpc.cpp index b722dfebf..27fc1a0d6 100644 --- a/src/storage/v2/replication/rpc.cpp +++ b/src/storage/v2/replication/rpc.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -10,6 +10,9 @@ // licenses/APL.txt. 
#include "storage/v2/replication/rpc.hpp" +#include +#include "slk/streams.hpp" +#include "utils/enum.hpp" #include "utils/typeinfo.hpp" namespace memgraph { @@ -56,6 +59,38 @@ void TimestampRes::Save(const TimestampRes &self, memgraph::slk::Builder *builde memgraph::slk::Save(self, builder); } void TimestampRes::Load(TimestampRes *self, memgraph::slk::Reader *reader) { memgraph::slk::Load(self, reader); } +void CreateDatabaseReq::Save(const CreateDatabaseReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void CreateDatabaseReq::Load(CreateDatabaseReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} +void CreateDatabaseRes::Save(const CreateDatabaseRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void CreateDatabaseRes::Load(CreateDatabaseRes *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} +void DropDatabaseReq::Save(const DropDatabaseReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void DropDatabaseReq::Load(DropDatabaseReq *self, memgraph::slk::Reader *reader) { memgraph::slk::Load(self, reader); } +void DropDatabaseRes::Save(const DropDatabaseRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void DropDatabaseRes::Load(DropDatabaseRes *self, memgraph::slk::Reader *reader) { memgraph::slk::Load(self, reader); } +void SystemRecoveryReq::Save(const SystemRecoveryReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void SystemRecoveryReq::Load(SystemRecoveryReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} +void SystemRecoveryRes::Save(const SystemRecoveryRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void SystemRecoveryRes::Load(SystemRecoveryRes *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} } // namespace storage::replication @@ -95,18 +130,34 @@ constexpr utils::TypeInfo storage::replication::TimestampReq::kType{utils::TypeI constexpr utils::TypeInfo storage::replication::TimestampRes::kType{utils::TypeId::REP_TIMESTAMP_RES, "TimestampRes", nullptr}; +constexpr utils::TypeInfo storage::replication::CreateDatabaseReq::kType{utils::TypeId::REP_CREATE_DATABASE_REQ, + "CreateDatabaseReq", nullptr}; + +constexpr utils::TypeInfo storage::replication::CreateDatabaseRes::kType{utils::TypeId::REP_CREATE_DATABASE_RES, + "CreateDatabaseRes", nullptr}; + +constexpr utils::TypeInfo storage::replication::DropDatabaseReq::kType{utils::TypeId::REP_DROP_DATABASE_REQ, + "DropDatabaseReq", nullptr}; + +constexpr utils::TypeInfo storage::replication::DropDatabaseRes::kType{utils::TypeId::REP_DROP_DATABASE_RES, + "DropDatabaseRes", nullptr}; + +constexpr utils::TypeInfo storage::replication::SystemRecoveryReq::kType{utils::TypeId::REP_SYSTEM_RECOVERY_REQ, + "SystemRecoveryReq", nullptr}; + +constexpr utils::TypeInfo storage::replication::SystemRecoveryRes::kType{utils::TypeId::REP_SYSTEM_RECOVERY_RES, + "SystemRecoveryRes", nullptr}; + // Autogenerated SLK serialization code namespace slk { // Serialize code for TimestampRes void Save(const memgraph::storage::replication::TimestampRes &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); memgraph::slk::Save(self.success, builder); memgraph::slk::Save(self.current_commit_timestamp, builder); } void Load(memgraph::storage::replication::TimestampRes *self, 
memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); memgraph::slk::Load(&self->success, reader); memgraph::slk::Load(&self->current_commit_timestamp, reader); } @@ -114,23 +165,21 @@ void Load(memgraph::storage::replication::TimestampRes *self, memgraph::slk::Rea // Serialize code for TimestampReq void Save(const memgraph::storage::replication::TimestampReq &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); + memgraph::slk::Save(self.uuid, builder); } void Load(memgraph::storage::replication::TimestampReq *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); + memgraph::slk::Load(&self->uuid, reader); } // Serialize code for CurrentWalRes void Save(const memgraph::storage::replication::CurrentWalRes &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); memgraph::slk::Save(self.success, builder); memgraph::slk::Save(self.current_commit_timestamp, builder); } void Load(memgraph::storage::replication::CurrentWalRes *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); memgraph::slk::Load(&self->success, reader); memgraph::slk::Load(&self->current_commit_timestamp, reader); } @@ -138,23 +187,21 @@ void Load(memgraph::storage::replication::CurrentWalRes *self, memgraph::slk::Re // Serialize code for CurrentWalReq void Save(const memgraph::storage::replication::CurrentWalReq &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); + memgraph::slk::Save(self.uuid, builder); } void Load(memgraph::storage::replication::CurrentWalReq *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); + memgraph::slk::Load(&self->uuid, reader); } // Serialize code for WalFilesRes void Save(const memgraph::storage::replication::WalFilesRes &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); memgraph::slk::Save(self.success, builder); memgraph::slk::Save(self.current_commit_timestamp, builder); } void Load(memgraph::storage::replication::WalFilesRes *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); memgraph::slk::Load(&self->success, reader); memgraph::slk::Load(&self->current_commit_timestamp, reader); } @@ -162,25 +209,23 @@ void Load(memgraph::storage::replication::WalFilesRes *self, memgraph::slk::Read // Serialize code for WalFilesReq void Save(const memgraph::storage::replication::WalFilesReq &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); + memgraph::slk::Save(self.uuid, builder); memgraph::slk::Save(self.file_number, builder); } void Load(memgraph::storage::replication::WalFilesReq *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); + memgraph::slk::Load(&self->uuid, reader); memgraph::slk::Load(&self->file_number, reader); } // Serialize code for SnapshotRes void Save(const memgraph::storage::replication::SnapshotRes &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); memgraph::slk::Save(self.success, builder); memgraph::slk::Save(self.current_commit_timestamp, builder); } void Load(memgraph::storage::replication::SnapshotRes *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); memgraph::slk::Load(&self->success, reader); memgraph::slk::Load(&self->current_commit_timestamp, reader); } @@ -188,24 +233,22 @@ void Load(memgraph::storage::replication::SnapshotRes *self, 
memgraph::slk::Read // Serialize code for SnapshotReq void Save(const memgraph::storage::replication::SnapshotReq &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); + memgraph::slk::Save(self.uuid, builder); } void Load(memgraph::storage::replication::SnapshotReq *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); + memgraph::slk::Load(&self->uuid, reader); } // Serialize code for HeartbeatRes void Save(const memgraph::storage::replication::HeartbeatRes &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); memgraph::slk::Save(self.success, builder); memgraph::slk::Save(self.current_commit_timestamp, builder); memgraph::slk::Save(self.epoch_id, builder); } void Load(memgraph::storage::replication::HeartbeatRes *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); memgraph::slk::Load(&self->success, reader); memgraph::slk::Load(&self->current_commit_timestamp, reader); memgraph::slk::Load(&self->epoch_id, reader); @@ -214,13 +257,13 @@ void Load(memgraph::storage::replication::HeartbeatRes *self, memgraph::slk::Rea // Serialize code for HeartbeatReq void Save(const memgraph::storage::replication::HeartbeatReq &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); + memgraph::slk::Save(self.uuid, builder); memgraph::slk::Save(self.main_commit_timestamp, builder); memgraph::slk::Save(self.epoch_id, builder); } void Load(memgraph::storage::replication::HeartbeatReq *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); + memgraph::slk::Load(&self->uuid, reader); memgraph::slk::Load(&self->main_commit_timestamp, reader); memgraph::slk::Load(&self->epoch_id, reader); } @@ -228,13 +271,11 @@ void Load(memgraph::storage::replication::HeartbeatReq *self, memgraph::slk::Rea // Serialize code for AppendDeltasRes void Save(const memgraph::storage::replication::AppendDeltasRes &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); memgraph::slk::Save(self.success, builder); memgraph::slk::Save(self.current_commit_timestamp, builder); } void Load(memgraph::storage::replication::AppendDeltasRes *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); memgraph::slk::Load(&self->success, reader); memgraph::slk::Load(&self->current_commit_timestamp, reader); } @@ -242,15 +283,124 @@ void Load(memgraph::storage::replication::AppendDeltasRes *self, memgraph::slk:: // Serialize code for AppendDeltasReq void Save(const memgraph::storage::replication::AppendDeltasReq &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); + memgraph::slk::Save(self.uuid, builder); memgraph::slk::Save(self.previous_commit_timestamp, builder); memgraph::slk::Save(self.seq_num, builder); } void Load(memgraph::storage::replication::AppendDeltasReq *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); + memgraph::slk::Load(&self->uuid, reader); memgraph::slk::Load(&self->previous_commit_timestamp, reader); memgraph::slk::Load(&self->seq_num, reader); } + +// Serialize SalientConfig + +void Save(const memgraph::storage::SalientConfig &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self.name, builder); + memgraph::slk::Save(self.uuid, builder); + memgraph::slk::Save(utils::EnumToNum<3, uint8_t>(self.storage_mode), builder); + memgraph::slk::Save(self.items.properties_on_edges, builder); + 
memgraph::slk::Save(self.items.enable_schema_metadata, builder); +} + +void Load(memgraph::storage::SalientConfig *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(&self->name, reader); + memgraph::slk::Load(&self->uuid, reader); + uint8_t sm = 0; + memgraph::slk::Load(&sm, reader); + if (!utils::NumToEnum<3>(sm, self->storage_mode)) { + throw SlkReaderException("Unexpected result line:{}!", __LINE__); + } + memgraph::slk::Load(&self->items.properties_on_edges, reader); + memgraph::slk::Load(&self->items.enable_schema_metadata, reader); +} + +// Serialize code for CreateDatabaseReq + +void Save(const memgraph::storage::replication::CreateDatabaseReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self.epoch_id, builder); + memgraph::slk::Save(self.expected_group_timestamp, builder); + memgraph::slk::Save(self.new_group_timestamp, builder); + memgraph::slk::Save(self.config, builder); +} + +void Load(memgraph::storage::replication::CreateDatabaseReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(&self->epoch_id, reader); + memgraph::slk::Load(&self->expected_group_timestamp, reader); + memgraph::slk::Load(&self->new_group_timestamp, reader); + memgraph::slk::Load(&self->config, reader); +} + +// Serialize code for CreateDatabaseRes + +void Save(const memgraph::storage::replication::CreateDatabaseRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(utils::EnumToNum(self.result), builder); +} + +void Load(memgraph::storage::replication::CreateDatabaseRes *self, memgraph::slk::Reader *reader) { + uint8_t res = 0; + memgraph::slk::Load(&res, reader); + if (!utils::NumToEnum(res, self->result)) { + throw SlkReaderException("Unexpected result line:{}!", __LINE__); + } +} + +// Serialize code for DropDatabaseReq + +void Save(const memgraph::storage::replication::DropDatabaseReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self.epoch_id, builder); + memgraph::slk::Save(self.expected_group_timestamp, builder); + memgraph::slk::Save(self.new_group_timestamp, builder); + memgraph::slk::Save(self.uuid, builder); +} + +void Load(memgraph::storage::replication::DropDatabaseReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(&self->epoch_id, reader); + memgraph::slk::Load(&self->expected_group_timestamp, reader); + memgraph::slk::Load(&self->new_group_timestamp, reader); + memgraph::slk::Load(&self->uuid, reader); +} + +// Serialize code for DropDatabaseRes + +void Save(const memgraph::storage::replication::DropDatabaseRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(utils::EnumToNum(self.result), builder); +} + +void Load(memgraph::storage::replication::DropDatabaseRes *self, memgraph::slk::Reader *reader) { + uint8_t res = 0; + memgraph::slk::Load(&res, reader); + if (!utils::NumToEnum(res, self->result)) { + throw SlkReaderException("Unexpected result line:{}!", __LINE__); + } +} + +// Serialize code for SystemRecoveryReq + +void Save(const memgraph::storage::replication::SystemRecoveryReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self.forced_group_timestamp, builder); + memgraph::slk::Save(self.database_configs, builder); +} + +void Load(memgraph::storage::replication::SystemRecoveryReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(&self->forced_group_timestamp, reader); + memgraph::slk::Load(&self->database_configs, reader); +} + +// Serialize code for SystemRecoveryRes + +void Save(const memgraph::storage::replication::SystemRecoveryRes &self, 
memgraph::slk::Builder *builder) { + memgraph::slk::Save(utils::EnumToNum(self.result), builder); +} + +void Load(memgraph::storage::replication::SystemRecoveryRes *self, memgraph::slk::Reader *reader) { + uint8_t res = 0; + memgraph::slk::Load(&res, reader); + if (!utils::NumToEnum(res, self->result)) { + throw SlkReaderException("Unexpected result line:{}!", __LINE__); + } +} + } // namespace slk } // namespace memgraph diff --git a/src/storage/v2/replication/rpc.hpp b/src/storage/v2/replication/rpc.hpp index 9e2f0b35e..62f8b680c 100644 --- a/src/storage/v2/replication/rpc.hpp +++ b/src/storage/v2/replication/rpc.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -19,6 +19,9 @@ #include "rpc/messages.hpp" #include "slk/serialization.hpp" #include "slk/streams.hpp" +#include "storage/v2/config.hpp" +#include "utils/enum.hpp" +#include "utils/uuid.hpp" namespace memgraph::storage::replication { @@ -29,10 +32,10 @@ struct AppendDeltasReq { static void Load(AppendDeltasReq *self, memgraph::slk::Reader *reader); static void Save(const AppendDeltasReq &self, memgraph::slk::Builder *builder); AppendDeltasReq() = default; - AppendDeltasReq(std::string name, uint64_t previous_commit_timestamp, uint64_t seq_num) - : db_name(std::move(name)), previous_commit_timestamp(previous_commit_timestamp), seq_num(seq_num) {} + AppendDeltasReq(const utils::UUID &uuid, uint64_t previous_commit_timestamp, uint64_t seq_num) + : uuid{uuid}, previous_commit_timestamp(previous_commit_timestamp), seq_num(seq_num) {} - std::string db_name; + utils::UUID uuid; uint64_t previous_commit_timestamp; uint64_t seq_num; }; @@ -44,10 +47,9 @@ struct AppendDeltasRes { static void Load(AppendDeltasRes *self, memgraph::slk::Reader *reader); static void Save(const AppendDeltasRes &self, memgraph::slk::Builder *builder); AppendDeltasRes() = default; - AppendDeltasRes(std::string name, bool success, uint64_t current_commit_timestamp) - : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {} + AppendDeltasRes(bool success, uint64_t current_commit_timestamp) + : success(success), current_commit_timestamp(current_commit_timestamp) {} - std::string db_name; bool success; uint64_t current_commit_timestamp; }; @@ -61,10 +63,10 @@ struct HeartbeatReq { static void Load(HeartbeatReq *self, memgraph::slk::Reader *reader); static void Save(const HeartbeatReq &self, memgraph::slk::Builder *builder); HeartbeatReq() = default; - HeartbeatReq(std::string name, uint64_t main_commit_timestamp, std::string epoch_id) - : db_name(std::move(name)), main_commit_timestamp(main_commit_timestamp), epoch_id(std::move(epoch_id)) {} + HeartbeatReq(const utils::UUID &uuid, uint64_t main_commit_timestamp, std::string epoch_id) + : uuid{uuid}, main_commit_timestamp(main_commit_timestamp), epoch_id(std::move(epoch_id)) {} - std::string db_name; + utils::UUID uuid; uint64_t main_commit_timestamp; std::string epoch_id; }; @@ -76,13 +78,9 @@ struct HeartbeatRes { static void Load(HeartbeatRes *self, memgraph::slk::Reader *reader); static void Save(const HeartbeatRes &self, memgraph::slk::Builder *builder); HeartbeatRes() = default; - HeartbeatRes(std::string name, bool success, uint64_t current_commit_timestamp, std::string epoch_id) - : db_name(std::move(name)), - success(success), - 
current_commit_timestamp(current_commit_timestamp), - epoch_id(std::move(epoch_id)) {} + HeartbeatRes(bool success, uint64_t current_commit_timestamp, std::string epoch_id) + : success(success), current_commit_timestamp(current_commit_timestamp), epoch_id(std::move(epoch_id)) {} - std::string db_name; bool success; uint64_t current_commit_timestamp; std::string epoch_id; @@ -97,9 +95,9 @@ struct SnapshotReq { static void Load(SnapshotReq *self, memgraph::slk::Reader *reader); static void Save(const SnapshotReq &self, memgraph::slk::Builder *builder); SnapshotReq() = default; - explicit SnapshotReq(std::string name) : db_name(std::move(name)) {} + explicit SnapshotReq(const utils::UUID &uuid) : uuid{uuid} {} - std::string db_name; + utils::UUID uuid; }; struct SnapshotRes { @@ -109,10 +107,9 @@ struct SnapshotRes { static void Load(SnapshotRes *self, memgraph::slk::Reader *reader); static void Save(const SnapshotRes &self, memgraph::slk::Builder *builder); SnapshotRes() = default; - SnapshotRes(std::string name, bool success, uint64_t current_commit_timestamp) - : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {} + SnapshotRes(bool success, uint64_t current_commit_timestamp) + : success(success), current_commit_timestamp(current_commit_timestamp) {} - std::string db_name; bool success; uint64_t current_commit_timestamp; }; @@ -126,9 +123,9 @@ struct WalFilesReq { static void Load(WalFilesReq *self, memgraph::slk::Reader *reader); static void Save(const WalFilesReq &self, memgraph::slk::Builder *builder); WalFilesReq() = default; - explicit WalFilesReq(std::string name, uint64_t file_number) : db_name(std::move(name)), file_number(file_number) {} + explicit WalFilesReq(const utils::UUID &uuid, uint64_t file_number) : uuid{uuid}, file_number(file_number) {} - std::string db_name; + utils::UUID uuid; uint64_t file_number; }; @@ -139,10 +136,9 @@ struct WalFilesRes { static void Load(WalFilesRes *self, memgraph::slk::Reader *reader); static void Save(const WalFilesRes &self, memgraph::slk::Builder *builder); WalFilesRes() = default; - WalFilesRes(std::string name, bool success, uint64_t current_commit_timestamp) - : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {} + WalFilesRes(bool success, uint64_t current_commit_timestamp) + : success(success), current_commit_timestamp(current_commit_timestamp) {} - std::string db_name; bool success; uint64_t current_commit_timestamp; }; @@ -156,9 +152,9 @@ struct CurrentWalReq { static void Load(CurrentWalReq *self, memgraph::slk::Reader *reader); static void Save(const CurrentWalReq &self, memgraph::slk::Builder *builder); CurrentWalReq() = default; - explicit CurrentWalReq(std::string name) : db_name(std::move(name)) {} + explicit CurrentWalReq(const utils::UUID &uuid) : uuid{uuid} {} - std::string db_name; + utils::UUID uuid; }; struct CurrentWalRes { @@ -168,10 +164,9 @@ struct CurrentWalRes { static void Load(CurrentWalRes *self, memgraph::slk::Reader *reader); static void Save(const CurrentWalRes &self, memgraph::slk::Builder *builder); CurrentWalRes() = default; - CurrentWalRes(std::string name, bool success, uint64_t current_commit_timestamp) - : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {} + CurrentWalRes(bool success, uint64_t current_commit_timestamp) + : success(success), current_commit_timestamp(current_commit_timestamp) {} - std::string db_name; bool success; uint64_t current_commit_timestamp; }; 
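The same change repeats across all of the storage RPC messages above and below: requests drop the std::string db_name member and carry the database's utils::UUID instead, while responses no longer echo a name back at all. As a hedged illustration of the sender side (not code from this patch; `storage`, `last_commit_timestamp` and `epoch_id` are assumed locals):

    // The target database is now addressed by its stable UUID rather than by name;
    // Storage::uuid() is introduced later in this diff (src/storage/v2/storage.hpp).
    const memgraph::utils::UUID &db_uuid = storage->uuid();
    memgraph::storage::replication::HeartbeatReq req{db_uuid, last_commit_timestamp, epoch_id};
    // Before this patch the equivalent was: HeartbeatReq req{db_name, last_commit_timestamp, epoch_id};
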
@@ -185,9 +180,9 @@ struct TimestampReq { static void Load(TimestampReq *self, memgraph::slk::Reader *reader); static void Save(const TimestampReq &self, memgraph::slk::Builder *builder); TimestampReq() = default; - explicit TimestampReq(std::string name) : db_name(std::move(name)) {} + explicit TimestampReq(const utils::UUID &uuid) : uuid{uuid} {} - std::string db_name; + utils::UUID uuid; }; struct TimestampRes { @@ -197,15 +192,117 @@ struct TimestampRes { static void Load(TimestampRes *self, memgraph::slk::Reader *reader); static void Save(const TimestampRes &self, memgraph::slk::Builder *builder); TimestampRes() = default; - TimestampRes(std::string name, bool success, uint64_t current_commit_timestamp) - : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {} + TimestampRes(bool success, uint64_t current_commit_timestamp) + : success(success), current_commit_timestamp(current_commit_timestamp) {} - std::string db_name; bool success; uint64_t current_commit_timestamp; }; using TimestampRpc = rpc::RequestResponse; + +struct CreateDatabaseReq { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + static void Load(CreateDatabaseReq *self, memgraph::slk::Reader *reader); + static void Save(const CreateDatabaseReq &self, memgraph::slk::Builder *builder); + CreateDatabaseReq() = default; + CreateDatabaseReq(std::string epoch_id, uint64_t expected_group_timestamp, uint64_t new_group_timestamp, + storage::SalientConfig config) + : epoch_id(std::move(epoch_id)), + expected_group_timestamp{expected_group_timestamp}, + new_group_timestamp(new_group_timestamp), + config(std::move(config)) {} + + std::string epoch_id; + uint64_t expected_group_timestamp; + uint64_t new_group_timestamp; + storage::SalientConfig config; +}; + +struct CreateDatabaseRes { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + enum class Result : uint8_t { SUCCESS, NO_NEED, FAILURE, /* Leave at end */ N }; + + static void Load(CreateDatabaseRes *self, memgraph::slk::Reader *reader); + static void Save(const CreateDatabaseRes &self, memgraph::slk::Builder *builder); + CreateDatabaseRes() = default; + explicit CreateDatabaseRes(Result res) : result(res) {} + + Result result; +}; + +using CreateDatabaseRpc = rpc::RequestResponse; + +struct DropDatabaseReq { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + static void Load(DropDatabaseReq *self, memgraph::slk::Reader *reader); + static void Save(const DropDatabaseReq &self, memgraph::slk::Builder *builder); + DropDatabaseReq() = default; + DropDatabaseReq(std::string epoch_id, uint64_t expected_group_timestamp, uint64_t new_group_timestamp, + const utils::UUID &uuid) + : epoch_id(std::move(epoch_id)), + expected_group_timestamp{expected_group_timestamp}, + new_group_timestamp(new_group_timestamp), + uuid(uuid) {} + + std::string epoch_id; + uint64_t expected_group_timestamp; + uint64_t new_group_timestamp; + utils::UUID uuid; +}; + +struct DropDatabaseRes { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + enum class Result : uint8_t { SUCCESS, NO_NEED, FAILURE, /* Leave at end */ N }; + + static void Load(DropDatabaseRes *self, memgraph::slk::Reader *reader); + static void Save(const DropDatabaseRes &self, memgraph::slk::Builder *builder); + DropDatabaseRes() = default; + explicit DropDatabaseRes(Result res) : 
result(res) {} + + Result result; +}; + +using DropDatabaseRpc = rpc::RequestResponse; + +struct SystemRecoveryReq { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + static void Load(SystemRecoveryReq *self, memgraph::slk::Reader *reader); + static void Save(const SystemRecoveryReq &self, memgraph::slk::Builder *builder); + SystemRecoveryReq() = default; + SystemRecoveryReq(uint64_t forced_group_timestamp, std::vector database_configs) + : forced_group_timestamp{forced_group_timestamp}, database_configs(std::move(database_configs)) {} + + uint64_t forced_group_timestamp; + std::vector database_configs; +}; + +struct SystemRecoveryRes { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + enum class Result : uint8_t { SUCCESS, NO_NEED, FAILURE, /* Leave at end */ N }; + + static void Load(SystemRecoveryRes *self, memgraph::slk::Reader *reader); + static void Save(const SystemRecoveryRes &self, memgraph::slk::Builder *builder); + SystemRecoveryRes() = default; + explicit SystemRecoveryRes(Result res) : result(res) {} + + Result result; +}; + +using SystemRecoveryRpc = rpc::RequestResponse; + } // namespace memgraph::storage::replication // SLK serialization declarations @@ -259,4 +356,28 @@ void Save(const memgraph::storage::replication::AppendDeltasReq &self, memgraph: void Load(memgraph::storage::replication::AppendDeltasReq *self, memgraph::slk::Reader *reader); +void Save(const memgraph::storage::replication::CreateDatabaseReq &self, memgraph::slk::Builder *builder); + +void Load(memgraph::storage::replication::CreateDatabaseReq *self, memgraph::slk::Reader *reader); + +void Save(const memgraph::storage::replication::CreateDatabaseRes &self, memgraph::slk::Builder *builder); + +void Load(memgraph::storage::replication::CreateDatabaseRes *self, memgraph::slk::Reader *reader); + +void Save(const memgraph::storage::replication::DropDatabaseReq &self, memgraph::slk::Builder *builder); + +void Load(memgraph::storage::replication::DropDatabaseReq *self, memgraph::slk::Reader *reader); + +void Save(const memgraph::storage::replication::DropDatabaseRes &self, memgraph::slk::Builder *builder); + +void Load(memgraph::storage::replication::DropDatabaseRes *self, memgraph::slk::Reader *reader); + +void Save(const memgraph::storage::replication::SystemRecoveryReq &self, memgraph::slk::Builder *builder); + +void Load(memgraph::storage::replication::SystemRecoveryReq *self, memgraph::slk::Reader *reader); + +void Save(const memgraph::storage::replication::SystemRecoveryRes &self, memgraph::slk::Builder *builder); + +void Load(memgraph::storage::replication::SystemRecoveryRes *self, memgraph::slk::Reader *reader); + } // namespace memgraph::slk diff --git a/src/storage/v2/storage.cpp b/src/storage/v2/storage.cpp index 52dcad243..9045f20e9 100644 --- a/src/storage/v2/storage.cpp +++ b/src/storage/v2/storage.cpp @@ -44,8 +44,7 @@ Storage::Storage(Config config, StorageMode storage_mode) isolation_level_(config.transaction.isolation_level), storage_mode_(storage_mode), indices_(config, storage_mode), - constraints_(config, storage_mode), - id_(config.name) { + constraints_(config, storage_mode) { spdlog::info("Created database with {} storage mode.", StorageModeToString(storage_mode)); } @@ -319,7 +318,7 @@ EdgeInfoForDeletion Storage::Accessor::PrepareDeletableEdges(const std::unordere const auto &[edge_type, opposing_vertex, edge] = item; if (!vertices.contains(opposing_vertex)) { 
partial_delete_vertices.insert(opposing_vertex); - auto const edge_gid = storage_->config_.items.properties_on_edges ? edge.ptr->gid : edge.gid; + auto const edge_gid = storage_->config_.salient.items.properties_on_edges ? edge.ptr->gid : edge.gid; edge_ids.insert(edge_gid); } }; @@ -381,7 +380,7 @@ Result>> Storage::Accessor::ClearEdgesOn /// TODO: (andi) Again here, no need to lock the edge if using on disk storage. std::unique_lock guard; - if (storage_->config_.items.properties_on_edges) { + if (storage_->config_.salient.items.properties_on_edges) { auto edge_ptr = edge_ref.ptr; guard = std::unique_lock{edge_ptr->lock}; @@ -398,12 +397,12 @@ Result>> Storage::Accessor::ClearEdgesOn edge_type = edge_type, opposing_vertex = opposing_vertex, edge_ref = edge_ref, this]() { attached_edges_to_vertex->pop_back(); - if (this->storage_->config_.items.properties_on_edges) { + if (this->storage_->config_.salient.items.properties_on_edges) { auto *edge_ptr = edge_ref.ptr; MarkEdgeAsDeleted(edge_ptr); } - auto const edge_gid = storage_->config_.items.properties_on_edges ? edge_ref.ptr->gid : edge_ref.gid; + auto const edge_gid = storage_->config_.salient.items.properties_on_edges ? edge_ref.ptr->gid : edge_ref.gid; auto const [_, was_inserted] = deleted_edge_ids.insert(edge_gid); bool const edge_cleared_from_both_directions = !was_inserted; if (edge_cleared_from_both_directions) { @@ -453,7 +452,7 @@ Result>> Storage::Accessor::DetachRemain auto mid = std::partition( edges_attached_to_vertex->begin(), edges_attached_to_vertex->end(), [this, &set_for_erasure](auto &edge) { auto const &[edge_type, opposing_vertex, edge_ref] = edge; - auto const edge_gid = storage_->config_.items.properties_on_edges ? edge_ref.ptr->gid : edge_ref.gid; + auto const edge_gid = storage_->config_.salient.items.properties_on_edges ? edge_ref.ptr->gid : edge_ref.gid; return !set_for_erasure.contains(edge_gid); }); @@ -465,7 +464,7 @@ Result>> Storage::Accessor::DetachRemain for (auto it = mid; it != edges_attached_to_vertex->end(); it++) { auto const &[edge_type, opposing_vertex, edge_ref] = *it; std::unique_lock guard; - if (storage_->config_.items.properties_on_edges) { + if (storage_->config_.salient.items.properties_on_edges) { auto edge_ptr = edge_ref.ptr; guard = std::unique_lock{edge_ptr->lock}; // this can happen only if we marked edges for deletion with no nodes, @@ -475,7 +474,7 @@ Result>> Storage::Accessor::DetachRemain CreateAndLinkDelta(&transaction_, vertex_ptr, deletion_delta, edge_type, opposing_vertex, edge_ref); - auto const edge_gid = storage_->config_.items.properties_on_edges ? edge_ref.ptr->gid : edge_ref.gid; + auto const edge_gid = storage_->config_.salient.items.properties_on_edges ? 
edge_ref.ptr->gid : edge_ref.gid; auto const [_, was_inserted] = partially_detached_edge_ids.insert(edge_gid); bool const edge_cleared_from_both_directions = !was_inserted; if (edge_cleared_from_both_directions) { @@ -488,7 +487,6 @@ Result>> Storage::Accessor::DetachRemain }}; std::invoke(atomic_memory_block); - return std::make_optional(); }; diff --git a/src/storage/v2/storage.hpp b/src/storage/v2/storage.hpp index cf93ae3d9..94c4ccdf9 100644 --- a/src/storage/v2/storage.hpp +++ b/src/storage/v2/storage.hpp @@ -12,6 +12,8 @@ #pragma once #include +#include +#include #include #include #include @@ -24,6 +26,7 @@ #include "storage/v2/all_vertices_iterable.hpp" #include "storage/v2/commit_log.hpp" #include "storage/v2/config.hpp" +#include "storage/v2/database_access.hpp" #include "storage/v2/durability/paths.hpp" #include "storage/v2/durability/wal.hpp" #include "storage/v2/edge_accessor.hpp" @@ -52,7 +55,6 @@ extern const Event ActiveLabelPropertyIndices; } // namespace memgraph::metrics namespace memgraph::storage { - struct Transaction; class EdgeAccessor; @@ -108,6 +110,15 @@ struct EdgeInfoForDeletion { std::unordered_set partial_dest_vertices{}; }; +struct CommitReplArgs { + // REPLICA on recipt of Deltas will have a desired commit timestamp + std::optional desired_commit_timestamp = std::nullopt; + + bool is_main = true; + + bool IsMain() { return is_main; } +}; + class Storage { friend class ReplicationServer; friend class ReplicationStorageClient; @@ -122,7 +133,9 @@ class Storage { virtual ~Storage() = default; - const std::string &id() const { return id_; } + const std::string &name() const { return config_.salient.name; } + + const utils::UUID &uuid() const { return config_.salient.uuid; } class Accessor { public: @@ -216,8 +229,8 @@ class Storage { virtual ConstraintsInfo ListAllConstraints() const = 0; // NOLINTNEXTLINE(google-default-arguments) - virtual utils::BasicResult Commit( - std::optional desired_commit_timestamp = {}, bool is_main = true) = 0; + virtual utils::BasicResult Commit(CommitReplArgs reparg = {}, + DatabaseAccessProtector db_acc = {}) = 0; virtual void Abort() = 0; @@ -241,7 +254,7 @@ class Storage { StorageMode GetCreationStorageMode() const noexcept; - const std::string &id() const { return storage_->id(); } + const std::string &id() const { return storage_->name(); } std::vector ListAllPossiblyPresentVertexLabels() const; @@ -399,7 +412,6 @@ class Storage { std::atomic vertex_id_{0}; std::atomic edge_id_{0}; - const std::string id_; //!< High-level assigned ID }; } // namespace memgraph::storage diff --git a/src/storage/v2/storage_mode.hpp b/src/storage/v2/storage_mode.hpp index c02d3c177..f4a133f38 100644 --- a/src/storage/v2/storage_mode.hpp +++ b/src/storage/v2/storage_mode.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -11,14 +11,19 @@ #pragma once +#include #include #include - namespace memgraph::storage { enum class StorageMode : std::uint8_t { IN_MEMORY_ANALYTICAL, IN_MEMORY_TRANSACTIONAL, ON_DISK_TRANSACTIONAL }; -bool IsTransactional(const StorageMode storage_mode) noexcept; +inline constexpr std::array storage_mode_mappings{ + std::pair{std::string_view{"IN_MEMORY_TRANSACTIONAL"}, memgraph::storage::StorageMode::IN_MEMORY_TRANSACTIONAL}, + std::pair{std::string_view{"IN_MEMORY_ANALYTICAL"}, memgraph::storage::StorageMode::IN_MEMORY_ANALYTICAL}, + std::pair{std::string_view{"ON_DISK_TRANSACTIONAL"}, memgraph::storage::StorageMode::ON_DISK_TRANSACTIONAL}}; + +bool IsTransactional(StorageMode storage_mode) noexcept; std::string_view StorageModeToString(memgraph::storage::StorageMode storage_mode); diff --git a/src/storage/v2/vertex_accessor.cpp b/src/storage/v2/vertex_accessor.cpp index 6559cdee4..ff5062444 100644 --- a/src/storage/v2/vertex_accessor.cpp +++ b/src/storage/v2/vertex_accessor.cpp @@ -114,7 +114,7 @@ Result VertexAccessor::AddLabel(LabelId label) { }}; std::invoke(atomic_memory_block); - if (storage_->config_.items.enable_schema_metadata) { + if (storage_->config_.salient.items.enable_schema_metadata) { storage_->stored_node_labels_.try_insert(label); } diff --git a/src/utils/CMakeLists.txt b/src/utils/CMakeLists.txt index 276927725..bac3e78f3 100644 --- a/src/utils/CMakeLists.txt +++ b/src/utils/CMakeLists.txt @@ -24,8 +24,8 @@ find_package(Threads REQUIRED) add_library(mg-utils STATIC ${utils_src_files}) add_library(mg::utils ALIAS mg-utils) -target_link_libraries(mg-utils PUBLIC Boost::headers fmt::fmt spdlog::spdlog) -target_link_libraries(mg-utils PRIVATE librdtsc stdc++fs Threads::Threads gflags json uuid rt) +target_link_libraries(mg-utils PUBLIC Boost::headers fmt::fmt spdlog::spdlog json) +target_link_libraries(mg-utils PRIVATE librdtsc stdc++fs Threads::Threads gflags uuid rt) set(settings_src_files settings.cpp) diff --git a/src/utils/enum.hpp b/src/utils/enum.hpp index 505802088..4f1ffbd0d 100644 --- a/src/utils/enum.hpp +++ b/src/utils/enum.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -54,4 +54,34 @@ std::optional StringToEnum(const auto &value, const auto &mappings) { return mapping_iter->second; } + +template +requires std::integral +inline T EnumToNum(Enum res) { + static_assert(std::numeric_limits::max() >= static_cast(Enum::N)); + return static_cast(res); +} + +template +requires std::integral +inline bool NumToEnum(T input, Enum &res) { + if (input >= EnumToNum(Enum::N)) return false; + res = static_cast(input); + return true; +} + +template +requires std::integral +inline T EnumToNum(Enum res) { + static_assert(std::numeric_limits::max() >= Num); + return static_cast(res); +} + +template +requires std::integral +inline bool NumToEnum(T input, Enum &res) { + if (input >= Num) return false; + res = static_cast(input); + return true; +} } // namespace memgraph::utils diff --git a/src/utils/gatekeeper.hpp b/src/utils/gatekeeper.hpp index 21dad2543..862cad982 100644 --- a/src/utils/gatekeeper.hpp +++ b/src/utils/gatekeeper.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -78,21 +78,33 @@ struct EvalResult { template EvalResult(run_t, Func &&, T &) -> EvalResult>; +template +struct GKInternals { + template + explicit GKInternals(Args &&...args) : value_{std::in_place, std::forward(args)...} {} + + std::optional value_; + uint64_t count_ = 0; + std::atomic_bool is_deleting = false; + std::mutex mutex_; // TODO change to something cheaper? 
+ std::condition_variable cv_; +}; + template struct Gatekeeper { template - explicit Gatekeeper(Args &&...args) : value_{std::in_place, std::forward(args)...} {} + explicit Gatekeeper(Args &&...args) : pimpl_(std::make_unique>(std::forward(args)...)) {} Gatekeeper(Gatekeeper const &) = delete; - Gatekeeper(Gatekeeper &&) noexcept = delete; + Gatekeeper(Gatekeeper &&) noexcept = default; Gatekeeper &operator=(Gatekeeper const &) = delete; - Gatekeeper &operator=(Gatekeeper &&) = delete; + Gatekeeper &operator=(Gatekeeper &&) noexcept = default; struct Accessor { friend Gatekeeper; private: - explicit Accessor(Gatekeeper *owner) : owner_{owner} { ++owner_->count_; } + explicit Accessor(Gatekeeper *owner) : owner_{owner->pimpl_.get()} { ++owner_->count_; } public: Accessor(Accessor const &other) : owner_{other.owner_} { @@ -139,6 +151,14 @@ struct Gatekeeper { return *this; } + [[nodiscard]] bool is_deleting() const { return owner_->is_deleting; } + + void prepare_for_deletion() { + if (owner_) { + owner_->is_deleting = true; + } + } + ~Accessor() { reset(); } auto get() -> T * { return std::addressof(*owner_->value_); } @@ -159,18 +179,26 @@ struct Gatekeeper { } // Completely invalidated the accessor if return true - [[nodiscard]] bool try_delete(std::chrono::milliseconds timeout = std::chrono::milliseconds(100)) { + template + [[nodiscard]] bool try_delete(std::chrono::milliseconds timeout = std::chrono::milliseconds(100), + Func &&predicate = {}) { // Prevent new access auto guard = std::unique_lock{owner_->mutex_}; if (!owner_->cv_.wait_for(guard, timeout, [this] { return owner_->count_ == 1; })) { return false; } - // Delete value + // Already deleted + if (owner_->value_ == std::nullopt) return true; + // Delete value if ok + if (!predicate(*owner_->value_)) return false; owner_->value_ = std::nullopt; return true; } - explicit operator bool() const { return owner_ != nullptr; } + explicit operator bool() const { + return owner_ != nullptr // we have access + && !owner_->is_deleting; // AND we are allowed to use it + } void reset() { if (owner_) { @@ -186,28 +214,27 @@ struct Gatekeeper { friend bool operator==(Accessor const &lhs, Accessor const &rhs) { return lhs.owner_ == rhs.owner_; } private: - Gatekeeper *owner_ = nullptr; + GKInternals *owner_ = nullptr; }; std::optional access() { - auto guard = std::unique_lock{mutex_}; - if (value_) { + auto guard = std::unique_lock{pimpl_->mutex_}; + if (pimpl_->value_) { return Accessor{this}; } return std::nullopt; } ~Gatekeeper() { + if (!pimpl_) return; // Moved out, nothing to do + pimpl_->is_deleting = true; // wait for count to drain to 0 - auto lock = std::unique_lock{mutex_}; - cv_.wait(lock, [this] { return count_ == 0; }); + auto lock = std::unique_lock{pimpl_->mutex_}; + pimpl_->cv_.wait(lock, [this] { return pimpl_->count_ == 0; }); } private: - std::optional value_; - uint64_t count_ = 0; - std::mutex mutex_; // TODO change to something cheaper? - std::condition_variable cv_; + std::unique_ptr> pimpl_; }; } // namespace memgraph::utils diff --git a/src/utils/resource_lock.hpp b/src/utils/resource_lock.hpp index 7d6be2685..7a3ef9444 100644 --- a/src/utils/resource_lock.hpp +++ b/src/utils/resource_lock.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -48,6 +48,15 @@ struct ResourceLock { } return false; } + + template + bool try_lock_for(const std::chrono::duration &timeout_duration) { + auto lock = std::unique_lock{mtx}; + if (!cv.wait_for(lock, timeout_duration, [this] { return state == UNLOCKED; })) return false; + state = UNIQUE; + return true; + } + bool try_lock_shared() { auto lock = std::unique_lock{mtx}; if (state != UNIQUE) { @@ -71,6 +80,22 @@ struct ResourceLock { } } + void upgrade_to_unique() { + auto lock = std::unique_lock{mtx}; + cv.wait(lock, [this] { return count == 1; }); + state = UNIQUE; + count = 0; + } + + template + bool try_upgrade_to_unique(const std::chrono::duration &timeout_duration) { + auto lock = std::unique_lock{mtx}; + if (!cv.wait_for(lock, timeout_duration, [this] { return count == 1; })) return false; + state = UNIQUE; + count = 0; + return true; + } + private: std::mutex mtx; std::condition_variable cv; @@ -78,4 +103,46 @@ struct ResourceLock { uint64_t count = 0; }; +struct ResourceLockGuard { + private: + enum states { UNIQUE, SHARED }; + + public: + explicit ResourceLockGuard(ResourceLock &thing) + : ptr{&thing}, state{[this]() { + ptr->lock_shared(); + return SHARED; + }()} {} + + void upgrade_to_unique() { + if (state == SHARED) { + ptr->upgrade_to_unique(); + state = UNIQUE; + } + } + + template + bool try_upgrade_to_unique(const std::chrono::duration &timeout_duration) { + if (state != SHARED) return true; // already locked + if (!ptr->try_upgrade_to_unique(timeout_duration)) return false; // timeout + state = UNIQUE; + return true; + } + + ~ResourceLockGuard() { + switch (state) { + case UNIQUE: + ptr->unlock(); + break; + case SHARED: + ptr->unlock_shared(); + break; + } + } + + private: + ResourceLock *ptr; + states state; +}; + } // namespace memgraph::utils diff --git a/src/utils/settings.cpp b/src/utils/settings.cpp index 330b43f48..4768edc42 100644 --- a/src/utils/settings.cpp +++ b/src/utils/settings.cpp @@ -27,6 +27,7 @@ void Settings::Finalize() { std::lock_guard settings_guard{settings_lock_}; storage_.reset(); on_change_callbacks_.clear(); + validations_.clear(); } void Settings::RegisterSetting(std::string name, const std::string &default_value, OnChangeCallback callback, diff --git a/src/utils/thread_pool.hpp b/src/utils/thread_pool.hpp index 8597a78a1..c6f0bb6ee 100644 --- a/src/utils/thread_pool.hpp +++ b/src/utils/thread_pool.hpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -23,6 +23,16 @@ namespace memgraph::utils { +template +struct CopyMovableFunctionWrapper { + CopyMovableFunctionWrapper(Func &&func) : func_{std::make_shared(std::move(func))} {} + + void operator()() { (*func_)(); } + + private: + std::shared_ptr func_; +}; + class ThreadPool { using TaskSignature = std::function; diff --git a/src/utils/typeinfo.hpp b/src/utils/typeinfo.hpp index 944d35fab..88202945c 100644 --- a/src/utils/typeinfo.hpp +++ b/src/utils/typeinfo.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -17,9 +17,10 @@ namespace memgraph::utils { enum class TypeId : uint64_t { + UNKNOWN = 0, + // Operators - UNKNOWN, - LOGICAL_OPERATOR, + LOGICAL_OPERATOR = 1000, ONCE, NODE_CREATION_INFO, CREATE_NODE, @@ -69,7 +70,8 @@ enum class TypeId : uint64_t { HASH_JOIN, // Replication - REP_APPEND_DELTAS_REQ, + // NOTE: these NEED to be stable in the 2000+ range (see rpc version) + REP_APPEND_DELTAS_REQ = 2000, REP_APPEND_DELTAS_RES, REP_HEARTBEAT_REQ, REP_HEARTBEAT_RES, @@ -83,9 +85,17 @@ enum class TypeId : uint64_t { REP_CURRENT_WAL_RES, REP_TIMESTAMP_REQ, REP_TIMESTAMP_RES, + REP_CREATE_DATABASE_REQ, + REP_CREATE_DATABASE_RES, + REP_DROP_DATABASE_REQ, + REP_DROP_DATABASE_RES, + REP_SYSTEM_HEARTBEAT_REQ, + REP_SYSTEM_HEARTBEAT_RES, + REP_SYSTEM_RECOVERY_REQ, + REP_SYSTEM_RECOVERY_RES, // AST - AST_LABELIX, + AST_LABELIX = 3000, AST_PROPERTYIX, AST_EDGETYPEIX, AST_TREE, @@ -191,8 +201,9 @@ enum class TypeId : uint64_t { AST_SHOW_DATABASES, AST_EDGE_IMPORT_MODE_QUERY, AST_PATTERN_COMPREHENSION, + // Symbol - SYMBOL, + SYMBOL = 4000, }; /// Type information on a C++ type. diff --git a/src/utils/uuid.cpp b/src/utils/uuid.cpp index 9b13b8965..fbcf662de 100644 --- a/src/utils/uuid.cpp +++ b/src/utils/uuid.cpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2023 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -10,8 +10,8 @@ // licenses/APL.txt. #include "utils/uuid.hpp" - #include +#include "slk/serialization.hpp" namespace memgraph::utils { @@ -24,3 +24,13 @@ std::string GenerateUUID() { } } // namespace memgraph::utils + +// Serialize UUID +namespace memgraph::slk { +void Save(const memgraph::utils::UUID &self, memgraph::slk::Builder *builder) { + const auto &arr = static_cast(self); + memgraph::slk::Save(arr, builder); +} + +void Load(memgraph::utils::UUID *self, memgraph::slk::Reader *reader) { memgraph::slk::Load(&self->uuid, reader); } +} // namespace memgraph::slk diff --git a/src/utils/uuid.hpp b/src/utils/uuid.hpp index 8bbb1a1a1..bca55d73b 100644 --- a/src/utils/uuid.hpp +++ b/src/utils/uuid.hpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2023 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -11,8 +11,22 @@ #pragma once +#include +#include +#include #include +namespace memgraph::utils { +struct UUID; +} + +namespace memgraph::slk { +class Reader; +class Builder; +void Save(const ::memgraph::utils::UUID &self, Builder *builder); +void Load(::memgraph::utils::UUID *self, Reader *reader); +} // namespace memgraph::slk + namespace memgraph::utils { /** @@ -20,4 +34,35 @@ namespace memgraph::utils { */ std::string GenerateUUID(); +struct UUID { + using arr_t = std::array; + + UUID() { uuid_generate(uuid.data()); } + explicit operator std::string() const { + auto decoded = std::array{}; + uuid_unparse(uuid.data(), decoded.data()); + return std::string{decoded.data(), UUID_STR_LEN - 1}; + } + + explicit operator arr_t() const { return uuid; } + + friend bool operator==(UUID const &, UUID const &) = default; + + private: + friend void to_json(nlohmann::json &j, const UUID &uuid); + friend void from_json(const nlohmann::json &j, UUID &uuid); + friend void ::memgraph::slk::Load(UUID *self, slk::Reader *reader); + explicit UUID(arr_t const &arr) : uuid(arr) {} + + arr_t uuid; +}; + +inline void to_json(nlohmann::json &j, const UUID &uuid) { j = nlohmann::json(uuid.uuid); } + +inline void from_json(const nlohmann::json &j, UUID &uuid) { + auto arr = UUID::arr_t{}; + j.get_to(arr); + uuid = UUID(arr); +} + } // namespace memgraph::utils diff --git a/tests/e2e/CMakeLists.txt b/tests/e2e/CMakeLists.txt index fcf7f45b6..6b14dd141 100644 --- a/tests/e2e/CMakeLists.txt +++ b/tests/e2e/CMakeLists.txt @@ -77,6 +77,10 @@ add_subdirectory(query_modules_storage_modes) add_subdirectory(garbage_collection) add_subdirectory(query_planning) +if (MG_EXPERIMENTAL_REPLICATION_MULTITENANCY) + add_subdirectory(replication_experimental) +endif () + copy_e2e_python_files(pytest_runner pytest_runner.sh "") copy_e2e_python_files(x x.sh "") file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/memgraph-selfsigned.crt DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) diff --git a/tests/e2e/analytical_mode/CMakeLists.txt b/tests/e2e/analytical_mode/CMakeLists.txt index e22830770..1756c9980 100644 --- a/tests/e2e/analytical_mode/CMakeLists.txt +++ b/tests/e2e/analytical_mode/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_analytical_mode_e2e_python_files(common.py) copy_analytical_mode_e2e_python_files(free_memory.py) + +copy_e2e_files(analytical_mode workloads.yaml) diff --git a/tests/e2e/analyze_graph/CMakeLists.txt b/tests/e2e/analyze_graph/CMakeLists.txt index 1b96eb960..0faa37caa 100644 --- a/tests/e2e/analyze_graph/CMakeLists.txt +++ b/tests/e2e/analyze_graph/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_analyze_graph_e2e_python_files(common.py) copy_analyze_graph_e2e_python_files(optimize_indexes.py) + +copy_e2e_files(analyze_graph workloads.yaml) diff --git a/tests/e2e/batched_procedures/CMakeLists.txt b/tests/e2e/batched_procedures/CMakeLists.txt index 19aacb15c..28eb46bee 100644 --- a/tests/e2e/batched_procedures/CMakeLists.txt +++ b/tests/e2e/batched_procedures/CMakeLists.txt @@ -7,3 +7,5 @@ copy_batched_procedures_e2e_python_files(conftest.py) copy_batched_procedures_e2e_python_files(simple_read.py) add_subdirectory(procedures) + +copy_e2e_files(batched_procedures workloads.yaml) diff --git a/tests/e2e/concurrent_query_modules/CMakeLists.txt b/tests/e2e/concurrent_query_modules/CMakeLists.txt index 6c92387cb..9f3996585 100644 --- 
a/tests/e2e/concurrent_query_modules/CMakeLists.txt +++ b/tests/e2e/concurrent_query_modules/CMakeLists.txt @@ -6,3 +6,5 @@ copy_concurrent_query_modules_e2e_python_files(client.py) copy_concurrent_query_modules_e2e_python_files(con_query_modules.py) add_subdirectory(test_query_modules) + +copy_e2e_files(concurrent_query_modules workloads.yaml) diff --git a/tests/e2e/configuration/CMakeLists.txt b/tests/e2e/configuration/CMakeLists.txt index 0411c70e3..b02f69639 100644 --- a/tests/e2e/configuration/CMakeLists.txt +++ b/tests/e2e/configuration/CMakeLists.txt @@ -1,7 +1,9 @@ function(copy_configuration_check_e2e_python_files FILE_NAME) - copy_e2e_python_files(write_procedures ${FILE_NAME}) + copy_e2e_python_files(configuration ${FILE_NAME}) endfunction() copy_configuration_check_e2e_python_files(default_config.py) copy_configuration_check_e2e_python_files(configuration_check.py) copy_configuration_check_e2e_python_files(storage_info.py) + +copy_e2e_files(configuration workloads.yaml) diff --git a/tests/e2e/configuration/default_config.py b/tests/e2e/configuration/default_config.py index d0029bbd7..13f137794 100644 --- a/tests/e2e/configuration/default_config.py +++ b/tests/e2e/configuration/default_config.py @@ -174,11 +174,6 @@ startup_config_dict = { "Default storage mode Memgraph uses. Allowed values: IN_MEMORY_TRANSACTIONAL, IN_MEMORY_ANALYTICAL, ON_DISK_TRANSACTIONAL", ), "storage_wal_file_size_kib": ("20480", "20480", "Minimum file size of each WAL file."), - "storage_delete_on_drop": ( - "true", - "true", - "If set to true the query 'DROP DATABASE x' will delete the underlying storage as well.", - ), "stream_transaction_conflict_retries": ( "30", "30", diff --git a/tests/e2e/constraints/CMakeLists.txt b/tests/e2e/constraints/CMakeLists.txt index 0c4ff72d9..b099dab4d 100644 --- a/tests/e2e/constraints/CMakeLists.txt +++ b/tests/e2e/constraints/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_constraint_validation_e2e_python_files(common.py) copy_constraint_validation_e2e_python_files(constraints_validation.py) + +copy_e2e_files(constraint_validation workloads.yaml) diff --git a/tests/e2e/disk_storage/CMakeLists.txt b/tests/e2e/disk_storage/CMakeLists.txt index 777277178..5e0822ef8 100644 --- a/tests/e2e/disk_storage/CMakeLists.txt +++ b/tests/e2e/disk_storage/CMakeLists.txt @@ -13,3 +13,5 @@ copy_disk_storage_e2e_python_files(snapshot_disabled.py) copy_disk_storage_e2e_python_files(lock_data_dir_disabled.py) copy_disk_storage_e2e_python_files(create_edge_from_indices.py) copy_disk_storage_e2e_python_files(storage_info.py) + +copy_e2e_files(disk_storage workloads.yaml) diff --git a/tests/e2e/fine_grained_access/CMakeLists.txt b/tests/e2e/fine_grained_access/CMakeLists.txt index 6b277694f..71a02cd4b 100644 --- a/tests/e2e/fine_grained_access/CMakeLists.txt +++ b/tests/e2e/fine_grained_access/CMakeLists.txt @@ -7,3 +7,5 @@ copy_fine_grained_access_e2e_python_files(create_delete_filtering_tests.py) copy_fine_grained_access_e2e_python_files(edge_type_filtering_tests.py) copy_fine_grained_access_e2e_python_files(path_filtering_tests.py) copy_fine_grained_access_e2e_python_files(show_db.py) + +copy_e2e_files(fine_grained_access workloads.yaml) diff --git a/tests/e2e/fine_grained_access/show_db.py b/tests/e2e/fine_grained_access/show_db.py index 546d4e24a..c5378dca6 100644 --- a/tests/e2e/fine_grained_access/show_db.py +++ b/tests/e2e/fine_grained_access/show_db.py @@ -23,13 +23,20 @@ def test_show_databases_w_user(): user3_connection = common.connect(username="user3", password="test") assert 
common.execute_and_fetch_all(admin_connection.cursor(), "SHOW DATABASES") == [ - ("db1", ""), - ("db2", ""), - ("memgraph", "*"), + ("db1",), + ("db2",), + ("memgraph",), ] - assert common.execute_and_fetch_all(user_connection.cursor(), "SHOW DATABASES") == [("db1", ""), ("memgraph", "*")] - assert common.execute_and_fetch_all(user2_connection.cursor(), "SHOW DATABASES") == [("db2", "*")] - assert common.execute_and_fetch_all(user3_connection.cursor(), "SHOW DATABASES") == [("db1", "*"), ("db2", "")] + assert common.execute_and_fetch_all(admin_connection.cursor(), "SHOW DATABASE") == [("memgraph",)] + + assert common.execute_and_fetch_all(user_connection.cursor(), "SHOW DATABASES") == [("db1",), ("memgraph",)] + assert common.execute_and_fetch_all(user_connection.cursor(), "SHOW DATABASE") == [("memgraph",)] + + assert common.execute_and_fetch_all(user2_connection.cursor(), "SHOW DATABASES") == [("db2",)] + assert common.execute_and_fetch_all(user2_connection.cursor(), "SHOW DATABASE") == [("db2",)] + + assert common.execute_and_fetch_all(user3_connection.cursor(), "SHOW DATABASES") == [("db1",), ("db2",)] + assert common.execute_and_fetch_all(user3_connection.cursor(), "SHOW DATABASE") == [("db1",)] if __name__ == "__main__": diff --git a/tests/e2e/garbage_collection/CMakeLists.txt b/tests/e2e/garbage_collection/CMakeLists.txt index 690edf344..e515247ec 100644 --- a/tests/e2e/garbage_collection/CMakeLists.txt +++ b/tests/e2e/garbage_collection/CMakeLists.txt @@ -5,3 +5,5 @@ endfunction() garbage_collection_e2e_python_files(common.py) garbage_collection_e2e_python_files(conftest.py) garbage_collection_e2e_python_files(gc_periodic.py) + +copy_e2e_files(garbage_collection workloads.yaml) diff --git a/tests/e2e/graphql/CMakeLists.txt b/tests/e2e/graphql/CMakeLists.txt index 7ad1624b6..384d534d8 100644 --- a/tests/e2e/graphql/CMakeLists.txt +++ b/tests/e2e/graphql/CMakeLists.txt @@ -8,3 +8,5 @@ copy_graphql_e2e_python_files(callable_alias_mapping.json) add_subdirectory(graphql_library_config) add_subdirectory(temporary_procedures) + +copy_e2e_files(graphql workloads.yaml) diff --git a/tests/e2e/import_mode/CMakeLists.txt b/tests/e2e/import_mode/CMakeLists.txt index e316b7e82..b48b4f12d 100644 --- a/tests/e2e/import_mode/CMakeLists.txt +++ b/tests/e2e/import_mode/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_import_mode_e2e_python_files(common.py) copy_import_mode_e2e_python_files(test_command.py) + +copy_e2e_files(import_mode workloads.yaml) diff --git a/tests/e2e/index_hints/CMakeLists.txt b/tests/e2e/index_hints/CMakeLists.txt index 5261baacc..38b3baef0 100644 --- a/tests/e2e/index_hints/CMakeLists.txt +++ b/tests/e2e/index_hints/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_index_hints_e2e_python_files(common.py) copy_index_hints_e2e_python_files(index_hints.py) + +copy_e2e_files(index_hints workloads.yaml) diff --git a/tests/e2e/init_file_flags/CMakeLists.txt b/tests/e2e/init_file_flags/CMakeLists.txt index 8d98898e8..50ac5e8e5 100644 --- a/tests/e2e/init_file_flags/CMakeLists.txt +++ b/tests/e2e/init_file_flags/CMakeLists.txt @@ -10,3 +10,5 @@ copy_init_file_flags_e2e_python_files(init_file_setup.py) copy_init_file_flags_e2e_python_files(init_data_file_setup.py) copy_init_file_flags_e2e_files(init_file.cypherl) + +copy_e2e_files(init_file_flags workloads.yaml) diff --git a/tests/e2e/inspect_query/CMakeLists.txt b/tests/e2e/inspect_query/CMakeLists.txt index f0dbdb7cc..4b9b3d82d 100644 --- a/tests/e2e/inspect_query/CMakeLists.txt +++ b/tests/e2e/inspect_query/CMakeLists.txt @@ -4,3 
+4,5 @@ endfunction() copy_inspect_query_e2e_python_files(common.py) copy_inspect_query_e2e_python_files(inspect_query.py) + +copy_e2e_files(inspect_query workloads.yaml) diff --git a/tests/e2e/isolation_levels/CMakeLists.txt b/tests/e2e/isolation_levels/CMakeLists.txt index a5f31a79d..1835d75df 100644 --- a/tests/e2e/isolation_levels/CMakeLists.txt +++ b/tests/e2e/isolation_levels/CMakeLists.txt @@ -2,3 +2,5 @@ find_package(gflags REQUIRED) add_executable(memgraph__e2e__isolation_levels isolation_levels.cpp) target_link_libraries(memgraph__e2e__isolation_levels gflags mgclient mg-utils mg-io Threads::Threads) + +copy_e2e_files(isolation_levels workloads.yaml) diff --git a/tests/e2e/isolation_levels/isolation_levels.cpp b/tests/e2e/isolation_levels/isolation_levels.cpp index 2ead05750..751e63594 100644 --- a/tests/e2e/isolation_levels/isolation_levels.cpp +++ b/tests/e2e/isolation_levels/isolation_levels.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -87,18 +87,13 @@ void SwitchToDB(const std::string &name, std::unique_ptr &client) { void SwitchToCleanDB(std::unique_ptr &client) { SwitchToDB("clean", client); } void SwitchToSameDB(std::unique_ptr &main, std::unique_ptr &client) { - MG_ASSERT(main->Execute("SHOW DATABASES;")); + MG_ASSERT(main->Execute("SHOW DATABASE;")); auto dbs = main->FetchAll(); MG_ASSERT(dbs, "Failed to show databases"); - for (const auto &elem : *dbs) { - MG_ASSERT(!elem.empty(), "Show databases wrong output"); - const auto &active = elem[1].ValueString(); - if (active == "*") { - const auto &name = elem[0].ValueString(); - SwitchToDB(std::string(name), client); - break; - } - } + MG_ASSERT(!dbs->empty(), "Show databases wrong output"); + MG_ASSERT(!(*dbs)[0].empty(), "Show databases wrong output"); + const auto &name = (*dbs)[0][0].ValueString(); + SwitchToDB(std::string(name), client); } void TestSnapshotIsolation(std::unique_ptr &client) { diff --git a/tests/e2e/lba_procedures/CMakeLists.txt b/tests/e2e/lba_procedures/CMakeLists.txt index 8e1ebb41b..9547ef430 100644 --- a/tests/e2e/lba_procedures/CMakeLists.txt +++ b/tests/e2e/lba_procedures/CMakeLists.txt @@ -11,3 +11,5 @@ copy_lba_procedures_e2e_python_files(read_permission_queries.py) copy_lba_procedures_e2e_python_files(update_permission_queries.py) add_subdirectory(procedures) + +copy_e2e_files(lba_procedures workloads.yaml) diff --git a/tests/e2e/load_csv/CMakeLists.txt b/tests/e2e/load_csv/CMakeLists.txt index 368915dbe..6c1ebc38a 100644 --- a/tests/e2e/load_csv/CMakeLists.txt +++ b/tests/e2e/load_csv/CMakeLists.txt @@ -11,3 +11,5 @@ copy_load_csv_e2e_files(simple.csv) copy_load_csv_e2e_python_files(load_csv_nullif.py) copy_load_csv_e2e_files(nullif.csv) + +copy_e2e_files(load_csv workloads.yaml) diff --git a/tests/e2e/magic_functions/CMakeLists.txt b/tests/e2e/magic_functions/CMakeLists.txt index 3ab627e22..0f009f635 100644 --- a/tests/e2e/magic_functions/CMakeLists.txt +++ b/tests/e2e/magic_functions/CMakeLists.txt @@ -8,3 +8,5 @@ copy_magic_functions_e2e_python_files(conftest.py) copy_magic_functions_e2e_python_files(function_example.py) add_subdirectory(functions) + +copy_e2e_files(functions workloads.yaml) diff --git a/tests/e2e/memory/CMakeLists.txt b/tests/e2e/memory/CMakeLists.txt index 3c4cdc279..256107724 100644 --- a/tests/e2e/memory/CMakeLists.txt +++ 
b/tests/e2e/memory/CMakeLists.txt @@ -49,3 +49,5 @@ target_link_libraries(memgraph__e2e__procedure_memory_limit gflags mgclient mg-u add_executable(memgraph__e2e__procedure_memory_limit_multi_proc procedure_memory_limit_multi_proc.cpp) target_link_libraries(memgraph__e2e__procedure_memory_limit_multi_proc gflags mgclient mg-utils mg-io) + +copy_e2e_files(memory workloads.yaml) diff --git a/tests/e2e/mock_api/CMakeLists.txt b/tests/e2e/mock_api/CMakeLists.txt index aa170dc62..ef5845b26 100644 --- a/tests/e2e/mock_api/CMakeLists.txt +++ b/tests/e2e/mock_api/CMakeLists.txt @@ -6,3 +6,5 @@ add_subdirectory(procedures) copy_mock_python_api_e2e_files(common.py) copy_mock_python_api_e2e_files(test_compare_mock.py) + +copy_e2e_files(mock_python_api workloads.yaml) diff --git a/tests/e2e/module_file_manager/CMakeLists.txt b/tests/e2e/module_file_manager/CMakeLists.txt index 84d8845ff..d8eea3f9b 100644 --- a/tests/e2e/module_file_manager/CMakeLists.txt +++ b/tests/e2e/module_file_manager/CMakeLists.txt @@ -2,3 +2,5 @@ find_package(gflags REQUIRED) add_executable(memgraph__e2e__module_file_manager module_file_manager.cpp) target_link_libraries(memgraph__e2e__module_file_manager gflags mgclient mg-utils mg-io Threads::Threads) + +copy_e2e_files(module_file_manager workloads.yaml) diff --git a/tests/e2e/monitoring_server/CMakeLists.txt b/tests/e2e/monitoring_server/CMakeLists.txt index 4c2c441e2..7062e978d 100644 --- a/tests/e2e/monitoring_server/CMakeLists.txt +++ b/tests/e2e/monitoring_server/CMakeLists.txt @@ -6,3 +6,5 @@ target_link_libraries(memgraph__e2e__monitoring_server mgclient mg-utils json gf add_executable(memgraph__e2e__monitoring_server_ssl monitoring_ssl.cpp) target_link_libraries(memgraph__e2e__monitoring_server_ssl mgclient mg-utils json gflags Boost::headers) + +copy_e2e_files(monitoring_server workloads.yaml) diff --git a/tests/e2e/python_query_modules_reloading/CMakeLists.txt b/tests/e2e/python_query_modules_reloading/CMakeLists.txt index ee8f29f90..27320e91b 100644 --- a/tests/e2e/python_query_modules_reloading/CMakeLists.txt +++ b/tests/e2e/python_query_modules_reloading/CMakeLists.txt @@ -6,3 +6,5 @@ copy_query_modules_reloading_procedures_e2e_python_files(common.py) copy_query_modules_reloading_procedures_e2e_python_files(test_reload_query_module.py) add_subdirectory(procedures) + +copy_e2e_files(python_query_modules_reloading workloads.yaml) diff --git a/tests/e2e/queries/CMakeLists.txt b/tests/e2e/queries/CMakeLists.txt index f672b8591..720599a18 100644 --- a/tests/e2e/queries/CMakeLists.txt +++ b/tests/e2e/queries/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_queries_e2e_python_files(common.py) copy_queries_e2e_python_files(queries.py) + +copy_e2e_files(queries workloads.yaml) diff --git a/tests/e2e/query_modules/CMakeLists.txt b/tests/e2e/query_modules/CMakeLists.txt index a97bbf1a5..3af2b80b6 100644 --- a/tests/e2e/query_modules/CMakeLists.txt +++ b/tests/e2e/query_modules/CMakeLists.txt @@ -7,3 +7,5 @@ copy_query_modules_e2e_python_files(conftest.py) copy_query_modules_e2e_python_files(convert_test.py) copy_query_modules_e2e_python_files(mgps_test.py) copy_query_modules_e2e_python_files(schema_test.py) + +copy_e2e_files(query_modules workloads.yaml) diff --git a/tests/e2e/replication/CMakeLists.txt b/tests/e2e/replication/CMakeLists.txt index 39f179a3d..aa96226f9 100644 --- a/tests/e2e/replication/CMakeLists.txt +++ b/tests/e2e/replication/CMakeLists.txt @@ -17,3 +17,5 @@ copy_e2e_python_files(replication_show edge_delete.py) 
copy_e2e_python_files_from_parent_folder(replication_show ".." memgraph.py) copy_e2e_python_files_from_parent_folder(replication_show ".." interactive_mg_runner.py) copy_e2e_python_files_from_parent_folder(replication_show ".." mg_utils.py) + +copy_e2e_files(replication workloads.yaml) diff --git a/tests/e2e/replication/constraints.cpp b/tests/e2e/replication/constraints.cpp index 01c1217f2..6f7e2991a 100644 --- a/tests/e2e/replication/constraints.cpp +++ b/tests/e2e/replication/constraints.cpp @@ -49,7 +49,7 @@ int main(int argc, char **argv) { const auto label_name = (*data)[0][1].ValueString(); const auto property_name = (*data)[0][2].ValueList()[0].ValueString(); if (label_name != "Node" || property_name != "id") { - LOG_FATAL("{} does NOT hava valid constraint created.", database_endpoint); + LOG_FATAL("{} does NOT have a valid constraint created.", database_endpoint); } } else { LOG_FATAL("Unable to get CONSTRAINT INFO from {}", database_endpoint); diff --git a/tests/e2e/replication/show_while_creating_invalid_state.py b/tests/e2e/replication/show_while_creating_invalid_state.py index a94310f0a..8da0c560a 100644 --- a/tests/e2e/replication/show_while_creating_invalid_state.py +++ b/tests/e2e/replication/show_while_creating_invalid_state.py @@ -308,7 +308,7 @@ def test_basic_recovery(connection): "--bolt-port", "7687", "--log-level=TRACE", - "--storage-recover-on-startup=true", + "--data-recovery-on-startup=true", "--replication-restore-state-on-startup=true", ], "log_file": "main.log", diff --git a/tests/e2e/replication_experimental/CMakeLists.txt b/tests/e2e/replication_experimental/CMakeLists.txt new file mode 100644 index 000000000..cd6e09f38 --- /dev/null +++ b/tests/e2e/replication_experimental/CMakeLists.txt @@ -0,0 +1,10 @@ +find_package(gflags REQUIRED) + +copy_e2e_python_files(replication_experiment common.py) +copy_e2e_python_files(replication_experiment conftest.py) +copy_e2e_python_files(replication_experiment multitenancy.py) +copy_e2e_python_files_from_parent_folder(replication_experiment ".." memgraph.py) +copy_e2e_python_files_from_parent_folder(replication_experiment ".." interactive_mg_runner.py) +copy_e2e_python_files_from_parent_folder(replication_experiment ".." mg_utils.py) + +copy_e2e_files(replication_experiment workloads.yaml) diff --git a/tests/e2e/replication_experimental/common.py b/tests/e2e/replication_experimental/common.py new file mode 100644 index 000000000..dc104d628 --- /dev/null +++ b/tests/e2e/replication_experimental/common.py @@ -0,0 +1,25 @@ +# Copyright 2022 Memgraph Ltd. +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +# License, and you may not use this file except in compliance with the Business Source License. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. 
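+# Shared helpers for the multitenancy replication e2e tests: execute_and_fetch_all() +# runs a query through an mgclient cursor and returns all rows, and connect() opens +# an autocommit Bolt connection used by the test fixtures.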
+ +import typing + +import mgclient + + +def execute_and_fetch_all(cursor: mgclient.Cursor, query: str, params: dict = {}) -> typing.List[tuple]: + cursor.execute(query, params) + return cursor.fetchall() + + +def connect(**kwargs) -> mgclient.Connection: + connection = mgclient.connect(**kwargs) + connection.autocommit = True + return connection diff --git a/tests/e2e/replication_experimental/conftest.py b/tests/e2e/replication_experimental/conftest.py new file mode 100644 index 000000000..f91333cbf --- /dev/null +++ b/tests/e2e/replication_experimental/conftest.py @@ -0,0 +1,33 @@ +# Copyright 2022 Memgraph Ltd. +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +# License, and you may not use this file except in compliance with the Business Source License. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +import pytest +from common import connect, execute_and_fetch_all + + +@pytest.fixture(scope="function") +def connection(): + connection_holder = None + role_holder = None + + def inner_connection(port, role): + nonlocal connection_holder, role_holder + connection_holder = connect(host="localhost", port=port) + role_holder = role + return connection_holder + + yield inner_connection + + # Only main instance can be cleaned up because replicas do NOT accept + # writes. + if role_holder == "main": + cursor = connection_holder.cursor() + execute_and_fetch_all(cursor, "MATCH (n) DETACH DELETE n;") diff --git a/tests/e2e/replication_experimental/multitenancy.py b/tests/e2e/replication_experimental/multitenancy.py new file mode 100644 index 000000000..7eb699341 --- /dev/null +++ b/tests/e2e/replication_experimental/multitenancy.py @@ -0,0 +1,1046 @@ +# Copyright 2022 Memgraph Ltd. +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +# License, and you may not use this file except in compliance with the Business Source License. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. 
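+# End-to-end tests for replication of multi-tenant (multi-database) workloads: each test +# starts a MAIN instance with one SYNC replica (replica_1) and one ASYNC replica (replica_2) +# via interactive_mg_runner, using the Bolt and replication ports defined below.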
+ +import atexit +import os +import shutil +import sys +import tempfile +import time +from functools import partial + +import interactive_mg_runner +import mgclient +import pytest +from common import execute_and_fetch_all +from mg_utils import mg_sleep_and_assert + +interactive_mg_runner.SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +interactive_mg_runner.PROJECT_DIR = os.path.normpath( + os.path.join(interactive_mg_runner.SCRIPT_DIR, "..", "..", "..", "..") +) +interactive_mg_runner.BUILD_DIR = os.path.normpath(os.path.join(interactive_mg_runner.PROJECT_DIR, "build")) +interactive_mg_runner.MEMGRAPH_BINARY = os.path.normpath(os.path.join(interactive_mg_runner.BUILD_DIR, "memgraph")) + +BOLT_PORTS = {"main": 7687, "replica_1": 7688, "replica_2": 7689} +REPLICATION_PORTS = {"replica_1": 10001, "replica_2": 10002} + +MEMGRAPH_INSTANCES_DESCRIPTION = { + "replica_1": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_1']}", "--log-level=TRACE"], + "log_file": "replica1.log", + "setup_queries": [f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};"], + }, + "replica_2": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_2']}", "--log-level=TRACE"], + "log_file": "replica2.log", + "setup_queries": [f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_2']};"], + }, + "main": { + "args": ["--bolt-port", f"{BOLT_PORTS['main']}", "--log-level=TRACE"], + "log_file": "main.log", + "setup_queries": [ + f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';", + f"REGISTER REPLICA replica_2 ASYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_2']}';", + ], + }, +} + +TEMP_DIR = tempfile.TemporaryDirectory().name + +MEMGRAPH_INSTANCES_DESCRIPTION_WITH_RECOVERY = { + "replica_1": { + "args": [ + "--bolt-port", + f"{BOLT_PORTS['replica_1']}", + "--log-level=TRACE", + "--replication-restore-state-on-startup", + "--data-recovery-on-startup", + ], + "log_file": "replica1.log", + "data_directory": TEMP_DIR + "/replica1", + }, + "replica_2": { + "args": [ + "--bolt-port", + f"{BOLT_PORTS['replica_2']}", + "--log-level=TRACE", + "--replication-restore-state-on-startup", + "--data-recovery-on-startup", + ], + "log_file": "replica2.log", + "data_directory": TEMP_DIR + "/replica2", + }, + "main": { + "args": [ + "--bolt-port", + f"{BOLT_PORTS['main']}", + "--log-level=TRACE", + "--replication-restore-state-on-startup", + "--data-recovery-on-startup", + ], + "log_file": "main.log", + "data_directory": TEMP_DIR + "/main", + }, +} + + +def safe_execute(function, *args): + try: + function(*args) + except: + pass + + +def setup_replication(connection): + # Setup replica1 + cursor = connection(BOLT_PORTS["replica_1"], "replica").cursor() + execute_and_fetch_all(cursor, f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};") + # Setup replica2 + cursor = connection(BOLT_PORTS["replica_2"], "replica").cursor() + execute_and_fetch_all(cursor, f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_2']};") + # Setup main + cursor = connection(BOLT_PORTS["main"], "main").cursor() + execute_and_fetch_all(cursor, f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';") + execute_and_fetch_all(cursor, f"REGISTER REPLICA replica_2 ASYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_2']}';") + + +def setup_main(main_cursor): + execute_and_fetch_all(main_cursor, "USE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'A'});") + execute_and_fetch_all(main_cursor, "CREATE 
(:Node)-[:EDGE]->(:Node)") + execute_and_fetch_all(main_cursor, "CREATE (:Node)-[:EDGE]->(:Node)") + execute_and_fetch_all(main_cursor, "CREATE (:Node)-[:EDGE]->(:Node)") + + execute_and_fetch_all(main_cursor, "USE DATABASE B;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'B'});") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'B'});") + + +def show_replicas_func(cursor, db_name): + def func(): + execute_and_fetch_all(cursor, f"USE DATABASE {db_name};") + return set(execute_and_fetch_all(cursor, "SHOW REPLICAS;")) + + return func + + +def show_databases_func(cursor): + def func(): + return execute_and_fetch_all(cursor, "SHOW DATABASES;") + + return func + + +def get_number_of_nodes_func(cursor, db_name): + def func(): + execute_and_fetch_all(cursor, f"USE DATABASE {db_name};") + return execute_and_fetch_all(cursor, "MATCH (n) RETURN count(*);")[0][0] + + return func + + +def get_number_of_edges_func(cursor, db_name): + def func(): + execute_and_fetch_all(cursor, f"USE DATABASE {db_name};") + return execute_and_fetch_all(cursor, "MATCH ()-[r]->() RETURN count(*);")[0][0] + + return func + + +def test_manual_databases_create_multitenancy_replication(connection): + # Goal: to show that replication can be established against REPLICA which already + # has the clean databases we need + # 0/ MAIN CREATE DATABASE A + B + # REPLICA CREATE DATABASE A + B + # Setup replication + # 1/ Write to MAIN A, Write to MAIN B + # 2/ Validate replication of changes to A + B have arrived at REPLICA + + MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL = { + "replica_1": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_1']}", "--log-level=TRACE"], + "log_file": "replica1.log", + "setup_queries": [ + "CREATE DATABASE A;", + "CREATE DATABASE B;", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};", + ], + }, + "replica_2": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_2']}", "--log-level=TRACE"], + "log_file": "replica2.log", + "setup_queries": [ + "CREATE DATABASE A;", + "CREATE DATABASE B;", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_2']};", + ], + }, + "main": { + "args": ["--bolt-port", f"{BOLT_PORTS['main']}", "--log-level=TRACE"], + "log_file": "main.log", + "setup_queries": [ + "CREATE DATABASE A;", + "CREATE DATABASE B;", + f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';", + f"REGISTER REPLICA replica_2 ASYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_2']}';", + ], + }, + } + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL) + cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(cursor, "USE DATABASE A;") + execute_and_fetch_all(cursor, "CREATE ();") + execute_and_fetch_all(cursor, "USE DATABASE B;") + execute_and_fetch_all(cursor, "CREATE ()-[:EDGE]->();") + + # 2/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(cursor, "A")) + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(cursor, "B")) + + cursor_replica = connection(BOLT_PORTS["replica_1"], "replica").cursor() + assert get_number_of_nodes_func(cursor_replica, "A")() == 1 + 
assert get_number_of_edges_func(cursor_replica, "A")() == 0 + assert get_number_of_nodes_func(cursor_replica, "B")() == 2 + assert get_number_of_edges_func(cursor_replica, "B")() == 1 + + cursor_replica2 = connection(BOLT_PORTS["replica_2"], "replica_2").cursor() + assert get_number_of_nodes_func(cursor_replica2, "A")() == 1 + assert get_number_of_edges_func(cursor_replica2, "A")() == 0 + assert get_number_of_nodes_func(cursor_replica2, "B")() == 2 + assert get_number_of_edges_func(cursor_replica2, "B")() == 1 + + +def test_manual_databases_create_multitenancy_replication_branching(connection): + # Goal: to show that replication can be established against REPLICA which already + # has all the databases and the same data + # 0/ MAIN CREATE DATABASE A + B and fill with data + # REPLICA CREATE DATABASE A + B and fill with exact data + # Setup REPLICA + # 1/ Registering REPLICA on MAIN should not fail due to tenant branching + + MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL = { + "replica_1": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_1']}", "--log-level=TRACE"], + "log_file": "replica1.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE ()", + "CREATE DATABASE B;", + "USE DATABASE B;", + "CREATE ()-[:EDGE]->()", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};", + ], + }, + "replica_2": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_2']}", "--log-level=TRACE"], + "log_file": "replica2.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE ()", + "CREATE DATABASE B;", + "USE DATABASE B;", + "CREATE ()-[:EDGE]->()", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_2']};", + ], + }, + "main": { + "args": ["--bolt-port", f"{BOLT_PORTS['main']}", "--log-level=TRACE"], + "log_file": "main.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE ()", + "CREATE DATABASE B;", + "USE DATABASE B;", + "CREATE ()-[:EDGE]->()", + ], + }, + } + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL) + cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + failed = False + try: + execute_and_fetch_all( + cursor, f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';" + ) + except mgclient.DatabaseError: + failed = True + assert not failed + + try: + execute_and_fetch_all( + cursor, f"REGISTER REPLICA replica_2 ASYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_2']}';" + ) + except mgclient.DatabaseError: + failed = True + assert not failed + + +def test_manual_databases_create_multitenancy_replication_dirty_replica(connection): + # Goal: to show that replication can be established against REPLICA which already + # has all the databases we need, even when they branched + # 0/ MAIN CREATE DATABASE A + # REPLICA CREATE DATABASE A + # REPLICA write to A + # Setup REPLICA + # 1/ Register replica; should not fail + + MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL = { + "replica_1": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_1']}", "--log-level=TRACE"], + "log_file": "replica1.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE (:Node{from:'A'})", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};", + ], + }, + "replica_2": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_2']}", "--log-level=TRACE"], + "log_file": "replica2.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE (:Node{from:'A'})", + f"SET REPLICATION ROLE TO REPLICA 
WITH PORT {REPLICATION_PORTS['replica_2']};", + ], + }, + "main": { + "args": ["--bolt-port", f"{BOLT_PORTS['main']}", "--log-level=TRACE"], + "log_file": "main.log", + "setup_queries": [ + "CREATE DATABASE A;", + ], + }, + } + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL) + cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + failed = False + try: + execute_and_fetch_all( + cursor, f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';" + ) + except mgclient.DatabaseError: + failed = True + assert not failed + + try: + execute_and_fetch_all( + cursor, f"REGISTER REPLICA replica_2 ASYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_2']}';" + ) + except mgclient.DatabaseError: + failed = True + assert not failed + + +def test_manual_databases_create_multitenancy_replication_main_behind(connection): + # Goal: to show that replication can be established against REPLICA which has + # different branched databases + # 0/ REPLICA CREATE DATABASE A + # REPLICA write to A + # Setup replication + # 1/ MAIN CREATE DATABASE A + # 2/ Check that database has been replicated + + MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL = { + "replica_1": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_1']}", "--log-level=TRACE"], + "log_file": "replica1.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE (:Node{from:'A'})", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};", + ], + }, + "replica_2": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_2']}", "--log-level=TRACE"], + "log_file": "replica2.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE (:Node{from:'A'})", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_2']};", + ], + }, + "main": { + "args": ["--bolt-port", f"{BOLT_PORTS['main']}", "--log-level=TRACE"], + "log_file": "main.log", + "setup_queries": [ + f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';", + f"REGISTER REPLICA replica_2 ASYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_2']}';", + ], + }, + } + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + + # 2/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 0, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 0, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + databases_on_main = show_databases_func(main_cursor)() + + replica_cursor = connection(BOLT_PORTS["replica_1"], "replica").cursor() + mg_sleep_and_assert(databases_on_main, show_databases_func(replica_cursor)) + + replica_cursor = connection(BOLT_PORTS["replica_2"], "replica").cursor() + mg_sleep_and_assert(databases_on_main, show_databases_func(replica_cursor)) + + +def test_automatic_databases_create_multitenancy_replication(connection): + # Goal: to show that replication can be established against REPLICA where a new databases + # needs replication + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A + # 2/ Write to MAIN A + # 3/ Validate replication of changes to A have arrived at REPLICA + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") 
+ execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + execute_and_fetch_all(main_cursor, "USE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'A'});") + execute_and_fetch_all(main_cursor, "CREATE (:Node)-[:EDGE]->(:Node)") + execute_and_fetch_all(main_cursor, "CREATE (:Node)-[:EDGE]->(:Node)") + execute_and_fetch_all(main_cursor, "CREATE (:Node)-[:EDGE]->(:Node)") + + # 3/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 7, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 7, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 0, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 0, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "B")) + + cursor_replica = connection(BOLT_PORTS["replica_1"], "replica").cursor() + assert get_number_of_nodes_func(cursor_replica, "A")() == 7 + assert get_number_of_edges_func(cursor_replica, "A")() == 3 + assert get_number_of_nodes_func(cursor_replica, "B")() == 0 + assert get_number_of_edges_func(cursor_replica, "B")() == 0 + + cursor_replica = connection(BOLT_PORTS["replica_2"], "replica").cursor() + assert get_number_of_nodes_func(cursor_replica, "A")() == 7 + assert get_number_of_edges_func(cursor_replica, "A")() == 3 + assert get_number_of_nodes_func(cursor_replica, "B")() == 0 + assert get_number_of_edges_func(cursor_replica, "B")() == 0 + + +def test_automatic_databases_multitenancy_replication_predefined(connection): + # Goal: to show that replication can be established against REPLICA which doesn't + # have any additional databases; MAIN's database clean at registration time + # 0/ MAIN CREATE DATABASE A + B + # Setup replication + # 1/ Write to MAIN A, Write to MAIN B + # 2/ Validate replication of changes to A + B have arrived at REPLICA + + MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL = { + "replica_1": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_1']}", "--log-level=TRACE"], + "log_file": "replica1.log", + "setup_queries": [ + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};", + ], + }, + "main": { + "args": ["--bolt-port", f"{BOLT_PORTS['main']}", "--log-level=TRACE"], + "log_file": "main.log", + "setup_queries": [ + "CREATE DATABASE A;", + "CREATE DATABASE B;", + f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';", + ], + }, + } + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL) + cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(cursor, "USE DATABASE A;") + execute_and_fetch_all(cursor, "CREATE ();") + execute_and_fetch_all(cursor, "USE DATABASE B;") + execute_and_fetch_all(cursor, "CREATE ()-[:EDGE]->();") + + # 2/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(cursor, "A")) + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(cursor, "B")) + + cursor_replica = connection(BOLT_PORTS["replica_1"], "replica").cursor() + assert get_number_of_nodes_func(cursor_replica, "A")() == 1 + assert get_number_of_edges_func(cursor_replica, "A")() == 0 + + +def 
test_automatic_databases_create_multitenancy_replication_dirty_main(connection): + # Goal: to show that replication can be established against REPLICA which doesn't + # have any additional databases; MAIN's database dirty at registration time + # 0/ MAIN CREATE DATABASE A + # MAIN write to A + # Setup replication + # 1/ Validate + + MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL = { + "replica_1": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_1']}", "--log-level=TRACE"], + "log_file": "replica1.log", + "setup_queries": [ + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};", + ], + }, + "main": { + "args": ["--bolt-port", f"{BOLT_PORTS['main']}", "--log-level=TRACE"], + "log_file": "main.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE (:Node{from:'A'})", + f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';", + ], + }, + } + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL) + cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(cursor, "A")) + + cursor_replica = connection(BOLT_PORTS["replica_1"], "replica").cursor() + execute_and_fetch_all(cursor_replica, "USE DATABASE A;") + actual_data = execute_and_fetch_all(cursor_replica, "MATCH (n) RETURN count(*);") + assert actual_data[0][0] == 1 # one node + actual_data = execute_and_fetch_all(cursor_replica, "MATCH ()-[r]->() RETURN count(*);") + assert actual_data[0][0] == 0 # zero relationships + + +@pytest.mark.parametrize("replica_name", [("replica_1"), ("replica_2")]) +def test_multitenancy_replication_restart_replica_w_fc(connection, replica_name): + # Goal: show that a replica can be recovered with the frequent checker + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A and B + # 2/ Write on MAIN to A and B + # 3/ Restart replica + # 4/ Validate data on replica + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + setup_main(main_cursor) + + # 3/ + interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, replica_name) + time.sleep(3) # In order for the frequent check to run + # Check that the FC did invalidate + expected_data = { + "replica_1": { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 0, 0, "invalid"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 7, 0, "ready"), + }, + "replica_2": { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 7, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 0, 0, "invalid"), + }, + } + assert expected_data[replica_name] == show_replicas_func(main_cursor, "A")() + # Restart + interactive_mg_runner.start(MEMGRAPH_INSTANCES_DESCRIPTION, replica_name) + + # 4/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 7, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 7, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 3, 0, "ready"), + ("replica_2", 
f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 3, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "B")) + + cursor_replica = connection(BOLT_PORTS[replica_name], "replica").cursor() + + assert get_number_of_nodes_func(cursor_replica, "A")() == 7 + assert get_number_of_edges_func(cursor_replica, "A")() == 3 + assert get_number_of_nodes_func(cursor_replica, "B")() == 2 + assert get_number_of_edges_func(cursor_replica, "B")() == 0 + + +@pytest.mark.parametrize("replica_name", [("replica_1"), ("replica_2")]) +def test_multitenancy_replication_restart_replica_wo_fc(connection, replica_name): + # Goal: show that a replica can be recovered without the frequent checker detecting it being down + # needs replicating over + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A and B + # 2/ Write on MAIN to A and B + # 3/ Restart replica + # 4/ Validate data on replica + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + setup_main(main_cursor) + + # 3/ + interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, replica_name) + interactive_mg_runner.start(MEMGRAPH_INSTANCES_DESCRIPTION, replica_name) + + # 4/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 7, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 7, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 3, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 3, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "B")) + + cursor_replica = connection(BOLT_PORTS[replica_name], "replica").cursor() + assert get_number_of_nodes_func(cursor_replica, "A")() == 7 + assert get_number_of_edges_func(cursor_replica, "A")() == 3 + assert get_number_of_nodes_func(cursor_replica, "B")() == 2 + assert get_number_of_edges_func(cursor_replica, "B")() == 0 + + +@pytest.mark.parametrize("replica_name", [("replica_1"), ("replica_2")]) +def test_multitenancy_replication_restart_replica_w_fc_w_rec(connection, replica_name): + # Goal: show that a replica recovers data on reconnect + # needs replicating over + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A and B + # 2/ Write on MAIN to A and B + # 3/ Restart replica + # 4/ Validate data on replica + + # 0/ + # Tmp dir should already be removed, but sometimes its not... 
+ safe_execute(shutil.rmtree, TEMP_DIR) + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_WITH_RECOVERY) + setup_replication(connection) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + setup_main(main_cursor) + + # 3/ + interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION_WITH_RECOVERY, replica_name) + safe_execute(execute_and_fetch_all, main_cursor, "USE DATABASE A;") + safe_execute(execute_and_fetch_all, main_cursor, "CREATE (:Node{on:'A'});") + safe_execute(execute_and_fetch_all, main_cursor, "USE DATABASE B;") + safe_execute(execute_and_fetch_all, main_cursor, "CREATE (:Node{on:'B'});") + interactive_mg_runner.start(MEMGRAPH_INSTANCES_DESCRIPTION_WITH_RECOVERY, replica_name) + + # 4/ + cursor_replica = connection(BOLT_PORTS[replica_name], "replica").cursor() + + mg_sleep_and_assert(8, get_number_of_nodes_func(cursor_replica, "A")) + mg_sleep_and_assert(3, get_number_of_edges_func(cursor_replica, "A")) + + mg_sleep_and_assert(3, get_number_of_nodes_func(cursor_replica, "B")) + mg_sleep_and_assert(0, get_number_of_edges_func(cursor_replica, "B")) + + +@pytest.mark.parametrize("replica_name", [("replica_1"), ("replica_2")]) +def test_multitenancy_replication_drop_replica(connection, replica_name): + # Goal: show that the cluster can recover if a replica is dropped and registered again + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A and B + # 2/ Write on MAIN to A and B + # 3/ Drop and add the same replica + # 4/ Validate data on replica + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + setup_main(main_cursor) + + # 3/ + execute_and_fetch_all(main_cursor, f"DROP REPLICA {replica_name};") + sync = {"replica_1": "SYNC", "replica_2": "ASYNC"} + execute_and_fetch_all( + main_cursor, + f"REGISTER REPLICA {replica_name} {sync[replica_name]} TO '127.0.0.1:{REPLICATION_PORTS[replica_name]}';", + ) + + # 4/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 7, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 7, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 3, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 3, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "B")) + + cursor_replica = connection(BOLT_PORTS[replica_name], "replica").cursor() + assert get_number_of_nodes_func(cursor_replica, "A")() == 7 + assert get_number_of_edges_func(cursor_replica, "A")() == 3 + assert get_number_of_nodes_func(cursor_replica, "B")() == 2 + assert get_number_of_edges_func(cursor_replica, "B")() == 0 + + +def test_multitenancy_replication_restart_main(connection): + # Goal: show that the cluster can restore to a correct state if the MAIN restarts + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A and B + # 2/ Write on MAIN to A and B + # 3/ Restart main and write new data + # 4/ Validate data on replica + + # 0/ + # Tmp dir should already be removed, but sometimes its not... 
+ safe_execute(shutil.rmtree, TEMP_DIR) + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_WITH_RECOVERY) + setup_replication(connection) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + setup_main(main_cursor) + + # 3/ + interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION_WITH_RECOVERY, "main") + interactive_mg_runner.start(MEMGRAPH_INSTANCES_DESCRIPTION_WITH_RECOVERY, "main") + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + execute_and_fetch_all(main_cursor, "USE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'A'});") + execute_and_fetch_all(main_cursor, "USE DATABASE B;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'B'});") + + # 4/ + cursor_replica = connection(BOLT_PORTS["replica_1"], "replica").cursor() + execute_and_fetch_all(cursor_replica, "USE DATABASE A;") + assert get_number_of_nodes_func(cursor_replica, "A")() == 8 + assert get_number_of_edges_func(cursor_replica, "A")() == 3 + assert get_number_of_nodes_func(cursor_replica, "B")() == 3 + assert get_number_of_edges_func(cursor_replica, "B")() == 0 + + cursor_replica = connection(BOLT_PORTS["replica_2"], "replica").cursor() + execute_and_fetch_all(cursor_replica, "USE DATABASE A;") + assert get_number_of_nodes_func(cursor_replica, "A")() == 8 + assert get_number_of_edges_func(cursor_replica, "A")() == 3 + assert get_number_of_nodes_func(cursor_replica, "B")() == 3 + assert get_number_of_edges_func(cursor_replica, "B")() == 0 + + +def test_automatic_databases_drop_multitenancy_replication(connection): + # Goal: show that drop database can be replicated + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A + # 2/ Write to MAIN A + # 3/ Validate replication of changes to A have arrived at REPLICA + # 4/ DROP DATABASE A/B + # 5/ Check that the drop replicated + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + execute_and_fetch_all(main_cursor, "USE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'A'});") + + # 3/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 0, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 0, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "B")) + + # 4/ + execute_and_fetch_all(main_cursor, "USE DATABASE memgraph;") + execute_and_fetch_all(main_cursor, "DROP DATABASE A;") + execute_and_fetch_all(main_cursor, "DROP DATABASE B;") + + # 5/ + databases_on_main = show_databases_func(main_cursor)() + + replica_cursor = connection(BOLT_PORTS["replica_1"], "replica").cursor() + mg_sleep_and_assert(databases_on_main, show_databases_func(replica_cursor)) + + replica_cursor = connection(BOLT_PORTS["replica_2"], "replica").cursor() + mg_sleep_and_assert(databases_on_main, show_databases_func(replica_cursor)) + + +@pytest.mark.parametrize("replica_name", [("replica_1"), 
("replica_2")]) +def test_drop_multitenancy_replication_restart_replica(connection, replica_name): + # Goal: show that the drop database can be restored + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A and B + # 2/ Write on MAIN to A and B + # 3/ Restart SYNC replica and drop database + # 4/ Validate data on replica + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + setup_main(main_cursor) + + # 3/ + interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, replica_name) + execute_and_fetch_all(main_cursor, "USE DATABASE memgraph;") + execute_and_fetch_all(main_cursor, "DROP DATABASE B;") + interactive_mg_runner.start(MEMGRAPH_INSTANCES_DESCRIPTION, replica_name) + + # 4/ + databases_on_main = show_databases_func(main_cursor)() + + replica_cursor = connection(BOLT_PORTS["replica_1"], "replica").cursor() + mg_sleep_and_assert(databases_on_main, show_databases_func(replica_cursor)) + + replica_cursor = connection(BOLT_PORTS["replica_2"], "replica").cursor() + mg_sleep_and_assert(databases_on_main, show_databases_func(replica_cursor)) + + +def test_multitenancy_drop_while_replica_using(connection): + # Goal: show that the replica can handle a transaction on a database being dropped (will persist until tx finishes) + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A + # 2/ Write to MAIN A + # 3/ Validate replication of changes to A have arrived at REPLICA + # 4/ Start A transaction on replica 1, Use A on replica2 + # 5/ Check that the drop replicated + # 6/ Validate that the transaction is still active and working and that the replica2 is not pointing to anything + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + + # 2/ + execute_and_fetch_all(main_cursor, "USE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'A'});") + + # 3/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + # 4/ + replica1_cursor = connection(BOLT_PORTS["replica_1"], "replica").cursor() + replica2_cursor = connection(BOLT_PORTS["replica_2"], "replica").cursor() + + execute_and_fetch_all(replica1_cursor, "USE DATABASE A;") + execute_and_fetch_all(replica1_cursor, "BEGIN") + execute_and_fetch_all(replica2_cursor, "USE DATABASE A;") + + execute_and_fetch_all(main_cursor, "USE DATABASE memgraph;") + execute_and_fetch_all(main_cursor, "DROP DATABASE A;") + + # 5/ + # TODO Remove this once there is a replica state for the system + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + execute_and_fetch_all(main_cursor, "USE DATABASE B;") + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 0, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 0, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "B")) + + # 6/ + assert execute_and_fetch_all(replica1_cursor, "MATCH(n) RETURN count(*);")[0][0] == 1 + execute_and_fetch_all(replica1_cursor, "COMMIT") + failed = False + try: + 
execute_and_fetch_all(replica1_cursor, "MATCH(n) RETURN n;") + except mgclient.DatabaseError: + failed = True + assert failed + + failed = False + try: + execute_and_fetch_all(replica2_cursor, "MATCH(n) RETURN n;") + except mgclient.DatabaseError: + failed = True + assert failed + + +def test_multitenancy_drop_and_recreate_while_replica_using(connection): + # Goal: show that the replica can handle a transaction on a database being dropped and the same name reused + # Original storage should persist in a nameless state until tx is over + # needs replicating over + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A + # 2/ Write to MAIN A + # 3/ Validate replication of changes to A have arrived at REPLICA + # 4/ Start A transaction on replica 1, Use A on replica2 + # 5/ Check that the drop/create replicated + # 6/ Validate that the transaction is still active and working and that the replica2 is not pointing to anything + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + + # 2/ + execute_and_fetch_all(main_cursor, "USE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'A'});") + + # 3/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + # 4/ + replica1_cursor = connection(BOLT_PORTS["replica_1"], "replica").cursor() + replica2_cursor = connection(BOLT_PORTS["replica_2"], "replica").cursor() + + execute_and_fetch_all(replica1_cursor, "USE DATABASE A;") + execute_and_fetch_all(replica1_cursor, "BEGIN") + execute_and_fetch_all(replica2_cursor, "USE DATABASE A;") + + execute_and_fetch_all(main_cursor, "USE DATABASE memgraph;") + execute_and_fetch_all(main_cursor, "DROP DATABASE A;") + + # 5/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "USE DATABASE A;") + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 0, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 0, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + # 6/ + assert execute_and_fetch_all(replica1_cursor, "MATCH(n) RETURN count(*);")[0][0] == 1 + execute_and_fetch_all(replica1_cursor, "COMMIT") + failed = False + try: + execute_and_fetch_all(replica1_cursor, "MATCH(n) RETURN n;") + except mgclient.DatabaseError: + failed = True + assert failed + + failed = False + try: + execute_and_fetch_all(replica2_cursor, "MATCH(n) RETURN n;") + except mgclient.DatabaseError: + failed = True + assert failed + + +if __name__ == "__main__": + interactive_mg_runner.cleanup_directories_on_exit() + sys.exit(pytest.main([__file__, "-rA"])) diff --git a/tests/e2e/replication_experimental/workloads.yaml b/tests/e2e/replication_experimental/workloads.yaml new file mode 100644 index 000000000..e48515f4f --- /dev/null +++ b/tests/e2e/replication_experimental/workloads.yaml @@ -0,0 +1,4 @@ +workloads: + - name: "Replicate multitenancy" + binary: "tests/e2e/pytest_runner.sh" + args: ["replication_experimental/multitenancy.py"] diff --git a/tests/e2e/run.sh b/tests/e2e/run.sh index 1aba6a517..88b70ae32 100755 --- a/tests/e2e/run.sh +++ b/tests/e2e/run.sh @@ -25,7 +25,7 @@ if [ "$#" -eq 0 ]; then # 
NOTE: If you want to run all tests under specific folder/section just # replace the dot (root directory below) with the folder name, e.g. # `--workloads-root-directory replication`. - python3 runner.py --workloads-root-directory . + python3 runner.py --workloads-root-directory "$SCRIPT_DIR/../../build" elif [ "$#" -eq 1 ]; then if [ "$1" == "-h" ] || [ "$1" == "--help" ]; then print_help @@ -34,7 +34,7 @@ elif [ "$#" -eq 1 ]; then # NOTE: --workload-name comes from each individual folder/section # workloads.yaml file. E.g. `streams/workloads.yaml` has a list of # `workloads:` and each workload has it's `-name`. - python3 runner.py --workloads-root-directory . --workload-name "$1" + python3 runner.py --workloads-root-directory "$SCRIPT_DIR/../../build" --workload-name "$1" else print_help fi diff --git a/tests/e2e/runner.py b/tests/e2e/runner.py index 949670d43..ae022d4d8 100755 --- a/tests/e2e/runner.py +++ b/tests/e2e/runner.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + # Copyright 2021 Memgraph Ltd. # # Use of this software is governed by the Business Source License diff --git a/tests/e2e/server/CMakeLists.txt b/tests/e2e/server/CMakeLists.txt index a408f4a2e..2e62f2035 100644 --- a/tests/e2e/server/CMakeLists.txt +++ b/tests/e2e/server/CMakeLists.txt @@ -6,3 +6,5 @@ target_link_libraries(memgraph__e2e__server_connection mgclient mg-utils gflags) add_executable(memgraph__e2e__server_ssl_connection server_ssl_connection.cpp) target_link_libraries(memgraph__e2e__server_ssl_connection mgclient mg-utils gflags) + +copy_e2e_files(server workloads.yaml) diff --git a/tests/e2e/set_properties/CMakeLists.txt b/tests/e2e/set_properties/CMakeLists.txt index 10cc03584..66a8039b7 100644 --- a/tests/e2e/set_properties/CMakeLists.txt +++ b/tests/e2e/set_properties/CMakeLists.txt @@ -6,3 +6,5 @@ copy_set_properties_e2e_python_files(common.py) copy_set_properties_e2e_python_files(set_properties.py) add_subdirectory(procedures) + +copy_e2e_files(set_properties workloads.yaml) diff --git a/tests/e2e/show_index_info/CMakeLists.txt b/tests/e2e/show_index_info/CMakeLists.txt index dd9bd28bb..b5d154355 100644 --- a/tests/e2e/show_index_info/CMakeLists.txt +++ b/tests/e2e/show_index_info/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_show_index_info_e2e_python_files(common.py) copy_show_index_info_e2e_python_files(test_show_index_info.py) + +copy_e2e_files(show_index_info workloads.yaml) diff --git a/tests/e2e/streams/CMakeLists.txt b/tests/e2e/streams/CMakeLists.txt index 3c0ffac98..cbca225f7 100644 --- a/tests/e2e/streams/CMakeLists.txt +++ b/tests/e2e/streams/CMakeLists.txt @@ -11,3 +11,5 @@ copy_streams_e2e_python_files(pulsar_streams_tests.py) add_subdirectory(transformations) copy_e2e_python_files_from_parent_folder(streams ".." 
mg_utils.py) + +copy_e2e_files(streams workloads.yaml) diff --git a/tests/e2e/temporal_types/CMakeLists.txt b/tests/e2e/temporal_types/CMakeLists.txt index aad9561fe..dac9c2000 100644 --- a/tests/e2e/temporal_types/CMakeLists.txt +++ b/tests/e2e/temporal_types/CMakeLists.txt @@ -4,3 +4,4 @@ find_package(gflags REQUIRED) add_executable(memgraph__e2e__temporal_roundtrip roundtrip.cpp) target_link_libraries(memgraph__e2e__temporal_roundtrip PUBLIC mgclient mg-utils gflags) +copy_e2e_files(temporal_roundtrip workloads.yaml) diff --git a/tests/e2e/transaction_queue/CMakeLists.txt b/tests/e2e/transaction_queue/CMakeLists.txt index 574c46bfd..f2e7db170 100644 --- a/tests/e2e/transaction_queue/CMakeLists.txt +++ b/tests/e2e/transaction_queue/CMakeLists.txt @@ -6,3 +6,5 @@ copy_query_modules_reloading_procedures_e2e_python_files(common.py) copy_query_modules_reloading_procedures_e2e_python_files(test_transaction_queue.py) add_subdirectory(procedures) + +copy_e2e_files(transaction_queue workloads.yaml) diff --git a/tests/e2e/transaction_rollback/CMakeLists.txt b/tests/e2e/transaction_rollback/CMakeLists.txt index a64d3bfeb..4b9fd289f 100644 --- a/tests/e2e/transaction_rollback/CMakeLists.txt +++ b/tests/e2e/transaction_rollback/CMakeLists.txt @@ -7,3 +7,5 @@ transaction_rollback_e2e_python_files(conftest.py) transaction_rollback_e2e_python_files(transaction.py) add_subdirectory(procedures) + +copy_e2e_files(transaction_rollback workloads.yaml) diff --git a/tests/e2e/triggers/CMakeLists.txt b/tests/e2e/triggers/CMakeLists.txt index 7b540d59f..8f5fe7676 100644 --- a/tests/e2e/triggers/CMakeLists.txt +++ b/tests/e2e/triggers/CMakeLists.txt @@ -27,3 +27,5 @@ endfunction() copy_triggers_e2e_python_files(common.py) copy_triggers_e2e_python_files(triggers_properties_false.py) + +copy_e2e_files(triggers workloads.yaml) diff --git a/tests/e2e/write_procedures/CMakeLists.txt b/tests/e2e/write_procedures/CMakeLists.txt index 27a9a73e2..f7dc2d8b3 100644 --- a/tests/e2e/write_procedures/CMakeLists.txt +++ b/tests/e2e/write_procedures/CMakeLists.txt @@ -8,3 +8,5 @@ copy_write_procedures_e2e_python_files(simple_write.py) copy_write_procedures_e2e_python_files(read_subgraph.py) add_subdirectory(procedures) + +copy_e2e_files(write_procedures workloads.yaml) diff --git a/tests/integration/telemetry/client.cpp b/tests/integration/telemetry/client.cpp index 34e1c2a67..8c32664fb 100644 --- a/tests/integration/telemetry/client.cpp +++ b/tests/integration/telemetry/client.cpp @@ -44,7 +44,7 @@ int main(int argc, char **argv) { memgraph::dbms::DbmsHandler dbms_handler(db_config #ifdef MG_ENTERPRISE , - &auth_, false, false + &auth_, false #endif ); memgraph::query::InterpreterContext interpreter_context_({}, &dbms_handler, &repl_state, &auth_handler, diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index 956cba781..8f305b91a 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -415,6 +415,9 @@ if(MG_ENTERPRISE) add_unit_test_with_custom_main(dbms_handler.cpp) target_link_libraries(${test_prefix}dbms_handler mg-query mg-auth mg-glue mg-dbms) + + add_unit_test(multi_tenancy.cpp) + target_link_libraries(${test_prefix}multi_tenancy mg-query mg-auth mg-glue mg-dbms) else() add_unit_test_with_custom_main(dbms_handler_community.cpp) target_link_libraries(${test_prefix}dbms_handler_community mg-query mg-auth mg-glue mg-dbms) diff --git a/tests/unit/bolt_encoder.cpp b/tests/unit/bolt_encoder.cpp index 83add3cd3..dd275a2fa 100644 --- a/tests/unit/bolt_encoder.cpp +++ 
b/tests/unit/bolt_encoder.cpp @@ -206,11 +206,11 @@ void TestVertexAndEdgeWithDifferentStorages(std::unique_ptr vals; - vals.push_back(*memgraph::glue::ToBoltValue(memgraph::query::TypedValue(memgraph::query::VertexAccessor(va1)), *db, - memgraph::storage::View::NEW)); - vals.push_back(*memgraph::glue::ToBoltValue(memgraph::query::TypedValue(memgraph::query::VertexAccessor(va2)), *db, - memgraph::storage::View::NEW)); - vals.push_back(*memgraph::glue::ToBoltValue(memgraph::query::TypedValue(memgraph::query::EdgeAccessor(ea)), *db, + vals.push_back(*memgraph::glue::ToBoltValue(memgraph::query::TypedValue(memgraph::query::VertexAccessor(va1)), + db.get(), memgraph::storage::View::NEW)); + vals.push_back(*memgraph::glue::ToBoltValue(memgraph::query::TypedValue(memgraph::query::VertexAccessor(va2)), + db.get(), memgraph::storage::View::NEW)); + vals.push_back(*memgraph::glue::ToBoltValue(memgraph::query::TypedValue(memgraph::query::EdgeAccessor(ea)), db.get(), memgraph::storage::View::NEW)); bolt_encoder.MessageRecord(vals); diff --git a/tests/unit/database_get_info.cpp b/tests/unit/database_get_info.cpp index 95ed3f326..8a268580e 100644 --- a/tests/unit/database_get_info.cpp +++ b/tests/unit/database_get_info.cpp @@ -108,7 +108,7 @@ TYPED_TEST(InfoTest, InfoCheck) { auto v2 = acc->CreateVertex(); auto v3 = acc->CreateVertex(); auto v4 = acc->CreateVertex(); - auto v5 = acc->CreateVertex(); + [[maybe_unused]] auto v5 = acc->CreateVertex(); ASSERT_FALSE(v2.AddLabel(lbl).HasError()); ASSERT_FALSE(v3.AddLabel(lbl).HasError()); diff --git a/tests/unit/dbms_database.cpp b/tests/unit/dbms_database.cpp index 20e1f55ac..535c0c055 100644 --- a/tests/unit/dbms_database.cpp +++ b/tests/unit/dbms_database.cpp @@ -29,7 +29,8 @@ memgraph::storage::Config default_conf(std::string name = "") { return {.durability = {.storage_directory = storage_directory / name, .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL}, - .disk = {.main_storage_directory = storage_directory / name / "disk"}}; + .disk = {.main_storage_directory = storage_directory / name / "disk"}, + .salient.name = name.empty() ? 
std::string{"memgraph"} : name}; } class DBMS_Database : public ::testing::Test { @@ -55,20 +56,21 @@ TEST_F(DBMS_Database, New) { .durability = {.storage_directory = storage_directory / "db2", .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL}, - .disk = {.main_storage_directory = storage_directory / "disk"}}; - auto db2 = db_handler.New("db2", db_config, generic_repl_state); + .disk = {.main_storage_directory = storage_directory / "disk"}, + .salient.name = "db2"}; + auto db2 = db_handler.New(db_config, generic_repl_state); ASSERT_TRUE(db2.HasValue() && db2.GetValue()); ASSERT_TRUE(std::filesystem::exists(storage_directory / "db2")); } { // With default config - auto db3 = db_handler.New("db3", default_conf("db3"), generic_repl_state); + auto db3 = db_handler.New(default_conf("db3"), generic_repl_state); ASSERT_TRUE(db3.HasValue() && db3.GetValue()); ASSERT_TRUE(std::filesystem::exists(storage_directory / "db3")); - auto db4 = db_handler.New("db4", default_conf("four"), generic_repl_state); + auto db4 = db_handler.New(default_conf("four"), generic_repl_state); ASSERT_TRUE(db4.HasValue() && db4.GetValue()); ASSERT_TRUE(std::filesystem::exists(storage_directory / "four")); - auto db5 = db_handler.New("db5", default_conf("db3"), generic_repl_state); + auto db5 = db_handler.New(default_conf("db3"), generic_repl_state); ASSERT_TRUE(db5.HasError() && db5.GetError() == memgraph::dbms::NewError::EXISTS); } @@ -77,15 +79,15 @@ TEST_F(DBMS_Database, New) { ASSERT_EQ(all.size(), 3); ASSERT_EQ(all[0], "db2"); ASSERT_EQ(all[1], "db3"); - ASSERT_EQ(all[2], "db4"); + ASSERT_EQ(all[2], "four"); } TEST_F(DBMS_Database, Get) { memgraph::dbms::DatabaseHandler db_handler; - auto db1 = db_handler.New("db1", default_conf("db1"), generic_repl_state); - auto db2 = db_handler.New("db2", default_conf("db2"), generic_repl_state); - auto db3 = db_handler.New("db3", default_conf("db3"), generic_repl_state); + auto db1 = db_handler.New(default_conf("db1"), generic_repl_state); + auto db2 = db_handler.New(default_conf("db2"), generic_repl_state); + auto db3 = db_handler.New(default_conf("db3"), generic_repl_state); ASSERT_TRUE(db1.HasValue()); ASSERT_TRUE(db2.HasValue()); @@ -107,9 +109,9 @@ TEST_F(DBMS_Database, Get) { TEST_F(DBMS_Database, Delete) { memgraph::dbms::DatabaseHandler db_handler; - auto db1 = db_handler.New("db1", default_conf("db1"), generic_repl_state); - auto db2 = db_handler.New("db2", default_conf("db2"), generic_repl_state); - auto db3 = db_handler.New("db3", default_conf("db3"), generic_repl_state); + auto db1 = db_handler.New(default_conf("db1"), generic_repl_state); + auto db2 = db_handler.New(default_conf("db2"), generic_repl_state); + auto db3 = db_handler.New(default_conf("db3"), generic_repl_state); ASSERT_TRUE(db1.HasValue()); ASSERT_TRUE(db2.HasValue()); @@ -119,7 +121,7 @@ TEST_F(DBMS_Database, Delete) { // Release accessor to storage db1.GetValue().reset(); // Delete from handler - ASSERT_TRUE(db_handler.Delete("db1")); + ASSERT_TRUE(db_handler.TryDelete("db1")); ASSERT_FALSE(db_handler.Get("db1")); auto all = db_handler.All(); std::sort(all.begin(), all.end()); @@ -129,8 +131,8 @@ TEST_F(DBMS_Database, Delete) { } { - ASSERT_THROW(db_handler.Delete("db0"), memgraph::utils::BasicException); - ASSERT_THROW(db_handler.Delete("db1"), memgraph::utils::BasicException); + ASSERT_THROW(db_handler.TryDelete("db0"), memgraph::utils::BasicException); + ASSERT_THROW(db_handler.TryDelete("db1"), memgraph::utils::BasicException); auto all = 
db_handler.All(); std::sort(all.begin(), all.end()); ASSERT_EQ(all.size(), 2); @@ -144,17 +146,18 @@ TEST_F(DBMS_Database, DeleteAndRecover) { memgraph::dbms::DatabaseHandler db_handler; { - auto db1 = db_handler.New("db1", default_conf("db1"), generic_repl_state); - auto db2 = db_handler.New("db2", default_conf("db2"), generic_repl_state); + auto db1 = db_handler.New(default_conf("db1"), generic_repl_state); + auto db2 = db_handler.New(default_conf("db2"), generic_repl_state); memgraph::storage::Config conf_w_snap{ .durability = {.storage_directory = storage_directory / "db3", .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, .snapshot_on_exit = true}, - .disk = {.main_storage_directory = storage_directory / "db3" / "disk"}}; + .disk = {.main_storage_directory = storage_directory / "db3" / "disk"}, + .salient.name = "db3"}; - auto db3 = db_handler.New("db3", conf_w_snap, generic_repl_state); + auto db3 = db_handler.New(conf_w_snap, generic_repl_state); ASSERT_TRUE(db1.HasValue()); ASSERT_TRUE(db2.HasValue()); @@ -184,23 +187,24 @@ TEST_F(DBMS_Database, DeleteAndRecover) { } // Delete from handler - ASSERT_TRUE(db_handler.Delete("db1")); - ASSERT_TRUE(db_handler.Delete("db2")); - ASSERT_TRUE(db_handler.Delete("db3")); + ASSERT_TRUE(db_handler.TryDelete("db1")); + ASSERT_TRUE(db_handler.TryDelete("db2")); + ASSERT_TRUE(db_handler.TryDelete("db3")); { // Recover graphs (only db3) - auto db1 = db_handler.New("db1", default_conf("db1"), generic_repl_state); - auto db2 = db_handler.New("db2", default_conf("db2"), generic_repl_state); + auto db1 = db_handler.New(default_conf("db1"), generic_repl_state); + auto db2 = db_handler.New(default_conf("db2"), generic_repl_state); memgraph::storage::Config conf_w_rec{ .durability = {.storage_directory = storage_directory / "db3", .recover_on_startup = true, .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL}, - .disk = {.main_storage_directory = storage_directory / "db3" / "disk"}}; + .disk = {.main_storage_directory = storage_directory / "db3" / "disk"}, + .salient.name = "db3"}; - auto db3 = db_handler.New("db3", conf_w_rec, generic_repl_state); + auto db3 = db_handler.New(conf_w_rec, generic_repl_state); // Check content { diff --git a/tests/unit/dbms_handler.cpp b/tests/unit/dbms_handler.cpp index 0ea4197fb..e0d566240 100644 --- a/tests/unit/dbms_handler.cpp +++ b/tests/unit/dbms_handler.cpp @@ -25,8 +25,23 @@ #include "query/config.hpp" #include "query/interpreter.hpp" +namespace { +std::set GetDirs(auto path) { + std::set dirs; + // Clean the unused directories + for (const auto &entry : std::filesystem::directory_iterator(path)) { + const auto &name = entry.path().filename().string(); + if (entry.is_directory() && !name.empty() && name.front() != '.') { + dirs.emplace(name); + } + } + return dirs; +} +} // namespace + // Global std::filesystem::path storage_directory{std::filesystem::temp_directory_path() / "MG_test_unit_dbms_handler"}; +std::filesystem::path db_dir{storage_directory / "databases"}; static memgraph::storage::Config storage_conf; std::unique_ptr> auth; @@ -52,7 +67,7 @@ class TestEnvironment : public ::testing::Environment { auth = std::make_unique>( storage_directory / "auth"); - ptr_ = std::make_unique(storage_conf, auth.get(), false, true); + ptr_ = std::make_unique(storage_conf, auth.get(), false); } void TearDown() override { @@ -74,7 +89,7 @@ TEST(DBMS_Handler, Init) { std::vector dirs = {"snapshots", "streams", 
"triggers", "wal"}; for (const auto &dir : dirs) ASSERT_TRUE(std::filesystem::exists(storage_directory / dir)) << (storage_directory / dir); - const auto db_path = storage_directory / "databases" / memgraph::dbms::kDefaultDB; + const auto db_path = db_dir / memgraph::dbms::kDefaultDB; ASSERT_TRUE(std::filesystem::exists(db_path)); for (const auto &dir : dirs) { std::error_code ec; @@ -92,10 +107,14 @@ TEST(DBMS_Handler, New) { ASSERT_EQ(all[0], memgraph::dbms::kDefaultDB); } { + const auto dirs = GetDirs(db_dir); auto db1 = dbms.New("db1"); ASSERT_TRUE(db1.HasValue()); ASSERT_TRUE(db1.GetValue()); - ASSERT_TRUE(std::filesystem::exists(storage_directory / "databases" / "db1")); + // New flow doesn't make db named directories + ASSERT_FALSE(std::filesystem::exists(db_dir / "db1")); + const auto dirs_w_db1 = GetDirs(db_dir); + ASSERT_EQ(dirs_w_db1.size(), dirs.size() + 1); ASSERT_TRUE(db1.GetValue()->storage() != nullptr); ASSERT_TRUE(db1.GetValue()->streams() != nullptr); ASSERT_TRUE(db1.GetValue()->trigger_store() != nullptr); @@ -111,9 +130,13 @@ TEST(DBMS_Handler, New) { ASSERT_TRUE(db2.HasError() && db2.GetError() == memgraph::dbms::NewError::EXISTS); } { + const auto dirs = GetDirs(db_dir); auto db3 = dbms.New("db3"); ASSERT_TRUE(db3.HasValue()); - ASSERT_TRUE(std::filesystem::exists(storage_directory / "databases" / "db3")); + // New flow doesn't make db named directories + ASSERT_FALSE(std::filesystem::exists(db_dir / "db3")); + const auto dirs_w_db3 = GetDirs(db_dir); + ASSERT_EQ(dirs_w_db3.size(), dirs.size() + 1); ASSERT_TRUE(db3.GetValue()->storage() != nullptr); ASSERT_TRUE(db3.GetValue()->streams() != nullptr); ASSERT_TRUE(db3.GetValue()->trigger_store() != nullptr); @@ -156,16 +179,16 @@ TEST(DBMS_Handler, Delete) { auto db1_acc = dbms.Get("db1"); // Holds access to database { - auto del = dbms.Delete(memgraph::dbms::kDefaultDB); + auto del = dbms.TryDelete(memgraph::dbms::kDefaultDB); ASSERT_TRUE(del.HasError() && del.GetError() == memgraph::dbms::DeleteError::DEFAULT_DB); } { - auto del = dbms.Delete("non-existent"); + auto del = dbms.TryDelete("non-existent"); ASSERT_TRUE(del.HasError() && del.GetError() == memgraph::dbms::DeleteError::NON_EXISTENT); } { // db1_acc is using db1 - auto del = dbms.Delete("db1"); + auto del = dbms.TryDelete("db1"); ASSERT_TRUE(del.HasError()); ASSERT_TRUE(del.GetError() == memgraph::dbms::DeleteError::USING); } @@ -173,15 +196,17 @@ TEST(DBMS_Handler, Delete) { // Reset db1_acc (releases access) so delete will succeed db1_acc.reset(); ASSERT_FALSE(db1_acc); - auto del = dbms.Delete("db1"); + auto del = dbms.TryDelete("db1"); ASSERT_FALSE(del.HasError()) << (int)del.GetError(); - auto del2 = dbms.Delete("db1"); + auto del2 = dbms.TryDelete("db1"); ASSERT_TRUE(del2.HasError() && del2.GetError() == memgraph::dbms::DeleteError::NON_EXISTENT); } { - auto del = dbms.Delete("db3"); + const auto dirs = GetDirs(db_dir); + auto del = dbms.TryDelete("db3"); ASSERT_FALSE(del.HasError()); - ASSERT_FALSE(std::filesystem::exists(storage_directory / "databases" / "db3")); + const auto dirs_wo_db3 = GetDirs(db_dir); + ASSERT_EQ(dirs_wo_db3.size(), dirs.size() - 1); } } diff --git a/tests/unit/dbms_handler_community.cpp b/tests/unit/dbms_handler_community.cpp index 860f70ba0..58f8dd2ad 100644 --- a/tests/unit/dbms_handler_community.cpp +++ b/tests/unit/dbms_handler_community.cpp @@ -90,9 +90,9 @@ TEST(DBMS_Handler, Get) { ASSERT_TRUE(default_db->streams() != nullptr); ASSERT_TRUE(default_db->trigger_store() != nullptr); ASSERT_TRUE(default_db->thread_pool() != 
nullptr); - ASSERT_EQ(default_db->storage()->id(), memgraph::dbms::kDefaultDB); + ASSERT_EQ(default_db->storage()->name(), memgraph::dbms::kDefaultDB); auto conf = storage_conf; - conf.name = memgraph::dbms::kDefaultDB; + conf.salient.name = memgraph::dbms::kDefaultDB; ASSERT_EQ(default_db->storage()->config_, conf); } diff --git a/tests/unit/interpreter_faker.hpp b/tests/unit/interpreter_faker.hpp index 5823c6a87..3b6075911 100644 --- a/tests/unit/interpreter_faker.hpp +++ b/tests/unit/interpreter_faker.hpp @@ -21,8 +21,9 @@ struct InterpreterFaker { } auto Prepare(const std::string &query, const std::map ¶ms = {}) { - ResultStreamFaker stream(interpreter.current_db_.db_acc_->get()->storage()); const auto [header, _1, qid, _2] = interpreter.Prepare(query, params, {}); + auto &db = interpreter.current_db_.db_acc_; + ResultStreamFaker stream(db ? db->get()->storage() : nullptr); stream.Header(header); return std::make_pair(std::move(stream), qid); } diff --git a/tests/unit/multi_tenancy.cpp b/tests/unit/multi_tenancy.cpp new file mode 100644 index 000000000..5581dcada --- /dev/null +++ b/tests/unit/multi_tenancy.cpp @@ -0,0 +1,378 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#include +#include +#include +#include + +#include "communication/bolt/v1/value.hpp" +#include "communication/result_stream_faker.hpp" +#include "csv/parsing.hpp" +#include "dbms/dbms_handler.hpp" +#include "disk_test_utils.hpp" +#include "flags/run_time_configurable.hpp" +#include "glue/communication.hpp" +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "interpreter_faker.hpp" +#include "license/license.hpp" +#include "query/auth_checker.hpp" +#include "query/config.hpp" +#include "query/exceptions.hpp" +#include "query/interpreter.hpp" +#include "query/interpreter_context.hpp" +#include "query/metadata.hpp" +#include "query/stream.hpp" +#include "query/typed_value.hpp" +#include "query_common.hpp" +#include "replication/state.hpp" +#include "storage/v2/inmemory/storage.hpp" +#include "storage/v2/isolation_level.hpp" +#include "storage/v2/property_value.hpp" +#include "storage/v2/storage_mode.hpp" +#include "utils/logging.hpp" +#include "utils/lru_cache.hpp" +#include "utils/synchronized.hpp" + +namespace { +std::set GetDirs(auto path) { + std::set dirs; + // Clean the unused directories + for (const auto &entry : std::filesystem::directory_iterator(path)) { + const auto &name = entry.path().filename().string(); + if (entry.is_directory() && !name.empty() && name.front() != '.') { + dirs.emplace(name); + } + } + return dirs; +} + +auto RunMtQuery(auto &interpreter, const std::string &query, std::string_view res) { + auto [stream, qid] = interpreter.Prepare(query); + ASSERT_EQ(stream.GetHeader().size(), 1U); + EXPECT_EQ(stream.GetHeader()[0], "STATUS"); + interpreter.Pull(&stream, 1); + ASSERT_EQ(stream.GetSummary().count("has_more"), 1); + ASSERT_FALSE(stream.GetSummary().at("has_more").ValueBool()); + ASSERT_EQ(stream.GetResults()[0].size(), 1U); + ASSERT_EQ(stream.GetResults()[0][0].ValueString(), 
res); +} + +auto RunQuery(auto &interpreter, const std::string &query) { + auto [stream, qid] = interpreter.Prepare(query); + interpreter.Pull(&stream, 1); + return stream.GetResults(); +} + +void UseDatabase(auto &interpreter, const std::string &name, std::string_view res) { + RunMtQuery(interpreter, "USE DATABASE " + name, res); +} + +void DropDatabase(auto &interpreter, const std::string &name, std::string_view res) { + RunMtQuery(interpreter, "DROP DATABASE " + name, res); +} +} // namespace + +class MultiTenantTest : public ::testing::Test { + public: + std::filesystem::path data_directory = std::filesystem::temp_directory_path() / "MG_tests_unit_multi_tenancy"; + + MultiTenantTest() = default; + + memgraph::storage::Config config{ + [&]() { + memgraph::storage::Config config{}; + UpdatePaths(config, data_directory); + return config; + }() // iile + }; + + struct MinMemgraph { + explicit MinMemgraph(const memgraph::storage::Config &conf) + : dbms{conf, + reinterpret_cast< + memgraph::utils::Synchronized *>(0), + true}, + interpreter_context{{}, &dbms, &dbms.ReplicationState()} { + memgraph::utils::global_settings.Initialize(conf.durability.storage_directory / "settings"); + memgraph::license::RegisterLicenseSettings(memgraph::license::global_license_checker, + memgraph::utils::global_settings); + memgraph::flags::run_time::Initialize(); + memgraph::license::global_license_checker.CheckEnvLicense(); + } + + ~MinMemgraph() { memgraph::utils::global_settings.Finalize(); } + + auto NewInterpreter() { return InterpreterFaker{&interpreter_context, dbms.Get()}; } + + memgraph::dbms::DbmsHandler dbms; + memgraph::query::InterpreterContext interpreter_context; + }; + + void SetUp() override { + TearDown(); + min_mg.emplace(config); + } + + void TearDown() override { + min_mg.reset(); + if (std::filesystem::exists(data_directory)) std::filesystem::remove_all(data_directory); + } + + auto NewInterpreter() { return min_mg->NewInterpreter(); } + + auto &DBMS() { return min_mg->dbms; } + + std::optional min_mg; +}; + +TEST_F(MultiTenantTest, SimpleCreateDrop) { + // 1) Create multiple interpreters with the default db + // 2) Create multiple databases using both + // 3) Drop databases while the other is using + + // 1 + auto interpreter1 = this->NewInterpreter(); + auto interpreter2 = this->NewInterpreter(); + + // 2 + auto create = [&](auto &interpreter, const std::string &name, bool success) { + RunMtQuery(interpreter, "CREATE DATABASE " + name, + success ? 
("Successfully created database " + name) : (name + " already exists.")); + }; + + create(interpreter1, "db1", true); + create(interpreter1, "db1", false); + create(interpreter2, "db1", false); + create(interpreter2, "db2", true); + create(interpreter1, "db2", false); + create(interpreter2, "db3", true); + create(interpreter2, "db4", true); + + // 3 + UseDatabase(interpreter1, "db2", "Using db2"); + UseDatabase(interpreter1, "db2", "Already using db2"); + UseDatabase(interpreter2, "db2", "Using db2"); + UseDatabase(interpreter1, "db4", "Using db4"); + + ASSERT_THROW(DropDatabase(interpreter1, memgraph::dbms::kDefaultDB.data(), ""), + memgraph::query::QueryRuntimeException); // default db + + DropDatabase(interpreter1, "db1", "Successfully deleted db1"); + ASSERT_THROW(DropDatabase(interpreter2, "db1", ""), memgraph::query::QueryRuntimeException); // No db1 + ASSERT_THROW(DropDatabase(interpreter1, "db1", ""), memgraph::query::QueryRuntimeException); // No db1 + + ASSERT_THROW(DropDatabase(interpreter1, "db2", ""), memgraph::query::QueryRuntimeException); // i2 using db2 + ASSERT_THROW(DropDatabase(interpreter1, "db4", ""), memgraph::query::QueryRuntimeException); // i1 using db4 +} + +TEST_F(MultiTenantTest, DbmsNewTryDelete) { + // 1) Create multiple interpreters with the default db + // 2) Create multiple databases using dbms + // 3) Try delete databases while the interpreters are using them + + // 1 + auto interpreter1 = this->NewInterpreter(); + auto interpreter2 = this->NewInterpreter(); + + // 2 + auto &dbms = DBMS(); + ASSERT_FALSE(dbms.New("db1").HasError()); + ASSERT_FALSE(dbms.New("db2").HasError()); + ASSERT_FALSE(dbms.New("db3").HasError()); + ASSERT_FALSE(dbms.New("db4").HasError()); + + // 3 + UseDatabase(interpreter2, "db2", "Using db2"); + UseDatabase(interpreter1, "db4", "Using db4"); + + ASSERT_FALSE(dbms.TryDelete("db1").HasError()); + ASSERT_TRUE(dbms.TryDelete("db2").HasError()); + ASSERT_FALSE(dbms.TryDelete("db3").HasError()); + ASSERT_TRUE(dbms.TryDelete("db4").HasError()); +} + +TEST_F(MultiTenantTest, DbmsUpdate) { + // 1) Create multiple interpreters with the default db + // 2) Create multiple databases using dbms + // 3) Try to update databases + + auto &dbms = DBMS(); + auto interpreter1 = this->NewInterpreter(); + + // Update clean default db + auto default_db = dbms.Get(); + const auto old_uuid = default_db->config().salient.uuid; + const memgraph::utils::UUID new_uuid{/* random */}; + const memgraph::storage::SalientConfig &config{.name = "memgraph", .uuid = new_uuid}; + auto new_default = dbms.Update(config); + ASSERT_TRUE(new_default.HasValue()); + ASSERT_NE(new_uuid, old_uuid); + ASSERT_EQ(default_db->storage(), new_default.GetValue()->storage()); + + // Add node to default + RunQuery(interpreter1, "CREATE (:Node)"); + + // Fail to update dirty default db + const memgraph::storage::SalientConfig &failing_config{.name = "memgraph", .uuid = {}}; + auto failed_update = dbms.Update(failing_config); + ASSERT_TRUE(failed_update.HasError()); + + // Succeed when updating with the same config + auto same_update = dbms.Update(config); + ASSERT_TRUE(same_update.HasValue()); + ASSERT_EQ(new_default.GetValue()->storage(), same_update.GetValue()->storage()); + + // Create new db + auto db1 = dbms.New("db1"); + ASSERT_FALSE(db1.HasError()); + RunMtQuery(interpreter1, "USE DATABASE db1", "Using db1"); + RunQuery(interpreter1, "CREATE (:NewNode)"); + RunQuery(interpreter1, "CREATE (:NewNode)"); + const auto db1_config_old = db1.GetValue()->config(); + + // Begin a 
transaction on db1 + auto interpreter2 = this->NewInterpreter(); + RunMtQuery(interpreter2, "USE DATABASE db1", "Using db1"); + ASSERT_EQ(RunQuery(interpreter2, "SHOW DATABASE")[0][0].ValueString(), "db1"); + RunQuery(interpreter2, "BEGIN"); + + // Update and check the new db in clean + auto interpreter3 = this->NewInterpreter(); + const memgraph::storage::SalientConfig &db1_config_new{.name = "db1", .uuid = {}}; + auto new_db1 = dbms.Update(db1_config_new); + ASSERT_TRUE(new_db1.HasValue()); + ASSERT_NE(db1_config_new.uuid, db1_config_old.salient.uuid); + RunMtQuery(interpreter3, "USE DATABASE db1", "Using db1"); + ASSERT_EQ(RunQuery(interpreter3, "MATCH(n) RETURN count(*)")[0][0].ValueInt(), 0); + + // Check that the interpreter1 is still valid, but lacking a db + ASSERT_THROW(RunQuery(interpreter1, "CREATE (:Node)"), memgraph::query::DatabaseContextRequiredException); + + // Check that the interpreter2 is still valid and pointing to the old db1 (until commit) + RunQuery(interpreter2, "CREATE (:NewNode)"); + ASSERT_EQ(RunQuery(interpreter2, "MATCH(n) RETURN count(*)")[0][0].ValueInt(), 3); + RunQuery(interpreter2, "COMMIT"); + ASSERT_THROW(RunQuery(interpreter2, "MATCH(n) RETURN n"), memgraph::query::DatabaseContextRequiredException); +} + +TEST_F(MultiTenantTest, DbmsNewDelete) { + // 1) Create multiple interpreters with the default db + // 2) Create multiple databases using dbms + // 3) Defer delete databases while the interpreters are using them + // 4) Database should be a zombie until the using interpreter retries to query it + // 5) Check it is deleted from disk + + // 1 + auto interpreter1 = this->NewInterpreter(); + auto interpreter2 = this->NewInterpreter(); + + // 2 + auto &dbms = DBMS(); + ASSERT_FALSE(dbms.New("db1").HasError()); + ASSERT_FALSE(dbms.New("db2").HasError()); + ASSERT_FALSE(dbms.New("db3").HasError()); + ASSERT_FALSE(dbms.New("db4").HasError()); + + // 3 + UseDatabase(interpreter2, "db2", "Using db2"); + UseDatabase(interpreter1, "db4", "Using db4"); + + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter2, "CREATE (:Node{on:\"db2\"})"); + RunQuery(interpreter2, "CREATE (:Node{on:\"db2\"})"); + + ASSERT_FALSE(dbms.Delete("db1").HasError()); + ASSERT_FALSE(dbms.Delete("db2").HasError()); + ASSERT_FALSE(dbms.Delete("db3").HasError()); + ASSERT_FALSE(dbms.Delete("db4").HasError()); + + // 4 + ASSERT_EQ(dbms.All().size(), 1); + ASSERT_EQ(GetDirs(data_directory / "databases").size(), 3); // All used databases remain on disk, but unusable + ASSERT_THROW(RunQuery(interpreter1, "MATCH(:Node{on:db4}) RETURN count(*)"), + memgraph::query::DatabaseContextRequiredException); + ASSERT_THROW(RunQuery(interpreter2, "MATCH(:Node{on:db2}) RETURN count(*)"), + memgraph::query::DatabaseContextRequiredException); + + // 5 + using namespace std::chrono_literals; + std::this_thread::sleep_for(100ms); // Wait for the filesystem to be updated + ASSERT_EQ(GetDirs(data_directory / "databases").size(), 1); // Databases deleted from disk + ASSERT_THROW(RunQuery(interpreter1, "MATCH(n) RETURN n"), memgraph::query::DatabaseContextRequiredException); + ASSERT_THROW(RunQuery(interpreter2, "MATCH(n) RETURN n"), memgraph::query::DatabaseContextRequiredException); +} + +TEST_F(MultiTenantTest, DbmsNewDeleteWTx) { + // 1) Create multiple interpreters with the default db + // 2) Create multiple databases 
using dbms + // 3) Defer delete databases while the interpreters are using them + // 4) Interpreters that had an open transaction before should still be working + // 5) New transactions on deleted databases should throw + // 6) Switching databases should still be possible + + // 1 + auto interpreter1 = this->NewInterpreter(); + auto interpreter2 = this->NewInterpreter(); + + // 2 + auto &dbms = DBMS(); + ASSERT_FALSE(dbms.New("db1").HasError()); + ASSERT_FALSE(dbms.New("db2").HasError()); + ASSERT_FALSE(dbms.New("db3").HasError()); + ASSERT_FALSE(dbms.New("db4").HasError()); + + // 3 + UseDatabase(interpreter2, "db2", "Using db2"); + UseDatabase(interpreter1, "db4", "Using db4"); + + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter2, "CREATE (:Node{on:\"db2\"})"); + RunQuery(interpreter2, "CREATE (:Node{on:\"db2\"})"); + + RunQuery(interpreter1, "BEGIN"); + RunQuery(interpreter2, "BEGIN"); + + ASSERT_FALSE(dbms.Delete("db1").HasError()); + ASSERT_FALSE(dbms.Delete("db2").HasError()); + ASSERT_FALSE(dbms.Delete("db3").HasError()); + ASSERT_FALSE(dbms.Delete("db4").HasError()); + + // 4 + ASSERT_EQ(dbms.All().size(), 1); + ASSERT_EQ(GetDirs(data_directory / "databases").size(), 3); // All used databases remain on disk, and usable + ASSERT_EQ(RunQuery(interpreter1, "MATCH(:Node{on:\"db4\"}) RETURN count(*)")[0][0].ValueInt(), 4); + ASSERT_EQ(RunQuery(interpreter2, "MATCH(:Node{on:\"db2\"}) RETURN count(*)")[0][0].ValueInt(), 2); + RunQuery(interpreter1, "MATCH(n:Node{on:\"db4\"}) DELETE n"); + RunQuery(interpreter2, "CREATE(:Node{on:\"db2\"})"); + ASSERT_EQ(RunQuery(interpreter1, "MATCH(:Node{on:\"db4\"}) RETURN count(*)")[0][0].ValueInt(), 0); + ASSERT_EQ(RunQuery(interpreter2, "MATCH(:Node{on:\"db2\"}) RETURN count(*)")[0][0].ValueInt(), 3); + RunQuery(interpreter1, "COMMIT"); + RunQuery(interpreter2, "COMMIT"); + + // 5 + using namespace std::chrono_literals; + std::this_thread::sleep_for(100ms); // Wait for the filesystem to be updated + ASSERT_EQ(GetDirs(data_directory / "databases").size(), 1); // Only the active databases remain + ASSERT_THROW(RunQuery(interpreter1, "MATCH(n) RETURN n"), memgraph::query::DatabaseContextRequiredException); + ASSERT_THROW(RunQuery(interpreter2, "MATCH(n) RETURN n"), memgraph::query::DatabaseContextRequiredException); + + // 6 + UseDatabase(interpreter2, memgraph::dbms::kDefaultDB.data(), "Using memgraph"); + UseDatabase(interpreter1, memgraph::dbms::kDefaultDB.data(), "Using memgraph"); +} diff --git a/tests/unit/query_dump.cpp b/tests/unit/query_dump.cpp index a1165789b..1817f1671 100644 --- a/tests/unit/query_dump.cpp +++ b/tests/unit/query_dump.cpp @@ -267,7 +267,7 @@ memgraph::storage::EdgeAccessor CreateEdge(memgraph::storage::Storage::Accessor } template -void VerifyQueries(const std::vector> &results, TArgs &&...args) { +void VerifyQueries(const std::vector> &results, TArgs &&... 
args) { std::vector expected{std::forward(args)...}; std::vector got; got.reserve(results.size()); @@ -704,11 +704,13 @@ TYPED_TEST(DumpTest, CheckStateVertexWithMultipleProperties) { config.disk = disk_test_utils::GenerateOnDiskConfig("query-dump-s1").disk; config.force_on_disk = true; } - auto on_exit_s1 = memgraph::utils::OnScopeExit{[&]() { - if constexpr (std::is_same_v) { + auto clean_up_s1 = memgraph::utils::OnScopeExit{[&] { + if (std::is_same::value) { disk_test_utils::RemoveRocksDbDirs("query-dump-s1"); } + std::filesystem::remove_all(config.durability.storage_directory); }}; + memgraph::replication::ReplicationState repl_state(ReplicationStateRootPath(config)); memgraph::utils::Gatekeeper db_gk(config, repl_state); @@ -823,11 +825,13 @@ TYPED_TEST(DumpTest, CheckStateSimpleGraph) { config.disk = disk_test_utils::GenerateOnDiskConfig("query-dump-s2").disk; config.force_on_disk = true; } - auto on_exit_s2 = memgraph::utils::OnScopeExit{[&]() { - if constexpr (std::is_same_v) { + auto clean_up_s2 = memgraph::utils::OnScopeExit{[&] { + if (std::is_same::value) { disk_test_utils::RemoveRocksDbDirs("query-dump-s2"); } + std::filesystem::remove_all(config.durability.storage_directory); }}; + memgraph::replication::ReplicationState repl_state{ReplicationStateRootPath(config)}; memgraph::utils::Gatekeeper db_gk{config, repl_state}; auto db_acc_opt = db_gk.access(); diff --git a/tests/unit/storage_v2_durability_inmemory.cpp b/tests/unit/storage_v2_durability_inmemory.cpp index bdec38c00..433242c0f 100644 --- a/tests/unit/storage_v2_durability_inmemory.cpp +++ b/tests/unit/storage_v2_durability_inmemory.cpp @@ -803,8 +803,8 @@ INSTANTIATE_TEST_CASE_P(EdgesWithoutProperties, DurabilityTest, ::testing::Value TEST_P(DurabilityTest, SnapshotOnExit) { // Create snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{.durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}}; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -819,8 +819,10 @@ TEST_P(DurabilityTest, SnapshotOnExit) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -840,10 +842,12 @@ TEST_P(DurabilityTest, SnapshotPeriodic) { // Create snapshot. 
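Before the durability hunks: the DbmsHandler and MultiTenantTest changes above split the old Delete into two operations, TryDelete (immediate, refuses while the database is in use or is the default) and Delete (deferred until every accessor releases it). A minimal sketch reusing the names from those tests (dbms, and a db2 still held by another interpreter):

// TryDelete: immediate; fails with DeleteError::USING while an interpreter still holds the database.
auto try_del = dbms.TryDelete("db2");
ASSERT_TRUE(try_del.HasError() && try_del.GetError() == memgraph::dbms::DeleteError::USING);

// Delete: deferred; the call succeeds right away, the database disappears from dbms.All(),
// and its on-disk directory is removed once the last user lets go.
ASSERT_FALSE(dbms.Delete("db2").HasError());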
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, + .durability = {.storage_directory = storage_directory, .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT, - .snapshot_interval = std::chrono::milliseconds(2000)}}; + .snapshot_interval = std::chrono::milliseconds(2000)}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -856,8 +860,10 @@ TEST_P(DurabilityTest, SnapshotPeriodic) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam()); @@ -882,13 +888,16 @@ TEST_P(DurabilityTest, SnapshotFallback) { auto const snapshot_interval = std::chrono::milliseconds(3000); memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT, - .snapshot_interval = snapshot_interval, - .snapshot_retention_count = 10, // We don't anticipate that we make this many - }}; + + .durability = + { + .storage_directory = storage_directory, + .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT, + .snapshot_interval = snapshot_interval, + .snapshot_retention_count = 10, // We don't anticipate that we make this many + }, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; @@ -926,8 +935,10 @@ TEST_P(DurabilityTest, SnapshotFallback) { } // Recover snapshot. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam()); @@ -946,8 +957,10 @@ TEST_P(DurabilityTest, SnapshotFallback) { TEST_P(DurabilityTest, SnapshotEverythingCorrupt) { // Create unrelated snapshot. 
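The remaining hunks in storage_v2_durability_inmemory.cpp all apply the same mechanical change: items is no longer a top-level field of memgraph::storage::Config but lives inside the new salient sub-struct (which, per the other hunks in this diff, also carries name and uuid). A before/after sketch using only the fields visible in these hunks:

// Before this diff: items was a top-level designated initializer.
memgraph::storage::Config config_old{
    .items = {.properties_on_edges = true},
    .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}};

// After this diff: durability stays where it was, items moves under salient.
memgraph::storage::Config config_new{
    .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true},
    .salient = {.items = {.properties_on_edges = true}},
};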
{ - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; @@ -975,10 +988,12 @@ TEST_P(DurabilityTest, SnapshotEverythingCorrupt) { // Create snapshot. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, + .durability = {.storage_directory = storage_directory, .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT, - .snapshot_interval = std::chrono::milliseconds(2000)}}; + .snapshot_interval = std::chrono::milliseconds(2000)}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; @@ -1019,8 +1034,10 @@ TEST_P(DurabilityTest, SnapshotEverythingCorrupt) { ASSERT_DEATH( ([&]() { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; }()) // iile @@ -1032,8 +1049,10 @@ TEST_P(DurabilityTest, SnapshotEverythingCorrupt) { TEST_P(DurabilityTest, SnapshotRetention) { // Create unrelated snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -1051,11 +1070,13 @@ TEST_P(DurabilityTest, SnapshotRetention) { // Create snapshot. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, + .durability = {.storage_directory = storage_directory, .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT, .snapshot_interval = std::chrono::milliseconds(2000), - .snapshot_retention_count = 3}}; + .snapshot_retention_count = 3}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; // Restore unrelated snapshots after the database has been started. @@ -1089,8 +1110,10 @@ TEST_P(DurabilityTest, SnapshotRetention) { } // Recover snapshot. 
- memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam()); @@ -1109,8 +1132,10 @@ TEST_P(DurabilityTest, SnapshotRetention) { TEST_P(DurabilityTest, SnapshotMixedUUID) { // Create snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -1127,8 +1152,10 @@ TEST_P(DurabilityTest, SnapshotMixedUUID) { // Recover snapshot. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -1136,8 +1163,10 @@ TEST_P(DurabilityTest, SnapshotMixedUUID) { // Create another snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -1158,8 +1187,10 @@ TEST_P(DurabilityTest, SnapshotMixedUUID) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam()); @@ -1178,8 +1209,10 @@ TEST_P(DurabilityTest, SnapshotMixedUUID) { TEST_P(DurabilityTest, SnapshotBackup) { // Create snapshot. 
{ - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -1197,10 +1230,12 @@ TEST_P(DurabilityTest, SnapshotBackup) { // Start storage without recovery. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, + .durability = {.storage_directory = storage_directory, .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT, - .snapshot_interval = std::chrono::minutes(20)}}; + .snapshot_interval = std::chrono::minutes(20)}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; } @@ -1215,8 +1250,10 @@ TEST_P(DurabilityTest, SnapshotBackup) { TEST_F(DurabilityTest, SnapshotWithoutPropertiesOnEdgesRecoveryWithPropertiesOnEdges) { // Create snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = false}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = false}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), false); @@ -1231,8 +1268,10 @@ TEST_F(DurabilityTest, SnapshotWithoutPropertiesOnEdgesRecoveryWithPropertiesOnE ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot. - memgraph::storage::Config config{.items = {.properties_on_edges = true}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = true}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, false); @@ -1251,8 +1290,10 @@ TEST_F(DurabilityTest, SnapshotWithoutPropertiesOnEdgesRecoveryWithPropertiesOnE TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesRecoveryWithoutPropertiesOnEdges) { // Create snapshot. 
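For orientation, most tests in this file share one skeleton that the diff leaves intact apart from the config layout: populate a storage under snapshot_on_exit = true, let the Database go out of scope so the snapshot gets written, then reopen the same directory with recover_on_startup = true and verify. A condensed sketch in the post-diff layout; CreateBaseDataset, VerifyDataset, DatasetType and storage_directory are the fixture helpers these tests already use:

{
  memgraph::storage::Config config{
      .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true},
      .salient = {.items = {.properties_on_edges = true}},
  };
  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
  memgraph::dbms::Database db{config, repl_state};
  CreateBaseDataset(db.storage(), /*properties_on_edges=*/true);
}  // scope ends, the snapshot is written because snapshot_on_exit is set

{
  memgraph::storage::Config config{
      .durability = {.storage_directory = storage_directory, .recover_on_startup = true},
      .salient = {.items = {.properties_on_edges = true}},
  };
  memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
  memgraph::dbms::Database db{config, repl_state};
  VerifyDataset(db.storage(), DatasetType::ONLY_BASE, /*properties_on_edges=*/true);
}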
{ - memgraph::storage::Config config{.items = {.properties_on_edges = true}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = true}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), true); @@ -1270,8 +1311,10 @@ TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesRecoveryWithoutPropertiesOnE ASSERT_DEATH( ([&]() { memgraph::storage::Config config{ - .items = {.properties_on_edges = false}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = false}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; }()) // iile @@ -1283,8 +1326,10 @@ TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesRecoveryWithoutPropertiesOnE TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesButUnusedRecoveryWithoutPropertiesOnEdges) { // Create snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = true}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = true}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), true); @@ -1326,8 +1371,10 @@ TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesButUnusedRecoveryWithoutProp ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot. - memgraph::storage::Config config{.items = {.properties_on_edges = false}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = false}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, false); @@ -1347,12 +1394,14 @@ TEST_P(DurabilityTest, WalBasic) { // Create WALs. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -1365,8 +1414,10 @@ TEST_P(DurabilityTest, WalBasic) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -1386,13 +1437,15 @@ TEST_P(DurabilityTest, WalBackup) { // Create WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -1411,11 +1464,13 @@ TEST_P(DurabilityTest, WalBackup) { // Start storage without recovery. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20)}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20)}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; } @@ -1431,12 +1486,14 @@ TEST_P(DurabilityTest, WalAppendToExisting) { // Create WALs. 
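The WAL tests below vary only a handful of durability knobs, all visible in the hunks: snapshot_wal_mode selects PERIODIC_SNAPSHOT_WITH_WAL, snapshot_interval is pushed far out so only WALs are produced during a test run, wal_file_size_kibibytes forces frequent rollover to new WAL files, and wal_file_flush_every_n_tx controls flush frequency. A combined sketch in the post-diff layout (values and names taken from the hunks, the intent comments are a reading of how the tests use them):

memgraph::storage::Config config{
    .durability = {.storage_directory = storage_directory,
                   .snapshot_wal_mode =
                       memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL,
                   .snapshot_interval = std::chrono::minutes(20),  // effectively "never" within a test run
                   .wal_file_size_kibibytes = 1,                   // roll over to a new WAL file quickly
                   .wal_file_flush_every_n_tx = kFlushWalEvery},   // flush constant used throughout this file
    .salient = {.items = {.properties_on_edges = GetParam()}},
};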
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -1450,8 +1507,10 @@ TEST_P(DurabilityTest, WalAppendToExisting) { // Recover WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam()); @@ -1460,13 +1519,15 @@ TEST_P(DurabilityTest, WalAppendToExisting) { // Recover WALs and create more WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .recover_on_startup = true, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .recover_on_startup = true, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateExtendedDataset(db.storage()); @@ -1478,8 +1539,10 @@ TEST_P(DurabilityTest, WalAppendToExisting) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -1502,12 +1565,14 @@ TEST_P(DurabilityTest, WalCreateInSingleTransaction) { // Create WALs. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -1541,8 +1606,10 @@ TEST_P(DurabilityTest, WalCreateInSingleTransaction) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; { @@ -1642,12 +1709,14 @@ TEST_P(DurabilityTest, WalCreateAndRemoveEverything) { // Create WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -1655,7 +1724,7 @@ TEST_P(DurabilityTest, WalCreateAndRemoveEverything) { auto indices = [&] { auto acc = db.Access(); auto res = acc->ListAllIndices(); - acc->Commit(); + (void)acc->Commit(); return res; }(); // iile for (const auto &index : indices.label) { @@ -1671,7 +1740,7 @@ TEST_P(DurabilityTest, WalCreateAndRemoveEverything) { auto constraints = [&] { auto acc = db.Access(); auto res = acc->ListAllConstraints(); - acc->Commit(); + (void)acc->Commit(); return res; }(); // iile for (const auto &constraint : constraints.existence) { @@ -1698,8 +1767,10 @@ TEST_P(DurabilityTest, WalCreateAndRemoveEverything) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover WALs. 
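The (void)acc->Commit() changes above exist, presumably, to silence the unused-result warning on a [[nodiscard]] return: the surrounding lambda only needs the listing it already copied, so the commit result is discarded on purpose. The pattern, as used in the hunk:

auto indices = [&] {
  auto acc = db.Access();
  auto res = acc->ListAllIndices();
  (void)acc->Commit();  // result intentionally discarded; the read-only listing is all that is needed
  return res;
}();  // immediately invoked lambda, as in the hunk above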
- memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; { @@ -1736,14 +1807,17 @@ TEST_P(DurabilityTest, WalTransactionOrdering) { // Create WAL. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 100000, - .wal_file_flush_every_n_tx = kFlushWalEvery, - }}; + + .durability = + { + .storage_directory = storage_directory, + .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 100000, + .wal_file_flush_every_n_tx = kFlushWalEvery, + }, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc1 = db.Access(); @@ -1835,8 +1909,10 @@ TEST_P(DurabilityTest, WalTransactionOrdering) { } // Recover WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; { @@ -1869,12 +1945,14 @@ TEST_P(DurabilityTest, WalCreateAndRemoveOnlyBaseDataset) { // Create WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -1898,8 +1976,10 @@ TEST_P(DurabilityTest, WalCreateAndRemoveOnlyBaseDataset) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover WALs. 
- memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_EXTENDED_WITH_BASE_INDICES_AND_CONSTRAINTS, GetParam()); @@ -1921,12 +2001,14 @@ TEST_P(DurabilityTest, WalDeathResilience) { // Create WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; // Create one million vertices. @@ -1958,14 +2040,17 @@ TEST_P(DurabilityTest, WalDeathResilience) { uint64_t count = 0; { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .recover_on_startup = true, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery, - }}; + + .durability = + { + .storage_directory = storage_directory, + .recover_on_startup = true, + .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery, + }, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; { @@ -1992,8 +2077,10 @@ TEST_P(DurabilityTest, WalDeathResilience) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; { @@ -2021,13 +2108,15 @@ TEST_P(DurabilityTest, WalMissingSecond) { // Create unrelated WALs. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -2047,13 +2136,15 @@ TEST_P(DurabilityTest, WalMissingSecond) { // Create WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; const uint64_t kNumVertices = 1000; @@ -2102,8 +2193,10 @@ TEST_P(DurabilityTest, WalMissingSecond) { ASSERT_DEATH( ([&]() { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; }()) // iile @@ -2116,13 +2209,15 @@ TEST_P(DurabilityTest, WalCorruptSecond) { // Create unrelated WALs. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -2142,13 +2237,15 @@ TEST_P(DurabilityTest, WalCorruptSecond) { // Create WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; const uint64_t kNumVertices = 1000; @@ -2196,8 +2293,10 @@ TEST_P(DurabilityTest, WalCorruptSecond) { ASSERT_DEATH( ([&]() { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; }()) // iile @@ -2210,13 +2309,15 @@ TEST_P(DurabilityTest, WalCorruptLastTransaction) { // Create WALs { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ 
-2237,8 +2338,10 @@ TEST_P(DurabilityTest, WalCorruptLastTransaction) { } // Recover WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; // The extended dataset shouldn't be recovered because its WAL transaction was @@ -2260,13 +2363,15 @@ TEST_P(DurabilityTest, WalAllOperationsInSingleTransaction) { // Create WALs { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -2310,8 +2415,10 @@ TEST_P(DurabilityTest, WalAllOperationsInSingleTransaction) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; { @@ -2339,12 +2446,14 @@ TEST_P(DurabilityTest, WalAndSnapshot) { // Create snapshot and WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::milliseconds(2000), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::milliseconds(2000), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -2358,8 +2467,10 @@ TEST_P(DurabilityTest, WalAndSnapshot) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot and WALs. 
- memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -2378,8 +2489,10 @@ TEST_P(DurabilityTest, WalAndSnapshot) { TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshot) { // Create snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -2393,8 +2506,10 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshot) { // Recover snapshot. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam()); @@ -2403,13 +2518,15 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshot) { // Recover snapshot and create WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .recover_on_startup = true, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .recover_on_startup = true, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateExtendedDataset(db.storage()); @@ -2421,8 +2538,10 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshot) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot and WALs. 
- memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -2441,8 +2560,10 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshot) { TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) { // Create snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -2456,8 +2577,10 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) { // Recover snapshot. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam()); @@ -2466,13 +2589,15 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) { // Recover snapshot and create WALs. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .recover_on_startup = true, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .recover_on_startup = true, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateExtendedDataset(db.storage()); @@ -2487,13 +2612,15 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) { memgraph::storage::Gid vertex_gid; { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .recover_on_startup = true, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .recover_on_startup = true, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -2513,8 +2640,10 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot and WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam(), @@ -2551,13 +2680,15 @@ TEST_P(DurabilityTest, WalAndSnapshotWalRetention) { // Create unrelated WALs. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -2579,13 +2710,15 @@ TEST_P(DurabilityTest, WalAndSnapshotWalRetention) { // Create snapshot and WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::seconds(2), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = 1}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::seconds(2), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = 1}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; // Restore unrelated snapshots after the database has been started. @@ -2614,8 +2747,10 @@ TEST_P(DurabilityTest, WalAndSnapshotWalRetention) { // Recover and verify data. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -2634,8 +2769,10 @@ TEST_P(DurabilityTest, WalAndSnapshotWalRetention) { ASSERT_DEATH( ([&]() { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; }()) // iile @@ -2648,11 +2785,13 @@ TEST_P(DurabilityTest, SnapshotAndWalMixedUUID) { // Create unrelated snapshot and WALs. 
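The recovery-failure cases above (WalMissingSecond, WalCorruptSecond, WalAndSnapshotWalRetention) wrap recovery in an immediately invoked lambda, marked "// iile", so that ASSERT_DEATH receives a single statement; the config reordering applies unchanged inside it. A minimal sketch of that pattern; the empty matcher string is an assumption, since the visible hunks are cut off before the second argument:

ASSERT_DEATH(
    ([&]() {
      memgraph::storage::Config config{
          .durability = {.storage_directory = storage_directory, .recover_on_startup = true},
          .salient = {.items = {.properties_on_edges = GetParam()}},
      };
      memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
      memgraph::dbms::Database db{config, repl_state};
    }()),  // iile
    "");   // matcher regex not shown in these hunks; "" used here only as a placeholder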
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::seconds(2)}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::seconds(2)}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -2671,11 +2810,13 @@ TEST_P(DurabilityTest, SnapshotAndWalMixedUUID) { // Create snapshot and WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::seconds(2)}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::seconds(2)}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -2698,8 +2839,10 @@ TEST_P(DurabilityTest, SnapshotAndWalMixedUUID) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot and WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -2719,8 +2862,10 @@ TEST_P(DurabilityTest, ParallelConstraintsRecovery) { // Create snapshot. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true, .items_per_batch = 13}}; + + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true, .items_per_batch = 13}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -2735,12 +2880,14 @@ TEST_P(DurabilityTest, ParallelConstraintsRecovery) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot. 
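Note that the recovery tuning knobs (items_per_batch, allow_parallel_index_creation, allow_parallel_schema_creation, snapshot_on_exit) stay under .durability; only the items block moves to .salient. A sketch of the recovery config used just below, under that reading of the hunks:

memgraph::storage::Config config{
    .durability = {.storage_directory = storage_directory,
                   .recover_on_startup = true,
                   .snapshot_on_exit = false,
                   .items_per_batch = 13,
                   .allow_parallel_index_creation = true},
    .salient = {.items = {.properties_on_edges = GetParam()}},
};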
- memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, - .recover_on_startup = true, - .snapshot_on_exit = false, - .items_per_batch = 13, - .allow_parallel_index_creation = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, + .recover_on_startup = true, + .snapshot_on_exit = false, + .items_per_batch = 13, + .allow_parallel_index_creation = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -2755,12 +2902,14 @@ TEST_P(DurabilityTest, ParallelConstraintsRecovery) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(DurabilityTest, ConstraintsRecoveryFunctionSetting) { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, - .recover_on_startup = true, - .snapshot_on_exit = false, - .items_per_batch = 13, - .allow_parallel_schema_creation = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, + .recover_on_startup = true, + .snapshot_on_exit = false, + .items_per_batch = 13, + .allow_parallel_schema_creation = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; // Create snapshot. { config.durability.recover_on_startup = false; diff --git a/tests/unit/storage_v2_edge_inmemory.cpp b/tests/unit/storage_v2_edge_inmemory.cpp index befa52462..96fa1debe 100644 --- a/tests/unit/storage_v2_edge_inmemory.cpp +++ b/tests/unit/storage_v2_edge_inmemory.cpp @@ -28,7 +28,7 @@ INSTANTIATE_TEST_CASE_P(EdgesWithoutProperties, StorageEdgeTest, ::testing::Valu // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSmallerCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -219,7 +219,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSmallerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromLargerCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -392,7 +392,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromLargerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSameCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); // Create vertex @@ -538,7 +538,7 @@ 
TEST_P(StorageEdgeTest, EdgeCreateFromSameCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSmallerAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -808,7 +808,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSmallerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromLargerAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -1078,7 +1078,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromLargerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSameAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); // Create vertex @@ -1305,7 +1305,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSameAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -1574,7 +1574,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromLargerCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -1843,7 +1843,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromLargerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSameCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); // Create vertex @@ -2069,7 +2069,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSameCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerAbort) { std::unique_ptr store( - new 
memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -2492,7 +2492,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromLargerAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -2916,7 +2916,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromLargerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSameAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); // Create vertex @@ -3276,7 +3276,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSameAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteSingleCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -3416,7 +3416,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteSingleCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_vertex1 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_vertex2 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -3746,7 +3746,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteSingleAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -3991,7 +3991,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteSingleAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = 
GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_vertex1 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_vertex2 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -4637,7 +4637,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithProperties, EdgePropertyCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = true}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = true}}})); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); { auto acc = store->Access(ReplicationRole::MAIN); @@ -4768,7 +4768,7 @@ TEST(StorageWithProperties, EdgePropertyCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithProperties, EdgePropertyAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = true}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = true}}})); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); // Create the vertex. @@ -5060,7 +5060,7 @@ TEST(StorageWithProperties, EdgePropertyAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithProperties, EdgePropertySerializationError) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = true}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = true}}})); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); { auto acc = store->Access(ReplicationRole::MAIN); @@ -5170,7 +5170,7 @@ TEST(StorageWithProperties, EdgePropertySerializationError) { TEST(StorageWithProperties, EdgePropertyClear) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = true}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = true}}})); memgraph::storage::Gid gid; auto property1 = store->NameToProperty("property1"); auto property2 = store->NameToProperty("property2"); @@ -5286,7 +5286,7 @@ TEST(StorageWithProperties, EdgePropertyClear) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithoutProperties, EdgePropertyAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = false}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = false}}})); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); { auto acc = store->Access(ReplicationRole::MAIN); @@ -5355,7 +5355,7 @@ TEST(StorageWithoutProperties, EdgePropertyAbort) { TEST(StorageWithoutProperties, EdgePropertyClear) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = false}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = false}}})); memgraph::storage::Gid gid; { auto acc = store->Access(ReplicationRole::MAIN); @@ -5382,7 +5382,7 @@ TEST(StorageWithoutProperties, EdgePropertyClear) { TEST(StorageWithProperties, EdgeNonexistentPropertyAPI) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = true}})); + new 
memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = true}}})); auto property = store->NameToProperty("property"); diff --git a/tests/unit/storage_v2_edge_ondisk.cpp b/tests/unit/storage_v2_edge_ondisk.cpp index 57ba1a562..823edf16e 100644 --- a/tests/unit/storage_v2_edge_ondisk.cpp +++ b/tests/unit/storage_v2_edge_ondisk.cpp @@ -31,7 +31,7 @@ const std::string testSuite = "storage_v2_edge_ondisk"; // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSmallerCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -224,7 +224,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSmallerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromLargerCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -399,7 +399,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromLargerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSameCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -547,7 +547,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSameCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSmallerAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -819,7 +819,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSmallerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromLargerAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -1091,7 +1091,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromLargerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSameAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - 
config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -1320,7 +1320,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSameAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -1591,7 +1591,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromLargerCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -1862,7 +1862,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromLargerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSameCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -2090,7 +2090,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSameCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -2515,7 +2515,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromLargerAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -2941,7 +2941,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromLargerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSameAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); 
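The edge tests apply the same relocation in two spellings: the in-memory tests build the whole config inline in the constructor call, while the on-disk tests mutate the member path of a generated config. A sketch of both; the unique_ptr element type is assumed to be memgraph::storage::Storage, since the flattened hunks no longer show the template argument:

// In-memory: config built inline, items nested under salient.
std::unique_ptr<memgraph::storage::Storage> inmem(
    new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}}));

// On-disk: start from the generated per-suite config and flip the relocated flag.
auto config = disk_test_utils::GenerateOnDiskConfig(testSuite);
config.salient.items.properties_on_edges = GetParam();  // was: config.items.properties_on_edges
std::unique_ptr<memgraph::storage::Storage> ondisk(new memgraph::storage::DiskStorage(config));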
std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -3303,7 +3303,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSameAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteSingleCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -3445,7 +3445,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteSingleCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_vertex1 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_vertex2 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -3777,7 +3777,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteSingleAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -4024,7 +4024,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteSingleAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_vertex1 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_vertex2 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -4672,7 +4672,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithProperties, EdgePropertyCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = true; + config.salient.items.properties_on_edges = true; std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); { @@ -4808,7 +4808,7 @@ TEST(StorageWithProperties, EdgePropertyCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithProperties, EdgePropertyAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = true; + config.salient.items.properties_on_edges = true; std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid 
gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -5109,7 +5109,7 @@ TEST(StorageWithProperties, EdgePropertyAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithProperties, EdgePropertySerializationError) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = true; + config.salient.items.properties_on_edges = true; std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); { @@ -5228,7 +5228,7 @@ TEST(StorageWithProperties, EdgePropertySerializationError) { TEST(StorageWithProperties, EdgePropertyClear) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = true; + config.salient.items.properties_on_edges = true; std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid; auto property1 = store->NameToProperty("property1"); @@ -5350,7 +5350,7 @@ TEST(StorageWithProperties, EdgePropertyClear) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithoutProperties, EdgePropertyAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = false; + config.salient.items.properties_on_edges = false; std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); { @@ -5424,7 +5424,7 @@ TEST(StorageWithoutProperties, EdgePropertyAbort) { TEST(StorageWithoutProperties, EdgePropertyClear) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = false; + config.salient.items.properties_on_edges = false; std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid; { @@ -5454,7 +5454,7 @@ TEST(StorageWithoutProperties, EdgePropertyClear) { TEST(StorageWithProperties, EdgeNonexistentPropertyAPI) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = true; + config.salient.items.properties_on_edges = true; std::unique_ptr store(new memgraph::storage::DiskStorage(config)); auto property = store->NameToProperty("property"); diff --git a/tests/unit/storage_v2_replication.cpp b/tests/unit/storage_v2_replication.cpp index 9399b7ba0..92c5abc11 100644 --- a/tests/unit/storage_v2_replication.cpp +++ b/tests/unit/storage_v2_replication.cpp @@ -64,26 +64,35 @@ class ReplicationTest : public ::testing::Test { void TearDown() override { Clear(); } Config main_conf = [&] { - Config config{.items = {.properties_on_edges = true}, - .durability = { - .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - }}; + Config config{ + .durability = + { + .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + }, + .salient.items = {.properties_on_edges = true}, + }; UpdatePaths(config, storage_directory); return config; }(); Config repl_conf = [&] { - Config config{.items = {.properties_on_edges = true}, - .durability = { - .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - }}; + Config config{ + .durability = + { + .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + }, + .salient.items = {.properties_on_edges = true}, + }; UpdatePaths(config, repl_storage_directory); return config; }(); Config repl2_conf = [&] { - Config config{.items = 
{.properties_on_edges = true}, - .durability = { - .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - }}; + Config config{ + .durability = + { + .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + }, + .salient.items = {.properties_on_edges = true}, + }; UpdatePaths(config, repl2_storage_directory); return config; }(); @@ -107,15 +116,17 @@ struct MinMemgraph { , reinterpret_cast< memgraph::utils::Synchronized *>(0), - true, false + true #endif }, repl_state{dbms.ReplicationState()}, - db{*dbms.Get().get()}, + db_acc{dbms.Get()}, + db{*db_acc.get()}, repl_handler(dbms) { } memgraph::dbms::DbmsHandler dbms; memgraph::replication::ReplicationState &repl_state; + memgraph::dbms::DatabaseAccess db_acc; memgraph::dbms::Database &db; ReplicationHandler repl_handler; }; @@ -152,7 +163,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { ASSERT_TRUE(v.AddLabel(main.db.storage()->NameToLabel(vertex_label)).HasValue()); ASSERT_TRUE(v.SetProperty(main.db.storage()->NameToProperty(vertex_property), PropertyValue(vertex_property_value)) .HasValue()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { @@ -178,7 +189,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { auto v = acc->FindVertex(*vertex_gid, View::OLD); ASSERT_TRUE(v); ASSERT_TRUE(v->RemoveLabel(main.db.storage()->NameToLabel(vertex_label)).HasValue()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { @@ -197,7 +208,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { auto v = acc->FindVertex(*vertex_gid, View::OLD); ASSERT_TRUE(v); ASSERT_TRUE(acc->DeleteVertex(&*v).HasValue()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { @@ -224,7 +235,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { ASSERT_TRUE(edge.SetProperty(main.db.storage()->NameToProperty(edge_property), PropertyValue(edge_property_value)) .HasValue()); edge_gid.emplace(edge.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } const auto find_edge = [&](const auto &edges, const Gid edge_gid) -> std::optional { @@ -261,7 +272,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { auto edge = find_edge(out_edges->edges, *edge_gid); ASSERT_TRUE(edge); ASSERT_TRUE(acc->DeleteEdge(&*edge).HasValue()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { @@ -287,25 +298,25 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { { auto unique_acc = main.db.UniqueAccess(); ASSERT_FALSE(unique_acc->CreateIndex(main.db.storage()->NameToLabel(label)).HasError()); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); unique_acc->SetIndexStats(main.db.storage()->NameToLabel(label), l_stats); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); ASSERT_FALSE( unique_acc->CreateIndex(main.db.storage()->NameToLabel(label), main.db.storage()->NameToProperty(property)) .HasError()); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); 
unique_acc->SetIndexStats(main.db.storage()->NameToLabel(label), main.db.storage()->NameToProperty(property), lp_stats); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); @@ -313,7 +324,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { ->CreateExistenceConstraint(main.db.storage()->NameToLabel(label), main.db.storage()->NameToProperty(property)) .HasError()); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); @@ -322,7 +333,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { {main.db.storage()->NameToProperty(property), main.db.storage()->NameToProperty(property_extra)}) .HasError()); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { @@ -360,24 +371,24 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { { auto unique_acc = main.db.UniqueAccess(); unique_acc->DeleteLabelIndexStats(main.db.storage()->NameToLabel(label)); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); ASSERT_FALSE(unique_acc->DropIndex(main.db.storage()->NameToLabel(label)).HasError()); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); unique_acc->DeleteLabelPropertyIndexStats(main.db.storage()->NameToLabel(label)); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); ASSERT_FALSE( unique_acc->DropIndex(main.db.storage()->NameToLabel(label), main.db.storage()->NameToProperty(property)) .HasError()); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); @@ -385,7 +396,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { ->DropExistenceConstraint(main.db.storage()->NameToLabel(label), main.db.storage()->NameToProperty(property)) .HasError()); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); @@ -393,7 +404,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { main.db.storage()->NameToLabel(label), {main.db.storage()->NameToProperty(property), main.db.storage()->NameToProperty(property_extra)}), memgraph::storage::UniqueConstraints::DeletionStatus::SUCCESS); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { @@ -455,7 +466,7 @@ TEST_F(ReplicationTest, MultipleSynchronousReplicationTest) { ASSERT_TRUE(v.SetProperty(main.db.storage()->NameToProperty(vertex_property), PropertyValue(vertex_property_value)) .HasValue()); vertex_gid.emplace(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } const auto check_replica = [&](memgraph::dbms::Database &replica_database) { @@ -477,7 +488,7 @@ TEST_F(ReplicationTest, MultipleSynchronousReplicationTest) { auto acc = main.db.Access(); auto v = acc->CreateVertex(); vertex_gid.emplace(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } // REPLICA1 
should contain the new vertex @@ -515,7 +526,7 @@ TEST_F(ReplicationTest, RecoveryProcess) { // Create the vertex before registering a replica auto v = acc->CreateVertex(); vertex_gids.emplace_back(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } } @@ -531,13 +542,13 @@ TEST_F(ReplicationTest, RecoveryProcess) { auto acc = main.db.Access(); auto v = acc->CreateVertex(); vertex_gids.emplace_back(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { auto acc = main.db.Access(); auto v = acc->CreateVertex(); vertex_gids.emplace_back(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } } @@ -560,7 +571,7 @@ TEST_F(ReplicationTest, RecoveryProcess) { ASSERT_TRUE( v->SetProperty(main.db.storage()->NameToProperty(property_name), PropertyValue(property_value)).HasValue()); } - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } static constexpr const auto *vertex_label = "vertex_label"; @@ -594,7 +605,7 @@ TEST_F(ReplicationTest, RecoveryProcess) { ASSERT_TRUE(v); ASSERT_TRUE(v->AddLabel(main.db.storage()->NameToLabel(vertex_label)).HasValue()); } - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { auto acc = replica.db.Access(); @@ -663,7 +674,7 @@ TEST_F(ReplicationTest, BasicAsynchronousReplicationTest) { auto acc = main.db.Access(); auto v = acc->CreateVertex(); created_vertices.push_back(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); if (i == 0) { ASSERT_EQ(main.db.storage()->GetReplicaState("REPLICA_ASYNC"), ReplicaState::REPLICATING); @@ -723,13 +734,13 @@ TEST_F(ReplicationTest, EpochTest) { auto acc = main.db.Access(); const auto v = acc->CreateVertex(); vertex_gid.emplace(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { auto acc = replica1.db.Access(); const auto v = acc->FindVertex(*vertex_gid, View::OLD); ASSERT_TRUE(v); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { auto acc = replica2.db.Access(); @@ -756,13 +767,13 @@ TEST_F(ReplicationTest, EpochTest) { { auto acc = main.db.Access(); acc->CreateVertex(); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { auto acc = replica1.db.Access(); auto v = acc->CreateVertex(); vertex_gid.emplace(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, replica1.db_acc).HasError()); } // Replica1 should forward it's vertex to Replica2 { @@ -790,7 +801,7 @@ TEST_F(ReplicationTest, EpochTest) { auto acc = main.db.Access(); const auto v = acc->CreateVertex(); vertex_gid.emplace(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } // Replica1 is not compatible with the main so it shouldn't contain // it's newest vertex
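Besides the config move, storage_v2_replication.cpp changes in two related ways: the MinMemgraph fixture now keeps the DatabaseAccess obtained from dbms.Get() as db_acc and derives db from it, and every Commit() call gains two arguments, a default-constructed first argument and the database access handle. A minimal sketch of the updated commit pattern; the type and meaning of the first argument are not visible in these hunks, so the empty braces are taken verbatim from the diff:

// Fixture members, per the MinMemgraph hunk:
//   memgraph::dbms::DatabaseAccess db_acc;   // db_acc{dbms.Get()}
//   memgraph::dbms::Database &db;            // db{*db_acc.get()}
auto acc = main.db.Access();
auto v = acc->CreateVertex();
ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError());  // was: acc->Commit()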