From ece4b0dba88b0e259ab5045986a26db60e3820ef Mon Sep 17 00:00:00 2001 From: DavIvek Date: Tue, 7 Nov 2023 13:34:03 +0100 Subject: [PATCH] Fix cached plan not getting invalidated (#1348) --- src/dbms/database.cpp | 1 + src/dbms/database.hpp | 8 +- src/query/cypher_query_interpreter.cpp | 33 +++---- src/query/cypher_query_interpreter.hpp | 37 +++----- src/query/interpreter.cpp | 16 +--- src/query/trigger.cpp | 2 +- src/query/trigger.hpp | 2 +- src/utils/lru_cache.hpp | 68 +++++++++++++ tests/e2e/configuration/default_config.py | 2 +- tests/unit/CMakeLists.txt | 3 + tests/unit/interpreter.cpp | 54 ++++++----- tests/unit/lru_cache.cpp | 111 ++++++++++++++++++++++ 12 files changed, 251 insertions(+), 86 deletions(-) create mode 100644 src/utils/lru_cache.hpp create mode 100644 tests/unit/lru_cache.cpp diff --git a/src/dbms/database.cpp b/src/dbms/database.cpp index b0862f913..411e282e8 100644 --- a/src/dbms/database.cpp +++ b/src/dbms/database.cpp @@ -24,6 +24,7 @@ namespace memgraph::dbms { Database::Database(storage::Config config, const replication::ReplicationState &repl_state) : trigger_store_(config.durability.storage_directory / "triggers"), streams_{config.durability.storage_directory / "streams"}, + plan_cache_{FLAGS_query_plan_cache_max_size}, repl_state_(&repl_state) { if (config.storage_mode == memgraph::storage::StorageMode::ON_DISK_TRANSACTIONAL || config.force_on_disk || utils::DirExists(config.disk.main_storage_directory)) { diff --git a/src/dbms/database.hpp b/src/dbms/database.hpp index d2a36368b..457aa1c1d 100644 --- a/src/dbms/database.hpp +++ b/src/dbms/database.hpp @@ -24,6 +24,8 @@ #include "query/trigger.hpp" #include "storage/v2/storage.hpp" #include "utils/gatekeeper.hpp" +#include "utils/lru_cache.hpp" +#include "utils/synchronized.hpp" namespace memgraph::dbms { @@ -146,9 +148,9 @@ class Database { /** * @brief Returns the PlanCache vector raw pointer * - * @return utils::SkipList* + * @return utils::Synchronized>, utils::RWSpinLock> */ - 
utils::SkipList *plan_cache() { return &plan_cache_; } + query::PlanCacheLRU *plan_cache() { return &plan_cache_; } private: std::unique_ptr storage_; //!< Underlying storage @@ -157,7 +159,7 @@ class Database { query::stream::Streams streams_; //!< Streams associated with the storage // TODO: Move to a better place - utils::SkipList plan_cache_; //!< Plan cache associated with the storage + query::PlanCacheLRU plan_cache_; //!< Plan cache associated with the storage const replication::ReplicationState *repl_state_; }; diff --git a/src/query/cypher_query_interpreter.cpp b/src/query/cypher_query_interpreter.cpp index 3deb5ccb5..d3ddc22c4 100644 --- a/src/query/cypher_query_interpreter.cpp +++ b/src/query/cypher_query_interpreter.cpp @@ -12,15 +12,16 @@ #include "query/cypher_query_interpreter.hpp" #include "query/frontend/ast/cypher_main_visitor.hpp" #include "query/frontend/opencypher/parser.hpp" +#include "utils/synchronized.hpp" // NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables) DEFINE_bool(query_cost_planner, true, "Use the cost-estimating query planner."); // NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables) -DEFINE_VALIDATED_int32(query_plan_cache_ttl, 60, "Time to live for cached query plans, in seconds.", +DEFINE_VALIDATED_int32(query_plan_cache_max_size, 1000, "Maximum number of query plans to cache.", FLAG_IN_RANGE(0, std::numeric_limits::max())); namespace memgraph::query { -CachedPlan::CachedPlan(std::unique_ptr plan) : plan_(std::move(plan)) {} +PlanWrapper::PlanWrapper(std::unique_ptr plan) : plan_(std::move(plan)) {} ParsedQuery ParseQuery(const std::string &query_string, const std::map ¶ms, utils::SkipList *cache, const InterpreterConfig::Query &query_config) { @@ -127,28 +128,24 @@ std::unique_ptr MakeLogicalPlan(AstStorage ast_storage, CypherQuery std::move(symbol_table)); } -std::shared_ptr CypherQueryToPlan(uint64_t hash, AstStorage ast_storage, CypherQuery *query, - const Parameters ¶meters, utils::SkipList 
*plan_cache, - DbAccessor *db_accessor, - const std::vector &predefined_identifiers) { - std::optional::Accessor> plan_cache_access; +std::shared_ptr CypherQueryToPlan(uint64_t hash, AstStorage ast_storage, CypherQuery *query, + const Parameters ¶meters, PlanCacheLRU *plan_cache, + DbAccessor *db_accessor, + const std::vector &predefined_identifiers) { if (plan_cache) { - plan_cache_access.emplace(plan_cache->access()); - auto it = plan_cache_access->find(hash); - if (it != plan_cache_access->end()) { - if (it->second->IsExpired()) { - plan_cache_access->remove(hash); - } else { - return it->second; - } + auto existing_plan = plan_cache->WithLock([&](auto &cache) { return cache.get(hash); }); + if (existing_plan.has_value()) { + return existing_plan.value(); } } - auto plan = std::make_shared( + auto plan = std::make_shared( MakeLogicalPlan(std::move(ast_storage), query, parameters, db_accessor, predefined_identifiers)); - if (plan_cache_access) { - plan_cache_access->insert({hash, plan}); + + if (plan_cache) { + plan_cache->WithLock([&](auto &cache) { cache.put(hash, plan); }); } + return plan; } } // namespace memgraph::query diff --git a/src/query/cypher_query_interpreter.hpp b/src/query/cypher_query_interpreter.hpp index b920beb8b..f33fa61e2 100644 --- a/src/query/cypher_query_interpreter.hpp +++ b/src/query/cypher_query_interpreter.hpp @@ -17,12 +17,14 @@ #include "query/frontend/stripped.hpp" #include "query/plan/planner.hpp" #include "utils/flag_validation.hpp" +#include "utils/lru_cache.hpp" +#include "utils/synchronized.hpp" #include "utils/timer.hpp" // NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables) DECLARE_bool(query_cost_planner); // NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables) -DECLARE_int32(query_plan_cache_ttl); +DECLARE_int32(query_plan_cache_max_size); namespace memgraph::query { @@ -45,23 +47,17 @@ class LogicalPlan { virtual const AstStorage &GetAstStorage() const = 0; }; -class CachedPlan { +class 
PlanWrapper { public: - explicit CachedPlan(std::unique_ptr plan); + explicit PlanWrapper(std::unique_ptr plan); const auto &plan() const { return plan_->GetRoot(); } double cost() const { return plan_->GetCost(); } const auto &symbol_table() const { return plan_->GetSymbolTable(); } const auto &ast_storage() const { return plan_->GetAstStorage(); } - bool IsExpired() const { - // NOLINTNEXTLINE (modernize-use-nullptr) - return cache_timer_.Elapsed() > std::chrono::seconds(FLAGS_query_plan_cache_ttl); - }; - private: std::unique_ptr plan_; - utils::Timer cache_timer_; }; struct CachedQuery { @@ -82,18 +78,6 @@ struct QueryCacheEntry { CachedQuery second; }; -struct PlanCacheEntry { - bool operator==(const PlanCacheEntry &other) const { return first == other.first; } - bool operator<(const PlanCacheEntry &other) const { return first < other.first; } - bool operator==(const uint64_t &other) const { return first == other; } - bool operator<(const uint64_t &other) const { return first < other; } - - uint64_t first; - // TODO: Maybe store the query string here and use it as a key with the hash - // so that we eliminate the risk of hash collisions. - std::shared_ptr second; -}; - /** * A container for data related to the parsing of a query. */ @@ -129,6 +113,9 @@ class SingleNodeLogicalPlan final : public LogicalPlan { SymbolTable symbol_table_; }; +using PlanCacheLRU = + utils::Synchronized>, utils::RWSpinLock>; + std::unique_ptr MakeLogicalPlan(AstStorage ast_storage, CypherQuery *query, const Parameters ¶meters, DbAccessor *db_accessor, const std::vector &predefined_identifiers); @@ -141,9 +128,9 @@ std::unique_ptr MakeLogicalPlan(AstStorage ast_storage, CypherQuery * If an identifier is contained there, we inject it at that place and remove it, * because a predefined identifier can be used only in one scope. 
*/ -std::shared_ptr CypherQueryToPlan(uint64_t hash, AstStorage ast_storage, CypherQuery *query, - const Parameters ¶meters, utils::SkipList *plan_cache, - DbAccessor *db_accessor, - const std::vector &predefined_identifiers = {}); +std::shared_ptr CypherQueryToPlan(uint64_t hash, AstStorage ast_storage, CypherQuery *query, + const Parameters ¶meters, PlanCacheLRU *plan_cache, + DbAccessor *db_accessor, + const std::vector &predefined_identifiers = {}); } // namespace memgraph::query diff --git a/src/query/interpreter.cpp b/src/query/interpreter.cpp index 08e91bfda..2e5bba2ff 100644 --- a/src/query/interpreter.cpp +++ b/src/query/interpreter.cpp @@ -1213,7 +1213,7 @@ struct TxTimeout { }; struct PullPlan { - explicit PullPlan(std::shared_ptr plan, const Parameters ¶meters, bool is_profile_query, + explicit PullPlan(std::shared_ptr plan, const Parameters ¶meters, bool is_profile_query, DbAccessor *dba, InterpreterContext *interpreter_context, utils::MemoryResource *execution_memory, std::optional username, std::atomic *transaction_status, std::shared_ptr tx_timer, @@ -1226,7 +1226,7 @@ struct PullPlan { std::map *summary); private: - std::shared_ptr plan_ = nullptr; + std::shared_ptr plan_ = nullptr; plan::UniqueCursorPtr cursor_ = nullptr; Frame frame_; ExecutionContext ctx_; @@ -1253,7 +1253,7 @@ struct PullPlan { bool use_monotonic_memory_; }; -PullPlan::PullPlan(const std::shared_ptr plan, const Parameters ¶meters, const bool is_profile_query, +PullPlan::PullPlan(const std::shared_ptr plan, const Parameters ¶meters, const bool is_profile_query, DbAccessor *dba, InterpreterContext *interpreter_context, utils::MemoryResource *execution_memory, std::optional username, std::atomic *transaction_status, std::shared_ptr tx_timer, TriggerContextCollector *trigger_context_collector, @@ -2106,10 +2106,7 @@ PreparedQuery PrepareAnalyzeGraphQuery(ParsedQuery parsed_query, bool in_explici // Creating an index influences computed plan costs. 
auto invalidate_plan_cache = [plan_cache = current_db.db_acc_->get()->plan_cache()] { - auto access = plan_cache->access(); - for (auto &kv : access) { - access.remove(kv.first); - } + plan_cache->WithLock([&](auto &cache) { cache.reset(); }); }; utils::OnScopeExit cache_invalidator(invalidate_plan_cache); @@ -2154,10 +2151,7 @@ PreparedQuery PrepareIndexQuery(ParsedQuery parsed_query, bool in_explicit_trans // Creating an index influences computed plan costs. auto invalidate_plan_cache = [plan_cache = db_acc->plan_cache()] { - auto access = plan_cache->access(); - for (auto &kv : access) { - access.remove(kv.first); - } + plan_cache->WithLock([&](auto &cache) { cache.reset(); }); }; auto *storage = db_acc->storage(); diff --git a/src/query/trigger.cpp b/src/query/trigger.cpp index 91aceb079..7998714c1 100644 --- a/src/query/trigger.cpp +++ b/src/query/trigger.cpp @@ -169,7 +169,7 @@ Trigger::TriggerPlan::TriggerPlan(std::unique_ptr logical_plan, std std::shared_ptr Trigger::GetPlan(DbAccessor *db_accessor, const query::AuthChecker *auth_checker) const { std::lock_guard plan_guard{plan_lock_}; - if (!parsed_statements_.is_cacheable || !trigger_plan_ || trigger_plan_->cached_plan.IsExpired()) { + if (!parsed_statements_.is_cacheable || !trigger_plan_) { auto identifiers = GetPredefinedIdentifiers(event_type_); AstStorage ast_storage; diff --git a/src/query/trigger.hpp b/src/query/trigger.hpp index 499bf634c..a6e19032e 100644 --- a/src/query/trigger.hpp +++ b/src/query/trigger.hpp @@ -62,7 +62,7 @@ struct Trigger { explicit TriggerPlan(std::unique_ptr logical_plan, std::vector identifiers); - CachedPlan cached_plan; + PlanWrapper cached_plan; std::vector identifiers; }; std::shared_ptr GetPlan(DbAccessor *db_accessor, const query::AuthChecker *auth_checker) const; diff --git a/src/utils/lru_cache.hpp b/src/utils/lru_cache.hpp new file mode 100644 index 000000000..1ab636670 --- /dev/null +++ b/src/utils/lru_cache.hpp @@ -0,0 +1,68 @@ +// Copyright 2023 Memgraph Ltd. 
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+// License, and you may not use this file except in compliance with the Business Source License.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+#pragma once
+
+#include <list>
+#include <optional>
+#include <unordered_map>
+#include <utility>
+
+namespace memgraph::utils {
+
+/// A simple LRU cache implementation.
+/// It is not thread-safe.
+
+template <typename TKey, typename TVal>
+class LRUCache {
+ public:
+  LRUCache(int cache_size_) : cache_size(cache_size_){};
+
+  void put(const TKey &key, const TVal &val) {
+    auto it = item_map.find(key);
+    if (it != item_map.end()) {
+      item_list.erase(it->second);
+      item_map.erase(it);
+    }
+    item_list.push_front(std::make_pair(key, val));
+    item_map.insert(std::make_pair(key, item_list.begin()));
+    try_clean();
+  };
+  std::optional<TVal> get(const TKey &key) {
+    if (!exists(key)) {
+      return std::nullopt;
+    }
+    auto it = item_map.find(key);
+    item_list.splice(item_list.begin(), item_list, it->second);
+    return it->second->second;
+  }
+  void reset() {
+    item_list.clear();
+    item_map.clear();
+  };
+  std::size_t size() { return item_map.size(); };
+
+ private:
+  void try_clean() {
+    while (item_map.size() > cache_size) {
+      auto last_it_elem_it = item_list.end();
+      last_it_elem_it--;
+      item_map.erase(last_it_elem_it->first);
+      item_list.pop_back();
+    }
+  };
+  bool exists(const TKey &key) { return (item_map.count(key) > 0); };
+
+  std::list<std::pair<TKey, TVal>> item_list;
+  std::unordered_map<TKey, typename std::list<std::pair<TKey, TVal>>::iterator> item_map;
+  std::size_t cache_size;
+};
+}  // namespace memgraph::utils
diff --git a/tests/e2e/configuration/default_config.py b/tests/e2e/configuration/default_config.py
index 13f286909..9a251c15c 100644
--- a/tests/e2e/configuration/default_config.py
+++ 
b/tests/e2e/configuration/default_config.py @@ -184,7 +184,7 @@ startup_config_dict = { "Set to true to enable telemetry. We collect information about the running system (CPU and memory information) and information about the database runtime (vertex and edge counts and resource usage) to allow for easier improvement of the product.", ), "query_cost_planner": ("true", "true", "Use the cost-estimating query planner."), - "query_plan_cache_ttl": ("60", "60", "Time to live for cached query plans, in seconds."), + "query_plan_cache_max_size": ("1000", "1000", "Maximum number of query plans to cache."), "query_vertex_count_to_expand_existing": ( "10", "10", diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index 1b7dece1c..b7a90f22d 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -296,6 +296,9 @@ target_link_libraries(${test_prefix}utils_java_string_formatter mg-utils) add_unit_test(utils_resource_lock.cpp) target_link_libraries(${test_prefix}utils_resource_lock mg-utils) +add_unit_test(lru_cache.cpp) +target_link_libraries(${test_prefix}lru_cache mg-utils) + # Test mg-storage-v2 add_unit_test(commit_log_v2.cpp) target_link_libraries(${test_prefix}commit_log_v2 gflags mg-utils mg-storage-v2) diff --git a/tests/unit/interpreter.cpp b/tests/unit/interpreter.cpp index 679cff692..5eb7cd539 100644 --- a/tests/unit/interpreter.cpp +++ b/tests/unit/interpreter.cpp @@ -37,6 +37,8 @@ #include "storage/v2/property_value.hpp" #include "storage/v2/storage_mode.hpp" #include "utils/logging.hpp" +#include "utils/lru_cache.hpp" +#include "utils/synchronized.hpp" namespace { @@ -662,7 +664,7 @@ TYPED_TEST(InterpreterTest, UniqueConstraintTest) { } TYPED_TEST(InterpreterTest, ExplainQuery) { - EXPECT_EQ(this->db->plan_cache()->size(), 0U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U); auto stream = this->Interpret("EXPLAIN MATCH (n) RETURN 
*;"); ASSERT_EQ(stream.GetHeader().size(), 1U); @@ -676,16 +678,16 @@ TYPED_TEST(InterpreterTest, ExplainQuery) { ++expected_it; } // We should have a plan cache for MATCH ... - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); // We should have AST cache for EXPLAIN ... and for inner MATCH ... EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); this->Interpret("MATCH (n) RETURN *;"); - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); } TYPED_TEST(InterpreterTest, ExplainQueryMultiplePulls) { - EXPECT_EQ(this->db->plan_cache()->size(), 0U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U); auto [stream, qid] = this->Prepare("EXPLAIN MATCH (n) RETURN *;"); ASSERT_EQ(stream.GetHeader().size(), 1U); @@ -709,16 +711,16 @@ TYPED_TEST(InterpreterTest, ExplainQueryMultiplePulls) { ASSERT_EQ(stream.GetResults()[2].size(), 1U); EXPECT_EQ(stream.GetResults()[2].front().ValueString(), *expected_it); // We should have a plan cache for MATCH ... - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); // We should have AST cache for EXPLAIN ... and for inner MATCH ... 
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); this->Interpret("MATCH (n) RETURN *;"); - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); } TYPED_TEST(InterpreterTest, ExplainQueryInMulticommandTransaction) { - EXPECT_EQ(this->db->plan_cache()->size(), 0U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U); this->Interpret("BEGIN"); auto stream = this->Interpret("EXPLAIN MATCH (n) RETURN *;"); @@ -734,16 +736,16 @@ TYPED_TEST(InterpreterTest, ExplainQueryInMulticommandTransaction) { ++expected_it; } // We should have a plan cache for MATCH ... - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); // We should have AST cache for EXPLAIN ... and for inner MATCH ... EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); this->Interpret("MATCH (n) RETURN *;"); - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); } TYPED_TEST(InterpreterTest, ExplainQueryWithParams) { - EXPECT_EQ(this->db->plan_cache()->size(), 0U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U); auto stream = this->Interpret("EXPLAIN MATCH (n) WHERE n.id = $id RETURN *;", {{"id", memgraph::storage::PropertyValue(42)}}); @@ -758,16 +760,16 @@ TYPED_TEST(InterpreterTest, ExplainQueryWithParams) { ++expected_it; } // We should have a plan cache for MATCH ... 
- EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); // We should have AST cache for EXPLAIN ... and for inner MATCH ... EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); this->Interpret("MATCH (n) WHERE n.id = $id RETURN *;", {{"id", memgraph::storage::PropertyValue("something else")}}); - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); } TYPED_TEST(InterpreterTest, ProfileQuery) { - EXPECT_EQ(this->db->plan_cache()->size(), 0U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U); auto stream = this->Interpret("PROFILE MATCH (n) RETURN *;"); std::vector expected_header{"OPERATOR", "ACTUAL HITS", "RELATIVE TIME", "ABSOLUTE TIME"}; @@ -781,16 +783,16 @@ TYPED_TEST(InterpreterTest, ProfileQuery) { ++expected_it; } // We should have a plan cache for MATCH ... - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); // We should have AST cache for PROFILE ... and for inner MATCH ... 
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); this->Interpret("MATCH (n) RETURN *;"); - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); } TYPED_TEST(InterpreterTest, ProfileQueryMultiplePulls) { - EXPECT_EQ(this->db->plan_cache()->size(), 0U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U); auto [stream, qid] = this->Prepare("PROFILE MATCH (n) RETURN *;"); std::vector expected_header{"OPERATOR", "ACTUAL HITS", "RELATIVE TIME", "ABSOLUTE TIME"}; @@ -817,11 +819,11 @@ TYPED_TEST(InterpreterTest, ProfileQueryMultiplePulls) { ASSERT_EQ(stream.GetResults()[2][0].ValueString(), *expected_it); // We should have a plan cache for MATCH ... - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); // We should have AST cache for PROFILE ... and for inner MATCH ... 
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); this->Interpret("MATCH (n) RETURN *;"); - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); } @@ -832,7 +834,7 @@ TYPED_TEST(InterpreterTest, ProfileQueryInMulticommandTransaction) { } TYPED_TEST(InterpreterTest, ProfileQueryWithParams) { - EXPECT_EQ(this->db->plan_cache()->size(), 0U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U); auto stream = this->Interpret("PROFILE MATCH (n) WHERE n.id = $id RETURN *;", {{"id", memgraph::storage::PropertyValue(42)}}); @@ -847,16 +849,16 @@ TYPED_TEST(InterpreterTest, ProfileQueryWithParams) { ++expected_it; } // We should have a plan cache for MATCH ... - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); // We should have AST cache for PROFILE ... and for inner MATCH ... 
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); this->Interpret("MATCH (n) WHERE n.id = $id RETURN *;", {{"id", memgraph::storage::PropertyValue("something else")}}); - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); } TYPED_TEST(InterpreterTest, ProfileQueryWithLiterals) { - EXPECT_EQ(this->db->plan_cache()->size(), 0U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U); auto stream = this->Interpret("PROFILE UNWIND range(1, 1000) AS x CREATE (:Node {id: x});", {}); std::vector expected_header{"OPERATOR", "ACTUAL HITS", "RELATIVE TIME", "ABSOLUTE TIME"}; @@ -870,11 +872,11 @@ TYPED_TEST(InterpreterTest, ProfileQueryWithLiterals) { ++expected_it; } // We should have a plan cache for UNWIND ... - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); // We should have AST cache for PROFILE ... and for inner UNWIND ... 
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); this->Interpret("UNWIND range(42, 4242) AS x CREATE (:Node {id: x});", {}); - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U); } @@ -1100,7 +1102,7 @@ TYPED_TEST(InterpreterTest, CacheableQueries) { SCOPED_TRACE("Cacheable query"); this->Interpret("RETURN 1"); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 1U); - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); } { @@ -1109,7 +1111,7 @@ TYPED_TEST(InterpreterTest, CacheableQueries) { // result signature could be changed this->Interpret("CALL mg.load_all()"); EXPECT_EQ(this->interpreter_context.ast_cache.size(), 1U); - EXPECT_EQ(this->db->plan_cache()->size(), 1U); + EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U); } } diff --git a/tests/unit/lru_cache.cpp b/tests/unit/lru_cache.cpp new file mode 100644 index 000000000..f6176184e --- /dev/null +++ b/tests/unit/lru_cache.cpp @@ -0,0 +1,111 @@ +// Copyright 2023 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+
+#include "utils/lru_cache.hpp"
+#include <optional>
+#include "gtest/gtest.h"
+
+TEST(LRUCacheTest, BasicTest) {
+  memgraph::utils::LRUCache<int, int> cache(2);
+  cache.put(1, 1);
+  cache.put(2, 2);
+
+  std::optional<int> value;
+  value = cache.get(1);
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(value.value(), 1);
+
+  cache.put(3, 3);
+
+  value = cache.get(2);
+  EXPECT_FALSE(value.has_value());
+
+  cache.put(4, 4);
+
+  value = cache.get(1);
+  EXPECT_FALSE(value.has_value());
+
+  value = cache.get(3);
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(value.value(), 3);
+
+  value = cache.get(4);
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(value.value(), 4);
+
+  EXPECT_EQ(cache.size(), 2);
+}
+
+TEST(LRUCacheTest, DuplicatePutTest) {
+  memgraph::utils::LRUCache<int, int> cache(2);
+  cache.put(1, 1);
+  cache.put(2, 2);
+  cache.put(1, 10);
+
+  std::optional<int> value;
+  value = cache.get(1);
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(value.value(), 10);
+
+  value = cache.get(2);
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(value.value(), 2);
+}
+
+TEST(LRUCacheTest, ResizeTest) {
+  memgraph::utils::LRUCache<int, int> cache(2);
+  cache.put(1, 1);
+  cache.put(2, 2);
+  cache.put(3, 3);
+
+  std::optional<int> value;
+  value = cache.get(1);
+  EXPECT_FALSE(value.has_value());
+
+  value = cache.get(2);
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(value.value(), 2);
+
+  value = cache.get(3);
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(value.value(), 3);
+}
+
+TEST(LRUCacheTest, EmptyCacheTest) {
+  memgraph::utils::LRUCache<int, int> cache(2);
+
+  std::optional<int> value;
+  value = cache.get(1);
+  EXPECT_FALSE(value.has_value());
+
+  cache.put(1, 1);
+  value = cache.get(1);
+  EXPECT_TRUE(value.has_value());
+  EXPECT_EQ(value.value(), 1);
+}
+
+TEST(LRUCacheTest, LargeCacheTest) {
+  const int CACHE_SIZE = 10000;
+  memgraph::utils::LRUCache<int, int> cache(CACHE_SIZE);
+
+  std::optional<int> value;
+  for (int i = 0; i < CACHE_SIZE; i++) {
+    value = cache.get(i);
+    EXPECT_FALSE(value.has_value());
+    cache.put(i, i);
+  }
+
+  for (int i = 0; i < CACHE_SIZE; i++) {
+    value = cache.get(i);
+    EXPECT_TRUE(value.has_value());
+    EXPECT_EQ(value.value(), i);
+  }
+}