Fix cached plan not getting invalidated (#1348)
This commit is contained in:
parent
66487a6dce
commit
ece4b0dba8
@ -24,6 +24,7 @@ namespace memgraph::dbms {
|
||||
Database::Database(storage::Config config, const replication::ReplicationState &repl_state)
|
||||
: trigger_store_(config.durability.storage_directory / "triggers"),
|
||||
streams_{config.durability.storage_directory / "streams"},
|
||||
plan_cache_{FLAGS_query_plan_cache_max_size},
|
||||
repl_state_(&repl_state) {
|
||||
if (config.storage_mode == memgraph::storage::StorageMode::ON_DISK_TRANSACTIONAL || config.force_on_disk ||
|
||||
utils::DirExists(config.disk.main_storage_directory)) {
|
||||
|
@ -24,6 +24,8 @@
|
||||
#include "query/trigger.hpp"
|
||||
#include "storage/v2/storage.hpp"
|
||||
#include "utils/gatekeeper.hpp"
|
||||
#include "utils/lru_cache.hpp"
|
||||
#include "utils/synchronized.hpp"
|
||||
|
||||
namespace memgraph::dbms {
|
||||
|
||||
@ -146,9 +148,9 @@ class Database {
|
||||
/**
|
||||
* @brief Returns the PlanCache vector raw pointer
|
||||
*
|
||||
* @return utils::SkipList<query::PlanCacheEntry>*
|
||||
* @return utils::Synchronized<utils::LRUCache<uint64_t, std::shared_ptr<PlanWrapper>>, utils::RWSpinLock>
|
||||
*/
|
||||
utils::SkipList<query::PlanCacheEntry> *plan_cache() { return &plan_cache_; }
|
||||
query::PlanCacheLRU *plan_cache() { return &plan_cache_; }
|
||||
|
||||
private:
|
||||
std::unique_ptr<storage::Storage> storage_; //!< Underlying storage
|
||||
@ -157,7 +159,7 @@ class Database {
|
||||
query::stream::Streams streams_; //!< Streams associated with the storage
|
||||
|
||||
// TODO: Move to a better place
|
||||
utils::SkipList<query::PlanCacheEntry> plan_cache_; //!< Plan cache associated with the storage
|
||||
query::PlanCacheLRU plan_cache_; //!< Plan cache associated with the storage
|
||||
|
||||
const replication::ReplicationState *repl_state_;
|
||||
};
|
||||
|
@ -12,15 +12,16 @@
|
||||
#include "query/cypher_query_interpreter.hpp"
|
||||
#include "query/frontend/ast/cypher_main_visitor.hpp"
|
||||
#include "query/frontend/opencypher/parser.hpp"
|
||||
#include "utils/synchronized.hpp"
|
||||
|
||||
// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_bool(query_cost_planner, true, "Use the cost-estimating query planner.");
|
||||
// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_VALIDATED_int32(query_plan_cache_ttl, 60, "Time to live for cached query plans, in seconds.",
|
||||
DEFINE_VALIDATED_int32(query_plan_cache_max_size, 1000, "Maximum number of query plans to cache.",
|
||||
FLAG_IN_RANGE(0, std::numeric_limits<int32_t>::max()));
|
||||
|
||||
namespace memgraph::query {
|
||||
CachedPlan::CachedPlan(std::unique_ptr<LogicalPlan> plan) : plan_(std::move(plan)) {}
|
||||
PlanWrapper::PlanWrapper(std::unique_ptr<LogicalPlan> plan) : plan_(std::move(plan)) {}
|
||||
|
||||
ParsedQuery ParseQuery(const std::string &query_string, const std::map<std::string, storage::PropertyValue> ¶ms,
|
||||
utils::SkipList<QueryCacheEntry> *cache, const InterpreterConfig::Query &query_config) {
|
||||
@ -127,28 +128,24 @@ std::unique_ptr<LogicalPlan> MakeLogicalPlan(AstStorage ast_storage, CypherQuery
|
||||
std::move(symbol_table));
|
||||
}
|
||||
|
||||
std::shared_ptr<CachedPlan> CypherQueryToPlan(uint64_t hash, AstStorage ast_storage, CypherQuery *query,
|
||||
const Parameters ¶meters, utils::SkipList<PlanCacheEntry> *plan_cache,
|
||||
DbAccessor *db_accessor,
|
||||
const std::vector<Identifier *> &predefined_identifiers) {
|
||||
std::optional<utils::SkipList<PlanCacheEntry>::Accessor> plan_cache_access;
|
||||
std::shared_ptr<PlanWrapper> CypherQueryToPlan(uint64_t hash, AstStorage ast_storage, CypherQuery *query,
|
||||
const Parameters ¶meters, PlanCacheLRU *plan_cache,
|
||||
DbAccessor *db_accessor,
|
||||
const std::vector<Identifier *> &predefined_identifiers) {
|
||||
if (plan_cache) {
|
||||
plan_cache_access.emplace(plan_cache->access());
|
||||
auto it = plan_cache_access->find(hash);
|
||||
if (it != plan_cache_access->end()) {
|
||||
if (it->second->IsExpired()) {
|
||||
plan_cache_access->remove(hash);
|
||||
} else {
|
||||
return it->second;
|
||||
}
|
||||
auto existing_plan = plan_cache->WithLock([&](auto &cache) { return cache.get(hash); });
|
||||
if (existing_plan.has_value()) {
|
||||
return existing_plan.value();
|
||||
}
|
||||
}
|
||||
|
||||
auto plan = std::make_shared<CachedPlan>(
|
||||
auto plan = std::make_shared<PlanWrapper>(
|
||||
MakeLogicalPlan(std::move(ast_storage), query, parameters, db_accessor, predefined_identifiers));
|
||||
if (plan_cache_access) {
|
||||
plan_cache_access->insert({hash, plan});
|
||||
|
||||
if (plan_cache) {
|
||||
plan_cache->WithLock([&](auto &cache) { cache.put(hash, plan); });
|
||||
}
|
||||
|
||||
return plan;
|
||||
}
|
||||
} // namespace memgraph::query
|
||||
|
@ -17,12 +17,14 @@
|
||||
#include "query/frontend/stripped.hpp"
|
||||
#include "query/plan/planner.hpp"
|
||||
#include "utils/flag_validation.hpp"
|
||||
#include "utils/lru_cache.hpp"
|
||||
#include "utils/synchronized.hpp"
|
||||
#include "utils/timer.hpp"
|
||||
|
||||
// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(query_cost_planner);
|
||||
// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_int32(query_plan_cache_ttl);
|
||||
DECLARE_int32(query_plan_cache_max_size);
|
||||
|
||||
namespace memgraph::query {
|
||||
|
||||
@ -45,23 +47,17 @@ class LogicalPlan {
|
||||
virtual const AstStorage &GetAstStorage() const = 0;
|
||||
};
|
||||
|
||||
class CachedPlan {
|
||||
class PlanWrapper {
|
||||
public:
|
||||
explicit CachedPlan(std::unique_ptr<LogicalPlan> plan);
|
||||
explicit PlanWrapper(std::unique_ptr<LogicalPlan> plan);
|
||||
|
||||
const auto &plan() const { return plan_->GetRoot(); }
|
||||
double cost() const { return plan_->GetCost(); }
|
||||
const auto &symbol_table() const { return plan_->GetSymbolTable(); }
|
||||
const auto &ast_storage() const { return plan_->GetAstStorage(); }
|
||||
|
||||
bool IsExpired() const {
|
||||
// NOLINTNEXTLINE (modernize-use-nullptr)
|
||||
return cache_timer_.Elapsed() > std::chrono::seconds(FLAGS_query_plan_cache_ttl);
|
||||
};
|
||||
|
||||
private:
|
||||
std::unique_ptr<LogicalPlan> plan_;
|
||||
utils::Timer cache_timer_;
|
||||
};
|
||||
|
||||
struct CachedQuery {
|
||||
@ -82,18 +78,6 @@ struct QueryCacheEntry {
|
||||
CachedQuery second;
|
||||
};
|
||||
|
||||
struct PlanCacheEntry {
|
||||
bool operator==(const PlanCacheEntry &other) const { return first == other.first; }
|
||||
bool operator<(const PlanCacheEntry &other) const { return first < other.first; }
|
||||
bool operator==(const uint64_t &other) const { return first == other; }
|
||||
bool operator<(const uint64_t &other) const { return first < other; }
|
||||
|
||||
uint64_t first;
|
||||
// TODO: Maybe store the query string here and use it as a key with the hash
|
||||
// so that we eliminate the risk of hash collisions.
|
||||
std::shared_ptr<CachedPlan> second;
|
||||
};
|
||||
|
||||
/**
|
||||
* A container for data related to the parsing of a query.
|
||||
*/
|
||||
@ -129,6 +113,9 @@ class SingleNodeLogicalPlan final : public LogicalPlan {
|
||||
SymbolTable symbol_table_;
|
||||
};
|
||||
|
||||
using PlanCacheLRU =
|
||||
utils::Synchronized<utils::LRUCache<uint64_t, std::shared_ptr<query::PlanWrapper>>, utils::RWSpinLock>;
|
||||
|
||||
std::unique_ptr<LogicalPlan> MakeLogicalPlan(AstStorage ast_storage, CypherQuery *query, const Parameters ¶meters,
|
||||
DbAccessor *db_accessor,
|
||||
const std::vector<Identifier *> &predefined_identifiers);
|
||||
@ -141,9 +128,9 @@ std::unique_ptr<LogicalPlan> MakeLogicalPlan(AstStorage ast_storage, CypherQuery
|
||||
* If an identifier is contained there, we inject it at that place and remove it,
|
||||
* because a predefined identifier can be used only in one scope.
|
||||
*/
|
||||
std::shared_ptr<CachedPlan> CypherQueryToPlan(uint64_t hash, AstStorage ast_storage, CypherQuery *query,
|
||||
const Parameters ¶meters, utils::SkipList<PlanCacheEntry> *plan_cache,
|
||||
DbAccessor *db_accessor,
|
||||
const std::vector<Identifier *> &predefined_identifiers = {});
|
||||
std::shared_ptr<PlanWrapper> CypherQueryToPlan(uint64_t hash, AstStorage ast_storage, CypherQuery *query,
|
||||
const Parameters ¶meters, PlanCacheLRU *plan_cache,
|
||||
DbAccessor *db_accessor,
|
||||
const std::vector<Identifier *> &predefined_identifiers = {});
|
||||
|
||||
} // namespace memgraph::query
|
||||
|
@ -1213,7 +1213,7 @@ struct TxTimeout {
|
||||
};
|
||||
|
||||
struct PullPlan {
|
||||
explicit PullPlan(std::shared_ptr<CachedPlan> plan, const Parameters ¶meters, bool is_profile_query,
|
||||
explicit PullPlan(std::shared_ptr<PlanWrapper> plan, const Parameters ¶meters, bool is_profile_query,
|
||||
DbAccessor *dba, InterpreterContext *interpreter_context, utils::MemoryResource *execution_memory,
|
||||
std::optional<std::string> username, std::atomic<TransactionStatus> *transaction_status,
|
||||
std::shared_ptr<utils::AsyncTimer> tx_timer,
|
||||
@ -1226,7 +1226,7 @@ struct PullPlan {
|
||||
std::map<std::string, TypedValue> *summary);
|
||||
|
||||
private:
|
||||
std::shared_ptr<CachedPlan> plan_ = nullptr;
|
||||
std::shared_ptr<PlanWrapper> plan_ = nullptr;
|
||||
plan::UniqueCursorPtr cursor_ = nullptr;
|
||||
Frame frame_;
|
||||
ExecutionContext ctx_;
|
||||
@ -1253,7 +1253,7 @@ struct PullPlan {
|
||||
bool use_monotonic_memory_;
|
||||
};
|
||||
|
||||
PullPlan::PullPlan(const std::shared_ptr<CachedPlan> plan, const Parameters ¶meters, const bool is_profile_query,
|
||||
PullPlan::PullPlan(const std::shared_ptr<PlanWrapper> plan, const Parameters ¶meters, const bool is_profile_query,
|
||||
DbAccessor *dba, InterpreterContext *interpreter_context, utils::MemoryResource *execution_memory,
|
||||
std::optional<std::string> username, std::atomic<TransactionStatus> *transaction_status,
|
||||
std::shared_ptr<utils::AsyncTimer> tx_timer, TriggerContextCollector *trigger_context_collector,
|
||||
@ -2106,10 +2106,7 @@ PreparedQuery PrepareAnalyzeGraphQuery(ParsedQuery parsed_query, bool in_explici
|
||||
|
||||
// Creating an index influences computed plan costs.
|
||||
auto invalidate_plan_cache = [plan_cache = current_db.db_acc_->get()->plan_cache()] {
|
||||
auto access = plan_cache->access();
|
||||
for (auto &kv : access) {
|
||||
access.remove(kv.first);
|
||||
}
|
||||
plan_cache->WithLock([&](auto &cache) { cache.reset(); });
|
||||
};
|
||||
utils::OnScopeExit cache_invalidator(invalidate_plan_cache);
|
||||
|
||||
@ -2154,10 +2151,7 @@ PreparedQuery PrepareIndexQuery(ParsedQuery parsed_query, bool in_explicit_trans
|
||||
|
||||
// Creating an index influences computed plan costs.
|
||||
auto invalidate_plan_cache = [plan_cache = db_acc->plan_cache()] {
|
||||
auto access = plan_cache->access();
|
||||
for (auto &kv : access) {
|
||||
access.remove(kv.first);
|
||||
}
|
||||
plan_cache->WithLock([&](auto &cache) { cache.reset(); });
|
||||
};
|
||||
|
||||
auto *storage = db_acc->storage();
|
||||
|
@ -169,7 +169,7 @@ Trigger::TriggerPlan::TriggerPlan(std::unique_ptr<LogicalPlan> logical_plan, std
|
||||
std::shared_ptr<Trigger::TriggerPlan> Trigger::GetPlan(DbAccessor *db_accessor,
|
||||
const query::AuthChecker *auth_checker) const {
|
||||
std::lock_guard plan_guard{plan_lock_};
|
||||
if (!parsed_statements_.is_cacheable || !trigger_plan_ || trigger_plan_->cached_plan.IsExpired()) {
|
||||
if (!parsed_statements_.is_cacheable || !trigger_plan_) {
|
||||
auto identifiers = GetPredefinedIdentifiers(event_type_);
|
||||
|
||||
AstStorage ast_storage;
|
||||
|
@ -62,7 +62,7 @@ struct Trigger {
|
||||
|
||||
explicit TriggerPlan(std::unique_ptr<LogicalPlan> logical_plan, std::vector<IdentifierInfo> identifiers);
|
||||
|
||||
CachedPlan cached_plan;
|
||||
PlanWrapper cached_plan;
|
||||
std::vector<IdentifierInfo> identifiers;
|
||||
};
|
||||
std::shared_ptr<TriggerPlan> GetPlan(DbAccessor *db_accessor, const query::AuthChecker *auth_checker) const;
|
||||
|
68
src/utils/lru_cache.hpp
Normal file
68
src/utils/lru_cache.hpp
Normal file
@ -0,0 +1,68 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <list>
|
||||
#include <optional>
|
||||
#include <unordered_map>
|
||||
#include <utility>
|
||||
|
||||
namespace memgraph::utils {

/// A simple LRU (least-recently-used) cache.
///
/// Entries are kept in a doubly linked list ordered by recency (front = most
/// recently used) with an unordered_map from key to list iterator for O(1)
/// lookup. When the number of entries exceeds the capacity, entries are
/// evicted from the back of the list.
///
/// It is NOT thread-safe; callers must provide their own synchronization
/// (e.g. wrap it in utils::Synchronized).
template <class TKey, class TVal>
class LRUCache {
 public:
  /// @param cache_size maximum number of entries kept in the cache.
  ///
  /// The parameter is taken as std::size_t (the original took int) so a
  /// negative value can no longer silently convert to a huge capacity, and the
  /// constructor is explicit to avoid accidental int -> cache conversions.
  explicit LRUCache(std::size_t cache_size) : cache_size_(cache_size) {}

  /// Insert or overwrite the value for `key` and mark it most recently used.
  /// Evicts least-recently-used entries if the capacity is exceeded.
  void put(const TKey &key, const TVal &val) {
    auto it = item_map_.find(key);
    if (it != item_map_.end()) {
      // Drop the stale entry; it is re-inserted at the front below.
      item_list_.erase(it->second);
      item_map_.erase(it);
    }
    item_list_.emplace_front(key, val);
    item_map_.emplace(key, item_list_.begin());
    evict_if_needed();
  }

  /// Return the cached value for `key`, or std::nullopt on a miss.
  /// A hit promotes the entry to most recently used.
  std::optional<TVal> get(const TKey &key) {
    auto it = item_map_.find(key);
    if (it == item_map_.end()) {
      return std::nullopt;
    }
    // splice moves the node to the front without invalidating iterators,
    // so the entry stored in item_map_ stays valid.
    item_list_.splice(item_list_.begin(), item_list_, it->second);
    return it->second->second;
  }

  /// Remove all entries from the cache.
  void reset() {
    item_list_.clear();
    item_map_.clear();
  }

  /// Number of entries currently cached.
  std::size_t size() const { return item_map_.size(); }

 private:
  // Evict from the back (least recently used) until within capacity.
  void evict_if_needed() {
    while (item_map_.size() > cache_size_) {
      auto last = item_list_.end();
      --last;
      item_map_.erase(last->first);
      item_list_.pop_back();
    }
  }

  bool exists(const TKey &key) const { return item_map_.count(key) > 0; }

  // Front = most recently used, back = eviction candidate.
  std::list<std::pair<TKey, TVal>> item_list_;
  std::unordered_map<TKey, typename std::list<std::pair<TKey, TVal>>::iterator> item_map_;
  std::size_t cache_size_;
};
}  // namespace memgraph::utils
|
@ -184,7 +184,7 @@ startup_config_dict = {
|
||||
"Set to true to enable telemetry. We collect information about the running system (CPU and memory information) and information about the database runtime (vertex and edge counts and resource usage) to allow for easier improvement of the product.",
|
||||
),
|
||||
"query_cost_planner": ("true", "true", "Use the cost-estimating query planner."),
|
||||
"query_plan_cache_ttl": ("60", "60", "Time to live for cached query plans, in seconds."),
|
||||
"query_plan_cache_max_size": ("1000", "1000", "Maximum number of query plans to cache."),
|
||||
"query_vertex_count_to_expand_existing": (
|
||||
"10",
|
||||
"10",
|
||||
|
@ -296,6 +296,9 @@ target_link_libraries(${test_prefix}utils_java_string_formatter mg-utils)
|
||||
add_unit_test(utils_resource_lock.cpp)
|
||||
target_link_libraries(${test_prefix}utils_resource_lock mg-utils)
|
||||
|
||||
add_unit_test(lru_cache.cpp)
|
||||
target_link_libraries(${test_prefix}lru_cache mg-utils)
|
||||
|
||||
# Test mg-storage-v2
|
||||
add_unit_test(commit_log_v2.cpp)
|
||||
target_link_libraries(${test_prefix}commit_log_v2 gflags mg-utils mg-storage-v2)
|
||||
|
@ -37,6 +37,8 @@
|
||||
#include "storage/v2/property_value.hpp"
|
||||
#include "storage/v2/storage_mode.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
#include "utils/lru_cache.hpp"
|
||||
#include "utils/synchronized.hpp"
|
||||
|
||||
namespace {
|
||||
|
||||
@ -662,7 +664,7 @@ TYPED_TEST(InterpreterTest, UniqueConstraintTest) {
|
||||
}
|
||||
|
||||
TYPED_TEST(InterpreterTest, ExplainQuery) {
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 0U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U);
|
||||
auto stream = this->Interpret("EXPLAIN MATCH (n) RETURN *;");
|
||||
ASSERT_EQ(stream.GetHeader().size(), 1U);
|
||||
@ -676,16 +678,16 @@ TYPED_TEST(InterpreterTest, ExplainQuery) {
|
||||
++expected_it;
|
||||
}
|
||||
// We should have a plan cache for MATCH ...
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
// We should have AST cache for EXPLAIN ... and for inner MATCH ...
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
this->Interpret("MATCH (n) RETURN *;");
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
}
|
||||
|
||||
TYPED_TEST(InterpreterTest, ExplainQueryMultiplePulls) {
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 0U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U);
|
||||
auto [stream, qid] = this->Prepare("EXPLAIN MATCH (n) RETURN *;");
|
||||
ASSERT_EQ(stream.GetHeader().size(), 1U);
|
||||
@ -709,16 +711,16 @@ TYPED_TEST(InterpreterTest, ExplainQueryMultiplePulls) {
|
||||
ASSERT_EQ(stream.GetResults()[2].size(), 1U);
|
||||
EXPECT_EQ(stream.GetResults()[2].front().ValueString(), *expected_it);
|
||||
// We should have a plan cache for MATCH ...
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
// We should have AST cache for EXPLAIN ... and for inner MATCH ...
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
this->Interpret("MATCH (n) RETURN *;");
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
}
|
||||
|
||||
TYPED_TEST(InterpreterTest, ExplainQueryInMulticommandTransaction) {
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 0U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U);
|
||||
this->Interpret("BEGIN");
|
||||
auto stream = this->Interpret("EXPLAIN MATCH (n) RETURN *;");
|
||||
@ -734,16 +736,16 @@ TYPED_TEST(InterpreterTest, ExplainQueryInMulticommandTransaction) {
|
||||
++expected_it;
|
||||
}
|
||||
// We should have a plan cache for MATCH ...
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
// We should have AST cache for EXPLAIN ... and for inner MATCH ...
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
this->Interpret("MATCH (n) RETURN *;");
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
}
|
||||
|
||||
TYPED_TEST(InterpreterTest, ExplainQueryWithParams) {
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 0U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U);
|
||||
auto stream =
|
||||
this->Interpret("EXPLAIN MATCH (n) WHERE n.id = $id RETURN *;", {{"id", memgraph::storage::PropertyValue(42)}});
|
||||
@ -758,16 +760,16 @@ TYPED_TEST(InterpreterTest, ExplainQueryWithParams) {
|
||||
++expected_it;
|
||||
}
|
||||
// We should have a plan cache for MATCH ...
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
// We should have AST cache for EXPLAIN ... and for inner MATCH ...
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
this->Interpret("MATCH (n) WHERE n.id = $id RETURN *;", {{"id", memgraph::storage::PropertyValue("something else")}});
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
}
|
||||
|
||||
TYPED_TEST(InterpreterTest, ProfileQuery) {
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 0U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U);
|
||||
auto stream = this->Interpret("PROFILE MATCH (n) RETURN *;");
|
||||
std::vector<std::string> expected_header{"OPERATOR", "ACTUAL HITS", "RELATIVE TIME", "ABSOLUTE TIME"};
|
||||
@ -781,16 +783,16 @@ TYPED_TEST(InterpreterTest, ProfileQuery) {
|
||||
++expected_it;
|
||||
}
|
||||
// We should have a plan cache for MATCH ...
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
// We should have AST cache for PROFILE ... and for inner MATCH ...
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
this->Interpret("MATCH (n) RETURN *;");
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
}
|
||||
|
||||
TYPED_TEST(InterpreterTest, ProfileQueryMultiplePulls) {
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 0U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U);
|
||||
auto [stream, qid] = this->Prepare("PROFILE MATCH (n) RETURN *;");
|
||||
std::vector<std::string> expected_header{"OPERATOR", "ACTUAL HITS", "RELATIVE TIME", "ABSOLUTE TIME"};
|
||||
@ -817,11 +819,11 @@ TYPED_TEST(InterpreterTest, ProfileQueryMultiplePulls) {
|
||||
ASSERT_EQ(stream.GetResults()[2][0].ValueString(), *expected_it);
|
||||
|
||||
// We should have a plan cache for MATCH ...
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
// We should have AST cache for PROFILE ... and for inner MATCH ...
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
this->Interpret("MATCH (n) RETURN *;");
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
}
|
||||
|
||||
@ -832,7 +834,7 @@ TYPED_TEST(InterpreterTest, ProfileQueryInMulticommandTransaction) {
|
||||
}
|
||||
|
||||
TYPED_TEST(InterpreterTest, ProfileQueryWithParams) {
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 0U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U);
|
||||
auto stream =
|
||||
this->Interpret("PROFILE MATCH (n) WHERE n.id = $id RETURN *;", {{"id", memgraph::storage::PropertyValue(42)}});
|
||||
@ -847,16 +849,16 @@ TYPED_TEST(InterpreterTest, ProfileQueryWithParams) {
|
||||
++expected_it;
|
||||
}
|
||||
// We should have a plan cache for MATCH ...
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
// We should have AST cache for PROFILE ... and for inner MATCH ...
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
this->Interpret("MATCH (n) WHERE n.id = $id RETURN *;", {{"id", memgraph::storage::PropertyValue("something else")}});
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
}
|
||||
|
||||
TYPED_TEST(InterpreterTest, ProfileQueryWithLiterals) {
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 0U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 0U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 0U);
|
||||
auto stream = this->Interpret("PROFILE UNWIND range(1, 1000) AS x CREATE (:Node {id: x});", {});
|
||||
std::vector<std::string> expected_header{"OPERATOR", "ACTUAL HITS", "RELATIVE TIME", "ABSOLUTE TIME"};
|
||||
@ -870,11 +872,11 @@ TYPED_TEST(InterpreterTest, ProfileQueryWithLiterals) {
|
||||
++expected_it;
|
||||
}
|
||||
// We should have a plan cache for UNWIND ...
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
// We should have AST cache for PROFILE ... and for inner UNWIND ...
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
this->Interpret("UNWIND range(42, 4242) AS x CREATE (:Node {id: x});", {});
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 2U);
|
||||
}
|
||||
|
||||
@ -1100,7 +1102,7 @@ TYPED_TEST(InterpreterTest, CacheableQueries) {
|
||||
SCOPED_TRACE("Cacheable query");
|
||||
this->Interpret("RETURN 1");
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
}
|
||||
|
||||
{
|
||||
@ -1109,7 +1111,7 @@ TYPED_TEST(InterpreterTest, CacheableQueries) {
|
||||
// result signature could be changed
|
||||
this->Interpret("CALL mg.load_all()");
|
||||
EXPECT_EQ(this->interpreter_context.ast_cache.size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->size(), 1U);
|
||||
EXPECT_EQ(this->db->plan_cache()->WithLock([&](auto &cache) { return cache.size(); }), 1U);
|
||||
}
|
||||
}
|
||||
|
||||
|
111
tests/unit/lru_cache.cpp
Normal file
111
tests/unit/lru_cache.cpp
Normal file
@ -0,0 +1,111 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "utils/lru_cache.hpp"
|
||||
#include <optional>
|
||||
#include "gtest/gtest.h"
|
||||
|
||||
// Exercises the core LRU contract: hits promote entries, inserts beyond
// capacity evict the least recently used key.
TEST(LRUCacheTest, BasicTest) {
  memgraph::utils::LRUCache<int, int> cache(2);
  cache.put(1, 1);
  cache.put(2, 2);

  // Touching key 1 promotes it, leaving key 2 as the eviction candidate.
  EXPECT_EQ(cache.get(1), std::make_optional(1));

  cache.put(3, 3);  // evicts key 2
  EXPECT_EQ(cache.get(2), std::nullopt);

  cache.put(4, 4);  // evicts key 1 (older than key 3)
  EXPECT_EQ(cache.get(1), std::nullopt);

  EXPECT_EQ(cache.get(3), std::make_optional(3));
  EXPECT_EQ(cache.get(4), std::make_optional(4));

  EXPECT_EQ(cache.size(), 2);
}
|
||||
|
||||
// A repeated put for an existing key must overwrite the value in place
// rather than consume an extra cache slot.
TEST(LRUCacheTest, DuplicatePutTest) {
  memgraph::utils::LRUCache<int, int> cache(2);
  cache.put(1, 1);
  cache.put(2, 2);
  cache.put(1, 10);  // overwrite, no eviction

  EXPECT_EQ(cache.get(1), std::make_optional(10));
  EXPECT_EQ(cache.get(2), std::make_optional(2));
}
|
||||
|
||||
// Filling a size-2 cache with three entries evicts the oldest one (key 1)
// while the two most recent keys survive.
// NOTE(review): despite the name, no resize API is exercised here — this
// checks eviction on overflow.
TEST(LRUCacheTest, ResizeTest) {
  memgraph::utils::LRUCache<int, int> cache(2);
  cache.put(1, 1);
  cache.put(2, 2);
  cache.put(3, 3);  // evicts key 1

  EXPECT_EQ(cache.get(1), std::nullopt);
  EXPECT_EQ(cache.get(2), std::make_optional(2));
  EXPECT_EQ(cache.get(3), std::make_optional(3));
}
|
||||
|
||||
// A lookup on an empty cache misses; the first put makes the key visible.
TEST(LRUCacheTest, EmptyCacheTest) {
  memgraph::utils::LRUCache<int, int> cache(2);

  EXPECT_EQ(cache.get(1), std::nullopt);

  cache.put(1, 1);
  EXPECT_EQ(cache.get(1), std::make_optional(1));
}
|
||||
|
||||
// With exactly CACHE_SIZE distinct keys inserted, nothing is evicted and
// every key remains retrievable.
TEST(LRUCacheTest, LargeCacheTest) {
  const int CACHE_SIZE = 10000;
  memgraph::utils::LRUCache<int, int> cache(CACHE_SIZE);

  for (int key = 0; key < CACHE_SIZE; ++key) {
    // Each key must miss before it has ever been inserted.
    EXPECT_EQ(cache.get(key), std::nullopt);
    cache.put(key, key);
  }

  for (int key = 0; key < CACHE_SIZE; ++key) {
    EXPECT_EQ(cache.get(key), std::make_optional(key));
  }
}
|
Loading…
Reference in New Issue
Block a user