Merge branch 'project-pineapples' into E112-MG-implement-partial-schema

Commit f57f30c8cf by jbajic, 2022-08-01 10:46:11 +02:00
33 changed files with 865 additions and 700 deletions

@@ -70,6 +70,11 @@ jobs:
# branches and tags. (default: 1)
fetch-depth: 0
# This is also needed if we want to do a comparison against other branches
# See https://github.community/t/checkout-code-fails-when-it-runs-lerna-run-test-since-master/17920
- name: Fetch all history for all tags and branches
run: git fetch
- name: Build combined ASAN, UBSAN and coverage binaries
run: |
# Activate toolchain.
@@ -110,12 +115,22 @@ jobs:
name: "Code coverage"
path: tools/github/generated/code_coverage.tar.gz
- name: Set base branch
if: ${{ github.event_name == 'pull_request' }}
run: |
echo "BASE_BRANCH=origin/${{ github.base_ref }}" >> $GITHUB_ENV
- name: Set base branch # if we manually dispatch or push to master
if: ${{ github.event_name != 'pull_request' }}
run: |
echo "BASE_BRANCH=origin/master" >> $GITHUB_ENV
- name: Run clang-tidy
run: |
source /opt/toolchain-v4/activate
# Restrict clang-tidy results only to the modified parts
git diff -U0 master... -- src | ./tools/github/clang-tidy/clang-tidy-diff.py -p 1 -j $THREADS -path build | tee ./build/clang_tidy_output.txt
git diff -U0 ${{ env.BASE_BRANCH }}... -- src | ./tools/github/clang-tidy/clang-tidy-diff.py -p 1 -j $THREADS -path build | tee ./build/clang_tidy_output.txt
# Fail if any warning is reported
! cat ./build/clang_tidy_output.txt | ./tools/github/clang-tidy/grep_error_lines.sh > /dev/null

libs/.gitignore (vendored)

@@ -5,3 +5,4 @@
!CMakeLists.txt
!__main.cpp
!pulsar.patch
!antlr4.10.1.patch

@@ -106,6 +106,7 @@ import_external_library(antlr4 STATIC
-DWITH_LIBCXX=OFF # because of debian bug
-DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=true
-DCMAKE_CXX_STANDARD=20
-DANTLR_BUILD_CPP_TESTS=OFF
BUILD_COMMAND $(MAKE) antlr4_static
INSTALL_COMMAND $(MAKE) install)

libs/antlr4.10.1.patch (new file)

@@ -0,0 +1,13 @@
diff --git a/runtime/Cpp/runtime/CMakeLists.txt b/runtime/Cpp/runtime/CMakeLists.txt
index baf46cac9..2e7756de8 100644
--- a/runtime/Cpp/runtime/CMakeLists.txt
+++ b/runtime/Cpp/runtime/CMakeLists.txt
@@ -134,7 +134,7 @@ set_target_properties(antlr4_static
ARCHIVE_OUTPUT_DIRECTORY ${LIB_OUTPUT_DIR}
COMPILE_FLAGS "${disabled_compile_warnings} ${extra_static_compile_flags}")
-install(TARGETS antlr4_shared
+install(TARGETS antlr4_shared OPTIONAL
EXPORT antlr4-targets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}

@@ -1,43 +0,0 @@
diff --git a/runtime/Cpp/runtime/CMakeLists.txt b/runtime/Cpp/runtime/CMakeLists.txt
index a8503bb..11362cf 100644
--- a/runtime/Cpp/runtime/CMakeLists.txt
+++ b/runtime/Cpp/runtime/CMakeLists.txt
@@ -5,8 +5,8 @@ set(THIRDPARTY_DIR ${CMAKE_BINARY_DIR}/runtime/thirdparty)
set(UTFCPP_DIR ${THIRDPARTY_DIR}/utfcpp)
ExternalProject_Add(
utfcpp
- GIT_REPOSITORY "git://github.com/nemtrif/utfcpp"
- GIT_TAG "v3.1.1"
+ GIT_REPOSITORY "https://github.com/nemtrif/utfcpp"
+ GIT_TAG "v3.2.1"
SOURCE_DIR ${UTFCPP_DIR}
UPDATE_DISCONNECTED 1
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${UTFCPP_DIR}/install -Dgtest_force_shared_crt=ON
@@ -118,7 +118,7 @@ set_target_properties(antlr4_static
ARCHIVE_OUTPUT_DIRECTORY ${LIB_OUTPUT_DIR}
COMPILE_FLAGS "${disabled_compile_warnings} ${extra_static_compile_flags}")
-install(TARGETS antlr4_shared
+install(TARGETS antlr4_shared OPTIONAL
DESTINATION lib
EXPORT antlr4-targets)
install(TARGETS antlr4_static
diff --git a/runtime/Cpp/runtime/src/support/Any.h b/runtime/Cpp/runtime/src/support/Any.h
index 468db98..65a473b 100644
--- a/runtime/Cpp/runtime/src/support/Any.h
+++ b/runtime/Cpp/runtime/src/support/Any.h
@@ -122,12 +122,12 @@ private:
}
private:
- template<int N = 0, typename std::enable_if<N == N && std::is_nothrow_copy_constructible<T>::value, int>::type = 0>
+ template<int N = 0, typename std::enable_if<N == N && std::is_copy_constructible<T>::value, int>::type = 0>
Base* clone() const {
return new Derived<T>(value);
}
- template<int N = 0, typename std::enable_if<N == N && !std::is_nothrow_copy_constructible<T>::value, int>::type = 0>
+ template<int N = 0, typename std::enable_if<N == N && !std::is_copy_constructible<T>::value, int>::type = 0>
Base* clone() const {
return nullptr;
}
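
For reference, a self-contained sketch of the `enable_if` dispatch pattern this (now superseded) patch relied on: the otherwise meaningless `N == N` term makes the condition dependent on a template parameter so the two `clone()` overloads can coexist, and exactly one is enabled for any given `T`. The `Holder` type below is illustrative, not the runtime's:

```cpp
#include <type_traits>

template <typename T>
struct Holder {
  T value;

  // Enabled when T is copy-constructible: clone by copying the payload.
  template <int N = 0, typename std::enable_if<N == N && std::is_copy_constructible<T>::value, int>::type = 0>
  Holder *clone() const {
    return new Holder{value};
  }

  // Enabled otherwise: a non-copyable payload cannot be cloned.
  template <int N = 0, typename std::enable_if<N == N && !std::is_copy_constructible<T>::value, int>::type = 0>
  Holder *clone() const {
    return nullptr;
  }
};

int main() {
  Holder<int> h{42};
  delete h.clone();  // int is copy-constructible, so this returns a real copy
}
```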

@@ -105,7 +105,7 @@ repo_clone_try_double () {
# Download from primary_urls might fail because the cache is not installed.
declare -A primary_urls=(
["antlr4-code"]="http://$local_cache_host/git/antlr4.git"
["antlr4-generator"]="http://$local_cache_host/file/antlr-4.9.2-complete.jar"
["antlr4-generator"]="http://$local_cache_host/file/antlr-4.10.1-complete.jar"
["cppitertools"]="http://$local_cache_host/git/cppitertools.git"
["rapidcheck"]="http://$local_cache_host/git/rapidcheck.git"
["gbenchmark"]="http://$local_cache_host/git/benchmark.git"
@@ -130,7 +130,7 @@ declare -A primary_urls=(
# should fail.
declare -A secondary_urls=(
["antlr4-code"]="https://github.com/antlr/antlr4.git"
["antlr4-generator"]="http://www.antlr.org/download/antlr-4.9.2-complete.jar"
["antlr4-generator"]="https://www.antlr.org/download/antlr-4.10.1-complete.jar"
["cppitertools"]="https://github.com/ryanhaining/cppitertools.git"
["rapidcheck"]="https://github.com/emil-e/rapidcheck.git"
["gbenchmark"]="https://github.com/google/benchmark.git"
@@ -152,10 +152,10 @@ declare -A secondary_urls=(
# antlr
file_get_try_double "${primary_urls[antlr4-generator]}" "${secondary_urls[antlr4-generator]}"
antlr4_tag="4.9.2" # v4.9.2
antlr4_tag="4.10.1" # v4.10.1
repo_clone_try_double "${primary_urls[antlr4-code]}" "${secondary_urls[antlr4-code]}" "antlr4" "$antlr4_tag" true
pushd antlr4
git apply ../antlr4.patch
git apply ../antlr4.10.1.patch
popd
# cppitertools v2.0 2019-12-23
@@ -199,7 +199,7 @@ git apply ../rocksdb.patch
popd
# mgclient
mgclient_tag="96e95c6845463cbe88948392be58d26da0d5ffd3" # (2022-02-08)
mgclient_tag="v1.4.0" # (2022-06-14)
repo_clone_try_double "${primary_urls[mgclient]}" "${secondary_urls[mgclient]}" "mgclient" "$mgclient_tag"
sed -i 's/\${CMAKE_INSTALL_LIBDIR}/lib/' mgclient/src/CMakeLists.txt

@@ -1252,9 +1252,8 @@ int main(int argc, char **argv) {
// the triggers
auto storage_accessor = interpreter_context.db->Access();
auto dba = memgraph::query::DbAccessor{&storage_accessor};
interpreter_context.trigger_store.RestoreTriggers(&interpreter_context.ast_cache, &dba,
&interpreter_context.antlr_lock, interpreter_context.config.query,
interpreter_context.auth_checker);
interpreter_context.trigger_store.RestoreTriggers(
&interpreter_context.ast_cache, &dba, interpreter_context.config.query, interpreter_context.auth_checker);
}
// As the Stream transformations are using modules, they have to be restored after the query modules are loaded.

@@ -82,7 +82,7 @@ add_custom_command(
OUTPUT ${antlr_opencypher_generated_src} ${antlr_opencypher_generated_include}
COMMAND ${CMAKE_COMMAND} -E make_directory ${opencypher_generated}
COMMAND
java -jar ${CMAKE_SOURCE_DIR}/libs/antlr-4.9.2-complete.jar
java -jar ${CMAKE_SOURCE_DIR}/libs/antlr-4.10.1-complete.jar
-Dlanguage=Cpp -visitor -package antlropencypher
-o ${opencypher_generated}
${opencypher_lexer_grammar} ${opencypher_parser_grammar}

@@ -21,8 +21,7 @@ namespace memgraph::query {
CachedPlan::CachedPlan(std::unique_ptr<LogicalPlan> plan) : plan_(std::move(plan)) {}
ParsedQuery ParseQuery(const std::string &query_string, const std::map<std::string, storage::PropertyValue> &params,
utils::SkipList<QueryCacheEntry> *cache, utils::SpinLock *antlr_lock,
const InterpreterConfig::Query &query_config) {
utils::SkipList<QueryCacheEntry> *cache, const InterpreterConfig::Query &query_config) {
// Strip the query for caching purposes. The process of stripping a query
// "normalizes" it by replacing any literals with new parameters. This
// results in just the *structure* of the query being taken into account for
@@ -63,20 +62,16 @@ ParsedQuery ParseQuery(const std::string &query_string, const std::map<std::stri
};
if (it == accessor.end()) {
{
std::unique_lock<utils::SpinLock> guard(*antlr_lock);
try {
parser = std::make_unique<frontend::opencypher::Parser>(stripped_query.query());
} catch (const SyntaxException &e) {
// There is a syntax exception in the stripped query. Re-run the parser
// on the original query to get an appropriate error message.
parser = std::make_unique<frontend::opencypher::Parser>(query_string);
try {
parser = std::make_unique<frontend::opencypher::Parser>(stripped_query.query());
} catch (const SyntaxException &e) {
// There is a syntax exception in the stripped query. Re-run the parser
// on the original query to get an appropriate error message.
parser = std::make_unique<frontend::opencypher::Parser>(query_string);
// If an exception was not thrown here, the stripper messed something
// up.
LOG_FATAL("The stripped query can't be parsed, but the original can.");
}
// If an exception was not thrown here, the stripper messed something
// up.
LOG_FATAL("The stripped query can't be parsed, but the original can.");
}
// Convert the ANTLR4 parse tree into an AST.
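
As the comment above explains, stripping replaces literals with parameters so that structurally identical queries share one cache entry. A rough, hypothetical illustration of the idea (the real stripper is lexer-based and handles strings, floats, lists, and more, not just integers):

```cpp
#include <iostream>
#include <regex>
#include <string>

// Toy literal stripper: replaces integer literals with a parameter so that
// "... n.age > 42 ..." and "... n.age > 7 ..." map to the same cache key.
std::string StripLiterals(const std::string &query) {
  static const std::regex kIntLiteral{R"(\b\d+\b)"};
  // "$$" escapes a literal '$' in the replacement string.
  return std::regex_replace(query, kIntLiteral, "$$param");
}

int main() {
  std::cout << StripLiterals("MATCH (n) WHERE n.age > 42 RETURN n") << '\n';
  // prints: MATCH (n) WHERE n.age > $param RETURN n
}
```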

@@ -111,8 +111,7 @@ struct ParsedQuery {
};
ParsedQuery ParseQuery(const std::string &query_string, const std::map<std::string, storage::PropertyValue> &params,
utils::SkipList<QueryCacheEntry> *cache, utils::SpinLock *antlr_lock,
const InterpreterConfig::Query &query_config);
utils::SkipList<QueryCacheEntry> *cache, const InterpreterConfig::Query &query_config);
class SingleNodeLogicalPlan final : public LogicalPlan {
public:

File diff suppressed because it is too large.

@@ -115,7 +115,7 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
auto operators = ExtractOperators(all_children, allowed_operators);
for (auto *expression : _expressions) {
expressions.push_back(expression->accept(this));
expressions.push_back(std::any_cast<Expression *>(expression->accept(this)));
}
Expression *first_operand = expressions[0];
@@ -131,7 +131,7 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
DMG_ASSERT(_expression, "can't happen");
auto operators = ExtractOperators(all_children, allowed_operators);
Expression *expression = _expression->accept(this);
Expression *expression = std::any_cast<Expression *>(_expression->accept(this));
for (int i = (int)operators.size() - 1; i >= 0; --i) {
expression = CreateUnaryOperatorByToken(operators[i], expression);
}
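
Context for the `std::any_cast` changes here: from 4.10 on, the ANTLR C++ runtime's visitor methods return `std::any` rather than the old `antlrcpp::Any`, so results of `accept()` must be unwrapped explicitly. A minimal sketch of the cast semantics:

```cpp
#include <any>
#include <cassert>

int main() {
  std::any result = 42;                    // e.g. what a visitor's accept() now returns
  int value = std::any_cast<int>(result);  // throws std::bad_any_cast on a type mismatch
  assert(value == 42);

  // The pointer overload returns nullptr instead of throwing.
  assert(std::any_cast<double>(&result) == nullptr);
}
```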

@@ -1278,7 +1278,7 @@ PreparedQuery PrepareExplainQuery(ParsedQuery parsed_query, std::map<std::string
// full query string) when given just the inner query to execute.
ParsedQuery parsed_inner_query =
ParseQuery(parsed_query.query_string.substr(kExplainQueryStart.size()), parsed_query.user_parameters,
&interpreter_context->ast_cache, &interpreter_context->antlr_lock, interpreter_context->config.query);
&interpreter_context->ast_cache, interpreter_context->config.query);
auto *cypher_query = utils::Downcast<CypherQuery>(parsed_inner_query.query);
MG_ASSERT(cypher_query, "Cypher grammar should not allow other queries in EXPLAIN");
@@ -1345,7 +1345,7 @@ PreparedQuery PrepareProfileQuery(ParsedQuery parsed_query, bool in_explicit_tra
// full query string) when given just the inner query to execute.
ParsedQuery parsed_inner_query =
ParseQuery(parsed_query.query_string.substr(kProfileQueryStart.size()), parsed_query.user_parameters,
&interpreter_context->ast_cache, &interpreter_context->antlr_lock, interpreter_context->config.query);
&interpreter_context->ast_cache, interpreter_context->config.query);
auto *cypher_query = utils::Downcast<CypherQuery>(parsed_inner_query.query);
MG_ASSERT(cypher_query, "Cypher grammar should not allow other queries in PROFILE");
@@ -1663,8 +1663,7 @@ Callback CreateTrigger(TriggerQuery *trigger_query,
interpreter_context->trigger_store.AddTrigger(
std::move(trigger_name), trigger_statement, user_parameters, ToTriggerEventType(event_type),
before_commit ? TriggerPhase::BEFORE_COMMIT : TriggerPhase::AFTER_COMMIT, &interpreter_context->ast_cache,
dba, &interpreter_context->antlr_lock, interpreter_context->config.query, std::move(owner),
interpreter_context->auth_checker);
dba, interpreter_context->config.query, std::move(owner), interpreter_context->auth_checker);
return {};
}};
}
@@ -2246,8 +2245,8 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
query_execution->summary["cost_estimate"] = 0.0;
utils::Timer parsing_timer;
ParsedQuery parsed_query = ParseQuery(query_string, params, &interpreter_context_->ast_cache,
&interpreter_context_->antlr_lock, interpreter_context_->config.query);
ParsedQuery parsed_query =
ParseQuery(query_string, params, &interpreter_context_->ast_cache, interpreter_context_->config.query);
query_execution->summary["parsing_time"] = parsing_timer.Elapsed().count();
// Some queries require an active transaction in order to be prepared.

@@ -173,13 +173,6 @@ struct InterpreterContext {
storage::Storage *db;
// ANTLR has singleton instance that is shared between threads. It is
// protected by locks inside of ANTLR. Unfortunately, they are not protected
// in a very good way. Once we have ANTLR version without race conditions we
// can remove this lock. This will probably never happen since ANTLR
// developers introduce more bugs in each version. Fortunately, we have
// cache so this lock probably won't impact performance much...
utils::SpinLock antlr_lock;
std::optional<double> tsc_frequency{utils::GetTSCFrequency()};
std::atomic<bool> is_shutting_down{false};

@@ -2617,13 +2617,13 @@ namespace {
* when there are */
TypedValue DefaultAggregationOpValue(const Aggregate::Element &element, utils::MemoryResource *memory) {
switch (element.op) {
case Aggregation::Op::COUNT:
return TypedValue(0, memory);
case Aggregation::Op::SUM:
case Aggregation::Op::MIN:
case Aggregation::Op::MAX:
case Aggregation::Op::AVG:
return TypedValue(memory);
case Aggregation::Op::COUNT:
case Aggregation::Op::SUM:
return TypedValue(0, memory);
case Aggregation::Op::COLLECT_LIST:
return TypedValue(TypedValue::TVector(memory));
case Aggregation::Op::COLLECT_MAP:
@@ -2645,9 +2645,7 @@ class AggregateCursor : public Cursor {
pulled_all_input_ = true;
aggregation_it_ = aggregation_.begin();
// in case there is no input and no group_bys we need to return true
// just this once
if (aggregation_.empty() && self_.group_by_.empty()) {
if (aggregation_.empty()) {
auto *pull_memory = context.evaluation_context.memory;
// place default aggregation values on the frame
for (const auto &elem : self_.aggregations_)
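
The semantics being changed above: on empty input, COUNT and SUM now default to 0, while MIN, MAX, and AVG stay null, and the default row is produced even when a GROUP BY is present. A hypothetical mirror of those defaults (null modeled with `std::optional`):

```cpp
#include <algorithm>
#include <optional>
#include <vector>

// COUNT/SUM start from 0, so they are well-defined on empty input.
std::optional<long> Sum(const std::vector<long> &xs) {
  long total = 0;
  for (const long x : xs) total += x;
  return total;  // 0 for empty input
}

// MIN (and MAX/AVG) have no meaningful zero value, so they stay null.
std::optional<long> Min(const std::vector<long> &xs) {
  std::optional<long> best;
  for (const long x : xs) best = best ? std::min(*best, x) : x;
  return best;  // std::nullopt ("null") for empty input
}

int main() {
  const std::vector<long> empty;
  return (Sum(empty) == 0 && !Min(empty).has_value()) ? 0 : 1;
}
```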

@@ -153,10 +153,10 @@ std::vector<std::pair<Identifier, TriggerIdentifierTag>> GetPredefinedIdentifier
Trigger::Trigger(std::string name, const std::string &query,
const std::map<std::string, storage::PropertyValue> &user_parameters,
const TriggerEventType event_type, utils::SkipList<QueryCacheEntry> *query_cache,
DbAccessor *db_accessor, utils::SpinLock *antlr_lock, const InterpreterConfig::Query &query_config,
DbAccessor *db_accessor, const InterpreterConfig::Query &query_config,
std::optional<std::string> owner, const query::AuthChecker *auth_checker)
: name_{std::move(name)},
parsed_statements_{ParseQuery(query, user_parameters, query_cache, antlr_lock, query_config)},
parsed_statements_{ParseQuery(query, user_parameters, query_cache, query_config)},
event_type_{event_type},
owner_{std::move(owner)} {
// We check immediately if the query is valid by trying to create a plan.
@@ -257,7 +257,7 @@ inline constexpr uint64_t kVersion{2};
TriggerStore::TriggerStore(std::filesystem::path directory) : storage_{std::move(directory)} {}
void TriggerStore::RestoreTriggers(utils::SkipList<QueryCacheEntry> *query_cache, DbAccessor *db_accessor,
utils::SpinLock *antlr_lock, const InterpreterConfig::Query &query_config,
const InterpreterConfig::Query &query_config,
const query::AuthChecker *auth_checker) {
MG_ASSERT(before_commit_triggers_.size() == 0 && after_commit_triggers_.size() == 0,
"Cannot restore trigger when some triggers already exist!");
@@ -317,8 +317,8 @@ void TriggerStore::RestoreTriggers(utils::SkipList<QueryCacheEntry> *query_cache
std::optional<Trigger> trigger;
try {
trigger.emplace(trigger_name, statement, user_parameters, event_type, query_cache, db_accessor, antlr_lock,
query_config, std::move(owner), auth_checker);
trigger.emplace(trigger_name, statement, user_parameters, event_type, query_cache, db_accessor, query_config,
std::move(owner), auth_checker);
} catch (const utils::BasicException &e) {
spdlog::warn("Failed to create trigger '{}' because: {}", trigger_name, e.what());
continue;
@@ -336,8 +336,8 @@ void TriggerStore::AddTrigger(std::string name, const std::string &query,
const std::map<std::string, storage::PropertyValue> &user_parameters,
TriggerEventType event_type, TriggerPhase phase,
utils::SkipList<QueryCacheEntry> *query_cache, DbAccessor *db_accessor,
utils::SpinLock *antlr_lock, const InterpreterConfig::Query &query_config,
std::optional<std::string> owner, const query::AuthChecker *auth_checker) {
const InterpreterConfig::Query &query_config, std::optional<std::string> owner,
const query::AuthChecker *auth_checker) {
std::unique_lock store_guard{store_lock_};
if (storage_.Get(name)) {
throw utils::BasicException("Trigger with the same name already exists.");
@@ -345,8 +345,8 @@ void TriggerStore::AddTrigger(std::string name, const std::string &query,
std::optional<Trigger> trigger;
try {
trigger.emplace(std::move(name), query, user_parameters, event_type, query_cache, db_accessor, antlr_lock,
query_config, std::move(owner), auth_checker);
trigger.emplace(std::move(name), query, user_parameters, event_type, query_cache, db_accessor, query_config,
std::move(owner), auth_checker);
} catch (const utils::BasicException &e) {
const auto identifiers = GetPredefinedIdentifiers(event_type);
std::stringstream identifier_names_stream;

@@ -34,7 +34,7 @@ namespace memgraph::query {
struct Trigger {
explicit Trigger(std::string name, const std::string &query,
const std::map<std::string, storage::PropertyValue> &user_parameters, TriggerEventType event_type,
utils::SkipList<QueryCacheEntry> *query_cache, DbAccessor *db_accessor, utils::SpinLock *antlr_lock,
utils::SkipList<QueryCacheEntry> *query_cache, DbAccessor *db_accessor,
const InterpreterConfig::Query &query_config, std::optional<std::string> owner,
const query::AuthChecker *auth_checker);
@@ -81,14 +81,13 @@ struct TriggerStore {
explicit TriggerStore(std::filesystem::path directory);
void RestoreTriggers(utils::SkipList<QueryCacheEntry> *query_cache, DbAccessor *db_accessor,
utils::SpinLock *antlr_lock, const InterpreterConfig::Query &query_config,
const query::AuthChecker *auth_checker);
const InterpreterConfig::Query &query_config, const query::AuthChecker *auth_checker);
void AddTrigger(std::string name, const std::string &query,
const std::map<std::string, storage::PropertyValue> &user_parameters, TriggerEventType event_type,
TriggerPhase phase, utils::SkipList<QueryCacheEntry> *query_cache, DbAccessor *db_accessor,
utils::SpinLock *antlr_lock, const InterpreterConfig::Query &query_config,
std::optional<std::string> owner, const query::AuthChecker *auth_checker);
const InterpreterConfig::Query &query_config, std::optional<std::string> owner,
const query::AuthChecker *auth_checker);
void DropTrigger(const std::string &name);

@@ -84,7 +84,7 @@ add_custom_command(
OUTPUT ${antlr_opencypher_generated_src} ${antlr_opencypher_generated_include}
COMMAND ${CMAKE_COMMAND} -E make_directory ${opencypher_generated}
COMMAND
java -jar ${CMAKE_SOURCE_DIR}/libs/antlr-4.9.2-complete.jar
java -jar ${CMAKE_SOURCE_DIR}/libs/antlr-4.10.1-complete.jar
-Dlanguage=Cpp -visitor -package antlropencypher
-o ${opencypher_generated}
${opencypher_lexer_grammar} ${opencypher_parser_grammar}

@@ -21,8 +21,7 @@ namespace memgraph::query::v2 {
CachedPlan::CachedPlan(std::unique_ptr<LogicalPlan> plan) : plan_(std::move(plan)) {}
ParsedQuery ParseQuery(const std::string &query_string, const std::map<std::string, storage::v3::PropertyValue> &params,
utils::SkipList<QueryCacheEntry> *cache, utils::SpinLock *antlr_lock,
const InterpreterConfig::Query &query_config) {
utils::SkipList<QueryCacheEntry> *cache, const InterpreterConfig::Query &query_config) {
// Strip the query for caching purposes. The process of stripping a query
// "normalizes" it by replacing any literals with new parameters. This
// results in just the *structure* of the query being taken into account for
@@ -63,20 +62,16 @@ ParsedQuery ParseQuery(const std::string &query_string, const std::map<std::stri
};
if (it == accessor.end()) {
{
std::unique_lock<utils::SpinLock> guard(*antlr_lock);
try {
parser = std::make_unique<frontend::opencypher::Parser>(stripped_query.query());
} catch (const SyntaxException &e) {
// There is a syntax exception in the stripped query. Re-run the parser
// on the original query to get an appropriate error message.
parser = std::make_unique<frontend::opencypher::Parser>(query_string);
try {
parser = std::make_unique<frontend::opencypher::Parser>(stripped_query.query());
} catch (const SyntaxException &e) {
// There is a syntax exception in the stripped query. Re-run the parser
// on the original query to get an appropriate error message.
parser = std::make_unique<frontend::opencypher::Parser>(query_string);
// If an exception was not thrown here, the stripper messed something
// up.
LOG_FATAL("The stripped query can't be parsed, but the original can.");
}
// If an exception was not thrown here, the stripper messed something
// up.
LOG_FATAL("The stripped query can't be parsed, but the original can.");
}
// Convert the ANTLR4 parse tree into an AST.

@@ -111,8 +111,7 @@ struct ParsedQuery {
};
ParsedQuery ParseQuery(const std::string &query_string, const std::map<std::string, storage::v3::PropertyValue> &params,
utils::SkipList<QueryCacheEntry> *cache, utils::SpinLock *antlr_lock,
const InterpreterConfig::Query &query_config);
utils::SkipList<QueryCacheEntry> *cache, const InterpreterConfig::Query &query_config);
class SingleNodeLogicalPlan final : public LogicalPlan {
public:

File diff suppressed because it is too large.

@@ -115,7 +115,7 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
auto operators = ExtractOperators(all_children, allowed_operators);
for (auto *expression : _expressions) {
expressions.push_back(expression->accept(this));
expressions.push_back(std::any_cast<Expression *>(expression->accept(this)));
}
Expression *first_operand = expressions[0];
@@ -131,7 +131,7 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
DMG_ASSERT(_expression, "can't happen");
auto operators = ExtractOperators(all_children, allowed_operators);
Expression *expression = _expression->accept(this);
Expression *expression = std::any_cast<Expression *>(_expression->accept(this));
for (int i = (int)operators.size() - 1; i >= 0; --i) {
expression = CreateUnaryOperatorByToken(operators[i], expression);
}

@@ -1186,7 +1186,7 @@ PreparedQuery PrepareExplainQuery(ParsedQuery parsed_query, std::map<std::string
// full query string) when given just the inner query to execute.
ParsedQuery parsed_inner_query =
ParseQuery(parsed_query.query_string.substr(kExplainQueryStart.size()), parsed_query.user_parameters,
&interpreter_context->ast_cache, &interpreter_context->antlr_lock, interpreter_context->config.query);
&interpreter_context->ast_cache, interpreter_context->config.query);
auto *cypher_query = utils::Downcast<CypherQuery>(parsed_inner_query.query);
MG_ASSERT(cypher_query, "Cypher grammar should not allow other queries in EXPLAIN");
@@ -1253,7 +1253,7 @@ PreparedQuery PrepareProfileQuery(ParsedQuery parsed_query, bool in_explicit_tra
// full query string) when given just the inner query to execute.
ParsedQuery parsed_inner_query =
ParseQuery(parsed_query.query_string.substr(kProfileQueryStart.size()), parsed_query.user_parameters,
&interpreter_context->ast_cache, &interpreter_context->antlr_lock, interpreter_context->config.query);
&interpreter_context->ast_cache, interpreter_context->config.query);
auto *cypher_query = utils::Downcast<CypherQuery>(parsed_inner_query.query);
MG_ASSERT(cypher_query, "Cypher grammar should not allow other queries in PROFILE");
@@ -1571,8 +1571,7 @@ Callback CreateTrigger(TriggerQuery *trigger_query,
interpreter_context->trigger_store.AddTrigger(
std::move(trigger_name), trigger_statement, user_parameters, ToTriggerEventType(event_type),
before_commit ? TriggerPhase::BEFORE_COMMIT : TriggerPhase::AFTER_COMMIT, &interpreter_context->ast_cache,
dba, &interpreter_context->antlr_lock, interpreter_context->config.query, std::move(owner),
interpreter_context->auth_checker);
dba, interpreter_context->config.query, std::move(owner), interpreter_context->auth_checker);
return {};
}};
}
@@ -2129,8 +2128,8 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
query_execution->summary["cost_estimate"] = 0.0;
utils::Timer parsing_timer;
ParsedQuery parsed_query = ParseQuery(query_string, params, &interpreter_context_->ast_cache,
&interpreter_context_->antlr_lock, interpreter_context_->config.query);
ParsedQuery parsed_query =
ParseQuery(query_string, params, &interpreter_context_->ast_cache, interpreter_context_->config.query);
query_execution->summary["parsing_time"] = parsing_timer.Elapsed().count();
// Some queries require an active transaction in order to be prepared.

@@ -171,13 +171,6 @@ struct InterpreterContext {
storage::v3::Storage *db;
// ANTLR has singleton instance that is shared between threads. It is
// protected by locks inside of ANTLR. Unfortunately, they are not protected
// in a very good way. Once we have ANTLR version without race conditions we
// can remove this lock. This will probably never happen since ANTLR
// developers introduce more bugs in each version. Fortunately, we have
// cache so this lock probably won't impact performance much...
utils::SpinLock antlr_lock;
std::optional<double> tsc_frequency{utils::GetTSCFrequency()};
std::atomic<bool> is_shutting_down{false};

@@ -153,10 +153,10 @@ std::vector<std::pair<Identifier, TriggerIdentifierTag>> GetPredefinedIdentifier
Trigger::Trigger(std::string name, const std::string &query,
const std::map<std::string, storage::v3::PropertyValue> &user_parameters,
const TriggerEventType event_type, utils::SkipList<QueryCacheEntry> *query_cache,
DbAccessor *db_accessor, utils::SpinLock *antlr_lock, const InterpreterConfig::Query &query_config,
DbAccessor *db_accessor, const InterpreterConfig::Query &query_config,
std::optional<std::string> owner, const query::v2::AuthChecker *auth_checker)
: name_{std::move(name)},
parsed_statements_{ParseQuery(query, user_parameters, query_cache, antlr_lock, query_config)},
parsed_statements_{ParseQuery(query, user_parameters, query_cache, query_config)},
event_type_{event_type},
owner_{std::move(owner)} {
// We check immediately if the query is valid by trying to create a plan.
@@ -257,7 +257,7 @@ inline constexpr uint64_t kVersion{2};
TriggerStore::TriggerStore(std::filesystem::path directory) : storage_{std::move(directory)} {}
void TriggerStore::RestoreTriggers(utils::SkipList<QueryCacheEntry> *query_cache, DbAccessor *db_accessor,
utils::SpinLock *antlr_lock, const InterpreterConfig::Query &query_config,
const InterpreterConfig::Query &query_config,
const query::v2::AuthChecker *auth_checker) {
MG_ASSERT(before_commit_triggers_.size() == 0 && after_commit_triggers_.size() == 0,
"Cannot restore trigger when some triggers already exist!");
@@ -317,8 +317,8 @@ void TriggerStore::RestoreTriggers(utils::SkipList<QueryCacheEntry> *query_cache
std::optional<Trigger> trigger;
try {
trigger.emplace(trigger_name, statement, user_parameters, event_type, query_cache, db_accessor, antlr_lock,
query_config, std::move(owner), auth_checker);
trigger.emplace(trigger_name, statement, user_parameters, event_type, query_cache, db_accessor, query_config,
std::move(owner), auth_checker);
} catch (const utils::BasicException &e) {
spdlog::warn("Failed to create trigger '{}' because: {}", trigger_name, e.what());
continue;
@@ -336,8 +336,8 @@ void TriggerStore::AddTrigger(std::string name, const std::string &query,
const std::map<std::string, storage::v3::PropertyValue> &user_parameters,
TriggerEventType event_type, TriggerPhase phase,
utils::SkipList<QueryCacheEntry> *query_cache, DbAccessor *db_accessor,
utils::SpinLock *antlr_lock, const InterpreterConfig::Query &query_config,
std::optional<std::string> owner, const query::v2::AuthChecker *auth_checker) {
const InterpreterConfig::Query &query_config, std::optional<std::string> owner,
const query::v2::AuthChecker *auth_checker) {
std::unique_lock store_guard{store_lock_};
if (storage_.Get(name)) {
throw utils::BasicException("Trigger with the same name already exists.");
@@ -345,8 +345,8 @@ void TriggerStore::AddTrigger(std::string name, const std::string &query,
std::optional<Trigger> trigger;
try {
trigger.emplace(std::move(name), query, user_parameters, event_type, query_cache, db_accessor, antlr_lock,
query_config, std::move(owner), auth_checker);
trigger.emplace(std::move(name), query, user_parameters, event_type, query_cache, db_accessor, query_config,
std::move(owner), auth_checker);
} catch (const utils::BasicException &e) {
const auto identifiers = GetPredefinedIdentifiers(event_type);
std::stringstream identifier_names_stream;

@@ -35,8 +35,8 @@ struct Trigger {
explicit Trigger(std::string name, const std::string &query,
const std::map<std::string, storage::v3::PropertyValue> &user_parameters,
TriggerEventType event_type, utils::SkipList<QueryCacheEntry> *query_cache, DbAccessor *db_accessor,
utils::SpinLock *antlr_lock, const InterpreterConfig::Query &query_config,
std::optional<std::string> owner, const AuthChecker *auth_checker);
const InterpreterConfig::Query &query_config, std::optional<std::string> owner,
const query::v2::AuthChecker *auth_checker);
void Execute(DbAccessor *dba, utils::MonotonicBufferResource *execution_memory, double max_execution_time_sec,
std::atomic<bool> *is_shutting_down, const TriggerContext &context,
@@ -81,14 +81,13 @@ struct TriggerStore {
explicit TriggerStore(std::filesystem::path directory);
void RestoreTriggers(utils::SkipList<QueryCacheEntry> *query_cache, DbAccessor *db_accessor,
utils::SpinLock *antlr_lock, const InterpreterConfig::Query &query_config,
const query::v2::AuthChecker *auth_checker);
const InterpreterConfig::Query &query_config, const query::v2::AuthChecker *auth_checker);
void AddTrigger(std::string name, const std::string &query,
const std::map<std::string, storage::v3::PropertyValue> &user_parameters, TriggerEventType event_type,
TriggerPhase phase, utils::SkipList<QueryCacheEntry> *query_cache, DbAccessor *db_accessor,
utils::SpinLock *antlr_lock, const InterpreterConfig::Query &query_config,
std::optional<std::string> owner, const query::v2::AuthChecker *auth_checker);
const InterpreterConfig::Query &query_config, std::optional<std::string> owner,
const query::v2::AuthChecker *auth_checker);
void DropTrigger(const std::string &name);

@@ -73,6 +73,16 @@ Feature: Aggregations
| n |
| 5 |
Scenario: Count test 07:
Given an empty graph
When executing query:
"""
RETURN count(null)
"""
Then the result should be:
| count(null) |
| 0 |
Scenario: Sum test 01:
Given an empty graph
And having executed
@@ -114,6 +124,16 @@ Feature: Aggregations
| 4 | 0 |
| 4 | 1 |
Scenario: Sum test 04:
Given an empty graph
When executing query:
"""
RETURN sum(null)
"""
Then the result should be:
| sum(null) |
| 0 |
Scenario: Avg test 01:
Given an empty graph
And having executed
@@ -155,6 +175,16 @@ Feature: Aggregations
| 2.0 | 0 |
| 4.0 | 1 |
Scenario: Avg test 04:
Given an empty graph
When executing query:
"""
RETURN avg(null)
"""
Then the result should be:
| avg(null) |
| null |
Scenario: Min test 01:
Given an empty graph
And having executed
@@ -196,6 +226,16 @@ Feature: Aggregations
| 1 | 0 |
| 4 | 1 |
Scenario: Min test 04:
Given an empty graph
When executing query:
"""
RETURN min(null)
"""
Then the result should be:
| min(null) |
| null |
Scenario: Max test 01:
Given an empty graph
And having executed
@@ -237,6 +277,16 @@ Feature: Aggregations
| 3 | 0 |
| 4 | 1 |
Scenario: Max test 04:
Given an empty graph
When executing query:
"""
RETURN max(null)
"""
Then the result should be:
| max(null) |
| null |
Scenario: Collect test 01:
Given an empty graph
And having executed
@@ -279,3 +329,18 @@ Feature: Aggregations
| n |
| {a_key: 13, b_key: 11, c_key: 12} |
Scenario: Combined aggregations - some evaluate to null:
Given an empty graph
And having executed
"""
CREATE (f)
CREATE (n {property: 1})
"""
When executing query:
"""
MATCH (n) RETURN count(n) < n.property, count(n.property), count(n), avg(n.property), min(n.property), max(n.property), sum(n.property)
"""
Then the result should be:
| count(n) < n.property | count(n.property) | count(n) | avg(n.property) | min(n.property) | max(n.property) | sum(n.property) |
| false | 1 | 1 | 1.0 | 1 | 1 | 1 |
| null | 0 | 1 | null | null | null | 0 |

@@ -20,6 +20,15 @@
#include "utils/signals.hpp"
#include "utils/stacktrace.hpp"
// This test was introduced because the Antlr Cpp runtime doesn't work well in a
// highly concurrent environment. Interpreter `interpret.hpp` used to contain an
// `antlr_lock` to avoid crashes.
// v4.6 and before -> Crashes.
// v4.8 -> Does NOT crash, but sometimes this test does NOT finish.
// Looks like a deadlock. -> The lock is still REQUIRED.
// v4.9 -> Seems to be working.
// v4.10 -> Seems to be working as well. -> antlr_lock removed
using namespace std::chrono_literals;
TEST(Antlr, Sigsegv) {
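
A minimal sketch (assumed shape, not the actual test body) of the pattern this regression test exercises: many threads constructing parsers concurrently with no shared lock:

```cpp
#include <thread>
#include <vector>

// Stand-in for constructing (and destroying) frontend::opencypher::Parser.
void ParseOnce() { /* parser construction + teardown would go here */ }

int main() {
  std::vector<std::thread> workers;
  for (int i = 0; i < 8; ++i) {
    workers.emplace_back([] {
      for (int j = 0; j < 1000; ++j) ParseOnce();
    });
  }
  // With the buggy ANTLR versions, this either crashed (v4.6) or hung (v4.8).
  for (auto &t : workers) t.join();
}
```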

@@ -17,3 +17,6 @@ endfunction(add_stress_test)
add_stress_test(long_running.cpp)
target_link_libraries(${test_prefix}long_running mg-communication mg-io mg-utils)
add_stress_test(parser.cpp)
target_link_libraries(${test_prefix}parser mg-communication mg-io mg-utils mgclient)

@@ -26,6 +26,11 @@ SMALL_DATASET = [
"options": ["--vertex-count", "40000", "--create-pack-size", "100"],
"timeout": 5,
},
{
"test": "parser.cpp",
"options": ["--per-worker-query-count", "1000"],
"timeout": 5,
},
{
"test": "long_running.cpp",
"options": ["--vertex-count", "1000", "--edge-count", "5000", "--max-time", "1", "--verify", "20"],
@@ -42,30 +47,35 @@ SMALL_DATASET = [
# bipartite.py and create_match.py run for approx. 15min
# long_running runs for 5min x 6 times = 30min
# long_running runs for 8h
LARGE_DATASET = [
{
"test": "bipartite.py",
"options": ["--u-count", "300", "--v-count", "300"],
"timeout": 30,
},
{
"test": "create_match.py",
"options": ["--vertex-count", "500000", "--create-pack-size", "500"],
"timeout": 30,
},
] + [
{
"test": "long_running.cpp",
"options": ["--vertex-count", "10000", "--edge-count", "40000", "--max-time", "5", "--verify", "60"],
"timeout": 16,
},
] * 6 + [
{
"test": "long_running.cpp",
"options": ["--vertex-count", "200000", "--edge-count", "1000000", "--max-time", "480", "--verify", "300"],
"timeout": 500,
},
]
LARGE_DATASET = (
[
{
"test": "bipartite.py",
"options": ["--u-count", "300", "--v-count", "300"],
"timeout": 30,
},
{
"test": "create_match.py",
"options": ["--vertex-count", "500000", "--create-pack-size", "500"],
"timeout": 30,
},
]
+ [
{
"test": "long_running.cpp",
"options": ["--vertex-count", "10000", "--edge-count", "40000", "--max-time", "5", "--verify", "60"],
"timeout": 16,
},
]
* 6
+ [
{
"test": "long_running.cpp",
"options": ["--vertex-count", "200000", "--edge-count", "1000000", "--max-time", "480", "--verify", "300"],
"timeout": 500,
},
]
)
# paths
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
@@ -101,8 +111,7 @@ def run_test(args, test, options, timeout):
# find binary
if test.endswith(".py"):
logging = "DEBUG" if args.verbose else "WARNING"
binary = [args.python, "-u", os.path.join(SCRIPT_DIR, test),
"--logging", logging]
binary = [args.python, "-u", os.path.join(SCRIPT_DIR, test), "--logging", logging]
elif test.endswith(".cpp"):
exe = os.path.join(BUILD_DIR, "tests", "stress", test[:-4])
binary = [exe]
@@ -112,11 +121,10 @@ def run_test(args, test, options, timeout):
# start test
cmd = binary + ["--worker-count", str(THREADS)] + options
start = time.time()
ret_test = subprocess.run(cmd, cwd = SCRIPT_DIR, timeout = timeout * 60)
ret_test = subprocess.run(cmd, cwd=SCRIPT_DIR, timeout=timeout * 60)
if ret_test.returncode != 0:
raise Exception("Test '{}' binary returned non-zero ({})!".format(
test, ret_test.returncode))
raise Exception("Test '{}' binary returned non-zero ({})!".format(test, ret_test.returncode))
runtime = time.time() - start
print(" Done after {:.3f} seconds".format(runtime))
@@ -125,39 +133,54 @@ def run_test(args, test, options, timeout):
# parse arguments
parser = argparse.ArgumentParser(description = "Run stress tests on Memgraph.")
parser.add_argument("--memgraph", default = os.path.join(BUILD_DIR,
"memgraph"))
parser.add_argument("--log-file", default = "")
parser.add_argument("--data-directory", default = "")
parser.add_argument("--python", default = os.path.join(SCRIPT_DIR,
"ve3", "bin", "python3"), type = str)
parser.add_argument("--large-dataset", action = "store_const",
const = True, default = False)
parser.add_argument("--use-ssl", action = "store_const",
const = True, default = False)
parser.add_argument("--verbose", action = "store_const",
const = True, default = False)
parser = argparse.ArgumentParser(description="Run stress tests on Memgraph.")
parser.add_argument("--memgraph", default=os.path.join(BUILD_DIR, "memgraph"))
parser.add_argument("--log-file", default="")
parser.add_argument("--data-directory", default="")
parser.add_argument("--python", default=os.path.join(SCRIPT_DIR, "ve3", "bin", "python3"), type=str)
parser.add_argument("--large-dataset", action="store_const", const=True, default=False)
parser.add_argument("--use-ssl", action="store_const", const=True, default=False)
parser.add_argument("--verbose", action="store_const", const=True, default=False)
args = parser.parse_args()
# generate temporary SSL certs
if args.use_ssl:
# https://unix.stackexchange.com/questions/104171/create-ssl-certificate-non-interactively
subj = "/C=HR/ST=Zagreb/L=Zagreb/O=Memgraph/CN=db.memgraph.com"
subprocess.run(["openssl", "req", "-new", "-newkey", "rsa:4096",
"-days", "365", "-nodes", "-x509", "-subj", subj,
"-keyout", KEY_FILE, "-out", CERT_FILE], check=True)
subprocess.run(
[
"openssl",
"req",
"-new",
"-newkey",
"rsa:4096",
"-days",
"365",
"-nodes",
"-x509",
"-subj",
subj,
"-keyout",
KEY_FILE,
"-out",
CERT_FILE,
],
check=True,
)
# start memgraph
cwd = os.path.dirname(args.memgraph)
cmd = [args.memgraph, "--bolt-num-workers=" + str(THREADS),
"--storage-properties-on-edges=true",
"--storage-snapshot-on-exit=true",
"--storage-snapshot-interval-sec=600",
"--storage-snapshot-retention-count=1",
"--storage-wal-enabled=true",
"--storage-recover-on-startup=false",
"--query-execution-timeout-sec=1200"]
cmd = [
args.memgraph,
"--bolt-num-workers=" + str(THREADS),
"--storage-properties-on-edges=true",
"--storage-snapshot-on-exit=true",
"--storage-snapshot-interval-sec=600",
"--storage-snapshot-retention-count=1",
"--storage-wal-enabled=true",
"--storage-recover-on-startup=false",
"--query-execution-timeout-sec=1200",
]
if not args.verbose:
cmd += ["--log-level", "WARNING"]
if args.log_file:
@@ -166,7 +189,7 @@ if args.data_directory:
cmd += ["--data-directory", args.data_directory]
if args.use_ssl:
cmd += ["--bolt-cert-file", CERT_FILE, "--bolt-key-file", KEY_FILE]
proc_mg = subprocess.Popen(cmd, cwd = cwd)
proc_mg = subprocess.Popen(cmd, cwd=cwd)
wait_for_server(7687)
assert proc_mg.poll() is None, "The database binary died prematurely!"
@@ -174,10 +197,12 @@ assert proc_mg.poll() is None, "The database binary died prematurely!"
@atexit.register
def cleanup():
global proc_mg
if proc_mg.poll() != None: return
if proc_mg.poll() != None:
return
proc_mg.kill()
proc_mg.wait()
# run tests
runtimes = {}
dataset = LARGE_DATASET if args.large_dataset else SMALL_DATASET

tests/stress/parser.cpp (new file)

@@ -0,0 +1,79 @@
// Copyright 2022 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#include <limits>
#include <random>
#include <thread>
#include <fmt/format.h>
#include <gflags/gflags.h>
#include "communication/bolt/client.hpp"
#include "io/network/endpoint.hpp"
#include "mgclient.hpp"
#include "utils/timer.hpp"
DEFINE_string(address, "127.0.0.1", "Server address");
DEFINE_int32(port, 7687, "Server port");
DEFINE_string(username, "", "Username for the database");
DEFINE_string(password, "", "Password for the database");
DEFINE_bool(use_ssl, false, "Set to true to connect with SSL to the server.");
DEFINE_int32(worker_count, 1, "The number of concurrent workers executing queries against the server.");
DEFINE_int32(per_worker_query_count, 100, "The number of queries each worker will try to execute.");
auto make_client() {
mg::Client::Params params;
params.host = FLAGS_address;
params.port = static_cast<uint16_t>(FLAGS_port);
params.use_ssl = FLAGS_use_ssl;
return mg::Client::Connect(params);
}
int main(int argc, char **argv) {
gflags::ParseCommandLineFlags(&argc, &argv, true);
mg::Client::Init();
spdlog::info("Cleaning the database instance...");
auto client = make_client();
client->Execute("MATCH (n) DETACH DELETE n");
client->DiscardAll();
spdlog::info(fmt::format("Starting parser stress test with {} workers and {} queries per worker...",
FLAGS_worker_count, FLAGS_per_worker_query_count));
std::vector<std::thread> threads;
memgraph::utils::Timer timer;
for (int i = 0; i < FLAGS_worker_count; ++i) {
threads.push_back(std::thread([]() {
auto client = make_client();
std::mt19937 generator{std::random_device{}()};
std::uniform_int_distribution<uint64_t> distribution{std::numeric_limits<uint64_t>::min(),
std::numeric_limits<uint64_t>::max()};
for (int i = 0; i < FLAGS_per_worker_query_count; ++i) {
try {
auto is_executed = client->Execute(fmt::format("MATCH (n:Label{}) RETURN n;", distribution(generator)));
if (!is_executed) {
LOG_FATAL("One of the parser stress test queries failed.");
}
client->FetchAll();
} catch (const std::exception &e) {
LOG_FATAL("One of the parser stress test queries failed.");
}
}
}));
}
std::ranges::for_each(threads, [](auto &t) { t.join(); });
spdlog::info(
fmt::format("All queries executed in {:.4f}s. The parser managed to handle the load.", timer.Elapsed().count()));
mg::Client::Finalize();
return 0;
}

@@ -217,31 +217,40 @@ TEST_F(QueryPlanAggregateOps, WithData) {
TEST_F(QueryPlanAggregateOps, WithoutDataWithGroupBy) {
{
auto results = AggregationResults(true, {Aggregation::Op::COUNT});
EXPECT_EQ(results.size(), 0);
EXPECT_EQ(results.size(), 1);
EXPECT_EQ(results[0][0].type(), TypedValue::Type::Int);
EXPECT_EQ(results[0][0].ValueInt(), 0);
}
{
auto results = AggregationResults(true, {Aggregation::Op::SUM});
EXPECT_EQ(results.size(), 0);
EXPECT_EQ(results.size(), 1);
EXPECT_EQ(results[0][0].type(), TypedValue::Type::Int);
EXPECT_EQ(results[0][0].ValueInt(), 0);
}
{
auto results = AggregationResults(true, {Aggregation::Op::AVG});
EXPECT_EQ(results.size(), 0);
EXPECT_EQ(results.size(), 1);
EXPECT_EQ(results[0][0].type(), TypedValue::Type::Null);
}
{
auto results = AggregationResults(true, {Aggregation::Op::MIN});
EXPECT_EQ(results.size(), 0);
EXPECT_EQ(results.size(), 1);
EXPECT_EQ(results[0][0].type(), TypedValue::Type::Null);
}
{
auto results = AggregationResults(true, {Aggregation::Op::MAX});
EXPECT_EQ(results.size(), 0);
EXPECT_EQ(results.size(), 1);
EXPECT_EQ(results[0][0].type(), TypedValue::Type::Null);
}
{
auto results = AggregationResults(true, {Aggregation::Op::COLLECT_LIST});
EXPECT_EQ(results.size(), 0);
EXPECT_EQ(results.size(), 1);
EXPECT_EQ(results[0][0].type(), TypedValue::Type::List);
}
{
auto results = AggregationResults(true, {Aggregation::Op::COLLECT_MAP});
EXPECT_EQ(results.size(), 0);
EXPECT_EQ(results.size(), 1);
EXPECT_EQ(results[0][0].type(), TypedValue::Type::Map);
}
}
@@ -260,7 +269,7 @@ TEST_F(QueryPlanAggregateOps, WithoutDataWithoutGroupBy) {
// max
EXPECT_TRUE(results[0][3].IsNull());
// sum
EXPECT_TRUE(results[0][4].IsNull());
EXPECT_EQ(results[0][4].ValueInt(), 0);
// avg
EXPECT_TRUE(results[0][5].IsNull());
// collect list

@@ -891,7 +891,6 @@ class TriggerStoreTest : public ::testing::Test {
std::optional<memgraph::query::DbAccessor> dba;
memgraph::utils::SkipList<memgraph::query::QueryCacheEntry> ast_cache;
memgraph::utils::SpinLock antlr_lock;
memgraph::query::AllowEverythingAuthChecker auth_checker;
private:
@@ -909,7 +908,7 @@ TEST_F(TriggerStoreTest, Restore) {
const auto reset_store = [&] {
store.emplace(testing_directory);
store->RestoreTriggers(&ast_cache, &*dba, &antlr_lock, memgraph::query::InterpreterConfig::Query{}, &auth_checker);
store->RestoreTriggers(&ast_cache, &*dba, memgraph::query::InterpreterConfig::Query{}, &auth_checker);
};
reset_store();
@@ -930,12 +929,12 @@ TEST_F(TriggerStoreTest, Restore) {
store->AddTrigger(
trigger_name_before, trigger_statement,
std::map<std::string, memgraph::storage::PropertyValue>{{"parameter", memgraph::storage::PropertyValue{1}}},
event_type, memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba, &antlr_lock,
event_type, memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba,
memgraph::query::InterpreterConfig::Query{}, std::nullopt, &auth_checker);
store->AddTrigger(
trigger_name_after, trigger_statement,
std::map<std::string, memgraph::storage::PropertyValue>{{"parameter", memgraph::storage::PropertyValue{"value"}}},
event_type, memgraph::query::TriggerPhase::AFTER_COMMIT, &ast_cache, &*dba, &antlr_lock,
event_type, memgraph::query::TriggerPhase::AFTER_COMMIT, &ast_cache, &*dba,
memgraph::query::InterpreterConfig::Query{}, {owner}, &auth_checker);
const auto check_triggers = [&] {
@@ -986,16 +985,16 @@ TEST_F(TriggerStoreTest, AddTrigger) {
// Invalid query in statements
ASSERT_THROW(store.AddTrigger("trigger", "RETUR 1", {}, memgraph::query::TriggerEventType::VERTEX_CREATE,
memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba, &antlr_lock,
memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba,
memgraph::query::InterpreterConfig::Query{}, std::nullopt, &auth_checker),
memgraph::utils::BasicException);
ASSERT_THROW(store.AddTrigger("trigger", "RETURN createdEdges", {}, memgraph::query::TriggerEventType::VERTEX_CREATE,
memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba, &antlr_lock,
memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba,
memgraph::query::InterpreterConfig::Query{}, std::nullopt, &auth_checker),
memgraph::utils::BasicException);
ASSERT_THROW(store.AddTrigger("trigger", "RETURN $parameter", {}, memgraph::query::TriggerEventType::VERTEX_CREATE,
memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba, &antlr_lock,
memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba,
memgraph::query::InterpreterConfig::Query{}, std::nullopt, &auth_checker),
memgraph::utils::BasicException);
@@ -1003,15 +1002,15 @@ TEST_F(TriggerStoreTest, AddTrigger) {
"trigger", "RETURN $parameter",
std::map<std::string, memgraph::storage::PropertyValue>{{"parameter", memgraph::storage::PropertyValue{1}}},
memgraph::query::TriggerEventType::VERTEX_CREATE, memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba,
&antlr_lock, memgraph::query::InterpreterConfig::Query{}, std::nullopt, &auth_checker));
memgraph::query::InterpreterConfig::Query{}, std::nullopt, &auth_checker));
// Inserting with the same name
ASSERT_THROW(store.AddTrigger("trigger", "RETURN 1", {}, memgraph::query::TriggerEventType::VERTEX_CREATE,
memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba, &antlr_lock,
memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba,
memgraph::query::InterpreterConfig::Query{}, std::nullopt, &auth_checker),
memgraph::utils::BasicException);
ASSERT_THROW(store.AddTrigger("trigger", "RETURN 1", {}, memgraph::query::TriggerEventType::VERTEX_CREATE,
memgraph::query::TriggerPhase::AFTER_COMMIT, &ast_cache, &*dba, &antlr_lock,
memgraph::query::TriggerPhase::AFTER_COMMIT, &ast_cache, &*dba,
memgraph::query::InterpreterConfig::Query{}, std::nullopt, &auth_checker),
memgraph::utils::BasicException);
@@ -1027,7 +1026,7 @@ TEST_F(TriggerStoreTest, DropTrigger) {
const auto *trigger_name = "trigger";
store.AddTrigger(trigger_name, "RETURN 1", {}, memgraph::query::TriggerEventType::VERTEX_CREATE,
memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba, &antlr_lock,
memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba,
memgraph::query::InterpreterConfig::Query{}, std::nullopt, &auth_checker);
ASSERT_THROW(store.DropTrigger("Unknown"), memgraph::utils::BasicException);
@@ -1040,7 +1039,7 @@ TEST_F(TriggerStoreTest, TriggerInfo) {
std::vector<memgraph::query::TriggerStore::TriggerInfo> expected_info;
store.AddTrigger("trigger", "RETURN 1", {}, memgraph::query::TriggerEventType::VERTEX_CREATE,
memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba, &antlr_lock,
memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba,
memgraph::query::InterpreterConfig::Query{}, std::nullopt, &auth_checker);
expected_info.push_back({"trigger", "RETURN 1", memgraph::query::TriggerEventType::VERTEX_CREATE,
memgraph::query::TriggerPhase::BEFORE_COMMIT});
@@ -1060,7 +1059,7 @@ TEST_F(TriggerStoreTest, TriggerInfo) {
check_trigger_info();
store.AddTrigger("edge_update_trigger", "RETURN 1", {}, memgraph::query::TriggerEventType::EDGE_UPDATE,
memgraph::query::TriggerPhase::AFTER_COMMIT, &ast_cache, &*dba, &antlr_lock,
memgraph::query::TriggerPhase::AFTER_COMMIT, &ast_cache, &*dba,
memgraph::query::InterpreterConfig::Query{}, std::nullopt, &auth_checker);
expected_info.push_back({"edge_update_trigger", "RETURN 1", memgraph::query::TriggerEventType::EDGE_UPDATE,
memgraph::query::TriggerPhase::AFTER_COMMIT});
@@ -1174,7 +1173,7 @@ TEST_F(TriggerStoreTest, AnyTriggerAllKeywords) {
for (const auto keyword : keywords) {
SCOPED_TRACE(keyword);
EXPECT_NO_THROW(store.AddTrigger(trigger_name, fmt::format("RETURN {}", keyword), {}, event_type,
memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba, &antlr_lock,
memgraph::query::TriggerPhase::BEFORE_COMMIT, &ast_cache, &*dba,
memgraph::query::InterpreterConfig::Query{}, std::nullopt, &auth_checker));
store.DropTrigger(trigger_name);
}
@@ -1199,23 +1198,23 @@ TEST_F(TriggerStoreTest, AuthCheckerUsage) {
ASSERT_NO_THROW(store->AddTrigger("successfull_trigger_1", "CREATE (n:VERTEX) RETURN n", {},
memgraph::query::TriggerEventType::EDGE_UPDATE,
memgraph::query::TriggerPhase::AFTER_COMMIT, &ast_cache, &*dba, &antlr_lock,
memgraph::query::TriggerPhase::AFTER_COMMIT, &ast_cache, &*dba,
memgraph::query::InterpreterConfig::Query{}, std::nullopt, &mock_checker));
ASSERT_NO_THROW(store->AddTrigger("successfull_trigger_2", "CREATE (n:VERTEX) RETURN n", {},
memgraph::query::TriggerEventType::EDGE_UPDATE,
memgraph::query::TriggerPhase::AFTER_COMMIT, &ast_cache, &*dba, &antlr_lock,
memgraph::query::TriggerPhase::AFTER_COMMIT, &ast_cache, &*dba,
memgraph::query::InterpreterConfig::Query{}, owner, &mock_checker));
EXPECT_CALL(mock_checker, IsUserAuthorized(std::optional<std::string>{}, ElementsAre(Privilege::MATCH)))
.Times(1)
.WillOnce(Return(false));
ASSERT_THROW(store->AddTrigger("unprivileged_trigger", "MATCH (n:VERTEX) RETURN n", {},
memgraph::query::TriggerEventType::EDGE_UPDATE,
memgraph::query::TriggerPhase::AFTER_COMMIT, &ast_cache, &*dba, &antlr_lock,
memgraph::query::InterpreterConfig::Query{}, std::nullopt, &mock_checker);
, memgraph::utils::BasicException);
ASSERT_THROW(
store->AddTrigger("unprivileged_trigger", "MATCH (n:VERTEX) RETURN n", {},
memgraph::query::TriggerEventType::EDGE_UPDATE, memgraph::query::TriggerPhase::AFTER_COMMIT,
&ast_cache, &*dba, memgraph::query::InterpreterConfig::Query{}, std::nullopt, &mock_checker);
, memgraph::utils::BasicException);
store.emplace(testing_directory);
EXPECT_CALL(mock_checker, IsUserAuthorized(std::optional<std::string>{}, ElementsAre(Privilege::CREATE)))
@@ -1223,8 +1222,8 @@ TEST_F(TriggerStoreTest, AuthCheckerUsage) {
.WillOnce(Return(false));
EXPECT_CALL(mock_checker, IsUserAuthorized(owner, ElementsAre(Privilege::CREATE))).Times(1).WillOnce(Return(true));
ASSERT_NO_THROW(store->RestoreTriggers(&ast_cache, &*dba, &antlr_lock, memgraph::query::InterpreterConfig::Query{},
&mock_checker));
ASSERT_NO_THROW(
store->RestoreTriggers(&ast_cache, &*dba, memgraph::query::InterpreterConfig::Query{}, &mock_checker));
const auto triggers = store->GetTriggerInfo();
ASSERT_EQ(triggers.size(), 1);