2017-08-21 21:44:35 +08:00
|
|
|
#include <cstdlib>
|
|
|
|
|
2019-10-28 21:07:31 +08:00
|
|
|
#include "communication/bolt/v1/value.hpp"
|
2017-06-15 00:53:02 +08:00
|
|
|
#include "communication/result_stream_faker.hpp"
|
2019-10-28 21:07:31 +08:00
|
|
|
#include "glue/communication.hpp"
|
2017-06-15 00:53:02 +08:00
|
|
|
#include "gmock/gmock.h"
|
|
|
|
#include "gtest/gtest.h"
|
2017-07-20 00:14:59 +08:00
|
|
|
#include "query/exceptions.hpp"
|
Flags cleanup and QueryEngine removal
Summary:
I started with cleaning flags up (removing unused ones, documenting undocumented ones). There were some flags to remove in `QueryEngine`. Seeing how we never use hardcoded queries (AFAIK last Mislav's testing also indicated they aren't faster than interpretation), when removing those unused flags the `QueryEngine` becomes obsolete. That means that a bunch of other stuff becomes obsolete, along with the hardcoded queries. So I removed it all (this has been discussed and approved on the daily).
Some flags that were previously undocumented in `docs/user_technical/installation` are now documented. The following flags are NOT documented and in my opinion should not be displayed when starting `./memgraph --help` (@mferencevic):
```
query_vertex_count_to_expand_existsing (from rule_based_planner.cpp)
query_max_plans (rule_based_planner.cpp)
```
If you think that another organization is needed w.r.t. flag visibility, comment.
@teon.banek: I had to remove some stuff from CMakeLists to make it buildable. Please review what I removed and clean up if necessary if/when this lands. If the needed changes are minor, you can also comment.
Reviewers: buda, mislav.bradac, teon.banek, mferencevic
Reviewed By: buda, mislav.bradac
Subscribers: pullbot, mferencevic, teon.banek
Differential Revision: https://phabricator.memgraph.io/D825
2017-09-22 22:17:09 +08:00
|
|
|
#include "query/interpreter.hpp"
|
2017-07-20 00:14:59 +08:00
|
|
|
#include "query/typed_value.hpp"
|
|
|
|
#include "query_common.hpp"
|
2017-06-15 00:53:02 +08:00
|
|
|
|
2019-10-28 21:07:31 +08:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
// Convert a Bolt list value into a vector of Bolt edges.
// Precondition: `v` is a list value whose elements are all edge values
// (both `ValueList` and `ValueEdge` enforce this at runtime).
auto ToEdgeList(const communication::bolt::Value &v) {
  std::vector<communication::bolt::Edge> list;
  // Reserve up front -- we know the final size, avoid reallocations.
  list.reserve(v.ValueList().size());
  // Iterate by const reference; the original copied every Value element.
  for (const auto &x : v.ValueList()) {
    list.push_back(x.ValueEdge());
  }
  return list;
}
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
2017-06-15 00:53:02 +08:00
|
|
|
// TODO: This is not a unit test, but tests/integration dir is chaotic at the
|
|
|
|
// moment. After tests refactoring is done, move/rename this.
|
|
|
|
|
2017-12-22 20:39:31 +08:00
|
|
|
class InterpreterTest : public ::testing::Test {
|
|
|
|
protected:
|
2019-11-22 00:24:01 +08:00
|
|
|
storage::Storage db_;
|
2019-10-10 17:23:33 +08:00
|
|
|
query::InterpreterContext interpreter_context_{&db_};
|
2019-10-07 23:31:25 +08:00
|
|
|
query::Interpreter interpreter_{&interpreter_context_};
|
2017-12-22 20:39:31 +08:00
|
|
|
|
2019-10-10 17:23:33 +08:00
|
|
|
/**
|
|
|
|
* Execute the given query and commit the transaction.
|
|
|
|
*
|
|
|
|
* Return the query stream.
|
|
|
|
*/
|
2018-07-02 21:34:33 +08:00
|
|
|
auto Interpret(const std::string &query,
|
Clean-up TypedValue misuse
Summary:
In a bunch of places `TypedValue` was used where `PropertyValue` should be. A lot of times it was only because `TypedValue` serialization code could be reused for `PropertyValue`, only without providing callbacks for `VERTEX`, `EDGE` and `PATH`. So first I wrote separate serialization code for `PropertyValue` and put it into storage folder. Then I fixed all the places where `TypedValue` was incorrectly used instead of `PropertyValue`. I also disabled implicit `TypedValue` to `PropertyValue` conversion in hopes of preventing misuse in the future.
After that, I wrote code for `VertexAccessor` and `EdgeAccessor` serialization and put it into `storage` folder because it was almost duplicated in distributed BFS and pull produce RPC messages. On the sender side, some subset of records (old or new or both) is serialized, and on the reciever side, records are deserialized and immediately put into transaction cache.
Then I rewrote the `TypedValue` serialization functions (`SaveCapnpTypedValue` and `LoadCapnpTypedValue`) to not take callbacks for `VERTEX`, `EDGE` and `PATH`, but use accessor serialization functions instead. That means that any code that wants to use `TypedValue` serialization must hold a reference to `GraphDbAccessor` and `DataManager`, so that should make clients reconsider if they really want to use `TypedValue` instead of `PropertyValue`.
Reviewers: teon.banek, msantl
Reviewed By: teon.banek
Subscribers: pullbot
Differential Revision: https://phabricator.memgraph.io/D1598
2018-09-13 18:12:07 +08:00
|
|
|
const std::map<std::string, PropertyValue> ¶ms = {}) {
|
2019-11-22 00:24:01 +08:00
|
|
|
ResultStreamFaker stream(&db_);
|
2019-10-10 17:23:33 +08:00
|
|
|
|
2019-10-18 22:27:47 +08:00
|
|
|
auto [header, _] = interpreter_.Prepare(query, params);
|
2019-10-10 17:23:33 +08:00
|
|
|
stream.Header(header);
|
|
|
|
auto summary = interpreter_.PullAll(&stream);
|
|
|
|
stream.Summary(summary);
|
|
|
|
|
2018-07-18 16:40:06 +08:00
|
|
|
return stream;
|
2017-12-22 20:39:31 +08:00
|
|
|
}
|
|
|
|
};
|
2017-06-15 00:53:02 +08:00
|
|
|
|
|
|
|
// Run query with different ast twice to see if query executes correctly when
|
|
|
|
// ast is read from cache.
|
2017-12-22 20:39:31 +08:00
|
|
|
TEST_F(InterpreterTest, AstCache) {
  {
    auto result = Interpret("RETURN 2 + 3");
    const auto &header = result.GetHeader();
    ASSERT_EQ(header.size(), 1U);
    EXPECT_EQ(header[0], "2 + 3");
    const auto &rows = result.GetResults();
    ASSERT_EQ(rows.size(), 1U);
    ASSERT_EQ(rows[0].size(), 1U);
    ASSERT_EQ(rows[0][0].ValueInt(), 5);
  }
  {
    // Cached ast, different literals.
    auto result = Interpret("RETURN 5 + 4");
    const auto &rows = result.GetResults();
    ASSERT_EQ(rows.size(), 1U);
    ASSERT_EQ(rows[0].size(), 1U);
    ASSERT_EQ(rows[0][0].ValueInt(), 9);
  }
  {
    // Different ast (because of different types).
    auto result = Interpret("RETURN 5.5 + 4");
    const auto &rows = result.GetResults();
    ASSERT_EQ(rows.size(), 1U);
    ASSERT_EQ(rows[0].size(), 1U);
    ASSERT_EQ(rows[0][0].ValueDouble(), 9.5);
  }
  {
    // Cached ast, same literals.
    auto result = Interpret("RETURN 2 + 3");
    const auto &rows = result.GetResults();
    ASSERT_EQ(rows.size(), 1U);
    ASSERT_EQ(rows[0].size(), 1U);
    ASSERT_EQ(rows[0][0].ValueInt(), 5);
  }
  {
    // Cached ast, different literals.
    auto result = Interpret("RETURN 10.5 + 1");
    const auto &rows = result.GetResults();
    ASSERT_EQ(rows.size(), 1U);
    ASSERT_EQ(rows[0].size(), 1U);
    ASSERT_EQ(rows[0][0].ValueDouble(), 11.5);
  }
  {
    // Cached ast, same literals, different whitespaces.
    auto result = Interpret("RETURN 10.5 + 1");
    const auto &rows = result.GetResults();
    ASSERT_EQ(rows.size(), 1U);
    ASSERT_EQ(rows[0].size(), 1U);
    ASSERT_EQ(rows[0][0].ValueDouble(), 11.5);
  }
  {
    // Cached ast, same literals, different named header.
    auto result = Interpret("RETURN 10.5+1");
    const auto &header = result.GetHeader();
    ASSERT_EQ(header.size(), 1U);
    EXPECT_EQ(header[0], "10.5+1");
    const auto &rows = result.GetResults();
    ASSERT_EQ(rows.size(), 1U);
    ASSERT_EQ(rows[0].size(), 1U);
    ASSERT_EQ(rows[0][0].ValueDouble(), 11.5);
  }
}
|
2017-07-20 00:14:59 +08:00
|
|
|
|
|
|
|
// Run query with same ast multiple times with different parameters.
|
2017-12-22 20:39:31 +08:00
|
|
|
TEST_F(InterpreterTest, Parameters) {
  {
    auto result = Interpret("RETURN $2 + $`a b`", {{"2", PropertyValue(10)},
                                                   {"a b", PropertyValue(15)}});
    const auto &header = result.GetHeader();
    ASSERT_EQ(header.size(), 1U);
    EXPECT_EQ(header[0], "$2 + $`a b`");
    const auto &rows = result.GetResults();
    ASSERT_EQ(rows.size(), 1U);
    ASSERT_EQ(rows[0].size(), 1U);
    ASSERT_EQ(rows[0][0].ValueInt(), 25);
  }
  {
    // Not needed parameter.
    auto result = Interpret("RETURN $2 + $`a b`", {{"2", PropertyValue(10)},
                                                   {"a b", PropertyValue(15)},
                                                   {"c", PropertyValue(10)}});
    const auto &header = result.GetHeader();
    ASSERT_EQ(header.size(), 1U);
    EXPECT_EQ(header[0], "$2 + $`a b`");
    const auto &rows = result.GetResults();
    ASSERT_EQ(rows.size(), 1U);
    ASSERT_EQ(rows[0].size(), 1U);
    ASSERT_EQ(rows[0][0].ValueInt(), 25);
  }
  {
    // Cached ast, different parameters.
    auto result =
        Interpret("RETURN $2 + $`a b`",
                  {{"2", PropertyValue("da")}, {"a b", PropertyValue("ne")}});
    const auto &rows = result.GetResults();
    ASSERT_EQ(rows.size(), 1U);
    ASSERT_EQ(rows[0].size(), 1U);
    ASSERT_EQ(rows[0][0].ValueString(), "dane");
  }
  {
    // Non-primitive literal.
    auto result = Interpret(
        "RETURN $2",
        {{"2", PropertyValue(std::vector<PropertyValue>{
                   PropertyValue(5), PropertyValue(2), PropertyValue(3)})}});
    const auto &rows = result.GetResults();
    ASSERT_EQ(rows.size(), 1U);
    ASSERT_EQ(rows[0].size(), 1U);
    auto ints =
        query::test_common::ToIntList(glue::ToTypedValue(rows[0][0]));
    ASSERT_THAT(ints, testing::ElementsAre(5, 2, 3));
  }
  {
    // Cached ast, unprovided parameter.
    ASSERT_THROW(Interpret("RETURN $2 + $`a b`", {{"2", PropertyValue("da")},
                                                  {"ab", PropertyValue("ne")}}),
                 query::UnprovidedParameterError);
  }
}
|
2017-08-21 21:44:35 +08:00
|
|
|
|
|
|
|
// Test bfs end to end.
|
2017-12-22 20:39:31 +08:00
|
|
|
TEST_F(InterpreterTest, Bfs) {
  // Fixed seed so the randomly generated graph is deterministic.
  srand(0);
  const auto kNumLevels = 10;
  const auto kNumNodesPerLevel = 100;
  const auto kNumEdgesPerNode = 100;
  const auto kNumUnreachableNodes = 1000;
  const auto kNumUnreachableEdges = 100000;
  const auto kReachable = "reachable";
  const auto kId = "id";

  // levels[i] holds every vertex placed on BFS level i.
  std::vector<std::vector<query::VertexAccessor>> levels(kNumLevels);
  // Monotonically increasing vertex id; reachable vertices on level i thus
  // get ids in (kNumNodesPerLevel * (i - 1), kNumNodesPerLevel * i], which
  // the verification phase below relies on.
  int id = 0;

  // Set up.
  {
    auto storage_dba = db_.Access();
    query::DbAccessor dba(&storage_dba);
    // Insert a vertex on `level` with `id` and `reachable` properties and
    // record it in `levels`.
    auto add_node = [&](int level, bool reachable) {
      auto node = dba.InsertVertex();
      CHECK(node.SetProperty(dba.NameToProperty(kId), PropertyValue(id++))
                .HasValue());
      CHECK(node.SetProperty(dba.NameToProperty(kReachable),
                             PropertyValue(reachable))
                .HasValue());
      levels[level].push_back(node);
      return node;
    };

    // Insert a v1 -> v2 edge carrying a `reachable` property that the BFS
    // filter in the query below inspects.
    auto add_edge = [&](auto &v1, auto &v2, bool reachable) {
      auto edge = dba.InsertEdge(&v1, &v2, dba.NameToEdgeType("edge"));
      CHECK(edge->SetProperty(dba.NameToProperty(kReachable),
                              PropertyValue(reachable))
                .HasValue());
    };

    // Add source node.
    add_node(0, true);

    // Add reachable nodes.
    for (int i = 1; i < kNumLevels; ++i) {
      for (int j = 0; j < kNumNodesPerLevel; ++j) {
        auto node = add_node(i, true);
        // Each node is linked from random nodes of the previous level, so
        // its shortest reachable path has exactly `i` edges.
        for (int k = 0; k < kNumEdgesPerNode; ++k) {
          auto &node2 = levels[i - 1][rand() % levels[i - 1].size()];
          add_edge(node2, node, true);
        }
      }
    }

    // Add unreachable nodes.
    for (int i = 0; i < kNumUnreachableNodes; ++i) {
      auto node = add_node(rand() % kNumLevels,  // Not really important.
                           false);
      // Edges are marked reachable but the node itself is not, so the BFS
      // filter (which also checks `n.reachable`) must still skip it.
      for (int j = 0; j < kNumEdgesPerNode; ++j) {
        auto &level = levels[rand() % kNumLevels];
        auto &node2 = level[rand() % level.size()];
        add_edge(node2, node, true);
        add_edge(node, node2, true);
      }
    }

    // Add unreachable edges.
    for (int i = 0; i < kNumUnreachableEdges; ++i) {
      auto &level1 = levels[rand() % kNumLevels];
      auto &node1 = level1[rand() % level1.size()];
      auto &level2 = levels[rand() % kNumLevels];
      auto &node2 = level2[rand() % level2.size()];
      add_edge(node1, node2, false);
    }

    ASSERT_FALSE(dba.Commit().HasError());
  }

  // BFS of depth up to 5 from the source node, following only edges and
  // nodes flagged reachable.
  auto stream = Interpret(
      "MATCH (n {id: 0})-[r *bfs..5 (e, n | n.reachable and "
      "e.reachable)]->(m) RETURN n, r, m");

  ASSERT_EQ(stream.GetHeader().size(), 3U);
  EXPECT_EQ(stream.GetHeader()[0], "n");
  EXPECT_EQ(stream.GetHeader()[1], "r");
  EXPECT_EQ(stream.GetHeader()[2], "m");
  // One result per reachable node within 5 levels of the source.
  ASSERT_EQ(stream.GetResults().size(), 5 * kNumNodesPerLevel);

  auto dba = db_.Access();
  int expected_level = 1;
  int remaining_nodes_in_level = kNumNodesPerLevel;
  std::unordered_set<int64_t> matched_ids;

  for (const auto &result : stream.GetResults()) {
    const auto &begin = result[0].ValueVertex();
    const auto &edges = ToEdgeList(result[1]);
    const auto &end = result[2].ValueVertex();

    // Check that path is of expected length. Returned paths should be from
    // shorter to longer ones.
    EXPECT_EQ(edges.size(), expected_level);
    // Check that starting node is correct.
    EXPECT_EQ(edges.front().from, begin.id);
    EXPECT_EQ(begin.properties.at(kId).ValueInt(), 0);
    for (int i = 1; i < static_cast<int>(edges.size()); ++i) {
      // Check that edges form a connected path.
      EXPECT_EQ(edges[i - 1].to.AsInt(), edges[i].from.AsInt());
    }
    auto matched_id = end.properties.at(kId).ValueInt();
    EXPECT_EQ(edges.back().to, end.id);
    // Check that we didn't match that node already.
    EXPECT_TRUE(matched_ids.insert(matched_id).second);
    // Check that shortest path was found. Ids of level `l` fall in
    // (kNumNodesPerLevel * (l - 1), kNumNodesPerLevel * l] by construction.
    EXPECT_TRUE(matched_id > kNumNodesPerLevel * (expected_level - 1) &&
                matched_id <= kNumNodesPerLevel * expected_level);
    if (!--remaining_nodes_in_level) {
      remaining_nodes_in_level = kNumNodesPerLevel;
      ++expected_level;
    }
  }
}
|
2017-10-10 00:09:28 +08:00
|
|
|
|
2017-12-22 20:39:31 +08:00
|
|
|
// Index creation must be rejected inside an explicit (multicommand)
// transaction.
TEST_F(InterpreterTest, CreateIndexInMulticommandTransaction) {
  Interpret("BEGIN");
  ASSERT_THROW(Interpret("CREATE INDEX ON :X(y)"),
               query::IndexInMulticommandTxException);
  // Close the explicitly opened transaction.
  Interpret("ROLLBACK");
}
|
2018-02-08 18:45:30 +08:00
|
|
|
|
|
|
|
// Test shortest path end to end.
|
|
|
|
TEST_F(InterpreterTest, ShortestPath) {
  // Triangle graph: n -r1-> m -r2-> l and a direct n -r3-> l with a larger
  // weight than the r1 + r2 route.
  Interpret(
      "CREATE (n:A {x: 1}), (m:B {x: 2}), (l:C {x: 1}), (n)-[:r1 {w: 1 "
      "}]->(m)-[:r2 {w: 2}]->(l), (n)-[:r3 {w: 4}]->(l)");

  auto result =
      Interpret("MATCH (n)-[e *wshortest 5 (e, n | e.w) ]->(m) return e");

  ASSERT_EQ(result.GetHeader().size(), 1U);
  EXPECT_EQ(result.GetHeader()[0], "e");
  ASSERT_EQ(result.GetResults().size(), 3U);

  auto dba = db_.Access();
  // Every returned path must match one of these edge-type sequences.
  const std::vector<std::vector<std::string>> expected_results{
      {"r1"}, {"r2"}, {"r1", "r2"}};

  for (const auto &row : result.GetResults()) {
    // Collect the edge types along the returned path.
    std::vector<std::string> edge_types;
    for (const auto &edge : ToEdgeList(row[0])) {
      edge_types.push_back(edge.type);
    }

    bool any_match = false;
    for (const auto &expected : expected_results) {
      if (expected == edge_types) {
        any_match = true;
        break;
      }
    }

    EXPECT_TRUE(any_match);
  }
}
|
2019-05-13 21:26:56 +08:00
|
|
|
|
|
|
|
// NOLINTNEXTLINE(hicpp-special-member-functions)
|
2019-11-22 00:24:01 +08:00
|
|
|
// Verify an existence constraint is enforced across create, update and
// delete operations.
TEST_F(InterpreterTest, ExistenceConstraintTest) {
  Interpret("CREATE CONSTRAINT ON (n:A) ASSERT EXISTS (n.a);");
  Interpret("CREATE (:A{a:1})");
  Interpret("CREATE (:A{a:2})");
  // Creating an :A vertex without the `a` property violates the constraint.
  ASSERT_THROW(Interpret("CREATE (:A)"), query::QueryException);
  // Updating the property to another value is allowed.
  Interpret("MATCH (n:A{a:2}) SET n.a=3");
  Interpret("CREATE (:A{a:2})");
  // Deleting a constrained vertex is allowed.
  Interpret("MATCH (n:A{a:2}) DETACH DELETE n");
  Interpret("CREATE (n:A{a:2})");
}
|
2019-11-12 22:58:05 +08:00
|
|
|
|
|
|
|
TEST_F(InterpreterTest, ExplainQuery) {
  // Both caches start out empty.
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 0U);
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 0U);
  auto result = Interpret("EXPLAIN MATCH (n) RETURN *;");
  ASSERT_EQ(result.GetHeader().size(), 1U);
  EXPECT_EQ(result.GetHeader().front(), "QUERY PLAN");
  const std::vector<std::string> expected_rows{" * Produce {n}",
                                               " * ScanAll (n)", " * Once"};
  const auto &rows = result.GetResults();
  ASSERT_EQ(rows.size(), expected_rows.size());
  for (std::size_t i = 0; i < rows.size(); ++i) {
    ASSERT_EQ(rows[i].size(), 1U);
    EXPECT_EQ(rows[i].front().ValueString(), expected_rows[i]);
  }
  // We should have a plan cache for MATCH ...
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 1U);
  // We should have AST cache for EXPLAIN ... and for inner MATCH ...
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 2U);
  // Running the inner query directly must reuse both cache entries.
  Interpret("MATCH (n) RETURN *;");
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 1U);
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 2U);
}
|
|
|
|
|
|
|
|
TEST_F(InterpreterTest, ExplainQueryWithParams) {
  // Both caches start out empty.
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 0U);
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 0U);
  auto result = Interpret("EXPLAIN MATCH (n) WHERE n.id = $id RETURN *;",
                          {{"id", PropertyValue(42)}});
  ASSERT_EQ(result.GetHeader().size(), 1U);
  EXPECT_EQ(result.GetHeader().front(), "QUERY PLAN");
  const std::vector<std::string> expected_rows{" * Produce {n}", " * Filter",
                                               " * ScanAll (n)", " * Once"};
  const auto &rows = result.GetResults();
  ASSERT_EQ(rows.size(), expected_rows.size());
  for (std::size_t i = 0; i < rows.size(); ++i) {
    ASSERT_EQ(rows[i].size(), 1U);
    EXPECT_EQ(rows[i].front().ValueString(), expected_rows[i]);
  }
  // We should have a plan cache for MATCH ...
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 1U);
  // We should have AST cache for EXPLAIN ... and for inner MATCH ...
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 2U);
  // A different parameter value (even a different type) must still hit the
  // same cached plan and AST.
  Interpret("MATCH (n) WHERE n.id = $id RETURN *;",
            {{"id", PropertyValue("something else")}});
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 1U);
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 2U);
}
|
|
|
|
|
|
|
|
TEST_F(InterpreterTest, ProfileQuery) {
  // Both caches start out empty.
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 0U);
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 0U);
  auto result = Interpret("PROFILE MATCH (n) RETURN *;");
  const std::vector<std::string> expected_header{
      "OPERATOR", "ACTUAL HITS", "RELATIVE TIME", "ABSOLUTE TIME"};
  EXPECT_EQ(result.GetHeader(), expected_header);
  const std::vector<std::string> expected_rows{"* Produce", "* ScanAll",
                                               "* Once"};
  const auto &rows = result.GetResults();
  ASSERT_EQ(rows.size(), expected_rows.size());
  for (std::size_t i = 0; i < rows.size(); ++i) {
    ASSERT_EQ(rows[i].size(), 4U);
    EXPECT_EQ(rows[i].front().ValueString(), expected_rows[i]);
  }
  // We should have a plan cache for MATCH ...
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 1U);
  // We should have AST cache for PROFILE ... and for inner MATCH ...
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 2U);
  // Running the inner query directly must reuse both cache entries.
  Interpret("MATCH (n) RETURN *;");
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 1U);
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 2U);
}
|
|
|
|
|
|
|
|
TEST_F(InterpreterTest, ProfileQueryWithParams) {
  // Both caches start out empty.
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 0U);
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 0U);
  auto result = Interpret("PROFILE MATCH (n) WHERE n.id = $id RETURN *;",
                          {{"id", PropertyValue(42)}});
  const std::vector<std::string> expected_header{
      "OPERATOR", "ACTUAL HITS", "RELATIVE TIME", "ABSOLUTE TIME"};
  EXPECT_EQ(result.GetHeader(), expected_header);
  const std::vector<std::string> expected_rows{"* Produce", "* Filter",
                                               "* ScanAll", "* Once"};
  const auto &rows = result.GetResults();
  ASSERT_EQ(rows.size(), expected_rows.size());
  for (std::size_t i = 0; i < rows.size(); ++i) {
    ASSERT_EQ(rows[i].size(), 4U);
    EXPECT_EQ(rows[i].front().ValueString(), expected_rows[i]);
  }
  // We should have a plan cache for MATCH ...
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 1U);
  // We should have AST cache for PROFILE ... and for inner MATCH ...
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 2U);
  // A different parameter value (even a different type) must still hit the
  // same cached plan and AST.
  Interpret("MATCH (n) WHERE n.id = $id RETURN *;",
            {{"id", PropertyValue("something else")}});
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 1U);
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 2U);
}
|
2019-11-25 18:02:34 +08:00
|
|
|
|
|
|
|
TEST_F(InterpreterTest, ProfileQueryWithLiterals) {
  // Both caches start out empty.
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 0U);
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 0U);
  auto result = Interpret(
      "PROFILE UNWIND range(1, 1000) AS x CREATE (:Node {id: x});", {});
  const std::vector<std::string> expected_header{
      "OPERATOR", "ACTUAL HITS", "RELATIVE TIME", "ABSOLUTE TIME"};
  EXPECT_EQ(result.GetHeader(), expected_header);
  const std::vector<std::string> expected_rows{"* CreateNode", "* Unwind",
                                               "* Once"};
  const auto &rows = result.GetResults();
  ASSERT_EQ(rows.size(), expected_rows.size());
  for (std::size_t i = 0; i < rows.size(); ++i) {
    ASSERT_EQ(rows[i].size(), 4U);
    EXPECT_EQ(rows[i].front().ValueString(), expected_rows[i]);
  }
  // We should have a plan cache for UNWIND ...
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 1U);
  // We should have AST cache for PROFILE ... and for inner UNWIND ...
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 2U);
  // Different literals must still hit the same cached plan and AST.
  Interpret("UNWIND range(42, 4242) AS x CREATE (:Node {id: x});", {});
  EXPECT_EQ(interpreter_context_.plan_cache.size(), 1U);
  EXPECT_EQ(interpreter_context_.ast_cache.size(), 2U);
}
|