Plan Cartesian after a merge point on master plan

Summary:
This should fix the issue of having a write followed by a read during
distributed execution. Any kind of merge point in the plan should behave
like a Cartesian with regard to planning the ScanAll that follows.
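
To make the rule concrete, here is a minimal, self-contained sketch of the decision
(hypothetical class and helper names; only the flags on_master_, has_scan_all_ and the
guard itself mirror the diff below, this is not the actual DistributedPlanner code):
once the plan already produces results on the master, either because a ScanAll was
already planned or because a merge point such as a write followed by WITH pulled
everything there, the next ScanAll has to be planned as one input of a Cartesian.

#include <iostream>
#include <string>

// Hypothetical sketch, not the actual DistributedPlanner code.
struct ScanAllSketch {
  std::string description;
};

class CartesianDecisionSketch {
 public:
  // A merge point (e.g. Synchronize after a write, or an accumulation) means
  // the results produced so far live on the master.
  void MarkMergePoint() { on_master_ = true; }

  // Mirrors the guard from the diff: if we already produce results on the
  // master, or a ScanAll was already planned, the next ScanAll must be
  // combined with the existing results through a Cartesian.
  void PostVisitScanAll(const ScanAllSketch &scan) {
    if (on_master_ || has_scan_all_) {
      AddForCartesian(scan);
    }
    has_scan_all_ = true;
  }

 private:
  void AddForCartesian(const ScanAllSketch &scan) {
    std::cout << "plan Cartesian against " << scan.description << "\n";
  }

  bool on_master_ = false;
  bool has_scan_all_ = false;
};

int main() {
  // CREATE (a) WITH a MATCH (b) RETURN b: the write forces a merge point, so
  // the MATCH's ScanAll becomes one input of a Cartesian.
  CartesianDecisionSketch planner;
  planner.MarkMergePoint();
  planner.PostVisitScanAll({"ScanAll (b)"});  // prints: plan Cartesian against ScanAll (b)
  return 0;
}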

Reviewers: mtomic, mferencevic

Reviewed By: mtomic

Subscribers: pullbot

Differential Revision: https://phabricator.memgraph.io/D1624
Teon Banek 2018-10-02 09:58:12 +02:00
parent 75950664a7
commit 9071552ce1
2 changed files with 28 additions and 5 deletions


@@ -921,7 +921,9 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor {
   bool PostVisit(ScanAll &scan) override {
     prev_ops_.pop_back();
     should_split_ = true;
-    if (has_scan_all_) {
+    // Having performed a merge point (e.g. a write or accumulation) behaves the
+    // same as a Cartesian, for each produced result we perform a ScanAll.
+    if (on_master_ || has_scan_all_) {
       AddForCartesian(&scan);
     }
     has_scan_all_ = true;
@@ -935,7 +937,7 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor {
   bool PostVisit(ScanAllByLabel &scan) override {
     prev_ops_.pop_back();
     should_split_ = true;
-    if (has_scan_all_) {
+    if (on_master_ || has_scan_all_) {
       AddForCartesian(&scan);
     }
     has_scan_all_ = true;
@@ -948,7 +950,7 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor {
   bool PostVisit(ScanAllByLabelPropertyRange &scan) override {
     prev_ops_.pop_back();
     should_split_ = true;
-    if (has_scan_all_) {
+    if (on_master_ || has_scan_all_) {
       AddForCartesian(&scan);
     }
     has_scan_all_ = true;
@@ -961,7 +963,7 @@ class DistributedPlanner : public HierarchicalLogicalOperatorVisitor {
   bool PostVisit(ScanAllByLabelPropertyValue &scan) override {
     prev_ops_.pop_back();
     should_split_ = true;
-    if (has_scan_all_) {
+    if (on_master_ || has_scan_all_) {
       AddForCartesian(&scan);
     }
     has_scan_all_ = true;

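For the test added below, the expected distributed plan is a Cartesian whose left input
is the master-side result of the write (DistributedCreateNode, Synchronize, Produce) and
whose right input is the workers' ScanAll pulled to the master via PullRemote. A minimal
sketch of what that Cartesian does at runtime, with hypothetical data and no memgraph API:

#include <iostream>
#include <string>
#include <vector>

int main() {
  // Left input: rows already on the master past the merge point, e.g. the
  // `a` produced by CREATE (a) WITH a and gathered through Synchronize.
  std::vector<std::string> left = {"a1"};
  // Right input: vertices from the workers' ScanAll, pulled to the master.
  std::vector<std::string> right = {"b1", "b2", "b3"};
  // The Cartesian emits the cross product, which feeds the final Produce.
  for (const auto &l : left) {
    for (const auto &r : right) {
      std::cout << "(" << l << ", " << r << ")\n";
    }
  }
  return 0;
}
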

@@ -3150,7 +3150,7 @@ TYPED_TEST(TestPlanner, DistributedCartesianUnwind) {
   CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
-TYPED_TEST(TestPlanner, DistributedCartesianCreateNode) {
+TYPED_TEST(TestPlanner, DistributedCartesianMatchCreateNode) {
   // Test MATCH (a) CREATE (b) WITH b MATCH (c) CREATE (d)
   AstStorage storage;
   auto *node_b = NODE("b");
@@ -3175,6 +3175,27 @@ TYPED_TEST(TestPlanner, DistributedCartesianCreateNode) {
   CheckDistributedPlan(planner.plan(), symbol_table, expected);
 }
 
+TYPED_TEST(TestPlanner, DistributedCartesianCreateNode) {
+  // Test CREATE (a) WITH a MATCH (b) RETURN b
+  AstStorage storage;
+  auto *node_a = NODE("a");
+  auto *node_b = NODE("b");
+  QUERY(SINGLE_QUERY(CREATE(PATTERN(node_a)), WITH("a"), MATCH(PATTERN(node_b)),
+                     RETURN("b")));
+  auto symbol_table = MakeSymbolTable(*storage.query());
+  auto sym_a = symbol_table.at(*node_a->identifier_);
+  auto left_cart = MakeCheckers(ExpectDistributedCreateNode(true),
+                                ExpectSynchronize(true), ExpectProduce());
+  auto sym_b = symbol_table.at(*node_b->identifier_);
+  auto right_cart = MakeCheckers(ExpectScanAll(), ExpectPullRemote({sym_b}));
+  auto expected = ExpectDistributed(
+      MakeCheckers(ExpectCartesian(left_cart, right_cart), ExpectProduce()),
+      MakeCheckers(ExpectScanAll()));
+  FakeDbAccessor dba;
+  auto planner = MakePlanner<TypeParam>(dba, storage, symbol_table);
+  CheckDistributedPlan(planner.plan(), symbol_table, expected);
+}
+
 TYPED_TEST(TestPlanner, DistributedOptionalExpand) {
   // Test MATCH (n) OPTIONAL MATCH (n)-[e]-(m) RETURN e;
   AstStorage storage;