From 4e9a036881845203a26b223ef5a54f0f8e6e43e4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marko=20Bari=C5=A1i=C4=87?=
 <48765171+MarkoBarisic@users.noreply.github.com>
Date: Wed, 8 Nov 2023 13:09:02 +0100
Subject: [PATCH 01/11] Fix v2.12 release pipeline (#1445)

---
 environment/os/amzn-2.sh | 11 ++---------
 release/package/run.sh   |  4 ++++
 2 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/environment/os/amzn-2.sh b/environment/os/amzn-2.sh
index dc0467bc3..15ff29106 100755
--- a/environment/os/amzn-2.sh
+++ b/environment/os/amzn-2.sh
@@ -158,9 +158,9 @@ install() {
             continue
         fi
         if [ "$pkg" == nodejs ]; then
-            curl -sL https://rpm.nodesource.com/setup_16.x | bash -
             if ! yum list installed nodejs >/dev/null 2>/dev/null; then
-                yum install -y nodejs
+              yum install https://rpm.nodesource.com/pub_16.x/nodistro/repo/nodesource-release-nodistro-1.noarch.rpm -y
+              yum install nodejs -y --setopt=nodesource-nodejs.module_hotfixes=1
             fi
             continue
         fi
@@ -172,13 +172,6 @@ install() {
             fi
             continue
         fi
-        if [ "$pkg" == nodejs ]; then
-            curl -sL https://rpm.nodesource.com/setup_16.x | bash -
-            if ! yum list installed nodejs >/dev/null 2>/dev/null; then
-                yum install -y nodejs
-            fi
-            continue
-        fi
         if [ "$pkg" == java-11-openjdk ]; then
             amazon-linux-extras install -y java-openjdk11
             continue
diff --git a/release/package/run.sh b/release/package/run.sh
index 41231c398..bbd5ff48a 100755
--- a/release/package/run.sh
+++ b/release/package/run.sh
@@ -72,6 +72,10 @@ make_package () {
     if [[ "$(git rev-parse --abbrev-ref HEAD)" != "master" ]]; then
         git fetch origin master:master
     fi
+
+    # Ensure we have a clean build directory
+    docker exec "$build_container" rm -rf /memgraph
+    
     docker exec "$build_container" mkdir -p /memgraph
     # TODO(gitbuda): Revisit copying the whole repo -> makese sense under CI.
     docker cp "$PROJECT_ROOT/." "$build_container:/memgraph/"

From 17915578f81bebf8e00659c493ffd50bbb897c36 Mon Sep 17 00:00:00 2001
From: Antonio Filipovic <61245998+antoniofilipovic@users.noreply.github.com>
Date: Thu, 9 Nov 2023 18:56:36 +0100
Subject: [PATCH 02/11] Fix race condition and arena tracking bug (#1468)

---
 src/memory/global_memory_control.cpp | 20 ++++++++-------
 src/memory/query_memory_control.cpp  | 37 ++++++++++------------------
 src/memory/query_memory_control.hpp  | 23 ++---------------
 src/utils/memory_tracker.hpp         |  3 ---
 4 files changed, 26 insertions(+), 57 deletions(-)

diff --git a/src/memory/global_memory_control.cpp b/src/memory/global_memory_control.cpp
index f905be7c8..57d97bcaa 100644
--- a/src/memory/global_memory_control.cpp
+++ b/src/memory/global_memory_control.cpp
@@ -61,7 +61,7 @@ void *my_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t
   // This needs to be before, to throw exception in case of too big alloc
   if (*commit) [[likely]] {
     memgraph::utils::total_memory_tracker.Alloc(static_cast<int64_t>(size));
-    if (GetQueriesMemoryControl().IsArenaTracked(arena_ind)) [[unlikely]] {
+    if (GetQueriesMemoryControl().IsThreadTracked()) [[unlikely]] {
       GetQueriesMemoryControl().TrackAllocOnCurrentThread(size);
     }
   }
@@ -70,7 +70,7 @@ void *my_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t
   if (ptr == nullptr) [[unlikely]] {
     if (*commit) {
       memgraph::utils::total_memory_tracker.Free(static_cast<int64_t>(size));
-      if (GetQueriesMemoryControl().IsArenaTracked(arena_ind)) [[unlikely]] {
+      if (GetQueriesMemoryControl().IsThreadTracked()) [[unlikely]] {
         GetQueriesMemoryControl().TrackFreeOnCurrentThread(size);
       }
     }
@@ -90,7 +90,7 @@ static bool my_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size, boo
   if (committed) [[likely]] {
     memgraph::utils::total_memory_tracker.Free(static_cast<int64_t>(size));
 
-    if (GetQueriesMemoryControl().IsArenaTracked(arena_ind)) [[unlikely]] {
+    if (GetQueriesMemoryControl().IsThreadTracked()) [[unlikely]] {
       GetQueriesMemoryControl().TrackFreeOnCurrentThread(size);
     }
   }
@@ -101,7 +101,7 @@ static bool my_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size, boo
 static void my_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) {
   if (committed) [[likely]] {
     memgraph::utils::total_memory_tracker.Free(static_cast<int64_t>(size));
-    if (GetQueriesMemoryControl().IsArenaTracked(arena_ind)) [[unlikely]] {
+    if (GetQueriesMemoryControl().IsThreadTracked()) [[unlikely]] {
       GetQueriesMemoryControl().TrackFreeOnCurrentThread(size);
     }
   }
@@ -118,7 +118,7 @@ static bool my_commit(extent_hooks_t *extent_hooks, void *addr, size_t size, siz
   }
 
   memgraph::utils::total_memory_tracker.Alloc(static_cast<int64_t>(length));
-  if (GetQueriesMemoryControl().IsArenaTracked(arena_ind)) [[unlikely]] {
+  if (GetQueriesMemoryControl().IsThreadTracked()) [[unlikely]] {
     GetQueriesMemoryControl().TrackFreeOnCurrentThread(size);
   }
 
@@ -135,7 +135,7 @@ static bool my_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size, s
   }
 
   memgraph::utils::total_memory_tracker.Free(static_cast<int64_t>(length));
-  if (GetQueriesMemoryControl().IsArenaTracked(arena_ind)) [[unlikely]] {
+  if (GetQueriesMemoryControl().IsThreadTracked()) [[unlikely]] {
     GetQueriesMemoryControl().TrackFreeOnCurrentThread(size);
   }
 
@@ -152,7 +152,7 @@ static bool my_purge_forced(extent_hooks_t *extent_hooks, void *addr, size_t siz
   }
   memgraph::utils::total_memory_tracker.Free(static_cast<int64_t>(length));
 
-  if (GetQueriesMemoryControl().IsArenaTracked(arena_ind)) [[unlikely]] {
+  if (GetQueriesMemoryControl().IsThreadTracked()) [[unlikely]] {
     GetQueriesMemoryControl().TrackFreeOnCurrentThread(size);
   }
 
@@ -179,8 +179,11 @@ void SetHooks() {
     return;
   }
 
+  // Needs init due to error we might encounter otherwise
+  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=13684
+  [[maybe_unused]] const auto &queries_memory_control = GetQueriesMemoryControl();
+
   for (int i = 0; i < n_arenas; i++) {
-    GetQueriesMemoryControl().InitializeArenaCounter(i);
     std::string func_name = "arena." + std::to_string(i) + ".extent_hooks";
 
     size_t hooks_len = sizeof(old_hooks);
@@ -240,7 +243,6 @@ void UnsetHooks() {
   }
 
   for (int i = 0; i < n_arenas; i++) {
-    GetQueriesMemoryControl().InitializeArenaCounter(i);
     std::string func_name = "arena." + std::to_string(i) + ".extent_hooks";
 
     MG_ASSERT(old_hooks);
diff --git a/src/memory/query_memory_control.cpp b/src/memory/query_memory_control.cpp
index d742cc8a4..91730c900 100644
--- a/src/memory/query_memory_control.cpp
+++ b/src/memory/query_memory_control.cpp
@@ -22,6 +22,7 @@
 #include "query_memory_control.hpp"
 #include "utils/exceptions.hpp"
 #include "utils/logging.hpp"
+#include "utils/memory.hpp"
 #include "utils/memory_tracker.hpp"
 #include "utils/rw_spin_lock.hpp"
 
@@ -33,20 +34,6 @@ namespace memgraph::memory {
 
 #if USE_JEMALLOC
 
-unsigned QueriesMemoryControl::GetArenaForThread() {
-  unsigned thread_arena{0};
-  size_t size_thread_arena = sizeof(thread_arena);
-  int err = mallctl("thread.arena", &thread_arena, &size_thread_arena, nullptr, 0);
-  if (err) {
-    LOG_FATAL("Can't get arena for thread.");
-  }
-  return thread_arena;
-}
-
-void QueriesMemoryControl::AddTrackingOnArena(unsigned arena_id) { arena_tracking[arena_id].fetch_add(1); }
-
-void QueriesMemoryControl::RemoveTrackingOnArena(unsigned arena_id) { arena_tracking[arena_id].fetch_sub(1); }
-
 void QueriesMemoryControl::UpdateThreadToTransactionId(const std::thread::id &thread_id, uint64_t transaction_id) {
   auto accessor = thread_id_to_transaction_id.access();
   accessor.insert({thread_id, transaction_id});
@@ -119,14 +106,6 @@ bool QueriesMemoryControl::EraseTransactionIdTracker(uint64_t transaction_id) {
   return removed;
 }
 
-bool QueriesMemoryControl::IsArenaTracked(unsigned arena_ind) {
-  return arena_tracking[arena_ind].load(std::memory_order_acquire) != 0;
-}
-
-void QueriesMemoryControl::InitializeArenaCounter(unsigned arena_ind) {
-  arena_tracking[arena_ind].store(0, std::memory_order_relaxed);
-}
-
 bool QueriesMemoryControl::CheckTransactionIdTrackerExists(uint64_t transaction_id) {
   auto transaction_id_to_tracker_accessor = transaction_id_to_tracker.access();
   return transaction_id_to_tracker_accessor.contains(transaction_id);
@@ -166,19 +145,29 @@ void QueriesMemoryControl::PauseProcedureTracking(uint64_t transaction_id) {
   query_tracker->tracker.StopProcTracking();
 }
 
+inline int &Get_Thread_Tracker() {
+  // store variable in bss segment for each thread
+  // https://cs-fundamentals.com/c-programming/memory-layout-of-c-program-code-data-segments#size-of-code-data-bss-segments
+  static thread_local int is_thread_tracked{0};
+  return is_thread_tracked;
+}
+
+bool QueriesMemoryControl::IsThreadTracked() { return Get_Thread_Tracker() == 1; }
+
 #endif
 
 void StartTrackingCurrentThreadTransaction(uint64_t transaction_id) {
 #if USE_JEMALLOC
+  Get_Thread_Tracker() = 0;
   GetQueriesMemoryControl().UpdateThreadToTransactionId(std::this_thread::get_id(), transaction_id);
-  GetQueriesMemoryControl().AddTrackingOnArena(QueriesMemoryControl::GetArenaForThread());
+  Get_Thread_Tracker() = 1;
 #endif
 }
 
 void StopTrackingCurrentThreadTransaction(uint64_t transaction_id) {
 #if USE_JEMALLOC
+  Get_Thread_Tracker() = 0;
   GetQueriesMemoryControl().EraseThreadToTransactionId(std::this_thread::get_id(), transaction_id);
-  GetQueriesMemoryControl().RemoveTrackingOnArena(QueriesMemoryControl::GetArenaForThread());
 #endif
 }
 
diff --git a/src/memory/query_memory_control.hpp b/src/memory/query_memory_control.hpp
index 9a7d6d06c..901917757 100644
--- a/src/memory/query_memory_control.hpp
+++ b/src/memory/query_memory_control.hpp
@@ -33,25 +33,6 @@ static constexpr int64_t UNLIMITED_MEMORY{0};
 // it is necessary to restart tracking at the beginning of new query for that transaction.
 class QueriesMemoryControl {
  public:
-  /*
-  Arena stats
-  */
-
-  static unsigned GetArenaForThread();
-
-  // Add counter on threads allocating inside arena
-  void AddTrackingOnArena(unsigned);
-
-  // Remove counter on threads allocating in arena
-  void RemoveTrackingOnArena(unsigned);
-
-  // Are any threads using current arena for allocations
-  // Multiple threads can allocate inside one arena
-  bool IsArenaTracked(unsigned);
-
-  // Initialize arena counter
-  void InitializeArenaCounter(unsigned);
-
   /*
     Transaction id <-> tracker
   */
@@ -94,9 +75,9 @@ class QueriesMemoryControl {
 
   void PauseProcedureTracking(uint64_t);
 
- private:
-  std::unordered_map<unsigned, std::atomic<int>> arena_tracking;
+  bool IsThreadTracked();
 
+ private:
   struct ThreadIdToTransactionId {
     std::thread::id thread_id;
     uint64_t transaction_id;
diff --git a/src/utils/memory_tracker.hpp b/src/utils/memory_tracker.hpp
index fd99a07ae..0da888161 100644
--- a/src/utils/memory_tracker.hpp
+++ b/src/utils/memory_tracker.hpp
@@ -107,9 +107,6 @@ class MemoryTracker final {
   };
 
  private:
-  enum class TrackingMode { DEFAULT, ADDITIONAL_PROC };
-
-  TrackingMode tracking_mode_{TrackingMode::DEFAULT};
   std::atomic<int64_t> amount_{0};
   std::atomic<int64_t> peak_{0};
   std::atomic<int64_t> hard_limit_{0};

From 3c413a7e50536a29416d9bd4853447e20780f0bf Mon Sep 17 00:00:00 2001
From: Josipmrden <josip.mrden@memgraph.io>
Date: Sun, 12 Nov 2023 20:45:02 +0100
Subject: [PATCH 03/11] Fix hash join expression matching (#1496)

---
 src/query/plan/operator.cpp                        |  4 ++--
 src/query/plan/rewrite/join.hpp                    |  8 ++++++++
 .../tests/memgraph_V1/features/cartesian.feature   | 14 ++++++++++++++
 3 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/src/query/plan/operator.cpp b/src/query/plan/operator.cpp
index 69748014e..792d278e8 100644
--- a/src/query/plan/operator.cpp
+++ b/src/query/plan/operator.cpp
@@ -5404,7 +5404,7 @@ class HashJoinCursor : public Cursor {
         // Check if the join value from the pulled frame is shared with any left frames
         ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.db_accessor,
                                       storage::View::OLD);
-        auto right_value = self_.hash_join_condition_->expression1_->Accept(evaluator);
+        auto right_value = self_.hash_join_condition_->expression2_->Accept(evaluator);
         if (hashtable_.contains(right_value)) {
           // If so, finish pulling for now and proceed to joining the pulled frame
           right_op_frame_.assign(frame.elems().begin(), frame.elems().end());
@@ -5452,7 +5452,7 @@ class HashJoinCursor : public Cursor {
     while (left_op_cursor_->Pull(frame, context)) {
       ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.db_accessor,
                                     storage::View::OLD);
-      auto left_value = self_.hash_join_condition_->expression2_->Accept(evaluator);
+      auto left_value = self_.hash_join_condition_->expression1_->Accept(evaluator);
       if (left_value.type() != TypedValue::Type::Null) {
         hashtable_[left_value].emplace_back(frame.elems().begin(), frame.elems().end());
       }
diff --git a/src/query/plan/rewrite/join.hpp b/src/query/plan/rewrite/join.hpp
index c16f5b60d..5ed2bb090 100644
--- a/src/query/plan/rewrite/join.hpp
+++ b/src/query/plan/rewrite/join.hpp
@@ -27,6 +27,7 @@
 
 #include "query/plan/operator.hpp"
 #include "query/plan/preprocess.hpp"
+#include "utils/algorithm.hpp"
 
 namespace memgraph::query::plan {
 
@@ -523,6 +524,13 @@ class JoinRewriter final : public HierarchicalLogicalOperatorVisitor {
       auto rhs_property = rhs_lookup->property_;
       filter_exprs_for_removal_.insert(filter.expression);
       filters_.EraseFilter(filter);
+
+      if (utils::Contains(right_symbols, lhs_symbol) && utils::Contains(left_symbols, rhs_symbol)) {
+        // We need to duplicate this because expressions are shared between plans
+        join_condition = join_condition->Clone(ast_storage_);
+        std::swap(join_condition->expression1_, join_condition->expression2_);
+      }
+
       return std::make_unique<HashJoin>(left_op, left_symbols, right_op, right_symbols, join_condition);
     }
 
diff --git a/tests/gql_behave/tests/memgraph_V1/features/cartesian.feature b/tests/gql_behave/tests/memgraph_V1/features/cartesian.feature
index 809a3d73a..b11d83f0e 100644
--- a/tests/gql_behave/tests/memgraph_V1/features/cartesian.feature
+++ b/tests/gql_behave/tests/memgraph_V1/features/cartesian.feature
@@ -186,6 +186,20 @@ Feature: Cartesian
             | a            | b            |
             | (:A {id: 1}) | (:B {id: 1}) |
 
+    Scenario: Multiple match with WHERE x = y 01 reversed
+        Given an empty graph
+        And having executed
+            """
+            CREATE (:A {id: 1}), (:A {id: 2}), (:B {id: 1})
+            """
+        When executing query:
+            """
+            MATCH (a:A) MATCH (b:B) WHERE b.id = a.id RETURN a, b
+            """
+        Then the result should be:
+            | a            | b            |
+            | (:A {id: 1}) | (:B {id: 1}) |
+
     Scenario: Multiple match with WHERE x = y 02
         Given an empty graph
         And having executed

From 38ad5e2146a2e35f6bd28fdb5d6c219bc9e5e1df Mon Sep 17 00:00:00 2001
From: Josipmrden <josip.mrden@memgraph.io>
Date: Sun, 12 Nov 2023 23:51:00 +0100
Subject: [PATCH 04/11] Fix parallel index loading (#1479)

---
 src/storage/v2/durability/durability.cpp | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/storage/v2/durability/durability.cpp b/src/storage/v2/durability/durability.cpp
index a3cf0e2bb..decbfd14a 100644
--- a/src/storage/v2/durability/durability.cpp
+++ b/src/storage/v2/durability/durability.cpp
@@ -397,7 +397,12 @@ std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_di
     spdlog::info("All necessary WAL files are loaded successfully.");
   }
 
-  RecoverIndicesAndConstraints(indices_constraints, indices, constraints, vertices);
+  const auto par_exec_info =
+      config.durability.allow_parallel_index_creation && !recovery_info.vertex_batches.empty()
+          ? std::make_optional(std::make_pair(recovery_info.vertex_batches, config.durability.recovery_thread_count))
+          : std::nullopt;
+
+  RecoverIndicesAndConstraints(indices_constraints, indices, constraints, vertices, par_exec_info);
 
   memgraph::metrics::Measure(memgraph::metrics::SnapshotRecoveryLatency_us,
                              std::chrono::duration_cast<std::chrono::microseconds>(timer.Elapsed()).count());

From 0756cd6898cd5ca06e51049781b644a0b5f6f8d1 Mon Sep 17 00:00:00 2001
From: Josipmrden <josip.mrden@memgraph.io>
Date: Mon, 13 Nov 2023 04:12:25 +0100
Subject: [PATCH 05/11] Add fix indexed join crash (#1478)

---
 src/query/plan/rewrite/join.hpp  |  5 +++-
 tests/e2e/CMakeLists.txt         |  1 +
 tests/e2e/queries/CMakeLists.txt |  6 +++++
 tests/e2e/queries/common.py      | 24 ++++++++++++++++++++
 tests/e2e/queries/queries.py     | 39 ++++++++++++++++++++++++++++++++
 tests/e2e/queries/workloads.yaml | 14 ++++++++++++
 6 files changed, 88 insertions(+), 1 deletion(-)
 create mode 100644 tests/e2e/queries/CMakeLists.txt
 create mode 100644 tests/e2e/queries/common.py
 create mode 100644 tests/e2e/queries/queries.py
 create mode 100644 tests/e2e/queries/workloads.yaml

diff --git a/src/query/plan/rewrite/join.hpp b/src/query/plan/rewrite/join.hpp
index 5ed2bb090..65e32b3e8 100644
--- a/src/query/plan/rewrite/join.hpp
+++ b/src/query/plan/rewrite/join.hpp
@@ -146,8 +146,11 @@ class JoinRewriter final : public HierarchicalLogicalOperatorVisitor {
 
   bool PreVisit(IndexedJoin &op) override {
     prev_ops_.push_back(&op);
-    return true;
+    RewriteBranch(&op.main_branch_);
+    RewriteBranch(&op.sub_branch_);
+    return false;
   }
+
   bool PostVisit(IndexedJoin &) override {
     prev_ops_.pop_back();
     return true;
diff --git a/tests/e2e/CMakeLists.txt b/tests/e2e/CMakeLists.txt
index 7c7edb60e..71d80b7ed 100644
--- a/tests/e2e/CMakeLists.txt
+++ b/tests/e2e/CMakeLists.txt
@@ -70,6 +70,7 @@ add_subdirectory(index_hints)
 add_subdirectory(query_modules)
 add_subdirectory(constraints)
 add_subdirectory(inspect_query)
+add_subdirectory(queries)
 
 copy_e2e_python_files(pytest_runner pytest_runner.sh "")
 copy_e2e_python_files(x x.sh "")
diff --git a/tests/e2e/queries/CMakeLists.txt b/tests/e2e/queries/CMakeLists.txt
new file mode 100644
index 000000000..f672b8591
--- /dev/null
+++ b/tests/e2e/queries/CMakeLists.txt
@@ -0,0 +1,6 @@
+function(copy_queries_e2e_python_files FILE_NAME)
+    copy_e2e_python_files(queries ${FILE_NAME})
+endfunction()
+
+copy_queries_e2e_python_files(common.py)
+copy_queries_e2e_python_files(queries.py)
diff --git a/tests/e2e/queries/common.py b/tests/e2e/queries/common.py
new file mode 100644
index 000000000..6ad52539b
--- /dev/null
+++ b/tests/e2e/queries/common.py
@@ -0,0 +1,24 @@
+# Copyright 2023 Memgraph Ltd.
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+# License, and you may not use this file except in compliance with the Business Source License.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+import pytest
+from gqlalchemy import Memgraph
+
+
+@pytest.fixture
+def memgraph(**kwargs) -> Memgraph:
+    memgraph = Memgraph()
+
+    yield memgraph
+
+    memgraph.drop_indexes()
+    memgraph.ensure_constraints([])
+    memgraph.drop_database()
diff --git a/tests/e2e/queries/queries.py b/tests/e2e/queries/queries.py
new file mode 100644
index 000000000..61cea625f
--- /dev/null
+++ b/tests/e2e/queries/queries.py
@@ -0,0 +1,39 @@
+# Copyright 2023 Memgraph Ltd.
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
+# License, and you may not use this file except in compliance with the Business Source License.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+import sys
+
+import pytest
+from common import memgraph
+
+
+def test_indexed_join_with_indices(memgraph):
+    memgraph.execute(
+        "CREATE (c:A {prop: 1})-[b:TYPE]->(p:A {prop: 1}) CREATE (cf:B:A {prop : 1}) CREATE (pf:B:A {prop : 1});"
+    )
+    memgraph.execute("CREATE INDEX ON :A;")
+    memgraph.execute("CREATE INDEX ON :B;")
+    memgraph.execute("CREATE INDEX ON :A(prop);")
+    memgraph.execute("CREATE INDEX ON :B(prop);")
+
+    results = list(
+        memgraph.execute_and_fetch(
+            "match (c:A)-[b:TYPE]->(p:A) match (cf:B:A {prop : c.prop}) match (pf:B:A {prop : p.prop}) return c;"
+        )
+    )
+
+    assert len(results) == 4
+    for res in results:
+        assert res["c"].prop == 1
+
+
+if __name__ == "__main__":
+    sys.exit(pytest.main([__file__, "-rA"]))
diff --git a/tests/e2e/queries/workloads.yaml b/tests/e2e/queries/workloads.yaml
new file mode 100644
index 000000000..cb9ac4d09
--- /dev/null
+++ b/tests/e2e/queries/workloads.yaml
@@ -0,0 +1,14 @@
+queries_cluster: &queries_cluster
+  cluster:
+    main:
+      args: ["--bolt-port", "7687", "--log-level=TRACE"]
+      log_file: "queries.log"
+      setup_queries: []
+      validation_queries: []
+
+
+workloads:
+  - name: "Queries validation"
+    binary: "tests/e2e/pytest_runner.sh"
+    args: ["queries/queries.py"]
+    <<: *queries_cluster

From e90781785412cc07792b1150addaba393206a3cc Mon Sep 17 00:00:00 2001
From: Josipmrden <josip.mrden@memgraph.io>
Date: Mon, 13 Nov 2023 05:17:10 +0100
Subject: [PATCH 06/11] Fix for in list segmentation fault (#1494)

---
 src/query/frame_change.hpp                      | 17 -----------------
 src/query/interpret/eval.hpp                    |  5 ++---
 src/query/interpreter.cpp                       |  1 -
 .../features/list_operations.feature            | 16 ++++++++++++++++
 4 files changed, 18 insertions(+), 21 deletions(-)

diff --git a/src/query/frame_change.hpp b/src/query/frame_change.hpp
index 535d5ddb9..1d9ebc70c 100644
--- a/src/query/frame_change.hpp
+++ b/src/query/frame_change.hpp
@@ -43,23 +43,6 @@ struct CachedValue {
     return cache_.get_allocator().GetMemoryResource();
   }
 
-  // Func to check if cache_ contains value
-  bool CacheValue(TypedValue &&maybe_list) {
-    if (!maybe_list.IsList()) {
-      return false;
-    }
-    auto &list = maybe_list.ValueList();
-    TypedValue::Hash hash{};
-    for (auto &element : list) {
-      const auto key = hash(element);
-      auto &vector_values = cache_[key];
-      if (!IsValueInVec(vector_values, element)) {
-        vector_values.emplace_back(std::move(element));
-      }
-    }
-    return true;
-  }
-
   bool CacheValue(const TypedValue &maybe_list) {
     if (!maybe_list.IsList()) {
       return false;
diff --git a/src/query/interpret/eval.hpp b/src/query/interpret/eval.hpp
index f9ebf5467..333a7b1fa 100644
--- a/src/query/interpret/eval.hpp
+++ b/src/query/interpret/eval.hpp
@@ -315,8 +315,8 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
           return std::move(*preoperational_checks);
         }
         auto &cached_value = frame_change_collector_->GetCachedValue(*cached_id);
-        cached_value.CacheValue(std::move(list));
-        spdlog::trace("Value cached {}", *cached_id);
+        // Don't move here because we don't want to remove the element from the frame
+        cached_value.CacheValue(list);
       }
       const auto &cached_value = frame_change_collector_->GetCachedValue(*cached_id);
 
@@ -338,7 +338,6 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
     }
 
     const auto &list_value = list.ValueList();
-    spdlog::trace("Not using cache on IN LIST operator");
     auto has_null = false;
     for (const auto &element : list_value) {
       auto result = literal == element;
diff --git a/src/query/interpreter.cpp b/src/query/interpreter.cpp
index 2e5bba2ff..30bb4eca2 100644
--- a/src/query/interpreter.cpp
+++ b/src/query/interpreter.cpp
@@ -1535,7 +1535,6 @@ inline static void TryCaching(const AstStorage &ast_storage, FrameChangeCollecto
       continue;
     }
     frame_change_collector->AddTrackingKey(*cached_id);
-    spdlog::trace("Tracking {} operator, by id: {}", InListOperator::kType.name, *cached_id);
   }
 }
 
diff --git a/tests/gql_behave/tests/memgraph_V1/features/list_operations.feature b/tests/gql_behave/tests/memgraph_V1/features/list_operations.feature
index eed738446..bfe6b6225 100644
--- a/tests/gql_behave/tests/memgraph_V1/features/list_operations.feature
+++ b/tests/gql_behave/tests/memgraph_V1/features/list_operations.feature
@@ -263,3 +263,19 @@ Feature: List operators
             | id |
             | 1  |
             | 2  |
+
+     Scenario: InList 01
+        Given an empty graph
+        And having executed
+            """
+            CREATE (o:Node) SET o.Status = 'This is the status';
+            """
+        When executing query:
+            """
+            match (o:Node)
+            where o.Status IN ['This is not the status', 'This is the status']
+            return o;
+            """
+        Then the result should be:
+            | o                                       |
+            | (:Node {Status: 'This is the status'})  |

From e5b2c19ea2560af4995af7831648bbdb80abc0a1 Mon Sep 17 00:00:00 2001
From: Andi <andi8647@gmail.com>
Date: Mon, 13 Nov 2023 11:45:09 +0100
Subject: [PATCH 07/11] Empty Collect() returns nothing (#1482)

---
 src/query/plan/operator.cpp                   | 16 +++++++++--
 src/query/plan/operator.hpp                   |  3 ++
 .../show_while_creating_invalid_state.py      |  2 +-
 .../memgraph_V1/features/aggregations.feature | 28 ++++++++++++++++++-
 .../features/aggregations.feature             | 28 ++++++++++++++++++-
 .../unit/query_plan_accumulate_aggregate.cpp  | 12 +++-----
 6 files changed, 75 insertions(+), 14 deletions(-)

diff --git a/src/query/plan/operator.cpp b/src/query/plan/operator.cpp
index 792d278e8..238638737 100644
--- a/src/query/plan/operator.cpp
+++ b/src/query/plan/operator.cpp
@@ -3463,7 +3463,7 @@ class AggregateCursor : public Cursor {
     SCOPED_PROFILE_OP_BY_REF(self_);
 
     if (!pulled_all_input_) {
-      ProcessAll(&frame, &context);
+      if (!ProcessAll(&frame, &context) && self_.AreAllAggregationsForCollecting()) return false;
       pulled_all_input_ = true;
       aggregation_it_ = aggregation_.begin();
 
@@ -3487,7 +3487,6 @@ class AggregateCursor : public Cursor {
         return true;
       }
     }
-
     if (aggregation_it_ == aggregation_.end()) return false;
 
     // place aggregation values on the frame
@@ -3567,12 +3566,16 @@ class AggregateCursor : public Cursor {
    * cache cardinality depends on number of
    * aggregation results, and not on the number of inputs.
    */
-  void ProcessAll(Frame *frame, ExecutionContext *context) {
+  bool ProcessAll(Frame *frame, ExecutionContext *context) {
     ExpressionEvaluator evaluator(frame, context->symbol_table, context->evaluation_context, context->db_accessor,
                                   storage::View::NEW);
+
+    bool pulled = false;
     while (input_cursor_->Pull(*frame, *context)) {
       ProcessOne(*frame, &evaluator);
+      pulled = true;
     }
+    if (!pulled) return false;
 
     // post processing
     for (size_t pos = 0; pos < self_.aggregations_.size(); ++pos) {
@@ -3606,6 +3609,7 @@ class AggregateCursor : public Cursor {
           break;
       }
     }
+    return true;
   }
 
   /**
@@ -3819,6 +3823,12 @@ UniqueCursorPtr Aggregate::MakeCursor(utils::MemoryResource *mem) const {
   return MakeUniqueCursorPtr<AggregateCursor>(mem, *this, mem);
 }
 
+auto Aggregate::AreAllAggregationsForCollecting() const -> bool {
+  return std::all_of(aggregations_.begin(), aggregations_.end(), [](const auto &agg) {
+    return agg.op == Aggregation::Op::COLLECT_LIST || agg.op == Aggregation::Op::COLLECT_MAP;
+  });
+}
+
 Skip::Skip(const std::shared_ptr<LogicalOperator> &input, Expression *expression)
     : input_(input), expression_(expression) {}
 
diff --git a/src/query/plan/operator.hpp b/src/query/plan/operator.hpp
index 4951b5137..ba844796a 100644
--- a/src/query/plan/operator.hpp
+++ b/src/query/plan/operator.hpp
@@ -1758,6 +1758,9 @@ class Aggregate : public memgraph::query::plan::LogicalOperator {
   Aggregate() = default;
   Aggregate(const std::shared_ptr<LogicalOperator> &input, const std::vector<Element> &aggregations,
             const std::vector<Expression *> &group_by, const std::vector<Symbol> &remember);
+
+  auto AreAllAggregationsForCollecting() const -> bool;
+
   bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override;
   UniqueCursorPtr MakeCursor(utils::MemoryResource *) const override;
   std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override;
diff --git a/tests/e2e/replication/show_while_creating_invalid_state.py b/tests/e2e/replication/show_while_creating_invalid_state.py
index f8fae4cd6..996955dc1 100644
--- a/tests/e2e/replication/show_while_creating_invalid_state.py
+++ b/tests/e2e/replication/show_while_creating_invalid_state.py
@@ -697,7 +697,7 @@ def test_sync_replication_when_main_is_killed():
         )
 
         # 2/
-        QUERY_TO_CHECK = "MATCH (n) RETURN COLLECT(n.name);"
+        QUERY_TO_CHECK = "MATCH (n) RETURN COUNT(n.name);"
         last_result_from_main = interactive_mg_runner.MEMGRAPH_INSTANCES["main"].query(QUERY_TO_CHECK)[0][0]
         for index in range(50):
             interactive_mg_runner.MEMGRAPH_INSTANCES["main"].query(f"CREATE (p:Number {{name:{index}}})")
diff --git a/tests/gql_behave/tests/memgraph_V1/features/aggregations.feature b/tests/gql_behave/tests/memgraph_V1/features/aggregations.feature
index 8fe6a47ad..cff138432 100644
--- a/tests/gql_behave/tests/memgraph_V1/features/aggregations.feature
+++ b/tests/gql_behave/tests/memgraph_V1/features/aggregations.feature
@@ -401,4 +401,30 @@ Feature: Aggregations
             MATCH p=()-[:Z]->() WITH project(p) as graph WITH graph.edges as edges UNWIND edges as e RETURN e.prop as y ORDER BY y DESC
             """
         Then the result should be:
-            | y |
+          | y |
+
+    Scenario: Empty collect aggregation:
+      Given an empty graph
+      And having executed
+          """
+          CREATE (s:Subnet {ip: "192.168.0.1"})
+          """
+      When executing query:
+      """
+      MATCH (subnet:Subnet) WHERE FALSE WITH subnet, collect(subnet.ip) as ips RETURN id(subnet) as id
+      """
+      Then the result should be empty
+
+    Scenario: Empty count aggregation:
+      Given an empty graph
+      And having executed
+          """
+          CREATE (s:Subnet {ip: "192.168.0.1"})
+          """
+      When executing query:
+      """
+      MATCH (subnet:Subnet) WHERE FALSE WITH subnet, count(subnet.ip) as ips RETURN id(subnet) as id
+      """
+      Then the result should be:
+        | id |
+        | null  |
diff --git a/tests/gql_behave/tests/memgraph_V1_on_disk/features/aggregations.feature b/tests/gql_behave/tests/memgraph_V1_on_disk/features/aggregations.feature
index 8fe6a47ad..cff138432 100644
--- a/tests/gql_behave/tests/memgraph_V1_on_disk/features/aggregations.feature
+++ b/tests/gql_behave/tests/memgraph_V1_on_disk/features/aggregations.feature
@@ -401,4 +401,30 @@ Feature: Aggregations
             MATCH p=()-[:Z]->() WITH project(p) as graph WITH graph.edges as edges UNWIND edges as e RETURN e.prop as y ORDER BY y DESC
             """
         Then the result should be:
-            | y |
+          | y |
+
+    Scenario: Empty collect aggregation:
+      Given an empty graph
+      And having executed
+          """
+          CREATE (s:Subnet {ip: "192.168.0.1"})
+          """
+      When executing query:
+      """
+      MATCH (subnet:Subnet) WHERE FALSE WITH subnet, collect(subnet.ip) as ips RETURN id(subnet) as id
+      """
+      Then the result should be empty
+
+    Scenario: Empty count aggregation:
+      Given an empty graph
+      And having executed
+          """
+          CREATE (s:Subnet {ip: "192.168.0.1"})
+          """
+      When executing query:
+      """
+      MATCH (subnet:Subnet) WHERE FALSE WITH subnet, count(subnet.ip) as ips RETURN id(subnet) as id
+      """
+      Then the result should be:
+        | id |
+        | null |
diff --git a/tests/unit/query_plan_accumulate_aggregate.cpp b/tests/unit/query_plan_accumulate_aggregate.cpp
index e271e0f6a..bbf3e0311 100644
--- a/tests/unit/query_plan_accumulate_aggregate.cpp
+++ b/tests/unit/query_plan_accumulate_aggregate.cpp
@@ -277,13 +277,11 @@ TYPED_TEST(QueryPlanAggregateOps, WithoutDataWithGroupBy) {
   }
   {
     auto results = this->AggregationResults(true, false, {Aggregation::Op::COLLECT_LIST});
-    EXPECT_EQ(results.size(), 1);
-    EXPECT_EQ(results[0][0].type(), TypedValue::Type::List);
+    EXPECT_EQ(results.size(), 0);
   }
   {
     auto results = this->AggregationResults(true, false, {Aggregation::Op::COLLECT_MAP});
-    EXPECT_EQ(results.size(), 1);
-    EXPECT_EQ(results[0][0].type(), TypedValue::Type::Map);
+    EXPECT_EQ(results.size(), 0);
   }
 }
 
@@ -695,13 +693,11 @@ TYPED_TEST(QueryPlanAggregateOps, WithoutDataWithDistinctAndWithGroupBy) {
   }
   {
     auto results = this->AggregationResults(true, true, {Aggregation::Op::COLLECT_LIST});
-    EXPECT_EQ(results.size(), 1);
-    EXPECT_EQ(results[0][0].type(), TypedValue::Type::List);
+    EXPECT_EQ(results.size(), 0);
   }
   {
     auto results = this->AggregationResults(true, true, {Aggregation::Op::COLLECT_MAP});
-    EXPECT_EQ(results.size(), 1);
-    EXPECT_EQ(results[0][0].type(), TypedValue::Type::Map);
+    EXPECT_EQ(results.size(), 0);
   }
 }
 

From fdab42a023840317622aacaad898dcd55177ffda Mon Sep 17 00:00:00 2001
From: DavIvek <david.ivekovic@memgraph.io>
Date: Mon, 13 Nov 2023 12:08:48 +0100
Subject: [PATCH 08/11] Use static linking on c++ query modules for glibcxx
 (#1490)

---
 query_modules/CMakeLists.txt | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/query_modules/CMakeLists.txt b/query_modules/CMakeLists.txt
index 879879e89..41dbb495c 100644
--- a/query_modules/CMakeLists.txt
+++ b/query_modules/CMakeLists.txt
@@ -13,6 +13,7 @@ string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type)
 add_library(example_c SHARED example.c)
 target_include_directories(example_c PRIVATE ${CMAKE_SOURCE_DIR}/include)
 target_compile_options(example_c PRIVATE -Wall)
+target_link_libraries(example_c PRIVATE -static-libgcc -static-libstdc++)
 # Strip C example in release build.
 if (lower_build_type STREQUAL "release")
   add_custom_command(TARGET example_c POST_BUILD
@@ -28,6 +29,7 @@ install(FILES example.c DESTINATION lib/memgraph/query_modules/src)
 add_library(example_cpp SHARED example.cpp)
 target_include_directories(example_cpp PRIVATE ${CMAKE_SOURCE_DIR}/include)
 target_compile_options(example_cpp PRIVATE -Wall)
+target_link_libraries(example_cpp PRIVATE -static-libgcc -static-libstdc++)
 # Strip C++ example in release build.
 if (lower_build_type STREQUAL "release")
   add_custom_command(TARGET example_cpp POST_BUILD
@@ -43,6 +45,7 @@ install(FILES example.cpp DESTINATION lib/memgraph/query_modules/src)
 add_library(schema SHARED schema.cpp)
 target_include_directories(schema PRIVATE ${CMAKE_SOURCE_DIR}/include)
 target_compile_options(schema PRIVATE -Wall)
+target_link_libraries(schema PRIVATE -static-libgcc -static-libstdc++)
 # Strip C++ example in release build.
 if (lower_build_type STREQUAL "release")
   add_custom_command(TARGET schema POST_BUILD

From 11be3972c40d0d7c72a6fe17e9739e0c662d5e52 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marko=20Bari=C5=A1i=C4=87?=
 <48765171+MarkoBarisic@users.noreply.github.com>
Date: Mon, 13 Nov 2023 12:20:49 +0100
Subject: [PATCH 09/11] Add workflow for packaging memgraph for specific target
 OS (#1502)

---
 .github/workflows/package_specific.yaml | 315 ++++++++++++++++++++++++
 1 file changed, 315 insertions(+)
 create mode 100644 .github/workflows/package_specific.yaml

diff --git a/.github/workflows/package_specific.yaml b/.github/workflows/package_specific.yaml
new file mode 100644
index 000000000..6181524db
--- /dev/null
+++ b/.github/workflows/package_specific.yaml
@@ -0,0 +1,315 @@
+name: Package Specific
+
+# TODO(gitbuda): Cleanup docker container if GHA job was canceled.
+
+on:
+  workflow_dispatch:
+    inputs:
+      memgraph_version:
+        description: "Memgraph version to upload as. Leave this field empty if you don't want to upload binaries to S3. Format: 'X.Y.Z'"
+        required: false
+      build_type:
+        type: choice
+        description: "Memgraph Build type. Default value is Release."
+        default: 'Release'
+        options:
+          - Release
+          - RelWithDebInfo
+     build_amzn_2:
+        type: boolean
+        default: false
+      build_centos_7:
+        type: boolean
+        default: false
+      build_centos_9:
+        type: boolean
+        default: false
+      build_debian_10:
+        type: boolean
+        default: false
+      build_debian_11:
+        type: boolean
+        default: false
+      build_debian_11_arm:
+        type: boolean
+        default: false
+      build_debian_11_platform:
+        type: boolean
+        default: false
+      build_docker:
+        type: boolean
+        default: false
+      build_fedora_36:
+        type: boolean
+        default: false
+      build_ubuntu_18_04:
+        type: boolean
+        default: false
+      build_ubuntu_20_04:
+        type: boolean
+        default: false
+      build_ubuntu_22_04:
+        type: boolean
+        default: false
+      build_ubuntu_22_04_arm:
+        type: boolean
+        default: false
+
+jobs:
+  amzn-2:
+    if: ${{ github.event.inputs.build_amzn_2 }}
+    runs-on: [self-hosted, DockerMgBuild, X64]
+    timeout-minutes: 60
+    steps:
+      - name: "Set up repository"
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # Required because of release/get_version.py
+      - name: "Build package"
+        run: |
+          ./release/package/run.sh package amzn-2 ${{ github.event.inputs.build_type }}
+      - name: "Upload package"
+        uses: actions/upload-artifact@v3
+        with:
+          name: amzn-2
+          path: build/output/amzn-2/memgraph*.rpm
+
+  centos-7:
+    if: ${{ github.event.inputs.build_centos_7 }}
+    runs-on: [self-hosted, DockerMgBuild, X64]
+    timeout-minutes: 60
+    steps:
+      - name: "Set up repository"
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # Required because of release/get_version.py
+      - name: "Build package"
+        run: |
+          ./release/package/run.sh package centos-7 ${{ github.event.inputs.build_type }}
+      - name: "Upload package"
+        uses: actions/upload-artifact@v3
+        with:
+          name: centos-7
+          path: build/output/centos-7/memgraph*.rpm
+
+  centos-9:
+    if: ${{ github.event.inputs.build_centos_9 }}
+    runs-on: [self-hosted, DockerMgBuild, X64]
+    timeout-minutes: 60
+    steps:
+      - name: "Set up repository"
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # Required because of release/get_version.py
+      - name: "Build package"
+        run: |
+          ./release/package/run.sh package centos-9 ${{ github.event.inputs.build_type }}
+      - name: "Upload package"
+        uses: actions/upload-artifact@v3
+        with:
+          name: centos-9
+          path: build/output/centos-9/memgraph*.rpm
+
+  debian-10:
+    if: ${{ github.event.inputs.build_debian_10 }}
+    runs-on: [self-hosted, DockerMgBuild, X64]
+    timeout-minutes: 60
+    steps:
+      - name: "Set up repository"
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # Required because of release/get_version.py
+      - name: "Build package"
+        run: |
+          ./release/package/run.sh package debian-10 ${{ github.event.inputs.build_type }}
+      - name: "Upload package"
+        uses: actions/upload-artifact@v3
+        with:
+          name: debian-10
+          path: build/output/debian-10/memgraph*.deb
+
+  debian-11:
+    if: ${{ github.event.inputs.build_debian_11 }}
+    runs-on: [self-hosted, DockerMgBuild, X64]
+    timeout-minutes: 60
+    steps:
+      - name: "Set up repository"
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # Required because of release/get_version.py
+      - name: "Build package"
+        run: |
+          ./release/package/run.sh package debian-11 ${{ github.event.inputs.build_type }}
+      - name: "Upload package"
+        uses: actions/upload-artifact@v3
+        with:
+          name: debian-11
+          path: build/output/debian-11/memgraph*.deb
+
+  debian-11-arm:
+    if: ${{ github.event.inputs.build_debian_11_arm }}
+    runs-on: [self-hosted, DockerMgBuild, ARM64, strange]
+    timeout-minutes: 120
+    steps:
+      - name: "Set up repository"
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # Required because of release/get_version.py
+      - name: "Build package"
+        run: |
+          ./release/package/run.sh package debian-11-arm ${{ github.event.inputs.build_type }}
+      - name: "Upload package"
+        uses: actions/upload-artifact@v3
+        with:
+          name: debian-11-aarch64
+          path: build/output/debian-11-arm/memgraph*.deb
+
+  debian-11-platform:
+    if: ${{ github.event.inputs.build_debian_11_platform }}
+    runs-on: [self-hosted, DockerMgBuild, X64]
+    timeout-minutes: 60
+    steps:
+      - name: "Set up repository"
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # Required because of release/get_version.py
+      - name: "Build package"
+        run: |
+          ./release/package/run.sh package debian-11 ${{ github.event.inputs.build_type }} --for-platform
+      - name: "Upload package"
+        uses: actions/upload-artifact@v3
+        with:
+          name: debian-11-platform
+          path: build/output/debian-11/memgraph*.deb
+
+  docker:
+    if: ${{ github.event.inputs.build_docker }}
+    runs-on: [self-hosted, DockerMgBuild, X64]
+    timeout-minutes: 60
+    steps:
+      - name: "Set up repository"
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # Required because of release/get_version.py
+      - name: "Build package"
+        run: |
+          cd release/package
+          ./run.sh package debian-11 ${{ github.event.inputs.build_type }} --for-docker
+          ./run.sh docker
+      - name: "Upload package"
+        uses: actions/upload-artifact@v3
+        with:
+          name: docker
+          path: build/output/docker/memgraph*.tar.gz
+
+  fedora-36:
+    if: ${{ github.event.inputs.build_fedora_36 }}
+    runs-on: [self-hosted, DockerMgBuild, X64]
+    timeout-minutes: 60
+    steps:
+      - name: "Set up repository"
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # Required because of release/get_version.py
+      - name: "Build package"
+        run: |
+          ./release/package/run.sh package fedora-36 ${{ github.event.inputs.build_type }}
+      - name: "Upload package"
+        uses: actions/upload-artifact@v3
+        with:
+          name: fedora-36
+          path: build/output/fedora-36/memgraph*.rpm
+
+  ubuntu-18_04:
+    if: ${{ github.event.inputs.build_ubuntu_18_04 }}
+    runs-on: [self-hosted, DockerMgBuild, X64]
+    timeout-minutes: 60
+    steps:
+      - name: "Set up repository"
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # Required because of release/get_version.py
+      - name: "Build package"
+        run: |
+          ./release/package/run.sh package ubuntu-18.04 ${{ github.event.inputs.build_type }}
+      - name: "Upload package"
+        uses: actions/upload-artifact@v3
+        with:
+          name: ubuntu-18.04
+          path: build/output/ubuntu-18.04/memgraph*.deb
+
+  ubuntu-20_04:
+    if: ${{ github.event.inputs.build_ubuntu_20_04 }}
+    runs-on: [self-hosted, DockerMgBuild, X64]
+    timeout-minutes: 60
+    steps:
+      - name: "Set up repository"
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # Required because of release/get_version.py
+      - name: "Build package"
+        run: |
+          ./release/package/run.sh package ubuntu-20.04 ${{ github.event.inputs.build_type }}
+      - name: "Upload package"
+        uses: actions/upload-artifact@v3
+        with:
+          name: ubuntu-20.04
+          path: build/output/ubuntu-20.04/memgraph*.deb
+
+  ubuntu-22_04:
+    if: ${{ github.event.inputs.build_ubuntu_22_04 }}
+    runs-on: [self-hosted, DockerMgBuild, X64]
+    timeout-minutes: 60
+    steps:
+      - name: "Set up repository"
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # Required because of release/get_version.py
+      - name: "Build package"
+        run: |
+          ./release/package/run.sh package ubuntu-22.04 ${{ github.event.inputs.build_type }}
+      - name: "Upload package"
+        uses: actions/upload-artifact@v3
+        with:
+          name: ubuntu-22.04
+          path: build/output/ubuntu-22.04/memgraph*.deb
+
+  ubuntu-22_04-arm:
+    if: ${{ github.event.inputs.build_ubuntu_22_04_arm }}
+    runs-on: [self-hosted, DockerMgBuild, ARM64, strange]
+    timeout-minutes: 120
+    steps:
+      - name: "Set up repository"
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # Required because of release/get_version.py
+      - name: "Build package"
+        run: |
+          ./release/package/run.sh package ubuntu-22.04-arm ${{ github.event.inputs.build_type }}
+      - name: "Upload package"
+        uses: actions/upload-artifact@v3
+        with:
+          name: ubuntu-22.04-aarch64
+          path: build/output/ubuntu-22.04-arm/memgraph*.deb
+
+  upload-to-s3:
+    # only run upload if we specified version. Allows for runs without upload
+    if: "${{ github.event.inputs.memgraph_version != '' }}"
+    needs: [centos-7, centos-9, debian-10, debian-11, docker, ubuntu-1804, ubuntu-2004, ubuntu-2204, debian-11-platform, fedora-36, amzn-2, debian-11-arm, ubuntu-2204-arm]
+    runs-on: ubuntu-latest
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v3
+        with:
+          # name: # if name input parameter is not provided, all artifacts are downloaded
+                  # and put in directories named after each one.
+          path: build/output/release
+      - name: Upload to S3
+        uses: jakejarvis/s3-sync-action@v0.5.1
+        env:
+          AWS_S3_BUCKET: "download.memgraph.com"
+          AWS_ACCESS_KEY_ID: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
+          AWS_REGION: "eu-west-1"
+          SOURCE_DIR: "build/output/release"
+          DEST_DIR: "memgraph/v${{ github.event.inputs.memgraph_version }}/"

From e671a0737e5826fec1a823da8dda9a3f7331fb0c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marko=20Bari=C5=A1i=C4=87?=
 <48765171+MarkoBarisic@users.noreply.github.com>
Date: Mon, 13 Nov 2023 12:54:19 +0100
Subject: [PATCH 10/11] Fix package specific workflow file (#1503)

---
 .github/workflows/package_specific.yaml | 83 +++++++++----------------
 1 file changed, 31 insertions(+), 52 deletions(-)

diff --git a/.github/workflows/package_specific.yaml b/.github/workflows/package_specific.yaml
index 6181524db..2d3d26ef5 100644
--- a/.github/workflows/package_specific.yaml
+++ b/.github/workflows/package_specific.yaml
@@ -15,49 +15,28 @@ on:
         options:
           - Release
           - RelWithDebInfo
-     build_amzn_2:
-        type: boolean
-        default: false
-      build_centos_7:
-        type: boolean
-        default: false
-      build_centos_9:
-        type: boolean
-        default: false
-      build_debian_10:
-        type: boolean
-        default: false
-      build_debian_11:
-        type: boolean
-        default: false
-      build_debian_11_arm:
-        type: boolean
-        default: false
-      build_debian_11_platform:
-        type: boolean
-        default: false
-      build_docker:
-        type: boolean
-        default: false
-      build_fedora_36:
-        type: boolean
-        default: false
-      build_ubuntu_18_04:
-        type: boolean
-        default: false
-      build_ubuntu_20_04:
-        type: boolean
-        default: false
-      build_ubuntu_22_04:
-        type: boolean
-        default: false
-      build_ubuntu_22_04_arm:
-        type: boolean
-        default: false
+      target_os:
+        type: choice
+        description: "Target OS for which memgraph will be packaged. Default is Ubuntu 22.04"
+        default: 'ubuntu-22_04'
+        options:
+          - amzn-2
+          - centos-7
+          - centos-9
+          - debian-10
+          - debian-11
+          - debian-11-arm
+          - debian-11-platform
+          - docker
+          - fedora-36
+          - ubuntu-18_04
+          - ubuntu-20_04
+          - ubuntu-22_04
+          - ubuntu-22_04-arm
 
 jobs:
   amzn-2:
-    if: ${{ github.event.inputs.build_amzn_2 }}
+    if: ${{ github.event.inputs.target_os == 'amzn-2' }}
     runs-on: [self-hosted, DockerMgBuild, X64]
     timeout-minutes: 60
     steps:
@@ -75,7 +54,7 @@ jobs:
           path: build/output/amzn-2/memgraph*.rpm
 
   centos-7:
-    if: ${{ github.event.inputs.build_centos_7 }}
+    if: ${{ github.event.inputs.target_os == 'centos-7' }}
     runs-on: [self-hosted, DockerMgBuild, X64]
     timeout-minutes: 60
     steps:
@@ -93,7 +72,7 @@ jobs:
           path: build/output/centos-7/memgraph*.rpm
 
   centos-9:
-    if: ${{ github.event.inputs.build_centos_9 }}
+    if: ${{ github.event.inputs.target_os == 'centos-9' }}
     runs-on: [self-hosted, DockerMgBuild, X64]
     timeout-minutes: 60
     steps:
@@ -111,7 +90,7 @@ jobs:
           path: build/output/centos-9/memgraph*.rpm
 
   debian-10:
-    if: ${{ github.event.inputs.build_debian_10 }}
+    if: ${{ github.event.inputs.target_os == 'debian-10' }}
     runs-on: [self-hosted, DockerMgBuild, X64]
     timeout-minutes: 60
     steps:
@@ -129,7 +108,7 @@ jobs:
           path: build/output/debian-10/memgraph*.deb
 
   debian-11:
-    if: ${{ github.event.inputs.build_debian_11 }}
+    if: ${{ github.event.inputs.target_os == 'debian-11' }}
     runs-on: [self-hosted, DockerMgBuild, X64]
     timeout-minutes: 60
     steps:
@@ -147,7 +126,7 @@ jobs:
           path: build/output/debian-11/memgraph*.deb
 
   debian-11-arm:
-    if: ${{ github.event.inputs.build_debian_11_arm }}
+    if: ${{ github.event.inputs.target_os == 'debian-11-arm' }}
     runs-on: [self-hosted, DockerMgBuild, ARM64, strange]
     timeout-minutes: 120
     steps:
@@ -165,7 +144,7 @@ jobs:
           path: build/output/debian-11-arm/memgraph*.deb
 
   debian-11-platform:
-    if: ${{ github.event.inputs.build_debian_11_platform }}
+    if: ${{ github.event.inputs.target_os == 'debian-11-platform' }}
     runs-on: [self-hosted, DockerMgBuild, X64]
     timeout-minutes: 60
     steps:
@@ -183,7 +162,7 @@ jobs:
           path: build/output/debian-11/memgraph*.deb
 
   docker:
-    if: ${{ github.event.inputs.build_docker }}
+    if: ${{ github.event.inputs.target_os == 'docker' }}
     runs-on: [self-hosted, DockerMgBuild, X64]
     timeout-minutes: 60
     steps:
@@ -203,7 +182,7 @@ jobs:
           path: build/output/docker/memgraph*.tar.gz
 
   fedora-36:
-    if: ${{ github.event.inputs.build_fedora_36 }}
+    if: ${{ github.event.inputs.target_os == 'fedora-36' }}
     runs-on: [self-hosted, DockerMgBuild, X64]
     timeout-minutes: 60
     steps:
@@ -221,7 +200,7 @@ jobs:
           path: build/output/fedora-36/memgraph*.rpm
 
   ubuntu-18_04:
-    if: ${{ github.event.inputs.build_ubuntu_18_04 }}
+    if: ${{ github.event.inputs.target_os == 'ubuntu-18_04' }}
     runs-on: [self-hosted, DockerMgBuild, X64]
     timeout-minutes: 60
     steps:
@@ -239,7 +218,7 @@ jobs:
           path: build/output/ubuntu-18.04/memgraph*.deb
 
   ubuntu-20_04:
-    if: ${{ github.event.inputs.build_ubuntu_20_04 }}
+    if: ${{ github.event.inputs.target_os == 'ubuntu-20_04' }}
     runs-on: [self-hosted, DockerMgBuild, X64]
     timeout-minutes: 60
     steps:
@@ -257,7 +236,7 @@ jobs:
           path: build/output/ubuntu-20.04/memgraph*.deb
 
   ubuntu-22_04:
-    if: ${{ github.event.inputs.build_ubuntu_22_04 }}
+    if: ${{ github.event.inputs.target_os == 'ubuntu-22_04' }}
     runs-on: [self-hosted, DockerMgBuild, X64]
     timeout-minutes: 60
     steps:
@@ -275,7 +254,7 @@ jobs:
           path: build/output/ubuntu-22.04/memgraph*.deb
 
   ubuntu-22_04-arm:
-    if: ${{ github.event.inputs.build_ubuntu_22_04_arm }}
+    if: ${{ github.event.inputs.target_os == 'ubuntu-22_04-arm' }}
     runs-on: [self-hosted, DockerMgBuild, ARM64, strange]
     timeout-minutes: 120
     steps:

From 9cc060c4b0703a3fb6e9b0935bf8db584d0ef58b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marko=20Bari=C5=A1i=C4=87?=
 <48765171+MarkoBarisic@users.noreply.github.com>
Date: Mon, 13 Nov 2023 13:01:01 +0100
Subject: [PATCH 11/11] Fix error in upload-to-s3 job (#1504)

---
 .github/workflows/package_specific.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/package_specific.yaml b/.github/workflows/package_specific.yaml
index 2d3d26ef5..c599f65ef 100644
--- a/.github/workflows/package_specific.yaml
+++ b/.github/workflows/package_specific.yaml
@@ -274,7 +274,7 @@ jobs:
   upload-to-s3:
     # only run upload if we specified version. Allows for runs without upload
     if: "${{ github.event.inputs.memgraph_version != '' }}"
-    needs: [centos-7, centos-9, debian-10, debian-11, docker, ubuntu-1804, ubuntu-2004, ubuntu-2204, debian-11-platform, fedora-36, amzn-2, debian-11-arm, ubuntu-2204-arm]
+    needs: [amzn-2, centos-7, centos-9, debian-10, debian-11, debian-11-arm, debian-11-platform, docker, fedora-36, ubuntu-18_04, ubuntu-20_04, ubuntu-22_04, ubuntu-22_04-arm]
     runs-on: ubuntu-latest
     steps:
       - name: Download artifacts