diff --git a/.github/workflows/daily_benchmark.yaml b/.github/workflows/daily_benchmark.yaml index 4cded7b0c..aa2686977 100644 --- a/.github/workflows/daily_benchmark.yaml +++ b/.github/workflows/daily_benchmark.yaml @@ -16,7 +16,7 @@ jobs: steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) diff --git a/.github/workflows/diff.yaml b/.github/workflows/diff.yaml index ba00941b8..411bed79c 100644 --- a/.github/workflows/diff.yaml +++ b/.github/workflows/diff.yaml @@ -27,7 +27,7 @@ jobs: steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) @@ -65,7 +65,7 @@ jobs: steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) @@ -96,7 +96,7 @@ jobs: - name: Python code analysis run: | - CHANGED_FILES=$(git diff -U0 ${{ env.BASE_BRANCH }}... --name-only) + CHANGED_FILES=$(git diff -U0 ${{ env.BASE_BRANCH }}... --name-only --diff-filter=d) for file in ${CHANGED_FILES}; do echo ${file} if [[ ${file} == *.py ]]; then @@ -137,9 +137,9 @@ jobs: tar -czf code_coverage.tar.gz coverage.json html report.json summary.rmu - name: Save code coverage - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: "Code coverage" + name: "Code coverage(Code analysis)" path: tools/github/generated/code_coverage.tar.gz - name: Run clang-tidy @@ -162,7 +162,7 @@ jobs: steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) @@ -208,9 +208,9 @@ jobs: ./cppcheck_and_clang_format diff - name: Save cppcheck and clang-format errors - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: "Code coverage" + name: "Code coverage(Debug build)" path: tools/github/cppcheck_and_clang_format.txt release_build: @@ -223,7 +223,7 @@ jobs: steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. 
(default: 1) @@ -251,7 +251,7 @@ jobs: ./continuous_integration - name: Save quality assurance status - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: "GQL Behave Status" path: | @@ -268,6 +268,7 @@ jobs: ctest -R memgraph__unit --output-on-failure -j$THREADS - name: Ensure Kafka and Pulsar are up + if: false run: | cd tests/e2e/streams/kafka docker-compose up -d @@ -275,6 +276,7 @@ jobs: docker-compose up -d - name: Run e2e tests + if: false run: | cd tests ./setup.sh /opt/toolchain-v4/activate @@ -283,7 +285,7 @@ jobs: ./run.sh - name: Ensure Kafka and Pulsar are down - if: always() + if: false run: | cd tests/e2e/streams/kafka docker-compose down @@ -323,16 +325,128 @@ jobs: cpack -G DEB --config ../CPackConfig.cmake - name: Save enterprise DEB package - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: "Enterprise DEB package" path: build/output/memgraph*.deb - name: Save test data - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: - name: "Test data" + name: "Test data(Release build)" + path: | + # multiple paths could be defined + build/logs + + experimental_build_ha: + name: "High availability build" + runs-on: [self-hosted, Linux, X64, Diff] + env: + THREADS: 24 + MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} + MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} + + steps: + - name: Set up repository + uses: actions/checkout@v4 + with: + # Number of commits to fetch. `0` indicates all history for all + # branches and tags. (default: 1) + fetch-depth: 0 + + - name: Build release binaries + run: | + source /opt/toolchain-v4/activate + ./init + cd build + cmake -DCMAKE_BUILD_TYPE=Release -DMG_EXPERIMENTAL_HIGH_AVAILABILITY=ON .. + make -j$THREADS + - name: Run unit tests + run: | + source /opt/toolchain-v4/activate + cd build + ctest -R memgraph__unit --output-on-failure -j$THREADS + - name: Run e2e tests + if: false + run: | + cd tests + ./setup.sh /opt/toolchain-v4/activate + source ve3/bin/activate_e2e + cd e2e + ./run.sh "Coordinator" + ./run.sh "Client initiated failover" + ./run.sh "Uninitialized cluster" + - name: Save test data + uses: actions/upload-artifact@v4 + if: always() + with: + name: "Test data(High availability build)" + path: | + # multiple paths could be defined + build/logs + + experimental_build_mt: + name: "MultiTenancy replication build" + runs-on: [self-hosted, Linux, X64, Diff] + env: + THREADS: 24 + MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} + MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} + + steps: + - name: Set up repository + uses: actions/checkout@v4 + with: + # Number of commits to fetch. `0` indicates all history for all + # branches and tags. (default: 1) + fetch-depth: 0 + + + - name: Build release binaries + run: | + # Activate toolchain. + source /opt/toolchain-v4/activate + + # Initialize dependencies. + ./init + + # Build MT replication experimental binaries. + cd build + cmake -DCMAKE_BUILD_TYPE=Release -D MG_EXPERIMENTAL_REPLICATION_MULTITENANCY=ON .. + make -j$THREADS + + - name: Run unit tests + run: | + # Activate toolchain. + source /opt/toolchain-v4/activate + + # Run unit tests. 
+ cd build + ctest -R memgraph__unit --output-on-failure -j$THREADS + + - name: Run e2e tests + if: false + run: | + cd tests + ./setup.sh /opt/toolchain-v4/activate + source ve3/bin/activate_e2e + cd e2e + + # Just the replication based e2e tests + ./run.sh "Replicate multitenancy" + ./run.sh "Show" + ./run.sh "Show while creating invalid state" + ./run.sh "Delete edge replication" + ./run.sh "Read-write benchmark" + ./run.sh "Index replication" + ./run.sh "Constraints" + + - name: Save test data + uses: actions/upload-artifact@v4 + if: always() + with: + name: "Test data(MultiTenancy replication build)" path: | # multiple paths could be defined build/logs @@ -348,7 +462,7 @@ jobs: steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) @@ -365,13 +479,18 @@ jobs: cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo .. make -j$THREADS memgraph + - name: Refresh Jepsen Cluster + run: | + cd tests/jepsen + ./run.sh cluster-refresh + - name: Run Jepsen tests run: | cd tests/jepsen ./run.sh test-all-individually --binary ../../build/memgraph --ignore-run-stdout-logs --ignore-run-stderr-logs - name: Save Jepsen report - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: ${{ always() }} with: name: "Jepsen Report" @@ -387,7 +506,7 @@ jobs: steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) diff --git a/.github/workflows/full_clang_tidy.yaml b/.github/workflows/full_clang_tidy.yaml index bce2a4a0a..10816cd7a 100644 --- a/.github/workflows/full_clang_tidy.yaml +++ b/.github/workflows/full_clang_tidy.yaml @@ -14,7 +14,7 @@ jobs: steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. 
(default: 1) diff --git a/.github/workflows/package_memgraph.yaml b/.github/workflows/package_memgraph.yaml index 48a61ca53..45a62f037 100644 --- a/.github/workflows/package_memgraph.yaml +++ b/.github/workflows/package_memgraph.yaml @@ -42,14 +42,14 @@ jobs: timeout-minutes: 60 steps: - name: "Set up repository" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Required because of release/get_version.py - name: "Build package" run: | ./release/package/run.sh package amzn-2 ${{ github.event.inputs.build_type }} - name: "Upload package" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: amzn-2 path: build/output/amzn-2/memgraph*.rpm @@ -60,14 +60,14 @@ jobs: timeout-minutes: 60 steps: - name: "Set up repository" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Required because of release/get_version.py - name: "Build package" run: | ./release/package/run.sh package centos-7 ${{ github.event.inputs.build_type }} - name: "Upload package" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: centos-7 path: build/output/centos-7/memgraph*.rpm @@ -78,14 +78,14 @@ jobs: timeout-minutes: 60 steps: - name: "Set up repository" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Required because of release/get_version.py - name: "Build package" run: | ./release/package/run.sh package centos-9 ${{ github.event.inputs.build_type }} - name: "Upload package" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: centos-9 path: build/output/centos-9/memgraph*.rpm @@ -96,14 +96,14 @@ jobs: timeout-minutes: 60 steps: - name: "Set up repository" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Required because of release/get_version.py - name: "Build package" run: | ./release/package/run.sh package debian-10 ${{ github.event.inputs.build_type }} - name: "Upload package" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: debian-10 path: build/output/debian-10/memgraph*.deb @@ -114,14 +114,14 @@ jobs: timeout-minutes: 60 steps: - name: "Set up repository" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Required because of release/get_version.py - name: "Build package" run: | ./release/package/run.sh package debian-11 ${{ github.event.inputs.build_type }} - name: "Upload package" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: debian-11 path: build/output/debian-11/memgraph*.deb @@ -132,14 +132,14 @@ jobs: timeout-minutes: 120 steps: - name: "Set up repository" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Required because of release/get_version.py - name: "Build package" run: | ./release/package/run.sh package debian-11-arm ${{ github.event.inputs.build_type }} - name: "Upload package" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: debian-11-aarch64 path: build/output/debian-11-arm/memgraph*.deb @@ -150,14 +150,14 @@ jobs: timeout-minutes: 60 steps: - name: "Set up repository" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Required because of release/get_version.py - name: "Build package" run: | ./release/package/run.sh package debian-11 ${{ github.event.inputs.build_type }} --for-platform - name: "Upload package" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: debian-11-platform path: 
build/output/debian-11/memgraph*.deb @@ -168,7 +168,7 @@ jobs: timeout-minutes: 60 steps: - name: "Set up repository" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Required because of release/get_version.py - name: "Build package" @@ -177,7 +177,7 @@ jobs: ./run.sh package debian-11 ${{ github.event.inputs.build_type }} --for-docker ./run.sh docker - name: "Upload package" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: docker path: build/output/docker/memgraph*.tar.gz @@ -188,14 +188,14 @@ jobs: timeout-minutes: 60 steps: - name: "Set up repository" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Required because of release/get_version.py - name: "Build package" run: | ./release/package/run.sh package fedora-36 ${{ github.event.inputs.build_type }} - name: "Upload package" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: fedora-36 path: build/output/fedora-36/memgraph*.rpm @@ -206,14 +206,14 @@ jobs: timeout-minutes: 60 steps: - name: "Set up repository" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Required because of release/get_version.py - name: "Build package" run: | ./release/package/run.sh package ubuntu-18.04 ${{ github.event.inputs.build_type }} - name: "Upload package" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ubuntu-18.04 path: build/output/ubuntu-18.04/memgraph*.deb @@ -224,14 +224,14 @@ jobs: timeout-minutes: 60 steps: - name: "Set up repository" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Required because of release/get_version.py - name: "Build package" run: | ./release/package/run.sh package ubuntu-20.04 ${{ github.event.inputs.build_type }} - name: "Upload package" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ubuntu-20.04 path: build/output/ubuntu-20.04/memgraph*.deb @@ -242,14 +242,14 @@ jobs: timeout-minutes: 60 steps: - name: "Set up repository" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Required because of release/get_version.py - name: "Build package" run: | ./release/package/run.sh package ubuntu-22.04 ${{ github.event.inputs.build_type }} - name: "Upload package" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ubuntu-22.04 path: build/output/ubuntu-22.04/memgraph*.deb @@ -260,14 +260,14 @@ jobs: timeout-minutes: 120 steps: - name: "Set up repository" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Required because of release/get_version.py - name: "Build package" run: | ./release/package/run.sh package ubuntu-22.04-arm ${{ github.event.inputs.build_type }} - name: "Upload package" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ubuntu-22.04-aarch64 path: build/output/ubuntu-22.04-arm/memgraph*.deb @@ -279,7 +279,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Download artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: # name: # if name input parameter is not provided, all artifacts are downloaded # and put in directories named after each one. 
diff --git a/.github/workflows/performance_benchmarks.yaml b/.github/workflows/performance_benchmarks.yaml index d31c661de..cc030c180 100644 --- a/.github/workflows/performance_benchmarks.yaml +++ b/.github/workflows/performance_benchmarks.yaml @@ -14,7 +14,7 @@ jobs: steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) diff --git a/.github/workflows/release_debian10.yaml b/.github/workflows/release_debian10.yaml index f22be163d..36b9148c3 100644 --- a/.github/workflows/release_debian10.yaml +++ b/.github/workflows/release_debian10.yaml @@ -14,19 +14,21 @@ on: schedule: - cron: "0 22 * * *" +env: + THREADS: 24 + MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} + MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} + BUILD_TYPE: ${{ github.event.inputs.build_type || 'Release' }} + jobs: community_build: name: "Community build" runs-on: [self-hosted, Linux, X64, Debian10] - env: - THREADS: 24 - MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} - MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} - timeout-minutes: 960 + timeout-minutes: 90 steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) @@ -40,10 +42,6 @@ jobs: # Initialize dependencies. ./init - # Set default build_type to Release - INPUT_BUILD_TYPE=${{ github.event.inputs.build_type }} - BUILD_TYPE=${INPUT_BUILD_TYPE:-"Release"} - # Build community binaries. cd build cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DMG_ENTERPRISE=OFF .. @@ -65,10 +63,11 @@ jobs: THREADS: 24 MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} + timeout-minutes: 90 steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) @@ -110,22 +109,19 @@ jobs: tar -czf code_coverage.tar.gz coverage.json html report.json summary.rmu - name: Save code coverage - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: "Code coverage" + name: "Code coverage(Coverage build)" path: tools/github/generated/code_coverage.tar.gz debug_build: name: "Debug build" runs-on: [self-hosted, Linux, X64, Debian10] - env: - THREADS: 24 - MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} - MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} + timeout-minutes: 90 steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) @@ -157,10 +153,6 @@ jobs: run: | ./tests/drivers/run.sh - - name: Run integration tests - run: | - tests/integration/run.sh - - name: Run cppcheck and clang-format run: | # Activate toolchain. 
@@ -171,23 +163,49 @@ jobs: ./cppcheck_and_clang_format diff - name: Save cppcheck and clang-format errors - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: "Code coverage" + name: "Code coverage(Debug build)" path: tools/github/cppcheck_and_clang_format.txt - release_build: - name: "Release build" - runs-on: [self-hosted, Linux, X64, Debian10, BigMemory] - env: - THREADS: 24 - MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} - MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} - timeout-minutes: 960 + debug_integration_test: + name: "Debug integration tests" + runs-on: [self-hosted, Linux, X64, Debian10] + timeout-minutes: 90 steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + # Number of commits to fetch. `0` indicates all history for all + # branches and tags. (default: 1) + fetch-depth: 0 + + - name: Build debug binaries + run: | + # Activate toolchain. + source /opt/toolchain-v4/activate + + # Initialize dependencies. + ./init + + # Build debug binaries. + cd build + cmake .. + make -j$THREADS + + - name: Run integration tests + run: | + tests/integration/run.sh + + release_build: + name: "Release build" + runs-on: [self-hosted, Linux, X64, Debian10] + timeout-minutes: 90 + + steps: + - name: Set up repository + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) @@ -201,10 +219,6 @@ jobs: # Initialize dependencies. ./init - # Set default build_type to Release - INPUT_BUILD_TYPE=${{ github.event.inputs.build_type }} - BUILD_TYPE=${INPUT_BUILD_TYPE:-"Release"} - # Build release binaries. cd build cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE .. @@ -226,11 +240,60 @@ jobs: cpack -G DEB --config ../CPackConfig.cmake - name: Save enterprise DEB package - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: "Enterprise DEB package" path: build/output/memgraph*.deb + - name: Run GQL Behave tests + run: | + cd tests + ./setup.sh /opt/toolchain-v4/activate + cd gql_behave + ./continuous_integration + + - name: Save quality assurance status + uses: actions/upload-artifact@v4 + with: + name: "GQL Behave Status" + path: | + tests/gql_behave/gql_behave_status.csv + tests/gql_behave/gql_behave_status.html + + - name: Run unit tests + run: | + # Activate toolchain. + source /opt/toolchain-v4/activate + + # Run unit tests. + cd build + ctest -R memgraph__unit --output-on-failure + + release_benchmark_tests: + name: "Release Benchmark Tests" + runs-on: [self-hosted, Linux, X64, Debian10] + timeout-minutes: 90 + + steps: + - name: Set up repository + uses: actions/checkout@v4 + with: + # Number of commits to fetch. `0` indicates all history for all + # branches and tags. (default: 1) + fetch-depth: 0 + + - name: Build release binaries + run: | + # Activate toolchain. + source /opt/toolchain-v4/activate + # Initialize dependencies. + ./init + + # Build release binaries + cd build + cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE .. + make -j$THREADS + - name: Run micro benchmark tests run: | # Activate toolchain. 
@@ -257,29 +320,31 @@ jobs: --num-database-workers 9 --num-clients-workers 30 \ --no-strict - - name: Run GQL Behave tests - run: | - cd tests - ./setup.sh /opt/toolchain-v4/activate - cd gql_behave - ./continuous_integration + release_e2e_test: + if: false + name: "Release End-to-end Test" + runs-on: [self-hosted, Linux, X64, Debian10] + timeout-minutes: 90 - - name: Save quality assurance status - uses: actions/upload-artifact@v3 + steps: + - name: Set up repository + uses: actions/checkout@v4 with: - name: "GQL Behave Status" - path: | - tests/gql_behave/gql_behave_status.csv - tests/gql_behave/gql_behave_status.html + # Number of commits to fetch. `0` indicates all history for all + # branches and tags. (default: 1) + fetch-depth: 0 - - name: Run unit tests + - name: Build release binaries run: | # Activate toolchain. source /opt/toolchain-v4/activate + # Initialize dependencies. + ./init - # Run unit tests. + # Build release binaries cd build - ctest -R memgraph__unit --output-on-failure + cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE .. + make -j$THREADS - name: Ensure Kafka and Pulsar are up run: | @@ -304,6 +369,32 @@ jobs: cd ../pulsar docker-compose down + release_durability_stress_tests: + name: "Release durability and stress tests" + runs-on: [self-hosted, Linux, X64, Debian10] + timeout-minutes: 90 + + steps: + - name: Set up repository + uses: actions/checkout@v4 + with: + # Number of commits to fetch. `0` indicates all history for all + # branches and tags. (default: 1) + fetch-depth: 0 + + - name: Build release binaries + run: | + # Activate toolchain. + source /opt/toolchain-v4/activate + + # Initialize dependencies. + ./init + + # Build release binaries. + cd build + cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE .. + make -j$THREADS + - name: Run stress test (plain) run: | cd tests/stress @@ -314,11 +405,6 @@ jobs: cd tests/stress ./continuous_integration --use-ssl - - name: Run stress test (large) - run: | - cd tests/stress - ./continuous_integration --large-dataset - - name: Run durability test (plain) run: | cd tests/stress @@ -334,15 +420,11 @@ jobs: release_jepsen_test: name: "Release Jepsen Test" runs-on: [self-hosted, Linux, X64, Debian10, JepsenControl] - env: - THREADS: 24 - MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} - MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} - timeout-minutes: 60 + timeout-minutes: 90 steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) @@ -355,22 +437,23 @@ jobs: # Initialize dependencies. ./init - # Set default build_type to Release - INPUT_BUILD_TYPE=${{ github.event.inputs.build_type }} - BUILD_TYPE=${INPUT_BUILD_TYPE:-"Release"} - # Build only memgraph release binary. cd build cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE .. 
make -j$THREADS memgraph + - name: Refresh Jepsen Cluster + run: | + cd tests/jepsen + ./run.sh cluster-refresh + - name: Run Jepsen tests run: | cd tests/jepsen ./run.sh test-all-individually --binary ../../build/memgraph --ignore-run-stdout-logs --ignore-run-stderr-logs - name: Save Jepsen report - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: ${{ always() }} with: name: "Jepsen Report" diff --git a/.github/workflows/release_docker.yaml b/.github/workflows/release_docker.yaml index 2ebb2e804..d5e02254b 100644 --- a/.github/workflows/release_docker.yaml +++ b/.github/workflows/release_docker.yaml @@ -19,7 +19,7 @@ jobs: DOCKER_REPOSITORY_NAME: memgraph steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v2 diff --git a/.github/workflows/release_mgbench_client.yaml b/.github/workflows/release_mgbench_client.yaml index 2abdad2b9..88c65f7fe 100644 --- a/.github/workflows/release_mgbench_client.yaml +++ b/.github/workflows/release_mgbench_client.yaml @@ -20,7 +20,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v2 diff --git a/.github/workflows/release_ubuntu2004.yaml b/.github/workflows/release_ubuntu2004.yaml index c82d05ad1..96f85bdf3 100644 --- a/.github/workflows/release_ubuntu2004.yaml +++ b/.github/workflows/release_ubuntu2004.yaml @@ -14,19 +14,21 @@ on: schedule: - cron: "0 22 * * *" +env: + THREADS: 24 + MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} + MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} + BUILD_TYPE: ${{ github.event.inputs.build_type || 'Release' }} + jobs: community_build: name: "Community build" runs-on: [self-hosted, Linux, X64, Ubuntu20.04] - env: - THREADS: 24 - MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} - MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} - timeout-minutes: 960 + timeout-minutes: 90 steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) @@ -40,10 +42,6 @@ jobs: # Initialize dependencies. ./init - # Set default build_type to Release - INPUT_BUILD_TYPE=${{ github.event.inputs.build_type }} - BUILD_TYPE=${INPUT_BUILD_TYPE:-"Release"} - # Build community binaries. cd build cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DMG_ENTERPRISE=OFF .. @@ -61,14 +59,11 @@ jobs: coverage_build: name: "Coverage build" runs-on: [self-hosted, Linux, X64, Ubuntu20.04] - env: - THREADS: 24 - MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} - MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} + timeout-minutes: 90 steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. 
(default: 1) @@ -110,22 +105,19 @@ jobs: tar -czf code_coverage.tar.gz coverage.json html report.json summary.rmu - name: Save code coverage - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: "Code coverage" + name: "Code coverage(Coverage build)" path: tools/github/generated/code_coverage.tar.gz debug_build: name: "Debug build" runs-on: [self-hosted, Linux, X64, Ubuntu20.04] - env: - THREADS: 24 - MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} - MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} + timeout-minutes: 90 steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) @@ -157,10 +149,6 @@ jobs: run: | ./tests/drivers/run.sh - - name: Run integration tests - run: | - tests/integration/run.sh - - name: Run cppcheck and clang-format run: | # Activate toolchain. @@ -171,23 +159,49 @@ jobs: ./cppcheck_and_clang_format diff - name: Save cppcheck and clang-format errors - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: "Code coverage" + name: "Code coverage(Debug build)" path: tools/github/cppcheck_and_clang_format.txt + debug_integration_test: + name: "Debug integration tests" + runs-on: [self-hosted, Linux, X64, Ubuntu20.04] + timeout-minutes: 90 + + steps: + - name: Set up repository + uses: actions/checkout@v4 + with: + # Number of commits to fetch. `0` indicates all history for all + # branches and tags. (default: 1) + fetch-depth: 0 + + - name: Build debug binaries + run: | + # Activate toolchain. + source /opt/toolchain-v4/activate + + # Initialize dependencies. + ./init + + # Build debug binaries. + cd build + cmake .. + make -j$THREADS + + - name: Run integration tests + run: | + tests/integration/run.sh + release_build: name: "Release build" runs-on: [self-hosted, Linux, X64, Ubuntu20.04] - env: - THREADS: 24 - MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} - MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} - timeout-minutes: 960 + timeout-minutes: 90 steps: - name: Set up repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Number of commits to fetch. `0` indicates all history for all # branches and tags. (default: 1) @@ -201,10 +215,6 @@ jobs: # Initialize dependencies. ./init - # Set default build_type to Release - INPUT_BUILD_TYPE=${{ github.event.inputs.build_type }} - BUILD_TYPE=${INPUT_BUILD_TYPE:-"Release"} - # Build release binaries. cd build cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE .. @@ -226,11 +236,60 @@ jobs: cpack -G DEB --config ../CPackConfig.cmake - name: Save enterprise DEB package - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: "Enterprise DEB package" path: build/output/memgraph*.deb + - name: Run GQL Behave tests + run: | + cd tests + ./setup.sh /opt/toolchain-v4/activate + cd gql_behave + ./continuous_integration + + - name: Save quality assurance status + uses: actions/upload-artifact@v4 + with: + name: "GQL Behave Status" + path: | + tests/gql_behave/gql_behave_status.csv + tests/gql_behave/gql_behave_status.html + + - name: Run unit tests + run: | + # Activate toolchain. + source /opt/toolchain-v4/activate + + # Run unit tests. 
+ cd build + ctest -R memgraph__unit --output-on-failure + + release_benchmark_tests: + name: "Release Benchmark Tests" + runs-on: [self-hosted, Linux, X64, Ubuntu20.04] + timeout-minutes: 90 + + steps: + - name: Set up repository + uses: actions/checkout@v4 + with: + # Number of commits to fetch. `0` indicates all history for all + # branches and tags. (default: 1) + fetch-depth: 0 + + - name: Build release binaries + run: | + # Activate toolchain. + source /opt/toolchain-v4/activate + # Initialize dependencies. + ./init + + # Build release binaries + cd build + cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE .. + make -j$THREADS + - name: Run micro benchmark tests run: | # Activate toolchain. @@ -257,29 +316,31 @@ jobs: --num-database-workers 9 --num-clients-workers 30 \ --no-strict - - name: Run GQL Behave tests - run: | - cd tests - ./setup.sh /opt/toolchain-v4/activate - cd gql_behave - ./continuous_integration + release_e2e_test: + if: false + name: "Release End-to-end Test" + runs-on: [self-hosted, Linux, X64, Ubuntu20.04] + timeout-minutes: 90 - - name: Save quality assurance status - uses: actions/upload-artifact@v3 + steps: + - name: Set up repository + uses: actions/checkout@v4 with: - name: "GQL Behave Status" - path: | - tests/gql_behave/gql_behave_status.csv - tests/gql_behave/gql_behave_status.html + # Number of commits to fetch. `0` indicates all history for all + # branches and tags. (default: 1) + fetch-depth: 0 - - name: Run unit tests + - name: Build release binaries run: | # Activate toolchain. source /opt/toolchain-v4/activate + # Initialize dependencies. + ./init - # Run unit tests. + # Build release binaries cd build - ctest -R memgraph__unit --output-on-failure + cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE .. + make -j$THREADS - name: Ensure Kafka and Pulsar are up run: | @@ -304,6 +365,32 @@ jobs: cd ../pulsar docker-compose down + release_durability_stress_tests: + name: "Release durability and stress tests" + runs-on: [self-hosted, Linux, X64, Ubuntu20.04] + timeout-minutes: 90 + + steps: + - name: Set up repository + uses: actions/checkout@v4 + with: + # Number of commits to fetch. `0` indicates all history for all + # branches and tags. (default: 1) + fetch-depth: 0 + + - name: Build release binaries + run: | + # Activate toolchain. + source /opt/toolchain-v4/activate + + # Initialize dependencies. + ./init + + # Build release binaries. + cd build + cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE .. + make -j$THREADS + - name: Run stress test (plain) run: | cd tests/stress @@ -314,11 +401,6 @@ jobs: cd tests/stress ./continuous_integration --use-ssl - - name: Run stress test (large) - run: | - cd tests/stress - ./continuous_integration --large-dataset - - name: Run durability test (plain) run: | cd tests/stress diff --git a/.github/workflows/stress_test_large.yaml b/.github/workflows/stress_test_large.yaml new file mode 100644 index 000000000..54a6c55ba --- /dev/null +++ b/.github/workflows/stress_test_large.yaml @@ -0,0 +1,62 @@ +name: Stress test large + +on: + workflow_dispatch: + inputs: + build_type: + type: choice + description: "Memgraph Build type. Default value is Release." 
+ default: 'Release' + options: + - Release + - RelWithDebInfo + + schedule: + - cron: "0 22 * * *" + +env: + THREADS: 24 + MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} + MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} + BUILD_TYPE: ${{ github.event.inputs.build_type || 'Release' }} + +jobs: + stress_test_large: + name: "Stress test large" + timeout-minutes: 600 + strategy: + matrix: + os: [Debian10, Ubuntu20.04] + extra: [BigMemory, Gen8] + exclude: + - os: Debian10 + extra: Gen8 + - os: Ubuntu20.04 + extra: BigMemory + runs-on: [self-hosted, Linux, X64, "${{ matrix.os }}", "${{ matrix.extra }}"] + + steps: + - name: Set up repository + uses: actions/checkout@v4 + with: + # Number of commits to fetch. `0` indicates all history for all + # branches and tags. (default: 1) + fetch-depth: 0 + + - name: Build release binaries + run: | + # Activate toolchain. + source /opt/toolchain-v4/activate + + # Initialize dependencies. + ./init + + # Build release binaries. + cd build + cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE .. + make -j$THREADS + + - name: Run stress test (large) + run: | + cd tests/stress + ./continuous_integration --large-dataset diff --git a/.github/workflows/upload_to_s3.yaml b/.github/workflows/upload_to_s3.yaml index 978af7d66..b6bda4ca5 100644 --- a/.github/workflows/upload_to_s3.yaml +++ b/.github/workflows/upload_to_s3.yaml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Download artifacts - uses: dawidd6/action-download-artifact@v2 + uses: dawidd6/action-download-artifact@v4 with: workflow: package_all.yaml workflow_conclusion: success diff --git a/CMakeLists.txt b/CMakeLists.txt index a5ad2612a..266a3bedb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -271,6 +271,17 @@ endif() set(libs_dir ${CMAKE_SOURCE_DIR}/libs) add_subdirectory(libs EXCLUDE_FROM_ALL) +option(MG_EXPERIMENTAL_HIGH_AVAILABILITY "Feature flag for experimental high availability" OFF) + +if (NOT MG_ENTERPRISE AND MG_EXPERIMENTAL_HIGH_AVAILABILITY) + set(MG_EXPERIMENTAL_HIGH_AVAILABILITY OFF) + message(FATAL_ERROR "MG_EXPERIMENTAL_HIGH_AVAILABILITY must be used with the enterprise version of the code.") +endif () + +if (MG_EXPERIMENTAL_HIGH_AVAILABILITY) + add_compile_definitions(MG_EXPERIMENTAL_HIGH_AVAILABILITY) +endif () + # Optional subproject configuration ------------------------------------------- option(TEST_COVERAGE "Generate coverage reports from running memgraph" OFF) option(TOOLS "Build tools binaries" ON) @@ -279,6 +290,18 @@ option(ASAN "Build with Address Sanitizer. To get a reasonable performance optio option(TSAN "Build with Thread Sanitizer.
To get a reasonable performance option should be used only in Release or RelWithDebInfo build " OFF) option(UBSAN "Build with Undefined Behaviour Sanitizer" OFF) +# Build feature flags +option(MG_EXPERIMENTAL_REPLICATION_MULTITENANCY "Feature flag for experimental replication of multitenancy" OFF) + +if (NOT MG_ENTERPRISE AND MG_EXPERIMENTAL_REPLICATION_MULTITENANCY) + set(MG_EXPERIMENTAL_REPLICATION_MULTITENANCY OFF) + message(FATAL_ERROR "MG_EXPERIMENTAL_REPLICATION_MULTITENANCY isn't possible with a community edition build") +endif () + +if (MG_EXPERIMENTAL_REPLICATION_MULTITENANCY) + add_compile_definitions(MG_EXPERIMENTAL_REPLICATION_MULTITENANCY) +endif () + if (TEST_COVERAGE) string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type) if (NOT lower_build_type STREQUAL "debug") diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index c5b0cde9e..98a24354c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -22,6 +22,8 @@ add_subdirectory(dbms) add_subdirectory(flags) add_subdirectory(distributed) add_subdirectory(replication) +add_subdirectory(coordination) +add_subdirectory(replication_coordination_glue) string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type) diff --git a/src/auth/auth.cpp b/src/auth/auth.cpp index cfe9dbdbe..88f0c4410 100644 --- a/src/auth/auth.cpp +++ b/src/auth/auth.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Licensed as a Memgraph Enterprise file under the Memgraph Enterprise // License (the "License"); by using this file, you agree to be bound by the terms of the License, and you may not use @@ -8,17 +8,15 @@ #include "auth/auth.hpp" -#include #include -#include #include #include +#include "auth/crypto.hpp" #include "auth/exceptions.hpp" #include "license/license.hpp" #include "utils/flag_validation.hpp" -#include "utils/logging.hpp" #include "utils/message.hpp" #include "utils/settings.hpp" #include "utils/string.hpp" @@ -46,6 +44,9 @@ namespace memgraph::auth { const std::string kUserPrefix = "user:"; const std::string kRolePrefix = "role:"; const std::string kLinkPrefix = "link:"; +const std::string kVersion = "version"; + +static constexpr auto kVersionV1 = "V1"; /** * All data stored in the `Auth` storage is stored in an underlying * key="link:", value="" */ -Auth::Auth(const std::string &storage_directory) : storage_(storage_directory), module_(FLAGS_auth_module_executable) {} +namespace { +void MigrateVersions(kvstore::KVStore &store) { + static constexpr auto kPasswordHashV0V1 = "password_hash"; + auto version_str = store.Get(kVersion); + + if (!version_str) { + using namespace std::string_literals; + + // pre versioning, add version to the store + auto puts = std::map{{kVersion, kVersionV1}}; + + // also add hash kind into durability + + auto it = store.begin(kUserPrefix); + auto const e = store.end(kUserPrefix); + + if (it != e) { + const auto hash_algo = CurrentHashAlgorithm(); + spdlog::info("Updating auth durability, assuming previously stored as {}", AsString(hash_algo)); + + for (; it != e; ++it) { + auto const &[key, value] = *it; + try { + auto user_data = nlohmann::json::parse(value); + + auto password_hash = user_data[kPasswordHashV0V1]; + if (!password_hash.is_string()) { + throw AuthException("Couldn't load user data!"); + } + // upgrade the password_hash to include the hash algorithm + if (password_hash.empty()) { + user_data[kPasswordHashV0V1] = nullptr; + } else { + user_data[kPasswordHashV0V1] = HashedPassword{hash_algo,
password_hash}; + } + puts.emplace(key, user_data.dump()); + } catch (const nlohmann::json::parse_error &e) { + throw AuthException("Couldn't load user data!"); + } + } + } + + // Perform migration to V1 + store.PutMultiple(puts); + version_str = kVersionV1; + } +} +}; // namespace + +Auth::Auth(std::string storage_directory, Config config) + : storage_(std::move(storage_directory)), module_(FLAGS_auth_module_executable), config_{std::move(config)} { + MigrateVersions(storage_); +} std::optional Auth::Authenticate(const std::string &username, const std::string &password) { if (module_.IsUsed()) { @@ -113,7 +166,7 @@ std::optional Auth::Authenticate(const std::string &username, const std::s return std::nullopt; } } else { - user->UpdatePassword(password); + UpdatePassword(*user, password); } if (FLAGS_auth_module_manage_roles) { if (!rolename.empty()) { @@ -155,6 +208,10 @@ std::optional Auth::Authenticate(const std::string &username, const std::s username, "https://memgr.ph/auth")); return std::nullopt; } + if (user->UpgradeHash(password)) { + SaveUser(*user); + } + return user; } } @@ -197,13 +254,46 @@ void Auth::SaveUser(const User &user) { } } +void Auth::UpdatePassword(auth::User &user, const std::optional &password) { + // Check if null + if (!password) { + if (!config_.password_permit_null) { + throw AuthException("Null passwords aren't permitted!"); + } + } else { + // Check if compliant with our filter + if (config_.custom_password_regex) { + if (const auto license_check_result = license::global_license_checker.IsEnterpriseValid(utils::global_settings); + license_check_result.HasError()) { + throw AuthException( + "Custom password regex is a Memgraph Enterprise feature. Please set the config " + "(\"--auth-password-strength-regex\") to its default value (\"{}\") or remove the flag.\n{}", + glue::kDefaultPasswordRegex, + license::LicenseCheckErrorToString(license_check_result.GetError(), "password regex")); + } + } + if (!std::regex_match(*password, config_.password_regex)) { + throw AuthException( + "The user password doesn't conform to the required strength! 
Regex: " + "\"{}\"", + config_.password_regex_str); + } + } + + // All checks passed; update + user.UpdatePassword(password); +} + std::optional Auth::AddUser(const std::string &username, const std::optional &password) { + if (!NameRegexMatch(username)) { + throw AuthException("Invalid user name."); + } auto existing_user = GetUser(username); if (existing_user) return std::nullopt; auto existing_role = GetRole(username); if (existing_role) return std::nullopt; auto new_user = User(username); - new_user.UpdatePassword(password); + UpdatePassword(new_user, password); SaveUser(new_user); return new_user; } @@ -255,10 +345,11 @@ void Auth::SaveRole(const Role &role) { } std::optional Auth::AddRole(const std::string &rolename) { - auto existing_role = GetRole(rolename); - if (existing_role) return std::nullopt; - auto existing_user = GetUser(rolename); - if (existing_user) return std::nullopt; + if (!NameRegexMatch(rolename)) { + throw AuthException("Invalid role name."); + } + if (auto existing_role = GetRole(rolename)) return std::nullopt; + if (auto existing_user = GetUser(rolename)) return std::nullopt; auto new_role = Role(rolename); SaveRole(new_role); return new_role; @@ -285,8 +376,7 @@ std::vector Auth::AllRoles() const { for (auto it = storage_.begin(kRolePrefix); it != storage_.end(kRolePrefix); ++it) { auto rolename = it->first.substr(kRolePrefix.size()); if (rolename != utils::ToLowerCase(rolename)) continue; - auto role = GetRole(rolename); - if (role) { + if (auto role = GetRole(rolename)) { ret.push_back(*role); } else { throw AuthException("Couldn't load role '{}'!", rolename); @@ -296,15 +386,14 @@ std::vector Auth::AllRoles() const { } std::vector Auth::AllUsersForRole(const std::string &rolename_orig) const { - auto rolename = utils::ToLowerCase(rolename_orig); + const auto rolename = utils::ToLowerCase(rolename_orig); std::vector ret; for (auto it = storage_.begin(kLinkPrefix); it != storage_.end(kLinkPrefix); ++it) { auto username = it->first.substr(kLinkPrefix.size()); if (username != utils::ToLowerCase(username)) continue; if (it->second != utils::ToLowerCase(it->second)) continue; if (it->second == rolename) { - auto user = GetUser(username); - if (user) { + if (auto user = GetUser(username)) { ret.push_back(std::move(*user)); } else { throw AuthException("Couldn't load user '{}'!", username); @@ -316,8 +405,7 @@ std::vector Auth::AllUsersForRole(const std::string &rolename_orig) #ifdef MG_ENTERPRISE bool Auth::GrantDatabaseToUser(const std::string &db, const std::string &name) { - auto user = GetUser(name); - if (user) { + if (auto user = GetUser(name)) { if (db == kAllDatabases) { user->db_access().GrantAll(); } else { @@ -330,8 +418,7 @@ bool Auth::GrantDatabaseToUser(const std::string &db, const std::string &name) { } bool Auth::RevokeDatabaseFromUser(const std::string &db, const std::string &name) { - auto user = GetUser(name); - if (user) { + if (auto user = GetUser(name)) { if (db == kAllDatabases) { user->db_access().DenyAll(); } else { @@ -346,17 +433,15 @@ bool Auth::RevokeDatabaseFromUser(const std::string &db, const std::string &name void Auth::DeleteDatabase(const std::string &db) { for (auto it = storage_.begin(kUserPrefix); it != storage_.end(kUserPrefix); ++it) { auto username = it->first.substr(kUserPrefix.size()); - auto user = GetUser(username); - if (user) { + if (auto user = GetUser(username)) { user->db_access().Delete(db); SaveUser(*user); } } } -bool Auth::SetMainDatabase(const std::string &db, const std::string &name) { - auto user = 
GetUser(name); - if (user) { +bool Auth::SetMainDatabase(std::string_view db, const std::string &name) { + if (auto user = GetUser(name)) { if (!user->db_access().SetDefault(db)) { throw AuthException("Couldn't set default database '{}' for user '{}'!", db, name); } @@ -367,4 +452,19 @@ bool Auth::SetMainDatabase(const std::string &db, const std::string &name) { } #endif +bool Auth::NameRegexMatch(const std::string &user_or_role) const { + if (config_.custom_name_regex) { + if (const auto license_check_result = + memgraph::license::global_license_checker.IsEnterpriseValid(memgraph::utils::global_settings); + license_check_result.HasError()) { + throw memgraph::auth::AuthException( + "Custom user/role regex is a Memgraph Enterprise feature. Please set the config " + "(\"--auth-user-or-role-name-regex\") to its default value (\"{}\") or remove the flag.\n{}", + glue::kDefaultUserRoleRegex, + memgraph::license::LicenseCheckErrorToString(license_check_result.GetError(), "user/role regex")); + } + } + return std::regex_match(user_or_role, config_.name_regex); +} + } // namespace memgraph::auth diff --git a/src/auth/auth.hpp b/src/auth/auth.hpp index 8d2a9d91c..aa90c349a 100644 --- a/src/auth/auth.hpp +++ b/src/auth/auth.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Licensed as a Memgraph Enterprise file under the Memgraph Enterprise // License (the "License"); by using this file, you agree to be bound by the terms of the License, and you may not use @@ -10,11 +10,13 @@ #include #include +#include #include #include "auth/exceptions.hpp" #include "auth/models.hpp" #include "auth/module.hpp" +#include "glue/auth_global.hpp" #include "kvstore/kvstore.hpp" #include "utils/settings.hpp" @@ -31,7 +33,40 @@ static const constexpr char *const kAllDatabases = "*"; */ class Auth final { public: - explicit Auth(const std::string &storage_directory); + struct Config { + Config() {} + Config(std::string name_regex, std::string password_regex, bool password_permit_null) + : name_regex_str{std::move(name_regex)}, + password_regex_str{std::move(password_regex)}, + password_permit_null{password_permit_null}, + custom_name_regex{name_regex_str != glue::kDefaultUserRoleRegex}, + name_regex{name_regex_str}, + custom_password_regex{password_regex_str != glue::kDefaultPasswordRegex}, + password_regex{password_regex_str} {} + + std::string name_regex_str{glue::kDefaultUserRoleRegex}; + std::string password_regex_str{glue::kDefaultPasswordRegex}; + bool password_permit_null{true}; + + private: + friend class Auth; + bool custom_name_regex{false}; + std::regex name_regex{name_regex_str}; + bool custom_password_regex{false}; + std::regex password_regex{password_regex_str}; + }; + + explicit Auth(std::string storage_directory, Config config); + + /** + * @brief Set the Config object + * + * @param config + */ + void SetConfig(Config config) { + // NOTE: The Auth class itself is not thread-safe, higher-level code needs to synchronize it when using it. + config_ = std::move(config); + } /** * Authenticates a user using his username and password. @@ -85,6 +120,14 @@ class Auth final { */ bool RemoveUser(const std::string &username); + /** + * @brief + * + * @param user + * @param password + */ + void UpdatePassword(auth::User &user, const std::optional &password); + /** * Gets all users from the storage. 
* @@ -195,14 +238,24 @@ class Auth final { * @return true on success * @throw AuthException if unable to find or update the user */ - bool SetMainDatabase(const std::string &db, const std::string &name); + bool SetMainDatabase(std::string_view db, const std::string &name); #endif private: + /** + * @brief + * + * @param user_or_role + * @return true + * @return false + */ + bool NameRegexMatch(const std::string &user_or_role) const; + // Even though the `kvstore::KVStore` class is guaranteed to be thread-safe, // Auth is not thread-safe because modifying users and roles might require // more than one operation on the storage. kvstore::KVStore storage_; auth::Module module_; + Config config_; }; } // namespace memgraph::auth diff --git a/src/auth/crypto.cpp b/src/auth/crypto.cpp index c433eaf62..a8351635a 100644 --- a/src/auth/crypto.cpp +++ b/src/auth/crypto.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Licensed as a Memgraph Enterprise file under the Memgraph Enterprise // License (the "License"); by using this file, you agree to be bound by the terms of the License, and you may not use @@ -22,10 +22,14 @@ namespace { using namespace std::literals; -inline constexpr std::array password_encryption_mappings{ - std::pair{"bcrypt"sv, memgraph::auth::PasswordEncryptionAlgorithm::BCRYPT}, - std::pair{"sha256"sv, memgraph::auth::PasswordEncryptionAlgorithm::SHA256}, - std::pair{"sha256-multiple"sv, memgraph::auth::PasswordEncryptionAlgorithm::SHA256_MULTIPLE}}; + +constexpr auto kHashAlgo = "hash_algo"; +constexpr auto kPasswordHash = "password_hash"; + +inline constexpr std::array password_hash_mappings{ + std::pair{"bcrypt"sv, memgraph::auth::PasswordHashAlgorithm::BCRYPT}, + std::pair{"sha256"sv, memgraph::auth::PasswordHashAlgorithm::SHA256}, + std::pair{"sha256-multiple"sv, memgraph::auth::PasswordHashAlgorithm::SHA256_MULTIPLE}}; inline constexpr uint64_t ONE_SHA_ITERATION = 1; inline constexpr uint64_t MULTIPLE_SHA_ITERATIONS = 1024; @@ -35,7 +39,7 @@ inline constexpr uint64_t MULTIPLE_SHA_ITERATIONS = 1024; DEFINE_VALIDATED_string(password_encryption_algorithm, "bcrypt", "The password encryption algorithm used for authentication.", { if (const auto result = - memgraph::utils::IsValidEnumValueString(value, password_encryption_mappings); + memgraph::utils::IsValidEnumValueString(value, password_hash_mappings); result.HasError()) { const auto error = result.GetError(); switch (error) { @@ -45,7 +49,7 @@ DEFINE_VALIDATED_string(password_encryption_algorithm, "bcrypt", } case memgraph::utils::ValidationError::InvalidValue: { std::cout << "Invalid value for password encryption algorithm. 
Allowed values: " - << memgraph::utils::GetAllowedEnumValuesString(password_encryption_mappings) + << memgraph::utils::GetAllowedEnumValuesString(password_hash_mappings) << std::endl; break; } @@ -58,7 +62,7 @@ DEFINE_VALIDATED_string(password_encryption_algorithm, "bcrypt", namespace memgraph::auth { namespace BCrypt { -std::string EncryptPassword(const std::string &password) { +std::string HashPassword(const std::string &password) { char salt[BCRYPT_HASHSIZE]; char hash[BCRYPT_HASHSIZE]; @@ -86,16 +90,30 @@ bool VerifyPassword(const std::string &password, const std::string &hash) { } // namespace BCrypt namespace SHA { + +namespace { + +constexpr auto SHA_LENGTH = 64U; +constexpr auto SALT_SIZE = 16U; +constexpr auto SALT_SIZE_DURABLE = SALT_SIZE * 2; + #if OPENSSL_VERSION_MAJOR >= 3 -std::string EncryptPasswordOpenSSL3(const std::string &password, const uint64_t number_of_iterations) { +std::string HashPasswordOpenSSL3(std::string_view password, const uint64_t number_of_iterations, + std::string_view salt) { unsigned char hash[SHA256_DIGEST_LENGTH]; EVP_MD_CTX *ctx = EVP_MD_CTX_new(); EVP_MD *md = EVP_MD_fetch(nullptr, "SHA2-256", nullptr); EVP_DigestInit_ex(ctx, md, nullptr); + + if (!salt.empty()) { + DMG_ASSERT(salt.size() == SALT_SIZE); + EVP_DigestUpdate(ctx, salt.data(), salt.size()); + } + for (auto i = 0; i < number_of_iterations; i++) { - EVP_DigestUpdate(ctx, password.c_str(), password.size()); + EVP_DigestUpdate(ctx, password.data(), password.size()); } EVP_DigestFinal_ex(ctx, hash, nullptr); @@ -103,6 +121,11 @@ std::string EncryptPasswordOpenSSL3(const std::string &password, const uint64_t EVP_MD_CTX_free(ctx); std::stringstream result_stream; + + for (unsigned char salt_char : salt) { + result_stream << std::hex << std::setw(2) << std::setfill('0') << (((unsigned int)salt_char) & 0xFFU); + } + for (auto hash_char : hash) { result_stream << std::hex << std::setw(2) << std::setfill('0') << (int)hash_char; } @@ -110,17 +133,27 @@ std::string EncryptPasswordOpenSSL3(const std::string &password, const uint64_t return result_stream.str(); } #else -std::string EncryptPasswordOpenSSL1_1(const std::string &password, const uint64_t number_of_iterations) { +std::string HashPasswordOpenSSL1_1(std::string_view password, const uint64_t number_of_iterations, + std::string_view salt) { unsigned char hash[SHA256_DIGEST_LENGTH]; SHA256_CTX sha256; SHA256_Init(&sha256); + + if (!salt.empty()) { + DMG_ASSERT(salt.size() == SALT_SIZE); + SHA256_Update(&sha256, salt.data(), salt.size()); + } + for (auto i = 0; i < number_of_iterations; i++) { - SHA256_Update(&sha256, password.c_str(), password.size()); + SHA256_Update(&sha256, password.data(), password.size()); } SHA256_Final(hash, &sha256); std::stringstream ss; + for (unsigned char salt_char : salt) { + ss << std::hex << std::setw(2) << std::setfill('0') << (((unsigned int)salt_char) & 0xFFU); + } for (auto hash_char : hash) { ss << std::hex << std::setw(2) << std::setfill('0') << (int)hash_char; } @@ -129,55 +162,144 @@ std::string EncryptPasswordOpenSSL1_1(const std::string &password, const uint64_ } #endif -std::string EncryptPassword(const std::string &password, const uint64_t number_of_iterations) { +std::string HashPassword(std::string_view password, const uint64_t number_of_iterations, std::string_view salt) { #if OPENSSL_VERSION_MAJOR >= 3 - return EncryptPasswordOpenSSL3(password, number_of_iterations); + return HashPasswordOpenSSL3(password, number_of_iterations, salt); #else - return EncryptPasswordOpenSSL1_1(password, 
number_of_iterations); + return HashPasswordOpenSSL1_1(password, number_of_iterations, salt); #endif } -bool VerifyPassword(const std::string &password, const std::string &hash, const uint64_t number_of_iterations) { - auto password_hash = EncryptPassword(password, number_of_iterations); +auto ExtractSalt(std::string_view salt_durable) -> std::array { + static_assert(SALT_SIZE_DURABLE % 2 == 0); + static_assert(SALT_SIZE_DURABLE / 2 == SALT_SIZE); + + MG_ASSERT(salt_durable.size() == SALT_SIZE_DURABLE); + auto const *b = salt_durable.cbegin(); + auto const *const e = salt_durable.cend(); + + auto salt = std::array{}; + auto *inserter = salt.begin(); + + auto const toval = [](char a) -> uint8_t { + if ('0' <= a && a <= '9') { + return a - '0'; + } + if ('a' <= a && a <= 'f') { + return 10 + (a - 'a'); + } + MG_ASSERT(false, "Corrupt hash, can't extract salt"); + __builtin_unreachable(); + }; + + for (; b != e; b += 2, ++inserter) { + *inserter = static_cast(static_cast(toval(b[0]) << 4U) | toval(b[1])); + } + return salt; +} + +bool IsSalted(std::string_view hash) { return hash.size() == SHA_LENGTH + SALT_SIZE_DURABLE; } + +bool VerifyPassword(std::string_view password, std::string_view hash, const uint64_t number_of_iterations) { + auto password_hash = std::invoke([&] { + if (hash.size() == SHA_LENGTH) [[unlikely]] { + // Just SHA256 + return HashPassword(password, number_of_iterations, {}); + } else { + // SHA256 + SALT + MG_ASSERT(IsSalted(hash)); + auto const salt_durable = std::string_view{hash.data(), SALT_SIZE_DURABLE}; + std::array salt = ExtractSalt(salt_durable); + return HashPassword(password, number_of_iterations, {salt.data(), salt.size()}); + } + }); return password_hash == hash; } + +} // namespace + } // namespace SHA -bool VerifyPassword(const std::string &password, const std::string &hash) { - const auto password_encryption_algorithm = utils::StringToEnum( - FLAGS_password_encryption_algorithm, password_encryption_mappings); +HashedPassword HashPassword(const std::string &password, std::optional override_algo) { + auto const hash_algo = override_algo.value_or(CurrentHashAlgorithm()); + auto password_hash = std::invoke([&] { + switch (hash_algo) { + case PasswordHashAlgorithm::BCRYPT: { + return BCrypt::HashPassword(password); + } + case PasswordHashAlgorithm::SHA256: + case PasswordHashAlgorithm::SHA256_MULTIPLE: { + auto gen = std::mt19937(std::random_device{}()); + auto salt = std::array{}; + auto dis = std::uniform_int_distribution(0, 255); + std::generate(salt.begin(), salt.end(), [&]() { return dis(gen); }); + auto iterations = (hash_algo == PasswordHashAlgorithm::SHA256) ?
ONE_SHA_ITERATION : MULTIPLE_SHA_ITERATIONS; + return SHA::HashPassword(password, iterations, {salt.data(), salt.size()}); + } + } + }); + return HashedPassword{hash_algo, std::move(password_hash)}; +}; - if (!password_encryption_algorithm.has_value()) { - throw AuthException("Invalid password encryption flag '{}'!", FLAGS_password_encryption_algorithm); +namespace { + +auto InternalParseHashAlgorithm(std::string_view algo) -> PasswordHashAlgorithm { + auto maybe_parsed = utils::StringToEnum(algo, password_hash_mappings); + if (!maybe_parsed) { + throw AuthException("Invalid password encryption '{}'!", algo); } - - switch (password_encryption_algorithm.value()) { - case PasswordEncryptionAlgorithm::BCRYPT: - return BCrypt::VerifyPassword(password, hash); - case PasswordEncryptionAlgorithm::SHA256: - return SHA::VerifyPassword(password, hash, ONE_SHA_ITERATION); - case PasswordEncryptionAlgorithm::SHA256_MULTIPLE: - return SHA::VerifyPassword(password, hash, MULTIPLE_SHA_ITERATIONS); - } - - throw AuthException("Invalid password encryption flag '{}'!", FLAGS_password_encryption_algorithm); + return *maybe_parsed; } -std::string EncryptPassword(const std::string &password) { - const auto password_encryption_algorithm = utils::StringToEnum( - FLAGS_password_encryption_algorithm, password_encryption_mappings); +PasswordHashAlgorithm &InternalCurrentHashAlgorithm() { + static auto current = PasswordHashAlgorithm::BCRYPT; + static std::once_flag flag; + std::call_once(flag, [] { current = InternalParseHashAlgorithm(FLAGS_password_encryption_algorithm); }); + return current; +} +} // namespace - if (!password_encryption_algorithm.has_value()) { - throw AuthException("Invalid password encryption flag '{}'!", FLAGS_password_encryption_algorithm); +auto CurrentHashAlgorithm() -> PasswordHashAlgorithm { return InternalCurrentHashAlgorithm(); } + +void SetHashAlgorithm(std::string_view algo) { + auto ¤t = InternalCurrentHashAlgorithm(); + current = InternalParseHashAlgorithm(algo); +} + +auto AsString(PasswordHashAlgorithm hash_algo) -> std::string_view { + return *utils::EnumToString(hash_algo, password_hash_mappings); +} + +bool HashedPassword::VerifyPassword(const std::string &password) { + switch (hash_algo) { + case PasswordHashAlgorithm::BCRYPT: + return BCrypt::VerifyPassword(password, password_hash); + case PasswordHashAlgorithm::SHA256: + return SHA::VerifyPassword(password, password_hash, ONE_SHA_ITERATION); + case PasswordHashAlgorithm::SHA256_MULTIPLE: + return SHA::VerifyPassword(password, password_hash, MULTIPLE_SHA_ITERATIONS); } +} - switch (password_encryption_algorithm.value()) { - case PasswordEncryptionAlgorithm::BCRYPT: - return BCrypt::EncryptPassword(password); - case PasswordEncryptionAlgorithm::SHA256: - return SHA::EncryptPassword(password, ONE_SHA_ITERATION); - case PasswordEncryptionAlgorithm::SHA256_MULTIPLE: - return SHA::EncryptPassword(password, MULTIPLE_SHA_ITERATIONS); +void to_json(nlohmann::json &j, const HashedPassword &p) { + j = nlohmann::json{{kHashAlgo, p.hash_algo}, {kPasswordHash, p.password_hash}}; +} + +void from_json(const nlohmann::json &j, HashedPassword &p) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + PasswordHashAlgorithm hash_algo; + j.at(kHashAlgo).get_to(hash_algo); + auto password_hash = j.value(kPasswordHash, std::string()); + p = HashedPassword{hash_algo, std::move(password_hash)}; +} + +bool HashedPassword::IsSalted() const { + switch (hash_algo) { + case PasswordHashAlgorithm::BCRYPT: + return true; + case 
PasswordHashAlgorithm::SHA256: + case PasswordHashAlgorithm::SHA256_MULTIPLE: + return SHA::IsSalted(password_hash); } } diff --git a/src/auth/crypto.hpp b/src/auth/crypto.hpp index dbceb128b..c5dfc1c05 100644 --- a/src/auth/crypto.hpp +++ b/src/auth/crypto.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Licensed as a Memgraph Enterprise file under the Memgraph Enterprise // License (the "License"); by using this file, you agree to be bound by the terms of the License, and you may not use @@ -8,14 +8,45 @@ #pragma once +#include +#include #include namespace memgraph::auth { -enum class PasswordEncryptionAlgorithm : uint8_t { BCRYPT, SHA256, SHA256_MULTIPLE }; +/// Need to be stable, auth durability depends on this +enum class PasswordHashAlgorithm : uint8_t { BCRYPT = 0, SHA256 = 1, SHA256_MULTIPLE = 2 }; -/// @throw AuthException if unable to encrypt the password. -std::string EncryptPassword(const std::string &password); +void SetHashAlgorithm(std::string_view algo); -/// @throw AuthException if unable to verify the password. -bool VerifyPassword(const std::string &password, const std::string &hash); +auto CurrentHashAlgorithm() -> PasswordHashAlgorithm; + +auto AsString(PasswordHashAlgorithm hash_algo) -> std::string_view; + +struct HashedPassword { + HashedPassword() = default; + HashedPassword(PasswordHashAlgorithm hash_algo, std::string password_hash) + : hash_algo{hash_algo}, password_hash{std::move(password_hash)} {} + HashedPassword(HashedPassword const &) = default; + HashedPassword(HashedPassword &&) = default; + HashedPassword &operator=(HashedPassword const &) = default; + HashedPassword &operator=(HashedPassword &&) = default; + + friend bool operator==(HashedPassword const &, HashedPassword const &) = default; + + bool VerifyPassword(const std::string &password); + + bool IsSalted() const; + + auto HashAlgo() const -> PasswordHashAlgorithm { return hash_algo; } + + friend void to_json(nlohmann::json &j, const HashedPassword &p); + friend void from_json(const nlohmann::json &j, HashedPassword &p); + + private: + PasswordHashAlgorithm hash_algo{PasswordHashAlgorithm::BCRYPT}; + std::string password_hash{}; +}; + +/// @throw AuthException if unable to hash the password. +HashedPassword HashPassword(const std::string &password, std::optional override_algo = {}); } // namespace memgraph::auth diff --git a/src/auth/models.cpp b/src/auth/models.cpp index 5415dc08d..a59a73c7b 100644 --- a/src/auth/models.cpp +++ b/src/auth/models.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
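A minimal usage sketch of the HashedPassword interface declared in crypto.hpp above, assuming the surrounding Memgraph build and headers; the flow is illustrative and not code from this patch.

#include "auth/crypto.hpp"

#include <cassert>

void HashedPasswordSketch() {
  using namespace memgraph::auth;

  // Hash with the algorithm currently selected via --password-encryption-algorithm.
  HashedPassword stored = HashPassword("s3cret");
  assert(stored.VerifyPassword("s3cret"));
  assert(!stored.VerifyPassword("wrong"));

  // Override the algorithm explicitly, e.g. when re-hashing during an upgrade.
  HashedPassword sha = HashPassword("s3cret", PasswordHashAlgorithm::SHA256);
  assert(sha.HashAlgo() == PasswordHashAlgorithm::SHA256);
  // Freshly produced SHA-256 hashes carry a salt prefix (hex salt + hex digest).
  assert(sha.IsSalted());
}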
// // Licensed as a Memgraph Enterprise file under the Memgraph Enterprise // License (the "License"); by using this file, you agree to be bound by the terms of the License, and you may not use @@ -9,7 +9,6 @@ #include "auth/models.hpp" #include -#include #include #include @@ -21,22 +20,26 @@ #include "query/constants.hpp" #include "spdlog/spdlog.h" #include "utils/cast.hpp" -#include "utils/logging.hpp" -#include "utils/settings.hpp" #include "utils/string.hpp" -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -DEFINE_bool(auth_password_permit_null, true, "Set to false to disable null passwords."); - -inline constexpr std::string_view default_password_regex = ".+"; -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -DEFINE_string(auth_password_strength_regex, default_password_regex.data(), - "The regular expression that should be used to match the entire " - "entered password to ensure its strength."); - namespace memgraph::auth { namespace { +constexpr auto kRoleName = "rolename"; +constexpr auto kPermissions = "permissions"; +constexpr auto kGrants = "grants"; +constexpr auto kDenies = "denies"; +constexpr auto kUsername = "username"; +constexpr auto kPasswordHash = "password_hash"; + +#ifdef MG_ENTERPRISE +constexpr auto kGlobalPermission = "global_permission"; +constexpr auto kFineGrainedAccessHandler = "fine_grained_access_handler"; +constexpr auto kAllowAll = "allow_all"; +constexpr auto kDefault = "default"; +constexpr auto kDatabases = "databases"; +#endif + // Constant list of all available permissions. const std::vector kPermissionsAll = {Permission::MATCH, Permission::CREATE, @@ -62,7 +65,8 @@ const std::vector kPermissionsAll = {Permission::MATCH, Permission::TRANSACTION_MANAGEMENT, Permission::STORAGE_MODE, Permission::MULTI_DATABASE_EDIT, - Permission::MULTI_DATABASE_USE}; + Permission::MULTI_DATABASE_USE, + Permission::COORDINATOR}; } // namespace @@ -118,6 +122,8 @@ std::string PermissionToString(Permission permission) { return "MULTI_DATABASE_EDIT"; case Permission::MULTI_DATABASE_USE: return "MULTI_DATABASE_USE"; + case Permission::COORDINATOR: + return "COORDINATOR"; } } @@ -242,8 +248,9 @@ std::vector Permissions::GetDenies() const { nlohmann::json Permissions::Serialize() const { nlohmann::json data = nlohmann::json::object(); - data["grants"] = grants_; - data["denies"] = denies_; + + data[kGrants] = grants_; + data[kDenies] = denies_; return data; } @@ -251,10 +258,10 @@ Permissions Permissions::Deserialize(const nlohmann::json &data) { if (!data.is_object()) { throw AuthException("Couldn't load permissions data!"); } - if (!data["grants"].is_number_unsigned() || !data["denies"].is_number_unsigned()) { + if (!data[kGrants].is_number_unsigned() || !data[kDenies].is_number_unsigned()) { throw AuthException("Couldn't load permissions data!"); } - return Permissions{data["grants"], data["denies"]}; + return Permissions{data[kGrants], data[kDenies]}; } uint64_t Permissions::grants() const { return grants_; } @@ -316,8 +323,8 @@ nlohmann::json FineGrainedAccessPermissions::Serialize() const { return {}; } nlohmann::json data = nlohmann::json::object(); - data["permissions"] = permissions_; - data["global_permission"] = global_permission_.has_value() ? global_permission_.value() : -1; + data[kPermissions] = permissions_; + data[kGlobalPermission] = global_permission_.has_value() ? 
global_permission_.value() : -1; return data; } @@ -330,13 +337,13 @@ FineGrainedAccessPermissions FineGrainedAccessPermissions::Deserialize(const nlo } std::optional global_permission; - if (data["global_permission"].empty() || data["global_permission"] == -1) { + if (data[kGlobalPermission].empty() || data[kGlobalPermission] == -1) { global_permission = std::nullopt; } else { - global_permission = data["global_permission"]; + global_permission = data[kGlobalPermission]; } - return FineGrainedAccessPermissions(data["permissions"], global_permission); + return FineGrainedAccessPermissions(data[kPermissions], global_permission); } const std::unordered_map &FineGrainedAccessPermissions::GetPermissions() const { @@ -442,13 +449,13 @@ const FineGrainedAccessPermissions &Role::GetFineGrainedAccessEdgeTypePermission nlohmann::json Role::Serialize() const { nlohmann::json data = nlohmann::json::object(); - data["rolename"] = rolename_; - data["permissions"] = permissions_.Serialize(); + data[kRoleName] = rolename_; + data[kPermissions] = permissions_.Serialize(); #ifdef MG_ENTERPRISE if (memgraph::license::global_license_checker.IsEnterpriseValidFast()) { - data["fine_grained_access_handler"] = fine_grained_access_handler_.Serialize(); + data[kFineGrainedAccessHandler] = fine_grained_access_handler_.Serialize(); } else { - data["fine_grained_access_handler"] = {}; + data[kFineGrainedAccessHandler] = {}; } #endif return data; @@ -458,21 +465,21 @@ Role Role::Deserialize(const nlohmann::json &data) { if (!data.is_object()) { throw AuthException("Couldn't load role data!"); } - if (!data["rolename"].is_string() || !data["permissions"].is_object()) { + if (!data[kRoleName].is_string() || !data[kPermissions].is_object()) { throw AuthException("Couldn't load role data!"); } - auto permissions = Permissions::Deserialize(data["permissions"]); + auto permissions = Permissions::Deserialize(data[kPermissions]); #ifdef MG_ENTERPRISE if (memgraph::license::global_license_checker.IsEnterpriseValidFast()) { FineGrainedAccessHandler fine_grained_access_handler; // We can have an empty fine_grained if the user was created without a valid license - if (data["fine_grained_access_handler"].is_object()) { - fine_grained_access_handler = FineGrainedAccessHandler::Deserialize(data["fine_grained_access_handler"]); + if (data[kFineGrainedAccessHandler].is_object()) { + fine_grained_access_handler = FineGrainedAccessHandler::Deserialize(data[kFineGrainedAccessHandler]); } - return {data["rolename"], permissions, std::move(fine_grained_access_handler)}; + return {data[kRoleName], permissions, std::move(fine_grained_access_handler)}; } #endif - return {data["rolename"], permissions}; + return {data[kRoleName], permissions}; } bool operator==(const Role &first, const Role &second) { @@ -486,13 +493,13 @@ bool operator==(const Role &first, const Role &second) { } #ifdef MG_ENTERPRISE -void Databases::Add(const std::string &db) { +void Databases::Add(std::string_view db) { if (allow_all_) { grants_dbs_.clear(); allow_all_ = false; } grants_dbs_.emplace(db); - denies_dbs_.erase(db); + denies_dbs_.erase(std::string{db}); // TODO: C++23 use transparent key compare } void Databases::Remove(const std::string &db) { @@ -523,13 +530,13 @@ void Databases::DenyAll() { denies_dbs_.clear(); } -bool Databases::SetDefault(const std::string &db) { +bool Databases::SetDefault(std::string_view db) { if (!Contains(db)) return false; default_db_ = db; return true; } -[[nodiscard]] bool Databases::Contains(const std::string &db) const { 
+[[nodiscard]] bool Databases::Contains(std::string_view db) const { return !denies_dbs_.contains(db) && (allow_all_ || grants_dbs_.contains(db)); } @@ -542,10 +549,10 @@ const std::string &Databases::GetDefault() const { nlohmann::json Databases::Serialize() const { nlohmann::json data = nlohmann::json::object(); - data["grants"] = grants_dbs_; - data["denies"] = denies_dbs_; - data["allow_all"] = allow_all_; - data["default"] = default_db_; + data[kGrants] = grants_dbs_; + data[kDenies] = denies_dbs_; + data[kAllowAll] = allow_all_; + data[kDefault] = default_db_; return data; } @@ -553,22 +560,22 @@ Databases Databases::Deserialize(const nlohmann::json &data) { if (!data.is_object()) { throw AuthException("Couldn't load database data!"); } - if (!data["grants"].is_structured() || !data["denies"].is_structured() || !data["allow_all"].is_boolean() || - !data["default"].is_string()) { + if (!data[kGrants].is_structured() || !data[kDenies].is_structured() || !data[kAllowAll].is_boolean() || + !data[kDefault].is_string()) { throw AuthException("Couldn't load database data!"); } - return {data["allow_all"], data["grants"], data["denies"], data["default"]}; + return {data[kAllowAll], data[kGrants], data[kDenies], data[kDefault]}; } #endif User::User() = default; User::User(const std::string &username) : username_(utils::ToLowerCase(username)) {} -User::User(const std::string &username, std::string password_hash, const Permissions &permissions) +User::User(const std::string &username, std::optional password_hash, const Permissions &permissions) : username_(utils::ToLowerCase(username)), password_hash_(std::move(password_hash)), permissions_(permissions) {} #ifdef MG_ENTERPRISE -User::User(const std::string &username, std::string password_hash, const Permissions &permissions, +User::User(const std::string &username, std::optional password_hash, const Permissions &permissions, FineGrainedAccessHandler fine_grained_access_handler, Databases db_access) : username_(utils::ToLowerCase(username)), password_hash_(std::move(password_hash)), @@ -578,38 +585,16 @@ User::User(const std::string &username, std::string password_hash, const Permiss #endif bool User::CheckPassword(const std::string &password) { - if (password_hash_.empty()) return true; - return VerifyPassword(password, password_hash_); + return password_hash_ ? password_hash_->VerifyPassword(password) : true; } -void User::UpdatePassword(const std::optional &password) { +void User::UpdatePassword(const std::optional &password, + std::optional algo_override) { if (!password) { - if (!FLAGS_auth_password_permit_null) { - throw AuthException("Null passwords aren't permitted!"); - } - password_hash_ = ""; + password_hash_.reset(); return; } - - if (FLAGS_auth_password_strength_regex != default_password_regex) { - if (const auto license_check_result = license::global_license_checker.IsEnterpriseValid(utils::global_settings); - license_check_result.HasError()) { - throw AuthException( - "Custom password regex is a Memgraph Enterprise feature. Please set the config " - "(\"--auth-password-strength-regex\") to its default value (\"{}\") or remove the flag.\n{}", - default_password_regex, - license::LicenseCheckErrorToString(license_check_result.GetError(), "password regex")); - } - } - std::regex re(FLAGS_auth_password_strength_regex); - if (!std::regex_match(*password, re)) { - throw AuthException( - "The user password doesn't conform to the required strength! 
Regex: " - "\"{}\"", - FLAGS_auth_password_strength_regex); - } - - password_hash_ = EncryptPassword(*password); + password_hash_ = HashPassword(*password, algo_override); } void User::SetRole(const Role &role) { role_.emplace(role); } @@ -668,16 +653,20 @@ const Role *User::role() const { nlohmann::json User::Serialize() const { nlohmann::json data = nlohmann::json::object(); - data["username"] = username_; - data["password_hash"] = password_hash_; - data["permissions"] = permissions_.Serialize(); + data[kUsername] = username_; + if (password_hash_.has_value()) { + data[kPasswordHash] = *password_hash_; + } else { + data[kPasswordHash] = nullptr; + } + data[kPermissions] = permissions_.Serialize(); #ifdef MG_ENTERPRISE if (memgraph::license::global_license_checker.IsEnterpriseValidFast()) { - data["fine_grained_access_handler"] = fine_grained_access_handler_.Serialize(); - data["databases"] = database_access_.Serialize(); + data[kFineGrainedAccessHandler] = fine_grained_access_handler_.Serialize(); + data[kDatabases] = database_access_.Serialize(); } else { - data["fine_grained_access_handler"] = {}; - data["databases"] = {}; + data[kFineGrainedAccessHandler] = {}; + data[kDatabases] = {}; } #endif // The role shouldn't be serialized here, it is stored as a foreign key. @@ -688,15 +677,23 @@ User User::Deserialize(const nlohmann::json &data) { if (!data.is_object()) { throw AuthException("Couldn't load user data!"); } - if (!data["username"].is_string() || !data["password_hash"].is_string() || !data["permissions"].is_object()) { + auto password_hash_json = data[kPasswordHash]; + if (!data[kUsername].is_string() || !(password_hash_json.is_object() || password_hash_json.is_null()) || + !data[kPermissions].is_object()) { throw AuthException("Couldn't load user data!"); } - auto permissions = Permissions::Deserialize(data["permissions"]); + + std::optional password_hash{}; + if (password_hash_json.is_object()) { + password_hash = password_hash_json.get(); + } + + auto permissions = Permissions::Deserialize(data[kPermissions]); #ifdef MG_ENTERPRISE if (memgraph::license::global_license_checker.IsEnterpriseValidFast()) { Databases db_access; - if (data["databases"].is_structured()) { - db_access = Databases::Deserialize(data["databases"]); + if (data[kDatabases].is_structured()) { + db_access = Databases::Deserialize(data[kDatabases]); } else { // Back-compatibility spdlog::warn("User without specified database access. 
Given access to the default database."); @@ -705,13 +702,13 @@ User User::Deserialize(const nlohmann::json &data) { } FineGrainedAccessHandler fine_grained_access_handler; // We can have an empty fine_grained if the user was created without a valid license - if (data["fine_grained_access_handler"].is_object()) { - fine_grained_access_handler = FineGrainedAccessHandler::Deserialize(data["fine_grained_access_handler"]); + if (data[kFineGrainedAccessHandler].is_object()) { + fine_grained_access_handler = FineGrainedAccessHandler::Deserialize(data[kFineGrainedAccessHandler]); } - return {data["username"], data["password_hash"], permissions, std::move(fine_grained_access_handler), db_access}; + return {data[kUsername], std::move(password_hash), permissions, std::move(fine_grained_access_handler), db_access}; } #endif - return {data["username"], data["password_hash"], permissions}; + return {data[kUsername], std::move(password_hash), permissions}; } bool operator==(const User &first, const User &second) { diff --git a/src/auth/models.hpp b/src/auth/models.hpp index 9f66d3119..bb6dd2a7a 100644 --- a/src/auth/models.hpp +++ b/src/auth/models.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Licensed as a Memgraph Enterprise file under the Memgraph Enterprise // License (the "License"); by using this file, you agree to be bound by the terms of the License, and you may not use @@ -15,6 +15,7 @@ #include #include +#include "crypto.hpp" #include "dbms/constants.hpp" #include "utils/logging.hpp" @@ -48,6 +49,7 @@ enum class Permission : uint64_t { STORAGE_MODE = 1U << 22U, MULTI_DATABASE_EDIT = 1U << 23U, MULTI_DATABASE_USE = 1U << 24U, + COORDINATOR = 1U << 25U, }; // clang-format on @@ -246,7 +248,7 @@ bool operator==(const Role &first, const Role &second); #ifdef MG_ENTERPRISE class Databases final { public: - Databases() : grants_dbs_({dbms::kDefaultDB}), allow_all_(false), default_db_(dbms::kDefaultDB) {} + Databases() : grants_dbs_{std::string{dbms::kDefaultDB}}, allow_all_(false), default_db_(dbms::kDefaultDB) {} Databases(const Databases &) = default; Databases &operator=(const Databases &) = default; @@ -259,7 +261,7 @@ class Databases final { * * @param db name of the database to grant access to */ - void Add(const std::string &db); + void Add(std::string_view db); /** * @brief Remove database to the list of granted access. @@ -291,7 +293,7 @@ class Databases final { /** * @brief Set the default database. */ - bool SetDefault(const std::string &db); + bool SetDefault(std::string_view db); /** * @brief Checks if access is grated to the database. 
@@ -299,7 +301,7 @@ class Databases final { * @param db name of the database * @return true if allow_all and not denied or granted */ - bool Contains(const std::string &db) const; + bool Contains(std::string_view db) const; bool GetAllowAll() const { return allow_all_; } const std::set> &GetGrants() const { return grants_dbs_; } @@ -312,7 +314,7 @@ class Databases final { private: Databases(bool allow_all, std::set> grant, std::set> deny, - std::string default_db = dbms::kDefaultDB) + std::string default_db = std::string{dbms::kDefaultDB}) : grants_dbs_(std::move(grant)), denies_dbs_(std::move(deny)), allow_all_(allow_all), @@ -331,9 +333,9 @@ class User final { User(); explicit User(const std::string &username); - User(const std::string &username, std::string password_hash, const Permissions &permissions); + User(const std::string &username, std::optional password_hash, const Permissions &permissions); #ifdef MG_ENTERPRISE - User(const std::string &username, std::string password_hash, const Permissions &permissions, + User(const std::string &username, std::optional password_hash, const Permissions &permissions, FineGrainedAccessHandler fine_grained_access_handler, Databases db_access = {}); #endif User(const User &) = default; @@ -345,8 +347,18 @@ class User final { /// @throw AuthException if unable to verify the password. bool CheckPassword(const std::string &password); + bool UpgradeHash(const std::string password) { + if (!password_hash_) return false; + if (password_hash_->IsSalted()) return false; + + auto const algo = password_hash_->HashAlgo(); + UpdatePassword(password, algo); + return true; + } + /// @throw AuthException if unable to set the password. - void UpdatePassword(const std::optional &password = std::nullopt); + void UpdatePassword(const std::optional &password = {}, + std::optional algo_override = std::nullopt); void SetRole(const Role &role); @@ -381,7 +393,7 @@ class User final { private: std::string username_; - std::string password_hash_; + std::optional password_hash_; Permissions permissions_; #ifdef MG_ENTERPRISE FineGrainedAccessHandler fine_grained_access_handler_; diff --git a/src/communication/bolt/v1/states/handlers.hpp b/src/communication/bolt/v1/states/handlers.hpp index 3b5a67b17..3ffcb6f55 100644 --- a/src/communication/bolt/v1/states/handlers.hpp +++ b/src/communication/bolt/v1/states/handlers.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
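A hedged sketch of how a login path could combine CheckPassword() with the new UpgradeHash() helper from models.hpp above to transparently re-hash legacy, unsalted SHA-256 entries; the function below is illustrative and not the actual Memgraph authentication path.

#include <string>

#include "auth/models.hpp"

bool AuthenticateAndMaybeUpgrade(memgraph::auth::User &user, const std::string &password) {
  if (!user.CheckPassword(password)) return false;  // also true for password-less users
  // UpgradeHash() returns false (no-op) when there is no stored hash or when the
  // stored hash is already salted; otherwise it re-hashes with the same algorithm.
  if (user.UpgradeHash(password)) {
    // A real caller would persist `user` here so the upgraded hash becomes durable.
  }
  return true;
}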
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -170,6 +170,7 @@ inline State HandleFailure(TSession &session, const std::exception &e) { spdlog::trace("Error trace: {}", p->trace()); } session.encoder_buffer_.Clear(); + auto code_message = ExceptionToErrorMessage(e); bool fail_sent = session.encoder_.MessageFailure({{"code", code_message.first}, {"message", code_message.second}}); if (!fail_sent) { diff --git a/src/communication/result_stream_faker.hpp b/src/communication/result_stream_faker.hpp index f8786dd43..779d039cc 100644 --- a/src/communication/result_stream_faker.hpp +++ b/src/communication/result_stream_faker.hpp @@ -44,7 +44,7 @@ class ResultStreamFaker { std::vector bvalues; bvalues.reserve(values.size()); for (const auto &value : values) { - auto maybe_value = memgraph::glue::ToBoltValue(value, *store_, memgraph::storage::View::NEW); + auto maybe_value = memgraph::glue::ToBoltValue(value, store_, memgraph::storage::View::NEW); MG_ASSERT(maybe_value.HasValue()); bvalues.push_back(std::move(*maybe_value)); } @@ -56,7 +56,7 @@ class ResultStreamFaker { void Summary(const std::map &summary) { std::map bsummary; for (const auto &item : summary) { - auto maybe_value = memgraph::glue::ToBoltValue(item.second, *store_, memgraph::storage::View::NEW); + auto maybe_value = memgraph::glue::ToBoltValue(item.second, store_, memgraph::storage::View::NEW); MG_ASSERT(maybe_value.HasValue()); bsummary.insert({item.first, std::move(*maybe_value)}); } diff --git a/src/coordination/CMakeLists.txt b/src/coordination/CMakeLists.txt new file mode 100644 index 000000000..e8c4b3735 --- /dev/null +++ b/src/coordination/CMakeLists.txt @@ -0,0 +1,29 @@ +add_library(mg-coordination STATIC) +add_library(mg::coordination ALIAS mg-coordination) +target_sources(mg-coordination + PUBLIC + include/coordination/coordinator_client.hpp + include/coordination/coordinator_state.hpp + include/coordination/coordinator_rpc.hpp + include/coordination/coordinator_server.hpp + include/coordination/coordinator_config.hpp + include/coordination/coordinator_exceptions.hpp + include/coordination/coordinator_instance.hpp + include/coordination/coordinator_slk.hpp + include/coordination/coordinator_data.hpp + include/coordination/constants.hpp + include/coordination/failover_status.hpp + include/coordination/coordinator_cluster_config.hpp + + PRIVATE + coordinator_client.cpp + coordinator_state.cpp + coordinator_rpc.cpp + coordinator_server.cpp + coordinator_data.cpp +) +target_include_directories(mg-coordination PUBLIC include) + +target_link_libraries(mg-coordination + PUBLIC mg::utils mg::rpc mg::slk mg::io mg::repl_coord_glue lib::rangev3 +) diff --git a/src/coordination/coordinator_client.cpp b/src/coordination/coordinator_client.cpp new file mode 100644 index 000000000..93ef3e3af --- /dev/null +++ b/src/coordination/coordinator_client.cpp @@ -0,0 +1,109 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#ifdef MG_ENTERPRISE + +#include "coordination/coordinator_client.hpp" + +#include "coordination/coordinator_config.hpp" +#include "coordination/coordinator_rpc.hpp" +#include "replication_coordination_glue/messages.hpp" + +namespace memgraph::coordination { + +namespace { +auto CreateClientContext(const memgraph::coordination::CoordinatorClientConfig &config) + -> communication::ClientContext { + return (config.ssl) ? communication::ClientContext{config.ssl->key_file, config.ssl->cert_file} + : communication::ClientContext{}; +} +} // namespace + +CoordinatorClient::CoordinatorClient(CoordinatorData *coord_data, CoordinatorClientConfig config, + HealthCheckCallback succ_cb, HealthCheckCallback fail_cb) + : rpc_context_{CreateClientContext(config)}, + rpc_client_{io::network::Endpoint(io::network::Endpoint::needs_resolving, config.ip_address, config.port), + &rpc_context_}, + config_{std::move(config)}, + coord_data_{coord_data}, + succ_cb_{std::move(succ_cb)}, + fail_cb_{std::move(fail_cb)} {} + +auto CoordinatorClient::InstanceName() const -> std::string { return config_.instance_name; } +auto CoordinatorClient::SocketAddress() const -> std::string { return rpc_client_.Endpoint().SocketAddress(); } + +void CoordinatorClient::StartFrequentCheck() { + MG_ASSERT(config_.health_check_frequency_sec > std::chrono::seconds(0), + "Health check frequency must be greater than 0"); + + instance_checker_.Run( + "Coord checker", config_.health_check_frequency_sec, [this, instance_name = config_.instance_name] { + try { + spdlog::trace("Sending frequent heartbeat to machine {} on {}", instance_name, + rpc_client_.Endpoint().SocketAddress()); + auto stream{rpc_client_.Stream()}; + stream.AwaitResponse(); + succ_cb_(coord_data_, instance_name); + } catch (const rpc::RpcFailedException &) { + fail_cb_(coord_data_, instance_name); + } + }); +} + +void CoordinatorClient::StopFrequentCheck() { instance_checker_.Stop(); } + +void CoordinatorClient::PauseFrequentCheck() { instance_checker_.Pause(); } +void CoordinatorClient::ResumeFrequentCheck() { instance_checker_.Resume(); } + +auto CoordinatorClient::SetSuccCallback(HealthCheckCallback succ_cb) -> void { succ_cb_ = std::move(succ_cb); } +auto CoordinatorClient::SetFailCallback(HealthCheckCallback fail_cb) -> void { fail_cb_ = std::move(fail_cb); } + +auto CoordinatorClient::ReplicationClientInfo() const -> const CoordinatorClientConfig::ReplicationClientInfo & { + return config_.replication_client_info; +} + +auto CoordinatorClient::ResetReplicationClientInfo() -> void { + // TODO (antoniofilipovic) Sync with Andi on this one + // config_.replication_client_info.reset(); +} + +auto CoordinatorClient::SendPromoteReplicaToMainRpc( + std::vector replication_clients_info) const -> bool { + try { + auto stream{rpc_client_.Stream(std::move(replication_clients_info))}; + if (!stream.AwaitResponse().success) { + spdlog::error("Failed to receive successful RPC failover response!"); + return false; + } + return true; + } catch (const rpc::RpcFailedException &) { + spdlog::error("RPC error occurred while sending failover RPC!"); + } + return false; +} + +auto CoordinatorClient::SendSetToReplicaRpc(CoordinatorClient::ReplClientInfo replication_client_info) const -> bool { + try { + auto 
stream{rpc_client_.Stream(std::move(replication_client_info))}; + if (!stream.AwaitResponse().success) { + spdlog::error("Failed to set main to replica!"); + return false; + } + spdlog::info("Sent request RPC from coordinator to instance to set it as replica!"); + return true; + } catch (const rpc::RpcFailedException &) { + spdlog::error("Failed to send failover RPC from coordinator to new main!"); + } + return false; +} + +} // namespace memgraph::coordination +#endif diff --git a/src/coordination/coordinator_data.cpp b/src/coordination/coordinator_data.cpp new file mode 100644 index 000000000..c236cf753 --- /dev/null +++ b/src/coordination/coordinator_data.cpp @@ -0,0 +1,220 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#include "coordination/coordinator_instance.hpp" +#include "coordination/register_main_replica_coordinator_status.hpp" +#ifdef MG_ENTERPRISE + +#include "coordination/coordinator_data.hpp" + +#include +#include + +namespace memgraph::coordination { + +CoordinatorData::CoordinatorData() { + auto find_instance = [](CoordinatorData *coord_data, std::string_view instance_name) -> CoordinatorInstance & { + auto instance = std::ranges::find_if( + coord_data->registered_instances_, + [instance_name](const CoordinatorInstance &instance) { return instance.InstanceName() == instance_name; }); + + MG_ASSERT(instance != coord_data->registered_instances_.end(), "Instance {} not found during callback!", + instance_name); + return *instance; + }; + + replica_succ_cb_ = [find_instance](CoordinatorData *coord_data, std::string_view instance_name) -> void { + auto lock = std::lock_guard{coord_data->coord_data_lock_}; + spdlog::trace("Instance {} performing replica successful callback", instance_name); + auto &instance = find_instance(coord_data, instance_name); + MG_ASSERT(instance.IsReplica(), "Instance {} is not a replica!", instance_name); + instance.UpdateLastResponseTime(); + }; + + replica_fail_cb_ = [find_instance](CoordinatorData *coord_data, std::string_view instance_name) -> void { + auto lock = std::lock_guard{coord_data->coord_data_lock_}; + spdlog::trace("Instance {} performing replica failure callback", instance_name); + auto &instance = find_instance(coord_data, instance_name); + MG_ASSERT(instance.IsReplica(), "Instance {} is not a replica!", instance_name); + instance.UpdateInstanceStatus(); + }; + + main_succ_cb_ = [find_instance](CoordinatorData *coord_data, std::string_view instance_name) -> void { + auto lock = std::lock_guard{coord_data->coord_data_lock_}; + spdlog::trace("Instance {} performing main successful callback", instance_name); + auto &instance = find_instance(coord_data, instance_name); + MG_ASSERT(instance.IsMain(), "Instance {} is not a main!", instance_name); + instance.UpdateLastResponseTime(); + }; + + main_fail_cb_ = [this, find_instance](CoordinatorData *coord_data, std::string_view instance_name) -> void { + auto lock = std::lock_guard{coord_data->coord_data_lock_}; + spdlog::trace("Instance {} performing main failure callback", instance_name); + 
auto &instance = find_instance(coord_data, instance_name); + MG_ASSERT(instance.IsMain(), "Instance {} is not a main!", instance_name); + if (bool main_alive = instance.UpdateInstanceStatus(); !main_alive) { + spdlog::info("Main instance {} is not alive, starting automatic failover", instance_name); + switch (auto failover_status = DoFailover(); failover_status) { + using enum DoFailoverStatus; + case ALL_REPLICAS_DOWN: + spdlog::warn("Failover aborted since all replicas are down!"); + break; + case MAIN_ALIVE: + spdlog::warn("Failover aborted since main is alive!"); + break; + case RPC_FAILED: + spdlog::warn("Failover aborted since promoting replica to main failed!"); + break; + case SUCCESS: + break; + } + } + }; +} + +auto CoordinatorData::DoFailover() -> DoFailoverStatus { + using ReplicationClientInfo = CoordinatorClientConfig::ReplicationClientInfo; + + auto replica_instances = registered_instances_ | ranges::views::filter(&CoordinatorInstance::IsReplica); + + auto chosen_replica_instance = std::ranges::find_if(replica_instances, &CoordinatorInstance::IsAlive); + if (chosen_replica_instance == replica_instances.end()) { + return DoFailoverStatus::ALL_REPLICAS_DOWN; + } + + chosen_replica_instance->PrepareForFailover(); + + std::vector repl_clients_info; + repl_clients_info.reserve(std::ranges::distance(replica_instances)); + + auto const not_chosen_replica_instance = [&chosen_replica_instance](const CoordinatorInstance &instance) { + return instance != *chosen_replica_instance; + }; + auto const not_main = [](const CoordinatorInstance &instance) { return !instance.IsMain(); }; + + // TODO (antoniofilipovic): Should we send also data on old MAIN??? + // TODO: (andi) Don't send replicas which aren't alive + for (const auto &unchosen_replica_instance : + replica_instances | ranges::views::filter(not_chosen_replica_instance) | ranges::views::filter(not_main)) { + repl_clients_info.emplace_back(unchosen_replica_instance.client_.ReplicationClientInfo()); + } + + if (!chosen_replica_instance->client_.SendPromoteReplicaToMainRpc(std::move(repl_clients_info))) { + chosen_replica_instance->RestoreAfterFailedFailover(); + return DoFailoverStatus::RPC_FAILED; + } + + auto old_main = std::ranges::find_if(registered_instances_, &CoordinatorInstance::IsMain); + // TODO: (andi) For performing restoration we will have to improve this + old_main->client_.PauseFrequentCheck(); + + chosen_replica_instance->PostFailover(main_succ_cb_, main_fail_cb_); + + return DoFailoverStatus::SUCCESS; +} + +auto CoordinatorData::ShowInstances() const -> std::vector { + std::vector instances_status; + instances_status.reserve(registered_instances_.size()); + + auto const stringify_repl_role = [](const CoordinatorInstance &instance) -> std::string { + if (!instance.IsAlive()) return ""; + if (instance.IsMain()) return "main"; + return "replica"; + }; + + auto const instance_to_status = + [&stringify_repl_role](const CoordinatorInstance &instance) -> CoordinatorInstanceStatus { + return {.instance_name = instance.InstanceName(), + .socket_address = instance.SocketAddress(), + .replication_role = stringify_repl_role(instance), + .is_alive = instance.IsAlive()}; + }; + + { + auto lock = std::shared_lock{coord_data_lock_}; + std::ranges::transform(registered_instances_, std::back_inserter(instances_status), instance_to_status); + } + + return instances_status; +} + +auto CoordinatorData::SetInstanceToMain(std::string instance_name) -> SetInstanceToMainCoordinatorStatus { + auto lock = std::lock_guard{coord_data_lock_}; + + 
// Find replica we already registered + auto registered_replica = std::find_if( + registered_instances_.begin(), registered_instances_.end(), + [instance_name](const CoordinatorInstance &instance) { return instance.InstanceName() == instance_name; }); + + // if replica not found... + if (registered_replica == registered_instances_.end()) { + spdlog::error("You didn't register instance with given name {}", instance_name); + return SetInstanceToMainCoordinatorStatus::NO_INSTANCE_WITH_NAME; + } + + registered_replica->client_.PauseFrequentCheck(); + + std::vector repl_clients_info; + repl_clients_info.reserve(registered_instances_.size() - 1); + std::ranges::for_each(registered_instances_, + [registered_replica, &repl_clients_info](const CoordinatorInstance &replica) { + if (replica != *registered_replica) { + repl_clients_info.emplace_back(replica.client_.ReplicationClientInfo()); + } + }); + + // PROMOTE REPLICA TO MAIN + // THIS SHOULD FAIL HERE IF IT IS DOWN + if (auto result = registered_replica->client_.SendPromoteReplicaToMainRpc(std::move(repl_clients_info)); !result) { + registered_replica->client_.ResumeFrequentCheck(); + return SetInstanceToMainCoordinatorStatus::COULD_NOT_PROMOTE_TO_MAIN; + } + + registered_replica->client_.SetSuccCallback(main_succ_cb_); + registered_replica->client_.SetFailCallback(main_fail_cb_); + registered_replica->replication_role_ = replication_coordination_glue::ReplicationRole::MAIN; + registered_replica->client_.ResumeFrequentCheck(); + + return SetInstanceToMainCoordinatorStatus::SUCCESS; +} + +auto CoordinatorData::RegisterInstance(CoordinatorClientConfig config) -> RegisterInstanceCoordinatorStatus { + auto lock = std::lock_guard{coord_data_lock_}; + if (std::ranges::any_of(registered_instances_, [&config](const CoordinatorInstance &instance) { + return instance.InstanceName() == config.instance_name; + })) { + return RegisterInstanceCoordinatorStatus::NAME_EXISTS; + } + + if (std::ranges::any_of(registered_instances_, [&config](const CoordinatorInstance &instance) { + spdlog::trace("Comparing {} with {}", instance.SocketAddress(), config.SocketAddress()); + return instance.SocketAddress() == config.SocketAddress(); + })) { + return RegisterInstanceCoordinatorStatus::END_POINT_EXISTS; + } + + CoordinatorClientConfig::ReplicationClientInfo replication_client_info_copy = config.replication_client_info; + + // TODO (antoniofilipovic) create and then push back + auto *instance = ®istered_instances_.emplace_back(this, std::move(config), replica_succ_cb_, replica_fail_cb_, + replication_coordination_glue::ReplicationRole::REPLICA); + if (auto res = instance->client_.SendSetToReplicaRpc(replication_client_info_copy); !res) { + return RegisterInstanceCoordinatorStatus::RPC_FAILED; + } + + instance->client_.StartFrequentCheck(); + + return RegisterInstanceCoordinatorStatus::SUCCESS; +} + +} // namespace memgraph::coordination +#endif diff --git a/src/coordination/coordinator_rpc.cpp b/src/coordination/coordinator_rpc.cpp new file mode 100644 index 000000000..e8a16f0e2 --- /dev/null +++ b/src/coordination/coordinator_rpc.cpp @@ -0,0 +1,107 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. 
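A small, illustrative sketch (not part of the patch) of consuming CoordinatorData::ShowInstances() implemented above; the CoordinatorInstanceStatus field names are taken from the designated initializers in instance_to_status, while the exact struct definition is not reproduced in this diff.

#include <iostream>

#include "coordination/coordinator_data.hpp"

void PrintInstancesSketch(const memgraph::coordination::CoordinatorData &coord_data) {
  for (const auto &status : coord_data.ShowInstances()) {
    std::cout << status.instance_name << " @ " << status.socket_address
              << " role=" << status.replication_role
              << " alive=" << std::boolalpha << status.is_alive << '\n';
  }
}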
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#ifdef MG_ENTERPRISE + +#include "coordination/coordinator_rpc.hpp" + +#include "coordination/coordinator_slk.hpp" +#include "slk/serialization.hpp" + +namespace memgraph { + +namespace coordination { + +void PromoteReplicaToMainReq::Save(const PromoteReplicaToMainReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} + +void PromoteReplicaToMainReq::Load(PromoteReplicaToMainReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} + +void PromoteReplicaToMainRes::Save(const PromoteReplicaToMainRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} + +void PromoteReplicaToMainRes::Load(PromoteReplicaToMainRes *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} + +void SetMainToReplicaReq::Save(const SetMainToReplicaReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} + +void SetMainToReplicaReq::Load(SetMainToReplicaReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} + +void SetMainToReplicaRes::Save(const SetMainToReplicaRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} + +void SetMainToReplicaRes::Load(SetMainToReplicaRes *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} + +} // namespace coordination + +constexpr utils::TypeInfo coordination::PromoteReplicaToMainReq::kType{utils::TypeId::COORD_FAILOVER_REQ, + "CoordPromoteReplicaToMainReq", nullptr}; + +constexpr utils::TypeInfo coordination::PromoteReplicaToMainRes::kType{utils::TypeId::COORD_FAILOVER_RES, + "CoordPromoteReplicaToMainRes", nullptr}; + +constexpr utils::TypeInfo coordination::SetMainToReplicaReq::kType{utils::TypeId::COORD_SET_REPL_MAIN_REQ, + "CoordSetReplMainReq", nullptr}; + +constexpr utils::TypeInfo coordination::SetMainToReplicaRes::kType{utils::TypeId::COORD_SET_REPL_MAIN_RES, + "CoordSetReplMainRes", nullptr}; + +namespace slk { + +void Save(const memgraph::coordination::PromoteReplicaToMainRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self.success, builder); +} + +void Load(memgraph::coordination::PromoteReplicaToMainRes *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(&self->success, reader); +} + +void Save(const memgraph::coordination::PromoteReplicaToMainReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self.replication_clients_info, builder); +} + +void Load(memgraph::coordination::PromoteReplicaToMainReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(&self->replication_clients_info, reader); +} + +void Save(const memgraph::coordination::SetMainToReplicaReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self.replication_client_info, builder); +} + +void Load(memgraph::coordination::SetMainToReplicaReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(&self->replication_client_info, reader); +} + +void Save(const memgraph::coordination::SetMainToReplicaRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self.success, builder); +} + +void Load(memgraph::coordination::SetMainToReplicaRes *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(&self->success, reader); +} + +} // namespace slk + +} // namespace memgraph + 
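The RPC messages above follow one SLK convention: the Save() overload writes each member in a fixed order and the matching Load() reads them back in exactly the same order. A hedged sketch of that pattern for a hypothetical PingRes message (illustrative only; not one of the messages added by this patch):

#include <string>

#include "slk/serialization.hpp"

namespace memgraph::coordination {
struct PingRes {  // hypothetical message used only for this sketch
  bool success;
  std::string message;
};
}  // namespace memgraph::coordination

namespace memgraph::slk {

inline void Save(const memgraph::coordination::PingRes &self, Builder *builder) {
  memgraph::slk::Save(self.success, builder);  // member order here must match Load() below
  memgraph::slk::Save(self.message, builder);
}

inline void Load(memgraph::coordination::PingRes *self, Reader *reader) {
  memgraph::slk::Load(&self->success, reader);  // read back in the same order as Save()
  memgraph::slk::Load(&self->message, reader);
}

}  // namespace memgraph::slk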
+#endif diff --git a/src/coordination/coordinator_server.cpp b/src/coordination/coordinator_server.cpp new file mode 100644 index 000000000..a8253cf25 --- /dev/null +++ b/src/coordination/coordinator_server.cpp @@ -0,0 +1,57 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#ifdef MG_ENTERPRISE + +#include "coordination/coordinator_server.hpp" +#include "replication_coordination_glue/messages.hpp" + +namespace memgraph::coordination { + +namespace { + +auto CreateServerContext(const memgraph::coordination::CoordinatorServerConfig &config) + -> communication::ServerContext { + return (config.ssl) ? communication::ServerContext{config.ssl->key_file, config.ssl->cert_file, config.ssl->ca_file, + config.ssl->verify_peer} + : communication::ServerContext{}; +} + +// NOTE: The coordinator server doesn't more than 1 processing thread - each replica can +// have only a single coordinator server. Also, the single-threaded guarantee +// simplifies the rest of the implementation. +constexpr auto kCoordinatorServerThreads = 1; + +} // namespace + +CoordinatorServer::CoordinatorServer(const CoordinatorServerConfig &config) + : rpc_server_context_{CreateServerContext(config)}, + rpc_server_{io::network::Endpoint{config.ip_address, config.port}, &rpc_server_context_, + kCoordinatorServerThreads} { + rpc_server_.Register([](auto *req_reader, auto *res_builder) { + spdlog::debug("Received FrequentHeartbeatRpc on coordinator server"); + replication_coordination_glue::FrequentHeartbeatHandler(req_reader, res_builder); + }); +} + +CoordinatorServer::~CoordinatorServer() { + if (rpc_server_.IsRunning()) { + auto const &endpoint = rpc_server_.endpoint(); + spdlog::trace("Closing coordinator server on {}:{}", endpoint.address, endpoint.port); + rpc_server_.Shutdown(); + } + rpc_server_.AwaitShutdown(); +} + +bool CoordinatorServer::Start() { return rpc_server_.Start(); } + +} // namespace memgraph::coordination +#endif diff --git a/src/coordination/coordinator_state.cpp b/src/coordination/coordinator_state.cpp new file mode 100644 index 000000000..60ec458ac --- /dev/null +++ b/src/coordination/coordinator_state.cpp @@ -0,0 +1,89 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
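A brief, illustrative sketch of the server lifecycle implemented above: build a CoordinatorServerConfig, construct the CoordinatorServer (its constructor registers the heartbeat handler), and call Start(); shutdown happens in the destructor. The port below is a placeholder.

#include "coordination/coordinator_server.hpp"

bool StartCoordinatorServerSketch() {
  using namespace memgraph::coordination;
  auto const config = CoordinatorServerConfig{
      .ip_address = kDefaultReplicationServerIp,
      .port = 10111,  // placeholder port for this sketch
  };
  CoordinatorServer server{config};
  // A real owner keeps the server alive; here it shuts down at scope exit.
  return server.Start();  // single-threaded RPC server, one per coordinator endpoint
}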
+ +#ifdef MG_ENTERPRISE + +#include "coordination/coordinator_state.hpp" + +#include "coordination/coordinator_config.hpp" +#include "coordination/register_main_replica_coordinator_status.hpp" +#include "flags/replication.hpp" +#include "spdlog/spdlog.h" +#include "utils/logging.hpp" +#include "utils/variant_helpers.hpp" + +#include + +namespace memgraph::coordination { + +CoordinatorState::CoordinatorState() { + MG_ASSERT(!(FLAGS_coordinator && FLAGS_coordinator_server_port), + "Instance cannot be a coordinator and have registered coordinator server."); + + spdlog::info("Executing coordinator constructor"); + if (FLAGS_coordinator_server_port) { + spdlog::info("Coordinator server port set"); + auto const config = CoordinatorServerConfig{ + .ip_address = kDefaultReplicationServerIp, + .port = static_cast(FLAGS_coordinator_server_port), + }; + spdlog::info("Executing coordinator constructor main replica"); + + data_ = CoordinatorMainReplicaData{.coordinator_server_ = std::make_unique(config)}; + } +} + +auto CoordinatorState::RegisterInstance(CoordinatorClientConfig config) -> RegisterInstanceCoordinatorStatus { + MG_ASSERT(std::holds_alternative(data_), + "Coordinator cannot register replica since variant holds wrong alternative"); + + return std::visit( + memgraph::utils::Overloaded{ + [](const CoordinatorMainReplicaData & /*coordinator_main_replica_data*/) { + return RegisterInstanceCoordinatorStatus::NOT_COORDINATOR; + }, + [config](CoordinatorData &coordinator_data) { return coordinator_data.RegisterInstance(config); }}, + data_); +} + +auto CoordinatorState::SetInstanceToMain(std::string instance_name) -> SetInstanceToMainCoordinatorStatus { + MG_ASSERT(std::holds_alternative(data_), + "Coordinator cannot register replica since variant holds wrong alternative"); + + return std::visit( + memgraph::utils::Overloaded{[](const CoordinatorMainReplicaData & /*coordinator_main_replica_data*/) { + return SetInstanceToMainCoordinatorStatus::NOT_COORDINATOR; + }, + [&instance_name](CoordinatorData &coordinator_data) { + return coordinator_data.SetInstanceToMain(instance_name); + }}, + data_); +} + +auto CoordinatorState::ShowInstances() const -> std::vector { + MG_ASSERT(std::holds_alternative(data_), + "Can't call show instances on data_, as variant holds wrong alternative"); + return std::get(data_).ShowInstances(); +} + +[[nodiscard]] auto CoordinatorState::DoFailover() -> DoFailoverStatus { + MG_ASSERT(std::holds_alternative(data_), "Cannot do failover since variant holds wrong alternative"); + auto &coord_state = std::get(data_); + return coord_state.DoFailover(); +} + +auto CoordinatorState::GetCoordinatorServer() const -> CoordinatorServer & { + MG_ASSERT(std::holds_alternative(data_), + "Cannot get coordinator server since variant holds wrong alternative"); + return *std::get(data_).coordinator_server_; +} +} // namespace memgraph::coordination +#endif diff --git a/src/coordination/include/coordination/constants.hpp b/src/coordination/include/coordination/constants.hpp new file mode 100644 index 000000000..819b9fa05 --- /dev/null +++ b/src/coordination/include/coordination/constants.hpp @@ -0,0 +1,22 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. 
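CoordinatorState above keeps either CoordinatorMainReplicaData or CoordinatorData in a std::variant and dispatches with std::visit plus utils::Overloaded. A self-contained sketch of that dispatch pattern, with a local stand-in for the Overloaded helper since utils/variant_helpers.hpp is not reproduced here:

#include <string>
#include <variant>

// Local stand-in for memgraph::utils::Overloaded.
template <class... Ts>
struct Overloaded : Ts... {
  using Ts::operator()...;
};
template <class... Ts>
Overloaded(Ts...) -> Overloaded<Ts...>;

struct ReplicaRole {};
struct CoordinatorRole {
  int registered_instances = 0;
};

std::string Describe(const std::variant<ReplicaRole, CoordinatorRole> &role) {
  return std::visit(Overloaded{[](const ReplicaRole &) { return std::string{"replica"}; },
                               [](const CoordinatorRole &c) {
                                 return "coordinator with " + std::to_string(c.registered_instances) + " instances";
                               }},
                    role);
}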
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +namespace memgraph::coordination { + +#ifdef MG_EXPERIMENTAL_HIGH_AVAILABILITY +constexpr bool allow_ha = true; +#else +constexpr bool allow_ha = false; +#endif + +} // namespace memgraph::coordination diff --git a/src/coordination/include/coordination/coordinator_client.hpp b/src/coordination/include/coordination/coordinator_client.hpp new file mode 100644 index 000000000..1bc361a57 --- /dev/null +++ b/src/coordination/include/coordination/coordinator_client.hpp @@ -0,0 +1,77 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#ifdef MG_ENTERPRISE + +#include "coordination/coordinator_config.hpp" +#include "rpc/client.hpp" +#include "utils/scheduler.hpp" + +namespace memgraph::coordination { + +class CoordinatorData; +using HealthCheckCallback = std::function; + +class CoordinatorClient { + public: + using ReplClientInfo = CoordinatorClientConfig::ReplicationClientInfo; + using ReplicationClientsInfo = std::vector; + + explicit CoordinatorClient(CoordinatorData *coord_data_, CoordinatorClientConfig config, HealthCheckCallback succ_cb, + HealthCheckCallback fail_cb); + + ~CoordinatorClient() = default; + + CoordinatorClient(CoordinatorClient &) = delete; + CoordinatorClient &operator=(CoordinatorClient const &) = delete; + + CoordinatorClient(CoordinatorClient &&) noexcept = delete; + CoordinatorClient &operator=(CoordinatorClient &&) noexcept = delete; + + void StartFrequentCheck(); + void StopFrequentCheck(); + void PauseFrequentCheck(); + void ResumeFrequentCheck(); + + auto InstanceName() const -> std::string; + auto SocketAddress() const -> std::string; + + auto SendPromoteReplicaToMainRpc(ReplicationClientsInfo replication_clients_info) const -> bool; + + auto ReplicationClientInfo() const -> const ReplClientInfo &; + auto ResetReplicationClientInfo() -> void; + + auto SendSetToReplicaRpc(ReplClientInfo replication_client_info) const -> bool; + + auto SetSuccCallback(HealthCheckCallback succ_cb) -> void; + auto SetFailCallback(HealthCheckCallback fail_cb) -> void; + + friend bool operator==(CoordinatorClient const &first, CoordinatorClient const &second) { + return first.config_ == second.config_; + } + + private: + utils::Scheduler instance_checker_; + + // TODO: (andi) Pimpl? 
+ communication::ClientContext rpc_context_; + mutable rpc::Client rpc_client_; + + CoordinatorClientConfig config_; + CoordinatorData *coord_data_; + HealthCheckCallback succ_cb_; + HealthCheckCallback fail_cb_; +}; + +} // namespace memgraph::coordination +#endif diff --git a/src/coordination/include/coordination/coordinator_cluster_config.hpp b/src/coordination/include/coordination/coordinator_cluster_config.hpp new file mode 100644 index 000000000..e1d91ff7d --- /dev/null +++ b/src/coordination/include/coordination/coordinator_cluster_config.hpp @@ -0,0 +1,22 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#ifdef MG_ENTERPRISE +namespace memgraph::coordination { + +struct CoordinatorClusterConfig { + static constexpr int alive_response_time_difference_sec_{5}; +}; + +} // namespace memgraph::coordination +#endif diff --git a/src/coordination/include/coordination/coordinator_config.hpp b/src/coordination/include/coordination/coordinator_config.hpp new file mode 100644 index 000000000..bbbed9dd7 --- /dev/null +++ b/src/coordination/include/coordination/coordinator_config.hpp @@ -0,0 +1,78 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
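The succ_cb_/fail_cb_ hooks held by CoordinatorClient above are plain std::function callbacks invoked from the health-check scheduler; judging from how coordinator_data.cpp uses them, the signature is void(CoordinatorData *, std::string_view). A hedged sketch of such a pair (logging only; not the callbacks this patch installs):

#include <string_view>

#include "coordination/coordinator_data.hpp"
#include "spdlog/spdlog.h"

namespace memgraph::coordination {

inline void OnInstanceAlive(CoordinatorData * /*coord_data*/, std::string_view instance_name) {
  spdlog::trace("Heartbeat succeeded for instance {}", instance_name);
}

inline void OnInstanceDown(CoordinatorData * /*coord_data*/, std::string_view instance_name) {
  spdlog::warn("Heartbeat failed for instance {}", instance_name);
}

}  // namespace memgraph::coordination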
+ +#pragma once + +#ifdef MG_ENTERPRISE + +#include "replication_coordination_glue/mode.hpp" + +#include <chrono> +#include <cstdint> +#include <optional> +#include <string> + +namespace memgraph::coordination { + +inline constexpr auto *kDefaultReplicationServerIp = "0.0.0.0"; + +struct CoordinatorClientConfig { + std::string instance_name; + std::string ip_address; + uint16_t port{}; + std::chrono::seconds health_check_frequency_sec{1}; + + auto SocketAddress() const -> std::string { return ip_address + ":" + std::to_string(port); } + + // Info which the coordinator will send to the new main when performing failover + struct ReplicationClientInfo { + // Must be the same as CoordinatorClientConfig's instance_name + std::string instance_name; + replication_coordination_glue::ReplicationMode replication_mode{}; + std::string replication_ip_address; + uint16_t replication_port{}; + + friend bool operator==(ReplicationClientInfo const &, ReplicationClientInfo const &) = default; + }; + + // Each instance keeps its replication config in case it needs to be re-registered after a failover + ReplicationClientInfo replication_client_info; + + struct SSL { + std::string key_file; + std::string cert_file; + + friend bool operator==(const SSL &, const SSL &) = default; + }; + + std::optional<SSL> ssl; + + friend bool operator==(CoordinatorClientConfig const &, CoordinatorClientConfig const &) = default; +}; + +struct CoordinatorServerConfig { + std::string ip_address; + uint16_t port{}; + struct SSL { + std::string key_file; + std::string cert_file; + std::string ca_file; + bool verify_peer{}; + friend bool operator==(SSL const &, SSL const &) = default; + }; + + std::optional<SSL> ssl; + + friend bool operator==(CoordinatorServerConfig const &, CoordinatorServerConfig const &) = default; +}; + +} // namespace memgraph::coordination +#endif diff --git a/src/coordination/include/coordination/coordinator_data.hpp b/src/coordination/include/coordination/coordinator_data.hpp new file mode 100644 index 000000000..d14f5e1db --- /dev/null +++ b/src/coordination/include/coordination/coordinator_data.hpp @@ -0,0 +1,49 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
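+// NOTE: (summary, inferred from the declarations below) CoordinatorData is the coordinator-side
+// registry of data instances: RegisterInstance, SetInstanceToMain and DoFailover mutate
+// registered_instances_ under coord_data_lock_, while ShowInstances only reads it.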
+ +#pragma once + +#ifdef MG_ENTERPRISE + +#include "coordination/coordinator_instance.hpp" +#include "coordination/coordinator_instance_status.hpp" +#include "coordination/coordinator_server.hpp" +#include "coordination/failover_status.hpp" +#include "coordination/register_main_replica_coordinator_status.hpp" +#include "utils/rw_lock.hpp" + +#include <list> + +namespace memgraph::coordination { +class CoordinatorData { + public: + CoordinatorData(); + + [[nodiscard]] auto DoFailover() -> DoFailoverStatus; + + [[nodiscard]] auto RegisterInstance(CoordinatorClientConfig config) -> RegisterInstanceCoordinatorStatus; + [[nodiscard]] auto SetInstanceToMain(std::string instance_name) -> SetInstanceToMainCoordinatorStatus; + + auto ShowInstances() const -> std::vector<CoordinatorInstanceStatus>; + + private: + mutable utils::RWLock coord_data_lock_{utils::RWLock::Priority::READ}; + HealthCheckCallback main_succ_cb_, main_fail_cb_, replica_succ_cb_, replica_fail_cb_; + // Must be std::list because we rely on pointer stability + std::list<CoordinatorInstance> registered_instances_; +}; + +struct CoordinatorMainReplicaData { + std::unique_ptr<CoordinatorServer> coordinator_server_; +}; + +} // namespace memgraph::coordination +#endif diff --git a/src/coordination/include/coordination/coordinator_exceptions.hpp b/src/coordination/include/coordination/coordinator_exceptions.hpp new file mode 100644 index 000000000..708fb81f3 --- /dev/null +++ b/src/coordination/include/coordination/coordinator_exceptions.hpp @@ -0,0 +1,32 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#ifdef MG_ENTERPRISE + +#include "utils/exceptions.hpp" + +namespace memgraph::coordination { +class CoordinatorFailoverException final : public utils::BasicException { + public: + explicit CoordinatorFailoverException(const std::string_view what) noexcept + : BasicException("Failover didn't complete successfully: " + std::string(what)) {} + + template <class... Args> + explicit CoordinatorFailoverException(fmt::format_string<Args...> fmt, Args &&...args) noexcept + : CoordinatorFailoverException(fmt::format(fmt, std::forward<Args>(args)...)) {} + + SPECIALIZE_GET_EXCEPTION_NAME(CoordinatorFailoverException) +}; + +} // namespace memgraph::coordination +#endif diff --git a/src/coordination/include/coordination/coordinator_instance.hpp b/src/coordination/include/coordination/coordinator_instance.hpp new file mode 100644 index 000000000..31a6d8204 --- /dev/null +++ b/src/coordination/include/coordination/coordinator_instance.hpp @@ -0,0 +1,77 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
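+// NOTE: (summary, inferred from the class below) CoordinatorInstance pairs a CoordinatorClient
+// with the instance's replication role and a liveness flag: UpdateLastResponseTime is presumably
+// invoked from the health-check callbacks, and UpdateInstanceStatus marks the instance dead once
+// no response has been seen for CoordinatorClusterConfig::alive_response_time_difference_sec_
+// seconds.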
+ +#pragma once + +#ifdef MG_ENTERPRISE + +#include "coordination/coordinator_client.hpp" +#include "coordination/coordinator_cluster_config.hpp" +#include "replication_coordination_glue/role.hpp" + +namespace memgraph::coordination { + +class CoordinatorData; + +class CoordinatorInstance { + public: + CoordinatorInstance(CoordinatorData *data, CoordinatorClientConfig config, HealthCheckCallback succ_cb, + HealthCheckCallback fail_cb, replication_coordination_glue::ReplicationRole replication_role) + : client_(data, std::move(config), std::move(succ_cb), std::move(fail_cb)), + replication_role_(replication_role), + is_alive_(true) {} + + CoordinatorInstance(CoordinatorInstance const &other) = delete; + CoordinatorInstance &operator=(CoordinatorInstance const &other) = delete; + CoordinatorInstance(CoordinatorInstance &&other) noexcept = delete; + CoordinatorInstance &operator=(CoordinatorInstance &&other) noexcept = delete; + ~CoordinatorInstance() = default; + + auto UpdateInstanceStatus() -> bool { + is_alive_ = std::chrono::duration_cast(std::chrono::system_clock::now() - last_response_time_) + .count() < CoordinatorClusterConfig::alive_response_time_difference_sec_; + return is_alive_; + } + auto UpdateLastResponseTime() -> void { last_response_time_ = std::chrono::system_clock::now(); } + + auto InstanceName() const -> std::string { return client_.InstanceName(); } + auto SocketAddress() const -> std::string { return client_.SocketAddress(); } + auto IsAlive() const -> bool { return is_alive_; } + + auto IsReplica() const -> bool { + return replication_role_ == replication_coordination_glue::ReplicationRole::REPLICA; + } + auto IsMain() const -> bool { return replication_role_ == replication_coordination_glue::ReplicationRole::MAIN; } + + auto PrepareForFailover() -> void { client_.PauseFrequentCheck(); } + auto RestoreAfterFailedFailover() -> void { client_.ResumeFrequentCheck(); } + + auto PostFailover(HealthCheckCallback main_succ_cb, HealthCheckCallback main_fail_cb) -> void { + replication_role_ = replication_coordination_glue::ReplicationRole::MAIN; + client_.SetSuccCallback(std::move(main_succ_cb)); + client_.SetFailCallback(std::move(main_fail_cb)); + // Comment with Andi but we shouldn't delete this, what if this MAIN FAILS AGAIN + // client_.ResetReplicationClientInfo(); + client_.ResumeFrequentCheck(); + } + + CoordinatorClient client_; + replication_coordination_glue::ReplicationRole replication_role_; + std::chrono::system_clock::time_point last_response_time_{}; + bool is_alive_{false}; + + friend bool operator==(CoordinatorInstance const &first, CoordinatorInstance const &second) { + return first.client_ == second.client_ && first.replication_role_ == second.replication_role_; + } +}; + +} // namespace memgraph::coordination +#endif diff --git a/src/coordination/include/coordination/coordinator_instance_status.hpp b/src/coordination/include/coordination/coordinator_instance_status.hpp new file mode 100644 index 000000000..2a0a3a985 --- /dev/null +++ b/src/coordination/include/coordination/coordinator_instance_status.hpp @@ -0,0 +1,31 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#ifdef MG_ENTERPRISE + +#include "io/network/endpoint.hpp" + +#include <string> + +namespace memgraph::coordination { + +struct CoordinatorInstanceStatus { + std::string instance_name; + std::string socket_address; + std::string replication_role; + bool is_alive; +}; + +} // namespace memgraph::coordination + +#endif diff --git a/src/coordination/include/coordination/coordinator_rpc.hpp b/src/coordination/include/coordination/coordinator_rpc.hpp new file mode 100644 index 000000000..99996ef52 --- /dev/null +++ b/src/coordination/include/coordination/coordinator_rpc.hpp @@ -0,0 +1,104 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#ifdef MG_ENTERPRISE + +#include "coordination/coordinator_config.hpp" +#include "rpc/messages.hpp" +#include "slk/serialization.hpp" + +namespace memgraph::coordination { + +struct PromoteReplicaToMainReq { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + static void Load(PromoteReplicaToMainReq *self, memgraph::slk::Reader *reader); + static void Save(const PromoteReplicaToMainReq &self, memgraph::slk::Builder *builder); + + explicit PromoteReplicaToMainReq(std::vector<CoordinatorClientConfig::ReplicationClientInfo> replication_clients_info) + : replication_clients_info(std::move(replication_clients_info)) {} + PromoteReplicaToMainReq() = default; + + std::vector<CoordinatorClientConfig::ReplicationClientInfo> replication_clients_info; +}; + +struct PromoteReplicaToMainRes { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + static void Load(PromoteReplicaToMainRes *self, memgraph::slk::Reader *reader); + static void Save(const PromoteReplicaToMainRes &self, memgraph::slk::Builder *builder); + + explicit PromoteReplicaToMainRes(bool success) : success(success) {} + PromoteReplicaToMainRes() = default; + + bool success; +}; + +using PromoteReplicaToMainRpc = rpc::RequestResponse<PromoteReplicaToMainReq, PromoteReplicaToMainRes>; + +struct SetMainToReplicaReq { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + static void Load(SetMainToReplicaReq *self, memgraph::slk::Reader *reader); + static void Save(const SetMainToReplicaReq &self, memgraph::slk::Builder *builder); + + explicit SetMainToReplicaReq(CoordinatorClientConfig::ReplicationClientInfo replication_client_info) + : replication_client_info(std::move(replication_client_info)) {} + + SetMainToReplicaReq() = default; + + CoordinatorClientConfig::ReplicationClientInfo replication_client_info; +}; + +struct SetMainToReplicaRes { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + static void Load(SetMainToReplicaRes *self, memgraph::slk::Reader *reader); + static void Save(const SetMainToReplicaRes &self, memgraph::slk::Builder *builder); + + explicit SetMainToReplicaRes(bool success) :
success(success) {} + SetMainToReplicaRes() = default; + + bool success; +}; + +using SetMainToReplicaRpc = rpc::RequestResponse; + +} // namespace memgraph::coordination + +// SLK serialization declarations +namespace memgraph::slk { + +void Save(const memgraph::coordination::PromoteReplicaToMainRes &self, memgraph::slk::Builder *builder); + +void Load(memgraph::coordination::PromoteReplicaToMainRes *self, memgraph::slk::Reader *reader); + +void Save(const memgraph::coordination::PromoteReplicaToMainReq &self, memgraph::slk::Builder *builder); + +void Load(memgraph::coordination::PromoteReplicaToMainReq *self, memgraph::slk::Reader *reader); + +void Save(const memgraph::coordination::SetMainToReplicaRes &self, memgraph::slk::Builder *builder); + +void Load(memgraph::coordination::SetMainToReplicaRes *self, memgraph::slk::Reader *reader); + +void Save(const memgraph::coordination::SetMainToReplicaReq &self, memgraph::slk::Builder *builder); + +void Load(memgraph::coordination::SetMainToReplicaReq *self, memgraph::slk::Reader *reader); + +} // namespace memgraph::slk + +#endif diff --git a/src/coordination/include/coordination/coordinator_server.hpp b/src/coordination/include/coordination/coordinator_server.hpp new file mode 100644 index 000000000..2a261bc32 --- /dev/null +++ b/src/coordination/include/coordination/coordinator_server.hpp @@ -0,0 +1,44 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#ifdef MG_ENTERPRISE + +#include "coordination/coordinator_config.hpp" +#include "rpc/server.hpp" + +namespace memgraph::coordination { + +class CoordinatorServer { + public: + explicit CoordinatorServer(const CoordinatorServerConfig &config); + CoordinatorServer(const CoordinatorServer &) = delete; + CoordinatorServer(CoordinatorServer &&) = delete; + CoordinatorServer &operator=(const CoordinatorServer &) = delete; + CoordinatorServer &operator=(CoordinatorServer &&) = delete; + + virtual ~CoordinatorServer(); + + bool Start(); + + template + void Register(F &&callback) { + rpc_server_.Register(std::forward(callback)); + } + + private: + communication::ServerContext rpc_server_context_; + rpc::Server rpc_server_; +}; + +} // namespace memgraph::coordination +#endif diff --git a/src/coordination/include/coordination/coordinator_slk.hpp b/src/coordination/include/coordination/coordinator_slk.hpp new file mode 100644 index 000000000..49834be41 --- /dev/null +++ b/src/coordination/include/coordination/coordinator_slk.hpp @@ -0,0 +1,38 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
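+// NOTE: (assumption based on the sequential Save/Load pattern) SLK streams the fields in order,
+// so the two functions below must list ReplicationClientInfo's members in exactly the same order:
+// instance_name, replication_mode, replication_ip_address, replication_port.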
+ +#pragma once + +#ifdef MG_ENTERPRISE + +#include "coordination/coordinator_config.hpp" +#include "slk/serialization.hpp" +#include "slk/streams.hpp" + +namespace memgraph::slk { + +using ReplicationClientInfo = coordination::CoordinatorClientConfig::ReplicationClientInfo; + +inline void Save(const ReplicationClientInfo &obj, Builder *builder) { + Save(obj.instance_name, builder); + Save(obj.replication_mode, builder); + Save(obj.replication_ip_address, builder); + Save(obj.replication_port, builder); +} + +inline void Load(ReplicationClientInfo *obj, Reader *reader) { + Load(&obj->instance_name, reader); + Load(&obj->replication_mode, reader); + Load(&obj->replication_ip_address, reader); + Load(&obj->replication_port, reader); +} +} // namespace memgraph::slk +#endif diff --git a/src/coordination/include/coordination/coordinator_state.hpp b/src/coordination/include/coordination/coordinator_state.hpp new file mode 100644 index 000000000..9cf2d2471 --- /dev/null +++ b/src/coordination/include/coordination/coordinator_state.hpp @@ -0,0 +1,53 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#ifdef MG_ENTERPRISE + +#include "coordination/coordinator_data.hpp" +#include "coordination/coordinator_instance_status.hpp" +#include "coordination/coordinator_server.hpp" +#include "coordination/failover_status.hpp" +#include "coordination/register_main_replica_coordinator_status.hpp" + +#include + +namespace memgraph::coordination { + +class CoordinatorState { + public: + CoordinatorState(); + ~CoordinatorState() = default; + + CoordinatorState(const CoordinatorState &) = delete; + CoordinatorState &operator=(const CoordinatorState &) = delete; + + CoordinatorState(CoordinatorState &&) noexcept = delete; + CoordinatorState &operator=(CoordinatorState &&) noexcept = delete; + + [[nodiscard]] auto RegisterInstance(CoordinatorClientConfig config) -> RegisterInstanceCoordinatorStatus; + + [[nodiscard]] auto SetInstanceToMain(std::string instance_name) -> SetInstanceToMainCoordinatorStatus; + + auto ShowInstances() const -> std::vector; + + // The client code must check that the server exists before calling this method. + auto GetCoordinatorServer() const -> CoordinatorServer &; + + [[nodiscard]] auto DoFailover() -> DoFailoverStatus; + + private: + std::variant data_; +}; + +} // namespace memgraph::coordination +#endif diff --git a/src/coordination/include/coordination/failover_status.hpp b/src/coordination/include/coordination/failover_status.hpp new file mode 100644 index 000000000..9cfa0ffe6 --- /dev/null +++ b/src/coordination/include/coordination/failover_status.hpp @@ -0,0 +1,21 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#ifdef MG_ENTERPRISE + +#include + +namespace memgraph::coordination { +enum class DoFailoverStatus : uint8_t { SUCCESS, ALL_REPLICAS_DOWN, MAIN_ALIVE, RPC_FAILED }; +} // namespace memgraph::coordination +#endif diff --git a/src/coordination/include/coordination/register_main_replica_coordinator_status.hpp b/src/coordination/include/coordination/register_main_replica_coordinator_status.hpp new file mode 100644 index 000000000..acb191bfd --- /dev/null +++ b/src/coordination/include/coordination/register_main_replica_coordinator_status.hpp @@ -0,0 +1,37 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#ifdef MG_ENTERPRISE + +#include + +namespace memgraph::coordination { + +enum class RegisterInstanceCoordinatorStatus : uint8_t { + NAME_EXISTS, + END_POINT_EXISTS, + COULD_NOT_BE_PERSISTED, + NOT_COORDINATOR, + RPC_FAILED, + SUCCESS +}; + +enum class SetInstanceToMainCoordinatorStatus : uint8_t { + NO_INSTANCE_WITH_NAME, + NOT_COORDINATOR, + SUCCESS, + COULD_NOT_PROMOTE_TO_MAIN, +}; + +} // namespace memgraph::coordination +#endif diff --git a/src/dbms/CMakeLists.txt b/src/dbms/CMakeLists.txt index f1df4985a..9cd94c44c 100644 --- a/src/dbms/CMakeLists.txt +++ b/src/dbms/CMakeLists.txt @@ -1,3 +1,2 @@ - -add_library(mg-dbms STATIC dbms_handler.cpp database.cpp replication_handler.cpp replication_client.cpp inmemory/replication_handlers.cpp) -target_link_libraries(mg-dbms mg-utils mg-storage-v2 mg-query) +add_library(mg-dbms STATIC dbms_handler.cpp database.cpp replication_handler.cpp coordinator_handler.cpp replication_client.cpp inmemory/replication_handlers.cpp coordinator_handlers.cpp) +target_link_libraries(mg-dbms mg-utils mg-storage-v2 mg-query mg-replication mg-coordination) diff --git a/src/dbms/constants.hpp b/src/dbms/constants.hpp index e7ea9987b..a0e9f6f22 100644 --- a/src/dbms/constants.hpp +++ b/src/dbms/constants.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -13,7 +13,8 @@ namespace memgraph::dbms { -constexpr static const char *kDefaultDB = "memgraph"; //!< Name of the default database +constexpr std::string_view kDefaultDB = "memgraph"; //!< Name of the default database +constexpr std::string_view kMultiTenantDir = "databases"; //!< Name of the multi-tenant directory #ifdef MG_EXPERIMENTAL_REPLICATION_MULTITENANCY constexpr bool allow_mt_repl = true; diff --git a/src/dbms/coordinator_handler.cpp b/src/dbms/coordinator_handler.cpp new file mode 100644 index 000000000..1c062c074 --- /dev/null +++ b/src/dbms/coordinator_handler.cpp @@ -0,0 +1,38 @@ +// Copyright 2024 Memgraph Ltd. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#include "coordination/register_main_replica_coordinator_status.hpp" +#ifdef MG_ENTERPRISE + +#include "dbms/coordinator_handler.hpp" + +#include "dbms/dbms_handler.hpp" + +namespace memgraph::dbms { + +CoordinatorHandler::CoordinatorHandler(DbmsHandler &dbms_handler) : dbms_handler_(dbms_handler) {} + +auto CoordinatorHandler::RegisterInstance(memgraph::coordination::CoordinatorClientConfig config) + -> coordination::RegisterInstanceCoordinatorStatus { + return dbms_handler_.CoordinatorState().RegisterInstance(config); +} + +auto CoordinatorHandler::SetInstanceToMain(std::string instance_name) + -> coordination::SetInstanceToMainCoordinatorStatus { + return dbms_handler_.CoordinatorState().SetInstanceToMain(std::move(instance_name)); +} + +auto CoordinatorHandler::ShowInstances() const -> std::vector { + return dbms_handler_.CoordinatorState().ShowInstances(); +} +} // namespace memgraph::dbms + +#endif diff --git a/src/dbms/coordinator_handler.hpp b/src/dbms/coordinator_handler.hpp new file mode 100644 index 000000000..233532cbc --- /dev/null +++ b/src/dbms/coordinator_handler.hpp @@ -0,0 +1,47 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#ifdef MG_ENTERPRISE + +#include "utils/result.hpp" + +#include "coordination/coordinator_config.hpp" +#include "coordination/coordinator_instance_status.hpp" +#include "coordination/failover_status.hpp" +#include "coordination/register_main_replica_coordinator_status.hpp" + +#include +#include +#include + +namespace memgraph::dbms { + +class DbmsHandler; + +class CoordinatorHandler { + public: + explicit CoordinatorHandler(DbmsHandler &dbms_handler); + + auto RegisterInstance(coordination::CoordinatorClientConfig config) + -> coordination::RegisterInstanceCoordinatorStatus; + + auto SetInstanceToMain(std::string instance_name) -> coordination::SetInstanceToMainCoordinatorStatus; + + auto ShowInstances() const -> std::vector; + + private: + DbmsHandler &dbms_handler_; +}; + +} // namespace memgraph::dbms +#endif diff --git a/src/dbms/coordinator_handlers.cpp b/src/dbms/coordinator_handlers.cpp new file mode 100644 index 000000000..5c051408e --- /dev/null +++ b/src/dbms/coordinator_handlers.cpp @@ -0,0 +1,153 @@ +// Copyright 2024 Memgraph Ltd. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#ifdef MG_ENTERPRISE + +#include "dbms/coordinator_handlers.hpp" +#include "dbms/utils.hpp" + +#include "coordination/coordinator_exceptions.hpp" +#include "coordination/coordinator_rpc.hpp" +#include "dbms/dbms_handler.hpp" +#include "dbms/replication_client.hpp" + +#include "range/v3/view.hpp" + +namespace memgraph::dbms { + +void CoordinatorHandlers::Register(DbmsHandler &dbms_handler) { + auto &server = dbms_handler.CoordinatorState().GetCoordinatorServer(); + + server.Register<coordination::PromoteReplicaToMainRpc>( + [&dbms_handler](slk::Reader *req_reader, slk::Builder *res_builder) -> void { + spdlog::info("Received PromoteReplicaToMainRpc"); + CoordinatorHandlers::PromoteReplicaToMainHandler(dbms_handler, req_reader, res_builder); + }); + + server.Register<coordination::SetMainToReplicaRpc>( + [&dbms_handler](slk::Reader *req_reader, slk::Builder *res_builder) -> void { + spdlog::info("Received SetMainToReplicaRpc from coordinator server"); + CoordinatorHandlers::SetMainToReplicaHandler(dbms_handler, req_reader, res_builder); + }); +} + +void CoordinatorHandlers::SetMainToReplicaHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, + slk::Builder *res_builder) { + auto &repl_state = dbms_handler.ReplicationState(); + + if (!repl_state.IsMain()) { + spdlog::error("Setting to replica must be performed on main."); + slk::Save(coordination::SetMainToReplicaRes{false}, res_builder); + return; + } + + coordination::SetMainToReplicaReq req; + slk::Load(&req, req_reader); + + replication::ReplicationServerConfig clients_config{.ip_address = req.replication_client_info.replication_ip_address, + .port = req.replication_client_info.replication_port}; + + if (bool success = memgraph::dbms::SetReplicationRoleReplica(dbms_handler, clients_config); !success) { + spdlog::error("Setting main to replica failed!"); + slk::Save(coordination::SetMainToReplicaRes{false}, res_builder); + return; + } + + slk::Save(coordination::SetMainToReplicaRes{true}, res_builder); +} + +void CoordinatorHandlers::PromoteReplicaToMainHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, + slk::Builder *res_builder) { + auto &repl_state = dbms_handler.ReplicationState(); + + if (!repl_state.IsReplica()) { + spdlog::error("Failover must be performed on replica!"); + slk::Save(coordination::PromoteReplicaToMainRes{false}, res_builder); + return; + } + + auto repl_server_config = std::get<replication::RoleReplicaData>(repl_state.ReplicationData()).config; + + // This can fail because of disk. If it does, the cluster state could get inconsistent. + // We don't handle disk issues.
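+  // Order of operations below: promote the local storage to MAIN first, then register every
+  // replica listed in the request and start its replication client; any failure along the way
+  // is reported back to the coordinator as PromoteReplicaToMainRes{false}.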
+ if (bool success = memgraph::dbms::DoReplicaToMainPromotion(dbms_handler); !success) { + spdlog::error("Promoting replica to main failed!"); + slk::Save(coordination::PromoteReplicaToMainRes{false}, res_builder); + return; + } + + coordination::PromoteReplicaToMainReq req; + slk::Load(&req, req_reader); + + auto const converter = [](const auto &repl_info_config) { + return replication::ReplicationClientConfig{ + .name = repl_info_config.instance_name, + .mode = repl_info_config.replication_mode, + .ip_address = repl_info_config.replication_ip_address, + .port = repl_info_config.replication_port, + }; + }; + + MG_ASSERT( + std::get(repl_state.ReplicationData()).registered_replicas_.empty(), + "No replicas should be registered after promoting replica to main and before registering replication clients!"); + + // registering replicas + for (auto const &config : req.replication_clients_info | ranges::views::transform(converter)) { + auto instance_client = repl_state.RegisterReplica(config); + if (instance_client.HasError()) { + switch (instance_client.GetError()) { + // Can't happen, we are already replica + case memgraph::replication::RegisterReplicaError::NOT_MAIN: + spdlog::error("Failover must be performed to main!"); + slk::Save(coordination::PromoteReplicaToMainRes{false}, res_builder); + return; + // Can't happen, checked on the coordinator side + case memgraph::replication::RegisterReplicaError::NAME_EXISTS: + spdlog::error("Replica with the same name already exists!"); + slk::Save(coordination::PromoteReplicaToMainRes{false}, res_builder); + return; + // Can't happen, checked on the coordinator side + case memgraph::replication::RegisterReplicaError::ENDPOINT_EXISTS: + spdlog::error("Replica with the same endpoint already exists!"); + slk::Save(coordination::PromoteReplicaToMainRes{false}, res_builder); + return; + // We don't handle disk issues + case memgraph::replication::RegisterReplicaError::COULD_NOT_BE_PERSISTED: + spdlog::error("Registered replica could not be persisted!"); + slk::Save(coordination::PromoteReplicaToMainRes{false}, res_builder); + return; + case memgraph::replication::RegisterReplicaError::SUCCESS: + break; + } + } + if (!allow_mt_repl && dbms_handler.All().size() > 1) { + spdlog::warn("Multi-tenant replication is currently not supported!"); + } + + auto &instance_client_ref = *instance_client.GetValue(); + + // Update system before enabling individual storage <-> replica clients + dbms_handler.SystemRestore(instance_client_ref); + + // TODO: (andi) Policy for register all databases + // Will be resolved after deciding about choosing new replica + const bool all_clients_good = memgraph::dbms::RegisterAllDatabasesClients(dbms_handler, instance_client_ref); + MG_ASSERT(all_clients_good, "Failed to register one or more databases to the REPLICA \"{}\".", config.name); + + StartReplicaClient(dbms_handler, instance_client_ref); + } + + slk::Save(coordination::PromoteReplicaToMainRes{true}, res_builder); +} + +} // namespace memgraph::dbms +#endif diff --git a/src/dbms/coordinator_handlers.hpp b/src/dbms/coordinator_handlers.hpp new file mode 100644 index 000000000..ae4c59a0a --- /dev/null +++ b/src/dbms/coordinator_handlers.hpp @@ -0,0 +1,34 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#ifdef MG_ENTERPRISE + +#include "slk/serialization.hpp" + +namespace memgraph::dbms { + +class DbmsHandler; + +class CoordinatorHandlers { + public: + static void Register(DbmsHandler &dbms_handler); + + private: + static void PromoteReplicaToMainHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, + slk::Builder *res_builder); + static void SetMainToReplicaHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder); +}; + +} // namespace memgraph::dbms + +#endif diff --git a/src/dbms/database.cpp b/src/dbms/database.cpp index 74ee13892..9a56d400a 100644 --- a/src/dbms/database.cpp +++ b/src/dbms/database.cpp @@ -26,7 +26,7 @@ Database::Database(storage::Config config, replication::ReplicationState &repl_s streams_{config.durability.storage_directory / "streams"}, plan_cache_{FLAGS_query_plan_cache_max_size}, repl_state_(&repl_state) { - if (config.storage_mode == memgraph::storage::StorageMode::ON_DISK_TRANSACTIONAL || config.force_on_disk || + if (config.salient.storage_mode == memgraph::storage::StorageMode::ON_DISK_TRANSACTIONAL || config.force_on_disk || utils::DirExists(config.disk.main_storage_directory)) { storage_ = std::make_unique(std::move(config)); } else { diff --git a/src/dbms/database.hpp b/src/dbms/database.hpp index 955c66998..2d7d3fe88 100644 --- a/src/dbms/database.hpp +++ b/src/dbms/database.hpp @@ -81,7 +81,14 @@ class Database { * * @return const std::string& */ - const std::string &id() const { return storage_->id(); } + const std::string &name() const { return storage_->name(); } + + /** + * @brief Unique storage identified (uuid) + * + * @return const utils::UUID& + */ + const utils::UUID &uuid() const { return storage_->uuid(); } /** * @brief Returns the storage configuration @@ -103,7 +110,7 @@ class Database { * @param force_directory Use the configured directory, do not try to decipher the multi-db version * @return DatabaseInfo */ - DatabaseInfo GetInfo(bool force_directory, replication::ReplicationRole replication_role) const { + DatabaseInfo GetInfo(bool force_directory, replication_coordination_glue::ReplicationRole replication_role) const { DatabaseInfo info; info.storage_info = storage_->GetInfo(force_directory, replication_role); info.triggers = trigger_store_.GetTriggerInfo().size(); diff --git a/src/dbms/database_handler.hpp b/src/dbms/database_handler.hpp index 617e614c3..de5f813ba 100644 --- a/src/dbms/database_handler.hpp +++ b/src/dbms/database_handler.hpp @@ -51,7 +51,7 @@ class DatabaseHandler : public Handler { * @param config Storage configuration * @return HandlerT::NewResult */ - HandlerT::NewResult New(std::string_view name, storage::Config config, replication::ReplicationState &repl_state) { + HandlerT::NewResult New(storage::Config config, replication::ReplicationState &repl_state) { // Control that no one is using the same data directory if (std::any_of(begin(), end(), [&](auto &elem) { auto db_acc = elem.second.access(); @@ -61,8 +61,7 @@ class DatabaseHandler : public Handler { spdlog::info("Tried to generate new storage using a claimed directory."); return NewError::EXISTS; } - config.name = name; // Set storage id via config - return HandlerT::New(std::piecewise_construct, name, config, repl_state); + return HandlerT::New(std::piecewise_construct, 
config.salient.name, config, repl_state); } /** diff --git a/src/dbms/dbms_handler.cpp b/src/dbms/dbms_handler.cpp index 0af9364bf..7222c4461 100644 --- a/src/dbms/dbms_handler.cpp +++ b/src/dbms/dbms_handler.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -11,56 +11,205 @@ #include "dbms/dbms_handler.hpp" +#include "dbms/coordinator_handlers.hpp" +#include "flags/replication.hpp" + +#include +#include + +#include "dbms/constants.hpp" +#include "dbms/global.hpp" +#include "dbms/replication_client.hpp" +#include "spdlog/spdlog.h" +#include "utils/exceptions.hpp" +#include "utils/logging.hpp" +#include "utils/uuid.hpp" + namespace memgraph::dbms { + #ifdef MG_ENTERPRISE + +namespace { +constexpr std::string_view kDBPrefix = "database:"; // Key prefix for database durability +constexpr std::string_view kLastCommitedSystemTsKey = "last_commited_system_ts"; // Key for timestamp durability +} // namespace + +struct Durability { + enum class DurabilityVersion : uint8_t { + V0 = 0, + V1, + }; + + struct VersionException : public utils::BasicException { + VersionException() : utils::BasicException("Unsupported durability version!") {} + }; + + struct UnknownVersionException : public utils::BasicException { + UnknownVersionException() : utils::BasicException("Unable to parse the durability version!") {} + }; + + struct MigrationException : public utils::BasicException { + MigrationException() : utils::BasicException("Failed to migrate to the current durability version!") {} + }; + + static DurabilityVersion VersionCheck(std::optional val) { + if (!val) { + return DurabilityVersion::V0; + } + if (val == "V1") { + return DurabilityVersion::V1; + } + throw UnknownVersionException(); + }; + + static auto GenKey(std::string_view name) -> std::string { return fmt::format("{}{}", kDBPrefix, name); } + + static auto GenVal(utils::UUID uuid, std::filesystem::path rel_dir) { + nlohmann::json json; + json["uuid"] = uuid; + json["rel_dir"] = rel_dir; + // TODO: Serialize the configuration + return json.dump(); + } + + static void Migrate(kvstore::KVStore *durability, const std::filesystem::path &root) { + const auto ver_val = durability->Get("version"); + const auto ver = VersionCheck(ver_val); + + std::map to_put; + std::vector to_delete; + + // Update from V0 to V1 + if (ver == DurabilityVersion::V0) { + for (const auto &[key, val] : *durability) { + if (key == "version") continue; // Reserved key + // Generate a UUID + auto const uuid = utils::UUID(); + // New json values + auto new_key = GenKey(key); + auto path = root; + if (key != kDefaultDB) { // Special case for non-default DBs + // Move directory to new UUID dir + path = root / kMultiTenantDir / std::string{uuid}; + std::filesystem::path old_dir(root / kMultiTenantDir / key); + std::error_code ec; + std::filesystem::rename(old_dir, path, ec); + MG_ASSERT(!ec, "Failed to upgrade durability: cannot move default directory."); + } + // Generate json and update value + auto new_data = GenVal(uuid, std::filesystem::relative(path, root)); + to_put.emplace(std::move(new_key), std::move(new_data)); + to_delete.emplace_back(key); + } + } + + // Set version + durability->Put("version", "V1"); + // Update to the new key-value pairs + if (!durability->PutAndDeleteMultiple(to_put, to_delete)) { + throw MigrationException(); + } + } +}; + 
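+// Illustrative example of the V0 -> V1 migration above (names and UUIDs are hypothetical):
+//   V0 keys:  "memgraph", "db2"
+//   V1 keys:  "version"           -> "V1"
+//             "database:memgraph" -> {"uuid":"<uuid1>","rel_dir":"."}
+//             "database:db2"      -> {"uuid":"<uuid2>","rel_dir":"databases/<uuid2>"}
+// i.e. every database entry gains a UUID plus a directory relative to the data root, and the
+// on-disk directory of a non-default database is renamed from databases/<name> to databases/<uuid>.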
DbmsHandler::DbmsHandler( storage::Config config, memgraph::utils::Synchronized *auth, - bool recovery_on_startup, bool delete_on_drop) - : default_config_{std::move(config)}, - delete_on_drop_(delete_on_drop), - repl_state_{ReplicationStateRootPath(default_config_)} { + bool recovery_on_startup) + : default_config_{std::move(config)}, repl_state_{ReplicationStateRootPath(default_config_)} { // TODO: Decouple storage config from dbms config // TODO: Save individual db configs inside the kvstore and restore from there - storage::UpdatePaths(default_config_, default_config_.durability.storage_directory / "databases"); - const auto &db_dir = default_config_.durability.storage_directory; + + /* + * FILESYSTEM MANIPULATION + */ + const auto &root = default_config_.durability.storage_directory; + storage::UpdatePaths(default_config_, root); + const auto &db_dir = default_config_.durability.storage_directory / kMultiTenantDir; + // TODO: Unify durability and wal const auto durability_dir = db_dir / ".durability"; utils::EnsureDirOrDie(db_dir); utils::EnsureDirOrDie(durability_dir); durability_ = std::make_unique(durability_dir); - // Generate the default database - MG_ASSERT(!NewDefault_().HasError(), "Failed while creating the default DB."); + /* + * DURABILITY + */ + // Migrate durability + Durability::Migrate(durability_.get(), root); + auto directories = std::set{std::string{kDefaultDB}}; // Recover previous databases if (recovery_on_startup) { - for (const auto &[name, _] : *durability_) { - if (name == kDefaultDB) continue; // Already set - spdlog::info("Restoring database {}.", name); - MG_ASSERT(!New_(name).HasError(), "Failed while creating database {}.", name); + auto it = durability_->begin(std::string(kDBPrefix)); + auto end = durability_->end(std::string(kDBPrefix)); + for (; it != end; ++it) { + const auto &[key, config_json] = *it; + const auto name = key.substr(kDBPrefix.size()); + auto json = nlohmann::json::parse(config_json); + const auto uuid = json.at("uuid").get(); + const auto rel_dir = json.at("rel_dir").get(); + spdlog::info("Restoring database {} at {}.", name, rel_dir); + auto new_db = New_(name, uuid, rel_dir); + MG_ASSERT(!new_db.HasError(), "Failed while creating database {}.", name); + directories.emplace(rel_dir.filename()); spdlog::info("Database {} restored.", name); } + // Read the last timestamp + auto lcst = durability_->Get(kLastCommitedSystemTsKey); + if (lcst) { + last_commited_system_timestamp_ = std::stoul(*lcst); + system_timestamp_ = last_commited_system_timestamp_; + } } else { // Clear databases from the durability list and auth auto locked_auth = auth->Lock(); - for (const auto &[name, _] : *durability_) { + auto it = durability_->begin(std::string{kDBPrefix}); + auto end = durability_->end(std::string{kDBPrefix}); + for (; it != end; ++it) { + const auto &[key, _] = *it; + const auto name = key.substr(kDBPrefix.size()); if (name == kDefaultDB) continue; locked_auth->DeleteDatabase(name); - durability_->Delete(name); + durability_->Delete(key); + } + // Delete the last timestamp + durability_->Delete(kLastCommitedSystemTsKey); + } + + /* + * DATABASES CLEAN UP + */ + // Clean the unused directories + for (const auto &entry : std::filesystem::directory_iterator(db_dir)) { + const auto &name = entry.path().filename().string(); + if (entry.is_directory() && !name.empty() && name.front() != '.') { + auto itr = directories.find(name); + if (itr == directories.end()) { + std::error_code dummy; + std::filesystem::remove_all(entry, dummy); + } else { + 
directories.erase(itr); + } } } + /* + * DEFAULT DB SETUP + */ + // Setup the default DB + SetupDefault_(); + + /* + * REPLICATION RECOVERY AND STARTUP + */ // Startup replication state (if recovered at startup) - auto replica = [this](replication::RoleReplicaData const &data) { - // Register handlers - InMemoryReplicationHandlers::Register(this, *data.server); - if (!data.server->Start()) { - spdlog::error("Unable to start the replication server."); - return false; - } - return true; - }; - // Replication frequent check start + auto replica = [this](replication::RoleReplicaData const &data) { return StartRpcServer(*this, data); }; + // Replication recovery and frequent check start auto main = [this](replication::RoleMainData &data) { + for (auto &client : data.registered_replicas_) { + SystemRestore(client); + } + ForEach([this](DatabaseAccess db) { RecoverReplication(db); }); for (auto &client : data.registered_replicas_) { StartReplicaClient(*this, client); } @@ -69,7 +218,232 @@ DbmsHandler::DbmsHandler( // Startup proccess for main/replica MG_ASSERT(std::visit(memgraph::utils::Overloaded{replica, main}, repl_state_.ReplicationData()), "Replica recovery failure!"); -} -#endif + // Warning + if (default_config_.durability.snapshot_wal_mode == storage::Config::Durability::SnapshotWalMode::DISABLED && + repl_state_.IsMain()) { + spdlog::warn( + "The instance has the MAIN replication role, but durability logs and snapshots are disabled. Please " + "consider " + "enabling durability by using --storage-snapshot-interval-sec and --storage-wal-enabled flags because " + "without write-ahead logs this instance is not replicating any data."); + } + + // MAIN or REPLICA instance + if (FLAGS_coordinator_server_port) { + CoordinatorHandlers::Register(*this); + MG_ASSERT(coordinator_state_.GetCoordinatorServer().Start(), "Failed to start coordinator server!"); + } +} + +DbmsHandler::DeleteResult DbmsHandler::TryDelete(std::string_view db_name) { + std::lock_guard wr(lock_); + if (db_name == kDefaultDB) { + // MSG cannot delete the default db + return DeleteError::DEFAULT_DB; + } + + // Get DB config for the UUID and disk clean up + const auto conf = db_handler_.GetConfig(db_name); + if (!conf) { + return DeleteError::NON_EXISTENT; + } + const auto &storage_path = conf->durability.storage_directory; + const auto &uuid = conf->salient.uuid; + + // Check if db exists + try { + // Low level handlers + if (!db_handler_.TryDelete(db_name)) { + return DeleteError::USING; + } + } catch (utils::BasicException &) { + return DeleteError::NON_EXISTENT; + } + + // Remove from durability list + if (durability_) durability_->Delete(Durability::GenKey(db_name)); + + // Delete disk storage + std::error_code ec; + (void)std::filesystem::remove_all(storage_path, ec); + if (ec) { + spdlog::error(R"(Failed to clean disk while deleting database "{}" stored in {})", db_name, storage_path); + } + + // Success + // Save delta + if (system_transaction_) { + system_transaction_->delta.emplace(SystemTransaction::Delta::drop_database, uuid); + } + return {}; +} + +DbmsHandler::DeleteResult DbmsHandler::Delete(std::string_view db_name) { + auto wr = std::lock_guard(lock_); + return Delete_(db_name); +} + +DbmsHandler::DeleteResult DbmsHandler::Delete(utils::UUID uuid) { + auto wr = std::lock_guard(lock_); + std::string db_name; + try { + const auto db = Get_(uuid); + db_name = db->name(); + } catch (const UnknownDatabaseException &) { + return DeleteError::NON_EXISTENT; + } + return Delete_(db_name); +} + +DbmsHandler::NewResultT 
DbmsHandler::New_(storage::Config storage_config) { + auto new_db = db_handler_.New(storage_config, repl_state_); + + if (new_db.HasValue()) { // Success + // Save delta + if (system_transaction_) { + system_transaction_->delta.emplace(SystemTransaction::Delta::create_database, storage_config.salient); + } + UpdateDurability(storage_config); + return new_db.GetValue(); + } + return new_db.GetError(); +} + +DbmsHandler::DeleteResult DbmsHandler::Delete_(std::string_view db_name) { + if (db_name == kDefaultDB) { + // MSG cannot delete the default db + return DeleteError::DEFAULT_DB; + } + + const auto storage_path = StorageDir_(db_name); + if (!storage_path) return DeleteError::NON_EXISTENT; + + { + auto db = db_handler_.Get(db_name); + if (!db) return DeleteError::NON_EXISTENT; + // TODO: ATM we assume REPLICA won't have streams, + // this is a best effort approach just in case they do + // there is still subtle data race we stream manipulation + // can occur while we are dropping the database + db->prepare_for_deletion(); + auto &database = *db->get(); + database.streams()->StopAll(); + database.streams()->DropAll(); + database.thread_pool()->Shutdown(); + } + + // Remove from durability list + if (durability_) durability_->Delete(Durability::GenKey(db_name)); + + // Check if db exists + // Low level handlers + db_handler_.DeferDelete(db_name, [storage_path = *storage_path, db_name = std::string{db_name}]() { + // Delete disk storage + std::error_code ec; + (void)std::filesystem::remove_all(storage_path, ec); + if (ec) { + spdlog::error(R"(Failed to clean disk while deleting database "{}" stored in {})", db_name, storage_path); + } + }); + + return {}; // Success +} + +void DbmsHandler::UpdateDurability(const storage::Config &config, std::optional rel_dir) { + if (!durability_) return; + // Save database in a list of active databases + const auto &key = Durability::GenKey(config.salient.name); + if (rel_dir == std::nullopt) + rel_dir = + std::filesystem::relative(config.durability.storage_directory, default_config_.durability.storage_directory); + const auto &val = Durability::GenVal(config.salient.uuid, *rel_dir); + durability_->Put(key, val); +} + +AllSyncReplicaStatus DbmsHandler::Commit() { + if (system_transaction_ == std::nullopt || system_transaction_->delta == std::nullopt) + return AllSyncReplicaStatus::AllCommitsConfirmed; // Nothing to commit + const auto &delta = *system_transaction_->delta; + + auto sync_status = AllSyncReplicaStatus::AllCommitsConfirmed; + // TODO Create a system client that can handle all of this automatically + switch (delta.action) { + using enum SystemTransaction::Delta::Action; + case CREATE_DATABASE: { + // Replication + auto main_handler = [&](memgraph::replication::RoleMainData &main_data) { + // TODO: data race issue? 
registered_replicas_ access not protected + // This is sync in any case, as this is the startup + for (auto &client : main_data.registered_replicas_) { + bool completed = SteamAndFinalizeDelta( + client, + [](const storage::replication::CreateDatabaseRes &response) { + return response.result != storage::replication::CreateDatabaseRes::Result::FAILURE; + }, + std::string(main_data.epoch_.id()), last_commited_system_timestamp_, + system_transaction_->system_timestamp, delta.config); + // TODO: reduce duplicate code + if (!completed && client.mode_ == replication_coordination_glue::ReplicationMode::SYNC) { + sync_status = AllSyncReplicaStatus::SomeCommitsUnconfirmed; + } + } + // Sync database with REPLICAs + RecoverReplication(Get_(delta.config.name)); + }; + auto replica_handler = [](memgraph::replication::RoleReplicaData &) { /* Nothing to do */ }; + std::visit(utils::Overloaded{main_handler, replica_handler}, repl_state_.ReplicationData()); + } break; + case DROP_DATABASE: { + // Replication + auto main_handler = [&](memgraph::replication::RoleMainData &main_data) { + // TODO: data race issue? registered_replicas_ access not protected + // This is sync in any case, as this is the startup + for (auto &client : main_data.registered_replicas_) { + bool completed = SteamAndFinalizeDelta( + client, + [](const storage::replication::DropDatabaseRes &response) { + return response.result != storage::replication::DropDatabaseRes::Result::FAILURE; + }, + std::string(main_data.epoch_.id()), last_commited_system_timestamp_, + system_transaction_->system_timestamp, delta.uuid); + // TODO: reduce duplicate code + if (!completed && client.mode_ == replication_coordination_glue::ReplicationMode::SYNC) { + sync_status = AllSyncReplicaStatus::SomeCommitsUnconfirmed; + } + } + }; + auto replica_handler = [](memgraph::replication::RoleReplicaData &) { /* Nothing to do */ }; + std::visit(utils::Overloaded{main_handler, replica_handler}, repl_state_.ReplicationData()); + } break; + } + + durability_->Put(kLastCommitedSystemTsKey, std::to_string(system_transaction_->system_timestamp)); + last_commited_system_timestamp_ = system_transaction_->system_timestamp; + ResetSystemTransaction(); + return sync_status; +} + +#else // not MG_ENTERPRISE + +AllSyncReplicaStatus DbmsHandler::Commit() { + if (system_transaction_ == std::nullopt || system_transaction_->delta == std::nullopt) { + return AllSyncReplicaStatus::AllCommitsConfirmed; // Nothing to commit + } + const auto &delta = *system_transaction_->delta; + + switch (delta.action) { + using enum SystemTransaction::Delta::Action; + case CREATE_DATABASE: + case DROP_DATABASE: + /* Community edition doesn't support multi-tenant replication */ + break; + } + + last_commited_system_timestamp_ = system_transaction_->system_timestamp; + ResetSystemTransaction(); + return AllSyncReplicaStatus::AllCommitsConfirmed; +} + +#endif } // namespace memgraph::dbms diff --git a/src/dbms/dbms_handler.hpp b/src/dbms/dbms_handler.hpp index f9aa621dc..2066321e2 100644 --- a/src/dbms/dbms_handler.hpp +++ b/src/dbms/dbms_handler.hpp @@ -12,34 +12,37 @@ #pragma once #include -#include +#include #include #include #include #include #include -#include -#include #include -#include +#include #include "auth/auth.hpp" #include "constants.hpp" #include "dbms/database.hpp" #include "dbms/inmemory/replication_handlers.hpp" +#include "dbms/replication_handler.hpp" +#include "kvstore/kvstore.hpp" +#include "replication/replication_client.hpp" +#include "storage/v2/config.hpp" +#include 
"storage/v2/replication/enums.hpp" +#include "storage/v2/replication/rpc.hpp" +#include "storage/v2/transaction.hpp" +#include "utils/thread_pool.hpp" #ifdef MG_ENTERPRISE +#include "coordination/coordinator_state.hpp" #include "dbms/database_handler.hpp" #endif -#include "dbms/replication_client.hpp" +#include "dbms/transaction.hpp" #include "global.hpp" #include "query/config.hpp" #include "query/interpreter_context.hpp" #include "spdlog/spdlog.h" -#include "storage/v2/durability/durability.hpp" -#include "storage/v2/durability/paths.hpp" #include "storage/v2/isolation_level.hpp" -#include "utils/exceptions.hpp" -#include "utils/file.hpp" #include "utils/logging.hpp" #include "utils/result.hpp" #include "utils/rw_lock.hpp" @@ -48,6 +51,11 @@ namespace memgraph::dbms { +enum class AllSyncReplicaStatus { + AllCommitsConfirmed, + SomeCommitsUnconfirmed, +}; + struct Statistics { uint64_t num_vertex; //!< Sum of vertexes in every database uint64_t num_edges; //!< Sum of edges in every database @@ -102,11 +110,10 @@ class DbmsHandler { * @param configs storage configuration * @param auth pointer to the global authenticator * @param recovery_on_startup restore databases (and its content) and authentication data - * @param delete_on_drop when dropping delete any associated directories on disk */ DbmsHandler(storage::Config config, memgraph::utils::Synchronized *auth, - bool recovery_on_startup, bool delete_on_drop); // TODO If more arguments are added use a config strut + bool recovery_on_startup); // TODO If more arguments are added use a config struct #else /** * @brief Initialize the handler. A single database is supported in community edition. @@ -116,10 +123,12 @@ class DbmsHandler { DbmsHandler(storage::Config config) : repl_state_{ReplicationStateRootPath(config)}, db_gatekeeper_{[&] { - config.name = kDefaultDB; + config.salient.name = kDefaultDB; return std::move(config); }(), - repl_state_} {} + repl_state_} { + RecoverReplication(Get()); + } #endif #ifdef MG_ENTERPRISE @@ -131,9 +140,56 @@ class DbmsHandler { */ NewResultT New(const std::string &name) { std::lock_guard wr(lock_); - return New_(name, name); + const auto uuid = utils::UUID{}; + return New_(name, uuid); } + /** + * @brief Create new if name/uuid do not match any database. Drop and recreate if database already present. + * @note Default database is not dropped, only its UUID is updated and only if the database is clean. 
+ * + * @param config desired salient config + * @return NewResultT context on success, error on failure + */ + NewResultT Update(const storage::SalientConfig &config) { + std::lock_guard wr(lock_); + auto new_db = New_(config); + if (new_db.HasValue() || new_db.GetError() != NewError::EXISTS) { + // NOTE: If db already exists we retry below + return new_db; + } + + spdlog::debug("Trying to create db '{}' on replica which already exists.", config.name); + + auto db = Get_(config.name); + if (db->uuid() == config.uuid) { // Same db + return db; + } + + spdlog::debug("Different UUIDs"); + + // TODO: Fix this hack + if (config.name == kDefaultDB) { + if (db->storage()->repl_storage_state_.last_commit_timestamp_ != storage::kTimestampInitialId) { + spdlog::debug("Default storage is not clean, cannot update UUID..."); + return NewError::GENERIC; // Update error + } + spdlog::debug("Update default db's UUID"); + // Default db cannot be deleted and remade, have to just update the UUID + db->storage()->config_.salient.uuid = config.uuid; + UpdateDurability(db->storage()->config_, "."); + return db; + } + + spdlog::debug("Drop database and recreate with the correct UUID"); + // Defer drop + (void)Delete_(db->name()); + // Second attempt + return New_(config); + } + + void UpdateDurability(const storage::Config &config, std::optional rel_dir = {}); + /** * @brief Get the context associated with the "name" database * @@ -145,6 +201,19 @@ class DbmsHandler { std::shared_lock rd(lock_); return Get_(name); } + + /** + * @brief Get the context associated with the UUID database + * + * @param uuid + * @return DatabaseAccess + * @throw UnknownDatabaseException if database not found + */ + DatabaseAccess Get(const utils::UUID &uuid) { + std::shared_lock rd(lock_); + return Get_(uuid); + } + #else /** * @brief Get the context associated with the default database @@ -160,50 +229,28 @@ class DbmsHandler { #ifdef MG_ENTERPRISE /** - * @brief Delete database. + * @brief Attempt to delete database. * * @param db_name database name * @return DeleteResult error on failure */ - DeleteResult Delete(const std::string &db_name) { - std::lock_guard wr(lock_); - if (db_name == kDefaultDB) { - // MSG cannot delete the default db - return DeleteError::DEFAULT_DB; - } + DeleteResult TryDelete(std::string_view db_name); - const auto storage_path = StorageDir_(db_name); - if (!storage_path) return DeleteError::NON_EXISTENT; + /** + * @brief Delete or defer deletion of database. + * + * @param db_name database name + * @return DeleteResult error on failure + */ + DeleteResult Delete(std::string_view db_name); - // Check if db exists - try { - // Low level handlers - if (!db_handler_.Delete(db_name)) { - return DeleteError::USING; - } - } catch (utils::BasicException &) { - return DeleteError::NON_EXISTENT; - } - - // Remove from durability list - if (durability_) durability_->Delete(db_name); - - // Delete disk storage - if (delete_on_drop_) { - std::error_code ec; - (void)std::filesystem::remove_all(*storage_path, ec); - if (ec) { - spdlog::error("Failed to clean disk while deleting database \"{}\".", db_name); - defunct_dbs_.emplace(db_name); - return DeleteError::DISK_FAIL; - } - } - - // Delete from defunct_dbs_ (in case a second delete call was successful) - defunct_dbs_.erase(db_name); - - return {}; // Success - } + /** + * @brief Delete or defer deletion of database. 
+ * + * @param uuid database UUID + * @return DeleteResult error on failure + */ + DeleteResult Delete(utils::UUID uuid); #endif /** @@ -216,7 +263,7 @@ class DbmsHandler { std::shared_lock rd(lock_); return db_handler_.All(); #else - return {db_gatekeeper_.access()->get()->id()}; + return {db_gatekeeper_.access()->get()->name()}; #endif } @@ -226,6 +273,10 @@ class DbmsHandler { bool IsMain() const { return repl_state_.IsMain(); } bool IsReplica() const { return repl_state_.IsReplica(); } +#ifdef MG_ENTERPRISE + coordination::CoordinatorState &CoordinatorState() { return coordinator_state_; } +#endif + /** * @brief Return the statistics all databases. * @@ -305,7 +356,7 @@ class DbmsHandler { auto db_acc_opt = db_gk.access(); if (db_acc_opt) { auto &db_acc = *db_acc_opt; - spdlog::debug("Restoring trigger for database \"{}\"", db_acc->id()); + spdlog::debug("Restoring trigger for database \"{}\"", db_acc->name()); auto storage_accessor = db_acc->Access(); auto dba = memgraph::query::DbAccessor{storage_accessor.get()}; db_acc->trigger_store()->RestoreTriggers(&ic->ast_cache, &dba, ic->config.query, ic->auth_checker); @@ -330,7 +381,7 @@ class DbmsHandler { auto db_acc = db_gk.access(); if (db_acc) { auto *db = db_acc->get(); - spdlog::debug("Restoring streams for database \"{}\"", db->id()); + spdlog::debug("Restoring streams for database \"{}\"", db->name()); db->streams()->RestoreStreams(*db_acc, ic); } } @@ -341,7 +392,7 @@ class DbmsHandler { * * @param f */ - void ForEach(auto f) { + void ForEach(std::invocable auto f) { #ifdef MG_ENTERPRISE std::shared_lock rd(lock_); for (auto &[_, db_gk] : db_handler_) { @@ -351,33 +402,103 @@ class DbmsHandler { #endif auto db_acc = db_gk.access(); if (db_acc) { // This isn't an error, just a defunct db - f(db_acc->get()); + f(*db_acc); } } } - /** - * @brief todo - * - * @param f - */ - void ForOne(auto f) { + void NewSystemTransaction() { + DMG_ASSERT(!system_transaction_, "Already running a system transaction"); + system_transaction_.emplace(++system_timestamp_); + } + + void ResetSystemTransaction() { system_transaction_.reset(); } + + //! \tparam RPC An rpc::RequestResponse + //! \tparam Args the args type + //! \param client the client to use for rpc communication + //! \param check predicate to check response is ok + //! \param args arguments to forward to the rpc request + //! 
\return If replica stream is completed or enqueued + template + bool SteamAndFinalizeDelta(auto &client, auto &&check, Args &&...args) { + try { + auto stream = client.rpc_client_.template Stream(std::forward(args)...); + auto task = [&client, check = std::forward(check), stream = std::move(stream)]() mutable { + if (stream.IsDefunct()) { + client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; }); + return false; + } + try { + if (check(stream.AwaitResponse())) { + return true; + } + } catch (memgraph::rpc::GenericRpcFailedException const &e) { + // swallow error, fallthrough to error handling + } + // This replica needs SYSTEM recovery + client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; }); + return false; + }; + + if (client.mode_ == memgraph::replication_coordination_glue::ReplicationMode::ASYNC) { + client.thread_pool_.AddTask([task = utils::CopyMovableFunctionWrapper{std::move(task)}]() mutable { task(); }); + return true; + } + + return task(); + } catch (memgraph::rpc::GenericRpcFailedException const &e) { + // This replica needs SYSTEM recovery + client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; }); + return false; + } + }; + + AllSyncReplicaStatus Commit(); + + auto LastCommitedTS() const -> uint64_t { return last_commited_system_timestamp_; } + void SetLastCommitedTS(uint64_t new_ts) { last_commited_system_timestamp_.store(new_ts); } + #ifdef MG_ENTERPRISE - std::shared_lock rd(lock_); - for (auto &[_, db_gk] : db_handler_) { - auto db_acc = db_gk.access(); - if (db_acc) { // This isn't an error, just a defunct db - if (f(db_acc->get())) break; // Run until the first successful one + // When being called by intepreter no need to gain lock, it should already be under a system transaction + // But concurrently the FrequentCheck is running and will need to lock before reading last_commited_system_timestamp_ + template + void SystemRestore(replication::ReplicationClient &client) { + // Check if system is up to date + if (client.state_.WithLock( + [](auto &state) { return state == memgraph::replication::ReplicationClient::State::READY; })) + return; + + // Try to recover... 
+ { + auto [database_configs, last_commited_system_timestamp] = std::invoke([&] { + auto sys_guard = + std::unique_lock{system_lock_, std::defer_lock}; // ensure no other system transaction in progress + if constexpr (REQUIRE_LOCK) { + sys_guard.lock(); + } + auto configs = std::vector{}; + ForEach([&configs](DatabaseAccess acc) { configs.emplace_back(acc->config().salient); }); + return std::pair{configs, last_commited_system_timestamp_.load()}; + }); + try { + auto stream = client.rpc_client_.Stream(last_commited_system_timestamp, + std::move(database_configs)); + const auto response = stream.AwaitResponse(); + if (response.result == storage::replication::SystemRecoveryRes::Result::FAILURE) { + client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; }); + return; + } + } catch (memgraph::rpc::GenericRpcFailedException const &e) { + client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; }); + return; } } -#else - { - auto db_acc = db_gatekeeper_.access(); - MG_ASSERT(db_acc, "Should always have the database"); - f(db_acc->get()); - } -#endif + + // Successfully recovered + client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::READY; }); } +#endif private: #ifdef MG_ENTERPRISE @@ -387,7 +508,7 @@ class DbmsHandler { * @param name Database name * @return std::optional */ - std::optional StorageDir_(const std::string &name) { + std::optional StorageDir_(std::string_view name) { const auto conf = db_handler_.GetConfig(name); if (conf) { return conf->durability.storage_directory; @@ -400,105 +521,108 @@ class DbmsHandler { * @brief Create a new Database associated with the "name" database * * @param name name of the database + * @param uuid undelying RocksDB directory * @return NewResultT context on success, error on failure */ - NewResultT New_(const std::string &name) { return New_(name, name); } + NewResultT New_(std::string_view name, utils::UUID uuid, std::optional rel_dir = {}) { + auto config_copy = default_config_; + config_copy.salient.name = name; + config_copy.salient.uuid = uuid; + spdlog::debug("Creating database '{}' - '{}'", name, std::string{uuid}); + if (rel_dir) { + storage::UpdatePaths(config_copy, default_config_.durability.storage_directory / *rel_dir); + } else { + storage::UpdatePaths(config_copy, + default_config_.durability.storage_directory / kMultiTenantDir / std::string{uuid}); + } + return New_(std::move(config_copy)); + } /** - * @brief Create a new Database associated with the "name" database + * @brief Create a new Database using the passed configuration * - * @param name name of the database - * @param storage_subdir undelying RocksDB directory + * @param config configuration to be used * @return NewResultT context on success, error on failure */ - NewResultT New_(const std::string &name, std::filesystem::path storage_subdir) { + NewResultT New_(const storage::SalientConfig &config) { auto config_copy = default_config_; - storage::UpdatePaths(config_copy, default_config_.durability.storage_directory / storage_subdir); - return New_(name, config_copy); + config_copy.salient = config; // name, uuid, mode, etc + UpdatePaths(config_copy, config_copy.durability.storage_directory / kMultiTenantDir / std::string{config.uuid}); + return New_(std::move(config_copy)); } /** * @brief Create a new Database associated with the "name" database * - * @param name name of the database * @param storage_config storage configuration * 
@return NewResultT context on success, error on failure */ - NewResultT New_(const std::string &name, storage::Config &storage_config) { - if (defunct_dbs_.contains(name)) { - spdlog::warn("Failed to generate database due to the unknown state of the previously defunct database \"{}\".", - name); - return NewError::DEFUNCT; - } + NewResultT New_(storage::Config storage_config); - auto new_db = db_handler_.New(name, storage_config, repl_state_); - if (new_db.HasValue()) { - // Success - if (durability_) durability_->Put(name, "ok"); // TODO: Serialize the configuration? - return new_db.GetValue(); - } - return new_db.GetError(); - } + // TODO: new overload of Delete_ with DatabaseAccess + DeleteResult Delete_(std::string_view db_name); /** * @brief Create a new Database associated with the default database * * @return NewResultT context on success, error on failure */ - NewResultT NewDefault_() { - // Create the default DB in the root (this is how it was done pre multi-tenancy) - auto res = New_(kDefaultDB, ".."); - if (res.HasValue()) { - // For back-compatibility... - // Recreate the dbms layout for the default db and symlink to the root - const auto dir = StorageDir_(kDefaultDB); - MG_ASSERT(dir, "Failed to find storage path."); - const auto main_dir = *dir / "databases" / kDefaultDB; + void SetupDefault_() { + try { + Get(kDefaultDB); + } catch (const UnknownDatabaseException &) { + // No default DB restored, create it + MG_ASSERT(New_(kDefaultDB, {/* random UUID */}, ".").HasValue(), "Failed while creating the default database"); + } - if (!std::filesystem::exists(main_dir)) { - std::filesystem::create_directory(main_dir); - } + // For back-compatibility... + // Recreate the dbms layout for the default db and symlink to the root + const auto dir = StorageDir_(kDefaultDB); + MG_ASSERT(dir, "Failed to find storage path."); + const auto main_dir = *dir / kMultiTenantDir / kDefaultDB; - // Force link on-disk directories - const auto conf = db_handler_.GetConfig(kDefaultDB); - MG_ASSERT(conf, "No configuration for the default database."); - const auto &tmp_conf = conf->disk; - std::vector to_link{ - tmp_conf.main_storage_directory, tmp_conf.label_index_directory, - tmp_conf.label_property_index_directory, tmp_conf.unique_constraints_directory, - tmp_conf.name_id_mapper_directory, tmp_conf.id_name_mapper_directory, - tmp_conf.durability_directory, tmp_conf.wal_directory, - }; + if (!std::filesystem::exists(main_dir)) { + std::filesystem::create_directory(main_dir); + } - // Add in-memory paths - // Some directories are redundant (skip those) - const std::vector skip{".lock", "audit_log", "auth", "databases", "internal_modules", "settings"}; - for (auto const &item : std::filesystem::directory_iterator{*dir}) { - const auto dir_name = std::filesystem::relative(item.path(), item.path().parent_path()); - if (std::find(skip.begin(), skip.end(), dir_name) != skip.end()) continue; - to_link.push_back(item.path()); - } + // Force link on-disk directories + const auto conf = db_handler_.GetConfig(kDefaultDB); + MG_ASSERT(conf, "No configuration for the default database."); + const auto &tmp_conf = conf->disk; + std::vector to_link{ + tmp_conf.main_storage_directory, tmp_conf.label_index_directory, + tmp_conf.label_property_index_directory, tmp_conf.unique_constraints_directory, + tmp_conf.name_id_mapper_directory, tmp_conf.id_name_mapper_directory, + tmp_conf.durability_directory, tmp_conf.wal_directory, + }; - // Symlink to root dir - for (auto const &item : to_link) { - const auto dir_name = 
std::filesystem::relative(item, item.parent_path()); - const auto link = main_dir / dir_name; - const auto to = std::filesystem::relative(item, main_dir); - if (!std::filesystem::is_symlink(link) && !std::filesystem::exists(link)) { - std::filesystem::create_directory_symlink(to, link); - } else { // Check existing link - std::error_code ec; - const auto test_link = std::filesystem::read_symlink(link, ec); - if (ec || test_link != to) { - MG_ASSERT(false, - "Memgraph storage directory incompatible with new version.\n" - "Please use a clean directory or remove \"{}\" and try again.", - link.string()); - } + // Add in-memory paths + // Some directories are redundant (skip those) + const std::vector skip{".lock", "audit_log", "auth", "databases", "internal_modules", "settings"}; + for (auto const &item : std::filesystem::directory_iterator{*dir}) { + const auto dir_name = std::filesystem::relative(item.path(), item.path().parent_path()); + if (std::find(skip.begin(), skip.end(), dir_name) != skip.end()) continue; + to_link.push_back(item.path()); + } + + // Symlink to root dir + for (auto const &item : to_link) { + const auto dir_name = std::filesystem::relative(item, item.parent_path()); + const auto link = main_dir / dir_name; + const auto to = std::filesystem::relative(item, main_dir); + if (!std::filesystem::is_symlink(link) && !std::filesystem::exists(link)) { + std::filesystem::create_directory_symlink(to, link); + } else { // Check existing link + std::error_code ec; + const auto test_link = std::filesystem::read_symlink(link, ec); + if (ec || test_link != to) { + MG_ASSERT(false, + "Memgraph storage directory incompatible with new version.\n" + "Please use a clean directory or remove \"{}\" and try again.", + link.string()); } } } - return res; } /** @@ -516,17 +640,57 @@ class DbmsHandler { throw UnknownDatabaseException("Tried to retrieve an unknown database \"{}\".", name); } + /** + * @brief Get the context associated with the UUID database + * + * @param uuid + * @return DatabaseAccess + * @throw UnknownDatabaseException if database not found + */ + DatabaseAccess Get_(const utils::UUID &uuid) { + // TODO Speed up + for (auto &[_, db_gk] : db_handler_) { + auto acc = db_gk.access(); + if (acc->get()->uuid() == uuid) { + return std::move(*acc); + } + } + throw UnknownDatabaseException("Tried to retrieve an unknown database with UUID \"{}\".", std::string{uuid}); + } +#endif + + void RecoverReplication(DatabaseAccess db_acc) { + if (allow_mt_repl || db_acc->name() == dbms::kDefaultDB) { + // Handle global replication state + spdlog::info("Replication configuration will be stored and will be automatically restored in case of a crash."); + // RECOVER REPLICA CONNECTIONS + memgraph::dbms::RestoreReplication(repl_state_, std::move(db_acc)); + } else if (const ::memgraph::replication::RoleMainData *data = + std::get_if<::memgraph::replication::RoleMainData>(&repl_state_.ReplicationData()); + data && !data->registered_replicas_.empty()) { + spdlog::warn("Multi-tenant replication is currently not supported!"); + } + } + +#ifdef MG_ENTERPRISE mutable LockT lock_{utils::RWLock::Priority::READ}; //!< protective lock storage::Config default_config_; //!< Storage configuration used when creating new databases DatabaseHandler db_handler_; //!< multi-tenancy storage handler std::unique_ptr durability_; //!< list of active dbs (pointer so we can postpone its creation) - bool delete_on_drop_; //!< Flag defining if dropping storage also deletes its directory - std::set defunct_dbs_; //!< Databases 
that are in an unknown state due to various failures + coordination::CoordinatorState coordinator_state_; //!< Replication coordinator #endif + // TODO: Make an api + public: + utils::ResourceLock system_lock_{}; //!> Ensure exclusive access for system queries + private: + std::optional system_transaction_; //!< Current system transaction (only one at a time) + uint64_t system_timestamp_{storage::kTimestampInitialId}; //!< System timestamp + std::atomic_uint64_t last_commited_system_timestamp_{ + storage::kTimestampInitialId}; //!< Last commited system timestamp replication::ReplicationState repl_state_; //!< Global replication state #ifndef MG_ENTERPRISE mutable utils::Gatekeeper db_gatekeeper_; //!< Single databases gatekeeper #endif -}; +}; // namespace memgraph::dbms } // namespace memgraph::dbms diff --git a/src/dbms/handler.hpp b/src/dbms/handler.hpp index 568b2fc7c..53724dabe 100644 --- a/src/dbms/handler.hpp +++ b/src/dbms/handler.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -21,6 +21,7 @@ #include "utils/exceptions.hpp" #include "utils/gatekeeper.hpp" #include "utils/result.hpp" +#include "utils/thread_pool.hpp" namespace memgraph::dbms { @@ -82,7 +83,7 @@ class Handler { * @return true on success * @throw BasicException */ - bool Delete(const std::string &name) { + bool TryDelete(std::string_view name) { if (auto itr = items_.find(name); itr != items_.end()) { auto db_acc = itr->second.access(); if (db_acc && db_acc->try_delete()) { @@ -92,9 +93,42 @@ class Handler { } return false; } + // TODO: Change to return enum throw utils::BasicException("Unknown item \"{}\".", name); } + /** + * @brief Delete or defunct the context associated with the name. + * + * @param name Name associated with the context to delete + * @param post_delete_func What to do after deletion has happened + */ + template + void DeferDelete(std::string_view name, Func &&post_delete_func) { + auto itr = items_.find(name); + if (itr == items_.end()) return; + + auto db_acc = itr->second.access(); + if (!db_acc) return; + + if (db_acc->try_delete()) { + // Delete the database now + db_acc->reset(); + post_delete_func(); + } else { + // Defer deletion + db_acc->reset(); + // TODO: Make sure this shuts down correctly + auto task = [gk = std::move(itr->second), post_delete_func = std::forward(post_delete_func)]() mutable { + gk.~Gatekeeper(); + post_delete_func(); + }; + defer_pool_.AddTask(utils::CopyMovableFunctionWrapper{std::move(task)}); + } + // In any case remove from handled map + items_.erase(itr); + } + /** * @brief Check if a name is already used. * @@ -120,6 +154,7 @@ class Handler { private: std::unordered_map, string_hash, std::equal_to<>> items_; //!< map to all active items + utils::ThreadPool defer_pool_{1}; }; } // namespace memgraph::dbms diff --git a/src/dbms/inmemory/replication_handlers.cpp b/src/dbms/inmemory/replication_handlers.cpp index d25e655bc..f6c985406 100644 --- a/src/dbms/inmemory/replication_handlers.cpp +++ b/src/dbms/inmemory/replication_handlers.cpp @@ -10,6 +10,7 @@ // licenses/APL.txt. 
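The DeferDelete() helper added to Handler above either tears a database down immediately (when its gatekeeper grants exclusive deletion via try_delete()) or hands the gatekeeper plus a post-delete callback to the single-thread defer_pool_, so teardown runs once the last accessor is gone. Below is a minimal standalone sketch of that shape, assuming std::shared_ptr as a stand-in for the gatekeeper and a plain task queue in place of utils::ThreadPool; every name in it is illustrative, not the Memgraph API.

#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Database {
  explicit Database(std::string name) : name(std::move(name)) {}
  ~Database() { std::cout << "storage for \"" << name << "\" destroyed\n"; }
  std::string name;
};

// Deferred-deletion queue standing in for the single-thread utils::ThreadPool;
// in the real handler the tasks run on a background thread, here they are
// simply drained later by the caller.
using DeferQueue = std::vector<std::function<void()>>;

// DeferDelete-like helper: if we hold the only reference, the database is
// destroyed immediately; otherwise destruction (and the post-delete hook) is
// parked in the queue until the last accessor lets go and the task runs.
void DeferDelete(std::shared_ptr<Database> handle, DeferQueue &queue,
                 std::function<void()> post_delete) {
  if (handle.use_count() == 1) {
    handle.reset();  // no active accessors -> delete now
    post_delete();
    return;
  }
  queue.push_back([h = std::move(handle), hook = std::move(post_delete)]() mutable {
    h.reset();  // drop the deferred reference; storage is freed once all refs are gone
    hook();
  });
}

int main() {
  DeferQueue deferred;
  auto db = std::make_shared<Database>("memgraph");
  auto accessor = db;  // simulates a client that is still using the database

  DeferDelete(std::move(db), deferred, [] { std::cout << "post-delete hook ran\n"; });

  accessor.reset();                    // the last "client" finishes
  for (auto &task : deferred) task();  // the deferred teardown finally runs
}

In both paths the real DeferDelete() also erases the entry from the handler's map up front ("In any case remove from handled map"), so no new accessors are handed out while a deferred teardown is still pending.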
#include "dbms/inmemory/replication_handlers.hpp" +#include #include #include "dbms/constants.hpp" #include "dbms/dbms_handler.hpp" @@ -22,7 +23,7 @@ #include "storage/v2/inmemory/storage.hpp" #include "storage/v2/inmemory/unique_constraints.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; using memgraph::storage::Delta; using memgraph::storage::EdgeAccessor; using memgraph::storage::EdgeRef; @@ -49,29 +50,29 @@ std::pair ReadDelta(storage::durability::BaseDecoder *de } }; -std::optional GetDatabaseAccessor(dbms::DbmsHandler *dbms_handler, std::string_view db_name) { +std::optional GetDatabaseAccessor(dbms::DbmsHandler *dbms_handler, const utils::UUID &uuid) { try { #ifdef MG_ENTERPRISE - auto acc = dbms_handler->Get(db_name); -#else - if (db_name != dbms::kDefaultDB) { - spdlog::warn("Trying to replicate a non-default database on a community replica."); - return std::nullopt; - } - auto acc = dbms_handler->Get(); -#endif + auto acc = dbms_handler->Get(uuid); if (!acc) { - spdlog::error("Failed to get access to ", db_name); + spdlog::error("Failed to get access to UUID ", std::string{uuid}); return std::nullopt; } +#else + auto acc = dbms_handler->Get(); + if (!acc) { + spdlog::warn("Failed to get access to the default db."); + return std::nullopt; + } +#endif auto *inmem_storage = dynamic_cast(acc.get()->storage()); if (!inmem_storage || inmem_storage->storage_mode_ != storage::StorageMode::IN_MEMORY_TRANSACTIONAL) { - spdlog::error("Database \"{}\" is not IN_MEMORY_TRANSACTIONAL.", db_name); + spdlog::error("Database is not IN_MEMORY_TRANSACTIONAL."); return std::nullopt; } return std::optional{std::move(acc)}; } catch (const dbms::UnknownDatabaseException &e) { - spdlog::warn("No database \"{}\" on replica!", db_name); + spdlog::warn("No database with UUID \"{}\" on replica!", std::string{uuid}); return std::nullopt; } } @@ -109,13 +110,16 @@ void InMemoryReplicationHandlers::HeartbeatHandler(dbms::DbmsHandler *dbms_handl slk::Builder *res_builder) { storage::replication::HeartbeatReq req; slk::Load(&req, req_reader); - auto const db_acc = GetDatabaseAccessor(dbms_handler, req.db_name); - if (!db_acc) return; + auto const db_acc = GetDatabaseAccessor(dbms_handler, req.uuid); + if (!db_acc) { + storage::replication::HeartbeatRes res{false, 0, ""}; + slk::Save(res, res_builder); + return; + } // TODO: this handler is agnostic of InMemory, move to be reused by on-disk auto const *storage = db_acc->get()->storage(); - storage::replication::HeartbeatRes res{storage->id(), true, - storage->repl_storage_state_.last_commit_timestamp_.load(), + storage::replication::HeartbeatRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load(), std::string{storage->repl_storage_state_.epoch_.id()}}; slk::Save(res, res_builder); } @@ -124,8 +128,12 @@ void InMemoryReplicationHandlers::AppendDeltasHandler(dbms::DbmsHandler *dbms_ha slk::Builder *res_builder) { storage::replication::AppendDeltasReq req; slk::Load(&req, req_reader); - auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name); - if (!db_acc) return; + auto db_acc = GetDatabaseAccessor(dbms_handler, req.uuid); + if (!db_acc) { + storage::replication::AppendDeltasRes res{false, 0}; + slk::Save(res, res_builder); + return; + } storage::replication::Decoder decoder(req_reader); @@ -165,7 +173,7 @@ void InMemoryReplicationHandlers::AppendDeltasHandler(dbms::DbmsHandler *dbms_ha storage::durability::kVersion); // TODO: Check if we are always using the latest version 
when replicating } - storage::replication::AppendDeltasRes res{storage->id(), false, repl_storage_state.last_commit_timestamp_.load()}; + storage::replication::AppendDeltasRes res{false, repl_storage_state.last_commit_timestamp_.load()}; slk::Save(res, res_builder); return; } @@ -174,7 +182,7 @@ void InMemoryReplicationHandlers::AppendDeltasHandler(dbms::DbmsHandler *dbms_ha storage, &decoder, storage::durability::kVersion); // TODO: Check if we are always using the latest version when replicating - storage::replication::AppendDeltasRes res{storage->id(), true, repl_storage_state.last_commit_timestamp_.load()}; + storage::replication::AppendDeltasRes res{true, repl_storage_state.last_commit_timestamp_.load()}; slk::Save(res, res_builder); spdlog::debug("Replication recovery from append deltas finished, replica is now up to date!"); } @@ -183,8 +191,12 @@ void InMemoryReplicationHandlers::SnapshotHandler(dbms::DbmsHandler *dbms_handle slk::Builder *res_builder) { storage::replication::SnapshotReq req; slk::Load(&req, req_reader); - auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name); - if (!db_acc) return; + auto db_acc = GetDatabaseAccessor(dbms_handler, req.uuid); + if (!db_acc) { + storage::replication::SnapshotRes res{false, 0}; + slk::Save(res, res_builder); + return; + } storage::replication::Decoder decoder(req_reader); @@ -232,8 +244,7 @@ void InMemoryReplicationHandlers::SnapshotHandler(dbms::DbmsHandler *dbms_handle } storage_guard.unlock(); - storage::replication::SnapshotRes res{storage->id(), true, - storage->repl_storage_state_.last_commit_timestamp_.load()}; + storage::replication::SnapshotRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()}; slk::Save(res, res_builder); spdlog::trace("Deleting old snapshot files due to snapshot recovery."); @@ -263,8 +274,12 @@ void InMemoryReplicationHandlers::WalFilesHandler(dbms::DbmsHandler *dbms_handle slk::Builder *res_builder) { storage::replication::WalFilesReq req; slk::Load(&req, req_reader); - auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name); - if (!db_acc) return; + auto db_acc = GetDatabaseAccessor(dbms_handler, req.uuid); + if (!db_acc) { + storage::replication::WalFilesRes res{false, 0}; + slk::Save(res, res_builder); + return; + } const auto wal_file_number = req.file_number; spdlog::debug("Received WAL files: {}", wal_file_number); @@ -278,8 +293,7 @@ void InMemoryReplicationHandlers::WalFilesHandler(dbms::DbmsHandler *dbms_handle LoadWal(storage, &decoder); } - storage::replication::WalFilesRes res{storage->id(), true, - storage->repl_storage_state_.last_commit_timestamp_.load()}; + storage::replication::WalFilesRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()}; slk::Save(res, res_builder); spdlog::debug("Replication recovery from WAL files ended successfully, replica is now up to date!"); } @@ -288,8 +302,12 @@ void InMemoryReplicationHandlers::CurrentWalHandler(dbms::DbmsHandler *dbms_hand slk::Builder *res_builder) { storage::replication::CurrentWalReq req; slk::Load(&req, req_reader); - auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name); - if (!db_acc) return; + auto db_acc = GetDatabaseAccessor(dbms_handler, req.uuid); + if (!db_acc) { + storage::replication::CurrentWalRes res{false, 0}; + slk::Save(res, res_builder); + return; + } storage::replication::Decoder decoder(req_reader); @@ -298,8 +316,7 @@ void InMemoryReplicationHandlers::CurrentWalHandler(dbms::DbmsHandler *dbms_hand LoadWal(storage, &decoder); - 
storage::replication::CurrentWalRes res{storage->id(), true, - storage->repl_storage_state_.last_commit_timestamp_.load()}; + storage::replication::CurrentWalRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()}; slk::Save(res, res_builder); spdlog::debug("Replication recovery from current WAL ended successfully, replica is now up to date!"); } @@ -318,6 +335,8 @@ void InMemoryReplicationHandlers::LoadWal(storage::InMemoryStorage *storage, sto } auto &replica_epoch = storage->repl_storage_state_.epoch_; if (wal_info.epoch_id != replica_epoch.id()) { + // questionable behaviour, we trust that any change in epoch implies change in who is MAIN + // when we use high availability, this assumption need to be checked. auto prev_epoch = replica_epoch.SetEpoch(wal_info.epoch_id); storage->repl_storage_state_.AddEpochToHistoryForce(prev_epoch); } @@ -355,13 +374,16 @@ void InMemoryReplicationHandlers::TimestampHandler(dbms::DbmsHandler *dbms_handl slk::Builder *res_builder) { storage::replication::TimestampReq req; slk::Load(&req, req_reader); - auto const db_acc = GetDatabaseAccessor(dbms_handler, req.db_name); - if (!db_acc) return; + auto const db_acc = GetDatabaseAccessor(dbms_handler, req.uuid); + if (!db_acc) { + storage::replication::TimestampRes res{false, 0}; + slk::Save(res, res_builder); + return; + } // TODO: this handler is agnostic of InMemory, move to be reused by on-disk auto const *storage = db_acc->get()->storage(); - storage::replication::TimestampRes res{storage->id(), true, - storage->repl_storage_state_.last_commit_timestamp_.load()}; + storage::replication::TimestampRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()}; slk::Save(res, res_builder); } @@ -508,7 +530,7 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage case WalDeltaData::Type::EDGE_SET_PROPERTY: { spdlog::trace(" Edge {} set property {} to {}", delta.vertex_edge_set_property.gid.AsUint(), delta.vertex_edge_set_property.property, delta.vertex_edge_set_property.value); - if (!storage->config_.items.properties_on_edges) + if (!storage->config_.salient.items.properties_on_edges) throw utils::BasicException( "Can't set properties on edges because properties on edges " "are disabled!"); @@ -575,8 +597,8 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage spdlog::trace(" Transaction end"); if (!commit_timestamp_and_accessor || commit_timestamp_and_accessor->first != timestamp) throw utils::BasicException("Invalid commit data!"); - auto ret = - commit_timestamp_and_accessor->second.Commit(commit_timestamp_and_accessor->first, false /* not main */); + auto ret = commit_timestamp_and_accessor->second.Commit( + {.desired_commit_timestamp = commit_timestamp_and_accessor->first, .is_main = false}); if (ret.HasError()) throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__); commit_timestamp_and_accessor = std::nullopt; diff --git a/src/dbms/inmemory/replication_handlers.hpp b/src/dbms/inmemory/replication_handlers.hpp index fc76d2b3a..4f6523747 100644 --- a/src/dbms/inmemory/replication_handlers.hpp +++ b/src/dbms/inmemory/replication_handlers.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -12,7 +12,6 @@ #pragma once #include "replication/replication_server.hpp" -#include "replication/state.hpp" #include "storage/v2/replication/serialization.hpp" namespace memgraph::storage { diff --git a/src/dbms/inmemory/storage_helper.hpp b/src/dbms/inmemory/storage_helper.hpp index 8e38f0a0f..fa1b9646a 100644 --- a/src/dbms/inmemory/storage_helper.hpp +++ b/src/dbms/inmemory/storage_helper.hpp @@ -24,8 +24,7 @@ namespace memgraph::dbms { inline std::unique_ptr CreateInMemoryStorage(storage::Config config, ::memgraph::replication::ReplicationState &repl_state) { - const auto wal_mode = config.durability.snapshot_wal_mode; - const auto name = config.name; + const auto name = config.salient.name; auto storage = std::make_unique(std::move(config)); // Connect replication state and storage @@ -34,24 +33,6 @@ inline std::unique_ptr CreateInMemoryStorage(storage::Config c return storage->CreateSnapshot(repl_state.GetRole()); }); - if (allow_mt_repl || name == dbms::kDefaultDB) { - // Handle global replication state - spdlog::info("Replication configuration will be stored and will be automatically restored in case of a crash."); - // RECOVER REPLICA CONNECTIONS - memgraph::dbms::RestoreReplication(repl_state, *storage); - } else if (const ::memgraph::replication::RoleMainData *data = - std::get_if<::memgraph::replication::RoleMainData>(&repl_state.ReplicationData()); - data && !data->registered_replicas_.empty()) { - spdlog::warn("Multi-tenant replication is currently not supported!"); - } - - if (wal_mode == storage::Config::Durability::SnapshotWalMode::DISABLED && repl_state.IsMain()) { - spdlog::warn( - "The instance has the MAIN replication role, but durability logs and snapshots are disabled. Please consider " - "enabling durability by using --storage-snapshot-interval-sec and --storage-wal-enabled flags because " - "without write-ahead logs this instance is not replicating any data."); - } - return std::move(storage); } diff --git a/src/dbms/replication_client.cpp b/src/dbms/replication_client.cpp index bfa4c622f..fa0c30daa 100644 --- a/src/dbms/replication_client.cpp +++ b/src/dbms/replication_client.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -10,6 +10,7 @@ // licenses/APL.txt. 
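The replica-side RPC handlers above (Heartbeat, AppendDeltas, Snapshot, WalFiles, CurrentWal, Timestamp) now resolve the target database by UUID instead of by name, and when the lookup fails they reply with an explicit failure response rather than returning without answering. A simplified standalone sketch of that shared shape, with plain structs standing in for the slk-serialized request/response types and an in-memory map standing in for DbmsHandler (all names below are illustrative):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Illustrative stand-ins: the real code uses utils::UUID, slk serialization and
// the storage::replication request/response structs shown in the hunks above.
struct HeartbeatReq { std::string uuid; };
struct HeartbeatRes { bool success; uint64_t last_commit_timestamp; std::string epoch_id; };
struct Database { uint64_t last_commit_timestamp; std::string epoch_id; };

std::map<std::string, Database> databases;  // uuid -> database

// Shared handler shape after this change: resolve the database by UUID and
// always send a response, answering success=false instead of silently
// dropping the request when the database is unknown.
HeartbeatRes HeartbeatHandler(const HeartbeatReq &req) {
  auto it = databases.find(req.uuid);
  if (it == databases.end()) {
    return {false, 0, ""};  // unknown UUID -> explicit failure response
  }
  const auto &db = it->second;
  return {true, db.last_commit_timestamp, db.epoch_id};
}

int main() {
  databases.emplace("db-uuid-1", Database{42, "epoch-1"});
  const auto ok = HeartbeatHandler({"db-uuid-1"});
  const auto missing = HeartbeatHandler({"no-such-uuid"});
  std::cout << ok.success << " " << missing.success << "\n";  // prints: 1 0
}

Keying on the UUID rather than the name also lets a replica tell a re-created database apart from the original one, which is exactly the case DbmsHandler::Update() above checks for.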
#include "dbms/replication_client.hpp" +#include "replication/replication_client.hpp" namespace memgraph::dbms { @@ -17,18 +18,26 @@ void StartReplicaClient(DbmsHandler &dbms_handler, replication::ReplicationClien // No client error, start instance level client auto const &endpoint = client.rpc_client_.Endpoint(); spdlog::trace("Replication client started at: {}:{}", endpoint.address, endpoint.port); - client.StartFrequentCheck([&dbms_handler](std::string_view name) { - // Working connection, check if any database has been left behind - dbms_handler.ForEach([name](dbms::Database *db) { + client.StartFrequentCheck([&dbms_handler](bool reconnect, replication::ReplicationClient &client) { + // Working connection + // Check if system needs restoration + if (reconnect) { + client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; }); + } +#ifdef MG_ENTERPRISE + dbms_handler.SystemRestore(client); +#endif + // Check if any database has been left behind + dbms_handler.ForEach([&name = client.name_, reconnect](dbms::DatabaseAccess db_acc) { // Specific database <-> replica client - db->storage()->repl_storage_state_.WithClient(name, [&](storage::ReplicationStorageClient *client) { - if (client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) { + db_acc->storage()->repl_storage_state_.WithClient(name, [&](storage::ReplicationStorageClient *client) { + if (reconnect || client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) { // Database <-> replica might be behind, check and recover - client->TryCheckReplicaStateAsync(db->storage()); + client->TryCheckReplicaStateAsync(db_acc->storage(), db_acc); } }); }); }); -} +} // namespace memgraph::dbms } // namespace memgraph::dbms diff --git a/src/dbms/replication_handler.cpp b/src/dbms/replication_handler.cpp index 2cbe2c432..285752f76 100644 --- a/src/dbms/replication_handler.cpp +++ b/src/dbms/replication_handler.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -11,15 +11,21 @@ #include "dbms/replication_handler.hpp" +#include + #include "dbms/constants.hpp" #include "dbms/dbms_handler.hpp" +#include "dbms/global.hpp" #include "dbms/inmemory/replication_handlers.hpp" -#include "dbms/inmemory/storage_helper.hpp" #include "dbms/replication_client.hpp" +#include "dbms/utils.hpp" +#include "replication/messages.hpp" #include "replication/state.hpp" +#include "spdlog/spdlog.h" +#include "storage/v2/config.hpp" +#include "storage/v2/replication/rpc.hpp" +#include "utils/on_scope_exit.hpp" -using memgraph::replication::ReplicationClientConfig; -using memgraph::replication::ReplicationState; using memgraph::replication::RoleMainData; using memgraph::replication::RoleReplicaData; @@ -32,8 +38,8 @@ std::string RegisterReplicaErrorToString(RegisterReplicaError error) { using enum RegisterReplicaError; case NAME_EXISTS: return "NAME_EXISTS"; - case END_POINT_EXISTS: - return "END_POINT_EXISTS"; + case ENDPOINT_EXISTS: + return "ENDPOINT_EXISTS"; case CONNECTION_FAILED: return "CONNECTION_FAILED"; case COULD_NOT_BE_PERSISTED: @@ -45,34 +51,13 @@ std::string RegisterReplicaErrorToString(RegisterReplicaError error) { ReplicationHandler::ReplicationHandler(DbmsHandler &dbms_handler) : dbms_handler_(dbms_handler) {} bool ReplicationHandler::SetReplicationRoleMain() { - auto const main_handler = [](RoleMainData const &) { + auto const main_handler = [](RoleMainData &) { // If we are already MAIN, we don't want to change anything return false; }; + auto const replica_handler = [this](RoleReplicaData const &) { - // STEP 1) bring down all REPLICA servers - dbms_handler_.ForEach([](Database *db) { - auto *storage = db->storage(); - // Remember old epoch + storage timestamp association - storage->PrepareForNewEpoch(); - }); - - // STEP 2) Change to MAIN - // TODO: restore replication servers if false? - if (!dbms_handler_.ReplicationState().SetReplicationRoleMain()) { - // TODO: Handle recovery on failure??? - return false; - } - - // STEP 3) We are now MAIN, update storage local epoch - const auto &epoch = - std::get(std::as_const(dbms_handler_.ReplicationState()).ReplicationData()).epoch_; - dbms_handler_.ForEach([&](Database *db) { - auto *storage = db->storage(); - storage->repl_storage_state_.epoch_ = epoch; - }); - - return true; + return memgraph::dbms::DoReplicaToMainPromotion(dbms_handler_); }; // TODO: under lock @@ -89,8 +74,8 @@ bool ReplicationHandler::SetReplicationRoleReplica(const memgraph::replication:: // TODO StorageState needs to be synched. Could have a dangling reference if someone adds a database as we are // deleting the replica. 
// Remove database specific clients - dbms_handler_.ForEach([&](Database *db) { - auto *storage = db->storage(); + dbms_handler_.ForEach([&](DatabaseAccess db_acc) { + auto *storage = db_acc->storage(); storage->repl_storage_state_.replication_clients_.WithLock([](auto &clients) { clients.clear(); }); }); // Remove instance level clients @@ -105,15 +90,7 @@ bool ReplicationHandler::SetReplicationRoleReplica(const memgraph::replication:: // ASSERT return false; }, - [this](RoleReplicaData const &data) { - // Register handlers - InMemoryReplicationHandlers::Register(&dbms_handler_, *data.server); - if (!data.server->Start()) { - spdlog::error("Unable to start the replication server."); - return false; - } - return true; - }}, + [this](RoleReplicaData const &data) { return StartRpcServer(dbms_handler_, data); }}, dbms_handler_.ReplicationState().ReplicationData()); // TODO Handle error (restore to main?) return success; @@ -123,60 +100,48 @@ auto ReplicationHandler::RegisterReplica(const memgraph::replication::Replicatio -> memgraph::utils::BasicResult { MG_ASSERT(dbms_handler_.ReplicationState().IsMain(), "Only main instance can register a replica!"); - auto instance_client = dbms_handler_.ReplicationState().RegisterReplica(config); - if (instance_client.HasError()) switch (instance_client.GetError()) { + auto maybe_client = dbms_handler_.ReplicationState().RegisterReplica(config); + if (maybe_client.HasError()) { + switch (maybe_client.GetError()) { case memgraph::replication::RegisterReplicaError::NOT_MAIN: MG_ASSERT(false, "Only main instance can register a replica!"); return {}; case memgraph::replication::RegisterReplicaError::NAME_EXISTS: return memgraph::dbms::RegisterReplicaError::NAME_EXISTS; - case memgraph::replication::RegisterReplicaError::END_POINT_EXISTS: - return memgraph::dbms::RegisterReplicaError::END_POINT_EXISTS; + case memgraph::replication::RegisterReplicaError::ENDPOINT_EXISTS: + return memgraph::dbms::RegisterReplicaError::ENDPOINT_EXISTS; case memgraph::replication::RegisterReplicaError::COULD_NOT_BE_PERSISTED: return memgraph::dbms::RegisterReplicaError::COULD_NOT_BE_PERSISTED; case memgraph::replication::RegisterReplicaError::SUCCESS: break; } + } if (!allow_mt_repl && dbms_handler_.All().size() > 1) { spdlog::warn("Multi-tenant replication is currently not supported!"); } - bool all_clients_good = true; +#ifdef MG_ENTERPRISE + // Update system before enabling individual storage <-> replica clients + dbms_handler_.SystemRestore(*maybe_client.GetValue()); +#endif - // Add database specific clients (NOTE Currently all databases are connected to each replica) - dbms_handler_.ForEach([&](Database *db) { - auto *storage = db->storage(); - if (!allow_mt_repl && storage->id() != kDefaultDB) { - return; - } - // TODO: ATM only IN_MEMORY_TRANSACTIONAL, fix other modes - if (storage->storage_mode_ != storage::StorageMode::IN_MEMORY_TRANSACTIONAL) return; - - all_clients_good &= - storage->repl_storage_state_.replication_clients_.WithLock([storage, &instance_client](auto &storage_clients) { - auto client = std::make_unique(*instance_client.GetValue()); - client->Start(storage); - // After start the storage <-> replica state should be READY or RECOVERING (if correctly started) - // MAYBE_BEHIND isn't a statement of the current state, this is the default value - // Failed to start due to branching of MAIN and REPLICA - if (client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) { - return false; - } - storage_clients.push_back(std::move(client)); - return true; - 
}); - }); + const auto dbms_error = memgraph::dbms::HandleRegisterReplicaStatus(maybe_client); + if (dbms_error.has_value()) { + return *dbms_error; + } + auto &instance_client_ptr = maybe_client.GetValue(); + const bool all_clients_good = memgraph::dbms::RegisterAllDatabasesClients(dbms_handler_, *instance_client_ptr); // NOTE Currently if any databases fails, we revert back if (!all_clients_good) { - spdlog::error("Failed to register all databases to the REPLICA \"{}\"", config.name); + spdlog::error("Failed to register all databases on the REPLICA \"{}\"", config.name); UnregisterReplica(config.name); return RegisterReplicaError::CONNECTION_FAILED; } // No client error, start instance level client - StartReplicaClient(dbms_handler_, *instance_client.GetValue()); + StartReplicaClient(dbms_handler_, *instance_client_ptr); return {}; } @@ -189,8 +154,8 @@ auto ReplicationHandler::UnregisterReplica(std::string_view name) -> UnregisterR return UnregisterReplicaResult::COULD_NOT_BE_PERSISTED; } // Remove database specific clients - dbms_handler_.ForEach([name](Database *db) { - db->storage()->repl_storage_state_.replication_clients_.WithLock([&name](auto &clients) { + dbms_handler_.ForEach([name](DatabaseAccess db_acc) { + db_acc->storage()->repl_storage_state_.replication_clients_.WithLock([&name](auto &clients) { std::erase_if(clients, [name](const auto &client) { return client->Name() == name; }); }); }); @@ -204,7 +169,7 @@ auto ReplicationHandler::UnregisterReplica(std::string_view name) -> UnregisterR dbms_handler_.ReplicationState().ReplicationData()); } -auto ReplicationHandler::GetRole() const -> memgraph::replication::ReplicationRole { +auto ReplicationHandler::GetRole() const -> memgraph::replication_coordination_glue::ReplicationRole { return dbms_handler_.ReplicationState().GetRole(); } @@ -214,20 +179,20 @@ bool ReplicationHandler::IsReplica() const { return dbms_handler_.ReplicationSta // Per storage // NOTE Storage will connect to all replicas. Future work might change this -void RestoreReplication(replication::ReplicationState &repl_state, storage::Storage &storage) { +void RestoreReplication(replication::ReplicationState &repl_state, DatabaseAccess db_acc) { spdlog::info("Restoring replication role."); /// MAIN - auto const recover_main = [&storage](RoleMainData &mainData) { + auto const recover_main = [db_acc = std::move(db_acc)](RoleMainData &mainData) mutable { // NOLINT // Each individual client has already been restored and started. 
Here we just go through each database and start its // client for (auto &instance_client : mainData.registered_replicas_) { - spdlog::info("Replica {} restoration started for {}.", instance_client.name_, storage.id()); - - const auto &ret = storage.repl_storage_state_.replication_clients_.WithLock( - [&](auto &storage_clients) -> utils::BasicResult { + spdlog::info("Replica {} restoration started for {}.", instance_client.name_, db_acc->name()); + const auto &ret = db_acc->storage()->repl_storage_state_.replication_clients_.WithLock( + [&, db_acc](auto &storage_clients) mutable -> utils::BasicResult { auto client = std::make_unique(instance_client); - client->Start(&storage); + auto *storage = db_acc->storage(); + client->Start(storage, std::move(db_acc)); // After start the storage <-> replica state should be READY or RECOVERING (if correctly started) // MAYBE_BEHIND isn't a statement of the current state, this is the default value // Failed to start due to branching of MAIN and REPLICA @@ -244,7 +209,7 @@ void RestoreReplication(replication::ReplicationState &repl_state, storage::Stor LOG_FATAL("Failure when restoring replica {}: {}.", instance_client.name_, RegisterReplicaErrorToString(ret.GetError())); } - spdlog::info("Replica {} restored for {}.", instance_client.name_, storage.id()); + spdlog::info("Replica {} restored for {}.", instance_client.name_, db_acc->name()); } spdlog::info("Replication role restored to MAIN."); }; @@ -259,4 +224,177 @@ void RestoreReplication(replication::ReplicationState &repl_state, storage::Stor }, repl_state.ReplicationData()); } + +namespace system_replication { +#ifdef MG_ENTERPRISE +void SystemHeartbeatHandler(const uint64_t ts, slk::Reader *req_reader, slk::Builder *res_builder) { + replication::SystemHeartbeatReq req; + replication::SystemHeartbeatReq::Load(&req, req_reader); + + replication::SystemHeartbeatRes res(ts); + memgraph::slk::Save(res, res_builder); +} + +void CreateDatabaseHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder) { + memgraph::storage::replication::CreateDatabaseReq req; + memgraph::slk::Load(&req, req_reader); + + using memgraph::storage::replication::CreateDatabaseRes; + CreateDatabaseRes res(CreateDatabaseRes::Result::FAILURE); + + // Note: No need to check epoch, recovery mechanism is done by a full uptodate snapshot + // of the set of databases. Hence no history exists to maintain regarding epoch change. + // If MAIN has changed we need to check this new group_timestamp is consistent with + // what we have so far. + + if (req.expected_group_timestamp != dbms_handler.LastCommitedTS()) { + spdlog::debug("CreateDatabaseHandler: bad expected timestamp {},{}", req.expected_group_timestamp, + dbms_handler.LastCommitedTS()); + memgraph::slk::Save(res, res_builder); + return; + } + + try { + // Create new + auto new_db = dbms_handler.Update(req.config); + if (new_db.HasValue()) { + // Successfully create db + dbms_handler.SetLastCommitedTS(req.new_group_timestamp); + res = CreateDatabaseRes(CreateDatabaseRes::Result::SUCCESS); + spdlog::debug("CreateDatabaseHandler: SUCCESS updated LCTS to {}", req.new_group_timestamp); + } + } catch (...) 
{ + // Failure + } + + memgraph::slk::Save(res, res_builder); +} + +void DropDatabaseHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder) { + memgraph::storage::replication::DropDatabaseReq req; + memgraph::slk::Load(&req, req_reader); + + using memgraph::storage::replication::DropDatabaseRes; + DropDatabaseRes res(DropDatabaseRes::Result::FAILURE); + + // Note: No need to check epoch, recovery mechanism is done by a full uptodate snapshot + // of the set of databases. Hence no history exists to maintain regarding epoch change. + // If MAIN has changed we need to check this new group_timestamp is consistent with + // what we have so far. + + if (req.expected_group_timestamp != dbms_handler.LastCommitedTS()) { + spdlog::debug("DropDatabaseHandler: bad expected timestamp {},{}", req.expected_group_timestamp, + dbms_handler.LastCommitedTS()); + memgraph::slk::Save(res, res_builder); + return; + } + + try { + // NOTE: Single communication channel can exist at a time, no other database can be deleted/created at the moment. + auto new_db = dbms_handler.Delete(req.uuid); + if (new_db.HasError()) { + if (new_db.GetError() == DeleteError::NON_EXISTENT) { + // Nothing to drop + dbms_handler.SetLastCommitedTS(req.new_group_timestamp); + res = DropDatabaseRes(DropDatabaseRes::Result::NO_NEED); + } + } else { + // Successfully drop db + dbms_handler.SetLastCommitedTS(req.new_group_timestamp); + res = DropDatabaseRes(DropDatabaseRes::Result::SUCCESS); + spdlog::debug("DropDatabaseHandler: SUCCESS updated LCTS to {}", req.new_group_timestamp); + } + } catch (...) { + // Failure + } + + memgraph::slk::Save(res, res_builder); +} + +void SystemRecoveryHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder) { + // TODO Speed up + memgraph::storage::replication::SystemRecoveryReq req; + memgraph::slk::Load(&req, req_reader); + + using memgraph::storage::replication::SystemRecoveryRes; + SystemRecoveryRes res(SystemRecoveryRes::Result::FAILURE); + + utils::OnScopeExit send_on_exit([&]() { memgraph::slk::Save(res, res_builder); }); + + // Get all current dbs + auto old = dbms_handler.All(); + + // Check/create the incoming dbs + for (const auto &config : req.database_configs) { + // Missing db + try { + if (dbms_handler.Update(config).HasError()) { + spdlog::debug("SystemRecoveryHandler: Failed to update database \"{}\".", config.name); + return; // Send failure on exit + } + } catch (const UnknownDatabaseException &) { + spdlog::debug("SystemRecoveryHandler: UnknownDatabaseException"); + return; // Send failure on exit + } + const auto it = std::find(old.begin(), old.end(), config.name); + if (it != old.end()) old.erase(it); + } + + // Delete all the leftover old dbs + for (const auto &remove_db : old) { + const auto del = dbms_handler.Delete(remove_db); + if (del.HasError()) { + // Some errors are not terminal + if (del.GetError() == DeleteError::DEFAULT_DB || del.GetError() == DeleteError::NON_EXISTENT) { + spdlog::debug("SystemRecoveryHandler: Dropped database \"{}\".", remove_db); + continue; + } + spdlog::debug("SystemRecoveryHandler: Failed to drop database \"{}\".", remove_db); + return; // Send failure on exit + } + } + // Successfully recovered + dbms_handler.SetLastCommitedTS(req.forced_group_timestamp); + spdlog::debug("SystemRecoveryHandler: SUCCESS updated LCTS to {}", req.forced_group_timestamp); + res = SystemRecoveryRes(SystemRecoveryRes::Result::SUCCESS); +} +#endif + +void Register(replication::RoleReplicaData const &data, 
dbms::DbmsHandler &dbms_handler) { +#ifdef MG_ENTERPRISE + data.server->rpc_server_.Register( + [&dbms_handler](auto *req_reader, auto *res_builder) { + spdlog::debug("Received SystemHeartbeatRpc"); + SystemHeartbeatHandler(dbms_handler.LastCommitedTS(), req_reader, res_builder); + }); + data.server->rpc_server_.Register( + [&dbms_handler](auto *req_reader, auto *res_builder) { + spdlog::debug("Received CreateDatabaseRpc"); + CreateDatabaseHandler(dbms_handler, req_reader, res_builder); + }); + data.server->rpc_server_.Register( + [&dbms_handler](auto *req_reader, auto *res_builder) { + spdlog::debug("Received DropDatabaseRpc"); + DropDatabaseHandler(dbms_handler, req_reader, res_builder); + }); + data.server->rpc_server_.Register( + [&dbms_handler](auto *req_reader, auto *res_builder) { + spdlog::debug("Received SystemRecoveryRpc"); + SystemRecoveryHandler(dbms_handler, req_reader, res_builder); + }); +#endif +} +} // namespace system_replication + +bool StartRpcServer(DbmsHandler &dbms_handler, const replication::RoleReplicaData &data) { + // Register handlers + InMemoryReplicationHandlers::Register(&dbms_handler, *data.server); + system_replication::Register(data, dbms_handler); + // Start server + if (!data.server->Start()) { + spdlog::error("Unable to start the replication server."); + return false; + } + return true; +} } // namespace memgraph::dbms diff --git a/src/dbms/replication_handler.hpp b/src/dbms/replication_handler.hpp index dc95407b1..53c64e34b 100644 --- a/src/dbms/replication_handler.hpp +++ b/src/dbms/replication_handler.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -11,11 +11,10 @@ #pragma once -#include "replication/role.hpp" -#include "storage/v2/storage.hpp" +#include "replication_coordination_glue/role.hpp" +#include "dbms/database.hpp" #include "utils/result.hpp" -// BEGIN fwd declares namespace memgraph::replication { struct ReplicationState; struct ReplicationServerConfig; @@ -23,9 +22,11 @@ struct ReplicationClientConfig; } // namespace memgraph::replication namespace memgraph::dbms { + class DbmsHandler; -enum class RegisterReplicaError : uint8_t { NAME_EXISTS, END_POINT_EXISTS, CONNECTION_FAILED, COULD_NOT_BE_PERSISTED }; +enum class RegisterReplicaError : uint8_t { NAME_EXISTS, ENDPOINT_EXISTS, CONNECTION_FAILED, COULD_NOT_BE_PERSISTED }; + enum class UnregisterReplicaResult : uint8_t { NOT_MAIN, COULD_NOT_BE_PERSISTED, @@ -52,7 +53,7 @@ struct ReplicationHandler { auto UnregisterReplica(std::string_view name) -> UnregisterReplicaResult; // Helper pass-through (TODO: remove) - auto GetRole() const -> memgraph::replication::ReplicationRole; + auto GetRole() const -> memgraph::replication_coordination_glue::ReplicationRole; bool IsMain() const; bool IsReplica() const; @@ -62,6 +63,20 @@ struct ReplicationHandler { /// A handler type that keep in sync current ReplicationState and the MAIN/REPLICA-ness of Storage /// TODO: extend to do multiple storages -void RestoreReplication(replication::ReplicationState &repl_state, storage::Storage &storage); +void RestoreReplication(replication::ReplicationState &repl_state, DatabaseAccess db_acc); + +namespace system_replication { +// System handlers +#ifdef MG_ENTERPRISE +void CreateDatabaseHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder); +void 
SystemHeartbeatHandler(uint64_t ts, slk::Reader *req_reader, slk::Builder *res_builder); +void SystemRecoveryHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder); +#endif + +/// Register all DBMS level RPC handlers +void Register(replication::RoleReplicaData const &data, DbmsHandler &dbms_handler); +} // namespace system_replication + +bool StartRpcServer(DbmsHandler &dbms_handler, const replication::RoleReplicaData &data); } // namespace memgraph::dbms diff --git a/src/dbms/transaction.hpp b/src/dbms/transaction.hpp new file mode 100644 index 000000000..7167d9ec5 --- /dev/null +++ b/src/dbms/transaction.hpp @@ -0,0 +1,64 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#include +#include "storage/v2/config.hpp" + +namespace memgraph::dbms { +struct SystemTransaction { + struct Delta { + enum class Action { + CREATE_DATABASE, + DROP_DATABASE, + }; + + static constexpr struct CreateDatabase { + } create_database; + static constexpr struct DropDatabase { + } drop_database; + + Delta(CreateDatabase /*tag*/, storage::SalientConfig config) + : action(Action::CREATE_DATABASE), config(std::move(config)) {} + Delta(DropDatabase /*tag*/, const utils::UUID &uuid) : action(Action::DROP_DATABASE), uuid(uuid) {} + + Delta(const Delta &) = delete; + Delta(Delta &&) = delete; + Delta &operator=(const Delta &) = delete; + Delta &operator=(Delta &&) = delete; + + ~Delta() { + switch (action) { + case Action::CREATE_DATABASE: + std::destroy_at(&config); + break; + case Action::DROP_DATABASE: + break; + // Some deltas might have special destructor handling + } + } + + Action action; + union { + storage::SalientConfig config; + utils::UUID uuid; + }; + }; + + explicit SystemTransaction(uint64_t timestamp) : system_timestamp(timestamp) {} + + // Currently system transitions support a single delta + std::optional delta{}; + uint64_t system_timestamp; +}; + +} // namespace memgraph::dbms diff --git a/src/dbms/utils.hpp b/src/dbms/utils.hpp new file mode 100644 index 000000000..fd5db9cf1 --- /dev/null +++ b/src/dbms/utils.hpp @@ -0,0 +1,133 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
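SystemTransaction above carries at most one Delta, a hand-rolled tagged union: the tag-typed constructors activate exactly one union member, and the destructor destroys only the member the active tag owns (std::destroy_at for the SalientConfig, nothing for the trivially destructible UUID). A standalone sketch of the same pattern, with simplified Config/Uuid payloads standing in for storage::SalientConfig and utils::UUID (both stand-ins are assumptions for illustration only):

#include <cstdint>
#include <iostream>
#include <memory>
#include <optional>
#include <string>
#include <utility>

// Simplified payloads standing in for storage::SalientConfig and utils::UUID.
struct Config { std::string name; };
using Uuid = std::uint64_t;

struct Delta {
  enum class Action { CREATE_DATABASE, DROP_DATABASE };

  // Tag types playing the role of the constexpr create_database / drop_database tags.
  struct CreateDatabase {};
  struct DropDatabase {};

  Delta(CreateDatabase /*tag*/, Config config) : action(Action::CREATE_DATABASE), config(std::move(config)) {}
  Delta(DropDatabase /*tag*/, Uuid uuid) : action(Action::DROP_DATABASE), uuid(uuid) {}

  Delta(const Delta &) = delete;
  Delta &operator=(const Delta &) = delete;

  ~Delta() {
    // Only the member selected by the tag was ever constructed; destroy just that one.
    if (action == Action::CREATE_DATABASE) std::destroy_at(&config);
    // DROP_DATABASE holds a trivially destructible Uuid, so nothing to do for it.
  }

  Action action;
  union {
    Config config;  // active for CREATE_DATABASE
    Uuid uuid;      // active for DROP_DATABASE
  };
};

int main() {
  // A system transaction carries at most one delta, held in a std::optional.
  std::optional<Delta> delta;
  delta.emplace(Delta::CreateDatabase{}, Config{"my_db"});
  std::cout << "delta holds config for: " << delta->config.name << "\n";
  delta.reset();  // ~Delta() runs and destroys the Config member via std::destroy_at
}

The comment left in the real destructor ("Some deltas might have special destructor handling") signals that new actions are expected to extend that switch by hand as further system operations are added.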
+#pragma once + +#include "dbms/dbms_handler.hpp" +#include "dbms/replication_handler.hpp" +#include "replication/include/replication/state.hpp" +#include "utils/result.hpp" + +namespace memgraph::dbms { + +inline bool DoReplicaToMainPromotion(dbms::DbmsHandler &dbms_handler) { + auto &repl_state = dbms_handler.ReplicationState(); + // STEP 1) bring down all REPLICA servers + dbms_handler.ForEach([](DatabaseAccess db_acc) { + auto *storage = db_acc->storage(); + // Remember old epoch + storage timestamp association + storage->PrepareForNewEpoch(); + }); + + // STEP 2) Change to MAIN + // TODO: restore replication servers if false? + if (!repl_state.SetReplicationRoleMain()) { + // TODO: Handle recovery on failure??? + return false; + } + + // STEP 3) We are now MAIN, update storage local epoch + const auto &epoch = + std::get(std::as_const(dbms_handler.ReplicationState()).ReplicationData()).epoch_; + dbms_handler.ForEach([&](DatabaseAccess db_acc) { + auto *storage = db_acc->storage(); + storage->repl_storage_state_.epoch_ = epoch; + }); + + return true; +}; + +inline bool SetReplicationRoleReplica(dbms::DbmsHandler &dbms_handler, + const memgraph::replication::ReplicationServerConfig &config) { + if (dbms_handler.ReplicationState().IsReplica()) { + return false; + } + + // TODO StorageState needs to be synched. Could have a dangling reference if someone adds a database as we are + // deleting the replica. + // Remove database specific clients + dbms_handler.ForEach([&](DatabaseAccess db_acc) { + auto *storage = db_acc->storage(); + storage->repl_storage_state_.replication_clients_.WithLock([](auto &clients) { clients.clear(); }); + }); + // Remove instance level clients + std::get(dbms_handler.ReplicationState().ReplicationData()).registered_replicas_.clear(); + + // Creates the server + dbms_handler.ReplicationState().SetReplicationRoleReplica(config); + + // Start + const auto success = std::visit(utils::Overloaded{[](replication::RoleMainData const &) { + // ASSERT + return false; + }, + [&dbms_handler](replication::RoleReplicaData const &data) { + return StartRpcServer(dbms_handler, data); + }}, + dbms_handler.ReplicationState().ReplicationData()); + // TODO Handle error (restore to main?) 
+ return success; +} + +inline bool RegisterAllDatabasesClients(dbms::DbmsHandler &dbms_handler, + replication::ReplicationClient &instance_client) { + if (!allow_mt_repl && dbms_handler.All().size() > 1) { + spdlog::warn("Multi-tenant replication is currently not supported!"); + } + + bool all_clients_good = true; + + // Add database specific clients (NOTE Currently all databases are connected to each replica) + dbms_handler.ForEach([&](DatabaseAccess db_acc) { + auto *storage = db_acc->storage(); + if (!allow_mt_repl && storage->name() != kDefaultDB) { + return; + } + // TODO: ATM only IN_MEMORY_TRANSACTIONAL, fix other modes + if (storage->storage_mode_ != storage::StorageMode::IN_MEMORY_TRANSACTIONAL) return; + + all_clients_good &= storage->repl_storage_state_.replication_clients_.WithLock( + [storage, &instance_client, db_acc = std::move(db_acc)](auto &storage_clients) mutable { // NOLINT + auto client = std::make_unique(instance_client); + // All good, start replica client + client->Start(storage, std::move(db_acc)); + // After start the storage <-> replica state should be READY or RECOVERING (if correctly started) + // MAYBE_BEHIND isn't a statement of the current state, this is the default value + // Failed to start due an error like branching of MAIN and REPLICA + if (client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) { + return false; // TODO: sometimes we need to still add to storage_clients + } + storage_clients.push_back(std::move(client)); + return true; + }); + }); + + return all_clients_good; +} + +inline std::optional HandleRegisterReplicaStatus( + utils::BasicResult &instance_client) { + if (instance_client.HasError()) switch (instance_client.GetError()) { + case replication::RegisterReplicaError::NOT_MAIN: + MG_ASSERT(false, "Only main instance can register a replica!"); + return {}; + case replication::RegisterReplicaError::NAME_EXISTS: + return dbms::RegisterReplicaError::NAME_EXISTS; + case replication::RegisterReplicaError::ENDPOINT_EXISTS: + return dbms::RegisterReplicaError::ENDPOINT_EXISTS; + case replication::RegisterReplicaError::COULD_NOT_BE_PERSISTED: + return dbms::RegisterReplicaError::COULD_NOT_BE_PERSISTED; + case replication::RegisterReplicaError::SUCCESS: + break; + } + return {}; +} + +} // namespace memgraph::dbms diff --git a/src/flags/CMakeLists.txt b/src/flags/CMakeLists.txt index e8988756f..e80438d1d 100644 --- a/src/flags/CMakeLists.txt +++ b/src/flags/CMakeLists.txt @@ -6,6 +6,7 @@ add_library(mg-flags STATIC audit.cpp memory_limit.cpp run_time_configurable.cpp storage_mode.cpp - query.cpp) + query.cpp + replication.cpp) target_include_directories(mg-flags PUBLIC ${CMAKE_SOURCE_DIR}/include) target_link_libraries(mg-flags PUBLIC spdlog::spdlog mg-settings mg-utils) diff --git a/src/flags/all.hpp b/src/flags/all.hpp index f7b44272a..f60f059d6 100644 --- a/src/flags/all.hpp +++ b/src/flags/all.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
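Note on the helpers above: RegisterAllDatabasesClients reports false if any per-database replication client failed to start, and HandleRegisterReplicaStatus folds the replication-level RegisterReplicaError into the dbms-level enum. The intended call shape is roughly the sketch below, where instance_client stands for the utils::BasicResult produced by the underlying registration call (that call is not part of this hunk):

if (auto err = HandleRegisterReplicaStatus(instance_client)) {
  return *err;  // NAME_EXISTS / ENDPOINT_EXISTS / COULD_NOT_BE_PERSISTED surfaced to the caller
}
// On success, the replication::ReplicationClient held by the result is handed to
// RegisterAllDatabasesClients(dbms_handler, client); how the caller treats a false return is outside this hunk.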
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -17,5 +17,6 @@ #include "flags/log_level.hpp" #include "flags/memory_limit.hpp" #include "flags/query.hpp" +#include "flags/replication.hpp" #include "flags/run_time_configurable.hpp" #include "flags/storage_mode.hpp" diff --git a/src/flags/general.cpp b/src/flags/general.cpp index fdc301fa2..cd2c95c60 100644 --- a/src/flags/general.cpp +++ b/src/flags/general.cpp @@ -131,12 +131,6 @@ DEFINE_uint64(storage_recovery_thread_count, DEFINE_bool(storage_enable_schema_metadata, false, "Controls whether metadata should be collected about the resident labels and edge types."); -#ifdef MG_ENTERPRISE -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -DEFINE_bool(storage_delete_on_drop, true, - "If set to true the query 'DROP DATABASE x' will delete the underlying storage as well."); -#endif - // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) DEFINE_bool(telemetry_enabled, false, "Set to true to enable telemetry. We collect information about the " @@ -162,13 +156,6 @@ DEFINE_string(pulsar_service_url, "", "Default URL used while connecting to Puls // Query flags. -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -DEFINE_uint64(replication_replica_check_frequency_sec, 1, - "The time duration between two replica checks/pings. If < 1, replicas will NOT be checked at all. NOTE: " - "The MAIN instance allocates a new thread for each REPLICA."); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -DEFINE_bool(replication_restore_state_on_startup, false, "Restore replication state on startup, e.g. recover replica"); - DEFINE_VALIDATED_string(query_modules_directory, "", "Directory where modules with custom query procedures are stored. 
" "NOTE: Multiple comma-separated directories can be defined.", @@ -208,3 +195,9 @@ DEFINE_HIDDEN_string(organization_name, "", "Organization name."); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) DEFINE_string(auth_user_or_role_name_regex, memgraph::glue::kDefaultUserRoleRegex.data(), "Set to the regular expression that each user or role name must fulfill."); +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +DEFINE_bool(auth_password_permit_null, true, "Set to false to disable null passwords."); +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +DEFINE_string(auth_password_strength_regex, memgraph::glue::kDefaultPasswordRegex.data(), + "The regular expression that should be used to match the entire " + "entered password to ensure its strength."); diff --git a/src/flags/general.hpp b/src/flags/general.hpp index 7cc353564..a1e8729ab 100644 --- a/src/flags/general.hpp +++ b/src/flags/general.hpp @@ -84,10 +84,6 @@ DECLARE_bool(storage_parallel_schema_recovery); DECLARE_uint64(storage_recovery_thread_count); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) DECLARE_bool(storage_enable_schema_metadata); -#ifdef MG_ENTERPRISE -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -DECLARE_bool(storage_delete_on_drop); -#endif // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) DECLARE_bool(telemetry_enabled); @@ -116,14 +112,13 @@ namespace memgraph::flags { auto ParseQueryModulesDirectory() -> std::vector; } // namespace memgraph::flags -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -DECLARE_uint64(replication_replica_check_frequency_sec); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -DECLARE_bool(replication_restore_state_on_startup); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) DECLARE_string(license_key); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) DECLARE_string(organization_name); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) DECLARE_string(auth_user_or_role_name_regex); +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +DECLARE_bool(auth_password_permit_null); +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +DECLARE_string(auth_password_strength_regex); diff --git a/src/flags/replication.cpp b/src/flags/replication.cpp new file mode 100644 index 000000000..3cd5187f3 --- /dev/null +++ b/src/flags/replication.cpp @@ -0,0 +1,26 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +#include "replication.hpp" + +#ifdef MG_ENTERPRISE +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +DEFINE_bool(coordinator, false, "Controls whether the instance is a replication coordinator."); +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +DEFINE_uint32(coordinator_server_port, 0, "Port on which coordinator servers will be started."); +#endif + +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +DEFINE_uint64(replication_replica_check_frequency_sec, 1, + "The time duration between two replica checks/pings. If < 1, replicas will NOT be checked at all. NOTE: " + "The MAIN instance allocates a new thread for each REPLICA."); +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +DEFINE_bool(replication_restore_state_on_startup, false, "Restore replication state on startup, e.g. recover replica"); diff --git a/src/flags/replication.hpp b/src/flags/replication.hpp new file mode 100644 index 000000000..16f4c74d2 --- /dev/null +++ b/src/flags/replication.hpp @@ -0,0 +1,26 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#include "gflags/gflags.h" + +#ifdef MG_ENTERPRISE +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +DECLARE_bool(coordinator); +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +DECLARE_uint32(coordinator_server_port); +#endif + +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +DECLARE_uint64(replication_replica_check_frequency_sec); +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +DECLARE_bool(replication_restore_state_on_startup); diff --git a/src/flags/storage_mode.cpp b/src/flags/storage_mode.cpp index b342719dd..63e9948fd 100644 --- a/src/flags/storage_mode.cpp +++ b/src/flags/storage_mode.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -17,20 +17,14 @@ #include "gflags/gflags.h" -#include - -inline constexpr std::array storage_mode_mappings{ - std::pair{std::string_view{"IN_MEMORY_TRANSACTIONAL"}, memgraph::storage::StorageMode::IN_MEMORY_TRANSACTIONAL}, - std::pair{std::string_view{"IN_MEMORY_ANALYTICAL"}, memgraph::storage::StorageMode::IN_MEMORY_ANALYTICAL}, - std::pair{std::string_view{"ON_DISK_TRANSACTIONAL"}, memgraph::storage::StorageMode::ON_DISK_TRANSACTIONAL}}; - const std::string storage_mode_help_string = fmt::format("Default storage mode Memgraph uses. 
Allowed values: {}", - memgraph::utils::GetAllowedEnumValuesString(storage_mode_mappings)); + memgraph::utils::GetAllowedEnumValuesString(memgraph::storage::storage_mode_mappings)); // NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables) DEFINE_VALIDATED_string(storage_mode, "IN_MEMORY_TRANSACTIONAL", storage_mode_help_string.c_str(), { - if (const auto result = memgraph::utils::IsValidEnumValueString(value, storage_mode_mappings); result.HasError()) { + if (const auto result = memgraph::utils::IsValidEnumValueString(value, memgraph::storage::storage_mode_mappings); + result.HasError()) { switch (result.GetError()) { case memgraph::utils::ValidationError::EmptyValue: { std::cout << "Storage mode cannot be empty." << std::endl; @@ -38,7 +32,7 @@ DEFINE_VALIDATED_string(storage_mode, "IN_MEMORY_TRANSACTIONAL", storage_mode_he } case memgraph::utils::ValidationError::InvalidValue: { std::cout << "Invalid value for storage mode. Allowed values: " - << memgraph::utils::GetAllowedEnumValuesString(storage_mode_mappings) << std::endl; + << memgraph::utils::GetAllowedEnumValuesString(memgraph::storage::storage_mode_mappings) << std::endl; break; } } @@ -48,8 +42,8 @@ DEFINE_VALIDATED_string(storage_mode, "IN_MEMORY_TRANSACTIONAL", storage_mode_he }); memgraph::storage::StorageMode memgraph::flags::ParseStorageMode() { - const auto storage_mode = - memgraph::utils::StringToEnum(FLAGS_storage_mode, storage_mode_mappings); + const auto storage_mode = memgraph::utils::StringToEnum( + FLAGS_storage_mode, memgraph::storage::storage_mode_mappings); MG_ASSERT(storage_mode, "Invalid storage mode"); return *storage_mode; } diff --git a/src/glue/SessionHL.cpp b/src/glue/SessionHL.cpp index bff12d188..61c1ab26f 100644 --- a/src/glue/SessionHL.cpp +++ b/src/glue/SessionHL.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -80,7 +80,7 @@ std::vector TypedValueResultStreamBase::De std::vector decoded_values; decoded_values.reserve(values.size()); for (const auto &v : values) { - auto maybe_value = memgraph::glue::ToBoltValue(v, *storage_, memgraph::storage::View::NEW); + auto maybe_value = memgraph::glue::ToBoltValue(v, storage_, memgraph::storage::View::NEW); if (maybe_value.HasError()) { switch (maybe_value.GetError()) { case memgraph::storage::Error::DELETED_OBJECT: @@ -112,14 +112,14 @@ std::string SessionHL::GetDefaultDB() { if (user_.has_value()) { return user_->db_access().GetDefault(); } - return memgraph::dbms::kDefaultDB; + return std::string{memgraph::dbms::kDefaultDB}; } #endif std::string SessionHL::GetCurrentDB() const { if (!interpreter_.current_db_.db_acc_) return ""; const auto *db = interpreter_.current_db_.db_acc_->get(); - return db->id(); + return db->name(); } std::optional SessionHL::GetServerNameForInit() { @@ -167,10 +167,10 @@ std::map SessionHL::Discard(s std::map SessionHL::Pull(SessionHL::TEncoder *encoder, std::optional n, std::optional qid) { - // TODO: Update once interpreter can handle non-database queries (db_acc will be nullopt) - auto *db = interpreter_.current_db_.db_acc_->get(); try { - TypedValueResultStream stream(encoder, db->storage()); + auto &db = interpreter_.current_db_.db_acc_; + auto *storage = db ? 
db->get()->storage() : nullptr; + TypedValueResultStream stream(encoder, storage); return DecodeSummary(interpreter_.Pull(&stream, n, qid)); } catch (const memgraph::query::QueryException &e) { // Count the number of specific exceptions thrown @@ -193,17 +193,17 @@ std::pair, std::optional> SessionHL::Interpret( for (const auto &[key, bolt_param] : params) { params_pv.emplace(key, ToPropertyValue(bolt_param)); } + +#ifdef MG_ENTERPRISE const std::string *username{nullptr}; if (user_) { username = &user_->username(); } -#ifdef MG_ENTERPRISE - // TODO: Update once interpreter can handle non-database queries (db_acc will be nullopt) - auto *db = interpreter_.current_db_.db_acc_->get(); if (memgraph::license::global_license_checker.IsEnterpriseValidFast()) { + auto &db = interpreter_.current_db_.db_acc_; audit_log_->Record(endpoint_.address().to_string(), user_ ? *username : "", query, - memgraph::storage::PropertyValue(params_pv), db->id()); + memgraph::storage::PropertyValue(params_pv), db ? db->get()->name() : "no known database"); } #endif try { @@ -351,11 +351,11 @@ SessionHL::~SessionHL() { std::map SessionHL::DecodeSummary( const std::map &summary) { - // TODO: Update once interpreter can handle non-database queries (db_acc will be nullopt) - auto *db = interpreter_.current_db_.db_acc_->get(); + auto &db_acc = interpreter_.current_db_.db_acc_; + auto *storage = db_acc ? db_acc->get()->storage() : nullptr; std::map decoded_summary; for (const auto &kv : summary) { - auto maybe_value = ToBoltValue(kv.second, *db->storage(), memgraph::storage::View::NEW); + auto maybe_value = ToBoltValue(kv.second, storage, memgraph::storage::View::NEW); if (maybe_value.HasError()) { switch (maybe_value.GetError()) { case memgraph::storage::Error::DELETED_OBJECT: diff --git a/src/glue/auth.cpp b/src/glue/auth.cpp index 8344ad49d..9be5cd87b 100644 --- a/src/glue/auth.cpp +++ b/src/glue/auth.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -66,6 +66,8 @@ auth::Permission PrivilegeToPermission(query::AuthQuery::Privilege privilege) { return auth::Permission::MULTI_DATABASE_EDIT; case query::AuthQuery::Privilege::MULTI_DATABASE_USE: return auth::Permission::MULTI_DATABASE_USE; + case query::AuthQuery::Privilege::COORDINATOR: + return auth::Permission::COORDINATOR; } } diff --git a/src/glue/auth_global.hpp b/src/glue/auth_global.hpp index 4675b6978..008960c76 100644 --- a/src/glue/auth_global.hpp +++ b/src/glue/auth_global.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -13,4 +13,5 @@ namespace memgraph::glue { inline constexpr std::string_view kDefaultUserRoleRegex = "[a-zA-Z0-9_.+-@]+"; +static constexpr std::string_view kDefaultPasswordRegex = ".+"; } // namespace memgraph::glue diff --git a/src/glue/auth_handler.cpp b/src/glue/auth_handler.cpp index b4ebfcd2a..f3efb6ba0 100644 --- a/src/glue/auth_handler.cpp +++ b/src/glue/auth_handler.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
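Note on glue/auth_global.hpp above: the default strength pattern ".+" only demands a non-empty password, so the out-of-the-box policy stays permissive unless --auth-password-strength-regex is tightened (and --auth-password-permit-null, which defaults to true, still allows passwordless users). A quick standalone check of what the default pattern accepts:

#include <cassert>
#include <regex>

int main() {
  const std::regex strength{".+"};                // memgraph::glue::kDefaultPasswordRegex
  assert(std::regex_match("hunter2", strength));  // any non-empty password matches
  assert(!std::regex_match("", strength));        // the empty string is rejected
}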
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -249,25 +249,10 @@ std::vector> ShowFineGrainedRolePrivile namespace memgraph::glue { AuthQueryHandler::AuthQueryHandler( - memgraph::utils::Synchronized *auth, - std::string name_regex_string) - : auth_(auth), name_regex_string_(std::move(name_regex_string)), name_regex_(name_regex_string_) {} + memgraph::utils::Synchronized *auth) + : auth_(auth) {} bool AuthQueryHandler::CreateUser(const std::string &username, const std::optional &password) { - if (name_regex_string_ != kDefaultUserRoleRegex) { - if (const auto license_check_result = - memgraph::license::global_license_checker.IsEnterpriseValid(memgraph::utils::global_settings); - license_check_result.HasError()) { - throw memgraph::auth::AuthException( - "Custom user/role regex is a Memgraph Enterprise feature. Please set the config " - "(\"--auth-user-or-role-name-regex\") to its default value (\"{}\") or remove the flag.\n{}", - kDefaultUserRoleRegex, - memgraph::license::LicenseCheckErrorToString(license_check_result.GetError(), "user/role regex")); - } - } - if (!std::regex_match(username, name_regex_)) { - throw query::QueryRuntimeException("Invalid user name."); - } try { const auto [first_user, user_added] = std::invoke([&, this] { auto locked_auth = auth_->Lock(); @@ -294,7 +279,7 @@ bool AuthQueryHandler::CreateUser(const std::string &username, const std::option ); #ifdef MG_ENTERPRISE GrantDatabaseToUser(auth::kAllDatabases, username); - SetMainDatabase(username, dbms::kDefaultDB); + SetMainDatabase(dbms::kDefaultDB, username); #endif } @@ -305,9 +290,6 @@ bool AuthQueryHandler::CreateUser(const std::string &username, const std::option } bool AuthQueryHandler::DropUser(const std::string &username) { - if (!std::regex_match(username, name_regex_)) { - throw memgraph::query::QueryRuntimeException("Invalid user name."); - } try { auto locked_auth = auth_->Lock(); auto user = locked_auth->GetUser(username); @@ -319,16 +301,13 @@ bool AuthQueryHandler::DropUser(const std::string &username) { } void AuthQueryHandler::SetPassword(const std::string &username, const std::optional &password) { - if (!std::regex_match(username, name_regex_)) { - throw memgraph::query::QueryRuntimeException("Invalid user name."); - } try { auto locked_auth = auth_->Lock(); auto user = locked_auth->GetUser(username); if (!user) { throw memgraph::query::QueryRuntimeException("User '{}' doesn't exist.", username); } - user->UpdatePassword(password); + locked_auth->UpdatePassword(*user, password); locked_auth->SaveUser(*user); } catch (const memgraph::auth::AuthException &e) { throw memgraph::query::QueryRuntimeException(e.what()); @@ -336,9 +315,6 @@ void AuthQueryHandler::SetPassword(const std::string &username, const std::optio } bool AuthQueryHandler::CreateRole(const std::string &rolename) { - if (!std::regex_match(rolename, name_regex_)) { - throw memgraph::query::QueryRuntimeException("Invalid role name."); - } try { auto locked_auth = auth_->Lock(); return locked_auth->AddRole(rolename).has_value(); @@ -349,9 +325,6 @@ bool AuthQueryHandler::CreateRole(const std::string &rolename) { #ifdef MG_ENTERPRISE bool AuthQueryHandler::RevokeDatabaseFromUser(const std::string &db, const std::string &username) { - if (!std::regex_match(username, name_regex_)) { - throw memgraph::query::QueryRuntimeException("Invalid user name."); - } try { auto locked_auth = 
auth_->Lock(); auto user = locked_auth->GetUser(username); @@ -363,9 +336,6 @@ bool AuthQueryHandler::RevokeDatabaseFromUser(const std::string &db, const std:: } bool AuthQueryHandler::GrantDatabaseToUser(const std::string &db, const std::string &username) { - if (!std::regex_match(username, name_regex_)) { - throw memgraph::query::QueryRuntimeException("Invalid user name."); - } try { auto locked_auth = auth_->Lock(); auto user = locked_auth->GetUser(username); @@ -378,9 +348,6 @@ bool AuthQueryHandler::GrantDatabaseToUser(const std::string &db, const std::str std::vector> AuthQueryHandler::GetDatabasePrivileges( const std::string &username) { - if (!std::regex_match(username, name_regex_)) { - throw memgraph::query::QueryRuntimeException("Invalid user or role name."); - } try { auto locked_auth = auth_->ReadLock(); auto user = locked_auth->GetUser(username); @@ -393,10 +360,7 @@ std::vector> AuthQueryHandler::GetDatab } } -bool AuthQueryHandler::SetMainDatabase(const std::string &db, const std::string &username) { - if (!std::regex_match(username, name_regex_)) { - throw memgraph::query::QueryRuntimeException("Invalid user name."); - } +bool AuthQueryHandler::SetMainDatabase(std::string_view db, const std::string &username) { try { auto locked_auth = auth_->Lock(); auto user = locked_auth->GetUser(username); @@ -417,9 +381,6 @@ void AuthQueryHandler::DeleteDatabase(std::string_view db) { #endif bool AuthQueryHandler::DropRole(const std::string &rolename) { - if (!std::regex_match(rolename, name_regex_)) { - throw memgraph::query::QueryRuntimeException("Invalid role name."); - } try { auto locked_auth = auth_->Lock(); auto role = locked_auth->GetRole(rolename); @@ -465,9 +426,6 @@ std::vector AuthQueryHandler::GetRolenames() { } std::optional AuthQueryHandler::GetRolenameForUser(const std::string &username) { - if (!std::regex_match(username, name_regex_)) { - throw memgraph::query::QueryRuntimeException("Invalid user name."); - } try { auto locked_auth = auth_->ReadLock(); auto user = locked_auth->GetUser(username); @@ -485,9 +443,6 @@ std::optional AuthQueryHandler::GetRolenameForUser(const std::strin } std::vector AuthQueryHandler::GetUsernamesForRole(const std::string &rolename) { - if (!std::regex_match(rolename, name_regex_)) { - throw memgraph::query::QueryRuntimeException("Invalid role name."); - } try { auto locked_auth = auth_->ReadLock(); auto role = locked_auth->GetRole(rolename); @@ -507,12 +462,6 @@ std::vector AuthQueryHandler::GetUsernamesForRole(c } void AuthQueryHandler::SetRole(const std::string &username, const std::string &rolename) { - if (!std::regex_match(username, name_regex_)) { - throw memgraph::query::QueryRuntimeException("Invalid user name."); - } - if (!std::regex_match(rolename, name_regex_)) { - throw memgraph::query::QueryRuntimeException("Invalid role name."); - } try { auto locked_auth = auth_->Lock(); auto user = locked_auth->GetUser(username); @@ -535,9 +484,6 @@ void AuthQueryHandler::SetRole(const std::string &username, const std::string &r } void AuthQueryHandler::ClearRole(const std::string &username) { - if (!std::regex_match(username, name_regex_)) { - throw memgraph::query::QueryRuntimeException("Invalid user name."); - } try { auto locked_auth = auth_->Lock(); auto user = locked_auth->GetUser(username); @@ -552,9 +498,6 @@ void AuthQueryHandler::ClearRole(const std::string &username) { } std::vector> AuthQueryHandler::GetPrivileges(const std::string &user_or_role) { - if (!std::regex_match(user_or_role, name_regex_)) { - throw 
memgraph::query::QueryRuntimeException("Invalid user or role name."); - } try { auto locked_auth = auth_->ReadLock(); std::vector> grants; @@ -704,9 +647,6 @@ void AuthQueryHandler::EditPermissions( const TEditFineGrainedPermissionsFun &edit_fine_grained_permissions_fun #endif ) { - if (!std::regex_match(user_or_role, name_regex_)) { - throw memgraph::query::QueryRuntimeException("Invalid user or role name."); - } try { std::vector permissions; permissions.reserve(privileges.size()); diff --git a/src/glue/auth_handler.hpp b/src/glue/auth_handler.hpp index 8798c150a..c226a4560 100644 --- a/src/glue/auth_handler.hpp +++ b/src/glue/auth_handler.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -24,12 +24,9 @@ namespace memgraph::glue { class AuthQueryHandler final : public memgraph::query::AuthQueryHandler { memgraph::utils::Synchronized *auth_; - std::string name_regex_string_; - std::regex name_regex_; public: - AuthQueryHandler(memgraph::utils::Synchronized *auth, - std::string name_regex_string); + AuthQueryHandler(memgraph::utils::Synchronized *auth); bool CreateUser(const std::string &username, const std::optional &password) override; @@ -44,7 +41,7 @@ class AuthQueryHandler final : public memgraph::query::AuthQueryHandler { std::vector> GetDatabasePrivileges(const std::string &username) override; - bool SetMainDatabase(const std::string &db, const std::string &username) override; + bool SetMainDatabase(std::string_view db, const std::string &username) override; void DeleteDatabase(std::string_view db) override; #endif diff --git a/src/glue/communication.cpp b/src/glue/communication.cpp index 60181e877..2c71e37c7 100644 --- a/src/glue/communication.cpp +++ b/src/glue/communication.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
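Note on the auth_handler.cpp/.hpp hunks above: every per-method std::regex_match guard is gone and the handler no longer stores a name regex, so user/role-name and password validation are expected to live behind auth::Auth (configured via Auth::Config). Construction accordingly loses its second argument, roughly:

auto auth_handler = std::make_unique<memgraph::glue::AuthQueryHandler>(&auth_);
// previously: std::make_unique<memgraph::glue::AuthQueryHandler>(&auth_, FLAGS_auth_user_or_role_name_regex)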
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -73,8 +73,14 @@ storage::Result ToBoltEdge(const query::EdgeAccessor return ToBoltEdge(edge.impl_, db, view); } -storage::Result ToBoltValue(const query::TypedValue &value, const storage::Storage &db, storage::View view) { +storage::Result ToBoltValue(const query::TypedValue &value, const storage::Storage *db, storage::View view) { + auto check_db = [db]() { + if (db == nullptr) [[unlikely]] + throw communication::bolt::ValueException("Database needed for TypeValue conversion."); + }; + switch (value.type()) { + // No database needed case query::TypedValue::Type::Null: return Value(); case query::TypedValue::Type::Bool: @@ -85,16 +91,16 @@ storage::Result ToBoltValue(const query::TypedValue &value, const storage return Value(value.ValueDouble()); case query::TypedValue::Type::String: return Value(std::string(value.ValueString())); - case query::TypedValue::Type::List: { - std::vector values; - values.reserve(value.ValueList().size()); - for (const auto &v : value.ValueList()) { - auto maybe_value = ToBoltValue(v, db, view); - if (maybe_value.HasError()) return maybe_value.GetError(); - values.emplace_back(std::move(*maybe_value)); - } - return Value(std::move(values)); - } + case query::TypedValue::Type::Date: + return Value(value.ValueDate()); + case query::TypedValue::Type::LocalTime: + return Value(value.ValueLocalTime()); + case query::TypedValue::Type::LocalDateTime: + return Value(value.ValueLocalDateTime()); + case query::TypedValue::Type::Duration: + return Value(value.ValueDuration()); + + // Database potentially not required case query::TypedValue::Type::Map: { std::map map; for (const auto &kv : value.ValueMap()) { @@ -104,35 +110,48 @@ storage::Result ToBoltValue(const query::TypedValue &value, const storage } return Value(std::move(map)); } + + // Database is required + case query::TypedValue::Type::List: { + check_db(); + std::vector values; + values.reserve(value.ValueList().size()); + for (const auto &v : value.ValueList()) { + auto maybe_value = ToBoltValue(v, db, view); + if (maybe_value.HasError()) return maybe_value.GetError(); + values.emplace_back(std::move(*maybe_value)); + } + return Value(std::move(values)); + } case query::TypedValue::Type::Vertex: { - auto maybe_vertex = ToBoltVertex(value.ValueVertex(), db, view); + check_db(); + auto maybe_vertex = ToBoltVertex(value.ValueVertex(), *db, view); if (maybe_vertex.HasError()) return maybe_vertex.GetError(); return Value(std::move(*maybe_vertex)); } case query::TypedValue::Type::Edge: { - auto maybe_edge = ToBoltEdge(value.ValueEdge(), db, view); + check_db(); + auto maybe_edge = ToBoltEdge(value.ValueEdge(), *db, view); if (maybe_edge.HasError()) return maybe_edge.GetError(); return Value(std::move(*maybe_edge)); } case query::TypedValue::Type::Path: { - auto maybe_path = ToBoltPath(value.ValuePath(), db, view); + check_db(); + auto maybe_path = ToBoltPath(value.ValuePath(), *db, view); if (maybe_path.HasError()) return maybe_path.GetError(); return Value(std::move(*maybe_path)); } - case query::TypedValue::Type::Date: - return Value(value.ValueDate()); - case query::TypedValue::Type::LocalTime: - return Value(value.ValueLocalTime()); - case query::TypedValue::Type::LocalDateTime: - return Value(value.ValueLocalDateTime()); - case query::TypedValue::Type::Duration: - return Value(value.ValueDuration()); - case 
query::TypedValue::Type::Function: - throw communication::bolt::ValueException("Unsupported conversion from TypedValue::Function to Value"); - case query::TypedValue::Type::Graph: - auto maybe_graph = ToBoltGraph(value.ValueGraph(), db, view); + case query::TypedValue::Type::Graph: { + check_db(); + auto maybe_graph = ToBoltGraph(value.ValueGraph(), *db, view); if (maybe_graph.HasError()) return maybe_graph.GetError(); return Value(std::move(*maybe_graph)); + } + + // Unsupported conversions + case query::TypedValue::Type::Function: { + throw communication::bolt::ValueException("Unsupported conversion from TypedValue::Function to Value"); + } } } diff --git a/src/glue/communication.hpp b/src/glue/communication.hpp index 0e3b39f4d..737f32db2 100644 --- a/src/glue/communication.hpp +++ b/src/glue/communication.hpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2023 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -65,7 +65,7 @@ storage::Result> ToBoltGraph(c /// @param storage::View for ToBoltVertex and ToBoltEdge. /// /// @throw std::bad_alloc -storage::Result ToBoltValue(const query::TypedValue &value, const storage::Storage &db, +storage::Result ToBoltValue(const query::TypedValue &value, const storage::Storage *db, storage::View view); query::TypedValue ToTypedValue(const communication::bolt::Value &value); diff --git a/src/io/CMakeLists.txt b/src/io/CMakeLists.txt index 128e87114..428cad4b2 100644 --- a/src/io/CMakeLists.txt +++ b/src/io/CMakeLists.txt @@ -8,4 +8,5 @@ find_package(fmt REQUIRED) find_package(Threads REQUIRED) add_library(mg-io STATIC ${io_src_files}) +add_library(mg::io ALIAS mg-io) target_link_libraries(mg-io stdc++fs Threads::Threads fmt::fmt mg-utils) diff --git a/src/io/network/endpoint.cpp b/src/io/network/endpoint.cpp index 5e671ef38..e9032e42e 100644 --- a/src/io/network/endpoint.cpp +++ b/src/io/network/endpoint.cpp @@ -166,7 +166,7 @@ bool Endpoint::IsResolvableAddress(const std::string &address, uint16_t port) { } std::optional> Endpoint::ParseSocketOrAddress( - const std::string &address, const std::optional default_port = {}) { + const std::string &address, const std::optional default_port) { const std::string delimiter = ":"; std::vector parts = utils::Split(address, delimiter); if (parts.size() == 1) { diff --git a/src/kvstore/kvstore.cpp b/src/kvstore/kvstore.cpp index 877d6f9bd..1219b8527 100644 --- a/src/kvstore/kvstore.cpp +++ b/src/kvstore/kvstore.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
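Note on the glue/communication changes above: ToBoltValue now takes const storage::Storage * and only requires it for graph-bound values (List, Vertex, Edge, Path, Graph, via check_db()); primitives, temporal types and maps of such values convert with a null pointer. Illustrative behaviour (sketch, TypedValue construction simplified):

using memgraph::query::TypedValue;
auto ok = memgraph::glue::ToBoltValue(TypedValue(42), /*db=*/nullptr, memgraph::storage::View::NEW);
// ok.HasError() == false: integers need no storage.
// A TypedValue holding a Vertex converted with db == nullptr throws communication::bolt::ValueException
// from check_db() instead of dereferencing a null Storage.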
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -51,7 +51,7 @@ KVStore &KVStore::operator=(KVStore &&other) { return *this; } -bool KVStore::Put(const std::string &key, const std::string &value) { +bool KVStore::Put(std::string_view key, std::string_view value) { auto s = pimpl_->db->Put(rocksdb::WriteOptions(), key, value); return s.ok(); } @@ -65,7 +65,7 @@ bool KVStore::PutMultiple(const std::map &items) { return s.ok(); } -std::optional KVStore::Get(const std::string &key) const noexcept { +std::optional KVStore::Get(std::string_view key) const noexcept { std::string value; auto s = pimpl_->db->Get(rocksdb::ReadOptions(), key, &value); if (!s.ok()) return std::nullopt; diff --git a/src/kvstore/kvstore.hpp b/src/kvstore/kvstore.hpp index a67d01c8c..b9675d75b 100644 --- a/src/kvstore/kvstore.hpp +++ b/src/kvstore/kvstore.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -61,7 +61,7 @@ class KVStore final { * @return true if the value has been successfully stored. * In case of any error false is going to be returned. */ - bool Put(const std::string &key, const std::string &value); + bool Put(std::string_view key, std::string_view value); /** * Store values under the given keys. @@ -81,7 +81,7 @@ class KVStore final { * @return Value for the given key. std::nullopt in case of any error * OR the value doesn't exist. */ - std::optional Get(const std::string &key) const noexcept; + std::optional Get(std::string_view key) const noexcept; /** * Deletes the key and corresponding value from storage. 
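Note on the kvstore signature change above: Put and Get now take std::string_view, so callers can pass string literals or views without building std::string temporaries, and kvstore.cpp forwards them straight to the RocksDB calls. Typical usage keeps the same shape (sketch; the constructor path is an assumed example):

memgraph::kvstore::KVStore store{data_directory / "settings"};
store.Put("replication_role", "replica");            // literals bind directly to string_view
if (auto value = store.Get("replication_role")) {
  spdlog::info("stored role: {}", *value);
}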
diff --git a/src/memgraph.cpp b/src/memgraph.cpp index cf954560d..cbd63490e 100644 --- a/src/memgraph.cpp +++ b/src/memgraph.cpp @@ -11,13 +11,11 @@ #include #include "audit/log.hpp" -#include "communication/metrics.hpp" #include "communication/websocket/auth.hpp" #include "communication/websocket/server.hpp" #include "dbms/constants.hpp" #include "dbms/inmemory/replication_handlers.hpp" #include "flags/all.hpp" -#include "flags/run_time_configurable.hpp" #include "glue/MonitoringServerT.hpp" #include "glue/ServerT.hpp" #include "glue/auth_checker.hpp" @@ -33,9 +31,9 @@ #include "query/procedure/module.hpp" #include "query/procedure/py_module.hpp" #include "requests/requests.hpp" +#include "storage/v2/durability/durability.hpp" #include "telemetry/telemetry.hpp" #include "utils/signals.hpp" -#include "utils/skip_list.hpp" #include "utils/sysinfo/memory.hpp" #include "utils/system_info.hpp" #include "utils/terminate_handler.hpp" @@ -73,7 +71,7 @@ void InitFromCypherlFile(memgraph::query::InterpreterContext &ctx, memgraph::dbm spdlog::warn("{} The rest of the init-file will be run.", e.what()); } if (audit_log) { - audit_log->Record("", "", line, {}, memgraph::dbms::kDefaultDB); + audit_log->Record("", "", line, {}, std::string{memgraph::dbms::kDefaultDB}); } } } @@ -300,8 +298,7 @@ int main(int argc, char **argv) { memgraph::storage::Config db_config{ .gc = {.type = memgraph::storage::Config::Gc::Type::PERIODIC, .interval = std::chrono::seconds(FLAGS_storage_gc_cycle_sec)}, - .items = {.properties_on_edges = FLAGS_storage_properties_on_edges, - .enable_schema_metadata = FLAGS_storage_enable_schema_metadata}, + .durability = {.storage_directory = FLAGS_data_directory, .recover_on_startup = FLAGS_storage_recover_on_startup || FLAGS_data_recovery_on_startup, .snapshot_retention_count = FLAGS_storage_snapshot_retention_count, @@ -323,7 +320,9 @@ int main(int argc, char **argv) { .id_name_mapper_directory = FLAGS_data_directory + "/rocksdb_id_name_mapper", .durability_directory = FLAGS_data_directory + "/rocksdb_durability", .wal_directory = FLAGS_data_directory + "/rocksdb_wal"}, - .storage_mode = memgraph::flags::ParseStorageMode()}; + .salient.items = {.properties_on_edges = FLAGS_storage_properties_on_edges, + .enable_schema_metadata = FLAGS_storage_enable_schema_metadata}, + .salient.storage_mode = memgraph::flags::ParseStorageMode()}; memgraph::utils::Scheduler jemalloc_purge_scheduler; jemalloc_purge_scheduler.Run("Jemalloc purge", std::chrono::seconds(FLAGS_storage_gc_cycle_sec), @@ -358,11 +357,10 @@ int main(int argc, char **argv) { .stream_transaction_retry_interval = std::chrono::milliseconds(FLAGS_stream_transaction_retry_interval)}; auto auth_glue = - [flag = FLAGS_auth_user_or_role_name_regex]( - memgraph::utils::Synchronized *auth, - std::unique_ptr &ah, std::unique_ptr &ac) { + [](memgraph::utils::Synchronized *auth, + std::unique_ptr &ah, std::unique_ptr &ac) { // Glue high level auth implementations to the query side - ah = std::make_unique(auth, flag); + ah = std::make_unique(auth); ac = std::make_unique(auth); // Handle users passed via arguments auto *maybe_username = std::getenv(kMgUser); @@ -378,9 +376,10 @@ int main(int argc, char **argv) { } }; - // WIP - memgraph::utils::Synchronized auth_{data_directory / - "auth"}; + memgraph::auth::Auth::Config auth_config{FLAGS_auth_user_or_role_name_regex, FLAGS_auth_password_strength_regex, + FLAGS_auth_password_permit_null}; + memgraph::utils::Synchronized auth_{ + data_directory / "auth", auth_config}; std::unique_ptr 
auth_handler; std::unique_ptr auth_checker; auth_glue(&auth_, auth_handler, auth_checker); @@ -388,7 +387,7 @@ int main(int argc, char **argv) { memgraph::dbms::DbmsHandler dbms_handler(db_config #ifdef MG_ENTERPRISE , - &auth_, FLAGS_data_recovery_on_startup, FLAGS_storage_delete_on_drop + &auth_, FLAGS_data_recovery_on_startup #endif ); auto db_acc = dbms_handler.Get(); diff --git a/src/mg_import_csv.cpp b/src/mg_import_csv.cpp index e8212b5f4..cbfb905aa 100644 --- a/src/mg_import_csv.cpp +++ b/src/mg_import_csv.cpp @@ -32,7 +32,7 @@ #include "utils/timer.hpp" #include "version.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; bool ValidateControlCharacter(const char *flagname, const std::string &value) { if (value.empty()) { @@ -707,12 +707,11 @@ int main(int argc, char *argv[]) { std::unordered_map node_id_map; memgraph::storage::Config config{ - - .items = {.properties_on_edges = FLAGS_storage_properties_on_edges}, .durability = {.storage_directory = FLAGS_data_directory, .recover_on_startup = false, .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::DISABLED, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = FLAGS_storage_properties_on_edges}}, }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; auto store = memgraph::dbms::CreateInMemoryStorage(config, repl_state); diff --git a/src/query/auth_query_handler.hpp b/src/query/auth_query_handler.hpp index 908dd3ebc..693103354 100644 --- a/src/query/auth_query_handler.hpp +++ b/src/query/auth_query_handler.hpp @@ -57,7 +57,7 @@ class AuthQueryHandler { /// Return true if main database set successfully /// @throw QueryRuntimeException if an error ocurred. - virtual bool SetMainDatabase(const std::string &db, const std::string &username) = 0; + virtual bool SetMainDatabase(std::string_view db, const std::string &username) = 0; /// Delete database from all users /// @throw QueryRuntimeException if an error ocurred. diff --git a/src/query/common.hpp b/src/query/common.hpp index 6f45760fe..054714164 100644 --- a/src/query/common.hpp +++ b/src/query/common.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -72,8 +72,9 @@ class TypedValueVectorCompare final { /// Raise QueryRuntimeException if the value for symbol isn't of expected type. inline void ExpectType(const Symbol &symbol, const TypedValue &value, TypedValue::Type expected) { - if (value.type() != expected) + if (value.type() != expected) [[unlikely]] { throw QueryRuntimeException("Expected a {} for '{}', but got {}.", expected, symbol.name(), value.type()); + } } inline void ProcessError(const storage::Error error) { diff --git a/src/query/config.hpp b/src/query/config.hpp index 64e2da5bb..88c3dd00e 100644 --- a/src/query/config.hpp +++ b/src/query/config.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
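Note on the storage::Config reshuffle in memgraph.cpp and mg_import_csv.cpp above: items and storage_mode move under the salient sub-struct (storage::SalientConfig, the same type the CREATE_DATABASE system delta carries), so designated initializers change shape roughly as follows (field subset for illustration):

memgraph::storage::Config cfg{
    .durability = {.storage_directory = FLAGS_data_directory,
                   .recover_on_startup = false},
    .salient = {.items = {.properties_on_edges = FLAGS_storage_properties_on_edges},
                .storage_mode = memgraph::flags::ParseStorageMode()},
};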
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source diff --git a/src/query/context.hpp b/src/query/context.hpp index 3040d6e10..f1522053c 100644 --- a/src/query/context.hpp +++ b/src/query/context.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source diff --git a/src/query/db_accessor.hpp b/src/query/db_accessor.hpp index ada52f953..6365521e4 100644 --- a/src/query/db_accessor.hpp +++ b/src/query/db_accessor.hpp @@ -555,7 +555,10 @@ class DbAccessor final { void AdvanceCommand() { accessor_->AdvanceCommand(); } - utils::BasicResult Commit() { return accessor_->Commit(); } + utils::BasicResult Commit(storage::CommitReplArgs reparg = {}, + storage::DatabaseAccessProtector db_acc = {}) { + return accessor_->Commit(std::move(reparg), std::move(db_acc)); + } void Abort() { accessor_->Abort(); } diff --git a/src/query/exceptions.hpp b/src/query/exceptions.hpp index ac8cc8fe8..147dc8710 100644 --- a/src/query/exceptions.hpp +++ b/src/query/exceptions.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -195,6 +195,12 @@ class DatabaseContextRequiredException : public QueryRuntimeException { SPECIALIZE_GET_EXCEPTION_NAME(DatabaseContextRequiredException) }; +class ConcurrentSystemQueriesException : public QueryRuntimeException { + public: + using QueryRuntimeException::QueryRuntimeException; + SPECIALIZE_GET_EXCEPTION_NAME(ConcurrentSystemQueriesException) +}; + class WriteVertexOperationInEdgeImportModeException : public QueryException { public: WriteVertexOperationInEdgeImportModeException() @@ -253,6 +259,13 @@ class ReplicationModificationInMulticommandTxException : public QueryException { SPECIALIZE_GET_EXCEPTION_NAME(ReplicationModificationInMulticommandTxException) }; +class CoordinatorModificationInMulticommandTxException : public QueryException { + public: + CoordinatorModificationInMulticommandTxException() + : QueryException("Coordinator clause not allowed in multicommand transactions.") {} + SPECIALIZE_GET_EXCEPTION_NAME(CoordinatorModificationInMulticommandTxException) +}; + class ReplicationDisabledOnDiskStorage : public QueryException { public: ReplicationDisabledOnDiskStorage() : QueryException("Replication is not supported while in on-disk storage mode.") {} diff --git a/src/query/frontend/ast/ast.cpp b/src/query/frontend/ast/ast.cpp index 6a9f05bad..57d5398ab 100644 --- a/src/query/frontend/ast/ast.cpp +++ b/src/query/frontend/ast/ast.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
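Note on the two new query exceptions above: CoordinatorModificationInMulticommandTxException mirrors the existing ReplicationModificationInMulticommandTxException, so the interpreter-side guard (not visible in this hunk) presumably looks like the sketch below, while ConcurrentSystemQueriesException likely protects the single in-flight SystemTransaction introduced earlier in this diff.

// Hypothetical guard, following the existing replication-query pattern:
if (in_explicit_transaction) {
  throw memgraph::query::CoordinatorModificationInMulticommandTxException();
}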
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -239,6 +239,9 @@ constexpr utils::TypeInfo query::DumpQuery::kType{utils::TypeId::AST_DUMP_QUERY, constexpr utils::TypeInfo query::ReplicationQuery::kType{utils::TypeId::AST_REPLICATION_QUERY, "ReplicationQuery", &query::Query::kType}; +constexpr utils::TypeInfo query::CoordinatorQuery::kType{utils::TypeId::AST_COORDINATOR_QUERY, "CoordinatorQuery", + &query::Query::kType}; + constexpr utils::TypeInfo query::LockPathQuery::kType{utils::TypeId::AST_LOCK_PATH_QUERY, "LockPathQuery", &query::Query::kType}; diff --git a/src/query/frontend/ast/ast.hpp b/src/query/frontend/ast/ast.hpp index d862ecf81..b9c37da19 100644 --- a/src/query/frontend/ast/ast.hpp +++ b/src/query/frontend/ast/ast.hpp @@ -1209,7 +1209,8 @@ class PropertyLookup : public memgraph::query::Expression { } protected: - PropertyLookup(Expression *expression, PropertyIx property) : expression_(expression), property_(property) {} + PropertyLookup(Expression *expression, PropertyIx property) + : expression_(expression), property_(std::move(property)) {} private: friend class AstStorage; @@ -1805,9 +1806,9 @@ class EdgeAtom : public memgraph::query::PatternAtom { static const utils::TypeInfo kType; const utils::TypeInfo &GetTypeInfo() const override { return kType; } - enum class Type { SINGLE, DEPTH_FIRST, BREADTH_FIRST, WEIGHTED_SHORTEST_PATH, ALL_SHORTEST_PATHS }; + enum class Type : uint8_t { SINGLE, DEPTH_FIRST, BREADTH_FIRST, WEIGHTED_SHORTEST_PATH, ALL_SHORTEST_PATHS }; - enum class Direction { IN, OUT, BOTH }; + enum class Direction : uint8_t { IN, OUT, BOTH }; /// Lambda for use in filtering or weight calculation during variable expand. struct Lambda { @@ -2860,6 +2861,7 @@ class AuthQuery : public memgraph::query::Query { TRANSACTION_MANAGEMENT, MULTI_DATABASE_EDIT, MULTI_DATABASE_USE, + COORDINATOR }; enum class FineGrainedPrivilege { NOTHING, READ, UPDATE, CREATE_DELETE }; @@ -2938,7 +2940,8 @@ const std::vector kPrivilegesAll = {AuthQuery::Privilege:: AuthQuery::Privilege::TRANSACTION_MANAGEMENT, AuthQuery::Privilege::STORAGE_MODE, AuthQuery::Privilege::MULTI_DATABASE_EDIT, - AuthQuery::Privilege::MULTI_DATABASE_USE}; + AuthQuery::Privilege::MULTI_DATABASE_USE, + AuthQuery::Privilege::COORDINATOR}; class DatabaseInfoQuery : public memgraph::query::Query { public: @@ -3050,8 +3053,9 @@ class ReplicationQuery : public memgraph::query::Query { memgraph::query::ReplicationQuery::Action action_; memgraph::query::ReplicationQuery::ReplicationRole role_; - std::string replica_name_; + std::string instance_name_; memgraph::query::Expression *socket_address_{nullptr}; + memgraph::query::Expression *coordinator_socket_address_{nullptr}; memgraph::query::Expression *port_{nullptr}; memgraph::query::ReplicationQuery::SyncMode sync_mode_; @@ -3059,10 +3063,53 @@ class ReplicationQuery : public memgraph::query::Query { ReplicationQuery *object = storage->Create(); object->action_ = action_; object->role_ = role_; - object->replica_name_ = replica_name_; + object->instance_name_ = instance_name_; object->socket_address_ = socket_address_ ? socket_address_->Clone(storage) : nullptr; object->port_ = port_ ? port_->Clone(storage) : nullptr; object->sync_mode_ = sync_mode_; + object->coordinator_socket_address_ = + coordinator_socket_address_ ? 
coordinator_socket_address_->Clone(storage) : nullptr; + + return object; + } + + private: + friend class AstStorage; +}; + +class CoordinatorQuery : public memgraph::query::Query { + public: + static const utils::TypeInfo kType; + const utils::TypeInfo &GetTypeInfo() const override { return kType; } + + enum class Action { + REGISTER_INSTANCE, + SET_INSTANCE_TO_MAIN, + SHOW_REPLICATION_CLUSTER, + }; + + enum class SyncMode { SYNC, ASYNC }; + + CoordinatorQuery() = default; + + DEFVISITABLE(QueryVisitor); + + memgraph::query::CoordinatorQuery::Action action_; + std::string instance_name_; + memgraph::query::Expression *replication_socket_address_{nullptr}; + memgraph::query::Expression *coordinator_socket_address_{nullptr}; + memgraph::query::CoordinatorQuery::SyncMode sync_mode_; + + CoordinatorQuery *Clone(AstStorage *storage) const override { + auto *object = storage->Create(); + object->action_ = action_; + object->instance_name_ = instance_name_; + object->replication_socket_address_ = + replication_socket_address_ ? replication_socket_address_->Clone(storage) : nullptr; + object->sync_mode_ = sync_mode_; + object->coordinator_socket_address_ = + coordinator_socket_address_ ? coordinator_socket_address_->Clone(storage) : nullptr; + return object; } @@ -3623,7 +3670,7 @@ class MultiDatabaseQuery : public memgraph::query::Query { DEFVISITABLE(QueryVisitor); - enum class Action { CREATE, USE, DROP }; + enum class Action { CREATE, USE, DROP, SHOW }; memgraph::query::MultiDatabaseQuery::Action action_; std::string db_name_; diff --git a/src/query/frontend/ast/ast_visitor.hpp b/src/query/frontend/ast/ast_visitor.hpp index ff1586fe4..5d463d3ee 100644 --- a/src/query/frontend/ast/ast_visitor.hpp +++ b/src/query/frontend/ast/ast_visitor.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
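Note on the CoordinatorQuery AST node above: it carries an action (REGISTER_INSTANCE, SET_INSTANCE_TO_MAIN, SHOW_REPLICATION_CLUSTER), an instance name, separate replication/coordinator socket-address expressions and a sync mode. The parser populates it roughly like this, mirroring visitRegisterInstanceOnCoordinator further down (template argument restored, literal value illustrative):

auto *q = storage_->Create<memgraph::query::CoordinatorQuery>();
q->action_ = memgraph::query::CoordinatorQuery::Action::REGISTER_INSTANCE;
q->instance_name_ = "instance_1";                                   // from instanceName()
q->sync_mode_ = memgraph::query::CoordinatorQuery::SyncMode::SYNC;  // SYNC unless ASYNC is present
// replication_socket_address_ / coordinator_socket_address_ are taken from the string-literal sub-rules.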
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -108,6 +108,7 @@ class MultiDatabaseQuery; class ShowDatabasesQuery; class EdgeImportModeQuery; class PatternComprehension; +class CoordinatorQuery; using TreeCompositeVisitor = utils::CompositeVisitor< SingleQuery, CypherUnion, NamedExpression, OrOperator, XorOperator, AndOperator, NotOperator, AdditionOperator, @@ -146,6 +147,7 @@ class QueryVisitor SystemInfoQuery, ConstraintQuery, DumpQuery, ReplicationQuery, LockPathQuery, FreeMemoryQuery, TriggerQuery, IsolationLevelQuery, CreateSnapshotQuery, StreamQuery, SettingQuery, VersionQuery, ShowConfigQuery, TransactionQueueQuery, StorageModeQuery, - AnalyzeGraphQuery, MultiDatabaseQuery, ShowDatabasesQuery, EdgeImportModeQuery> {}; + AnalyzeGraphQuery, MultiDatabaseQuery, ShowDatabasesQuery, EdgeImportModeQuery, + CoordinatorQuery> {}; } // namespace memgraph::query diff --git a/src/query/frontend/ast/cypher_main_visitor.cpp b/src/query/frontend/ast/cypher_main_visitor.cpp index fe202d430..c343b2ab2 100644 --- a/src/query/frontend/ast/cypher_main_visitor.cpp +++ b/src/query/frontend/ast/cypher_main_visitor.cpp @@ -321,6 +321,13 @@ antlrcpp::Any CypherMainVisitor::visitReplicationQuery(MemgraphCypher::Replicati return replication_query; } +antlrcpp::Any CypherMainVisitor::visitCoordinatorQuery(MemgraphCypher::CoordinatorQueryContext *ctx) { + MG_ASSERT(ctx->children.size() == 1, "CoordinatorQuery should have exactly one child!"); + auto *coordinator_query = std::any_cast(ctx->children[0]->accept(this)); + query_ = coordinator_query; + return coordinator_query; +} + antlrcpp::Any CypherMainVisitor::visitEdgeImportModeQuery(MemgraphCypher::EdgeImportModeQueryContext *ctx) { auto *edge_import_mode_query = storage_->Create(); if (ctx->ACTIVE()) { @@ -335,24 +342,34 @@ antlrcpp::Any CypherMainVisitor::visitEdgeImportModeQuery(MemgraphCypher::EdgeIm antlrcpp::Any CypherMainVisitor::visitSetReplicationRole(MemgraphCypher::SetReplicationRoleContext *ctx) { auto *replication_query = storage_->Create(); replication_query->action_ = ReplicationQuery::Action::SET_REPLICATION_ROLE; + + auto set_replication_port = [replication_query, ctx, this]() -> void { + if (ctx->port->numberLiteral() && ctx->port->numberLiteral()->integerLiteral()) { + replication_query->port_ = std::any_cast(ctx->port->accept(this)); + } else { + throw SyntaxException("Port must be an integer literal!"); + } + }; + if (ctx->MAIN()) { + replication_query->role_ = ReplicationQuery::ReplicationRole::MAIN; if (ctx->WITH() || ctx->PORT()) { throw SemanticException("Main can't set a port!"); } - replication_query->role_ = ReplicationQuery::ReplicationRole::MAIN; + } else if (ctx->REPLICA()) { replication_query->role_ = ReplicationQuery::ReplicationRole::REPLICA; if (ctx->WITH() && ctx->PORT()) { - if (ctx->port->numberLiteral() && ctx->port->numberLiteral()->integerLiteral()) { - replication_query->port_ = std::any_cast(ctx->port->accept(this)); - } else { - throw SyntaxException("Port must be an integer literal!"); - } + set_replication_port(); + } else { + throw SemanticException("Replica must set a port!"); } } + return replication_query; } -antlrcpp::Any CypherMainVisitor::visitShowReplicationRole(MemgraphCypher::ShowReplicationRoleContext *ctx) { + +antlrcpp::Any CypherMainVisitor::visitShowReplicationRole(MemgraphCypher::ShowReplicationRoleContext * /*ctx*/) { auto *replication_query = 
storage_->Create(); replication_query->action_ = ReplicationQuery::Action::SHOW_REPLICATION_ROLE; return replication_query; @@ -361,7 +378,7 @@ antlrcpp::Any CypherMainVisitor::visitShowReplicationRole(MemgraphCypher::ShowRe antlrcpp::Any CypherMainVisitor::visitRegisterReplica(MemgraphCypher::RegisterReplicaContext *ctx) { auto *replication_query = storage_->Create(); replication_query->action_ = ReplicationQuery::Action::REGISTER_REPLICA; - replication_query->replica_name_ = std::any_cast(ctx->replicaName()->symbolicName()->accept(this)); + replication_query->instance_name_ = std::any_cast(ctx->instanceName()->symbolicName()->accept(this)); if (ctx->SYNC()) { replication_query->sync_mode_ = memgraph::query::ReplicationQuery::SyncMode::SYNC; } else if (ctx->ASYNC()) { @@ -370,26 +387,67 @@ antlrcpp::Any CypherMainVisitor::visitRegisterReplica(MemgraphCypher::RegisterRe if (!ctx->socketAddress()->literal()->StringLiteral()) { throw SemanticException("Socket address should be a string literal!"); - } else { - replication_query->socket_address_ = std::any_cast(ctx->socketAddress()->accept(this)); } + replication_query->socket_address_ = std::any_cast(ctx->socketAddress()->accept(this)); return replication_query; } +// License check is done in the interpreter. +antlrcpp::Any CypherMainVisitor::visitRegisterInstanceOnCoordinator( + MemgraphCypher::RegisterInstanceOnCoordinatorContext *ctx) { + auto *coordinator_query = storage_->Create(); + if (!ctx->replicationSocketAddress()->literal()->StringLiteral()) { + throw SemanticException("Replication socket address should be a string literal!"); + } + + if (!ctx->coordinatorSocketAddress()->literal()->StringLiteral()) { + throw SemanticException("Coordinator socket address should be a string literal!"); + } + coordinator_query->action_ = CoordinatorQuery::Action::REGISTER_INSTANCE; + coordinator_query->replication_socket_address_ = + std::any_cast(ctx->replicationSocketAddress()->accept(this)); + coordinator_query->coordinator_socket_address_ = + std::any_cast(ctx->coordinatorSocketAddress()->accept(this)); + coordinator_query->instance_name_ = std::any_cast(ctx->instanceName()->symbolicName()->accept(this)); + if (ctx->ASYNC()) { + coordinator_query->sync_mode_ = memgraph::query::CoordinatorQuery::SyncMode::ASYNC; + } else { + coordinator_query->sync_mode_ = memgraph::query::CoordinatorQuery::SyncMode::SYNC; + } + + return coordinator_query; +} + +// License check is done in the interpreter +antlrcpp::Any CypherMainVisitor::visitShowReplicationCluster(MemgraphCypher::ShowReplicationClusterContext * /*ctx*/) { + auto *coordinator_query = storage_->Create(); + coordinator_query->action_ = CoordinatorQuery::Action::SHOW_REPLICATION_CLUSTER; + return coordinator_query; +} + antlrcpp::Any CypherMainVisitor::visitDropReplica(MemgraphCypher::DropReplicaContext *ctx) { auto *replication_query = storage_->Create(); replication_query->action_ = ReplicationQuery::Action::DROP_REPLICA; - replication_query->replica_name_ = std::any_cast(ctx->replicaName()->symbolicName()->accept(this)); + replication_query->instance_name_ = std::any_cast(ctx->instanceName()->symbolicName()->accept(this)); return replication_query; } -antlrcpp::Any CypherMainVisitor::visitShowReplicas(MemgraphCypher::ShowReplicasContext *ctx) { +antlrcpp::Any CypherMainVisitor::visitShowReplicas(MemgraphCypher::ShowReplicasContext * /*ctx*/) { auto *replication_query = storage_->Create(); replication_query->action_ = ReplicationQuery::Action::SHOW_REPLICAS; return replication_query; } +// 
License check is done in the interpreter +antlrcpp::Any CypherMainVisitor::visitSetInstanceToMain(MemgraphCypher::SetInstanceToMainContext *ctx) { + auto *coordinator_query = storage_->Create(); + coordinator_query->action_ = CoordinatorQuery::Action::SET_INSTANCE_TO_MAIN; + coordinator_query->instance_name_ = std::any_cast(ctx->instanceName()->symbolicName()->accept(this)); + query_ = coordinator_query; + return coordinator_query; +} + antlrcpp::Any CypherMainVisitor::visitLockPathQuery(MemgraphCypher::LockPathQueryContext *ctx) { auto *lock_query = storage_->Create(); if (ctx->STATUS()) { @@ -1657,6 +1715,7 @@ antlrcpp::Any CypherMainVisitor::visitPrivilege(MemgraphCypher::PrivilegeContext if (ctx->STORAGE_MODE()) return AuthQuery::Privilege::STORAGE_MODE; if (ctx->MULTI_DATABASE_EDIT()) return AuthQuery::Privilege::MULTI_DATABASE_EDIT; if (ctx->MULTI_DATABASE_USE()) return AuthQuery::Privilege::MULTI_DATABASE_USE; + if (ctx->COORDINATOR()) return AuthQuery::Privilege::COORDINATOR; LOG_FATAL("Should not get here - unknown privilege!"); } @@ -1771,7 +1830,11 @@ antlrcpp::Any CypherMainVisitor::visitReturnBody(MemgraphCypher::ReturnBodyConte body.skip = static_cast(std::any_cast(ctx->skip()->accept(this))); } if (ctx->limit()) { - body.limit = static_cast(std::any_cast(ctx->limit()->accept(this))); + if (ctx->limit()->expression()) { + body.limit = std::any_cast(ctx->limit()->accept(this)); + } else { + body.limit = std::any_cast(ctx->limit()->accept(this)); + } } std::tie(body.all_identifiers, body.named_expressions) = std::any_cast>>(ctx->returnItems()->accept(this)); @@ -2907,6 +2970,14 @@ antlrcpp::Any CypherMainVisitor::visitDropDatabase(MemgraphCypher::DropDatabaseC return mdb_query; } +antlrcpp::Any CypherMainVisitor::visitShowDatabase(MemgraphCypher::ShowDatabaseContext * /*ctx*/) { + auto *mdb_query = storage_->Create(); + mdb_query->db_name_ = ""; + mdb_query->action_ = MultiDatabaseQuery::Action::SHOW; + query_ = mdb_query; + return mdb_query; +} + antlrcpp::Any CypherMainVisitor::visitShowDatabases(MemgraphCypher::ShowDatabasesContext * /*ctx*/) { query_ = storage_->Create(); return query_; diff --git a/src/query/frontend/ast/cypher_main_visitor.hpp b/src/query/frontend/ast/cypher_main_visitor.hpp index 030689392..46bfd3224 100644 --- a/src/query/frontend/ast/cypher_main_visitor.hpp +++ b/src/query/frontend/ast/cypher_main_visitor.hpp @@ -233,6 +233,26 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor { */ antlrcpp::Any visitShowReplicas(MemgraphCypher::ShowReplicasContext *ctx) override; + /** + * @return CoordinatorQuery* + */ + antlrcpp::Any visitCoordinatorQuery(MemgraphCypher::CoordinatorQueryContext *ctx) override; + + /** + * @return CoordinatorQuery* + */ + antlrcpp::Any visitRegisterInstanceOnCoordinator(MemgraphCypher::RegisterInstanceOnCoordinatorContext *ctx) override; + + /** + * @return CoordinatorQuery* + */ + antlrcpp::Any visitSetInstanceToMain(MemgraphCypher::SetInstanceToMainContext *ctx) override; + + /** + * @return CoordinatorQuery* + */ + antlrcpp::Any visitShowReplicationCluster(MemgraphCypher::ShowReplicationClusterContext *ctx) override; + /** * @return LockPathQuery* */ @@ -1007,6 +1027,11 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor { */ antlrcpp::Any visitDropDatabase(MemgraphCypher::DropDatabaseContext *ctx) override; + /** + * @return MultiDatabaseQuery* + */ + antlrcpp::Any visitShowDatabase(MemgraphCypher::ShowDatabaseContext *ctx) override; + /** * @return ShowDatabasesQuery* */ 
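The visitor hooks declared above (visitCoordinatorQuery, visitRegisterInstanceOnCoordinator, visitSetInstanceToMain, visitShowReplicationCluster, visitShowDatabase) map onto the grammar additions in the hunks that follow. As a rough illustration of the query surface they are expected to accept (instance names and socket addresses here are placeholders, not taken from this change; REGISTER INSTANCE defaults to SYNC unless AS ASYNC is given):

    REGISTER INSTANCE instance_1 ON "127.0.0.1:10011" WITH "127.0.0.1:10001";
    REGISTER INSTANCE instance_2 ON "127.0.0.1:10012" AS ASYNC WITH "127.0.0.1:10002";
    SET INSTANCE instance_1 TO MAIN;
    SHOW REPLICATION CLUSTER;
    SHOW DATABASE;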
diff --git a/src/query/frontend/opencypher/grammar/Cypher.g4 b/src/query/frontend/opencypher/grammar/Cypher.g4 index 4fef32cb0..0d550d0c5 100644 --- a/src/query/frontend/opencypher/grammar/Cypher.g4 +++ b/src/query/frontend/opencypher/grammar/Cypher.g4 @@ -143,7 +143,7 @@ order : ORDER BY sortItem ( ',' sortItem )* ; skip : L_SKIP expression ; -limit : LIMIT expression ; +limit : LIMIT ( expression | parameter ) ; sortItem : expression ( ASCENDING | ASC | DESCENDING | DESC )? ; diff --git a/src/query/frontend/opencypher/grammar/CypherLexer.g4 b/src/query/frontend/opencypher/grammar/CypherLexer.g4 index 7866c9107..fb8a30b0f 100644 --- a/src/query/frontend/opencypher/grammar/CypherLexer.g4 +++ b/src/query/frontend/opencypher/grammar/CypherLexer.g4 @@ -102,6 +102,7 @@ FILTER : F I L T E R ; IN : I N ; INDEX : I N D E X ; INFO : I N F O ; +INSTANCE : I N S T A N C E ; IS : I S ; KB : K B ; KEY : K E Y ; @@ -122,6 +123,7 @@ PROCEDURE : P R O C E D U R E ; PROFILE : P R O F I L E ; QUERY : Q U E R Y ; REDUCE : R E D U C E ; +REGISTER : R E G I S T E R; REMOVE : R E M O V E ; RETURN : R E T U R N ; SET : S E T ; diff --git a/src/query/frontend/opencypher/grammar/MemgraphCypher.g4 b/src/query/frontend/opencypher/grammar/MemgraphCypher.g4 index d585acbb1..e41184468 100644 --- a/src/query/frontend/opencypher/grammar/MemgraphCypher.g4 +++ b/src/query/frontend/opencypher/grammar/MemgraphCypher.g4 @@ -48,10 +48,12 @@ memgraphCypherKeyword : cypherKeyword | DATABASE | DENY | DROP + | DO | DUMP | EDGE | EDGE_TYPES | EXECUTE + | FAILOVER | FOR | FOREACH | FREE @@ -61,6 +63,7 @@ memgraphCypherKeyword : cypherKeyword | GRANT | HEADER | IDENTIFIED + | INSTANCE | NODE_LABELS | NULLIF | IMPORT @@ -151,6 +154,7 @@ query : cypherQuery | multiDatabaseQuery | showDatabases | edgeImportModeQuery + | coordinatorQuery ; cypherQuery : ( indexHints )? singleQuery ( cypherUnion )* ( queryMemoryLimit )? ; @@ -183,6 +187,11 @@ replicationQuery : setReplicationRole | showReplicas ; +coordinatorQuery : registerInstanceOnCoordinator + | setInstanceToMain + | showReplicationCluster + ; + triggerQuery : createTrigger | dropTrigger | showTriggers @@ -323,6 +332,7 @@ privilege : CREATE | STORAGE_MODE | MULTI_DATABASE_EDIT | MULTI_DATABASE_USE + | COORDINATOR ; granularPrivilege : NOTHING | READ | UPDATE | CREATE_DELETE ; @@ -364,14 +374,23 @@ setReplicationRole : SET REPLICATION ROLE TO ( MAIN | REPLICA ) showReplicationRole : SHOW REPLICATION ROLE ; -replicaName : symbolicName ; +showReplicationCluster : SHOW REPLICATION CLUSTER ; + +instanceName : symbolicName ; socketAddress : literal ; -registerReplica : REGISTER REPLICA replicaName ( SYNC | ASYNC ) +coordinatorSocketAddress : literal ; +replicationSocketAddress : literal ; + +registerReplica : REGISTER REPLICA instanceName ( SYNC | ASYNC ) TO socketAddress ; -dropReplica : DROP REPLICA replicaName ; +registerInstanceOnCoordinator : REGISTER INSTANCE instanceName ON coordinatorSocketAddress ( AS ASYNC ) ? 
WITH replicationSocketAddress ; + +setInstanceToMain : SET INSTANCE instanceName TO MAIN ; + +dropReplica : DROP REPLICA instanceName ; showReplicas : SHOW REPLICAS ; @@ -480,6 +499,7 @@ transactionId : literal ; multiDatabaseQuery : createDatabase | useDatabase | dropDatabase + | showDatabase ; createDatabase : CREATE DATABASE databaseName ; @@ -488,6 +508,8 @@ useDatabase : USE DATABASE databaseName ; dropDatabase : DROP DATABASE databaseName ; +showDatabase : SHOW DATABASE ; + showDatabases : SHOW DATABASES ; edgeImportModeQuery : EDGE IMPORT MODE ( ACTIVE | INACTIVE ) ; diff --git a/src/query/frontend/opencypher/grammar/MemgraphCypherLexer.g4 b/src/query/frontend/opencypher/grammar/MemgraphCypherLexer.g4 index 1b44a6e79..b0febc4af 100644 --- a/src/query/frontend/opencypher/grammar/MemgraphCypherLexer.g4 +++ b/src/query/frontend/opencypher/grammar/MemgraphCypherLexer.g4 @@ -39,15 +39,18 @@ BOOTSTRAP_SERVERS : B O O T S T R A P UNDERSCORE S E R V E R S ; CALL : C A L L ; CHECK : C H E C K ; CLEAR : C L E A R ; +CLUSTER : C L U S T E R ; COMMIT : C O M M I T ; COMMITTED : C O M M I T T E D ; CONFIG : C O N F I G ; CONFIGS : C O N F I G S; CONSUMER_GROUP : C O N S U M E R UNDERSCORE G R O U P ; +COORDINATOR : C O O R D I N A T O R ; CREATE_DELETE : C R E A T E UNDERSCORE D E L E T E ; CREDENTIALS : C R E D E N T I A L S ; CSV : C S V ; DATA : D A T A ; +DO : D O ; DELIMITER : D E L I M I T E R ; DATABASE : D A T A B A S E ; DATABASES : D A T A B A S E S ; @@ -59,6 +62,7 @@ DURABILITY : D U R A B I L I T Y ; EDGE : E D G E ; EDGE_TYPES : E D G E UNDERSCORE T Y P E S ; EXECUTE : E X E C U T E ; +FAILOVER : F A I L O V E R ; FOR : F O R ; FOREACH : F O R E A C H; FREE : F R E E ; @@ -75,6 +79,7 @@ IMPORT : I M P O R T ; INACTIVE : I N A C T I V E ; IN_MEMORY_ANALYTICAL : I N UNDERSCORE M E M O R Y UNDERSCORE A N A L Y T I C A L ; IN_MEMORY_TRANSACTIONAL : I N UNDERSCORE M E M O R Y UNDERSCORE T R A N S A C T I O N A L ; +INSTANCE : I N S T A N C E ; ISOLATION : I S O L A T I O N ; KAFKA : K A F K A ; LABELS : L A B E L S ; @@ -107,6 +112,7 @@ REVOKE : R E V O K E ; ROLE : R O L E ; ROLES : R O L E S ; QUOTE : Q U O T E ; +SERVER : S E R V E R ; SERVICE_URL : S E R V I C E UNDERSCORE U R L ; SESSION : S E S S I O N ; SETTING : S E T T I N G ; diff --git a/src/query/frontend/semantic/required_privileges.cpp b/src/query/frontend/semantic/required_privileges.cpp index 04772cded..ef66a75ac 100644 --- a/src/query/frontend/semantic/required_privileges.cpp +++ b/src/query/frontend/semantic/required_privileges.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
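Apart from the coordinator syntax, the grammar above also lets LIMIT take a parameter (limit : LIMIT ( expression | parameter )), mirrored by the visitReturnBody change earlier in cypher_main_visitor.cpp. A sketch of the intended use, with an arbitrarily named parameter supplied at execution time:

    MATCH (n) RETURN n LIMIT $row_count;

The new COORDINATOR privilege added to the privilege rule is what the PrivilegeExtractor below attaches to CoordinatorQuery.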
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -106,6 +106,7 @@ class PrivilegeExtractor : public QueryVisitor, public HierarchicalTreeVis AddPrivilege(AuthQuery::Privilege::MULTI_DATABASE_EDIT); break; case MultiDatabaseQuery::Action::USE: + case MultiDatabaseQuery::Action::SHOW: AddPrivilege(AuthQuery::Privilege::MULTI_DATABASE_USE); break; } @@ -115,6 +116,8 @@ class PrivilegeExtractor : public QueryVisitor, public HierarchicalTreeVis AddPrivilege(AuthQuery::Privilege::MULTI_DATABASE_USE); /* OR EDIT */ } + void Visit(CoordinatorQuery & /*coordinator_query*/) override { AddPrivilege(AuthQuery::Privilege::COORDINATOR); } + bool PreVisit(Create & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::CREATE); return false; diff --git a/src/query/frontend/stripped_lexer_constants.hpp b/src/query/frontend/stripped_lexer_constants.hpp index 21a14ae83..bd6ab7971 100644 --- a/src/query/frontend/stripped_lexer_constants.hpp +++ b/src/query/frontend/stripped_lexer_constants.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -218,7 +218,8 @@ const trie::Trie kKeywords = {"union", "directory", "lock", "unlock", - "build"}; + "build", + "instance"}; // Unicode codepoints that are allowed at the start of the unescaped name. const std::bitset kUnescapedNameAllowedStarts( diff --git a/src/query/interpret/eval.hpp b/src/query/interpret/eval.hpp index 017dc9101..fe47a3fcd 100644 --- a/src/query/interpret/eval.hpp +++ b/src/query/interpret/eval.hpp @@ -1121,11 +1121,11 @@ class ExpressionEvaluator : public ExpressionVisitor { throw QueryRuntimeException("Unexpected error when getting properties."); } } - return *maybe_props; + return *std::move(maybe_props); } template - storage::PropertyValue GetProperty(const TRecordAccessor &record_accessor, PropertyIx prop) { + storage::PropertyValue GetProperty(const TRecordAccessor &record_accessor, const PropertyIx &prop) { auto maybe_prop = record_accessor.GetProperty(view_, ctx_->properties[prop.ix]); if (maybe_prop.HasError() && maybe_prop.GetError() == storage::Error::NONEXISTENT_OBJECT) { // This is a very nasty and temporary hack in order to make MERGE work. 
@@ -1148,7 +1148,7 @@ class ExpressionEvaluator : public ExpressionVisitor { throw QueryRuntimeException("Unexpected error when getting a property."); } } - return *maybe_prop; + return *std::move(maybe_prop); } template @@ -1178,7 +1178,7 @@ class ExpressionEvaluator : public ExpressionVisitor { return *maybe_prop; } - storage::LabelId GetLabel(LabelIx label) { return ctx_->labels[label.ix]; } + storage::LabelId GetLabel(const LabelIx &label) { return ctx_->labels[label.ix]; } Frame *frame_; const SymbolTable *symbol_table_; diff --git a/src/query/interpreter.cpp b/src/query/interpreter.cpp index bbbc01b87..b27083ee4 100644 --- a/src/query/interpreter.cpp +++ b/src/query/interpreter.cpp @@ -35,9 +35,9 @@ #include "auth/models.hpp" #include "csv/parsing.hpp" #include "dbms/database.hpp" -#include "dbms/dbms_handler.hpp" #include "dbms/global.hpp" #include "dbms/inmemory/storage_helper.hpp" +#include "flags/replication.hpp" #include "flags/run_time_configurable.hpp" #include "glue/communication.hpp" #include "license/license.hpp" @@ -101,12 +101,17 @@ #include "utils/typeinfo.hpp" #include "utils/variant_helpers.hpp" +#include "dbms/coordinator_handler.hpp" #include "dbms/dbms_handler.hpp" #include "dbms/replication_handler.hpp" #include "query/auth_query_handler.hpp" #include "query/interpreter_context.hpp" #include "replication/state.hpp" +#ifdef MG_ENTERPRISE +#include "coordination/constants.hpp" +#endif + namespace memgraph::metrics { extern Event ReadQuery; extern Event WriteQuery; @@ -121,6 +126,7 @@ extern const Event CommitedTransactions; extern const Event RollbackedTransactions; extern const Event ActiveTransactions; } // namespace memgraph::metrics + void memgraph::query::CurrentDB::SetupDatabaseTransaction( std::optional override_isolation_level, bool could_commit, bool unique) { auto &db_acc = *db_acc_; @@ -148,6 +154,7 @@ void memgraph::query::CurrentDB::CleanupDBTransaction(bool abort) { namespace memgraph::query { constexpr std::string_view kSchemaAssert = "SCHEMA.ASSERT"; +constexpr int kSystemTxTryMS = 100; //!< Duration of the unique try_lock_for template constexpr auto kAlwaysFalse = false; @@ -259,33 +266,61 @@ bool IsAllShortestPathsQuery(const std::vector &claus return false; } -inline auto convertToReplicationMode(const ReplicationQuery::SyncMode &sync_mode) -> replication::ReplicationMode { +inline auto convertFromCoordinatorToReplicationMode(const CoordinatorQuery::SyncMode &sync_mode) + -> replication_coordination_glue::ReplicationMode { switch (sync_mode) { - case ReplicationQuery::SyncMode::ASYNC: { - return replication::ReplicationMode::ASYNC; + case CoordinatorQuery::SyncMode::ASYNC: { + return replication_coordination_glue::ReplicationMode::ASYNC; } - case ReplicationQuery::SyncMode::SYNC: { - return replication::ReplicationMode::SYNC; + case CoordinatorQuery::SyncMode::SYNC: { + return replication_coordination_glue::ReplicationMode::SYNC; } } // TODO: C++23 std::unreachable() - return replication::ReplicationMode::ASYNC; + return replication_coordination_glue::ReplicationMode::ASYNC; } -class ReplQueryHandler final : public query::ReplicationQueryHandler { +inline auto convertToReplicationMode(const ReplicationQuery::SyncMode &sync_mode) + -> replication_coordination_glue::ReplicationMode { + switch (sync_mode) { + case ReplicationQuery::SyncMode::ASYNC: { + return replication_coordination_glue::ReplicationMode::ASYNC; + } + case ReplicationQuery::SyncMode::SYNC: { + return replication_coordination_glue::ReplicationMode::SYNC; + } + } + // TODO: C++23 
std::unreachable() + return replication_coordination_glue::ReplicationMode::ASYNC; +} + +class ReplQueryHandler { public: - explicit ReplQueryHandler(dbms::DbmsHandler *dbms_handler) : dbms_handler_(dbms_handler), handler_{*dbms_handler} {} + struct ReplicaInfo { + std::string name; + std::string socket_address; + ReplicationQuery::SyncMode sync_mode; + std::optional timeout; + uint64_t current_timestamp_of_replica; + uint64_t current_number_of_timestamp_behind_master; + ReplicationQuery::ReplicaState state; + }; + + explicit ReplQueryHandler(dbms::DbmsHandler *dbms_handler) : handler_{*dbms_handler} {} /// @throw QueryRuntimeException if an error ocurred. - void SetReplicationRole(ReplicationQuery::ReplicationRole replication_role, std::optional port) override { - if (replication_role == ReplicationQuery::ReplicationRole::MAIN) { - if (!handler_.SetReplicationRoleMain()) { - throw QueryRuntimeException("Couldn't set role to main!"); - } - } else { - if (!port || *port < 0 || *port > std::numeric_limits::max()) { + void SetReplicationRole(ReplicationQuery::ReplicationRole replication_role, std::optional port) { + auto ValidatePort = [](std::optional port) -> void { + if (*port < 0 || *port > std::numeric_limits::max()) { throw QueryRuntimeException("Port number invalid!"); } + }; + if (replication_role == ReplicationQuery::ReplicationRole::MAIN) { + if (!handler_.SetReplicationRoleMain()) { + throw QueryRuntimeException("Couldn't set replication role to main!"); + } + } else { + ValidatePort(port); auto const config = memgraph::replication::ReplicationServerConfig{ .ip_address = memgraph::replication::kDefaultReplicationServerIp, @@ -299,11 +334,11 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler { } /// @throw QueryRuntimeException if an error ocurred. - ReplicationQuery::ReplicationRole ShowReplicationRole() const override { + ReplicationQuery::ReplicationRole ShowReplicationRole() const { switch (handler_.GetRole()) { - case memgraph::replication::ReplicationRole::MAIN: + case memgraph::replication_coordination_glue::ReplicationRole::MAIN: return ReplicationQuery::ReplicationRole::MAIN; - case memgraph::replication::ReplicationRole::REPLICA: + case memgraph::replication_coordination_glue::ReplicationRole::REPLICA: return ReplicationQuery::ReplicationRole::REPLICA; } throw QueryRuntimeException("Couldn't show replication role - invalid role set!"); @@ -311,36 +346,41 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler { /// @throw QueryRuntimeException if an error ocurred. 
void RegisterReplica(const std::string &name, const std::string &socket_address, - const ReplicationQuery::SyncMode sync_mode, - const std::chrono::seconds replica_check_frequency) override { + const ReplicationQuery::SyncMode sync_mode, const std::chrono::seconds replica_check_frequency) { + // Coordinator is main by default so this check is OK although it should actually be nothing (neither main nor + // replica) if (handler_.IsReplica()) { // replica can't register another replica throw QueryRuntimeException("Replica can't register another replica!"); } - auto repl_mode = convertToReplicationMode(sync_mode); + const auto repl_mode = convertToReplicationMode(sync_mode); - auto maybe_ip_and_port = + const auto maybe_ip_and_port = io::network::Endpoint::ParseSocketOrAddress(socket_address, memgraph::replication::kDefaultReplicationPort); if (maybe_ip_and_port) { - auto [ip, port] = *maybe_ip_and_port; - auto config = replication::ReplicationClientConfig{.name = name, - .mode = repl_mode, - .ip_address = ip, - .port = port, - .replica_check_frequency = replica_check_frequency, - .ssl = std::nullopt}; - auto ret = handler_.RegisterReplica(config); - if (ret.HasError()) { + const auto [ip, port] = *maybe_ip_and_port; + const auto replication_config = + replication::ReplicationClientConfig{.name = name, + .mode = repl_mode, + .ip_address = ip, + .port = port, + .replica_check_frequency = replica_check_frequency, + .ssl = std::nullopt}; + + const auto error = handler_.RegisterReplica(replication_config).HasError(); + + if (error) { throw QueryRuntimeException(fmt::format("Couldn't register replica '{}'!", name)); } + } else { throw QueryRuntimeException("Invalid socket address!"); } } /// @throw QueryRuntimeException if an error occurred. - void DropReplica(std::string_view replica_name) override { + void DropReplica(std::string_view replica_name) { auto const result = handler_.UnregisterReplica(replica_name); switch (result) { using enum memgraph::dbms::UnregisterReplicaResult; @@ -355,8 +395,7 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler { } } - using Replica = ReplicationQueryHandler::Replica; - std::vector ShowReplicas() const override { + std::vector ShowReplicas(const dbms::Database &db) const { if (handler_.IsReplica()) { // replica can't show registered replicas (it shouldn't have any) throw QueryRuntimeException("Replica can't show registered replicas (it shouldn't have any)!"); @@ -364,27 +403,19 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler { // TODO: Combine results? Have a single place with clients??? 
// Also authentication checks (replica + database visibility) - std::vector repl_infos{}; - dbms_handler_->ForOne([&repl_infos](dbms::Database *db) -> bool { - auto infos = db->storage()->ReplicasInfo(); - if (!infos.empty()) { - repl_infos = std::move(infos); - return true; - } - return false; - }); - std::vector replicas; + const auto repl_infos = db.storage()->ReplicasInfo(); + std::vector replicas; replicas.reserve(repl_infos.size()); - const auto from_info = [](const auto &repl_info) -> Replica { - Replica replica; + const auto from_info = [](const auto &repl_info) -> ReplicaInfo { + ReplicaInfo replica; replica.name = repl_info.name; replica.socket_address = repl_info.endpoint.SocketAddress(); switch (repl_info.mode) { - case memgraph::replication::ReplicationMode::SYNC: + case replication_coordination_glue::ReplicationMode::SYNC: replica.sync_mode = ReplicationQuery::SyncMode::SYNC; break; - case memgraph::replication::ReplicationMode::ASYNC: + case replication_coordination_glue::ReplicationMode::ASYNC: replica.sync_mode = ReplicationQuery::SyncMode::ASYNC; break; } @@ -416,10 +447,104 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler { } private: - dbms::DbmsHandler *dbms_handler_; dbms::ReplicationHandler handler_; }; +class CoordQueryHandler final : public query::CoordinatorQueryHandler { + public: + explicit CoordQueryHandler(dbms::DbmsHandler *dbms_handler) : handler_ { *dbms_handler } +#ifdef MG_ENTERPRISE + , coordinator_handler_(*dbms_handler) +#endif + { + } + +#ifdef MG_ENTERPRISE + /// @throw QueryRuntimeException if an error ocurred. + void RegisterInstance(const std::string &coordinator_socket_address, const std::string &replication_socket_address, + const std::chrono::seconds instance_check_frequency, const std::string &instance_name, + CoordinatorQuery::SyncMode sync_mode) override { + const auto maybe_replication_ip_port = + io::network::Endpoint::ParseSocketOrAddress(replication_socket_address, std::nullopt); + if (!maybe_replication_ip_port) { + throw QueryRuntimeException("Invalid replication socket address!"); + } + + const auto maybe_coordinator_ip_port = + io::network::Endpoint::ParseSocketOrAddress(coordinator_socket_address, std::nullopt); + if (!maybe_replication_ip_port) { + throw QueryRuntimeException("Invalid replication socket address!"); + } + + const auto [replication_ip, replication_port] = *maybe_replication_ip_port; + const auto [coordinator_server_ip, coordinator_server_port] = *maybe_coordinator_ip_port; + const auto repl_config = coordination::CoordinatorClientConfig::ReplicationClientInfo{ + .instance_name = instance_name, + .replication_mode = convertFromCoordinatorToReplicationMode(sync_mode), + .replication_ip_address = replication_ip, + .replication_port = replication_port}; + + auto coordinator_client_config = + coordination::CoordinatorClientConfig{.instance_name = instance_name, + .ip_address = coordinator_server_ip, + .port = coordinator_server_port, + .health_check_frequency_sec = instance_check_frequency, + .replication_client_info = repl_config, + .ssl = std::nullopt}; + + auto status = coordinator_handler_.RegisterInstance(coordinator_client_config); + switch (status) { + using enum memgraph::coordination::RegisterInstanceCoordinatorStatus; + case NAME_EXISTS: + throw QueryRuntimeException("Couldn't register replica instance since instance with such name already exists!"); + case END_POINT_EXISTS: + throw QueryRuntimeException( + "Couldn't register replica instance since instance with such endpoint already 
exists!"); + case COULD_NOT_BE_PERSISTED: + throw QueryRuntimeException("Couldn't register replica instance since it couldn't be persisted!"); + case NOT_COORDINATOR: + throw QueryRuntimeException("Couldn't register replica instance since this instance is not a coordinator!"); + case RPC_FAILED: + throw QueryRuntimeException( + "Couldn't register replica because promotion on replica failed! Check logs on replica to find out more " + "info!"); + case SUCCESS: + break; + } + } + + void SetInstanceToMain(const std::string &instance_name) override { + auto status = coordinator_handler_.SetInstanceToMain(instance_name); + switch (status) { + using enum memgraph::coordination::SetInstanceToMainCoordinatorStatus; + case NO_INSTANCE_WITH_NAME: + throw QueryRuntimeException("No instance with such name!"); + case NOT_COORDINATOR: + throw QueryRuntimeException("Couldn't set replica instance to main since this instance is not a coordinator!"); + case COULD_NOT_PROMOTE_TO_MAIN: + throw QueryRuntimeException( + "Couldn't set replica instance to main. Check coordinator and replica for more logs"); + case SUCCESS: + break; + } + } + +#endif + +#ifdef MG_ENTERPRISE + std::vector ShowInstances() const override { + return coordinator_handler_.ShowInstances(); + } + +#endif + + private: + dbms::ReplicationHandler handler_; +#ifdef MG_ENTERPRISE + dbms::CoordinatorHandler coordinator_handler_; +#endif +}; + /// returns false if the replication role can't be set /// @throw QueryRuntimeException if an error ocurred. @@ -711,8 +836,8 @@ Callback HandleAuthQuery(AuthQuery *auth_query, InterpreterContext *interpreter_ } // namespace Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters ¶meters, - dbms::DbmsHandler *dbms_handler, const query::InterpreterConfig &config, - std::vector *notifications) { + dbms::DbmsHandler *dbms_handler, CurrentDB ¤t_db, + const query::InterpreterConfig &config, std::vector *notifications) { // TODO: MemoryResource for EvaluationContext, it should probably be passed as // the argument to Callback. 
EvaluationContext evaluation_context; @@ -723,6 +848,15 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters & Callback callback; switch (repl_query->action_) { case ReplicationQuery::Action::SET_REPLICATION_ROLE: { +#ifdef MG_ENTERPRISE + if (FLAGS_coordinator) { + throw QueryRuntimeException("Coordinator can't set roles!"); + } + if (FLAGS_coordinator_server_port) { + throw QueryRuntimeException("Can't set role manually on instance with coordinator server port."); + } +#endif + auto port = EvaluateOptionalExpression(repl_query->port_, evaluator); std::optional maybe_port; if (port.IsInt()) { @@ -743,6 +877,12 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters & return callback; } case ReplicationQuery::Action::SHOW_REPLICATION_ROLE: { +#ifdef MG_ENTERPRISE + if (FLAGS_coordinator) { + throw QueryRuntimeException("Coordinator doesn't have a replication role!"); + } +#endif + callback.header = {"replication role"}; callback.fn = [handler = ReplQueryHandler{dbms_handler}] { auto mode = handler.ShowReplicationRole(); @@ -758,7 +898,12 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters & return callback; } case ReplicationQuery::Action::REGISTER_REPLICA: { - const auto &name = repl_query->replica_name_; +#ifdef MG_ENTERPRISE + if (FLAGS_coordinator_server_port) { + throw QueryRuntimeException("Can't register replica manually on instance with coordinator server port."); + } +#endif + const auto &name = repl_query->instance_name_; const auto &sync_mode = repl_query->sync_mode_; auto socket_address = repl_query->socket_address_->Accept(evaluator); const auto replica_check_frequency = config.replication_replica_check_frequency; @@ -769,25 +914,38 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters & return std::vector>(); }; notifications->emplace_back(SeverityLevel::INFO, NotificationCode::REGISTER_REPLICA, - fmt::format("Replica {} is registered.", repl_query->replica_name_)); + fmt::format("Replica {} is registered.", repl_query->instance_name_)); return callback; } + case ReplicationQuery::Action::DROP_REPLICA: { - const auto &name = repl_query->replica_name_; +#ifdef MG_ENTERPRISE + if (FLAGS_coordinator_server_port) { + throw QueryRuntimeException("Can't drop replica manually on instance with coordinator server port."); + } +#endif + const auto &name = repl_query->instance_name_; callback.fn = [handler = ReplQueryHandler{dbms_handler}, name]() mutable { handler.DropReplica(name); return std::vector>(); }; notifications->emplace_back(SeverityLevel::INFO, NotificationCode::DROP_REPLICA, - fmt::format("Replica {} is dropped.", repl_query->replica_name_)); + fmt::format("Replica {} is dropped.", repl_query->instance_name_)); return callback; } case ReplicationQuery::Action::SHOW_REPLICAS: { +#ifdef MG_ENTERPRISE + if (FLAGS_coordinator) { + throw QueryRuntimeException("Coordinator cannot call SHOW REPLICAS! 
Use SHOW REPLICATION CLUSTER instead."); + } +#endif + callback.header = { "name", "socket_address", "sync_mode", "current_timestamp_of_replica", "number_of_timestamp_behind_master", "state"}; - callback.fn = [handler = ReplQueryHandler{dbms_handler}, replica_nfields = callback.header.size()] { - const auto &replicas = handler.ShowReplicas(); + callback.fn = [handler = ReplQueryHandler{dbms_handler}, replica_nfields = callback.header.size(), + db_acc = current_db.db_acc_] { + const auto &replicas = handler.ShowReplicas(*db_acc->get()); auto typed_replicas = std::vector>{}; typed_replicas.reserve(replicas.size()); for (const auto &replica : replicas) { @@ -833,6 +991,110 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters & } } +Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Parameters ¶meters, + dbms::DbmsHandler *dbms_handler, const query::InterpreterConfig &config, + std::vector *notifications) { + Callback callback; + switch (coordinator_query->action_) { + case CoordinatorQuery::Action::REGISTER_INSTANCE: { + if (!license::global_license_checker.IsEnterpriseValidFast()) { + throw QueryException("Trying to use enterprise feature without a valid license."); + } +#ifdef MG_ENTERPRISE + if constexpr (!coordination::allow_ha) { + throw QueryRuntimeException( + "High availability is experimental feature. Please set MG_EXPERIMENTAL_HIGH_AVAILABILITY compile flag to " + "be able to use this functionality."); + } + if (!FLAGS_coordinator) { + throw QueryRuntimeException("Only coordinator can register coordinator server!"); + } + // TODO: MemoryResource for EvaluationContext, it should probably be passed as + // the argument to Callback. + EvaluationContext evaluation_context{.timestamp = QueryTimestamp(), .parameters = parameters}; + auto evaluator = PrimitiveLiteralExpressionEvaluator{evaluation_context}; + + auto coordinator_socket_address_tv = coordinator_query->coordinator_socket_address_->Accept(evaluator); + auto replication_socket_address_tv = coordinator_query->replication_socket_address_->Accept(evaluator); + callback.fn = [handler = CoordQueryHandler{dbms_handler}, coordinator_socket_address_tv, + replication_socket_address_tv, main_check_frequency = config.replication_replica_check_frequency, + instance_name = coordinator_query->instance_name_, + sync_mode = coordinator_query->sync_mode_]() mutable { + handler.RegisterInstance(std::string(coordinator_socket_address_tv.ValueString()), + std::string(replication_socket_address_tv.ValueString()), main_check_frequency, + instance_name, sync_mode); + return std::vector>(); + }; + + notifications->emplace_back( + SeverityLevel::INFO, NotificationCode::REGISTER_COORDINATOR_SERVER, + fmt::format("Coordinator has registered coordinator server on {} for instance {}.", + coordinator_socket_address_tv.ValueString(), coordinator_query->instance_name_)); + return callback; +#endif + } + case CoordinatorQuery::Action::SET_INSTANCE_TO_MAIN: { + if (!license::global_license_checker.IsEnterpriseValidFast()) { + throw QueryException("Trying to use enterprise feature without a valid license."); + } +#ifdef MG_ENTERPRISE + if constexpr (!coordination::allow_ha) { + throw QueryRuntimeException( + "High availability is experimental feature. 
Please set MG_EXPERIMENTAL_HIGH_AVAILABILITY compile flag to " + "be able to use this functionality."); + } + if (!FLAGS_coordinator) { + throw QueryRuntimeException("Only coordinator can register coordinator server!"); + } + // TODO: MemoryResource for EvaluationContext, it should probably be passed as + // the argument to Callback. + EvaluationContext evaluation_context{.timestamp = QueryTimestamp(), .parameters = parameters}; + auto evaluator = PrimitiveLiteralExpressionEvaluator{evaluation_context}; + + callback.fn = [handler = CoordQueryHandler{dbms_handler}, + instance_name = coordinator_query->instance_name_]() mutable { + handler.SetInstanceToMain(instance_name); + return std::vector>(); + }; + + return callback; +#endif + } + case CoordinatorQuery::Action::SHOW_REPLICATION_CLUSTER: { + if (!license::global_license_checker.IsEnterpriseValidFast()) { + throw QueryException("Trying to use enterprise feature without a valid license."); + } +#ifdef MG_ENTERPRISE + if constexpr (!coordination::allow_ha) { + throw QueryRuntimeException( + "High availability is experimental feature. Please set MG_EXPERIMENTAL_HIGH_AVAILABILITY compile flag to " + "be able to use this functionality."); + } + if (!FLAGS_coordinator) { + throw QueryRuntimeException("Only coordinator can run SHOW REPLICATION CLUSTER."); + } + + callback.header = {"name", "socket_address", "alive", "role"}; + callback.fn = [handler = CoordQueryHandler{dbms_handler}, replica_nfields = callback.header.size()]() mutable { + auto const instances = handler.ShowInstances(); + std::vector> result{}; + result.reserve(result.size()); + + std::ranges::transform(instances, std::back_inserter(result), + [](const auto &status) -> std::vector { + return {TypedValue{status.instance_name}, TypedValue{status.socket_address}, + TypedValue{status.is_alive}, TypedValue{status.replication_role}}; + }); + + return result; + }; + return callback; +#endif + } + return callback; + } +} + stream::CommonStreamInfo GetCommonStreamInfo(StreamQuery *stream_query, ExpressionVisitor &evaluator) { return { .batch_interval = GetOptionalValue(stream_query->batch_interval_, evaluator) @@ -963,12 +1225,19 @@ Callback HandleStreamQuery(StreamQuery *stream_query, const Parameters ¶mete throw utils::BasicException("Parameter BATCH_LIMIT cannot hold negative value"); } - callback.fn = [streams = db_acc->streams(), stream_name = stream_query->stream_name_, batch_limit, timeout]() { + callback.fn = [db_acc, streams = db_acc->streams(), stream_name = stream_query->stream_name_, batch_limit, + timeout]() { + if (db_acc.is_deleting()) { + throw QueryException("Can not start stream while database is being dropped."); + } streams->StartWithLimit(stream_name, static_cast(batch_limit.value()), timeout); return std::vector>{}; }; } else { - callback.fn = [streams = db_acc->streams(), stream_name = stream_query->stream_name_]() { + callback.fn = [db_acc, streams = db_acc->streams(), stream_name = stream_query->stream_name_]() { + if (db_acc.is_deleting()) { + throw QueryException("Can not start stream while database is being dropped."); + } streams->Start(stream_name); return std::vector>{}; }; @@ -1457,8 +1726,7 @@ PreparedQuery Interpreter::PrepareTransactionQuery(std::string_view query_upper, std::function handler; if (query_upper == "BEGIN") { - query_executions_.clear(); - transaction_queries_->clear(); + ResetInterpreter(); // TODO: Evaluate doing move(extras). Currently the extras is very small, but this will be important if it ever // becomes large. 
handler = [this, extras = extras] { @@ -2279,14 +2547,14 @@ PreparedQuery PrepareAuthQuery(ParsedQuery parsed_query, bool in_explicit_transa PreparedQuery PrepareReplicationQuery(ParsedQuery parsed_query, bool in_explicit_transaction, std::vector *notifications, dbms::DbmsHandler &dbms_handler, - const InterpreterConfig &config) { + CurrentDB ¤t_db, const InterpreterConfig &config) { if (in_explicit_transaction) { throw ReplicationModificationInMulticommandTxException(); } auto *replication_query = utils::Downcast(parsed_query.query); - auto callback = - HandleReplicationQuery(replication_query, parsed_query.parameters, &dbms_handler, config, notifications); + auto callback = HandleReplicationQuery(replication_query, parsed_query.parameters, &dbms_handler, current_db, config, + notifications); return PreparedQuery{callback.header, std::move(parsed_query.required_privileges), [callback_fn = std::move(callback.fn), pull_plan = std::shared_ptr{nullptr}]( @@ -2305,6 +2573,34 @@ PreparedQuery PrepareReplicationQuery(ParsedQuery parsed_query, bool in_explicit // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) } +PreparedQuery PrepareCoordinatorQuery(ParsedQuery parsed_query, bool in_explicit_transaction, + std::vector *notifications, dbms::DbmsHandler &dbms_handler, + const InterpreterConfig &config) { + if (in_explicit_transaction) { + throw CoordinatorModificationInMulticommandTxException(); + } + + auto *coordinator_query = utils::Downcast(parsed_query.query); + auto callback = + HandleCoordinatorQuery(coordinator_query, parsed_query.parameters, &dbms_handler, config, notifications); + + return PreparedQuery{callback.header, std::move(parsed_query.required_privileges), + [callback_fn = std::move(callback.fn), pull_plan = std::shared_ptr{nullptr}]( + AnyStream *stream, std::optional n) mutable -> std::optional { + if (UNLIKELY(!pull_plan)) { + pull_plan = std::make_shared(callback_fn()); + } + + if (pull_plan->Pull(stream, n)) [[likely]] { + return QueryHandlerResult::COMMIT; + } + return std::nullopt; + }, + RWType::NONE}; + // False positive report for the std::make_shared above + // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) +} + PreparedQuery PrepareLockPathQuery(ParsedQuery parsed_query, bool in_explicit_transaction, CurrentDB ¤t_db) { if (in_explicit_transaction) { throw LockPathModificationInMulticommandTxException(); @@ -2806,7 +3102,7 @@ PreparedQuery PrepareEdgeImportModeQuery(ParsedQuery parsed_query, CurrentDB &cu } PreparedQuery PrepareCreateSnapshotQuery(ParsedQuery parsed_query, bool in_explicit_transaction, CurrentDB ¤t_db, - replication::ReplicationRole replication_role) { + replication_coordination_glue::ReplicationRole replication_role) { if (in_explicit_transaction) { throw CreateSnapshotInMulticommandTxException(); } @@ -2884,7 +3180,7 @@ auto ShowTransactions(const std::unordered_set &interpreters, con auto get_interpreter_db_name = [&]() -> std::string const & { static std::string all; - return interpreter->current_db_.db_acc_ ? interpreter->current_db_.db_acc_->get()->id() : all; + return interpreter->current_db_.db_acc_ ? 
interpreter->current_db_.db_acc_->get()->name() : all; }; if (transaction_id.has_value() && (interpreter->username_ == username || privilege_checker(get_interpreter_db_name()))) { @@ -3070,7 +3366,7 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici case DatabaseInfoQuery::InfoType::EDGE_TYPES: { header = {"edge types"}; handler = [storage = current_db.db_acc_->get()->storage(), dba] { - if (!storage->config_.items.enable_schema_metadata) { + if (!storage->config_.salient.items.enable_schema_metadata) { throw QueryRuntimeException( "The metadata collection for edge-types is disabled. To enable it, restart your instance and set the " "storage-enable-schema-metadata flag to True."); @@ -3090,7 +3386,7 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici case DatabaseInfoQuery::InfoType::NODE_LABELS: { header = {"node labels"}; handler = [storage = current_db.db_acc_->get()->storage(), dba] { - if (!storage->config_.items.enable_schema_metadata) { + if (!storage->config_.salient.items.enable_schema_metadata) { throw QueryRuntimeException( "The metadata collection for node-labels is disabled. To enable it, restart your instance and set the " "storage-enable-schema-metadata flag to True."); @@ -3149,7 +3445,7 @@ PreparedQuery PrepareSystemInfoQuery(ParsedQuery parsed_query, bool in_explicit_ const int64_t vm_max_map_count_storage_info = vm_max_map_count.has_value() ? vm_max_map_count.value() : memgraph::utils::VM_MAX_MAP_COUNT_DEFAULT; std::vector> results{ - {TypedValue("name"), TypedValue(storage->id())}, + {TypedValue("name"), TypedValue(storage->name())}, {TypedValue("vertex_count"), TypedValue(static_cast(info.vertex_count))}, {TypedValue("edge_count"), TypedValue(static_cast(info.edge_count))}, {TypedValue("average_degree"), TypedValue(info.average_degree)}, @@ -3415,8 +3711,6 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur if (!license::global_license_checker.IsEnterpriseValidFast()) { throw QueryException("Trying to use enterprise feature without a valid license."); } - // TODO: Remove once replicas support multi-tenant replication - if (!current_db.db_acc_) throw DatabaseContextRequiredException("Multi database queries require a defined database."); auto *query = utils::Downcast(parsed_query.query); auto *db_handler = interpreter_context->dbms_handler; @@ -3424,7 +3718,7 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur const bool is_replica = interpreter_context->repl_state->IsReplica(); switch (query->action_) { - case MultiDatabaseQuery::Action::CREATE: + case MultiDatabaseQuery::Action::CREATE: { if (is_replica) { throw QueryException("Query forbidden on the replica!"); } @@ -3465,8 +3759,8 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur RWType::W, "" // No target DB possible }; - - case MultiDatabaseQuery::Action::USE: + } + case MultiDatabaseQuery::Action::USE: { if (current_db.in_explicit_db_) { throw QueryException("Database switching is prohibited if session explicitly defines the used database"); } @@ -3481,7 +3775,7 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur std::string res; try { - if (current_db.db_acc_ && db_name == current_db.db_acc_->get()->id()) { + if (current_db.db_acc_ && db_name == current_db.db_acc_->get()->name()) { res = "Already using " + db_name; } else { auto tmp = db_handler->Get(db_name); @@ -3502,11 +3796,12 @@ PreparedQuery 
PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur }, RWType::NONE, query->db_name_}; - - case MultiDatabaseQuery::Action::DROP: + } + case MultiDatabaseQuery::Action::DROP: { if (is_replica) { throw QueryException("Query forbidden on the replica!"); } + return PreparedQuery{ {"STATUS"}, std::move(parsed_query.required_privileges), @@ -3516,10 +3811,10 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur try { // Remove database - auto success = db_handler->Delete(db_name); + auto success = db_handler->TryDelete(db_name); if (!success.HasError()) { // Remove from auth - auth->DeleteDatabase(db_name); + if (auth) auth->DeleteDatabase(db_name); } else { switch (success.GetError()) { case dbms::DeleteError::DEFAULT_DB: @@ -3547,48 +3842,56 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur }, RWType::W, query->db_name_}; - } + } + case MultiDatabaseQuery::Action::SHOW: { + return PreparedQuery{ + {"Current"}, + std::move(parsed_query.required_privileges), + [db_acc = current_db.db_acc_, pull_plan = std::shared_ptr(nullptr)]( + AnyStream *stream, std::optional n) mutable -> std::optional { + if (!pull_plan) { + std::vector> results; + auto db_name = db_acc ? TypedValue{db_acc->get()->storage()->name()} : TypedValue{}; + results.push_back({std::move(db_name)}); + pull_plan = std::make_shared(std::move(results)); + } + + if (pull_plan->Pull(stream, n)) { + return QueryHandlerResult::NOTHING; + } + return std::nullopt; + }, + RWType::NONE, + "" // No target DB + }; + } + }; #else throw QueryException("Query not supported."); #endif } -PreparedQuery PrepareShowDatabasesQuery(ParsedQuery parsed_query, CurrentDB ¤t_db, - InterpreterContext *interpreter_context, +PreparedQuery PrepareShowDatabasesQuery(ParsedQuery parsed_query, InterpreterContext *interpreter_context, const std::optional &username) { #ifdef MG_ENTERPRISE - - // TODO: split query into two, Databases (no need for current_db), & Current database (uses current_db) - MG_ASSERT(current_db.db_acc_, "Show Database Level query expects a current DB"); - storage::Storage *storage = current_db.db_acc_->get()->storage(); - if (!license::global_license_checker.IsEnterpriseValidFast()) { throw QueryException("Trying to use enterprise feature without a valid license."); } - // TODO pick directly from ic auto *db_handler = interpreter_context->dbms_handler; AuthQueryHandler *auth = interpreter_context->auth; Callback callback; - callback.header = {"Name", "Current"}; - callback.fn = [auth, storage, db_handler, username]() mutable -> std::vector> { + callback.header = {"Name"}; + callback.fn = [auth, db_handler, username]() mutable -> std::vector> { std::vector> status; - const auto &in_use = storage->id(); - bool found_current = false; - auto gen_status = [&](T all, K denied) { Sort(all); Sort(denied); status.reserve(all.size()); for (const auto &name : all) { - TypedValue use(""); - if (!found_current && Same(name, in_use)) { - use = TypedValue("*"); - found_current = true; - } - status.push_back({TypedValue(name), std::move(use)}); + status.push_back({TypedValue(name)}); } // No denied databases (no need to filter them out) @@ -3618,7 +3921,6 @@ PreparedQuery PrepareShowDatabasesQuery(ParsedQuery parsed_query, CurrentDB &cur } } - if (!found_current) throw QueryRuntimeException("Missing current database!"); return status; }; @@ -3654,15 +3956,13 @@ void Interpreter::BeginTransaction(QueryExtras const &extras) { void Interpreter::CommitTransaction() { const auto 
prepared_query = PrepareTransactionQuery("COMMIT"); prepared_query.query_handler(nullptr, {}); - query_executions_.clear(); - transaction_queries_->clear(); + ResetInterpreter(); } void Interpreter::RollbackTransaction() { const auto prepared_query = PrepareTransactionQuery("ROLLBACK"); prepared_query.query_handler(nullptr, {}); - query_executions_.clear(); - transaction_queries_->clear(); + ResetInterpreter(); } #if MG_ENTERPRISE @@ -3678,11 +3978,6 @@ void Interpreter::SetCurrentDB(std::string_view db_name, bool in_explicit_db) { Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, const std::map ¶ms, QueryExtras const &extras) { - // TODO: Remove once the interpreter is storage/tx independent and could run without an associated database - if (!current_db_.db_acc_) { - throw DatabaseContextRequiredException("Database required for the query."); - } - // Handle transaction control queries. const auto upper_case_query = utils::ToUpperCase(query_string); const auto trimmed_query = utils::Trim(upper_case_query); @@ -3696,18 +3991,16 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, return {query_execution->prepared_query->header, query_execution->prepared_query->privileges, qid, {}}; } - if (!in_explicit_transaction_) { - transaction_queries_->clear(); - } - // Don't save BEGIN, COMMIT or ROLLBACK - transaction_queries_->push_back(query_string); + // NOTE: query_string is not BEGIN, COMMIT or ROLLBACK // All queries other than transaction control queries advance the command in // an explicit transaction block. if (in_explicit_transaction_) { + transaction_queries_->push_back(query_string); AdvanceCommand(); } else { - query_executions_.clear(); + ResetInterpreter(); + transaction_queries_->push_back(query_string); if (current_db_.db_transactional_accessor_ /* && !in_explicit_transaction_*/) { // If we're not in an explicit transaction block and we have an open // transaction, abort it since we're about to prepare a new query. @@ -3765,6 +4058,37 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, // field with an improved estimate. 
query_execution->summary["cost_estimate"] = 0.0; + // System queries require strict ordering; since there is no MVCC-like thing, we allow single queries + bool system_queries = utils::Downcast(parsed_query.query) || + utils::Downcast(parsed_query.query) || + utils::Downcast(parsed_query.query) || + utils::Downcast(parsed_query.query); + + // TODO Split SHOW REPLICAS (which needs the db) and other replication queries + auto system_transaction_guard = std::invoke([&]() -> std::optional { + if (system_queries) { + // TODO: Ordering between system and data queries + // Start a system transaction + auto system_unique = std::unique_lock{interpreter_context_->dbms_handler->system_lock_, std::defer_lock}; + if (!system_unique.try_lock_for(std::chrono::milliseconds(kSystemTxTryMS))) { + throw ConcurrentSystemQueriesException("Multiple concurrent system queries are not supported."); + } + return std::optional{std::in_place, std::move(system_unique), + *interpreter_context_->dbms_handler}; + } + return std::nullopt; + }); + + // Some queries do not require a database to be executed (current_db_ won't be passed on to the Prepare*; special + // case for use database which overwrites the current database) + bool no_db_required = system_queries || utils::Downcast(parsed_query.query) || + utils::Downcast(parsed_query.query) || + utils::Downcast(parsed_query.query) || + utils::Downcast(parsed_query.query); + if (!no_db_required && !current_db_.db_acc_) { + throw DatabaseContextRequiredException("Database required for the query."); + } + // Some queries require an active transaction in order to be prepared. // TODO: make a better analysis visitor over the `parsed_query.query` bool requires_db_transaction = @@ -3783,10 +4107,12 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, SetupDatabaseTransaction(could_commit, unique); } - // TODO: none database transaction (assuming mutually exclusive from DB transactions) - // if (!requires_db_transaction) { - // /* something */ - // } +#ifdef MG_ENTERPRISE + if (FLAGS_coordinator && !utils::Downcast(parsed_query.query) && + !utils::Downcast(parsed_query.query)) { + throw QueryRuntimeException("Coordinator can run only coordinator queries!"); + } +#endif utils::Timer planning_timer; PreparedQuery prepared_query; @@ -3829,6 +4155,10 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, /// TODO: make replication DB agnostic prepared_query = PrepareReplicationQuery(std::move(parsed_query), in_explicit_transaction_, &query_execution->notifications, + *interpreter_context_->dbms_handler, current_db_, interpreter_context_->config); + } else if (utils::Downcast(parsed_query.query)) { + prepared_query = + PrepareCoordinatorQuery(std::move(parsed_query), in_explicit_transaction_, &query_execution->notifications, *interpreter_context_->dbms_handler, interpreter_context_->config); } else if (utils::Downcast(parsed_query.query)) { prepared_query = PrepareLockPathQuery(std::move(parsed_query), in_explicit_transaction_, current_db_); @@ -3871,12 +4201,11 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, throw MultiDatabaseQueryInMulticommandTxException(); } /// SYSTEM (Replication) + INTERPRETER - prepared_query = - PrepareMultiDatabaseQuery(std::move(parsed_query), current_db_, interpreter_context_, on_change_); + // DMG_ASSERT(system_guard); + prepared_query = PrepareMultiDatabaseQuery(std::move(parsed_query), current_db_, interpreter_context_, on_change_ + /*, *system_guard*/); } 
else if (utils::Downcast(parsed_query.query)) { - /// SYSTEM PURE ("SHOW DATABASES") - /// INTERPRETER (TODO: "SHOW DATABASE") - prepared_query = PrepareShowDatabasesQuery(std::move(parsed_query), current_db_, interpreter_context_, username_); + prepared_query = PrepareShowDatabasesQuery(std::move(parsed_query), interpreter_context_, username_); } else if (utils::Downcast(parsed_query.query)) { if (in_explicit_transaction_) { throw EdgeImportModeModificationInMulticommandTxException(); @@ -3901,10 +4230,12 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, // Set the target db to the current db (some queries have different target from the current db) if (!query_execution->prepared_query->db) { - query_execution->prepared_query->db = current_db_.db_acc_->get()->id(); + query_execution->prepared_query->db = current_db_.db_acc_->get()->name(); } query_execution->summary["db"] = *query_execution->prepared_query->db; + // prepare is done, move system txn guard to be owned by interpreter + system_transaction_guard_ = std::move(system_transaction_guard); return {query_execution->prepared_query->header, query_execution->prepared_query->privileges, qid, query_execution->prepared_query->db}; } catch (const utils::BasicException &) { @@ -3916,9 +4247,11 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string, throw; } } + void Interpreter::SetupDatabaseTransaction(bool couldCommit, bool unique) { current_db_.SetupDatabaseTransaction(GetIsolationLevelOverride(), couldCommit, unique); } + void Interpreter::SetupInterpreterTransaction(const QueryExtras &extras) { metrics::IncrementCounter(metrics::ActiveTransactions); transaction_status_.store(TransactionStatus::ACTIVE, std::memory_order_release); @@ -3997,7 +4330,9 @@ void RunTriggersAfterCommit(dbms::DatabaseAccess db_acc, InterpreterContext *int continue; } - auto maybe_commit_error = db_accessor.Commit(); + bool is_main = interpreter_context->repl_state->IsMain(); + auto maybe_commit_error = db_accessor.Commit({.is_main = is_main}, db_acc); + if (maybe_commit_error.HasError()) { const auto &error = maybe_commit_error.GetError(); @@ -4048,10 +4383,35 @@ void Interpreter::Commit() { // We should document clearly that all results should be pulled to complete // a query. current_transaction_.reset(); - if (!current_db_.db_transactional_accessor_) return; + if (!current_db_.db_transactional_accessor_ || !current_db_.db_acc_) { + // No database nor db transaction; check for system transaction + if (!system_transaction_guard_) return; - // TODO: Better (or removed) check - if (!current_db_.db_acc_) return; + // TODO Distinguish between data and system transaction state + // Think about updating the status to a struct with bitfield + // Clean transaction status on exit + utils::OnScopeExit clean_status([this]() { + system_transaction_guard_.reset(); + // System transactions are not terminable + // Durability has happened at time of PULL + // Commit is doing replication and timestamp update + // The DBMS does not support MVCC, so doing durability here doesn't change the overall logic; we cannot abort! + // What we are trying to do is set the transaction back to IDLE + // We cannot simply put it to IDLE, since the status is used as a syncronization method and we have to follow + // its logic. There are 2 states when we could update to IDLE (ACTIVE and TERMINATED). 
+ auto expected = TransactionStatus::ACTIVE; + while (!transaction_status_.compare_exchange_weak(expected, TransactionStatus::IDLE)) { + if (expected == TransactionStatus::TERMINATED) { + continue; + } + expected = TransactionStatus::ACTIVE; + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + } + }); + + system_transaction_guard_->Commit(); + return; + } auto *db = current_db_.db_acc_->get(); /* @@ -4121,19 +4481,19 @@ void Interpreter::Commit() { }; utils::OnScopeExit members_reseter(reset_necessary_members); - auto commit_confirmed_by_all_sync_repplicas = true; + auto commit_confirmed_by_all_sync_replicas = true; - auto maybe_commit_error = - current_db_.db_transactional_accessor_->Commit(std::nullopt, interpreter_context_->repl_state->IsMain()); + bool is_main = interpreter_context_->repl_state->IsMain(); + auto maybe_commit_error = current_db_.db_transactional_accessor_->Commit({.is_main = is_main}, current_db_.db_acc_); if (maybe_commit_error.HasError()) { const auto &error = maybe_commit_error.GetError(); std::visit( [&execution_db_accessor = current_db_.execution_db_accessor_, - &commit_confirmed_by_all_sync_repplicas](T &&arg) { + &commit_confirmed_by_all_sync_replicas](const T &arg) { using ErrorType = std::remove_cvref_t; if constexpr (std::is_same_v) { - commit_confirmed_by_all_sync_repplicas = false; + commit_confirmed_by_all_sync_replicas = false; } else if constexpr (std::is_same_v) { const auto &constraint_violation = arg; auto &label_name = execution_db_accessor->LabelToName(constraint_violation.label); @@ -4173,7 +4533,6 @@ void Interpreter::Commit() { if (trigger_context && db->trigger_store()->AfterCommitTriggers().size() > 0) { db->AddTask([this, trigger_context = std::move(*trigger_context), user_transaction = std::shared_ptr(std::move(current_db_.db_transactional_accessor_))]() mutable { - // TODO: Should this take the db_ and not Access()? RunTriggersAfterCommit(*current_db_.db_acc_, interpreter_context_, std::move(trigger_context), &this->transaction_status_); user_transaction->FinalizeTransaction(); @@ -4182,7 +4541,7 @@ void Interpreter::Commit() { } SPDLOG_DEBUG("Finished committing the transaction"); - if (!commit_confirmed_by_all_sync_repplicas) { + if (!commit_confirmed_by_all_sync_replicas) { throw ReplicationException("At least one SYNC replica has not confirmed committing last transaction."); } } diff --git a/src/query/interpreter.hpp b/src/query/interpreter.hpp index 5cb73cb07..42100059c 100644 --- a/src/query/interpreter.hpp +++ b/src/query/interpreter.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
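A sketch of the run-time behaviour the interpreter.cpp hunks above wire up, assuming an enterprise license, a build with MG_EXPERIMENTAL_HIGH_AVAILABILITY, and the coordinator flag enabled (FLAGS_coordinator, presumably exposed as a --coordinator style option); the quoted messages come from the exceptions thrown above:

    SHOW REPLICATION CLUSTER;  // coordinator only; returns the columns name, socket_address, alive, role
    SHOW REPLICAS;             // rejected on a coordinator: "Coordinator cannot call SHOW REPLICAS! Use SHOW REPLICATION CLUSTER instead."
    MATCH (n) RETURN n;        // rejected on a coordinator: "Coordinator can run only coordinator queries!"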
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -16,6 +16,7 @@ #include #include "dbms/database.hpp" +#include "dbms/dbms_handler.hpp" #include "memory/query_memory_control.hpp" #include "query/auth_checker.hpp" #include "query/auth_query_handler.hpp" @@ -51,6 +52,10 @@ #include "utils/timer.hpp" #include "utils/tsc.hpp" +#ifdef MG_ENTERPRISE +#include "coordination/coordinator_instance_status.hpp" +#endif + namespace memgraph::metrics { extern const Event FailedQuery; extern const Event FailedPrepare; @@ -67,16 +72,16 @@ inline constexpr size_t kExecutionPoolMaxBlockSize = 1024UL; // 2 ^ 10 enum class QueryHandlerResult { COMMIT, ABORT, NOTHING }; -class ReplicationQueryHandler { +class CoordinatorQueryHandler { public: - ReplicationQueryHandler() = default; - virtual ~ReplicationQueryHandler() = default; + CoordinatorQueryHandler() = default; + virtual ~CoordinatorQueryHandler() = default; - ReplicationQueryHandler(const ReplicationQueryHandler &) = default; - ReplicationQueryHandler &operator=(const ReplicationQueryHandler &) = default; + CoordinatorQueryHandler(const CoordinatorQueryHandler &) = default; + CoordinatorQueryHandler &operator=(const CoordinatorQueryHandler &) = default; - ReplicationQueryHandler(ReplicationQueryHandler &&) = default; - ReplicationQueryHandler &operator=(ReplicationQueryHandler &&) = default; + CoordinatorQueryHandler(CoordinatorQueryHandler &&) = default; + CoordinatorQueryHandler &operator=(CoordinatorQueryHandler &&) = default; struct Replica { std::string name; @@ -88,22 +93,32 @@ class ReplicationQueryHandler { ReplicationQuery::ReplicaState state; }; +#ifdef MG_ENTERPRISE + struct MainReplicaStatus { + std::string_view name; + std::string_view socket_address; + bool alive; + bool is_main; + + MainReplicaStatus(std::string_view name, std::string_view socket_address, bool alive, bool is_main) + : name{name}, socket_address{socket_address}, alive{alive}, is_main{is_main} {} + }; +#endif + +#ifdef MG_ENTERPRISE /// @throw QueryRuntimeException if an error ocurred. - virtual void SetReplicationRole(ReplicationQuery::ReplicationRole replication_role, std::optional port) = 0; + virtual void RegisterInstance(const std::string &coordinator_socket_address, + const std::string &replication_socket_address, + const std::chrono::seconds instance_check_frequency, const std::string &instance_name, + CoordinatorQuery::SyncMode sync_mode) = 0; /// @throw QueryRuntimeException if an error ocurred. - virtual ReplicationQuery::ReplicationRole ShowReplicationRole() const = 0; + virtual void SetInstanceToMain(const std::string &instance_name) = 0; /// @throw QueryRuntimeException if an error ocurred. - virtual void RegisterReplica(const std::string &name, const std::string &socket_address, - ReplicationQuery::SyncMode sync_mode, - const std::chrono::seconds replica_check_frequency) = 0; + virtual std::vector ShowInstances() const = 0; - /// @throw QueryRuntimeException if an error ocurred. - virtual void DropReplica(std::string_view replica_name) = 0; - - /// @throw QueryRuntimeException if an error ocurred. 
- virtual std::vector ShowReplicas() const = 0; +#endif }; class AnalyzeGraphQueryHandler { @@ -281,7 +296,38 @@ class Interpreter final { void SetUser(std::string_view username); + struct SystemTransactionGuard { + explicit SystemTransactionGuard(std::unique_lock guard, dbms::DbmsHandler &dbms_handler) + : system_guard_(std::move(guard)), dbms_handler_{&dbms_handler} { + dbms_handler_->NewSystemTransaction(); + } + SystemTransactionGuard &operator=(SystemTransactionGuard &&) = default; + SystemTransactionGuard(SystemTransactionGuard &&) = default; + + ~SystemTransactionGuard() { + if (system_guard_.owns_lock()) dbms_handler_->ResetSystemTransaction(); + } + + dbms::AllSyncReplicaStatus Commit() { return dbms_handler_->Commit(); } + + private: + std::unique_lock system_guard_; + dbms::DbmsHandler *dbms_handler_; + }; + + std::optional system_transaction_guard_{}; + private: + void ResetInterpreter() { + query_executions_.clear(); + system_guard.reset(); + system_transaction_guard_.reset(); + transaction_queries_->clear(); + if (current_db_.db_acc_ && current_db_.db_acc_->is_deleting()) { + current_db_.db_acc_.reset(); + } + } + struct QueryExecution { std::variant execution_memory; utils::ResourceWithOutOfMemoryException execution_memory_with_exception; @@ -340,6 +386,9 @@ class Interpreter final { // TODO Figure out how this would work for multi-database // Exists only during a single transaction (for now should be okay as is) std::vector> query_executions_; + // TODO: our upgradable lock guard for system + std::optional system_guard; + // all queries that are run as part of the current transaction utils::Synchronized, utils::SpinLock> transaction_queries_; @@ -435,8 +484,7 @@ std::map Interpreter::Pull(TStream *result_stream, std: // NOTE: we cannot clear query_execution inside the Abort and Commit // methods as we will delete summary contained in them which we need // after our query finished executing. - query_executions_.clear(); - transaction_queries_->clear(); + ResetInterpreter(); } else { // We can only clear this execution as some of the queries // in the transaction can be in unfinished state diff --git a/src/query/interpreter_context.cpp b/src/query/interpreter_context.cpp index 75d734645..cace25ec6 100644 --- a/src/query/interpreter_context.cpp +++ b/src/query/interpreter_context.cpp @@ -56,7 +56,7 @@ std::vector> InterpreterContext::TerminateTransactions( std::iter_swap(it, not_found_midpoint); auto get_interpreter_db_name = [&]() -> std::string const & { static std::string all; - return interpreter->current_db_.db_acc_ ? interpreter->current_db_.db_acc_->get()->id() : all; + return interpreter->current_db_.db_acc_ ? interpreter->current_db_.db_acc_->get()->name() : all; }; if (interpreter->username_ == username || privilege_checker(get_interpreter_db_name())) { killed = true; // Note: this is used by the above `clean_status` (OnScopeExit) diff --git a/src/query/interpreter_context.hpp b/src/query/interpreter_context.hpp index af8648376..9b54dbd3a 100644 --- a/src/query/interpreter_context.hpp +++ b/src/query/interpreter_context.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
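SystemTransactionGuard above couples a unique_lock over the system state with DbmsHandler callbacks, and the owns_lock() test in the destructor is what keeps a moved-from guard from resetting the transaction a second time. A minimal standalone sketch of that RAII shape, with DbmsHandler reduced to a stub:

#include <iostream>
#include <mutex>
#include <utility>

struct DbmsHandler {  // stand-in for dbms::DbmsHandler
  void NewSystemTransaction() { std::cout << "begin system txn\n"; }
  void ResetSystemTransaction() { std::cout << "reset system txn\n"; }
  bool Commit() { std::cout << "commit system txn\n"; return true; }
  std::mutex system_lock;
};

class SystemTransactionGuard {
 public:
  explicit SystemTransactionGuard(std::unique_lock<std::mutex> guard, DbmsHandler &dbms)
      : guard_(std::move(guard)), dbms_(&dbms) {
    dbms_->NewSystemTransaction();
  }
  SystemTransactionGuard(SystemTransactionGuard &&) = default;
  SystemTransactionGuard &operator=(SystemTransactionGuard &&) = default;

  ~SystemTransactionGuard() {
    // A moved-from guard no longer owns the lock, so only the final owner resets the transaction.
    if (guard_.owns_lock()) dbms_->ResetSystemTransaction();
  }

  bool Commit() { return dbms_->Commit(); }

 private:
  std::unique_lock<std::mutex> guard_;
  DbmsHandler *dbms_;
};

int main() {
  DbmsHandler dbms;
  SystemTransactionGuard guard(std::unique_lock{dbms.system_lock}, dbms);
  SystemTransactionGuard owner = std::move(guard);  // only `owner` resets on destruction
  owner.Commit();
}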
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -22,6 +22,8 @@ #include "query/cypher_query_interpreter.hpp" #include "query/typed_value.hpp" #include "replication/state.hpp" +#include "storage/v2/config.hpp" +#include "storage/v2/transaction.hpp" #include "utils/gatekeeper.hpp" #include "utils/skip_list.hpp" #include "utils/spin_lock.hpp" @@ -57,6 +59,7 @@ struct InterpreterContext { // GLOBAL memgraph::replication::ReplicationState *repl_state; + AuthQueryHandler *auth; AuthChecker *auth_checker; diff --git a/src/query/metadata.cpp b/src/query/metadata.cpp index ade17eb5c..56ef57431 100644 --- a/src/query/metadata.cpp +++ b/src/query/metadata.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -66,6 +66,10 @@ constexpr std::string_view GetCodeString(const NotificationCode code) { return "PlanHinting"sv; case NotificationCode::REGISTER_REPLICA: return "RegisterReplica"sv; +#ifdef MG_ENTERPRISE + case NotificationCode::REGISTER_COORDINATOR_SERVER: + return "RegisterCoordinatorServer"sv; +#endif case NotificationCode::REPLICA_PORT_WARNING: return "ReplicaPortWarning"sv; case NotificationCode::SET_REPLICA: diff --git a/src/query/metadata.hpp b/src/query/metadata.hpp index ca3914047..8e82ad1e3 100644 --- a/src/query/metadata.hpp +++ b/src/query/metadata.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -42,6 +42,9 @@ enum class NotificationCode : uint8_t { PLAN_HINTING, REPLICA_PORT_WARNING, REGISTER_REPLICA, +#ifdef MG_ENTERPRISE + REGISTER_COORDINATOR_SERVER, +#endif SET_REPLICA, START_STREAM, START_ALL_STREAMS, diff --git a/src/query/plan/operator.hpp b/src/query/plan/operator.hpp index 8fa3d3a7c..516ef2e38 100644 --- a/src/query/plan/operator.hpp +++ b/src/query/plan/operator.hpp @@ -916,11 +916,11 @@ struct ExpansionLambda { /// Currently expanded node symbol. Symbol inner_node_symbol; /// Expression used in lambda during expansion. - Expression *expression; + Expression *expression = nullptr; /// Currently expanded accumulated path symbol. - std::optional accumulated_path_symbol; + std::optional accumulated_path_symbol = std::nullopt; /// Currently expanded accumulated weight symbol. - std::optional accumulated_weight_symbol; + std::optional accumulated_weight_symbol = std::nullopt; ExpansionLambda Clone(AstStorage *storage) const { ExpansionLambda object; diff --git a/src/query/plan/preprocess.cpp b/src/query/plan/preprocess.cpp index 22899cbc0..cf8ad9c97 100644 --- a/src/query/plan/preprocess.cpp +++ b/src/query/plan/preprocess.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
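The operator.hpp hunk above gives ExpansionLambda in-class default initializers so a freshly constructed (or Clone()-d) instance never carries an indeterminate Expression pointer. A tiny illustration with placeholder types:

#include <optional>
#include <type_traits>

struct Expression;  // opaque AST node, as in the planner
struct Symbol {};

struct ExpansionLambda {
  // Without the "= nullptr" default, an instance built member by member (as Clone() does)
  // can briefly hold an indeterminate pointer; the defaults make the empty state well defined.
  Expression *expression = nullptr;
  std::optional<Symbol> accumulated_path_symbol = std::nullopt;
  std::optional<Symbol> accumulated_weight_symbol = std::nullopt;
};

static_assert(std::is_default_constructible_v<ExpansionLambda>);

int main() {
  ExpansionLambda lambda;  // expression == nullptr, both optionals empty
  return lambda.expression == nullptr ? 0 : 1;
}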
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -257,7 +257,7 @@ void Filters::EraseFilter(const FilterInfo &filter) { all_filters_.end()); } -void Filters::EraseLabelFilter(const Symbol &symbol, LabelIx label, std::vector *removed_filters) { +void Filters::EraseLabelFilter(const Symbol &symbol, const LabelIx &label, std::vector *removed_filters) { for (auto filter_it = all_filters_.begin(); filter_it != all_filters_.end();) { if (filter_it->type != FilterInfo::Type::Label) { ++filter_it; diff --git a/src/query/plan/preprocess.hpp b/src/query/plan/preprocess.hpp index 322da545a..2b53fb7b0 100644 --- a/src/query/plan/preprocess.hpp +++ b/src/query/plan/preprocess.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -366,7 +366,8 @@ class Filters final { /// Remove a label filter for symbol; may invalidate iterators. /// If removed_filters is not nullptr, fills the vector with original /// `Expression *` which are now completely removed. - void EraseLabelFilter(const Symbol &, LabelIx, std::vector *removed_filters = nullptr); + void EraseLabelFilter(const Symbol &symbol, const LabelIx &label, + std::vector *removed_filters = nullptr); /// Returns a vector of FilterInfo for properties. auto PropertyFilters(const Symbol &symbol) const { diff --git a/src/query/plan/rewrite/index_lookup.hpp b/src/query/plan/rewrite/index_lookup.hpp index 407c32ba0..09c6e2014 100644 --- a/src/query/plan/rewrite/index_lookup.hpp +++ b/src/query/plan/rewrite/index_lookup.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
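EraseLabelFilter above now takes the label by const reference and walks all_filters_, dropping matching label filters and optionally handing the removed expressions back to the caller. A simplified, self-contained cut of that loop; the real FilterInfo carries Symbol and LabelIx objects plus extra bookkeeping that is omitted here:

#include <algorithm>
#include <string>
#include <vector>

struct Expression;  // opaque AST node

struct FilterInfo {
  enum class Type { Generic, Label } type = Type::Generic;
  std::string symbol;               // stands in for Symbol
  std::vector<std::string> labels;  // stands in for the LabelIx collection
  Expression *expression = nullptr;
};

// Remove `label` from every label filter on `symbol`; if a filter ends up with no labels,
// drop it entirely and (optionally) report its expression to the caller.
void EraseLabelFilter(std::vector<FilterInfo> &all_filters, const std::string &symbol, const std::string &label,
                      std::vector<Expression *> *removed_filters = nullptr) {
  for (auto it = all_filters.begin(); it != all_filters.end();) {
    if (it->type != FilterInfo::Type::Label || it->symbol != symbol) {
      ++it;
      continue;
    }
    auto found = std::find(it->labels.begin(), it->labels.end(), label);
    if (found == it->labels.end()) {
      ++it;
      continue;
    }
    it->labels.erase(found);
    if (!it->labels.empty()) {
      ++it;
      continue;
    }
    if (removed_filters) removed_filters->push_back(it->expression);
    it = all_filters.erase(it);
  }
}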
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -655,9 +655,9 @@ class IndexLookupRewriter final : public HierarchicalLogicalOperatorVisitor { } } - storage::LabelId GetLabel(LabelIx label) { return db_->NameToLabel(label.name); } + storage::LabelId GetLabel(const LabelIx &label) { return db_->NameToLabel(label.name); } - storage::PropertyId GetProperty(PropertyIx prop) { return db_->NameToProperty(prop.name); } + storage::PropertyId GetProperty(const PropertyIx &prop) { return db_->NameToProperty(prop.name); } std::optional FindBestLabelIndex(const std::unordered_set &labels) { MG_ASSERT(!labels.empty(), "Trying to find the best label without any labels."); diff --git a/src/query/plan/rule_based_planner.hpp b/src/query/plan/rule_based_planner.hpp index bdd7bdbd9..092710628 100644 --- a/src/query/plan/rule_based_planner.hpp +++ b/src/query/plan/rule_based_planner.hpp @@ -271,9 +271,9 @@ class RuleBasedPlanner { private: TPlanningContext *context_; - storage::LabelId GetLabel(LabelIx label) { return context_->db->NameToLabel(label.name); } + storage::LabelId GetLabel(const LabelIx &label) { return context_->db->NameToLabel(label.name); } - storage::PropertyId GetProperty(PropertyIx prop) { return context_->db->NameToProperty(prop.name); } + storage::PropertyId GetProperty(const PropertyIx &prop) { return context_->db->NameToProperty(prop.name); } storage::EdgeTypeId GetEdgeType(EdgeTypeIx edge_type) { return context_->db->NameToEdgeType(edge_type.name); } diff --git a/src/query/procedure/py_module.cpp b/src/query/procedure/py_module.cpp index 264dee5ba..19393c4d0 100644 --- a/src/query/procedure/py_module.cpp +++ b/src/query/procedure/py_module.cpp @@ -868,7 +868,10 @@ py::Object MgpListToPyTuple(mgp_list *list, PyObject *py_graph) { } void PyCollectGarbage() { - if (!Py_IsInitialized() || _Py_IsFinalizing()) { + // NOTE: No need to call _Py_IsFinalizing(), we ensure + // Python GC thread is stopped before Py_Finalize() is called + // in memgraph.cpp + if (!Py_IsInitialized()) { // Calling EnsureGIL will crash the program if this is true. return; } diff --git a/src/query/stream/streams.cpp b/src/query/stream/streams.cpp index 57011aecb..101ca592c 100644 --- a/src/query/stream/streams.cpp +++ b/src/query/stream/streams.cpp @@ -644,6 +644,25 @@ void Streams::Drop(const std::string &stream_name) { // TODO(antaljanosbenjamin) Release the transformation } +void Streams::DropAll() { + streams_.WithLock([this](StreamsMap &streams) { + bool durability_ok = true; + for (auto &[name, stream] : streams) { + // streams_ is write locked, which means there is no access to it outside of this function, thus only the Test + // function can be executing with the consumer, nothing else. + // By acquiring the write lock here for the consumer, we make sure there is + // no running Test function for this consumer, therefore it can be erased. 
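The comment above describes the locking scheme DropAll relies on: the whole map is write-locked, each stream's persisted metadata is deleted, and only then is the in-memory map cleared. A reduced sketch of that shape, with std::mutex and a std::set standing in for utils::Synchronized and the kvstore, and the per-consumer std::visit locking left out:

#include <map>
#include <mutex>
#include <set>
#include <string>

struct DurabilityStore {  // stand-in for the kvstore-backed storage_
  std::set<std::string> keys;
  bool Delete(const std::string &key) { return keys.erase(key) > 0; }
};

class Streams {
 public:
  void Create(const std::string &name) {
    std::lock_guard lock(mtx_);
    streams_.emplace(name, /*consumer=*/0);
    storage_.keys.insert(name);
  }

  // Delete every stream and its persisted metadata; report whether all durability
  // entries were removed (the TODO in the diff asks what to do when this is false).
  bool DropAll() {
    std::lock_guard lock(mtx_);  // the map-wide write lock: no Test()/Start() can run concurrently
    bool durability_ok = true;
    for (const auto &entry : streams_) {
      if (!storage_.Delete(entry.first)) durability_ok = false;
    }
    streams_.clear();
    return durability_ok;
  }

 private:
  std::mutex mtx_;
  std::map<std::string, int> streams_;
  DurabilityStore storage_;
};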
+ std::visit([&](const auto &stream_data) { stream_data.stream_source->Lock(); }, stream); + if (!storage_.Delete(name)) { + durability_ok = false; + } + } + + streams.clear(); + return durability_ok; // TODO: do we need special case for this cleanup if false + }); +} + void Streams::Start(const std::string &stream_name) { auto locked_streams = streams_.Lock(); auto it = GetStream(*locked_streams, stream_name); diff --git a/src/query/stream/streams.hpp b/src/query/stream/streams.hpp index 2c89341d1..bad1f8c98 100644 --- a/src/query/stream/streams.hpp +++ b/src/query/stream/streams.hpp @@ -110,6 +110,11 @@ class Streams final { /// @throws StreamsException if the stream doesn't exist or if the persisted metadata can't be deleted. void Drop(const std::string &stream_name); + /// Deletes all existing streams and all the data that was persisted. + /// + /// @throws StreamsException if the persisted metadata can't be deleted. + void DropAll(); + /// Start consuming from a stream. /// /// @param stream_name name of the stream that needs to be started diff --git a/src/query/typed_value.cpp b/src/query/typed_value.cpp index ea883e428..4cb79508e 100644 --- a/src/query/typed_value.cpp +++ b/src/query/typed_value.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -125,9 +125,7 @@ TypedValue::TypedValue(storage::PropertyValue &&other, utils::MemoryResource *me case storage::PropertyValue::Type::List: { type_ = Type::List; auto &vec = other.ValueList(); - new (&list_v) TVector(memory_); - list_v.reserve(vec.size()); - for (auto &v : vec) list_v.emplace_back(std::move(v)); + new (&list_v) TVector(std::make_move_iterator(vec.begin()), std::make_move_iterator(vec.end()), memory_); break; } case storage::PropertyValue::Type::Map: { @@ -324,13 +322,13 @@ TypedValue::operator storage::PropertyValue() const { #define DEFINE_VALUE_AND_TYPE_GETTERS(type_param, type_enum, field) \ type_param &TypedValue::Value##type_enum() { \ - if (type_ != Type::type_enum) \ + if (type_ != Type::type_enum) [[unlikely]] \ throw TypedValueException("TypedValue is of type '{}', not '{}'", type_, Type::type_enum); \ return field; \ } \ \ const type_param &TypedValue::Value##type_enum() const { \ - if (type_ != Type::type_enum) \ + if (type_ != Type::type_enum) [[unlikely]] \ throw TypedValueException("TypedValue is of type '{}', not '{}'", type_, Type::type_enum); \ return field; \ } \ diff --git a/src/replication/CMakeLists.txt b/src/replication/CMakeLists.txt index 597ed096a..e19ba7061 100644 --- a/src/replication/CMakeLists.txt +++ b/src/replication/CMakeLists.txt @@ -5,10 +5,8 @@ target_sources(mg-replication include/replication/state.hpp include/replication/epoch.hpp include/replication/config.hpp - include/replication/mode.hpp - include/replication/messages.hpp - include/replication/role.hpp include/replication/status.hpp + include/replication/messages.hpp include/replication/replication_client.hpp include/replication/replication_server.hpp @@ -25,6 +23,6 @@ target_include_directories(mg-replication PUBLIC include) find_package(fmt REQUIRED) target_link_libraries(mg-replication - PUBLIC mg::utils mg::kvstore lib::json mg::rpc mg::slk + PUBLIC mg::utils mg::kvstore lib::json mg::rpc mg::slk mg::io mg::repl_coord_glue PRIVATE fmt::fmt ) diff --git a/src/replication/include/replication/config.hpp 
b/src/replication/include/replication/config.hpp index f98069955..822e09f72 100644 --- a/src/replication/include/replication/config.hpp +++ b/src/replication/include/replication/config.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -15,7 +15,7 @@ #include #include #include -#include "replication/mode.hpp" +#include "replication_coordination_glue/mode.hpp" namespace memgraph::replication { @@ -24,7 +24,7 @@ inline constexpr auto *kDefaultReplicationServerIp = "0.0.0.0"; struct ReplicationClientConfig { std::string name; - ReplicationMode mode{}; + replication_coordination_glue::ReplicationMode mode{}; std::string ip_address; uint16_t port{}; @@ -40,7 +40,7 @@ struct ReplicationClientConfig { friend bool operator==(const SSL &, const SSL &) = default; }; - std::optional ssl; + std::optional ssl{}; friend bool operator==(ReplicationClientConfig const &, ReplicationClientConfig const &) = default; }; diff --git a/src/replication/include/replication/messages.hpp b/src/replication/include/replication/messages.hpp index 57cf29351..b4e0b51c7 100644 --- a/src/replication/include/replication/messages.hpp +++ b/src/replication/include/replication/messages.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -15,30 +15,33 @@ #include "slk/serialization.hpp" namespace memgraph::replication { - -struct FrequentHeartbeatReq { - static const utils::TypeInfo kType; // TODO: make constexpr? - static const utils::TypeInfo &GetTypeInfo() { return kType; } // WHAT? 
- - static void Load(FrequentHeartbeatReq *self, memgraph::slk::Reader *reader); - static void Save(const FrequentHeartbeatReq &self, memgraph::slk::Builder *builder); - FrequentHeartbeatReq() = default; -}; - -struct FrequentHeartbeatRes { +struct SystemHeartbeatReq { static const utils::TypeInfo kType; static const utils::TypeInfo &GetTypeInfo() { return kType; } - static void Load(FrequentHeartbeatRes *self, memgraph::slk::Reader *reader); - static void Save(const FrequentHeartbeatRes &self, memgraph::slk::Builder *builder); - FrequentHeartbeatRes() = default; - explicit FrequentHeartbeatRes(bool success) : success(success) {} - - bool success; + static void Load(SystemHeartbeatReq *self, memgraph::slk::Reader *reader); + static void Save(const SystemHeartbeatReq &self, memgraph::slk::Builder *builder); + SystemHeartbeatReq() = default; }; -using FrequentHeartbeatRpc = rpc::RequestResponse; +struct SystemHeartbeatRes { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } -void FrequentHeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder); + static void Load(SystemHeartbeatRes *self, memgraph::slk::Reader *reader); + static void Save(const SystemHeartbeatRes &self, memgraph::slk::Builder *builder); + SystemHeartbeatRes() = default; + explicit SystemHeartbeatRes(uint64_t system_timestamp) : system_timestamp(system_timestamp) {} + uint64_t system_timestamp; +}; + +using SystemHeartbeatRpc = rpc::RequestResponse; } // namespace memgraph::replication + +namespace memgraph::slk { +void Save(const memgraph::replication::SystemHeartbeatRes &self, memgraph::slk::Builder *builder); +void Load(memgraph::replication::SystemHeartbeatRes *self, memgraph::slk::Reader *reader); +void Save(const memgraph::replication::SystemHeartbeatReq & /*self*/, memgraph::slk::Builder * /*builder*/); +void Load(memgraph::replication::SystemHeartbeatReq * /*self*/, memgraph::slk::Reader * /*reader*/); +} // namespace memgraph::slk diff --git a/src/replication/include/replication/replication_client.hpp b/src/replication/include/replication/replication_client.hpp index 16e1010bf..0c64ae625 100644 --- a/src/replication/include/replication/replication_client.hpp +++ b/src/replication/include/replication/replication_client.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -12,9 +12,10 @@ #pragma once #include "replication/config.hpp" -#include "replication/messages.hpp" +#include "replication_coordination_glue/messages.hpp" #include "rpc/client.hpp" #include "utils/scheduler.hpp" +#include "utils/synchronized.hpp" #include "utils/thread_pool.hpp" #include @@ -22,8 +23,10 @@ namespace memgraph::replication { +struct ReplicationClient; + template -concept InvocableWithStringView = std::invocable; +concept FrequentCheckCB = std::invocable; struct ReplicationClient { explicit ReplicationClient(const memgraph::replication::ReplicationClientConfig &config); @@ -34,24 +37,27 @@ struct ReplicationClient { ReplicationClient(ReplicationClient &&) noexcept = delete; ReplicationClient &operator=(ReplicationClient &&) noexcept = delete; - template + template void StartFrequentCheck(F &&callback) { // Help the user to get the most accurate replica state possible. 
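The SystemHeartbeatReq/Res pair declared earlier in this hunk carries only a system timestamp. The patch does not show the server side, so the handler below is a hypothetical sketch written in the same shape as FrequentHeartbeatHandler later in this diff; the function name and the way the timestamp reaches it are assumptions, only the Req/Res types come from the patch.

// Hypothetical handler; not part of this patch.
#include "replication/messages.hpp"

namespace memgraph::replication {

void SystemHeartbeatHandler(const uint64_t last_system_timestamp, slk::Reader *req_reader,
                            slk::Builder *res_builder) {
  SystemHeartbeatReq req;
  SystemHeartbeatReq::Load(&req, req_reader);  // nothing to deserialize, but keeps the stream aligned
  SystemHeartbeatRes res{last_system_timestamp};
  memgraph::slk::Save(res, res_builder);
}

}  // namespace memgraph::replication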
if (replica_check_frequency_ > std::chrono::seconds(0)) { - replica_checker_.Run("Replica Checker", replica_check_frequency_, [this, cb = std::forward(callback)] { - try { - bool success = false; - { - auto stream{rpc_client_.Stream()}; - success = stream.AwaitResponse().success; - } - if (success) { - cb(name_); - } - } catch (const rpc::RpcFailedException &) { - // Nothing to do...wait for a reconnect - } - }); + replica_checker_.Run("Replica Checker", replica_check_frequency_, + [this, cb = std::forward(callback), reconnect = false]() mutable { + try { + { + auto stream{rpc_client_.Stream()}; + stream.AwaitResponse(); + } + cb(reconnect, *this); + reconnect = false; + } catch (const rpc::RpcFailedException &) { + // Nothing to do...wait for a reconnect + // NOTE: Here we are communicating with the instance connection. + // We don't have access to the undelying client; so the only thing we can do it + // tell the callback that this is a reconnection and to check the state + reconnect = true; + } + }); } } @@ -60,7 +66,14 @@ struct ReplicationClient { rpc::Client rpc_client_; std::chrono::seconds replica_check_frequency_; - memgraph::replication::ReplicationMode mode_{memgraph::replication::ReplicationMode::SYNC}; + // TODO: Better, this was the easiest place to put this + enum class State { + BEHIND, + READY, + }; + utils::Synchronized state_{State::BEHIND}; + + replication_coordination_glue::ReplicationMode mode_{replication_coordination_glue::ReplicationMode::SYNC}; // This thread pool is used for background tasks so we don't // block the main storage thread // We use only 1 thread for 2 reasons: diff --git a/src/replication/include/replication/state.hpp b/src/replication/include/replication/state.hpp index 76aec1053..a53885aff 100644 --- a/src/replication/include/replication/state.hpp +++ b/src/replication/include/replication/state.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -14,9 +14,9 @@ #include "kvstore/kvstore.hpp" #include "replication/config.hpp" #include "replication/epoch.hpp" -#include "replication/mode.hpp" #include "replication/replication_client.hpp" -#include "replication/role.hpp" +#include "replication_coordination_glue/mode.hpp" +#include "replication_coordination_glue/role.hpp" #include "replication_server.hpp" #include "status.hpp" #include "utils/result.hpp" @@ -32,7 +32,8 @@ namespace memgraph::replication { enum class RolePersisted : uint8_t { UNKNOWN_OR_NO, YES }; -enum class RegisterReplicaError : uint8_t { NAME_EXISTS, END_POINT_EXISTS, COULD_NOT_BE_PERSISTED, NOT_MAIN, SUCCESS }; +// TODO: (andi) Rename Error to Status +enum class RegisterReplicaError : uint8_t { NAME_EXISTS, ENDPOINT_EXISTS, COULD_NOT_BE_PERSISTED, NOT_MAIN, SUCCESS }; struct RoleMainData { RoleMainData() = default; @@ -45,7 +46,7 @@ struct RoleMainData { RoleMainData &operator=(RoleMainData &&) = default; ReplicationEpoch epoch_; - std::list registered_replicas_{}; + std::list registered_replicas_{}; // TODO: data race issues }; struct RoleReplicaData { @@ -72,14 +73,16 @@ struct ReplicationState { using FetchReplicationResult_t = utils::BasicResult; auto FetchReplicationData() -> FetchReplicationResult_t; - auto GetRole() const -> ReplicationRole { - return std::holds_alternative(replication_data_) ? 
ReplicationRole::REPLICA - : ReplicationRole::MAIN; + auto GetRole() const -> replication_coordination_glue::ReplicationRole { + return std::holds_alternative(replication_data_) + ? replication_coordination_glue::ReplicationRole::REPLICA + : replication_coordination_glue::ReplicationRole::MAIN; } - bool IsMain() const { return GetRole() == ReplicationRole::MAIN; } - bool IsReplica() const { return GetRole() == ReplicationRole::REPLICA; } + bool IsMain() const { return GetRole() == replication_coordination_glue::ReplicationRole::MAIN; } + bool IsReplica() const { return GetRole() == replication_coordination_glue::ReplicationRole::REPLICA; } + + bool HasDurability() const { return nullptr != durability_; } - bool ShouldPersist() const { return nullptr != durability_; } bool TryPersistRoleMain(std::string new_epoch); bool TryPersistRoleReplica(const ReplicationServerConfig &config); bool TryPersistUnregisterReplica(std::string_view name); @@ -91,7 +94,6 @@ struct ReplicationState { utils::BasicResult RegisterReplica(const ReplicationClientConfig &config); bool SetReplicationRoleMain(); - bool SetReplicationRoleReplica(const ReplicationServerConfig &config); private: diff --git a/src/replication/include/replication/status.hpp b/src/replication/include/replication/status.hpp index 943db423a..4dfba6aaa 100644 --- a/src/replication/include/replication/status.hpp +++ b/src/replication/include/replication/status.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -21,7 +21,7 @@ #include "replication/config.hpp" #include "replication/epoch.hpp" -#include "replication/role.hpp" +#include "replication_coordination_glue/role.hpp" namespace memgraph::replication::durability { @@ -42,7 +42,7 @@ struct MainRole { // fragment of key: "__replication_role" struct ReplicaRole { - ReplicationServerConfig config; + ReplicationServerConfig config{}; friend bool operator==(ReplicaRole const &, ReplicaRole const &) = default; }; diff --git a/src/replication/messages.cpp b/src/replication/messages.cpp index 4503e9df2..b2dca374e 100644 --- a/src/replication/messages.cpp +++ b/src/replication/messages.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -8,58 +8,45 @@ // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. 
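ReplicationState above keeps its role as a variant of RoleMainData/RoleReplicaData and derives GetRole(), IsMain() and IsReplica() from which alternative is held. A standalone reduction of that idea:

#include <string>
#include <variant>
#include <vector>

enum class ReplicationRole { MAIN, REPLICA };

struct RoleMainData {
  std::string epoch;
  std::vector<std::string> registered_replicas;
};
struct RoleReplicaData {
  std::string listen_address;
};

class ReplicationState {
 public:
  ReplicationRole GetRole() const {
    return std::holds_alternative<RoleReplicaData>(data_) ? ReplicationRole::REPLICA : ReplicationRole::MAIN;
  }
  bool IsMain() const { return GetRole() == ReplicationRole::MAIN; }
  bool IsReplica() const { return GetRole() == ReplicationRole::REPLICA; }

  void BecomeMain(std::string new_epoch) { data_ = RoleMainData{std::move(new_epoch), {}}; }
  void BecomeReplica(std::string listen_address) { data_ = RoleReplicaData{std::move(listen_address)}; }

 private:
  std::variant<RoleMainData, RoleReplicaData> data_{RoleMainData{}};
};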
- #include "replication/messages.hpp" -#include "rpc/messages.hpp" -#include "slk/serialization.hpp" -#include "slk/streams.hpp" - -namespace memgraph::slk { -// Serialize code for FrequentHeartbeatRes -void Save(const memgraph::replication::FrequentHeartbeatRes &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.success, builder); -} -void Load(memgraph::replication::FrequentHeartbeatRes *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->success, reader); -} - -// Serialize code for FrequentHeartbeatReq -void Save(const memgraph::replication::FrequentHeartbeatReq & /*self*/, memgraph::slk::Builder * /*builder*/) { - /* Nothing to serialize */ -} -void Load(memgraph::replication::FrequentHeartbeatReq * /*self*/, memgraph::slk::Reader * /*reader*/) { - /* Nothing to serialize */ -} - -} // namespace memgraph::slk namespace memgraph::replication { -constexpr utils::TypeInfo FrequentHeartbeatReq::kType{utils::TypeId::REP_FREQUENT_HEARTBEAT_REQ, "FrequentHeartbeatReq", - nullptr}; +constexpr utils::TypeInfo SystemHeartbeatReq::kType{utils::TypeId::REP_SYSTEM_HEARTBEAT_REQ, "SystemHeartbeatReq", + nullptr}; -constexpr utils::TypeInfo FrequentHeartbeatRes::kType{utils::TypeId::REP_FREQUENT_HEARTBEAT_RES, "FrequentHeartbeatRes", - nullptr}; +constexpr utils::TypeInfo SystemHeartbeatRes::kType{utils::TypeId::REP_SYSTEM_HEARTBEAT_RES, "SystemHeartbeatRes", + nullptr}; -void FrequentHeartbeatReq::Save(const FrequentHeartbeatReq &self, memgraph::slk::Builder *builder) { +void SystemHeartbeatReq::Save(const SystemHeartbeatReq &self, memgraph::slk::Builder *builder) { memgraph::slk::Save(self, builder); } -void FrequentHeartbeatReq::Load(FrequentHeartbeatReq *self, memgraph::slk::Reader *reader) { +void SystemHeartbeatReq::Load(SystemHeartbeatReq *self, memgraph::slk::Reader *reader) { memgraph::slk::Load(self, reader); } -void FrequentHeartbeatRes::Save(const FrequentHeartbeatRes &self, memgraph::slk::Builder *builder) { +void SystemHeartbeatRes::Save(const SystemHeartbeatRes &self, memgraph::slk::Builder *builder) { memgraph::slk::Save(self, builder); } -void FrequentHeartbeatRes::Load(FrequentHeartbeatRes *self, memgraph::slk::Reader *reader) { +void SystemHeartbeatRes::Load(SystemHeartbeatRes *self, memgraph::slk::Reader *reader) { memgraph::slk::Load(self, reader); } -void FrequentHeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder) { - FrequentHeartbeatReq req; - FrequentHeartbeatReq::Load(&req, req_reader); - memgraph::slk::Load(&req, req_reader); - FrequentHeartbeatRes res{true}; - memgraph::slk::Save(res, res_builder); -} - } // namespace memgraph::replication + +namespace memgraph::slk { +// Serialize code for SystemHeartbeatRes +void Save(const memgraph::replication::SystemHeartbeatRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self.system_timestamp, builder); +} +void Load(memgraph::replication::SystemHeartbeatRes *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(&self->system_timestamp, reader); +} + +// Serialize code for SystemHeartbeatReq +void Save(const memgraph::replication::SystemHeartbeatReq & /*self*/, memgraph::slk::Builder * /*builder*/) { + /* Nothing to serialize */ +} +void Load(memgraph::replication::SystemHeartbeatReq * /*self*/, memgraph::slk::Reader * /*reader*/) { + /* Nothing to serialize */ +} +} // namespace memgraph::slk diff --git a/src/replication/replication_client.cpp b/src/replication/replication_client.cpp index d14250c2a..ed46ea471 100644 --- 
a/src/replication/replication_client.cpp +++ b/src/replication/replication_client.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -28,8 +28,8 @@ ReplicationClient::ReplicationClient(const memgraph::replication::ReplicationCli mode_{config.mode} {} ReplicationClient::~ReplicationClient() { - auto endpoint = rpc_client_.Endpoint(); try { + auto const &endpoint = rpc_client_.Endpoint(); spdlog::trace("Closing replication client on {}:{}", endpoint.address, endpoint.port); } catch (...) { // Logging can throw. Not a big deal, just ignore. diff --git a/src/replication/replication_server.cpp b/src/replication/replication_server.cpp index f79ea2add..03c48d298 100644 --- a/src/replication/replication_server.cpp +++ b/src/replication/replication_server.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -10,7 +10,7 @@ // licenses/APL.txt. #include "replication/replication_server.hpp" -#include "replication/messages.hpp" +#include "replication_coordination_glue/messages.hpp" namespace memgraph::replication { namespace { @@ -32,9 +32,9 @@ ReplicationServer::ReplicationServer(const memgraph::replication::ReplicationSer : rpc_server_context_{CreateServerContext(config)}, rpc_server_{io::network::Endpoint{config.ip_address, config.port}, &rpc_server_context_, kReplicationServerThreads} { - rpc_server_.Register([](auto *req_reader, auto *res_builder) { + rpc_server_.Register([](auto *req_reader, auto *res_builder) { spdlog::debug("Received FrequentHeartbeatRpc"); - FrequentHeartbeatHandler(req_reader, res_builder); + replication_coordination_glue::FrequentHeartbeatHandler(req_reader, res_builder); }); } diff --git a/src/replication/state.cpp b/src/replication/state.cpp index 60c390e17..d04a3d245 100644 --- a/src/replication/state.cpp +++ b/src/replication/state.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
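The ReplicationClient destructor change above moves the Endpoint() call inside the try block: both the endpoint accessor and the logging can throw once the connection is gone, and nothing may escape a destructor. The general shape, with rpc::Client reduced to a stub:

#include <iostream>
#include <stdexcept>
#include <string>

struct RpcClient {  // stand-in for rpc::Client; Endpoint() can throw once the socket is gone
  std::string Endpoint() const {
    if (!connected) throw std::runtime_error("no endpoint");
    return "127.0.0.1:10000";
  }
  bool connected{false};
};

struct ReplicationClient {
  RpcClient rpc_client_;

  ~ReplicationClient() {
    try {
      // Everything that can throw stays inside the try, including the accessor itself.
      const auto endpoint = rpc_client_.Endpoint();
      std::cout << "Closing replication client on " << endpoint << '\n';
    } catch (...) {
      // Logging can throw. Not a big deal, just ignore.
    }
  }
};

int main() { ReplicationClient client; }  // the destructor swallows the failure instead of terminating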
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -34,6 +34,7 @@ ReplicationState::ReplicationState(std::optional durabili repl_dir /= kReplicationDirectory; utils::EnsureDirOrDie(repl_dir); durability_ = std::make_unique(std::move(repl_dir)); + spdlog::info("Replication configuration will be stored and will be automatically restored in case of a crash."); auto replicationData = FetchReplicationData(); if (replicationData.HasError()) { @@ -54,7 +55,7 @@ ReplicationState::ReplicationState(std::optional durabili } bool ReplicationState::TryPersistRoleReplica(const ReplicationServerConfig &config) { - if (!ShouldPersist()) return true; + if (!HasDurability()) return true; auto data = durability::ReplicationRoleEntry{.role = durability::ReplicaRole{ .config = config, @@ -78,7 +79,7 @@ bool ReplicationState::TryPersistRoleReplica(const ReplicationServerConfig &conf } bool ReplicationState::TryPersistRoleMain(std::string new_epoch) { - if (!ShouldPersist()) return true; + if (!HasDurability()) return true; auto data = durability::ReplicationRoleEntry{.role = durability::MainRole{.epoch = ReplicationEpoch{std::move(new_epoch)}}}; @@ -92,7 +93,7 @@ bool ReplicationState::TryPersistRoleMain(std::string new_epoch) { } bool ReplicationState::TryPersistUnregisterReplica(std::string_view name) { - if (!ShouldPersist()) return true; + if (!HasDurability()) return true; auto key = BuildReplicaKey(name); @@ -104,7 +105,7 @@ bool ReplicationState::TryPersistUnregisterReplica(std::string_view name) { // TODO: FetchEpochData (agnostic of FetchReplicationData, but should be done before) auto ReplicationState::FetchReplicationData() -> FetchReplicationResult_t { - if (!ShouldPersist()) return FetchReplicationError::NOTHING_FETCHED; + if (!HasDurability()) return FetchReplicationError::NOTHING_FETCHED; const auto replication_data = durability_->Get(durability::kReplicationRoleName); if (!replication_data.has_value()) { return FetchReplicationError::NOTHING_FETCHED; @@ -199,7 +200,7 @@ bool ReplicationState::HandleVersionMigration(durability::ReplicationRoleEntry & } bool ReplicationState::TryPersistRegisteredReplica(const ReplicationClientConfig &config) { - if (!ShouldPersist()) return true; + if (!HasDurability()) return true; // If any replicas are persisted then Role must be persisted if (role_persisted != RolePersisted::YES) { @@ -218,10 +219,12 @@ bool ReplicationState::TryPersistRegisteredReplica(const ReplicationClientConfig bool ReplicationState::SetReplicationRoleMain() { auto new_epoch = utils::GenerateUUID(); + if (!TryPersistRoleMain(new_epoch)) { return false; } replication_data_ = RoleMainData{ReplicationEpoch{new_epoch}}; + return true; } @@ -236,6 +239,7 @@ bool ReplicationState::SetReplicationRoleReplica(const ReplicationServerConfig & utils::BasicResult ReplicationState::RegisterReplica( const ReplicationClientConfig &config) { auto const replica_handler = [](RoleReplicaData const &) { return RegisterReplicaError::NOT_MAIN; }; + ReplicationClient *client{nullptr}; auto const main_handler = [&client, &config, this](RoleMainData &mainData) -> RegisterReplicaError { // name check @@ -256,7 +260,7 @@ utils::BasicResult ReplicationState:: return std::any_of(replicas.begin(), replicas.end(), endpoint_matches); }; if (endpoint_check(mainData.registered_replicas_)) { - return RegisterReplicaError::END_POINT_EXISTS; + return 
RegisterReplicaError::ENDPOINT_EXISTS; } // Durability @@ -275,4 +279,5 @@ utils::BasicResult ReplicationState:: } return res; } + } // namespace memgraph::replication diff --git a/src/replication/status.cpp b/src/replication/status.cpp index 06d67cc66..de1af9589 100644 --- a/src/replication/status.cpp +++ b/src/replication/status.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -29,12 +29,14 @@ constexpr auto *kVersion = "durability_version"; void to_json(nlohmann::json &j, const ReplicationRoleEntry &p) { auto processMAIN = [&](MainRole const &main) { - j = nlohmann::json{{kVersion, p.version}, {kReplicationRole, ReplicationRole::MAIN}, {kEpoch, main.epoch.id()}}; + j = nlohmann::json{{kVersion, p.version}, + {kReplicationRole, replication_coordination_glue::ReplicationRole::MAIN}, + {kEpoch, main.epoch.id()}}; }; auto processREPLICA = [&](ReplicaRole const &replica) { j = nlohmann::json{ {kVersion, p.version}, - {kReplicationRole, ReplicationRole::REPLICA}, + {kReplicationRole, replication_coordination_glue::ReplicationRole::REPLICA}, {kIpAddress, replica.config.ip_address}, {kPort, replica.config.port} // TODO: SSL @@ -47,17 +49,17 @@ void from_json(const nlohmann::json &j, ReplicationRoleEntry &p) { // This value did not exist in V1, hence default DurabilityVersion::V1 DurabilityVersion version = j.value(kVersion, DurabilityVersion::V1); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) - ReplicationRole role; + replication_coordination_glue::ReplicationRole role; j.at(kReplicationRole).get_to(role); switch (role) { - case ReplicationRole::MAIN: { + case replication_coordination_glue::ReplicationRole::MAIN: { auto json_epoch = j.value(kEpoch, std::string{}); auto epoch = ReplicationEpoch{}; if (!json_epoch.empty()) epoch.SetEpoch(json_epoch); p = ReplicationRoleEntry{.version = version, .role = MainRole{.epoch = std::move(epoch)}}; break; } - case ReplicationRole::REPLICA: { + case memgraph::replication_coordination_glue::ReplicationRole::REPLICA: { std::string ip_address; // NOLINTNEXTLINE(cppcoreguidelines-init-variables) uint16_t port; @@ -95,7 +97,7 @@ void from_json(const nlohmann::json &j, ReplicationReplicaEntry &p) { auto seconds = j.at(kCheckFrequency).get(); auto config = ReplicationClientConfig{ .name = j.at(kReplicaName).get(), - .mode = j.at(kSyncMode).get(), + .mode = j.at(kSyncMode).get(), .ip_address = j.at(kIpAddress).get(), .port = j.at(kPort).get(), .replica_check_frequency = std::chrono::seconds{seconds}, diff --git a/src/replication_coordination_glue/CMakeLists.txt b/src/replication_coordination_glue/CMakeLists.txt new file mode 100644 index 000000000..010a7b596 --- /dev/null +++ b/src/replication_coordination_glue/CMakeLists.txt @@ -0,0 +1,14 @@ +add_library(mg-repl_coord_glue STATIC ) +add_library(mg::repl_coord_glue ALIAS mg-repl_coord_glue) + +target_sources(mg-repl_coord_glue + PUBLIC + messages.hpp + mode.hpp + role.hpp + + PRIVATE + messages.cpp +) + +target_link_libraries(mg-repl_coord_glue mg-rpc mg-slk) diff --git a/src/replication_coordination_glue/messages.cpp b/src/replication_coordination_glue/messages.cpp new file mode 100644 index 000000000..c7cf0b15c --- /dev/null +++ b/src/replication_coordination_glue/messages.cpp @@ -0,0 +1,63 @@ +// Copyright 2024 Memgraph Ltd. 
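RegisterReplica earlier in this hunk refuses duplicates by name and by endpoint before touching durability, and the error enum now spells the latter ENDPOINT_EXISTS. A standalone reduction of those two std::any_of checks over the registered configurations:

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

enum class RegisterReplicaError : uint8_t { NAME_EXISTS, ENDPOINT_EXISTS, COULD_NOT_BE_PERSISTED, NOT_MAIN, SUCCESS };

struct ReplicationClientConfig {
  std::string name;
  std::string ip_address;
  uint16_t port{};
};

RegisterReplicaError CheckForDuplicates(const std::vector<ReplicationClientConfig> &registered,
                                        const ReplicationClientConfig &config) {
  auto name_matches = [&](const auto &c) { return c.name == config.name; };
  if (std::any_of(registered.begin(), registered.end(), name_matches)) {
    return RegisterReplicaError::NAME_EXISTS;
  }
  auto endpoint_matches = [&](const auto &c) { return c.ip_address == config.ip_address && c.port == config.port; };
  if (std::any_of(registered.begin(), registered.end(), endpoint_matches)) {
    return RegisterReplicaError::ENDPOINT_EXISTS;
  }
  return RegisterReplicaError::SUCCESS;
}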
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#include "replication_coordination_glue/messages.hpp" +#include "rpc/messages.hpp" +#include "slk/serialization.hpp" +#include "slk/streams.hpp" + +namespace memgraph::slk { +// Serialize code for FrequentHeartbeatRes +void Save(const memgraph::replication_coordination_glue::FrequentHeartbeatRes &self, memgraph::slk::Builder *builder) {} +void Load(memgraph::replication_coordination_glue::FrequentHeartbeatRes *self, memgraph::slk::Reader *reader) {} + +// Serialize code for FrequentHeartbeatReq +void Save(const memgraph::replication_coordination_glue::FrequentHeartbeatReq & /*self*/, + memgraph::slk::Builder * /*builder*/) { + /* Nothing to serialize */ +} +void Load(memgraph::replication_coordination_glue::FrequentHeartbeatReq * /*self*/, + memgraph::slk::Reader * /*reader*/) { + /* Nothing to serialize */ +} + +} // namespace memgraph::slk + +namespace memgraph::replication_coordination_glue { + +constexpr utils::TypeInfo FrequentHeartbeatReq::kType{utils::TypeId::REP_FREQUENT_HEARTBEAT_REQ, "FrequentHeartbeatReq", + nullptr}; + +constexpr utils::TypeInfo FrequentHeartbeatRes::kType{utils::TypeId::REP_FREQUENT_HEARTBEAT_RES, "FrequentHeartbeatRes", + nullptr}; + +void FrequentHeartbeatReq::Save(const FrequentHeartbeatReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void FrequentHeartbeatReq::Load(FrequentHeartbeatReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} +void FrequentHeartbeatRes::Save(const FrequentHeartbeatRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void FrequentHeartbeatRes::Load(FrequentHeartbeatRes *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} + +void FrequentHeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder) { + FrequentHeartbeatReq req; + FrequentHeartbeatReq::Load(&req, req_reader); + memgraph::slk::Load(&req, req_reader); + FrequentHeartbeatRes res{}; + memgraph::slk::Save(res, res_builder); +} + +} // namespace memgraph::replication_coordination_glue diff --git a/src/replication_coordination_glue/messages.hpp b/src/replication_coordination_glue/messages.hpp new file mode 100644 index 000000000..5e2ef0fdf --- /dev/null +++ b/src/replication_coordination_glue/messages.hpp @@ -0,0 +1,49 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +#pragma once + +#include "rpc/messages.hpp" +#include "slk/serialization.hpp" + +namespace memgraph::replication_coordination_glue { + +struct FrequentHeartbeatReq { + static const utils::TypeInfo kType; // TODO: make constexpr? + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + static void Load(FrequentHeartbeatReq *self, memgraph::slk::Reader *reader); + static void Save(const FrequentHeartbeatReq &self, memgraph::slk::Builder *builder); + FrequentHeartbeatReq() = default; +}; + +struct FrequentHeartbeatRes { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + static void Load(FrequentHeartbeatRes *self, memgraph::slk::Reader *reader); + static void Save(const FrequentHeartbeatRes &self, memgraph::slk::Builder *builder); + FrequentHeartbeatRes() = default; +}; + +using FrequentHeartbeatRpc = rpc::RequestResponse; + +void FrequentHeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder); + +} // namespace memgraph::replication_coordination_glue + +namespace memgraph::slk { +void Save(const memgraph::replication_coordination_glue::FrequentHeartbeatRes &self, memgraph::slk::Builder *builder); +void Load(memgraph::replication_coordination_glue::FrequentHeartbeatRes *self, memgraph::slk::Reader *reader); +void Save(const memgraph::replication_coordination_glue::FrequentHeartbeatReq & /*self*/, + memgraph::slk::Builder * /*builder*/); +void Load(memgraph::replication_coordination_glue::FrequentHeartbeatReq * /*self*/, memgraph::slk::Reader * /*reader*/); +} // namespace memgraph::slk diff --git a/src/replication/include/replication/mode.hpp b/src/replication_coordination_glue/mode.hpp similarity index 80% rename from src/replication/include/replication/mode.hpp rename to src/replication_coordination_glue/mode.hpp index c1afe2b1f..d0b415733 100644 --- a/src/replication/include/replication/mode.hpp +++ b/src/replication_coordination_glue/mode.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -10,7 +10,9 @@ // licenses/APL.txt. #pragma once + #include -namespace memgraph::replication { + +namespace memgraph::replication_coordination_glue { enum class ReplicationMode : std::uint8_t { SYNC, ASYNC }; -} +} // namespace memgraph::replication_coordination_glue diff --git a/src/replication/include/replication/role.hpp b/src/replication_coordination_glue/role.hpp similarity index 87% rename from src/replication/include/replication/role.hpp rename to src/replication_coordination_glue/role.hpp index bb720f8e0..d472cb454 100644 --- a/src/replication/include/replication/role.hpp +++ b/src/replication_coordination_glue/role.hpp @@ -12,8 +12,8 @@ #pragma once #include -namespace memgraph::replication { +namespace memgraph::replication_coordination_glue { // TODO: figure out a way of ensuring that usage of this type is never uninitialed/defaulted incorrectly to MAIN enum class ReplicationRole : uint8_t { MAIN, REPLICA }; -} // namespace memgraph::replication +} // namespace memgraph::replication_coordination_glue diff --git a/src/rpc/client.hpp b/src/rpc/client.hpp index 1fd3fff8d..a9ae7202d 100644 --- a/src/rpc/client.hpp +++ b/src/rpc/client.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
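The FrequentHeartbeatRpc declared above is what ReplicationClient::StartFrequentCheck (earlier in this diff) streams on every tick, flipping a reconnect flag whenever the RPC fails so the callback can re-sync state on the next success. A standalone caricature of one tick, with the RPC stream replaced by a throwing probe:

#include <functional>
#include <iostream>

// Stand-in for rpc::RpcFailedException thrown by Stream()/AwaitResponse() on a dead link.
struct RpcFailedException {};

// One tick of the frequent check. `probe` plays the role of the heartbeat stream;
// `cb` is told whether this is the first successful contact after a failure so it
// can re-check the instance's state.
void FrequentCheckTick(bool &reconnect, const std::function<void()> &probe,
                       const std::function<void(bool /*reconnect*/)> &cb) {
  try {
    probe();
    cb(reconnect);
    reconnect = false;
  } catch (const RpcFailedException &) {
    // Nothing to do... wait for a reconnect; remember that state may have been lost.
    reconnect = true;
  }
}

int main() {
  bool reconnect = false;
  int tick = 0;
  auto probe = [&] { if (tick == 1) throw RpcFailedException{}; };
  auto cb = [](bool reconnected) { std::cout << (reconnected ? "re-sync state\n" : "healthy\n"); };
  for (; tick < 3; ++tick) FrequentCheckTick(reconnect, probe, cb);  // healthy, <failure>, re-sync state
}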
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -105,11 +105,15 @@ class Client { utils::OnScopeExit res_cleanup([&, response_data_size] { self_->client_->ShiftData(response_data_size); }); utils::TypeId res_id{utils::TypeId::UNKNOWN}; - slk::Load(&res_id, &res_reader); - // NOLINTNEXTLINE(cppcoreguidelines-init-variables) rpc::Version version; - slk::Load(&version, &res_reader); + + try { + slk::Load(&res_id, &res_reader); + slk::Load(&version, &res_reader); + } catch (const slk::SlkReaderException &) { + throw SlkRpcFailedException(); + } if (version != rpc::current_version) { // V1 we introduced versioning with, absolutely no backwards compatibility, diff --git a/src/rpc/exceptions.hpp b/src/rpc/exceptions.hpp index 346c53a9a..f278b2414 100644 --- a/src/rpc/exceptions.hpp +++ b/src/rpc/exceptions.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -45,4 +45,12 @@ class GenericRpcFailedException : public RpcFailedException { SPECIALIZE_GET_EXCEPTION_NAME(GenericRpcFailedException); }; +class SlkRpcFailedException : public RpcFailedException { + public: + SlkRpcFailedException() + : RpcFailedException("Received malformed message from cluster. Please raise an issue on Memgraph GitHub issues.") {} + + SPECIALIZE_GET_EXCEPTION_NAME(SlkRpcFailedException); +}; + } // namespace memgraph::rpc diff --git a/src/rpc/protocol.cpp b/src/rpc/protocol.cpp index 8bc77579b..2a9c8ea72 100644 --- a/src/rpc/protocol.cpp +++ b/src/rpc/protocol.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -13,7 +13,7 @@ #include -#include "rpc/messages.hpp" +#include "rpc/exceptions.hpp" #include "rpc/server.hpp" #include "rpc/version.hpp" #include "slk/serialization.hpp" @@ -46,10 +46,14 @@ void Session::Execute() { // Load the request ID. 
utils::TypeId req_id{utils::TypeId::UNKNOWN}; - slk::Load(&req_id, &req_reader); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) rpc::Version version; - slk::Load(&version, &req_reader); + try { + slk::Load(&req_id, &req_reader); + slk::Load(&version, &req_reader); + } catch (const slk::SlkReaderException &) { + throw rpc::SlkRpcFailedException(); + } if (version != rpc::current_version) { // V1 we introduced versioning with, absolutely no backwards compatibility, @@ -76,12 +80,20 @@ void Session::Execute() { SPDLOG_TRACE("[RpcServer] received {}", extended_it->second.req_type.name); slk::Save(extended_it->second.res_type.id, &res_builder); slk::Save(rpc::current_version, &res_builder); - extended_it->second.callback(endpoint_, &req_reader, &res_builder); + try { + extended_it->second.callback(endpoint_, &req_reader, &res_builder); + } catch (const slk::SlkReaderException &) { + throw rpc::SlkRpcFailedException(); + } } else { SPDLOG_TRACE("[RpcServer] received {}", it->second.req_type.name); slk::Save(it->second.res_type.id, &res_builder); slk::Save(rpc::current_version, &res_builder); - it->second.callback(&req_reader, &res_builder); + try { + it->second.callback(&req_reader, &res_builder); + } catch (const slk::SlkReaderException &) { + throw rpc::SlkRpcFailedException(); + } } // Finalize the SLK streams. diff --git a/src/rpc/version.hpp b/src/rpc/version.hpp index 29e7f8d3a..b234a3ccc 100644 --- a/src/rpc/version.hpp +++ b/src/rpc/version.hpp @@ -22,6 +22,12 @@ using Version = uint64_t; // probability of accidental match/conformance with pre 2.13 versions constexpr auto v1 = Version{2023'10'30'0'2'13}; -constexpr auto current_version = v1; +// TypeId has been changed, they were not stable +// Added stable numbering for replication types to be in +// 2000-2999 range. We shouldn't need to version bump again +// for any TypeIds that get added. +constexpr auto v2 = Version{2023'12'07'0'2'14}; + +constexpr auto current_version = v2; } // namespace memgraph::rpc diff --git a/src/slk/serialization.hpp b/src/slk/serialization.hpp index 9ca99527d..41c3d8539 100644 --- a/src/slk/serialization.hpp +++ b/src/slk/serialization.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -55,11 +55,22 @@ class SlkDecodeException : public utils::BasicException { // here because C++ doesn't know how to resolve the function call if it isn't in // the global namespace. 
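Both the client and the server above now wrap the header loads (type id plus version) so a truncated or corrupt stream surfaces as SlkRpcFailedException rather than a raw SlkReaderException. The shape of that translation, reduced to standard exceptions; the real code reports a version mismatch through its own path, shown here only to keep the flow complete:

#include <cstdint>
#include <functional>
#include <stdexcept>

struct SlkReaderException : std::runtime_error {
  using std::runtime_error::runtime_error;
};

struct SlkRpcFailedException : std::runtime_error {
  SlkRpcFailedException()
      : std::runtime_error(
            "Received malformed message from cluster. Please raise an issue on Memgraph GitHub issues.") {}
};

constexpr uint64_t kCurrentVersion = 2;

struct Header {
  uint64_t type_id;
  uint64_t version;
};

// `load` stands in for the two slk::Load calls on the wire reader.
Header LoadHeader(const std::function<Header()> &load) {
  Header header{};
  try {
    header = load();  // may throw SlkReaderException on a truncated or corrupt stream
  } catch (const SlkReaderException &) {
    throw SlkRpcFailedException();  // the protocol-level error the RPC layer reports to users
  }
  if (header.version != kCurrentVersion) {
    throw SlkRpcFailedException();
  }
  return header;
}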
+template +inline void Save(const std::vector &obj, Builder *builder, + std::function item_save_function); +template +inline void Load(std::vector *obj, Reader *reader, std::function item_load_function); + template void Save(const std::vector &obj, Builder *builder); template void Load(std::vector *obj, Reader *reader); +template +void Save(const std::array &obj, Builder *builder); +template +void Load(std::array *obj, Reader *reader); + template void Save(const std::set &obj, Builder *builder); template @@ -201,6 +212,24 @@ inline void Load(std::vector *obj, Reader *reader) { } } +template +inline void Save(const std::array &obj, Builder *builder) { + uint64_t size = obj.size(); + Save(size, builder); + for (const auto &item : obj) { + Save(item, builder); + } +} + +template +inline void Load(std::array *obj, Reader *reader) { + uint64_t size = 0; + Load(&size, reader); + for (uint64_t i = 0; i < size; ++i) { + Load(&(*obj)[i], reader); + } +} + template inline void Save(const std::set &obj, Builder *builder) { uint64_t size = obj.size(); @@ -486,4 +515,17 @@ inline void Load(utils::TypeId *obj, Reader *reader) { *obj = utils::TypeId(utils::MemcpyCast(obj_encoded)); } +template +void Save(const T &enum_value, slk::Builder *builder) { + slk::Save(utils::UnderlyingCast(enum_value), builder); +} + +template +void Load(T *enum_value, slk::Reader *reader) { + using UnderlyingType = std::underlying_type_t; + UnderlyingType value; + slk::Load(&value, reader); + *enum_value = static_cast(value); +} + } // namespace memgraph::slk diff --git a/src/storage/v2/config.hpp b/src/storage/v2/config.hpp index dee2afe87..3533594ce 100644 --- a/src/storage/v2/config.hpp +++ b/src/storage/v2/config.hpp @@ -14,10 +14,12 @@ #include #include #include + #include "storage/v2/isolation_level.hpp" #include "storage/v2/storage_mode.hpp" #include "utils/exceptions.hpp" #include "utils/logging.hpp" +#include "utils/uuid.hpp" namespace memgraph::storage { @@ -27,6 +29,41 @@ class StorageConfigException : public utils::BasicException { SPECIALIZE_GET_EXCEPTION_NAME(StorageConfigException) }; +struct SalientConfig { + std::string name; + utils::UUID uuid; + StorageMode storage_mode{StorageMode::IN_MEMORY_TRANSACTIONAL}; + struct Items { + bool properties_on_edges{true}; + bool enable_schema_metadata{false}; + friend bool operator==(const Items &lrh, const Items &rhs) = default; + } items; + + friend bool operator==(const SalientConfig &, const SalientConfig &) = default; +}; + +inline void to_json(nlohmann::json &data, SalientConfig::Items const &items) { + data = nlohmann::json{{"properties_on_edges", items.properties_on_edges}, + {"enable_schema_metadata", items.enable_schema_metadata}}; +} + +inline void from_json(const nlohmann::json &data, SalientConfig::Items &items) { + data.at("properties_on_edges").get_to(items.properties_on_edges); + data.at("enable_schema_metadata").get_to(items.enable_schema_metadata); +} + +inline void to_json(nlohmann::json &data, SalientConfig const &config) { + data = nlohmann::json{ + {"items", config.items}, {"name", config.name}, {"uuid", config.uuid}, {"storage_mode", config.storage_mode}}; +} + +inline void from_json(const nlohmann::json &data, SalientConfig &config) { + data.at("items").get_to(config.items); + data.at("name").get_to(config.name); + data.at("uuid").get_to(config.uuid); + data.at("storage_mode").get_to(config.storage_mode); +} + /// Pass this class to the \ref Storage constructor to change the behavior of /// the storage. This class also defines the default behavior. 
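SalientConfig above ships with nlohmann to_json/from_json helpers so the per-database settings can round-trip through durability as a single JSON document. A trimmed, compilable version of that round trip, with the UUID and storage-mode fields left out because they need Memgraph's own types:

#include <iostream>
#include <string>

#include <nlohmann/json.hpp>

struct Items {
  bool properties_on_edges{true};
  bool enable_schema_metadata{false};
};

struct SalientConfig {
  std::string name;
  Items items;
};

void to_json(nlohmann::json &data, const Items &items) {
  data = nlohmann::json{{"properties_on_edges", items.properties_on_edges},
                        {"enable_schema_metadata", items.enable_schema_metadata}};
}
void from_json(const nlohmann::json &data, Items &items) {
  data.at("properties_on_edges").get_to(items.properties_on_edges);
  data.at("enable_schema_metadata").get_to(items.enable_schema_metadata);
}
void to_json(nlohmann::json &data, const SalientConfig &config) {
  data = nlohmann::json{{"name", config.name}, {"items", config.items}};
}
void from_json(const nlohmann::json &data, SalientConfig &config) {
  data.at("name").get_to(config.name);
  data.at("items").get_to(config.items);
}

int main() {
  SalientConfig config{"memgraph", {true, true}};
  nlohmann::json as_json = config;               // to_json found via ADL
  auto restored = as_json.get<SalientConfig>();  // from_json via ADL
  std::cout << as_json.dump() << '\n';
  return restored.name == config.name ? 0 : 1;
}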
struct Config { @@ -36,46 +73,40 @@ struct Config { Type type{Type::PERIODIC}; std::chrono::milliseconds interval{std::chrono::milliseconds(1000)}; friend bool operator==(const Gc &lrh, const Gc &rhs) = default; - } gc; - - struct Items { - bool properties_on_edges{true}; - bool enable_schema_metadata{false}; - friend bool operator==(const Items &lrh, const Items &rhs) = default; - } items; + } gc; // SYSTEM FLAG struct Durability { enum class SnapshotWalMode { DISABLED, PERIODIC_SNAPSHOT, PERIODIC_SNAPSHOT_WITH_WAL }; - std::filesystem::path storage_directory{"storage"}; + std::filesystem::path storage_directory{"storage"}; // PER INSTANCE SYSTEM FLAG-> root folder...ish - bool recover_on_startup{false}; + bool recover_on_startup{false}; // PER INSTANCE SYSTEM FLAG - SnapshotWalMode snapshot_wal_mode{SnapshotWalMode::DISABLED}; + SnapshotWalMode snapshot_wal_mode{SnapshotWalMode::DISABLED}; // PER DATABASE - std::chrono::milliseconds snapshot_interval{std::chrono::minutes(2)}; - uint64_t snapshot_retention_count{3}; + std::chrono::milliseconds snapshot_interval{std::chrono::minutes(2)}; // PER DATABASE + uint64_t snapshot_retention_count{3}; // PER DATABASE - uint64_t wal_file_size_kibibytes{20 * 1024}; - uint64_t wal_file_flush_every_n_tx{100000}; + uint64_t wal_file_size_kibibytes{20 * 1024}; // PER DATABASE + uint64_t wal_file_flush_every_n_tx{100000}; // PER DATABASE - bool snapshot_on_exit{false}; - bool restore_replication_state_on_startup{false}; + bool snapshot_on_exit{false}; // PER DATABASE + bool restore_replication_state_on_startup{false}; // PER INSTANCE - uint64_t items_per_batch{1'000'000}; - uint64_t recovery_thread_count{8}; + uint64_t items_per_batch{1'000'000}; // PER DATABASE + uint64_t recovery_thread_count{8}; // PER INSTANCE SYSTEM FLAG // deprecated - bool allow_parallel_index_creation{false}; + bool allow_parallel_index_creation{false}; // KILL - bool allow_parallel_schema_creation{false}; + bool allow_parallel_schema_creation{false}; // PER DATABASE friend bool operator==(const Durability &lrh, const Durability &rhs) = default; } durability; struct Transaction { IsolationLevel isolation_level{IsolationLevel::SNAPSHOT_ISOLATION}; friend bool operator==(const Transaction &lrh, const Transaction &rhs) = default; - } transaction; + } transaction; // PER DATABASE struct DiskConfig { std::filesystem::path main_storage_directory{"storage/rocksdb_main_storage"}; @@ -89,9 +120,9 @@ struct Config { friend bool operator==(const DiskConfig &lrh, const DiskConfig &rhs) = default; } disk; - std::string name; - bool force_on_disk{false}; - StorageMode storage_mode{StorageMode::IN_MEMORY_TRANSACTIONAL}; + SalientConfig salient; + + bool force_on_disk{false}; // TODO: cleanup.... remove + make the default storage_mode ON_DISK_TRANSACTIONAL if true friend bool operator==(const Config &lrh, const Config &rhs) = default; }; diff --git a/src/storage/v2/database_access.hpp b/src/storage/v2/database_access.hpp new file mode 100644 index 000000000..de7a1d7d4 --- /dev/null +++ b/src/storage/v2/database_access.hpp @@ -0,0 +1,25 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. 
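With this split, everything a replica has to agree on (name, uuid, storage mode, item flags) travels inside Config::salient, and the nlohmann to_json/from_json helpers above make that sub-object wire-friendly, while instance-level knobs such as the storage directory or recovery thread count stay on Config itself, matching the PER INSTANCE / PER DATABASE annotations. A hedged usage sketch, assuming the JSON adapters for utils::UUID and StorageMode that these helpers rely on are provided elsewhere:

#include <nlohmann/json.hpp>
#include "storage/v2/config.hpp"

memgraph::storage::Config MakeConfigSketch() {
  memgraph::storage::Config config;
  // Instance-wide settings stay on Config itself.
  config.durability.storage_directory = "/var/lib/memgraph/storage";
  config.durability.recovery_thread_count = 8;
  // Per-database, replicable settings live under `salient`.
  config.salient.name = "memgraph";
  config.salient.storage_mode = memgraph::storage::StorageMode::IN_MEMORY_TRANSACTIONAL;
  config.salient.items.properties_on_edges = true;
  // Round-trip through JSON via the ADL to_json/from_json helpers above.
  nlohmann::json as_json = config.salient;
  auto round_tripped = as_json.get<memgraph::storage::SalientConfig>();
  (void)round_tripped;
  return config;
}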
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#include + +namespace memgraph::storage { + +/** + * @brief We need to protect the database using a DatabaseAccess, and we need to keep the replication/storage/dbms + * untied. To achieve that we are using std::any, but beware to pass in the correct type using DatabaseAccess = + * memgraph::utils::Gatekeeper::Accessor; + */ +using DatabaseAccessProtector = std::any; + +} // namespace memgraph::storage diff --git a/src/storage/v2/disk/storage.cpp b/src/storage/v2/disk/storage.cpp index 44ec89ece..7c02ed5ae 100644 --- a/src/storage/v2/disk/storage.cpp +++ b/src/storage/v2/disk/storage.cpp @@ -288,7 +288,8 @@ DiskStorage::~DiskStorage() { DiskStorage::DiskAccessor::DiskAccessor(auto tag, DiskStorage *storage, IsolationLevel isolation_level, StorageMode storage_mode) - : Accessor(tag, storage, isolation_level, storage_mode, memgraph::replication::ReplicationRole::MAIN) { + : Accessor(tag, storage, isolation_level, storage_mode, + memgraph::replication_coordination_glue::ReplicationRole::MAIN) { rocksdb::WriteOptions write_options; auto txOptions = rocksdb::TransactionOptions{.set_snapshot = true}; transaction_.disk_transaction_ = storage->kvstore_->db_->BeginTransaction(write_options, txOptions); @@ -837,7 +838,8 @@ StorageInfo DiskStorage::GetBaseInfo(bool /* unused */) { return info; } -StorageInfo DiskStorage::GetInfo(bool force_dir, memgraph::replication::ReplicationRole replication_role) { +StorageInfo DiskStorage::GetInfo(bool force_dir, + memgraph::replication_coordination_glue::ReplicationRole replication_role) { StorageInfo info = GetBaseInfo(force_dir); { auto access = Access(replication_role); @@ -951,7 +953,7 @@ Result DiskStorage::DiskAccessor::CreateEdge(VertexAccessor *from, EdgeRef edge(gid); bool edge_import_mode_active = disk_storage->edge_import_status_ == EdgeImportMode::ACTIVE; - if (storage_->config_.items.properties_on_edges) { + if (storage_->config_.salient.items.properties_on_edges) { auto acc = edge_import_mode_active ? disk_storage->edge_import_mode_cache_->AccessToEdges() : transaction_.edges_->access(); auto *delta = CreateDeleteObjectDelta(&transaction_); @@ -975,7 +977,7 @@ Result DiskStorage::DiskAccessor::CreateEdge(VertexAccessor *from, transaction_.manyDeltasCache.Invalidate(from_vertex, edge_type, EdgeDirection::OUT); transaction_.manyDeltasCache.Invalidate(to_vertex, edge_type, EdgeDirection::IN); - if (storage_->config_.items.enable_schema_metadata) { + if (storage_->config_.salient.items.enable_schema_metadata) { storage_->stored_edge_types_.try_insert(edge_type); } storage_->edge_count_.fetch_add(1, std::memory_order_acq_rel); @@ -1283,7 +1285,7 @@ bool DiskStorage::DeleteEdgeFromConnectivityIndex(Transaction *transaction, cons const auto src_vertex_gid = modified_edge.second.src_vertex_gid.ToString(); const auto dst_vertex_gid = modified_edge.second.dest_vertex_gid.ToString(); - if (!config_.items.properties_on_edges) { + if (!config_.salient.items.properties_on_edges) { /// If the object was created then flush it, otherwise since properties on edges are false /// edge wasn't modified for sure. 
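DatabaseAccessProtector keeps the dependency direction intact: storage and replication code only ever see an opaque std::any, while the layer that created the gatekeeper accessor is the only one that casts it back. A sketch of the idea, with a hypothetical DummyAccessor standing in for the real Gatekeeper accessor named in the comment above:

#include <any>
#include <memory>
#include <utility>

struct DummyAccessor {               // stand-in for the real gatekeeper accessor;
  std::shared_ptr<int> keep_alive;   // holding it keeps the guarded object alive
};

// Storage-side code only moves the opaque token around; it never names dbms types.
void StorageSideCommit(std::any db_acc) {
  // Capture by move: whatever the protector guards stays alive at least as long
  // as this task does.
  auto task = [protector = std::move(db_acc)]() { /* replicate, flush, ... */ };
  task();
}

// Only the layer that produced the accessor casts it back, so the dependency
// direction stays dbms -> storage.
void DbmsSideInspect(std::any &db_acc) {
  if (auto *acc = std::any_cast<DummyAccessor>(&db_acc)) {
    (void)acc;  // safe to use; a wrong-type token simply yields nullptr here
  }
}

The cost of the type erasure is that passing the wrong type is only caught at runtime, which is exactly what the "beware to pass in the correct type" warning in the header comment is about.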
if (root_action == Delta::Action::DELETE_OBJECT && @@ -1400,7 +1402,7 @@ std::optional DiskStorage::CreateEdgeFromDisk(const VertexAccessor } EdgeRef edge(gid); - if (config_.items.properties_on_edges) { + if (config_.salient.items.properties_on_edges) { auto acc = edge_import_mode_active ? edge_import_mode_cache_->AccessToEdges() : transaction->edges_->access(); auto *delta = CreateDeleteDeserializedObjectDelta(transaction, old_disk_key, std::move(read_ts)); auto [it, inserted] = acc.insert(Edge(gid, delta)); @@ -1458,7 +1460,8 @@ std::vector DiskStorage::OutEdges(const VertexAccessor *src_vertex if (!edge_types.empty() && !utils::Contains(edge_types, edge_type_id)) continue; auto edge_gid = Gid::FromString(edge_gid_str); - auto properties_str = config_.items.properties_on_edges ? utils::GetPropertiesFromEdgeValue(edge_val_str) : ""; + auto properties_str = + config_.salient.items.properties_on_edges ? utils::GetPropertiesFromEdgeValue(edge_val_str) : ""; const auto edge = std::invoke([this, destination, &edge_val_str, transaction, view, src_vertex, edge_type_id, edge_gid, &properties_str, &edge_gid_str]() { @@ -1599,7 +1602,7 @@ DiskStorage::CheckExistingVerticesBeforeCreatingUniqueConstraint(LabelId label, // NOLINTNEXTLINE(google-default-arguments) utils::BasicResult DiskStorage::DiskAccessor::Commit( - const std::optional desired_commit_timestamp, bool /*is_main*/) { + CommitReplArgs reparg, DatabaseAccessProtector /*db_acc*/) { MG_ASSERT(is_transaction_active_, "The transaction is already terminated!"); MG_ASSERT(!transaction_.must_abort, "The transaction can't be committed!"); @@ -1610,7 +1613,7 @@ utils::BasicResult DiskStorage::DiskAccessor::Co // This is usually done by the MVCC, but it does not handle the metadata deltas transaction_.EnsureCommitTimestampExists(); std::unique_lock engine_guard(storage_->engine_lock_); - commit_timestamp_.emplace(disk_storage->CommitTimestamp(desired_commit_timestamp)); + commit_timestamp_.emplace(disk_storage->CommitTimestamp(reparg.desired_commit_timestamp)); transaction_.commit_timestamp->store(*commit_timestamp_, std::memory_order_release); for (const auto &md_delta : transaction_.md_deltas) { @@ -1696,7 +1699,7 @@ utils::BasicResult DiskStorage::DiskAccessor::Co }))) { } else { std::unique_lock engine_guard(storage_->engine_lock_); - commit_timestamp_.emplace(disk_storage->CommitTimestamp(desired_commit_timestamp)); + commit_timestamp_.emplace(disk_storage->CommitTimestamp(reparg.desired_commit_timestamp)); transaction_.commit_timestamp->store(*commit_timestamp_, std::memory_order_release); if (edge_import_mode_active) { @@ -2016,7 +2019,7 @@ UniqueConstraints::DeletionStatus DiskStorage::DiskAccessor::DropUniqueConstrain } Transaction DiskStorage::CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode, - memgraph::replication::ReplicationRole /*is_main*/) { + memgraph::replication_coordination_glue::ReplicationRole /*is_main*/) { /// We acquire the transaction engine lock here because we access (and /// modify) the transaction engine variables (`transaction_id` and /// `timestamp`) below. 
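CommitReplArgs itself is not defined in this hunk; from its uses here and further down (reparg.desired_commit_timestamp, reparg.IsMain(), and a defaulted {} argument on the MAIN path) it plausibly carries something like the following, shown only as an inferred sketch:

#include <cstdint>
#include <optional>

struct CommitReplArgsSketch {
  // Set only when the commit is replayed on a REPLICA from MAIN's stream;
  // on MAIN it stays empty and the storage picks the next commit timestamp.
  std::optional<uint64_t> desired_commit_timestamp{};
  bool is_main{true};
  bool IsMain() const { return is_main; }
};

On MAIN the defaulted argument leaves desired_commit_timestamp empty; on a REPLICA the replication handler supplies the timestamp received from MAIN, so the replica's WAL stays ordered consistently with the main's.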
@@ -2041,8 +2044,9 @@ uint64_t DiskStorage::CommitTimestamp(const std::optional desired_comm return *desired_commit_timestamp; } -std::unique_ptr DiskStorage::Access(memgraph::replication::ReplicationRole /*replication_role*/, - std::optional override_isolation_level) { +std::unique_ptr DiskStorage::Access( + memgraph::replication_coordination_glue::ReplicationRole /*replication_role*/, + std::optional override_isolation_level) { auto isolation_level = override_isolation_level.value_or(isolation_level_); if (isolation_level != IsolationLevel::SNAPSHOT_ISOLATION) { throw utils::NotYetImplemented("Disk storage supports only SNAPSHOT isolation level."); @@ -2051,7 +2055,7 @@ std::unique_ptr DiskStorage::Access(memgraph::replication::Re new DiskAccessor{Storage::Accessor::shared_access, this, isolation_level, storage_mode_}); } std::unique_ptr DiskStorage::UniqueAccess( - memgraph::replication::ReplicationRole /*replication_role*/, + memgraph::replication_coordination_glue::ReplicationRole /*replication_role*/, std::optional override_isolation_level) { auto isolation_level = override_isolation_level.value_or(isolation_level_); if (isolation_level != IsolationLevel::SNAPSHOT_ISOLATION) { diff --git a/src/storage/v2/disk/storage.hpp b/src/storage/v2/disk/storage.hpp index 219cb8272..293e102b1 100644 --- a/src/storage/v2/disk/storage.hpp +++ b/src/storage/v2/disk/storage.hpp @@ -145,8 +145,8 @@ class DiskStorage final : public Storage { ConstraintsInfo ListAllConstraints() const override; // NOLINTNEXTLINE(google-default-arguments) - utils::BasicResult Commit(std::optional desired_commit_timestamp = {}, - bool is_main = true) override; + utils::BasicResult Commit(CommitReplArgs reparg = {}, + DatabaseAccessProtector db_acc = {}) override; void UpdateObjectsCountOnAbort(); @@ -176,11 +176,11 @@ class DiskStorage final : public Storage { }; using Storage::Access; - std::unique_ptr Access(memgraph::replication::ReplicationRole replication_role, + std::unique_ptr Access(memgraph::replication_coordination_glue::ReplicationRole replication_role, std::optional override_isolation_level) override; using Storage::UniqueAccess; - std::unique_ptr UniqueAccess(memgraph::replication::ReplicationRole replication_role, + std::unique_ptr UniqueAccess(memgraph::replication_coordination_glue::ReplicationRole replication_role, std::optional override_isolation_level) override; /// Flushing methods @@ -285,7 +285,7 @@ class DiskStorage final : public Storage { RocksDBStorage *GetRocksDBStorage() const { return kvstore_.get(); } Transaction CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode, - memgraph::replication::ReplicationRole replication_role) override; + memgraph::replication_coordination_glue::ReplicationRole replication_role) override; void SetEdgeImportMode(EdgeImportMode edge_import_status); @@ -308,7 +308,8 @@ class DiskStorage final : public Storage { PropertyId property); StorageInfo GetBaseInfo(bool force_directory) override; - StorageInfo GetInfo(bool force_directory, memgraph::replication::ReplicationRole replication_role) override; + StorageInfo GetInfo(bool force_directory, + memgraph::replication_coordination_glue::ReplicationRole replication_role) override; void FreeMemory(std::unique_lock /*lock*/) override {} diff --git a/src/storage/v2/durability/durability.cpp b/src/storage/v2/durability/durability.cpp index 6a89b7b5a..92c4d11e8 100644 --- a/src/storage/v2/durability/durability.cpp +++ b/src/storage/v2/durability/durability.cpp @@ -428,7 +428,7 @@ std::optional 
Recovery::RecoverData(std::string *uuid, Replication } try { auto info = LoadWal(wal_file.path, &indices_constraints, last_loaded_timestamp, vertices, edges, name_id_mapper, - edge_count, config.items); + edge_count, config.salient.items); recovery_info.next_vertex_id = std::max(recovery_info.next_vertex_id, info.next_vertex_id); recovery_info.next_edge_id = std::max(recovery_info.next_edge_id, info.next_edge_id); recovery_info.next_timestamp = std::max(recovery_info.next_timestamp, info.next_timestamp); diff --git a/src/storage/v2/durability/snapshot.cpp b/src/storage/v2/durability/snapshot.cpp index 87d70aa75..ab8896718 100644 --- a/src/storage/v2/durability/snapshot.cpp +++ b/src/storage/v2/durability/snapshot.cpp @@ -222,7 +222,7 @@ std::vector ReadBatchInfos(Decoder &snapshot) { template void LoadPartialEdges(const std::filesystem::path &path, utils::SkipList &edges, const uint64_t from_offset, - const uint64_t edges_count, const Config::Items items, TFunc get_property_from_id) { + const uint64_t edges_count, const SalientConfig::Items items, TFunc get_property_from_id) { Decoder snapshot; snapshot.Initialize(path, kSnapshotMagic); @@ -420,7 +420,7 @@ template LoadPartialConnectivityResult LoadPartialConnectivity(const std::filesystem::path &path, utils::SkipList &vertices, utils::SkipList &edges, const uint64_t from_offset, const uint64_t vertices_count, - const Config::Items items, const bool snapshot_has_edges, + const SalientConfig::Items items, const bool snapshot_has_edges, TEdgeTypeFromIdFunc get_edge_type_from_id) { Decoder snapshot; snapshot.Initialize(path, kSnapshotMagic); @@ -621,7 +621,7 @@ RecoveredSnapshot LoadSnapshotVersion14(const std::filesystem::path &path, utils utils::SkipList *edges, std::deque> *epoch_history, NameIdMapper *name_id_mapper, std::atomic *edge_count, - Config::Items items) { + SalientConfig::Items items) { RecoveryInfo ret; RecoveredIndicesAndConstraints indices_constraints; @@ -1178,8 +1178,8 @@ RecoveredSnapshot LoadSnapshotVersion15(const std::filesystem::path &path, utils RecoverOnMultipleThreads( config.durability.recovery_thread_count, - [path, edges, items = config.items, &get_property_from_id](const size_t /*batch_index*/, - const BatchInfo &batch) { + [path, edges, items = config.salient.items, &get_property_from_id](const size_t /*batch_index*/, + const BatchInfo &batch) { LoadPartialEdges(path, *edges, batch.offset, batch.count, items, get_property_from_id); }, edge_batches); @@ -1219,7 +1219,7 @@ RecoveredSnapshot LoadSnapshotVersion15(const std::filesystem::path &path, utils RecoverOnMultipleThreads( config.durability.recovery_thread_count, - [path, vertices, edges, edge_count, items = config.items, snapshot_has_edges, &get_edge_type_from_id, + [path, vertices, edges, edge_count, items = config.salient.items, snapshot_has_edges, &get_edge_type_from_id, &highest_edge_gid, &recovery_info](const size_t batch_index, const BatchInfo &batch) { const auto result = LoadPartialConnectivity(path, *vertices, *edges, batch.offset, batch.count, items, snapshot_has_edges, get_edge_type_from_id); @@ -1392,7 +1392,8 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis if (!IsVersionSupported(*version)) throw RecoveryFailure(fmt::format("Invalid snapshot version {}", *version)); if (*version == 14U) { - return LoadSnapshotVersion14(path, vertices, edges, epoch_history, name_id_mapper, edge_count, config.items); + return LoadSnapshotVersion14(path, vertices, edges, epoch_history, name_id_mapper, edge_count, + 
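The recovery paths above hand config.salient.items into per-batch lambdas that RecoverOnMultipleThreads fans out over config.durability.recovery_thread_count workers. The helper's definition is not part of this diff; a stand-in consistent with the call shape (thread count, callback taking (batch_index, batch), list of batches) could look like this:

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <thread>
#include <vector>

struct BatchInfoSketch {
  uint64_t offset;
  uint64_t count;
};

template <typename F>
void RecoverOnMultipleThreadsSketch(size_t thread_count, F &&callback,
                                    const std::vector<BatchInfoSketch> &batches) {
  std::vector<std::thread> workers;
  std::atomic<size_t> next{0};
  for (size_t t = 0; t < thread_count; ++t) {
    workers.emplace_back([&]() {
      while (true) {
        const size_t i = next.fetch_add(1);
        if (i >= batches.size()) break;
        // e.g. LoadPartialEdges(path, *edges, batch.offset, batch.count, items, ...)
        callback(i, batches[i]);
      }
    });
  }
  for (auto &w : workers) w.join();
}

Each batch covers a disjoint (offset, count) slice of the snapshot, and the skip lists the callbacks insert into appear to be concurrent structures, which is what makes the parallel fan-out safe.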
config.salient.items); } if (*version == 15U) { return LoadSnapshotVersion15(path, vertices, edges, epoch_history, name_id_mapper, edge_count, config); @@ -1471,8 +1472,8 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis RecoverOnMultipleThreads( config.durability.recovery_thread_count, - [path, edges, items = config.items, &get_property_from_id](const size_t /*batch_index*/, - const BatchInfo &batch) { + [path, edges, items = config.salient.items, &get_property_from_id](const size_t /*batch_index*/, + const BatchInfo &batch) { LoadPartialEdges(path, *edges, batch.offset, batch.count, items, get_property_from_id); }, edge_batches); @@ -1512,7 +1513,7 @@ RecoveredSnapshot LoadSnapshot(const std::filesystem::path &path, utils::SkipLis RecoverOnMultipleThreads( config.durability.recovery_thread_count, - [path, vertices, edges, edge_count, items = config.items, snapshot_has_edges, &get_edge_type_from_id, + [path, vertices, edges, edge_count, items = config.salient.items, snapshot_has_edges, &get_edge_type_from_id, &highest_edge_gid, &recovery_info](const size_t batch_index, const BatchInfo &batch) { const auto result = LoadPartialConnectivity(path, *vertices, *edges, batch.offset, batch.count, items, snapshot_has_edges, get_edge_type_from_id); @@ -1869,7 +1870,7 @@ void CreateSnapshot(Storage *storage, Transaction *transaction, const std::files auto items_in_current_batch{0UL}; auto batch_start_offset{0UL}; // Store all edges. - if (storage->config_.items.properties_on_edges) { + if (storage->config_.salient.items.properties_on_edges) { offset_edges = snapshot.GetPosition(); batch_start_offset = offset_edges; auto acc = edges->access(); @@ -1983,18 +1984,34 @@ void CreateSnapshot(Storage *storage, Transaction *transaction, const std::files snapshot.WritePropertyValue(item.second); } const auto &in_edges = maybe_in_edges.GetValue().edges; - snapshot.WriteUint(in_edges.size()); - for (const auto &item : in_edges) { - snapshot.WriteUint(item.Gid().AsUint()); - snapshot.WriteUint(item.FromVertex().Gid().AsUint()); - write_mapping(item.EdgeType()); - } const auto &out_edges = maybe_out_edges.GetValue().edges; - snapshot.WriteUint(out_edges.size()); - for (const auto &item : out_edges) { - snapshot.WriteUint(item.Gid().AsUint()); - snapshot.WriteUint(item.ToVertex().Gid().AsUint()); - write_mapping(item.EdgeType()); + + if (storage->config_.salient.items.properties_on_edges) { + snapshot.WriteUint(in_edges.size()); + for (const auto &item : in_edges) { + snapshot.WriteUint(item.GidPropertiesOnEdges().AsUint()); + snapshot.WriteUint(item.FromVertex().Gid().AsUint()); + write_mapping(item.EdgeType()); + } + snapshot.WriteUint(out_edges.size()); + for (const auto &item : out_edges) { + snapshot.WriteUint(item.GidPropertiesOnEdges().AsUint()); + snapshot.WriteUint(item.ToVertex().Gid().AsUint()); + write_mapping(item.EdgeType()); + } + } else { + snapshot.WriteUint(in_edges.size()); + for (const auto &item : in_edges) { + snapshot.WriteUint(item.GidNoPropertiesOnEdges().AsUint()); + snapshot.WriteUint(item.FromVertex().Gid().AsUint()); + write_mapping(item.EdgeType()); + } + snapshot.WriteUint(out_edges.size()); + for (const auto &item : out_edges) { + snapshot.WriteUint(item.GidNoPropertiesOnEdges().AsUint()); + snapshot.WriteUint(item.ToVertex().Gid().AsUint()); + write_mapping(item.EdgeType()); + } } } diff --git a/src/storage/v2/durability/wal.cpp b/src/storage/v2/durability/wal.cpp index dfeb9c447..6ac128f95 100644 --- a/src/storage/v2/durability/wal.cpp +++ 
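The two writing paths above exist because the edge reference has two representations: with properties_on_edges enabled it holds a pointer to the full Edge record (whose gid lives behind the pointer), without it the Gid is stored inline. The new GidPropertiesOnEdges()/GidNoPropertiesOnEdges() accessors, introduced in edge_accessor.hpp further down, make that choice explicit so the config check can be hoisted out of the per-edge loop. A simplified sketch of the duality, with stand-in types:

#include <cstdint>

struct GidSketch { uint64_t value; };
struct EdgeRecordSketch { GidSketch gid; /* properties, delta chain, ... */ };

union EdgeRefSketch {
  EdgeRecordSketch *ptr;  // active iff properties_on_edges == true
  GidSketch gid;          // active iff properties_on_edges == false
};

GidSketch EdgeGid(EdgeRefSketch ref, bool properties_on_edges) {
  // The caller must know which member is active; the snapshot writer above
  // checks the flag once and then uses the matching accessor for every edge.
  return properties_on_edges ? ref.ptr->gid : ref.gid;
}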
b/src/storage/v2/durability/wal.cpp @@ -560,7 +560,7 @@ WalDeltaData::Type SkipWalDeltaData(BaseDecoder *decoder) { return delta.type; } -void EncodeDelta(BaseEncoder *encoder, NameIdMapper *name_id_mapper, Config::Items items, const Delta &delta, +void EncodeDelta(BaseEncoder *encoder, NameIdMapper *name_id_mapper, SalientConfig::Items items, const Delta &delta, const Vertex &vertex, uint64_t timestamp) { // When converting a Delta to a WAL delta the logic is inverted. That is // because the Delta's represent undo actions and we want to store redo @@ -723,7 +723,7 @@ void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, Storage RecoveryInfo LoadWal(const std::filesystem::path &path, RecoveredIndicesAndConstraints *indices_constraints, const std::optional last_loaded_timestamp, utils::SkipList *vertices, utils::SkipList *edges, NameIdMapper *name_id_mapper, std::atomic *edge_count, - Config::Items items) { + SalientConfig::Items items) { spdlog::info("Trying to load WAL file {}.", path); RecoveryInfo ret; @@ -1012,8 +1012,8 @@ RecoveryInfo LoadWal(const std::filesystem::path &path, RecoveredIndicesAndConst } WalFile::WalFile(const std::filesystem::path &wal_directory, const std::string_view uuid, - const std::string_view epoch_id, Config::Items items, NameIdMapper *name_id_mapper, uint64_t seq_num, - utils::FileRetainer *file_retainer) + const std::string_view epoch_id, SalientConfig::Items items, NameIdMapper *name_id_mapper, + uint64_t seq_num, utils::FileRetainer *file_retainer) : items_(items), name_id_mapper_(name_id_mapper), path_(wal_directory / MakeWalName()), @@ -1055,7 +1055,7 @@ WalFile::WalFile(const std::filesystem::path &wal_directory, const std::string_v wal_.Sync(); } -WalFile::WalFile(std::filesystem::path current_wal_path, Config::Items items, NameIdMapper *name_id_mapper, +WalFile::WalFile(std::filesystem::path current_wal_path, SalientConfig::Items items, NameIdMapper *name_id_mapper, uint64_t seq_num, uint64_t from_timestamp, uint64_t to_timestamp, uint64_t count, utils::FileRetainer *file_retainer) : items_(items), diff --git a/src/storage/v2/durability/wal.hpp b/src/storage/v2/durability/wal.hpp index 6b7003447..c88e7730d 100644 --- a/src/storage/v2/durability/wal.hpp +++ b/src/storage/v2/durability/wal.hpp @@ -202,7 +202,7 @@ WalDeltaData ReadWalDeltaData(BaseDecoder *decoder); WalDeltaData::Type SkipWalDeltaData(BaseDecoder *decoder); /// Function used to encode a `Delta` that originated from a `Vertex`. -void EncodeDelta(BaseEncoder *encoder, NameIdMapper *name_id_mapper, Config::Items items, const Delta &delta, +void EncodeDelta(BaseEncoder *encoder, NameIdMapper *name_id_mapper, SalientConfig::Items items, const Delta &delta, const Vertex &vertex, uint64_t timestamp); /// Function used to encode a `Delta` that originated from an `Edge`. @@ -222,15 +222,17 @@ void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper, Storage RecoveryInfo LoadWal(const std::filesystem::path &path, RecoveredIndicesAndConstraints *indices_constraints, std::optional last_loaded_timestamp, utils::SkipList *vertices, utils::SkipList *edges, NameIdMapper *name_id_mapper, std::atomic *edge_count, - Config::Items items); + SalientConfig::Items items); /// WalFile class used to append deltas and operations to the WAL file. 
class WalFile { public: WalFile(const std::filesystem::path &wal_directory, std::string_view uuid, std::string_view epoch_id, - Config::Items items, NameIdMapper *name_id_mapper, uint64_t seq_num, utils::FileRetainer *file_retainer); - WalFile(std::filesystem::path current_wal_path, Config::Items items, NameIdMapper *name_id_mapper, uint64_t seq_num, - uint64_t from_timestamp, uint64_t to_timestamp, uint64_t count, utils::FileRetainer *file_retainer); + SalientConfig::Items items, NameIdMapper *name_id_mapper, uint64_t seq_num, + utils::FileRetainer *file_retainer); + WalFile(std::filesystem::path current_wal_path, SalientConfig::Items items, NameIdMapper *name_id_mapper, + uint64_t seq_num, uint64_t from_timestamp, uint64_t to_timestamp, uint64_t count, + utils::FileRetainer *file_retainer); WalFile(const WalFile &) = delete; WalFile(WalFile &&) = delete; @@ -277,7 +279,7 @@ class WalFile { private: void UpdateStats(uint64_t timestamp); - Config::Items items_; + SalientConfig::Items items_; NameIdMapper *name_id_mapper_; Encoder wal_; std::filesystem::path path_; diff --git a/src/storage/v2/edge_accessor.cpp b/src/storage/v2/edge_accessor.cpp index 09b035e80..3ab2e3d79 100644 --- a/src/storage/v2/edge_accessor.cpp +++ b/src/storage/v2/edge_accessor.cpp @@ -27,7 +27,7 @@ namespace memgraph::storage { bool EdgeAccessor::IsDeleted() const { - if (!storage_->config_.items.properties_on_edges) { + if (!storage_->config_.salient.items.properties_on_edges) { return false; } return edge_.ptr->deleted; @@ -38,7 +38,7 @@ bool EdgeAccessor::IsVisible(const View view) const { bool deleted = true; // When edges don't have properties, their isolation level is still dictated by MVCC -> // iterate over the deltas of the from_vertex_ and see which deltas can be applied on edges. 
- if (!storage_->config_.items.properties_on_edges) { + if (!storage_->config_.salient.items.properties_on_edges) { Delta *delta = nullptr; { auto guard = std::shared_lock{from_vertex_->lock}; @@ -120,7 +120,7 @@ VertexAccessor EdgeAccessor::DeletedEdgeToVertex() const { Result EdgeAccessor::SetProperty(PropertyId property, const PropertyValue &value) { utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception; - if (!storage_->config_.items.properties_on_edges) return Error::PROPERTIES_DISABLED; + if (!storage_->config_.salient.items.properties_on_edges) return Error::PROPERTIES_DISABLED; auto guard = std::unique_lock{edge_.ptr->lock}; @@ -153,7 +153,7 @@ Result EdgeAccessor::SetProperty(PropertyId property, co Result EdgeAccessor::InitProperties(const std::map &properties) { utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception; - if (!storage_->config_.items.properties_on_edges) return Error::PROPERTIES_DISABLED; + if (!storage_->config_.salient.items.properties_on_edges) return Error::PROPERTIES_DISABLED; auto guard = std::unique_lock{edge_.ptr->lock}; @@ -175,7 +175,7 @@ Result EdgeAccessor::InitProperties(const std::map>> EdgeAccessor::UpdateProperties( std::map &properties) const { utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception; - if (!storage_->config_.items.properties_on_edges) return Error::PROPERTIES_DISABLED; + if (!storage_->config_.salient.items.properties_on_edges) return Error::PROPERTIES_DISABLED; auto guard = std::unique_lock{edge_.ptr->lock}; @@ -198,7 +198,7 @@ Result>> EdgeAc } Result> EdgeAccessor::ClearProperties() { - if (!storage_->config_.items.properties_on_edges) return Error::PROPERTIES_DISABLED; + if (!storage_->config_.salient.items.properties_on_edges) return Error::PROPERTIES_DISABLED; auto guard = std::unique_lock{edge_.ptr->lock}; @@ -222,22 +222,22 @@ Result> EdgeAccessor::ClearProperties() { } Result EdgeAccessor::GetProperty(PropertyId property, View view) const { - if (!storage_->config_.items.properties_on_edges) return PropertyValue(); + if (!storage_->config_.salient.items.properties_on_edges) return PropertyValue(); bool exists = true; bool deleted = false; - PropertyValue value; + std::optional value; Delta *delta = nullptr; { auto guard = std::shared_lock{edge_.ptr->lock}; deleted = edge_.ptr->deleted; - value = edge_.ptr->properties.GetProperty(property); + value.emplace(edge_.ptr->properties.GetProperty(property)); delta = edge_.ptr->delta; } ApplyDeltasForRead(transaction_, delta, view, [&exists, &deleted, &value, property](const Delta &delta) { switch (delta.action) { case Delta::Action::SET_PROPERTY: { if (delta.property.key == property) { - value = delta.property.value; + *value = delta.property.value; } break; } @@ -261,11 +261,11 @@ Result EdgeAccessor::GetProperty(PropertyId property, View view) }); if (!exists) return Error::NONEXISTENT_OBJECT; if (!for_deleted_ && deleted) return Error::DELETED_OBJECT; - return std::move(value); + return *std::move(value); } Result> EdgeAccessor::Properties(View view) const { - if (!storage_->config_.items.properties_on_edges) return std::map{}; + if (!storage_->config_.salient.items.properties_on_edges) return std::map{}; bool exists = true; bool deleted = false; std::map properties; @@ -317,7 +317,7 @@ Result> EdgeAccessor::Properties(View view) } Gid EdgeAccessor::Gid() const noexcept { - if (storage_->config_.items.properties_on_edges) { + if (storage_->config_.salient.items.properties_on_edges) { return edge_.ptr->gid; } return edge_.gid; diff --git 
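The GetProperty change replaces a default-constructed PropertyValue that was immediately overwritten with a std::optional that is emplaced under the lock and moved out at the end, saving one default construction plus assignment and making the final return an explicit move. The same pattern in isolation, with ExpensiveValue standing in for PropertyValue:

#include <mutex>
#include <optional>
#include <string>
#include <utility>

using ExpensiveValue = std::string;

ExpensiveValue ReadUnderLock(std::mutex &mtx, const ExpensiveValue &stored) {
  std::optional<ExpensiveValue> value;
  {
    std::scoped_lock guard{mtx};
    value.emplace(stored);   // construct the copy in place; no default ctor + assign
  }
  // ... deltas may overwrite *value here, as ApplyDeltasForRead does above ...
  return *std::move(value);  // move out of the optional rather than copying
}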
a/src/storage/v2/edge_accessor.hpp b/src/storage/v2/edge_accessor.hpp index a1c52d0a5..83a3e549d 100644 --- a/src/storage/v2/edge_accessor.hpp +++ b/src/storage/v2/edge_accessor.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -85,6 +85,8 @@ class EdgeAccessor final { /// @throw std::bad_alloc Result> Properties(View view) const; + auto GidPropertiesOnEdges() const -> Gid { return edge_.ptr->gid; } + auto GidNoPropertiesOnEdges() const -> Gid { return edge_.gid; } Gid Gid() const noexcept; bool IsCycle() const { return from_vertex_ == to_vertex_; } diff --git a/src/storage/v2/indices/indices.cpp b/src/storage/v2/indices/indices.cpp index b902aee91..b8fab3fb7 100644 --- a/src/storage/v2/indices/indices.cpp +++ b/src/storage/v2/indices/indices.cpp @@ -32,10 +32,10 @@ void Indices::AbortEntries(LabelId label, std::spanAbortEntries(label, vertices, exact_start_timestamp); } -void Indices::RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp) const { - static_cast(label_index_.get())->RemoveObsoleteEntries(oldest_active_start_timestamp); +void Indices::RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp, std::stop_token token) const { + static_cast(label_index_.get())->RemoveObsoleteEntries(oldest_active_start_timestamp, token); static_cast(label_property_index_.get()) - ->RemoveObsoleteEntries(oldest_active_start_timestamp); + ->RemoveObsoleteEntries(oldest_active_start_timestamp, std::move(token)); } void Indices::UpdateOnAddLabel(LabelId label, Vertex *vertex, const Transaction &tx, Storage *storage, diff --git a/src/storage/v2/indices/indices.hpp b/src/storage/v2/indices/indices.hpp index e13bb0d82..b61b9d337 100644 --- a/src/storage/v2/indices/indices.hpp +++ b/src/storage/v2/indices/indices.hpp @@ -34,7 +34,7 @@ struct Indices { /// This function should be called from garbage collection to clean up the /// index. /// TODO: unused in disk indices - void RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp) const; + void RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp, std::stop_token token) const; /// Surgical removal of entries that were inserted in this transaction /// TODO: unused in disk indices diff --git a/src/storage/v2/indices/indices_utils.hpp b/src/storage/v2/indices/indices_utils.hpp index 59b492ba3..054609188 100644 --- a/src/storage/v2/indices/indices_utils.hpp +++ b/src/storage/v2/indices/indices_utils.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -21,11 +21,18 @@ namespace memgraph::storage { +namespace { + +template +struct ActionSet { + constexpr bool contains(Delta::Action action) const { return ((action == actions) || ...); } +}; + /// Traverses deltas visible from transaction with start timestamp greater than /// the provided timestamp, and calls the provided callback function for each /// delta. If the callback ever returns true, traversal is stopped and the /// function returns true. Otherwise, the function returns false. 
-template +template inline bool AnyVersionSatisfiesPredicate(uint64_t timestamp, const Delta *delta, const TCallback &predicate) { while (delta != nullptr) { const auto ts = delta->timestamp->load(std::memory_order_acquire); @@ -33,7 +40,7 @@ inline bool AnyVersionSatisfiesPredicate(uint64_t timestamp, const Delta *delta, if (ts < timestamp) { break; } - if (predicate(*delta)) { + if (interesting.contains(delta->action) && predicate(*delta)) { return true; } // Move to the next delta. @@ -42,6 +49,8 @@ inline bool AnyVersionSatisfiesPredicate(uint64_t timestamp, const Delta *delta, return false; } +} // namespace + /// Helper function for label index garbage collection. Returns true if there's /// a reachable version of the vertex that has the given label. inline bool AnyVersionHasLabel(const Vertex &vertex, LabelId label, uint64_t timestamp) { @@ -57,7 +66,10 @@ inline bool AnyVersionHasLabel(const Vertex &vertex, LabelId label, uint64_t tim if (!deleted && has_label) { return true; } - return AnyVersionSatisfiesPredicate(timestamp, delta, [&has_label, &deleted, label](const Delta &delta) { + constexpr auto interesting = + ActionSet{}; + return AnyVersionSatisfiesPredicate(timestamp, delta, [&has_label, &deleted, label](const Delta &delta) { switch (delta.action) { case Delta::Action::ADD_LABEL: if (delta.label == label) { @@ -98,10 +110,10 @@ inline bool AnyVersionHasLabel(const Vertex &vertex, LabelId label, uint64_t tim /// property value. inline bool AnyVersionHasLabelProperty(const Vertex &vertex, LabelId label, PropertyId key, const PropertyValue &value, uint64_t timestamp) { - bool has_label{false}; - bool current_value_equal_to_value{value.IsNull()}; - bool deleted{false}; - const Delta *delta = nullptr; + Delta const *delta; + bool deleted; + bool has_label; + bool current_value_equal_to_value; { auto guard = std::shared_lock{vertex.lock}; delta = vertex.delta; @@ -116,7 +128,10 @@ inline bool AnyVersionHasLabelProperty(const Vertex &vertex, LabelId label, Prop return true; } - return AnyVersionSatisfiesPredicate( + constexpr auto interesting = ActionSet{}; + return AnyVersionSatisfiesPredicate( timestamp, delta, [&has_label, ¤t_value_equal_to_value, &deleted, label, key, &value](const Delta &delta) { switch (delta.action) { case Delta::Action::ADD_LABEL: diff --git a/src/storage/v2/inmemory/label_index.cpp b/src/storage/v2/inmemory/label_index.cpp index b833c97ff..9ab027308 100644 --- a/src/storage/v2/inmemory/label_index.cpp +++ b/src/storage/v2/inmemory/label_index.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
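ActionSet packs a compile-time list of delta actions into a non-type template parameter (a C++20 structural type) and tests membership with a fold over ||; AnyVersionSatisfiesPredicate then calls the predicate only for deltas whose action is in the set, so the GC helpers AnyVersionHasLabel and AnyVersionHasLabelProperty skip their big switch for irrelevant deltas. Standalone sketch with a toy action enum:

#include <cstdint>

enum class ActionSketch : uint8_t { ADD_LABEL, REMOVE_LABEL, SET_PROPERTY, DELETE_OBJECT };

template <ActionSketch... actions>
struct ActionSetSketch {
  constexpr bool contains(ActionSketch action) const { return ((action == actions) || ...); }
};

static_assert(ActionSetSketch<ActionSketch::ADD_LABEL, ActionSketch::REMOVE_LABEL>{}.contains(
    ActionSketch::ADD_LABEL));
static_assert(!ActionSetSketch<ActionSketch::ADD_LABEL>{}.contains(ActionSketch::SET_PROPERTY));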
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -16,6 +16,7 @@ #include "storage/v2/constraints/constraints.hpp" #include "storage/v2/indices/indices_utils.hpp" #include "storage/v2/inmemory/storage.hpp" +#include "utils/counter.hpp" namespace memgraph::storage { @@ -79,10 +80,18 @@ std::vector InMemoryLabelIndex::ListIndices() const { return ret; } -void InMemoryLabelIndex::RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp) { +void InMemoryLabelIndex::RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp, std::stop_token token) { + auto maybe_stop = utils::ResettableCounter<2048>(); + for (auto &label_storage : index_) { + // before starting index, check if stop_requested + if (token.stop_requested()) return; + auto vertices_acc = label_storage.second.access(); for (auto it = vertices_acc.begin(); it != vertices_acc.end();) { + // Hot loop, don't check stop_requested every time + if (maybe_stop() && token.stop_requested()) return; + auto next_it = it; ++next_it; diff --git a/src/storage/v2/inmemory/label_index.hpp b/src/storage/v2/inmemory/label_index.hpp index 2411f0ba1..5ecac117b 100644 --- a/src/storage/v2/inmemory/label_index.hpp +++ b/src/storage/v2/inmemory/label_index.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -54,7 +54,7 @@ class InMemoryLabelIndex : public storage::LabelIndex { std::vector ListIndices() const override; - void RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp); + void RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp, std::stop_token token); /// Surgical removal of entries that was inserted this transaction void AbortEntries(LabelId labelId, std::span vertices, uint64_t exact_start_timestamp); diff --git a/src/storage/v2/inmemory/label_property_index.cpp b/src/storage/v2/inmemory/label_property_index.cpp index c8333fb95..59b12a779 100644 --- a/src/storage/v2/inmemory/label_property_index.cpp +++ b/src/storage/v2/inmemory/label_property_index.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
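utils::ResettableCounter is not shown in this diff; from its use above it is a callable that returns true once every N invocations, so the stop-token check runs only once per 2048 skip-list entries instead of on every iteration of the hot loop. A plausible stand-in consistent with that usage:

#include <cstddef>
#include <stop_token>

template <std::size_t N>
struct ResettableCounterSketch {
  std::size_t count{0};
  bool operator()() {
    if (++count < N) return false;
    count = 0;
    return true;
  }
};

template <typename It>
void SweepIndex(It begin, It end, std::stop_token token) {
  auto maybe_stop = ResettableCounterSketch<2048>();
  for (auto it = begin; it != end; ++it) {
    if (maybe_stop() && token.stop_requested()) return;  // amortized cancellation check
    // ... drop the entry if no reachable version still needs it ...
  }
}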
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -13,6 +13,7 @@ #include "storage/v2/constraints/constraints.hpp" #include "storage/v2/indices/indices_utils.hpp" #include "storage/v2/inmemory/storage.hpp" +#include "utils/counter.hpp" #include "utils/logging.hpp" namespace memgraph::storage { @@ -139,10 +140,18 @@ std::vector> InMemoryLabelPropertyIndex::ListIndi return ret; } -void InMemoryLabelPropertyIndex::RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp) { +void InMemoryLabelPropertyIndex::RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp, std::stop_token token) { + auto maybe_stop = utils::ResettableCounter<2048>(); + for (auto &[label_property, index] : index_) { + // before starting index, check if stop_requested + if (token.stop_requested()) return; + auto index_acc = index.access(); for (auto it = index_acc.begin(); it != index_acc.end();) { + // Hot loop, don't check stop_requested every time + if (maybe_stop() && token.stop_requested()) return; + auto next_it = it; ++next_it; diff --git a/src/storage/v2/inmemory/label_property_index.hpp b/src/storage/v2/inmemory/label_property_index.hpp index 8bc4148bb..6ca67e1c6 100644 --- a/src/storage/v2/inmemory/label_property_index.hpp +++ b/src/storage/v2/inmemory/label_property_index.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -60,7 +60,7 @@ class InMemoryLabelPropertyIndex : public storage::LabelPropertyIndex { std::vector> ListIndices() const override; - void RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp); + void RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp, std::stop_token token); void AbortEntries(PropertyId property, std::span const> vertices, uint64_t exact_start_timestamp); diff --git a/src/storage/v2/inmemory/replication/recovery.cpp b/src/storage/v2/inmemory/replication/recovery.cpp index 536c7c8fc..d6f2b464c 100644 --- a/src/storage/v2/inmemory/replication/recovery.cpp +++ b/src/storage/v2/inmemory/replication/recovery.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -44,7 +44,7 @@ class InMemoryCurrentWalHandler { ////// CurrentWalHandler ////// InMemoryCurrentWalHandler::InMemoryCurrentWalHandler(InMemoryStorage const *storage, rpc::Client &rpc_client) - : stream_(rpc_client.Stream(storage->id())) {} + : stream_(rpc_client.Stream(storage->uuid())) {} void InMemoryCurrentWalHandler::AppendFilename(const std::string &filename) { replication::Encoder encoder(stream_.GetBuilder()); @@ -69,10 +69,10 @@ void InMemoryCurrentWalHandler::AppendBufferData(const uint8_t *buffer, const si replication::CurrentWalRes InMemoryCurrentWalHandler::Finalize() { return stream_.AwaitResponse(); } ////// ReplicationClient Helpers ////// -replication::WalFilesRes TransferWalFiles(std::string db_name, rpc::Client &client, +replication::WalFilesRes TransferWalFiles(const utils::UUID &uuid, rpc::Client &client, const std::vector &wal_files) { MG_ASSERT(!wal_files.empty(), "Wal files list is empty!"); - auto stream = client.Stream(std::move(db_name), wal_files.size()); + auto stream = client.Stream(uuid, wal_files.size()); replication::Encoder encoder(stream.GetBuilder()); for (const auto &wal : wal_files) { spdlog::debug("Sending wal file: {}", wal); @@ -81,8 +81,9 @@ replication::WalFilesRes TransferWalFiles(std::string db_name, rpc::Client &clie return stream.AwaitResponse(); } -replication::SnapshotRes TransferSnapshot(std::string db_name, rpc::Client &client, const std::filesystem::path &path) { - auto stream = client.Stream(std::move(db_name)); +replication::SnapshotRes TransferSnapshot(const utils::UUID &uuid, rpc::Client &client, + const std::filesystem::path &path) { + auto stream = client.Stream(uuid); replication::Encoder encoder(stream.GetBuilder()); encoder.WriteFile(path); return stream.AwaitResponse(); diff --git a/src/storage/v2/inmemory/replication/recovery.hpp b/src/storage/v2/inmemory/replication/recovery.hpp index 2025800ab..730822a62 100644 --- a/src/storage/v2/inmemory/replication/recovery.hpp +++ b/src/storage/v2/inmemory/replication/recovery.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -19,10 +19,11 @@ class InMemoryStorage; ////// ReplicationClient Helpers ////// -replication::WalFilesRes TransferWalFiles(std::string db_name, rpc::Client &client, +replication::WalFilesRes TransferWalFiles(const utils::UUID &uuid, rpc::Client &client, const std::vector &wal_files); -replication::SnapshotRes TransferSnapshot(std::string db_name, rpc::Client &client, const std::filesystem::path &path); +replication::SnapshotRes TransferSnapshot(const utils::UUID &uuid, rpc::Client &client, + const std::filesystem::path &path); uint64_t ReplicateCurrentWal(const InMemoryStorage *storage, rpc::Client &client, durability::WalFile const &wal_file); diff --git a/src/storage/v2/inmemory/storage.cpp b/src/storage/v2/inmemory/storage.cpp index b38ce719c..d9c5eb095 100644 --- a/src/storage/v2/inmemory/storage.cpp +++ b/src/storage/v2/inmemory/storage.cpp @@ -66,13 +66,13 @@ auto FindEdges(const View view, EdgeTypeId edge_type, const VertexAccessor *from using OOMExceptionEnabler = utils::MemoryTracker::OutOfMemoryExceptionEnabler; InMemoryStorage::InMemoryStorage(Config config) - : Storage(config, config.storage_mode), + : Storage(config, config.salient.storage_mode), recovery_{config.durability.storage_directory / durability::kSnapshotDirectory, config.durability.storage_directory / durability::kWalDirectory}, lock_file_path_(config.durability.storage_directory / durability::kLockFile), uuid_(utils::GenerateUUID()), global_locker_(file_retainer_.AddLocker()) { - MG_ASSERT(config.storage_mode != StorageMode::ON_DISK_TRANSACTIONAL, + MG_ASSERT(config.salient.storage_mode != StorageMode::ON_DISK_TRANSACTIONAL, "Invalid storage mode sent to InMemoryStorage constructor!"); if (config_.durability.snapshot_wal_mode != Config::Durability::SnapshotWalMode::DISABLED || config_.durability.snapshot_on_exit || config_.durability.recover_on_startup) { @@ -154,6 +154,8 @@ InMemoryStorage::InMemoryStorage(Config config) } InMemoryStorage::~InMemoryStorage() { + stop_source.request_stop(); + if (config_.gc.type == Config::Gc::Type::PERIODIC) { gc_runner_.Stop(); } @@ -176,8 +178,9 @@ InMemoryStorage::~InMemoryStorage() { InMemoryStorage::InMemoryAccessor::InMemoryAccessor(auto tag, InMemoryStorage *storage, IsolationLevel isolation_level, StorageMode storage_mode, - memgraph::replication::ReplicationRole replication_role) - : Accessor(tag, storage, isolation_level, storage_mode, replication_role), config_(storage->config_.items) {} + memgraph::replication_coordination_glue::ReplicationRole replication_role) + : Accessor(tag, storage, isolation_level, storage_mode, replication_role), + config_(storage->config_.salient.items) {} InMemoryStorage::InMemoryAccessor::InMemoryAccessor(InMemoryAccessor &&other) noexcept : Accessor(std::move(other)), config_(other.config_) {} @@ -319,7 +322,7 @@ Result InMemoryStorage::InMemoryAccessor::CreateEdge(VertexAccesso if (to_vertex->deleted) return Error::DELETED_OBJECT; } - if (storage_->config_.items.enable_schema_metadata) { + if (storage_->config_.salient.items.enable_schema_metadata) { storage_->stored_edge_types_.try_insert(edge_type); } auto *mem_storage = static_cast(storage_); @@ -408,7 +411,7 @@ Result InMemoryStorage::InMemoryAccessor::CreateEdgeEx(VertexAcces if (to_vertex->deleted) return Error::DELETED_OBJECT; } - if (storage_->config_.items.enable_schema_metadata) { + if 
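The destructor change pairs with a std::stop_source owned by the storage: background work (index GC, the snapshot runner) only ever receives std::stop_tokens, and request_stop() at the start of teardown lets those jobs bail out co-operatively instead of racing the destructor. A minimal standalone illustration of that lifecycle:

#include <chrono>
#include <stop_token>
#include <thread>

class WithBackgroundWork {
 public:
  WithBackgroundWork()
      : worker_([token = stop_source_.get_token()]() {
          while (!token.stop_requested()) {
            // periodic work (GC sweep, snapshot, ...)
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
          }
        }) {}

  ~WithBackgroundWork() {
    stop_source_.request_stop();  // first thing, as in ~InMemoryStorage above
    worker_.join();
  }

 private:
  std::stop_source stop_source_;  // declared before worker_ so the token exists first
  std::thread worker_;
};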
(storage_->config_.salient.items.enable_schema_metadata) { storage_->stored_edge_types_.try_insert(edge_type); } @@ -745,7 +748,7 @@ Result InMemoryStorage::InMemoryAccessor::EdgeChangeType(EdgeAcces // NOLINTNEXTLINE(google-default-arguments) utils::BasicResult InMemoryStorage::InMemoryAccessor::Commit( - const std::optional desired_commit_timestamp, bool is_main) { + CommitReplArgs reparg, DatabaseAccessProtector db_acc) { MG_ASSERT(is_transaction_active_, "The transaction is already terminated!"); MG_ASSERT(!transaction_.must_abort, "The transaction can't be committed!"); @@ -754,47 +757,14 @@ utils::BasicResult InMemoryStorage::InMemoryAcce auto *mem_storage = static_cast(storage_); // TODO: duplicated transaction finalisation in md_deltas and deltas processing cases - if (!transaction_.md_deltas.empty()) { - // This is usually done by the MVCC, but it does not handle the metadata deltas - transaction_.EnsureCommitTimestampExists(); - - // Save these so we can mark them used in the commit log. - uint64_t start_timestamp = transaction_.start_timestamp; - - std::unique_lock engine_guard(storage_->engine_lock_); - commit_timestamp_.emplace(mem_storage->CommitTimestamp(desired_commit_timestamp)); - - // Write transaction to WAL while holding the engine lock to make sure - // that committed transactions are sorted by the commit timestamp in the - // WAL files. We supply the new commit timestamp to the function so that - // it knows what will be the final commit timestamp. The WAL must be - // written before actually committing the transaction (before setting - // the commit timestamp) so that no other transaction can see the - // modifications before they are written to disk. - // Replica can log only the write transaction received from Main - // so the Wal files are consistent - if (is_main || desired_commit_timestamp.has_value()) { - could_replicate_all_sync_replicas = - mem_storage->AppendToWalDataDefinition(transaction_, *commit_timestamp_); // protected by engine_guard - // TODO: release lock, and update all deltas to have a local copy of the commit timestamp - transaction_.commit_timestamp->store(*commit_timestamp_, - std::memory_order_release); // protected by engine_guard - // Replica can only update the last commit timestamp with - // the commits received from main. - if (is_main || desired_commit_timestamp.has_value()) { - // Update the last commit timestamp - mem_storage->repl_storage_state_.last_commit_timestamp_.store(*commit_timestamp_); // protected by engine_guard - } - // Release engine lock because we don't have to hold it anymore - engine_guard.unlock(); - - mem_storage->commit_log_->MarkFinished(start_timestamp); - } - } else if (transaction_.deltas.use().empty()) { + if (transaction_.deltas.use().empty() && transaction_.md_deltas.empty()) { // We don't have to update the commit timestamp here because no one reads // it. 
mem_storage->commit_log_->MarkFinished(transaction_.start_timestamp); } else { + // This is usually done by the MVCC, but it does not handle the metadata deltas + transaction_.EnsureCommitTimestampExists(); + if (transaction_.constraint_verification_info.NeedsExistenceConstraintVerification()) { const auto vertices_to_update = transaction_.constraint_verification_info.GetVerticesForExistenceConstraintChecking(); @@ -822,7 +792,7 @@ utils::BasicResult InMemoryStorage::InMemoryAcce std::unique_lock engine_guard(storage_->engine_lock_); auto *mem_unique_constraints = static_cast(storage_->constraints_.unique_constraints_.get()); - commit_timestamp_.emplace(mem_storage->CommitTimestamp(desired_commit_timestamp)); + commit_timestamp_.emplace(mem_storage->CommitTimestamp(reparg.desired_commit_timestamp)); if (transaction_.constraint_verification_info.NeedsUniqueConstraintVerification()) { // Before committing and validating vertices against unique constraints, @@ -846,6 +816,16 @@ utils::BasicResult InMemoryStorage::InMemoryAcce } if (!unique_constraint_violation) { + [[maybe_unused]] bool const is_main_or_replica_write = + reparg.IsMain() || reparg.desired_commit_timestamp.has_value(); + + // TODO Figure out if we can assert this + // DMG_ASSERT(is_main_or_replica_write, "Should only get here on writes"); + // Currently there are queries that write to some subsystem that are allowed on a replica + // ex. analyze graph stats + // There are probably others. We not to check all of them and figure out if they are allowed and what are + // they even doing here... + // Write transaction to WAL while holding the engine lock to make sure // that committed transactions are sorted by the commit timestamp in the // WAL files. We supply the new commit timestamp to the function so that @@ -855,18 +835,16 @@ utils::BasicResult InMemoryStorage::InMemoryAcce // modifications before they are written to disk. // Replica can log only the write transaction received from Main // so the Wal files are consistent - if (is_main || desired_commit_timestamp.has_value()) { - could_replicate_all_sync_replicas = - mem_storage->AppendToWalDataManipulation(transaction_, *commit_timestamp_); // protected by engine_guard - } + if (is_main_or_replica_write) { + could_replicate_all_sync_replicas = mem_storage->AppendToWal(transaction_, *commit_timestamp_, + std::move(db_acc)); // protected by engine_guard - // TODO: release lock, and update all deltas to have a local copy of the commit timestamp - MG_ASSERT(transaction_.commit_timestamp != nullptr, "Invalid database state!"); - transaction_.commit_timestamp->store(*commit_timestamp_, - std::memory_order_release); // protected by engine_guard - // Replica can only update the last commit timestamp with - // the commits received from main. - if (is_main || desired_commit_timestamp.has_value()) { + // TODO: release lock, and update all deltas to have a local copy of the commit timestamp + MG_ASSERT(transaction_.commit_timestamp != nullptr, "Invalid database state!"); + transaction_.commit_timestamp->store(*commit_timestamp_, + std::memory_order_release); // protected by engine_guard + // Replica can only update the last commit timestamp with + // the commits received from main. 
// Update the last commit timestamp mem_storage->repl_storage_state_.last_commit_timestamp_.store( *commit_timestamp_); // protected by engine_guard @@ -1303,8 +1281,9 @@ VerticesIterable InMemoryStorage::InMemoryAccessor::Vertices( mem_label_property_index->Vertices(label, property, lower_bound, upper_bound, view, storage_, &transaction_)); } -Transaction InMemoryStorage::CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode, - memgraph::replication::ReplicationRole replication_role) { +Transaction InMemoryStorage::CreateTransaction( + IsolationLevel isolation_level, StorageMode storage_mode, + memgraph::replication_coordination_glue::ReplicationRole replication_role) { // We acquire the transaction engine lock here because we access (and // modify) the transaction engine variables (`transaction_id` and // `timestamp`) below. @@ -1319,7 +1298,7 @@ Transaction InMemoryStorage::CreateTransaction(IsolationLevel isolation_level, S // of any query on replica to the last commited transaction // which is timestamp_ as only commit of transaction with writes // can change the value of it. - if (replication_role == memgraph::replication::ReplicationRole::MAIN) { + if (replication_role == memgraph::replication_coordination_glue::ReplicationRole::MAIN) { start_timestamp = timestamp_++; } else { start_timestamp = timestamp_; @@ -1600,9 +1579,12 @@ void InMemoryStorage::CollectGarbage(std::unique_lock main_ if (run_index_cleanup) { // This operation is very expensive as it traverses through all of the items // in every index every time. - indices_.RemoveObsoleteEntries(oldest_active_start_timestamp); - auto *mem_unique_constraints = static_cast(constraints_.unique_constraints_.get()); - mem_unique_constraints->RemoveObsoleteEntries(oldest_active_start_timestamp); + auto token = stop_source.get_token(); + if (!token.stop_requested()) { + indices_.RemoveObsoleteEntries(oldest_active_start_timestamp, token); + auto *mem_unique_constraints = static_cast(constraints_.unique_constraints_.get()); + mem_unique_constraints->RemoveObsoleteEntries(oldest_active_start_timestamp, std::move(token)); + } } { @@ -1693,7 +1675,7 @@ StorageInfo InMemoryStorage::GetBaseInfo(bool force_directory) { --it; if (it != end && *it != "databases") { // Default DB points to the root (for back-compatibility); update to the "database" dir - return dir / "databases" / dbms::kDefaultDB; + return dir / dbms::kMultiTenantDir / dbms::kDefaultDB; } } } @@ -1703,7 +1685,8 @@ StorageInfo InMemoryStorage::GetBaseInfo(bool force_directory) { return info; } -StorageInfo InMemoryStorage::GetInfo(bool force_directory, memgraph::replication::ReplicationRole replication_role) { +StorageInfo InMemoryStorage::GetInfo(bool force_directory, + memgraph::replication_coordination_glue::ReplicationRole replication_role) { StorageInfo info = GetBaseInfo(force_directory); { auto access = Access(replication_role); // TODO: override isolation level? 
@@ -1728,8 +1711,8 @@ bool InMemoryStorage::InitializeWalFile(memgraph::replication::ReplicationEpoch if (config_.durability.snapshot_wal_mode != Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL) return false; if (!wal_file_) { - wal_file_.emplace(recovery_.wal_directory_, uuid_, epoch.id(), config_.items, name_id_mapper_.get(), wal_seq_num_++, - &file_retainer_); + wal_file_.emplace(recovery_.wal_directory_, uuid_, epoch.id(), config_.salient.items, name_id_mapper_.get(), + wal_seq_num_++, &file_retainer_); } return true; } @@ -1753,7 +1736,8 @@ void InMemoryStorage::FinalizeWalFile() { } } -bool InMemoryStorage::AppendToWalDataManipulation(const Transaction &transaction, uint64_t final_commit_timestamp) { +bool InMemoryStorage::AppendToWal(const Transaction &transaction, uint64_t final_commit_timestamp, + DatabaseAccessProtector db_acc) { if (!InitializeWalFile(repl_storage_state_.epoch_)) { return true; } @@ -1761,7 +1745,7 @@ bool InMemoryStorage::AppendToWalDataManipulation(const Transaction &transaction // A single transaction will always be contained in a single WAL file. auto current_commit_timestamp = transaction.commit_timestamp->load(std::memory_order_acquire); - repl_storage_state_.InitializeTransaction(wal_file_->SequenceNumber(), this); + repl_storage_state_.InitializeTransaction(wal_file_->SequenceNumber(), this, db_acc); auto append_deltas = [&](auto callback) { // Helper lambda that traverses the delta chain on order to find the first @@ -1910,26 +1894,15 @@ bool InMemoryStorage::AppendToWalDataManipulation(const Transaction &transaction } }; - append_deltas([&](const Delta &delta, const auto &parent, uint64_t timestamp) { - wal_file_->AppendDelta(delta, parent, timestamp); - repl_storage_state_.AppendDelta(delta, parent, timestamp); - }); - - // Add a delta that indicates that the transaction is fully written to the WAL - // file.replication_clients_.WithLock - wal_file_->AppendTransactionEnd(final_commit_timestamp); - FinalizeWalFile(); - - return repl_storage_state_.FinalizeTransaction(final_commit_timestamp, this); -} - -bool InMemoryStorage::AppendToWalDataDefinition(const Transaction &transaction, uint64_t final_commit_timestamp) { - if (!InitializeWalFile(repl_storage_state_.epoch_)) { - return true; + // Handle MVCC deltas + if (!transaction.deltas.use().empty()) { + append_deltas([&](const Delta &delta, const auto &parent, uint64_t timestamp) { + wal_file_->AppendDelta(delta, parent, timestamp); + repl_storage_state_.AppendDelta(delta, parent, timestamp); + }); } - repl_storage_state_.InitializeTransaction(wal_file_->SequenceNumber(), this); - + // Handle metadata deltas for (const auto &md_delta : transaction.md_deltas) { switch (md_delta.action) { case MetadataDelta::Action::LABEL_INDEX_CREATE: { @@ -2009,7 +1982,7 @@ bool InMemoryStorage::AppendToWalDataDefinition(const Transaction &transaction, wal_file_->AppendTransactionEnd(final_commit_timestamp); FinalizeWalFile(); - return repl_storage_state_.FinalizeTransaction(final_commit_timestamp, this); + return repl_storage_state_.FinalizeTransaction(final_commit_timestamp, this, std::move(db_acc)); } void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label, @@ -2049,15 +2022,15 @@ void InMemoryStorage::AppendToWalDataDefinition(durability::StorageMetadataOpera } utils::BasicResult InMemoryStorage::CreateSnapshot( - memgraph::replication::ReplicationRole replication_role) { - if (replication_role == memgraph::replication::ReplicationRole::REPLICA) 
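Merging AppendToWalDataManipulation and AppendToWalDataDefinition means a single transaction now emits its MVCC deltas first, then its metadata deltas, then exactly one transaction-end marker, with db_acc threaded through InitializeTransaction/FinalizeTransaction so asynchronous replication keeps the database alive. A toy, self-contained model of that ordering (the real encoding and replication calls are elided):

#include <cstdint>
#include <string>
#include <vector>

struct ToyWal {
  std::vector<std::string> records;
  void Append(std::string r) { records.push_back(std::move(r)); }
};

struct ToyTransaction {
  std::vector<std::string> deltas;     // MVCC (data manipulation) deltas
  std::vector<std::string> md_deltas;  // metadata (data definition) deltas
};

void AppendToWalToy(ToyWal &wal, const ToyTransaction &txn, uint64_t commit_ts) {
  for (const auto &d : txn.deltas) wal.Append("DELTA " + d);
  for (const auto &md : txn.md_deltas) wal.Append("MD " + md);
  wal.Append("TXN_END " + std::to_string(commit_ts));  // exactly one end marker per transaction
}

The unified path handles a transaction that carries both kinds of deltas in one place and drops the duplicated timestamp bookkeeping flagged by the TODO in Commit above.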
{ + memgraph::replication_coordination_glue::ReplicationRole replication_role) { + if (replication_role == memgraph::replication_coordination_glue::ReplicationRole::REPLICA) { return InMemoryStorage::CreateSnapshotError::DisabledForReplica; } auto const &epoch = repl_storage_state_.epoch_; auto snapshot_creator = [this, &epoch]() { utils::Timer timer; auto transaction = CreateTransaction(IsolationLevel::SNAPSHOT_ISOLATION, storage_mode_, - memgraph::replication::ReplicationRole::MAIN); + memgraph::replication_coordination_glue::ReplicationRole::MAIN); durability::CreateSnapshot(this, &transaction, recovery_.snapshot_directory_, recovery_.wal_directory_, &vertices_, &edges_, uuid_, epoch, repl_storage_state_.history, &file_retainer_); // Finalize snapshot transaction. @@ -2145,14 +2118,16 @@ utils::FileRetainer::FileLockerAccessor::ret_type InMemoryStorage::UnlockPath() return true; } -std::unique_ptr InMemoryStorage::Access(memgraph::replication::ReplicationRole replication_role, - std::optional override_isolation_level) { +std::unique_ptr InMemoryStorage::Access( + memgraph::replication_coordination_glue::ReplicationRole replication_role, + std::optional override_isolation_level) { return std::unique_ptr(new InMemoryAccessor{Storage::Accessor::shared_access, this, override_isolation_level.value_or(isolation_level_), storage_mode_, replication_role}); } std::unique_ptr InMemoryStorage::UniqueAccess( - memgraph::replication::ReplicationRole replication_role, std::optional override_isolation_level) { + memgraph::replication_coordination_glue::ReplicationRole replication_role, + std::optional override_isolation_level) { return std::unique_ptr(new InMemoryAccessor{Storage::Accessor::unique_access, this, override_isolation_level.value_or(isolation_level_), storage_mode_, replication_role}); @@ -2175,8 +2150,11 @@ void InMemoryStorage::CreateSnapshotHandler( // Run the snapshot thread (if enabled) if (config_.durability.snapshot_wal_mode != Config::Durability::SnapshotWalMode::DISABLED) { - snapshot_runner_.Run("Snapshot", config_.durability.snapshot_interval, - [this]() { this->create_snapshot_handler(); }); + snapshot_runner_.Run("Snapshot", config_.durability.snapshot_interval, [this, token = stop_source.get_token()]() { + if (!token.stop_requested()) { + this->create_snapshot_handler(); + } + }); } } IndicesInfo InMemoryStorage::InMemoryAccessor::ListAllIndices() const { diff --git a/src/storage/v2/inmemory/storage.hpp b/src/storage/v2/inmemory/storage.hpp index cf4ea6c9d..aace9971e 100644 --- a/src/storage/v2/inmemory/storage.hpp +++ b/src/storage/v2/inmemory/storage.hpp @@ -73,7 +73,8 @@ class InMemoryStorage final : public Storage { friend class InMemoryStorage; explicit InMemoryAccessor(auto tag, InMemoryStorage *storage, IsolationLevel isolation_level, - StorageMode storage_mode, memgraph::replication::ReplicationRole replication_role); + StorageMode storage_mode, + memgraph::replication_coordination_glue::ReplicationRole replication_role); public: InMemoryAccessor(const InMemoryAccessor &) = delete; @@ -214,8 +215,8 @@ class InMemoryStorage final : public Storage { /// case the transaction is automatically aborted. 
/// @throw std::bad_alloc // NOLINTNEXTLINE(google-default-arguments) - utils::BasicResult Commit(std::optional desired_commit_timestamp = {}, - bool is_main = true) override; + utils::BasicResult Commit(CommitReplArgs reparg = {}, + DatabaseAccessProtector db_acc = {}) override; /// @throw std::bad_alloc void Abort() override; @@ -301,7 +302,7 @@ class InMemoryStorage final : public Storage { /// @throw std::bad_alloc Result CreateEdgeEx(VertexAccessor *from, VertexAccessor *to, EdgeTypeId edge_type, storage::Gid gid); - Config::Items config_; + SalientConfig::Items config_; }; class ReplicationAccessor final : public InMemoryAccessor { @@ -322,10 +323,10 @@ class InMemoryStorage final : public Storage { }; using Storage::Access; - std::unique_ptr Access(memgraph::replication::ReplicationRole replication_role, + std::unique_ptr Access(memgraph::replication_coordination_glue::ReplicationRole replication_role, std::optional override_isolation_level) override; using Storage::UniqueAccess; - std::unique_ptr UniqueAccess(memgraph::replication::ReplicationRole replication_role, + std::unique_ptr UniqueAccess(memgraph::replication_coordination_glue::ReplicationRole replication_role, std::optional override_isolation_level) override; void FreeMemory(std::unique_lock main_guard) override; @@ -335,12 +336,12 @@ class InMemoryStorage final : public Storage { utils::FileRetainer::FileLockerAccessor::ret_type UnlockPath(); utils::BasicResult CreateSnapshot( - memgraph::replication::ReplicationRole replication_role); + memgraph::replication_coordination_glue::ReplicationRole replication_role); void CreateSnapshotHandler(std::function()> cb); Transaction CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode, - memgraph::replication::ReplicationRole replication_role) override; + memgraph::replication_coordination_glue::ReplicationRole replication_role) override; void SetStorageMode(StorageMode storage_mode); @@ -365,13 +366,13 @@ class InMemoryStorage final : public Storage { void FinalizeWalFile(); StorageInfo GetBaseInfo(bool force_directory) override; - StorageInfo GetInfo(bool force_directory, memgraph::replication::ReplicationRole replication_role) override; + StorageInfo GetInfo(bool force_directory, + memgraph::replication_coordination_glue::ReplicationRole replication_role) override; - /// Return true in all cases except if any sync replicas have not sent confirmation. - [[nodiscard]] bool AppendToWalDataManipulation(const Transaction &transaction, uint64_t final_commit_timestamp); - /// Return true in all cases except if any sync replicas have not sent confirmation. - [[nodiscard]] bool AppendToWalDataDefinition(const Transaction &transaction, uint64_t final_commit_timestamp); - /// Return true in all cases except if any sync replicas have not sent confirmation. + /// Return true in all cases except if any sync replicas have not sent confirmation. + [[nodiscard]] bool AppendToWal(const Transaction &transaction, uint64_t final_commit_timestamp, + DatabaseAccessProtector db_acc); + /// Return true in all cases except if any sync replicas have not sent confirmation. void AppendToWalDataDefinition(durability::StorageMetadataOperation operation, LabelId label, uint64_t final_commit_timestamp); /// Return true in all cases except if any sync replicas have not sent confirmation.
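The Commit and AppendToWal signatures above now thread a DatabaseAccessProtector (db_acc) through the call chain so that the database stays alive while replication work, which may be handed off to a background thread, still references it. Below is a minimal sketch of that capture pattern; the types and names are illustrative stand-ins, not the actual Memgraph classes:

    // Minimal sketch: keep a database alive for the duration of an asynchronous
    // replication task by moving an access token into the task closure.
    // Database, DatabaseAccess, ThreadPool and StartAsyncReplication are
    // illustrative stand-ins, not Memgraph types.
    #include <functional>
    #include <memory>
    #include <thread>
    #include <utility>
    #include <vector>

    struct Database { /* ... */ };
    using DatabaseAccess = std::shared_ptr<Database>;  // stand-in for a gatekeeper access

    class ThreadPool {
     public:
      void AddTask(std::function<void()> task) { workers_.emplace_back(std::move(task)); }
      ~ThreadPool() {
        for (auto &worker : workers_) worker.join();
      }

     private:
      std::vector<std::thread> workers_;
    };

    void StartAsyncReplication(ThreadPool &pool, DatabaseAccess db_acc) {
      // db_acc is moved into the lambda, so the database cannot be torn down
      // while the background task still uses it.
      pool.AddTask([db_acc = std::move(db_acc)] {
        // ... replicate deltas using *db_acc ...
      });
    }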
@@ -464,6 +465,9 @@ class InMemoryStorage final : public Storage { // Moved the create snapshot to a user defined handler so we can remove the global replication state from the storage std::function create_snapshot_handler{}; + + // A way to tell async operation to stop + std::stop_source stop_source; }; } // namespace memgraph::storage diff --git a/src/storage/v2/inmemory/unique_constraints.cpp b/src/storage/v2/inmemory/unique_constraints.cpp index 76cda1730..667d0229f 100644 --- a/src/storage/v2/inmemory/unique_constraints.cpp +++ b/src/storage/v2/inmemory/unique_constraints.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -15,6 +15,7 @@ #include "storage/v2/constraints/utils.hpp" #include "storage/v2/durability/recovery_type.hpp" #include "storage/v2/id_types.hpp" +#include "utils/counter.hpp" #include "utils/logging.hpp" #include "utils/skip_list.hpp" namespace memgraph::storage { @@ -487,10 +488,18 @@ std::vector>> InMemoryUniqueConstraints: return ret; } -void InMemoryUniqueConstraints::RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp) { +void InMemoryUniqueConstraints::RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp, std::stop_token token) { + auto maybe_stop = utils::ResettableCounter<2048>(); + for (auto &[label_props, storage] : constraints_) { + // before starting constraint, check if stop_requested + if (token.stop_requested()) return; + auto acc = storage.access(); for (auto it = acc.begin(); it != acc.end();) { + // Hot loop, don't check stop_requested every time + if (maybe_stop() && token.stop_requested()) return; + auto next_it = it; ++next_it; diff --git a/src/storage/v2/inmemory/unique_constraints.hpp b/src/storage/v2/inmemory/unique_constraints.hpp index 15107f131..27fae1b30 100644 --- a/src/storage/v2/inmemory/unique_constraints.hpp +++ b/src/storage/v2/inmemory/unique_constraints.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -122,7 +122,7 @@ class InMemoryUniqueConstraints : public UniqueConstraints { std::vector>> ListConstraints() const override; /// GC method that removes outdated entries from constraints' storages. 
- void RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp); + void RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp, std::stop_token token); void Clear() override; diff --git a/src/storage/v2/property_store.cpp b/src/storage/v2/property_store.cpp index f92241b80..efe426f24 100644 --- a/src/storage/v2/property_store.cpp +++ b/src/storage/v2/property_store.cpp @@ -1050,14 +1050,11 @@ bool PropertyStore::HasProperty(PropertyId property) const { return ExistsSpecificProperty(&reader, property) == ExpectedPropertyStatus::EQUAL; } -/// TODO: andi write a unit test for it bool PropertyStore::HasAllProperties(const std::set &properties) const { return std::all_of(properties.begin(), properties.end(), [this](const auto &prop) { return HasProperty(prop); }); } -/// TODO: andi write a unit test for it bool PropertyStore::HasAllPropertyValues(const std::vector &property_values) const { - /// TODO: andi extract this into a private method auto property_map = Properties(); std::vector all_property_values; transform(property_map.begin(), property_map.end(), back_inserter(all_property_values), diff --git a/src/storage/v2/property_store.hpp b/src/storage/v2/property_store.hpp index 10f884527..c217cbd81 100644 --- a/src/storage/v2/property_store.hpp +++ b/src/storage/v2/property_store.hpp @@ -55,7 +55,6 @@ class PropertyStore { /// Checks whether all property values in the vector `property_values` exist in the store. The time /// complexity of this function is O(n^2). - /// TODO: andi Not so sure it is quadratic complexity bool HasAllPropertyValues(const std::vector &property_values) const; /// Extracts property values for all property ids in the set `properties`. The time diff --git a/src/storage/v2/property_value.hpp b/src/storage/v2/property_value.hpp index 05ab1d3db..727c75377 100644 --- a/src/storage/v2/property_value.hpp +++ b/src/storage/v2/property_value.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -123,21 +123,21 @@ class PropertyValue { // value getters for primitive types /// @throw PropertyValueException if value isn't of correct type. bool ValueBool() const { - if (type_ != Type::Bool) { + if (type_ != Type::Bool) [[unlikely]] { throw PropertyValueException("The value isn't a bool!"); } return bool_v; } /// @throw PropertyValueException if value isn't of correct type. int64_t ValueInt() const { - if (type_ != Type::Int) { + if (type_ != Type::Int) [[unlikely]] { throw PropertyValueException("The value isn't an int!"); } return int_v; } /// @throw PropertyValueException if value isn't of correct type. double ValueDouble() const { - if (type_ != Type::Double) { + if (type_ != Type::Double) [[unlikely]] { throw PropertyValueException("The value isn't a double!"); } return double_v; @@ -145,7 +145,7 @@ class PropertyValue { /// @throw PropertyValueException if value isn't of correct type. TemporalData ValueTemporalData() const { - if (type_ != Type::TemporalData) { + if (type_ != Type::TemporalData) [[unlikely]] { throw PropertyValueException("The value isn't a temporal data!"); } @@ -155,7 +155,7 @@ class PropertyValue { // const value getters for non-primitive types /// @throw PropertyValueException if value isn't of correct type. 
const std::string &ValueString() const { - if (type_ != Type::String) { + if (type_ != Type::String) [[unlikely]] { throw PropertyValueException("The value isn't a string!"); } return string_v; @@ -163,7 +163,7 @@ class PropertyValue { /// @throw PropertyValueException if value isn't of correct type. const std::vector &ValueList() const { - if (type_ != Type::List) { + if (type_ != Type::List) [[unlikely]] { throw PropertyValueException("The value isn't a list!"); } return list_v; @@ -171,7 +171,7 @@ class PropertyValue { /// @throw PropertyValueException if value isn't of correct type. const std::map &ValueMap() const { - if (type_ != Type::Map) { + if (type_ != Type::Map) [[unlikely]] { throw PropertyValueException("The value isn't a map!"); } return map_v; @@ -180,7 +180,7 @@ class PropertyValue { // reference value getters for non-primitive types /// @throw PropertyValueException if value isn't of correct type. std::string &ValueString() { - if (type_ != Type::String) { + if (type_ != Type::String) [[unlikely]] { throw PropertyValueException("The value isn't a string!"); } return string_v; @@ -188,7 +188,7 @@ class PropertyValue { /// @throw PropertyValueException if value isn't of correct type. std::vector &ValueList() { - if (type_ != Type::List) { + if (type_ != Type::List) [[unlikely]] { throw PropertyValueException("The value isn't a list!"); } return list_v; @@ -196,7 +196,7 @@ class PropertyValue { /// @throw PropertyValueException if value isn't of correct type. std::map &ValueMap() { - if (type_ != Type::Map) { + if (type_ != Type::Map) [[unlikely]] { throw PropertyValueException("The value isn't a map!"); } return map_v; @@ -279,7 +279,7 @@ inline bool operator==(const PropertyValue &first, const PropertyValue &second) case PropertyValue::Type::Bool: return first.ValueBool() == second.ValueBool(); case PropertyValue::Type::Int: - if (second.type() == PropertyValue::Type::Double) { + if (second.type() == PropertyValue::Type::Double) [[unlikely]] { return first.ValueInt() == second.ValueDouble(); } else { return first.ValueInt() == second.ValueInt(); @@ -310,7 +310,7 @@ inline bool operator<(const PropertyValue &first, const PropertyValue &second) n case PropertyValue::Type::Bool: return first.ValueBool() < second.ValueBool(); case PropertyValue::Type::Int: - if (second.type() == PropertyValue::Type::Double) { + if (second.type() == PropertyValue::Type::Double) [[unlikely]] { return first.ValueInt() < second.ValueDouble(); } else { return first.ValueInt() < second.ValueInt(); @@ -363,36 +363,35 @@ inline PropertyValue::PropertyValue(const PropertyValue &other) : type_(other.ty } } -inline PropertyValue::PropertyValue(PropertyValue &&other) noexcept : type_(other.type_) { - switch (other.type_) { +inline PropertyValue::PropertyValue(PropertyValue &&other) noexcept : type_(std::exchange(other.type_, Type::Null)) { + switch (type_) { case Type::Null: break; case Type::Bool: - this->bool_v = other.bool_v; + bool_v = other.bool_v; break; case Type::Int: - this->int_v = other.int_v; + int_v = other.int_v; break; case Type::Double: - this->double_v = other.double_v; + double_v = other.double_v; break; case Type::String: - new (&string_v) std::string(std::move(other.string_v)); + std::construct_at(&string_v, std::move(other.string_v)); + std::destroy_at(&other.string_v); break; case Type::List: - new (&list_v) std::vector(std::move(other.list_v)); + std::construct_at(&list_v, std::move(other.list_v)); + std::destroy_at(&other.list_v); break; case Type::Map: - new (&map_v) 
std::map(std::move(other.map_v)); + std::construct_at(&map_v, std::move(other.map_v)); + std::destroy_at(&other.map_v); break; case Type::TemporalData: - this->temporal_data_v = other.temporal_data_v; + temporal_data_v = other.temporal_data_v; break; } - - // reset the type of other - other.DestroyValue(); - other.type_ = Type::Null; } inline PropertyValue &PropertyValue::operator=(const PropertyValue &other) { @@ -431,46 +430,48 @@ inline PropertyValue &PropertyValue::operator=(const PropertyValue &other) { } inline PropertyValue &PropertyValue::operator=(PropertyValue &&other) noexcept { - if (this == &other) return *this; + if (type_ == other.type_) { + // maybe the same object, check if no work is required + if (this == &other) return *this; - DestroyValue(); - type_ = other.type_; - - switch (other.type_) { - case Type::Null: - break; - case Type::Bool: - this->bool_v = other.bool_v; - break; - case Type::Int: - this->int_v = other.int_v; - break; - case Type::Double: - this->double_v = other.double_v; - break; - case Type::String: - new (&string_v) std::string(std::move(other.string_v)); - break; - case Type::List: - new (&list_v) std::vector(std::move(other.list_v)); - break; - case Type::Map: - new (&map_v) std::map(std::move(other.map_v)); - break; - case Type::TemporalData: - this->temporal_data_v = other.temporal_data_v; - break; + switch (type_) { + case Type::Null: + break; + case Type::Bool: + bool_v = other.bool_v; + break; + case Type::Int: + int_v = other.int_v; + break; + case Type::Double: + double_v = other.double_v; + break; + case Type::String: + string_v = std::move(other.string_v); + std::destroy_at(&other.string_v); + break; + case Type::List: + list_v = std::move(other.list_v); + std::destroy_at(&other.list_v); + break; + case Type::Map: + map_v = std::move(other.map_v); + std::destroy_at(&other.map_v); + break; + case Type::TemporalData: + temporal_data_v = other.temporal_data_v; + break; + } + other.type_ = Type::Null; + return *this; + } else { + std::destroy_at(this); + return *std::construct_at(std::launder(this), std::move(other)); } - - // reset the type of other - other.DestroyValue(); - other.type_ = Type::Null; - - return *this; } inline void PropertyValue::DestroyValue() noexcept { - switch (type_) { + switch (std::exchange(type_, Type::Null)) { // destructor for primitive types does nothing case Type::Null: case Type::Bool: diff --git a/src/storage/v2/replication/global.hpp b/src/storage/v2/replication/global.hpp index 7892fb990..ebcec1206 100644 --- a/src/storage/v2/replication/global.hpp +++ b/src/storage/v2/replication/global.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -31,7 +31,7 @@ struct TimestampInfo { struct ReplicaInfo { std::string name; - memgraph::replication::ReplicationMode mode; + replication_coordination_glue::ReplicationMode mode; io::network::Endpoint endpoint; replication::ReplicaState state; TimestampInfo timestamp_info; diff --git a/src/storage/v2/replication/replication_client.cpp b/src/storage/v2/replication/replication_client.cpp index ae0664bc4..b68618e04 100644 --- a/src/storage/v2/replication/replication_client.cpp +++ b/src/storage/v2/replication/replication_client.cpp @@ -10,14 +10,13 @@ // licenses/APL.txt. 
#include "replication/replication_client.hpp" -#include "storage/v2/durability/durability.hpp" #include "storage/v2/inmemory/storage.hpp" #include "storage/v2/storage.hpp" #include "utils/exceptions.hpp" +#include "utils/on_scope_exit.hpp" #include "utils/variant_helpers.hpp" #include -#include namespace { template @@ -29,14 +28,26 @@ namespace memgraph::storage { ReplicationStorageClient::ReplicationStorageClient(::memgraph::replication::ReplicationClient &client) : client_{client} {} -void ReplicationStorageClient::CheckReplicaState(Storage *storage) { +void ReplicationStorageClient::UpdateReplicaState(Storage *storage, DatabaseAccessProtector db_acc) { uint64_t current_commit_timestamp{kTimestampInitialId}; auto &replStorageState = storage->repl_storage_state_; - auto stream{client_.rpc_client_.Stream( - storage->id(), replStorageState.last_commit_timestamp_, std::string{replStorageState.epoch_.id()})}; - const auto replica = stream.AwaitResponse(); + auto hb_stream{client_.rpc_client_.Stream( + storage->uuid(), replStorageState.last_commit_timestamp_, std::string{replStorageState.epoch_.id()})}; + const auto replica = hb_stream.AwaitResponse(); + +#ifdef MG_ENTERPRISE // Multi-tenancy is only supported in enterprise + if (!replica.success) { // Replica is missing the current database + client_.state_.WithLock([&](auto &state) { + spdlog::debug("Replica '{}' missing database '{}' - '{}'", client_.name_, storage->name(), + std::string{storage->uuid()}); + state = memgraph::replication::ReplicationClient::State::BEHIND; + }); + return; + } +#endif + std::optional branching_point; if (replica.epoch_id != replStorageState.epoch_.id() && replica.current_commit_timestamp != kTimestampInitialId) { auto const &history = replStorageState.history; @@ -56,6 +67,7 @@ void ReplicationStorageClient::CheckReplicaState(Storage *storage) { "now hold unique data. 
Please resolve data conflicts and start the " "replication on a clean instance.", client_.name_, client_.name_, client_.name_); + // TODO: (andi) Talk about renaming MAYBE_BEHIND to branching // State not updated, hence in MAYBE_BEHIND state return; } @@ -70,8 +82,9 @@ void ReplicationStorageClient::CheckReplicaState(Storage *storage) { } else { spdlog::debug("Replica '{}' is behind", client_.name_); state = replication::ReplicaState::RECOVERY; - client_.thread_pool_.AddTask( - [storage, current_commit_timestamp, this] { this->RecoverReplica(current_commit_timestamp, storage); }); + client_.thread_pool_.AddTask([storage, current_commit_timestamp, gk = std::move(db_acc), this] { + this->RecoverReplica(current_commit_timestamp, storage); + }); } }); } @@ -82,16 +95,18 @@ TimestampInfo ReplicationStorageClient::GetTimestampInfo(Storage const *storage) info.current_number_of_timestamp_behind_master = 0; try { - auto stream{client_.rpc_client_.Stream(storage->id())}; + auto stream{client_.rpc_client_.Stream(storage->uuid())}; const auto response = stream.AwaitResponse(); const auto is_success = response.success; - if (!is_success) { - replica_state_.WithLock([](auto &val) { val = replication::ReplicaState::MAYBE_BEHIND; }); - LogRpcFailure(); - } + auto main_time_stamp = storage->repl_storage_state_.last_commit_timestamp_.load(); info.current_timestamp_of_replica = response.current_commit_timestamp; info.current_number_of_timestamp_behind_master = response.current_commit_timestamp - main_time_stamp; + + if (!is_success || info.current_number_of_timestamp_behind_master != 0) { + replica_state_.WithLock([](auto &val) { val = replication::ReplicaState::MAYBE_BEHIND; }); + LogRpcFailure(); + } } catch (const rpc::RpcFailedException &) { replica_state_.WithLock([](auto &val) { val = replication::ReplicaState::MAYBE_BEHIND; }); LogRpcFailure(); // mutex already unlocked, if the new enqueued task dispatches immediately it probably @@ -106,13 +121,15 @@ void ReplicationStorageClient::LogRpcFailure() { utils::MessageWithLink("Couldn't replicate data to {}.", client_.name_, "https://memgr.ph/replication")); } -void ReplicationStorageClient::TryCheckReplicaStateAsync(Storage *storage) { - client_.thread_pool_.AddTask([storage, this] { this->TryCheckReplicaStateSync(storage); }); +void ReplicationStorageClient::TryCheckReplicaStateAsync(Storage *storage, DatabaseAccessProtector db_acc) { + client_.thread_pool_.AddTask([storage, db_acc = std::move(db_acc), this]() mutable { + this->TryCheckReplicaStateSync(storage, std::move(db_acc)); + }); } -void ReplicationStorageClient::TryCheckReplicaStateSync(Storage *storage) { +void ReplicationStorageClient::TryCheckReplicaStateSync(Storage *storage, DatabaseAccessProtector db_acc) { try { - CheckReplicaState(storage); + UpdateReplicaState(storage, std::move(db_acc)); } catch (const rpc::VersionMismatchRpcFailedException &) { replica_state_.WithLock([](auto &val) { val = replication::ReplicaState::MAYBE_BEHIND; }); spdlog::error( @@ -126,7 +143,8 @@ void ReplicationStorageClient::TryCheckReplicaStateSync(Storage *storage) { } } -void ReplicationStorageClient::StartTransactionReplication(const uint64_t current_wal_seq_num, Storage *storage) { +void ReplicationStorageClient::StartTransactionReplication(const uint64_t current_wal_seq_num, Storage *storage, + DatabaseAccessProtector db_acc) { auto locked_state = replica_state_.Lock(); switch (*locked_state) { using enum replication::ReplicaState; @@ -150,7 +168,7 @@ void 
ReplicationStorageClient::StartTransactionReplication(const uint64_t curren case MAYBE_BEHIND: spdlog::error( utils::MessageWithLink("Couldn't replicate data to {}.", client_.name_, "https://memgr.ph/replication")); - TryCheckReplicaStateAsync(storage); + TryCheckReplicaStateAsync(storage, std::move(db_acc)); return; case READY: MG_ASSERT(!replica_stream_); @@ -165,7 +183,7 @@ void ReplicationStorageClient::StartTransactionReplication(const uint64_t curren } } -bool ReplicationStorageClient::FinalizeTransactionReplication(Storage *storage) { +bool ReplicationStorageClient::FinalizeTransactionReplication(Storage *storage, DatabaseAccessProtector db_acc) { // We can only check the state because it guarantees to be only // valid during a single transaction replication (if the assumption // that this and other transaction replication functions can only be @@ -174,18 +192,26 @@ bool ReplicationStorageClient::FinalizeTransactionReplication(Storage *storage) return false; } - if (replica_stream_->IsDefunct()) return false; + if (!replica_stream_ || replica_stream_->IsDefunct()) { + replica_state_.WithLock([this](auto &state) { + replica_stream_.reset(); + state = replication::ReplicaState::MAYBE_BEHIND; + }); + LogRpcFailure(); + return false; + } - auto task = [storage, this]() { + auto task = [storage, db_acc = std::move(db_acc), this]() mutable { MG_ASSERT(replica_stream_, "Missing stream for transaction deltas"); try { auto response = replica_stream_->Finalize(); - return replica_state_.WithLock([storage, &response, this](auto &state) { + return replica_state_.WithLock([storage, &response, db_acc = std::move(db_acc), this](auto &state) mutable { replica_stream_.reset(); if (!response.success || state == replication::ReplicaState::RECOVERY) { state = replication::ReplicaState::RECOVERY; - client_.thread_pool_.AddTask( - [storage, &response, this] { this->RecoverReplica(response.current_commit_timestamp, storage); }); + client_.thread_pool_.AddTask([storage, &response, db_acc = std::move(db_acc), this] { + this->RecoverReplica(response.current_commit_timestamp, storage); + }); return false; } state = replication::ReplicaState::READY; @@ -201,17 +227,17 @@ bool ReplicationStorageClient::FinalizeTransactionReplication(Storage *storage) } }; - if (client_.mode_ == memgraph::replication::ReplicationMode::ASYNC) { - client_.thread_pool_.AddTask([task = std::move(task)] { (void)task(); }); + if (client_.mode_ == replication_coordination_glue::ReplicationMode::ASYNC) { + client_.thread_pool_.AddTask([task = std::move(task)]() mutable { (void)task(); }); return true; } return task(); } -void ReplicationStorageClient::Start(Storage *storage) { - spdlog::trace("Replication client started for database \"{}\"", storage->id()); - TryCheckReplicaStateSync(storage); +void ReplicationStorageClient::Start(Storage *storage, DatabaseAccessProtector db_acc) { + spdlog::trace("Replication client started for database \"{}\"", storage->name()); + TryCheckReplicaStateSync(storage, std::move(db_acc)); } void ReplicationStorageClient::RecoverReplica(uint64_t replica_commit, memgraph::storage::Storage *storage) { @@ -233,12 +259,12 @@ void ReplicationStorageClient::RecoverReplica(uint64_t replica_commit, memgraph: std::visit(utils::Overloaded{ [&replica_commit, mem_storage, &rpcClient](RecoverySnapshot const &snapshot) { spdlog::debug("Sending the latest snapshot file: {}", snapshot); - auto response = TransferSnapshot(mem_storage->id(), rpcClient, snapshot); + auto response = TransferSnapshot(mem_storage->uuid(), 
rpcClient, snapshot); replica_commit = response.current_commit_timestamp; }, [&replica_commit, mem_storage, &rpcClient](RecoveryWals const &wals) { spdlog::debug("Sending the latest wal files"); - auto response = TransferWalFiles(mem_storage->id(), rpcClient, wals); + auto response = TransferWalFiles(mem_storage->uuid(), rpcClient, wals); replica_commit = response.current_commit_timestamp; spdlog::debug("Wal files successfully transferred."); }, @@ -246,11 +272,11 @@ void ReplicationStorageClient::RecoverReplica(uint64_t replica_commit, memgraph: std::unique_lock transaction_guard(mem_storage->engine_lock_); if (mem_storage->wal_file_ && mem_storage->wal_file_->SequenceNumber() == current_wal.current_wal_seq_num) { + utils::OnScopeExit on_exit([mem_storage]() { mem_storage->wal_file_->EnableFlushing(); }); mem_storage->wal_file_->DisableFlushing(); transaction_guard.unlock(); spdlog::debug("Sending current wal file"); replica_commit = ReplicateCurrentWal(mem_storage, rpcClient, *mem_storage->wal_file_); - mem_storage->wal_file_->EnableFlushing(); } else { spdlog::debug("Cannot recover using current wal file"); } @@ -291,14 +317,14 @@ void ReplicationStorageClient::RecoverReplica(uint64_t replica_commit, memgraph: ReplicaStream::ReplicaStream(Storage *storage, rpc::Client &rpc_client, const uint64_t current_seq_num) : storage_{storage}, stream_(rpc_client.Stream( - storage->id(), storage->repl_storage_state_.last_commit_timestamp_.load(), current_seq_num)) { + storage->uuid(), storage->repl_storage_state_.last_commit_timestamp_.load(), current_seq_num)) { replication::Encoder encoder{stream_.GetBuilder()}; encoder.WriteString(storage->repl_storage_state_.epoch_.id()); } void ReplicaStream::AppendDelta(const Delta &delta, const Vertex &vertex, uint64_t final_commit_timestamp) { replication::Encoder encoder(stream_.GetBuilder()); - EncodeDelta(&encoder, storage_->name_id_mapper_.get(), storage_->config_.items, delta, vertex, + EncodeDelta(&encoder, storage_->name_id_mapper_.get(), storage_->config_.salient.items, delta, vertex, final_commit_timestamp); } diff --git a/src/storage/v2/replication/replication_client.hpp b/src/storage/v2/replication/replication_client.hpp index 4ef00f623..fbcffe422 100644 --- a/src/storage/v2/replication/replication_client.hpp +++ b/src/storage/v2/replication/replication_client.hpp @@ -13,9 +13,10 @@ #include "replication/config.hpp" #include "replication/epoch.hpp" -#include "replication/messages.hpp" #include "replication/replication_client.hpp" +#include "replication_coordination_glue/messages.hpp" #include "rpc/client.hpp" +#include "storage/v2/database_access.hpp" #include "storage/v2/durability/storage_global_operation.hpp" #include "storage/v2/id_types.hpp" #include "storage/v2/indices/label_index_stats.hpp" @@ -93,20 +94,34 @@ class ReplicationStorageClient { ~ReplicationStorageClient() = default; // TODO Remove the client related functions - auto Mode() const -> memgraph::replication::ReplicationMode { return client_.mode_; } + auto Mode() const -> memgraph::replication_coordination_glue::ReplicationMode { return client_.mode_; } auto Name() const -> std::string const & { return client_.name_; } auto Endpoint() const -> io::network::Endpoint const & { return client_.rpc_client_.Endpoint(); } auto State() const -> replication::ReplicaState { return replica_state_.WithLock(std::identity()); } auto GetTimestampInfo(Storage const *storage) -> TimestampInfo; - void Start(Storage *storage); - void StartTransactionReplication(uint64_t current_wal_seq_num, 
Storage *storage); + /** + * @brief Start the replication client by checking the replica state + * + * @param storage pointer to the storage associated with the client + * @param db_acc gatekeeper access that protects the database; std::any to have separation between dbms and storage + */ + void Start(Storage *storage, DatabaseAccessProtector db_acc); + + /** + * @brief Start a new transaction replication (open up a stream) + * + * @param current_wal_seq_num sequence number of the current WAL file + * @param storage pointer to the storage associated with the client + * @param db_acc gatekeeper access that protects the database; std::any to have separation between dbms and storage + */ + void StartTransactionReplication(uint64_t current_wal_seq_num, Storage *storage, DatabaseAccessProtector db_acc); // Replication clients can be removed at any point // so to avoid any complexity of checking if the client was removed whenever // we want to send part of transaction and to avoid adding some GC logic this - // function will run a callback if, after previously callling + // function will run a callback if, after previously calling // StartTransactionReplication, stream is created. template void IfStreamingTransaction(F &&callback) { @@ -118,7 +133,11 @@ class ReplicationStorageClient { return; } if (!replica_stream_ || replica_stream_->IsDefunct()) { - replica_state_.WithLock([](auto &state) { state = replication::ReplicaState::MAYBE_BEHIND; }); + replica_state_.WithLock([this](auto &state) { + replica_stream_.reset(); + state = replication::ReplicaState::MAYBE_BEHIND; + }); + LogRpcFailure(); return; } try { @@ -126,20 +145,56 @@ class ReplicationStorageClient { } catch (const rpc::RpcFailedException &) { replica_state_.WithLock([](auto &state) { state = replication::ReplicaState::MAYBE_BEHIND; }); LogRpcFailure(); + return; } } - // Return whether the transaction could be finalized on the replication client or not. - [[nodiscard]] bool FinalizeTransactionReplication(Storage *storage); + /** + * @brief Return whether the transaction could be finalized on the replication client or not. + * + * @param storage pointer to the storage associated with the client + * @param db_acc gatekeeper access that protects the database; std::any to have separation between dbms and storage + * @return true if the transaction was finalized on the replica + * @return false otherwise + */ + [[nodiscard]] bool FinalizeTransactionReplication(Storage *storage, DatabaseAccessProtector db_acc); + + /** + * @brief Asynchronously try to check the replica state and start a recovery thread if necessary + * + * @param storage pointer to the storage associated with the client + * @param db_acc gatekeeper access that protects the database; std::any to have separation between dbms and storage + */ + void TryCheckReplicaStateAsync(Storage *storage, DatabaseAccessProtector db_acc); // TODO Move back to private + + auto &Client() { return client_; } - void TryCheckReplicaStateAsync(Storage *storage); // TODO Move back to private private: + /** + * @brief Get necessary recovery steps and execute them.
+ * + * @param replica_commit the commit up to which we should recover to + * @param storage pointer to the storage associated with the client + */ void RecoverReplica(uint64_t replica_commit, memgraph::storage::Storage *storage); - void CheckReplicaState(Storage *storage); + /** + * @brief Check replica state + * + * @param storage pointer to the storage associated with the client + * @param db_acc gatekeeper access that protects the database; std::any to have separation between dbms and storage + */ + void UpdateReplicaState(Storage *storage, DatabaseAccessProtector db_acc); + void LogRpcFailure(); - void TryCheckReplicaStateSync(Storage *storage); - void FrequentCheck(Storage *storage); + + /** + * @brief Synchronously try to check the replica state and start a recovery thread if necessary + * + * @param storage pointer to the storage associated with the client + * @param db_acc gatekeeper access that protects the database; std::any to have separation between dbms and storage + */ + void TryCheckReplicaStateSync(Storage *storage, DatabaseAccessProtector db_acc); ::memgraph::replication::ReplicationClient &client_; // TODO Do not store the stream, make is a local variable diff --git a/src/storage/v2/replication/replication_storage_state.cpp b/src/storage/v2/replication/replication_storage_state.cpp index a443c7171..25cf484c9 100644 --- a/src/storage/v2/replication/replication_storage_state.cpp +++ b/src/storage/v2/replication/replication_storage_state.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -16,10 +16,11 @@ namespace memgraph::storage { -void ReplicationStorageState::InitializeTransaction(uint64_t seq_num, Storage *storage) { - replication_clients_.WithLock([=](auto &clients) { +void ReplicationStorageState::InitializeTransaction(uint64_t seq_num, Storage *storage, + DatabaseAccessProtector db_acc) { + replication_clients_.WithLock([=, db_acc = std::move(db_acc)](auto &clients) mutable { for (auto &client : clients) { - client->StartTransactionReplication(seq_num, storage); + client->StartTransactionReplication(seq_num, storage, std::move(db_acc)); } }); } @@ -52,14 +53,18 @@ void ReplicationStorageState::AppendOperation(durability::StorageMetadataOperati }); } -bool ReplicationStorageState::FinalizeTransaction(uint64_t timestamp, Storage *storage) { - return replication_clients_.WithLock([=](auto &clients) { +bool ReplicationStorageState::FinalizeTransaction(uint64_t timestamp, Storage *storage, + DatabaseAccessProtector db_acc) { + return replication_clients_.WithLock([=, db_acc = std::move(db_acc)](auto &clients) mutable { bool finalized_on_all_replicas = true; + MG_ASSERT(clients.empty() || db_acc.has_value(), + "Having any clients assumes we are MAIN, we should have gatekeeper_access_wrapper so we can correctly " + "handle ASYNC tasks"); for (ReplicationClientPtr &client : clients) { client->IfStreamingTransaction([&](auto &stream) { stream.AppendTransactionEnd(timestamp); }); - const auto finalized = client->FinalizeTransactionReplication(storage); + const auto finalized = client->FinalizeTransactionReplication(storage, std::move(db_acc)); - if (client->Mode() == memgraph::replication::ReplicationMode::SYNC) { + if (client->Mode() == replication_coordination_glue::ReplicationMode::SYNC) { finalized_on_all_replicas =
finalized && finalized_on_all_replicas; } } @@ -83,7 +88,8 @@ std::vector ReplicationStorageState::ReplicasInfo(const Storage *st std::vector replica_infos; replica_infos.reserve(clients.size()); auto const asReplicaInfo = [storage](ReplicationClientPtr const &client) -> ReplicaInfo { - return {client->Name(), client->Mode(), client->Endpoint(), client->State(), client->GetTimestampInfo(storage)}; + const auto ts = client->GetTimestampInfo(storage); + return {client->Name(), client->Mode(), client->Endpoint(), client->State(), ts}; }; std::transform(clients.begin(), clients.end(), std::back_inserter(replica_infos), asReplicaInfo); return replica_infos; diff --git a/src/storage/v2/replication/replication_storage_state.hpp b/src/storage/v2/replication/replication_storage_state.hpp index e3d6b94a0..adbf87aa9 100644 --- a/src/storage/v2/replication/replication_storage_state.hpp +++ b/src/storage/v2/replication/replication_storage_state.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -25,6 +25,7 @@ #include "replication/config.hpp" #include "replication/epoch.hpp" #include "replication/state.hpp" +#include "storage/v2/database_access.hpp" #include "storage/v2/replication/enums.hpp" #include "storage/v2/replication/global.hpp" #include "storage/v2/replication/rpc.hpp" @@ -39,13 +40,13 @@ class ReplicationStorageClient; struct ReplicationStorageState { // Only MAIN can send - void InitializeTransaction(uint64_t seq_num, Storage *storage); + void InitializeTransaction(uint64_t seq_num, Storage *storage, DatabaseAccessProtector db_acc); void AppendDelta(const Delta &delta, const Vertex &vertex, uint64_t timestamp); void AppendDelta(const Delta &delta, const Edge &edge, uint64_t timestamp); void AppendOperation(durability::StorageMetadataOperation operation, LabelId label, const std::set &properties, const LabelIndexStats &stats, const LabelPropertyIndexStats &property_stats, uint64_t final_commit_timestamp); - bool FinalizeTransaction(uint64_t timestamp, Storage *storage); + bool FinalizeTransaction(uint64_t timestamp, Storage *storage, DatabaseAccessProtector db_acc); // Getters auto GetReplicaState(std::string_view name) const -> std::optional; diff --git a/src/storage/v2/replication/rpc.cpp b/src/storage/v2/replication/rpc.cpp index b722dfebf..27fc1a0d6 100644 --- a/src/storage/v2/replication/rpc.cpp +++ b/src/storage/v2/replication/rpc.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -10,6 +10,9 @@ // licenses/APL.txt. 
#include "storage/v2/replication/rpc.hpp" +#include +#include "slk/streams.hpp" +#include "utils/enum.hpp" #include "utils/typeinfo.hpp" namespace memgraph { @@ -56,6 +59,38 @@ void TimestampRes::Save(const TimestampRes &self, memgraph::slk::Builder *builde memgraph::slk::Save(self, builder); } void TimestampRes::Load(TimestampRes *self, memgraph::slk::Reader *reader) { memgraph::slk::Load(self, reader); } +void CreateDatabaseReq::Save(const CreateDatabaseReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void CreateDatabaseReq::Load(CreateDatabaseReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} +void CreateDatabaseRes::Save(const CreateDatabaseRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void CreateDatabaseRes::Load(CreateDatabaseRes *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} +void DropDatabaseReq::Save(const DropDatabaseReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void DropDatabaseReq::Load(DropDatabaseReq *self, memgraph::slk::Reader *reader) { memgraph::slk::Load(self, reader); } +void DropDatabaseRes::Save(const DropDatabaseRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void DropDatabaseRes::Load(DropDatabaseRes *self, memgraph::slk::Reader *reader) { memgraph::slk::Load(self, reader); } +void SystemRecoveryReq::Save(const SystemRecoveryReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void SystemRecoveryReq::Load(SystemRecoveryReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} +void SystemRecoveryRes::Save(const SystemRecoveryRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self, builder); +} +void SystemRecoveryRes::Load(SystemRecoveryRes *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(self, reader); +} } // namespace storage::replication @@ -95,18 +130,34 @@ constexpr utils::TypeInfo storage::replication::TimestampReq::kType{utils::TypeI constexpr utils::TypeInfo storage::replication::TimestampRes::kType{utils::TypeId::REP_TIMESTAMP_RES, "TimestampRes", nullptr}; +constexpr utils::TypeInfo storage::replication::CreateDatabaseReq::kType{utils::TypeId::REP_CREATE_DATABASE_REQ, + "CreateDatabaseReq", nullptr}; + +constexpr utils::TypeInfo storage::replication::CreateDatabaseRes::kType{utils::TypeId::REP_CREATE_DATABASE_RES, + "CreateDatabaseRes", nullptr}; + +constexpr utils::TypeInfo storage::replication::DropDatabaseReq::kType{utils::TypeId::REP_DROP_DATABASE_REQ, + "DropDatabaseReq", nullptr}; + +constexpr utils::TypeInfo storage::replication::DropDatabaseRes::kType{utils::TypeId::REP_DROP_DATABASE_RES, + "DropDatabaseRes", nullptr}; + +constexpr utils::TypeInfo storage::replication::SystemRecoveryReq::kType{utils::TypeId::REP_SYSTEM_RECOVERY_REQ, + "SystemRecoveryReq", nullptr}; + +constexpr utils::TypeInfo storage::replication::SystemRecoveryRes::kType{utils::TypeId::REP_SYSTEM_RECOVERY_RES, + "SystemRecoveryRes", nullptr}; + // Autogenerated SLK serialization code namespace slk { // Serialize code for TimestampRes void Save(const memgraph::storage::replication::TimestampRes &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); memgraph::slk::Save(self.success, builder); memgraph::slk::Save(self.current_commit_timestamp, builder); } void Load(memgraph::storage::replication::TimestampRes *self, 
memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); memgraph::slk::Load(&self->success, reader); memgraph::slk::Load(&self->current_commit_timestamp, reader); } @@ -114,23 +165,21 @@ void Load(memgraph::storage::replication::TimestampRes *self, memgraph::slk::Rea // Serialize code for TimestampReq void Save(const memgraph::storage::replication::TimestampReq &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); + memgraph::slk::Save(self.uuid, builder); } void Load(memgraph::storage::replication::TimestampReq *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); + memgraph::slk::Load(&self->uuid, reader); } // Serialize code for CurrentWalRes void Save(const memgraph::storage::replication::CurrentWalRes &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); memgraph::slk::Save(self.success, builder); memgraph::slk::Save(self.current_commit_timestamp, builder); } void Load(memgraph::storage::replication::CurrentWalRes *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); memgraph::slk::Load(&self->success, reader); memgraph::slk::Load(&self->current_commit_timestamp, reader); } @@ -138,23 +187,21 @@ void Load(memgraph::storage::replication::CurrentWalRes *self, memgraph::slk::Re // Serialize code for CurrentWalReq void Save(const memgraph::storage::replication::CurrentWalReq &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); + memgraph::slk::Save(self.uuid, builder); } void Load(memgraph::storage::replication::CurrentWalReq *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); + memgraph::slk::Load(&self->uuid, reader); } // Serialize code for WalFilesRes void Save(const memgraph::storage::replication::WalFilesRes &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); memgraph::slk::Save(self.success, builder); memgraph::slk::Save(self.current_commit_timestamp, builder); } void Load(memgraph::storage::replication::WalFilesRes *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); memgraph::slk::Load(&self->success, reader); memgraph::slk::Load(&self->current_commit_timestamp, reader); } @@ -162,25 +209,23 @@ void Load(memgraph::storage::replication::WalFilesRes *self, memgraph::slk::Read // Serialize code for WalFilesReq void Save(const memgraph::storage::replication::WalFilesReq &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); + memgraph::slk::Save(self.uuid, builder); memgraph::slk::Save(self.file_number, builder); } void Load(memgraph::storage::replication::WalFilesReq *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); + memgraph::slk::Load(&self->uuid, reader); memgraph::slk::Load(&self->file_number, reader); } // Serialize code for SnapshotRes void Save(const memgraph::storage::replication::SnapshotRes &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); memgraph::slk::Save(self.success, builder); memgraph::slk::Save(self.current_commit_timestamp, builder); } void Load(memgraph::storage::replication::SnapshotRes *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); memgraph::slk::Load(&self->success, reader); memgraph::slk::Load(&self->current_commit_timestamp, reader); } @@ -188,24 +233,22 @@ void Load(memgraph::storage::replication::SnapshotRes *self, 
memgraph::slk::Read // Serialize code for SnapshotReq void Save(const memgraph::storage::replication::SnapshotReq &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); + memgraph::slk::Save(self.uuid, builder); } void Load(memgraph::storage::replication::SnapshotReq *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); + memgraph::slk::Load(&self->uuid, reader); } // Serialize code for HeartbeatRes void Save(const memgraph::storage::replication::HeartbeatRes &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); memgraph::slk::Save(self.success, builder); memgraph::slk::Save(self.current_commit_timestamp, builder); memgraph::slk::Save(self.epoch_id, builder); } void Load(memgraph::storage::replication::HeartbeatRes *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); memgraph::slk::Load(&self->success, reader); memgraph::slk::Load(&self->current_commit_timestamp, reader); memgraph::slk::Load(&self->epoch_id, reader); @@ -214,13 +257,13 @@ void Load(memgraph::storage::replication::HeartbeatRes *self, memgraph::slk::Rea // Serialize code for HeartbeatReq void Save(const memgraph::storage::replication::HeartbeatReq &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); + memgraph::slk::Save(self.uuid, builder); memgraph::slk::Save(self.main_commit_timestamp, builder); memgraph::slk::Save(self.epoch_id, builder); } void Load(memgraph::storage::replication::HeartbeatReq *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); + memgraph::slk::Load(&self->uuid, reader); memgraph::slk::Load(&self->main_commit_timestamp, reader); memgraph::slk::Load(&self->epoch_id, reader); } @@ -228,13 +271,11 @@ void Load(memgraph::storage::replication::HeartbeatReq *self, memgraph::slk::Rea // Serialize code for AppendDeltasRes void Save(const memgraph::storage::replication::AppendDeltasRes &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); memgraph::slk::Save(self.success, builder); memgraph::slk::Save(self.current_commit_timestamp, builder); } void Load(memgraph::storage::replication::AppendDeltasRes *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); memgraph::slk::Load(&self->success, reader); memgraph::slk::Load(&self->current_commit_timestamp, reader); } @@ -242,15 +283,124 @@ void Load(memgraph::storage::replication::AppendDeltasRes *self, memgraph::slk:: // Serialize code for AppendDeltasReq void Save(const memgraph::storage::replication::AppendDeltasReq &self, memgraph::slk::Builder *builder) { - memgraph::slk::Save(self.db_name, builder); + memgraph::slk::Save(self.uuid, builder); memgraph::slk::Save(self.previous_commit_timestamp, builder); memgraph::slk::Save(self.seq_num, builder); } void Load(memgraph::storage::replication::AppendDeltasReq *self, memgraph::slk::Reader *reader) { - memgraph::slk::Load(&self->db_name, reader); + memgraph::slk::Load(&self->uuid, reader); memgraph::slk::Load(&self->previous_commit_timestamp, reader); memgraph::slk::Load(&self->seq_num, reader); } + +// Serialize SalientConfig + +void Save(const memgraph::storage::SalientConfig &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self.name, builder); + memgraph::slk::Save(self.uuid, builder); + memgraph::slk::Save(utils::EnumToNum<3, uint8_t>(self.storage_mode), builder); + memgraph::slk::Save(self.items.properties_on_edges, builder); + 
memgraph::slk::Save(self.items.enable_schema_metadata, builder); +} + +void Load(memgraph::storage::SalientConfig *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(&self->name, reader); + memgraph::slk::Load(&self->uuid, reader); + uint8_t sm = 0; + memgraph::slk::Load(&sm, reader); + if (!utils::NumToEnum<3>(sm, self->storage_mode)) { + throw SlkReaderException("Unexpected result line:{}!", __LINE__); + } + memgraph::slk::Load(&self->items.properties_on_edges, reader); + memgraph::slk::Load(&self->items.enable_schema_metadata, reader); +} + +// Serialize code for CreateDatabaseReq + +void Save(const memgraph::storage::replication::CreateDatabaseReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self.epoch_id, builder); + memgraph::slk::Save(self.expected_group_timestamp, builder); + memgraph::slk::Save(self.new_group_timestamp, builder); + memgraph::slk::Save(self.config, builder); +} + +void Load(memgraph::storage::replication::CreateDatabaseReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(&self->epoch_id, reader); + memgraph::slk::Load(&self->expected_group_timestamp, reader); + memgraph::slk::Load(&self->new_group_timestamp, reader); + memgraph::slk::Load(&self->config, reader); +} + +// Serialize code for CreateDatabaseRes + +void Save(const memgraph::storage::replication::CreateDatabaseRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(utils::EnumToNum(self.result), builder); +} + +void Load(memgraph::storage::replication::CreateDatabaseRes *self, memgraph::slk::Reader *reader) { + uint8_t res = 0; + memgraph::slk::Load(&res, reader); + if (!utils::NumToEnum(res, self->result)) { + throw SlkReaderException("Unexpected result line:{}!", __LINE__); + } +} + +// Serialize code for DropDatabaseReq + +void Save(const memgraph::storage::replication::DropDatabaseReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self.epoch_id, builder); + memgraph::slk::Save(self.expected_group_timestamp, builder); + memgraph::slk::Save(self.new_group_timestamp, builder); + memgraph::slk::Save(self.uuid, builder); +} + +void Load(memgraph::storage::replication::DropDatabaseReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(&self->epoch_id, reader); + memgraph::slk::Load(&self->expected_group_timestamp, reader); + memgraph::slk::Load(&self->new_group_timestamp, reader); + memgraph::slk::Load(&self->uuid, reader); +} + +// Serialize code for DropDatabaseRes + +void Save(const memgraph::storage::replication::DropDatabaseRes &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(utils::EnumToNum(self.result), builder); +} + +void Load(memgraph::storage::replication::DropDatabaseRes *self, memgraph::slk::Reader *reader) { + uint8_t res = 0; + memgraph::slk::Load(&res, reader); + if (!utils::NumToEnum(res, self->result)) { + throw SlkReaderException("Unexpected result line:{}!", __LINE__); + } +} + +// Serialize code for SystemRecoveryReq + +void Save(const memgraph::storage::replication::SystemRecoveryReq &self, memgraph::slk::Builder *builder) { + memgraph::slk::Save(self.forced_group_timestamp, builder); + memgraph::slk::Save(self.database_configs, builder); +} + +void Load(memgraph::storage::replication::SystemRecoveryReq *self, memgraph::slk::Reader *reader) { + memgraph::slk::Load(&self->forced_group_timestamp, reader); + memgraph::slk::Load(&self->database_configs, reader); +} + +// Serialize code for SystemRecoveryRes + +void Save(const memgraph::storage::replication::SystemRecoveryRes &self, 
memgraph::slk::Builder *builder) { + memgraph::slk::Save(utils::EnumToNum(self.result), builder); +} + +void Load(memgraph::storage::replication::SystemRecoveryRes *self, memgraph::slk::Reader *reader) { + uint8_t res = 0; + memgraph::slk::Load(&res, reader); + if (!utils::NumToEnum(res, self->result)) { + throw SlkReaderException("Unexpected result line:{}!", __LINE__); + } +} + } // namespace slk } // namespace memgraph diff --git a/src/storage/v2/replication/rpc.hpp b/src/storage/v2/replication/rpc.hpp index 9e2f0b35e..62f8b680c 100644 --- a/src/storage/v2/replication/rpc.hpp +++ b/src/storage/v2/replication/rpc.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -19,6 +19,9 @@ #include "rpc/messages.hpp" #include "slk/serialization.hpp" #include "slk/streams.hpp" +#include "storage/v2/config.hpp" +#include "utils/enum.hpp" +#include "utils/uuid.hpp" namespace memgraph::storage::replication { @@ -29,10 +32,10 @@ struct AppendDeltasReq { static void Load(AppendDeltasReq *self, memgraph::slk::Reader *reader); static void Save(const AppendDeltasReq &self, memgraph::slk::Builder *builder); AppendDeltasReq() = default; - AppendDeltasReq(std::string name, uint64_t previous_commit_timestamp, uint64_t seq_num) - : db_name(std::move(name)), previous_commit_timestamp(previous_commit_timestamp), seq_num(seq_num) {} + AppendDeltasReq(const utils::UUID &uuid, uint64_t previous_commit_timestamp, uint64_t seq_num) + : uuid{uuid}, previous_commit_timestamp(previous_commit_timestamp), seq_num(seq_num) {} - std::string db_name; + utils::UUID uuid; uint64_t previous_commit_timestamp; uint64_t seq_num; }; @@ -44,10 +47,9 @@ struct AppendDeltasRes { static void Load(AppendDeltasRes *self, memgraph::slk::Reader *reader); static void Save(const AppendDeltasRes &self, memgraph::slk::Builder *builder); AppendDeltasRes() = default; - AppendDeltasRes(std::string name, bool success, uint64_t current_commit_timestamp) - : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {} + AppendDeltasRes(bool success, uint64_t current_commit_timestamp) + : success(success), current_commit_timestamp(current_commit_timestamp) {} - std::string db_name; bool success; uint64_t current_commit_timestamp; }; @@ -61,10 +63,10 @@ struct HeartbeatReq { static void Load(HeartbeatReq *self, memgraph::slk::Reader *reader); static void Save(const HeartbeatReq &self, memgraph::slk::Builder *builder); HeartbeatReq() = default; - HeartbeatReq(std::string name, uint64_t main_commit_timestamp, std::string epoch_id) - : db_name(std::move(name)), main_commit_timestamp(main_commit_timestamp), epoch_id(std::move(epoch_id)) {} + HeartbeatReq(const utils::UUID &uuid, uint64_t main_commit_timestamp, std::string epoch_id) + : uuid{uuid}, main_commit_timestamp(main_commit_timestamp), epoch_id(std::move(epoch_id)) {} - std::string db_name; + utils::UUID uuid; uint64_t main_commit_timestamp; std::string epoch_id; }; @@ -76,13 +78,9 @@ struct HeartbeatRes { static void Load(HeartbeatRes *self, memgraph::slk::Reader *reader); static void Save(const HeartbeatRes &self, memgraph::slk::Builder *builder); HeartbeatRes() = default; - HeartbeatRes(std::string name, bool success, uint64_t current_commit_timestamp, std::string epoch_id) - : db_name(std::move(name)), - success(success), - 
current_commit_timestamp(current_commit_timestamp), - epoch_id(std::move(epoch_id)) {} + HeartbeatRes(bool success, uint64_t current_commit_timestamp, std::string epoch_id) + : success(success), current_commit_timestamp(current_commit_timestamp), epoch_id(std::move(epoch_id)) {} - std::string db_name; bool success; uint64_t current_commit_timestamp; std::string epoch_id; @@ -97,9 +95,9 @@ struct SnapshotReq { static void Load(SnapshotReq *self, memgraph::slk::Reader *reader); static void Save(const SnapshotReq &self, memgraph::slk::Builder *builder); SnapshotReq() = default; - explicit SnapshotReq(std::string name) : db_name(std::move(name)) {} + explicit SnapshotReq(const utils::UUID &uuid) : uuid{uuid} {} - std::string db_name; + utils::UUID uuid; }; struct SnapshotRes { @@ -109,10 +107,9 @@ struct SnapshotRes { static void Load(SnapshotRes *self, memgraph::slk::Reader *reader); static void Save(const SnapshotRes &self, memgraph::slk::Builder *builder); SnapshotRes() = default; - SnapshotRes(std::string name, bool success, uint64_t current_commit_timestamp) - : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {} + SnapshotRes(bool success, uint64_t current_commit_timestamp) + : success(success), current_commit_timestamp(current_commit_timestamp) {} - std::string db_name; bool success; uint64_t current_commit_timestamp; }; @@ -126,9 +123,9 @@ struct WalFilesReq { static void Load(WalFilesReq *self, memgraph::slk::Reader *reader); static void Save(const WalFilesReq &self, memgraph::slk::Builder *builder); WalFilesReq() = default; - explicit WalFilesReq(std::string name, uint64_t file_number) : db_name(std::move(name)), file_number(file_number) {} + explicit WalFilesReq(const utils::UUID &uuid, uint64_t file_number) : uuid{uuid}, file_number(file_number) {} - std::string db_name; + utils::UUID uuid; uint64_t file_number; }; @@ -139,10 +136,9 @@ struct WalFilesRes { static void Load(WalFilesRes *self, memgraph::slk::Reader *reader); static void Save(const WalFilesRes &self, memgraph::slk::Builder *builder); WalFilesRes() = default; - WalFilesRes(std::string name, bool success, uint64_t current_commit_timestamp) - : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {} + WalFilesRes(bool success, uint64_t current_commit_timestamp) + : success(success), current_commit_timestamp(current_commit_timestamp) {} - std::string db_name; bool success; uint64_t current_commit_timestamp; }; @@ -156,9 +152,9 @@ struct CurrentWalReq { static void Load(CurrentWalReq *self, memgraph::slk::Reader *reader); static void Save(const CurrentWalReq &self, memgraph::slk::Builder *builder); CurrentWalReq() = default; - explicit CurrentWalReq(std::string name) : db_name(std::move(name)) {} + explicit CurrentWalReq(const utils::UUID &uuid) : uuid{uuid} {} - std::string db_name; + utils::UUID uuid; }; struct CurrentWalRes { @@ -168,10 +164,9 @@ struct CurrentWalRes { static void Load(CurrentWalRes *self, memgraph::slk::Reader *reader); static void Save(const CurrentWalRes &self, memgraph::slk::Builder *builder); CurrentWalRes() = default; - CurrentWalRes(std::string name, bool success, uint64_t current_commit_timestamp) - : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {} + CurrentWalRes(bool success, uint64_t current_commit_timestamp) + : success(success), current_commit_timestamp(current_commit_timestamp) {} - std::string db_name; bool success; uint64_t current_commit_timestamp; }; 
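With db_name gone from every request and response above, callers identify the target storage by its UUID, matching the Storage::uuid() accessor introduced further down in this diff. A minimal caller-side sketch, with the actual rpc client plumbing elided since it is not part of these hunks:

#include "storage/v2/replication/rpc.hpp"
#include "storage/v2/storage.hpp"

void RequestSnapshotSketch(memgraph::storage::Storage &storage) {
  // Requests are now keyed by the immutable storage UUID rather than the database name.
  memgraph::storage::replication::SnapshotReq req{storage.uuid()};
  // Before this change the call site would have carried storage.id(); that accessor
  // is renamed to name() and the string no longer travels over the replication RPCs.
  // ... stream `req` through SnapshotRpc exactly as before ...
}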
@@ -185,9 +180,9 @@ struct TimestampReq { static void Load(TimestampReq *self, memgraph::slk::Reader *reader); static void Save(const TimestampReq &self, memgraph::slk::Builder *builder); TimestampReq() = default; - explicit TimestampReq(std::string name) : db_name(std::move(name)) {} + explicit TimestampReq(const utils::UUID &uuid) : uuid{uuid} {} - std::string db_name; + utils::UUID uuid; }; struct TimestampRes { @@ -197,15 +192,117 @@ struct TimestampRes { static void Load(TimestampRes *self, memgraph::slk::Reader *reader); static void Save(const TimestampRes &self, memgraph::slk::Builder *builder); TimestampRes() = default; - TimestampRes(std::string name, bool success, uint64_t current_commit_timestamp) - : db_name(std::move(name)), success(success), current_commit_timestamp(current_commit_timestamp) {} + TimestampRes(bool success, uint64_t current_commit_timestamp) + : success(success), current_commit_timestamp(current_commit_timestamp) {} - std::string db_name; bool success; uint64_t current_commit_timestamp; }; using TimestampRpc = rpc::RequestResponse; + +struct CreateDatabaseReq { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + static void Load(CreateDatabaseReq *self, memgraph::slk::Reader *reader); + static void Save(const CreateDatabaseReq &self, memgraph::slk::Builder *builder); + CreateDatabaseReq() = default; + CreateDatabaseReq(std::string epoch_id, uint64_t expected_group_timestamp, uint64_t new_group_timestamp, + storage::SalientConfig config) + : epoch_id(std::move(epoch_id)), + expected_group_timestamp{expected_group_timestamp}, + new_group_timestamp(new_group_timestamp), + config(std::move(config)) {} + + std::string epoch_id; + uint64_t expected_group_timestamp; + uint64_t new_group_timestamp; + storage::SalientConfig config; +}; + +struct CreateDatabaseRes { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + enum class Result : uint8_t { SUCCESS, NO_NEED, FAILURE, /* Leave at end */ N }; + + static void Load(CreateDatabaseRes *self, memgraph::slk::Reader *reader); + static void Save(const CreateDatabaseRes &self, memgraph::slk::Builder *builder); + CreateDatabaseRes() = default; + explicit CreateDatabaseRes(Result res) : result(res) {} + + Result result; +}; + +using CreateDatabaseRpc = rpc::RequestResponse; + +struct DropDatabaseReq { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + static void Load(DropDatabaseReq *self, memgraph::slk::Reader *reader); + static void Save(const DropDatabaseReq &self, memgraph::slk::Builder *builder); + DropDatabaseReq() = default; + DropDatabaseReq(std::string epoch_id, uint64_t expected_group_timestamp, uint64_t new_group_timestamp, + const utils::UUID &uuid) + : epoch_id(std::move(epoch_id)), + expected_group_timestamp{expected_group_timestamp}, + new_group_timestamp(new_group_timestamp), + uuid(uuid) {} + + std::string epoch_id; + uint64_t expected_group_timestamp; + uint64_t new_group_timestamp; + utils::UUID uuid; +}; + +struct DropDatabaseRes { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + enum class Result : uint8_t { SUCCESS, NO_NEED, FAILURE, /* Leave at end */ N }; + + static void Load(DropDatabaseRes *self, memgraph::slk::Reader *reader); + static void Save(const DropDatabaseRes &self, memgraph::slk::Builder *builder); + DropDatabaseRes() = default; + explicit DropDatabaseRes(Result res) : 
result(res) {} + + Result result; +}; + +using DropDatabaseRpc = rpc::RequestResponse; + +struct SystemRecoveryReq { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + static void Load(SystemRecoveryReq *self, memgraph::slk::Reader *reader); + static void Save(const SystemRecoveryReq &self, memgraph::slk::Builder *builder); + SystemRecoveryReq() = default; + SystemRecoveryReq(uint64_t forced_group_timestamp, std::vector database_configs) + : forced_group_timestamp{forced_group_timestamp}, database_configs(std::move(database_configs)) {} + + uint64_t forced_group_timestamp; + std::vector database_configs; +}; + +struct SystemRecoveryRes { + static const utils::TypeInfo kType; + static const utils::TypeInfo &GetTypeInfo() { return kType; } + + enum class Result : uint8_t { SUCCESS, NO_NEED, FAILURE, /* Leave at end */ N }; + + static void Load(SystemRecoveryRes *self, memgraph::slk::Reader *reader); + static void Save(const SystemRecoveryRes &self, memgraph::slk::Builder *builder); + SystemRecoveryRes() = default; + explicit SystemRecoveryRes(Result res) : result(res) {} + + Result result; +}; + +using SystemRecoveryRpc = rpc::RequestResponse; + } // namespace memgraph::storage::replication // SLK serialization declarations @@ -259,4 +356,28 @@ void Save(const memgraph::storage::replication::AppendDeltasReq &self, memgraph: void Load(memgraph::storage::replication::AppendDeltasReq *self, memgraph::slk::Reader *reader); +void Save(const memgraph::storage::replication::CreateDatabaseReq &self, memgraph::slk::Builder *builder); + +void Load(memgraph::storage::replication::CreateDatabaseReq *self, memgraph::slk::Reader *reader); + +void Save(const memgraph::storage::replication::CreateDatabaseRes &self, memgraph::slk::Builder *builder); + +void Load(memgraph::storage::replication::CreateDatabaseRes *self, memgraph::slk::Reader *reader); + +void Save(const memgraph::storage::replication::DropDatabaseReq &self, memgraph::slk::Builder *builder); + +void Load(memgraph::storage::replication::DropDatabaseReq *self, memgraph::slk::Reader *reader); + +void Save(const memgraph::storage::replication::DropDatabaseRes &self, memgraph::slk::Builder *builder); + +void Load(memgraph::storage::replication::DropDatabaseRes *self, memgraph::slk::Reader *reader); + +void Save(const memgraph::storage::replication::SystemRecoveryReq &self, memgraph::slk::Builder *builder); + +void Load(memgraph::storage::replication::SystemRecoveryReq *self, memgraph::slk::Reader *reader); + +void Save(const memgraph::storage::replication::SystemRecoveryRes &self, memgraph::slk::Builder *builder); + +void Load(memgraph::storage::replication::SystemRecoveryRes *self, memgraph::slk::Reader *reader); + } // namespace memgraph::slk diff --git a/src/storage/v2/replication/slk.hpp b/src/storage/v2/replication/slk.hpp index a202e55af..1d1399cb8 100644 --- a/src/storage/v2/replication/slk.hpp +++ b/src/storage/v2/replication/slk.hpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
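CreateDatabase, DropDatabase and SystemRecovery above all follow the same recipe, so adding another replication message is mostly boilerplate: a Req/Res pair with kType, GetTypeInfo and static Load/Save, a result enum ending in the N sentinel, and an rpc::RequestResponse alias. The sketch below shows that shape for a hypothetical FlushReq/FlushRes pair; the names are invented, and the <Req, Res> template arguments on the alias are an assumption since the extracted diff dropped angle brackets.

namespace memgraph::storage::replication {

struct FlushReq {
  static const utils::TypeInfo kType;  // backed by a new REP_* TypeId in the stable 2000+ range
  static const utils::TypeInfo &GetTypeInfo() { return kType; }

  static void Load(FlushReq *self, memgraph::slk::Reader *reader);
  static void Save(const FlushReq &self, memgraph::slk::Builder *builder);
  FlushReq() = default;
  explicit FlushReq(const utils::UUID &uuid) : uuid{uuid} {}

  utils::UUID uuid;
};

struct FlushRes {
  static const utils::TypeInfo kType;
  static const utils::TypeInfo &GetTypeInfo() { return kType; }

  enum class Result : uint8_t { SUCCESS, FAILURE, /* Leave at end */ N };

  static void Load(FlushRes *self, memgraph::slk::Reader *reader);
  static void Save(const FlushRes &self, memgraph::slk::Builder *builder);
  FlushRes() = default;
  explicit FlushRes(Result res) : result(res) {}

  Result result;
};

using FlushRpc = rpc::RequestResponse<FlushReq, FlushRes>;

}  // namespace memgraph::storage::replication

The matching Save/Load free functions are then declared in namespace memgraph::slk, exactly like the CreateDatabase/DropDatabase/SystemRecovery declarations above.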
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -25,17 +25,4 @@ void Load(storage::Gid *gid, slk::Reader *reader); void Save(const storage::PropertyValue &value, slk::Builder *builder); void Load(storage::PropertyValue *value, slk::Reader *reader); -template -void Save(const T &enum_value, slk::Builder *builder) { - slk::Save(utils::UnderlyingCast(enum_value), builder); -} - -template -void Load(T *enum_value, slk::Reader *reader) { - using UnderlyingType = std::underlying_type_t; - UnderlyingType value; - slk::Load(&value, reader); - *enum_value = static_cast(value); -} - } // namespace memgraph::slk diff --git a/src/storage/v2/storage.cpp b/src/storage/v2/storage.cpp index fcfcb2a82..8b5c79cf5 100644 --- a/src/storage/v2/storage.cpp +++ b/src/storage/v2/storage.cpp @@ -45,13 +45,13 @@ Storage::Storage(Config config, StorageMode storage_mode) isolation_level_(config.transaction.isolation_level), storage_mode_(storage_mode), indices_(config, storage_mode), - constraints_(config, storage_mode), - id_(config.name) { + constraints_(config, storage_mode) { spdlog::info("Created database with {} storage mode.", StorageModeToString(storage_mode)); } Storage::Accessor::Accessor(SharedAccess /* tag */, Storage *storage, IsolationLevel isolation_level, - StorageMode storage_mode, memgraph::replication::ReplicationRole replication_role) + StorageMode storage_mode, + memgraph::replication_coordination_glue::ReplicationRole replication_role) : storage_(storage), // The lock must be acquired before creating the transaction object to // prevent freshly created transactions from dangling in an active state @@ -63,7 +63,8 @@ Storage::Accessor::Accessor(SharedAccess /* tag */, Storage *storage, IsolationL creation_storage_mode_(storage_mode) {} Storage::Accessor::Accessor(UniqueAccess /* tag */, Storage *storage, IsolationLevel isolation_level, - StorageMode storage_mode, memgraph::replication::ReplicationRole replication_role) + StorageMode storage_mode, + memgraph::replication_coordination_glue::ReplicationRole replication_role) : storage_(storage), // The lock must be acquired before creating the transaction object to // prevent freshly created transactions from dangling in an active state @@ -337,7 +338,7 @@ EdgeInfoForDeletion Storage::Accessor::PrepareDeletableEdges(const std::unordere const auto &[edge_type, opposing_vertex, edge] = item; if (!vertices.contains(opposing_vertex)) { partial_delete_vertices.insert(opposing_vertex); - auto const edge_gid = storage_->config_.items.properties_on_edges ? edge.ptr->gid : edge.gid; + auto const edge_gid = storage_->config_.salient.items.properties_on_edges ? edge.ptr->gid : edge.gid; edge_ids.insert(edge_gid); } }; @@ -399,7 +400,7 @@ Result>> Storage::Accessor::ClearEdgesOn /// TODO: (andi) Again here, no need to lock the edge if using on disk storage. 
std::unique_lock guard; - if (storage_->config_.items.properties_on_edges) { + if (storage_->config_.salient.items.properties_on_edges) { auto edge_ptr = edge_ref.ptr; guard = std::unique_lock{edge_ptr->lock}; @@ -416,12 +417,12 @@ Result>> Storage::Accessor::ClearEdgesOn edge_type = edge_type, opposing_vertex = opposing_vertex, edge_ref = edge_ref, this]() { attached_edges_to_vertex->pop_back(); - if (this->storage_->config_.items.properties_on_edges) { + if (this->storage_->config_.salient.items.properties_on_edges) { auto *edge_ptr = edge_ref.ptr; MarkEdgeAsDeleted(edge_ptr); } - auto const edge_gid = storage_->config_.items.properties_on_edges ? edge_ref.ptr->gid : edge_ref.gid; + auto const edge_gid = storage_->config_.salient.items.properties_on_edges ? edge_ref.ptr->gid : edge_ref.gid; auto const [_, was_inserted] = deleted_edge_ids.insert(edge_gid); bool const edge_cleared_from_both_directions = !was_inserted; if (edge_cleared_from_both_directions) { @@ -471,7 +472,7 @@ Result>> Storage::Accessor::DetachRemain auto mid = std::partition( edges_attached_to_vertex->begin(), edges_attached_to_vertex->end(), [this, &set_for_erasure](auto &edge) { auto const &[edge_type, opposing_vertex, edge_ref] = edge; - auto const edge_gid = storage_->config_.items.properties_on_edges ? edge_ref.ptr->gid : edge_ref.gid; + auto const edge_gid = storage_->config_.salient.items.properties_on_edges ? edge_ref.ptr->gid : edge_ref.gid; return !set_for_erasure.contains(edge_gid); }); @@ -483,7 +484,7 @@ Result>> Storage::Accessor::DetachRemain for (auto it = mid; it != edges_attached_to_vertex->end(); it++) { auto const &[edge_type, opposing_vertex, edge_ref] = *it; std::unique_lock guard; - if (storage_->config_.items.properties_on_edges) { + if (storage_->config_.salient.items.properties_on_edges) { auto edge_ptr = edge_ref.ptr; guard = std::unique_lock{edge_ptr->lock}; // this can happen only if we marked edges for deletion with no nodes, @@ -493,7 +494,7 @@ Result>> Storage::Accessor::DetachRemain CreateAndLinkDelta(&transaction_, vertex_ptr, deletion_delta, edge_type, opposing_vertex, edge_ref); - auto const edge_gid = storage_->config_.items.properties_on_edges ? edge_ref.ptr->gid : edge_ref.gid; + auto const edge_gid = storage_->config_.salient.items.properties_on_edges ? 
edge_ref.ptr->gid : edge_ref.gid; auto const [_, was_inserted] = partially_detached_edge_ids.insert(edge_gid); bool const edge_cleared_from_both_directions = !was_inserted; if (edge_cleared_from_both_directions) { @@ -506,7 +507,6 @@ Result>> Storage::Accessor::DetachRemain }}; std::invoke(atomic_memory_block); - return std::make_optional(); }; diff --git a/src/storage/v2/storage.hpp b/src/storage/v2/storage.hpp index 288eae272..c6b4a6846 100644 --- a/src/storage/v2/storage.hpp +++ b/src/storage/v2/storage.hpp @@ -12,6 +12,8 @@ #pragma once #include +#include +#include #include #include #include @@ -24,6 +26,7 @@ #include "storage/v2/all_vertices_iterable.hpp" #include "storage/v2/commit_log.hpp" #include "storage/v2/config.hpp" +#include "storage/v2/database_access.hpp" #include "storage/v2/durability/paths.hpp" #include "storage/v2/durability/wal.hpp" #include "storage/v2/edge_accessor.hpp" @@ -52,7 +55,6 @@ extern const Event ActiveLabelPropertyIndices; } // namespace memgraph::metrics namespace memgraph::storage { - struct Transaction; class EdgeAccessor; @@ -109,6 +111,15 @@ struct EdgeInfoForDeletion { std::unordered_set partial_dest_vertices{}; }; +struct CommitReplArgs { + // REPLICA on recipt of Deltas will have a desired commit timestamp + std::optional desired_commit_timestamp = std::nullopt; + + bool is_main = true; + + bool IsMain() { return is_main; } +}; + class Storage { friend class ReplicationServer; friend class ReplicationStorageClient; @@ -123,7 +134,9 @@ class Storage { virtual ~Storage() = default; - const std::string &id() const { return id_; } + const std::string &name() const { return config_.salient.name; } + + const utils::UUID &uuid() const { return config_.salient.uuid; } class Accessor { public: @@ -133,9 +146,9 @@ class Storage { } unique_access; Accessor(SharedAccess /* tag */, Storage *storage, IsolationLevel isolation_level, StorageMode storage_mode, - memgraph::replication::ReplicationRole replication_role); + memgraph::replication_coordination_glue::ReplicationRole replication_role); Accessor(UniqueAccess /* tag */, Storage *storage, IsolationLevel isolation_level, StorageMode storage_mode, - memgraph::replication::ReplicationRole replication_role); + memgraph::replication_coordination_glue::ReplicationRole replication_role); Accessor(const Accessor &) = delete; Accessor &operator=(const Accessor &) = delete; Accessor &operator=(Accessor &&other) = delete; @@ -225,8 +238,8 @@ class Storage { virtual ConstraintsInfo ListAllConstraints() const = 0; // NOLINTNEXTLINE(google-default-arguments) - virtual utils::BasicResult Commit( - std::optional desired_commit_timestamp = {}, bool is_main = true) = 0; + virtual utils::BasicResult Commit(CommitReplArgs reparg = {}, + DatabaseAccessProtector db_acc = {}) = 0; virtual void Abort() = 0; @@ -250,7 +263,7 @@ class Storage { StorageMode GetCreationStorageMode() const noexcept; - const std::string &id() const { return storage_->id(); } + const std::string &id() const { return storage_->name(); } std::vector ListAllPossiblyPresentVertexLabels() const; @@ -337,16 +350,17 @@ class Storage { void FreeMemory() { FreeMemory({}); } - virtual std::unique_ptr Access(memgraph::replication::ReplicationRole replication_role, + virtual std::unique_ptr Access(memgraph::replication_coordination_glue::ReplicationRole replication_role, std::optional override_isolation_level) = 0; - std::unique_ptr Access(memgraph::replication::ReplicationRole replication_role) { + std::unique_ptr 
Access(memgraph::replication_coordination_glue::ReplicationRole replication_role) { return Access(replication_role, {}); } - virtual std::unique_ptr UniqueAccess(memgraph::replication::ReplicationRole replication_role, - std::optional override_isolation_level) = 0; - std::unique_ptr UniqueAccess(memgraph::replication::ReplicationRole replication_role) { + virtual std::unique_ptr UniqueAccess( + memgraph::replication_coordination_glue::ReplicationRole replication_role, + std::optional override_isolation_level) = 0; + std::unique_ptr UniqueAccess(memgraph::replication_coordination_glue::ReplicationRole replication_role) { return UniqueAccess(replication_role, {}); } @@ -365,10 +379,11 @@ class Storage { return GetBaseInfo(force_dir); } - virtual StorageInfo GetInfo(bool force_directory, memgraph::replication::ReplicationRole replication_role) = 0; + virtual StorageInfo GetInfo(bool force_directory, + memgraph::replication_coordination_glue::ReplicationRole replication_role) = 0; virtual Transaction CreateTransaction(IsolationLevel isolation_level, StorageMode storage_mode, - memgraph::replication::ReplicationRole replication_role) = 0; + memgraph::replication_coordination_glue::ReplicationRole replication_role) = 0; virtual void PrepareForNewEpoch() = 0; @@ -421,7 +436,6 @@ class Storage { std::atomic vertex_id_{0}; std::atomic edge_id_{0}; - const std::string id_; //!< High-level assigned ID }; } // namespace memgraph::storage diff --git a/src/storage/v2/storage_mode.hpp b/src/storage/v2/storage_mode.hpp index c02d3c177..f4a133f38 100644 --- a/src/storage/v2/storage_mode.hpp +++ b/src/storage/v2/storage_mode.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -11,14 +11,19 @@ #pragma once +#include #include #include - namespace memgraph::storage { enum class StorageMode : std::uint8_t { IN_MEMORY_ANALYTICAL, IN_MEMORY_TRANSACTIONAL, ON_DISK_TRANSACTIONAL }; -bool IsTransactional(const StorageMode storage_mode) noexcept; +inline constexpr std::array storage_mode_mappings{ + std::pair{std::string_view{"IN_MEMORY_TRANSACTIONAL"}, memgraph::storage::StorageMode::IN_MEMORY_TRANSACTIONAL}, + std::pair{std::string_view{"IN_MEMORY_ANALYTICAL"}, memgraph::storage::StorageMode::IN_MEMORY_ANALYTICAL}, + std::pair{std::string_view{"ON_DISK_TRANSACTIONAL"}, memgraph::storage::StorageMode::ON_DISK_TRANSACTIONAL}}; + +bool IsTransactional(StorageMode storage_mode) noexcept; std::string_view StorageModeToString(memgraph::storage::StorageMode storage_mode); diff --git a/src/storage/v2/vertex_accessor.cpp b/src/storage/v2/vertex_accessor.cpp index dfd39a43c..d6a87933b 100644 --- a/src/storage/v2/vertex_accessor.cpp +++ b/src/storage/v2/vertex_accessor.cpp @@ -117,7 +117,7 @@ Result VertexAccessor::AddLabel(LabelId label, bool update_text_index) { }}; std::invoke(atomic_memory_block); - if (storage_->config_.items.enable_schema_metadata) { + if (storage_->config_.salient.items.enable_schema_metadata) { storage_->stored_node_labels_.try_insert(label); } diff --git a/src/utils/CMakeLists.txt b/src/utils/CMakeLists.txt index 276927725..bac3e78f3 100644 --- a/src/utils/CMakeLists.txt +++ b/src/utils/CMakeLists.txt @@ -24,8 +24,8 @@ find_package(Threads REQUIRED) add_library(mg-utils STATIC ${utils_src_files}) add_library(mg::utils ALIAS mg-utils) -target_link_libraries(mg-utils PUBLIC 
Boost::headers fmt::fmt spdlog::spdlog) -target_link_libraries(mg-utils PRIVATE librdtsc stdc++fs Threads::Threads gflags json uuid rt) +target_link_libraries(mg-utils PUBLIC Boost::headers fmt::fmt spdlog::spdlog json) +target_link_libraries(mg-utils PRIVATE librdtsc stdc++fs Threads::Threads gflags uuid rt) set(settings_src_files settings.cpp) diff --git a/src/utils/counter.hpp b/src/utils/counter.hpp new file mode 100644 index 000000000..0d9aabca8 --- /dev/null +++ b/src/utils/counter.hpp @@ -0,0 +1,29 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#pragma once + +#include + +namespace memgraph::utils { + +/// A resetable counter, every Nth call returns true +template +auto ResettableCounter() { + return [counter = N]() mutable { + --counter; + if (counter != 0) return false; + counter = N; + return true; + }; +} + +} // namespace memgraph::utils diff --git a/src/utils/enum.hpp b/src/utils/enum.hpp index 505802088..50a6d4621 100644 --- a/src/utils/enum.hpp +++ b/src/utils/enum.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -54,4 +54,46 @@ std::optional StringToEnum(const auto &value, const auto &mappings) { return mapping_iter->second; } + +// Tries to convert a enum into string, which would then contain a value if the conversion +// has been successful. +template +auto EnumToString(const auto &value, const auto &mappings) -> std::optional { + const auto mapping_iter = + std::find_if(mappings.begin(), mappings.end(), [&](const auto &mapping) { return mapping.second == value; }); + if (mapping_iter == mappings.cend()) { + return std::nullopt; + } + return mapping_iter->first; +} + +template +requires std::integral +inline T EnumToNum(Enum res) { + static_assert(std::numeric_limits::max() >= static_cast(Enum::N)); + return static_cast(res); +} + +template +requires std::integral +inline bool NumToEnum(T input, Enum &res) { + if (input >= EnumToNum(Enum::N)) return false; + res = static_cast(input); + return true; +} + +template +requires std::integral +inline T EnumToNum(Enum res) { + static_assert(std::numeric_limits::max() >= Num); + return static_cast(res); +} + +template +requires std::integral +inline bool NumToEnum(T input, Enum &res) { + if (input >= Num) return false; + res = static_cast(input); + return true; +} } // namespace memgraph::utils diff --git a/src/utils/file.cpp b/src/utils/file.cpp index de6590620..73ea424ac 100644 --- a/src/utils/file.cpp +++ b/src/utils/file.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
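The new utils/counter.hpp above returns a lambda that fires once every N calls and then rearms itself, which is handy for doing periodic work on a hot path without a separate counter variable. A small usage sketch, where the flush itself is illustrative:

#include <cstdint>

#include "utils/counter.hpp"

void AppendManySketch() {
  // True on the 1024th, 2048th, ... invocation; false otherwise.
  auto should_flush = memgraph::utils::ResettableCounter<1024>();
  for (uint64_t i = 0; i < 100'000; ++i) {
    // ... append one item to some buffer ...
    if (should_flush()) {
      // ... flush the buffer, roughly once per 1024 appends ...
    }
  }
}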
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -504,7 +504,7 @@ void OutputFile::Close() noexcept { void OutputFile::FlushBuffer(bool force_flush) { MG_ASSERT(IsOpen(), "Flushing an unopend file."); - if (!force_flush && buffer_position_.load() < kFileBufferSize) return; + if (!force_flush && buffer_position_ < kFileBufferSize) return; std::unique_lock flush_guard(flush_lock_); FlushBufferInternal(); diff --git a/src/utils/gatekeeper.hpp b/src/utils/gatekeeper.hpp index 21dad2543..862cad982 100644 --- a/src/utils/gatekeeper.hpp +++ b/src/utils/gatekeeper.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -78,21 +78,33 @@ struct EvalResult { template EvalResult(run_t, Func &&, T &) -> EvalResult>; +template +struct GKInternals { + template + explicit GKInternals(Args &&...args) : value_{std::in_place, std::forward(args)...} {} + + std::optional value_; + uint64_t count_ = 0; + std::atomic_bool is_deleting = false; + std::mutex mutex_; // TODO change to something cheaper? + std::condition_variable cv_; +}; + template struct Gatekeeper { template - explicit Gatekeeper(Args &&...args) : value_{std::in_place, std::forward(args)...} {} + explicit Gatekeeper(Args &&...args) : pimpl_(std::make_unique>(std::forward(args)...)) {} Gatekeeper(Gatekeeper const &) = delete; - Gatekeeper(Gatekeeper &&) noexcept = delete; + Gatekeeper(Gatekeeper &&) noexcept = default; Gatekeeper &operator=(Gatekeeper const &) = delete; - Gatekeeper &operator=(Gatekeeper &&) = delete; + Gatekeeper &operator=(Gatekeeper &&) noexcept = default; struct Accessor { friend Gatekeeper; private: - explicit Accessor(Gatekeeper *owner) : owner_{owner} { ++owner_->count_; } + explicit Accessor(Gatekeeper *owner) : owner_{owner->pimpl_.get()} { ++owner_->count_; } public: Accessor(Accessor const &other) : owner_{other.owner_} { @@ -139,6 +151,14 @@ struct Gatekeeper { return *this; } + [[nodiscard]] bool is_deleting() const { return owner_->is_deleting; } + + void prepare_for_deletion() { + if (owner_) { + owner_->is_deleting = true; + } + } + ~Accessor() { reset(); } auto get() -> T * { return std::addressof(*owner_->value_); } @@ -159,18 +179,26 @@ struct Gatekeeper { } // Completely invalidated the accessor if return true - [[nodiscard]] bool try_delete(std::chrono::milliseconds timeout = std::chrono::milliseconds(100)) { + template + [[nodiscard]] bool try_delete(std::chrono::milliseconds timeout = std::chrono::milliseconds(100), + Func &&predicate = {}) { // Prevent new access auto guard = std::unique_lock{owner_->mutex_}; if (!owner_->cv_.wait_for(guard, timeout, [this] { return owner_->count_ == 1; })) { return false; } - // Delete value + // Already deleted + if (owner_->value_ == std::nullopt) return true; + // Delete value if ok + if (!predicate(*owner_->value_)) return false; owner_->value_ = std::nullopt; return true; } - explicit operator bool() const { return owner_ != nullptr; } + explicit operator bool() const { + return owner_ != nullptr // we have access + && !owner_->is_deleting; // AND we are allowed to use it + } void reset() { if (owner_) { @@ -186,28 +214,27 @@ struct Gatekeeper { friend bool operator==(Accessor const &lhs, Accessor const &rhs) 
{ return lhs.owner_ == rhs.owner_; } private: - Gatekeeper *owner_ = nullptr; + GKInternals *owner_ = nullptr; }; std::optional access() { - auto guard = std::unique_lock{mutex_}; - if (value_) { + auto guard = std::unique_lock{pimpl_->mutex_}; + if (pimpl_->value_) { return Accessor{this}; } return std::nullopt; } ~Gatekeeper() { + if (!pimpl_) return; // Moved out, nothing to do + pimpl_->is_deleting = true; // wait for count to drain to 0 - auto lock = std::unique_lock{mutex_}; - cv_.wait(lock, [this] { return count_ == 0; }); + auto lock = std::unique_lock{pimpl_->mutex_}; + pimpl_->cv_.wait(lock, [this] { return pimpl_->count_ == 0; }); } private: - std::optional value_; - uint64_t count_ = 0; - std::mutex mutex_; // TODO change to something cheaper? - std::condition_variable cv_; + std::unique_ptr> pimpl_; }; } // namespace memgraph::utils diff --git a/src/utils/resource_lock.hpp b/src/utils/resource_lock.hpp index 7d6be2685..7a3ef9444 100644 --- a/src/utils/resource_lock.hpp +++ b/src/utils/resource_lock.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -48,6 +48,15 @@ struct ResourceLock { } return false; } + + template + bool try_lock_for(const std::chrono::duration &timeout_duration) { + auto lock = std::unique_lock{mtx}; + if (!cv.wait_for(lock, timeout_duration, [this] { return state == UNLOCKED; })) return false; + state = UNIQUE; + return true; + } + bool try_lock_shared() { auto lock = std::unique_lock{mtx}; if (state != UNIQUE) { @@ -71,6 +80,22 @@ struct ResourceLock { } } + void upgrade_to_unique() { + auto lock = std::unique_lock{mtx}; + cv.wait(lock, [this] { return count == 1; }); + state = UNIQUE; + count = 0; + } + + template + bool try_upgrade_to_unique(const std::chrono::duration &timeout_duration) { + auto lock = std::unique_lock{mtx}; + if (!cv.wait_for(lock, timeout_duration, [this] { return count == 1; })) return false; + state = UNIQUE; + count = 0; + return true; + } + private: std::mutex mtx; std::condition_variable cv; @@ -78,4 +103,46 @@ struct ResourceLock { uint64_t count = 0; }; +struct ResourceLockGuard { + private: + enum states { UNIQUE, SHARED }; + + public: + explicit ResourceLockGuard(ResourceLock &thing) + : ptr{&thing}, state{[this]() { + ptr->lock_shared(); + return SHARED; + }()} {} + + void upgrade_to_unique() { + if (state == SHARED) { + ptr->upgrade_to_unique(); + state = UNIQUE; + } + } + + template + bool try_upgrade_to_unique(const std::chrono::duration &timeout_duration) { + if (state != SHARED) return true; // already locked + if (!ptr->try_upgrade_to_unique(timeout_duration)) return false; // timeout + state = UNIQUE; + return true; + } + + ~ResourceLockGuard() { + switch (state) { + case UNIQUE: + ptr->unlock(); + break; + case SHARED: + ptr->unlock_shared(); + break; + } + } + + private: + ResourceLock *ptr; + states state; +}; + } // namespace memgraph::utils diff --git a/src/utils/scheduler.hpp b/src/utils/scheduler.hpp index d96178598..742271a95 100644 --- a/src/utils/scheduler.hpp +++ b/src/utils/scheduler.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
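ResourceLockGuard above always enters in the SHARED state and only ever promotes, never demotes, so the intended pattern is to read first and upgrade only when a write turns out to be needed; try_upgrade_to_unique succeeds only once this guard is the last shared holder. A sketch of that pattern, with the actual work left as comments:

#include <chrono>

#include "utils/resource_lock.hpp"

bool InspectAndMaybeMutateSketch(memgraph::utils::ResourceLock &lock) {
  memgraph::utils::ResourceLockGuard guard{lock};  // acquires the lock in SHARED mode

  // ... read-only checks under the shared lock ...

  using namespace std::chrono_literals;
  if (!guard.try_upgrade_to_unique(100ms)) {
    return false;  // other shared holders are still around; give up without mutating
  }

  // ... exclusive section; the destructor calls unlock() because the state is now UNIQUE ...
  return true;
}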
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -58,28 +58,40 @@ class Scheduler { // the start of the program. Since Server will log some messages on // the program start we let him log first and we make sure by first // waiting that funcion f will not log before it. + // Check for pause also. std::unique_lock lk(mutex_); auto now = std::chrono::system_clock::now(); start_time += pause; if (start_time > now) { - condition_variable_.wait_until(lk, start_time, [&] { return is_working_.load() == false; }); + condition_variable_.wait_until(lk, start_time, [&] { return !is_working_.load(); }); } else { start_time = now; } + pause_cv_.wait(lk, [&] { return !is_paused_.load(); }); + if (!is_working_) break; f(); } }); } + void Resume() { + is_paused_.store(false); + pause_cv_.notify_one(); + } + + void Pause() { is_paused_.store(true); } + /** * @brief Stops the thread execution. This is a blocking call and may take as * much time as one call to the function given previously to Run takes. * @throw std::system_error */ void Stop() { + is_paused_.store(false); is_working_.store(false); + pause_cv_.notify_one(); condition_variable_.notify_one(); if (thread_.joinable()) thread_.join(); } @@ -97,6 +109,16 @@ class Scheduler { */ std::atomic is_working_{false}; + /** + * Variable is true when thread is paused. + */ + std::atomic is_paused_{false}; + + /* + * Wait until the thread is resumed. + */ + std::condition_variable pause_cv_; + /** * Mutex used to synchronize threads using condition variable. */ diff --git a/src/utils/settings.cpp b/src/utils/settings.cpp index 330b43f48..4768edc42 100644 --- a/src/utils/settings.cpp +++ b/src/utils/settings.cpp @@ -27,6 +27,7 @@ void Settings::Finalize() { std::lock_guard settings_guard{settings_lock_}; storage_.reset(); on_change_callbacks_.clear(); + validations_.clear(); } void Settings::RegisterSetting(std::string name, const std::string &default_value, OnChangeCallback callback, diff --git a/src/utils/skip_list.hpp b/src/utils/skip_list.hpp index de4892375..193d83b0b 100644 --- a/src/utils/skip_list.hpp +++ b/src/utils/skip_list.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -365,9 +365,7 @@ class SkipListGc final { leftover.Push(*item); } } - while ((item = leftover.Pop())) { - deleted_.Push(*item); - } + deleted_ = std::move(leftover); } MemoryResource *GetMemoryResource() const { return memory_; } @@ -384,11 +382,14 @@ class SkipListGc final { } // Delete all items that have to be garbage collected. - std::optional item; - while ((item = deleted_.Pop())) { - size_t bytes = SkipListNodeSize(*item->second); - item->second->~TNode(); - memory_->Deallocate(item->second, bytes); + { + std::optional item; + std::unique_lock guard(lock_); + while ((item = deleted_.Pop())) { + size_t bytes = SkipListNodeSize(*item->second); + item->second->~TNode(); + memory_->Deallocate(item->second, bytes); + } } // Reset all variables. 
@@ -591,13 +592,20 @@ class SkipList final : detail::SkipListNode_base { Iterator(TNode *node) : node_(node) {} public: - TObj &operator*() const { return node_->obj; } + using value_type = TObj; + using difference_type = std::ptrdiff_t; - TObj *operator->() const { return &node_->obj; } + Iterator() = default; + Iterator(Iterator const &) = default; + Iterator(Iterator &&) = default; + Iterator &operator=(Iterator const &) = default; + Iterator &operator=(Iterator &&) = default; - bool operator==(const Iterator &other) const { return node_ == other.node_; } + value_type &operator*() const { return node_->obj; } - bool operator!=(const Iterator &other) const { return node_ != other.node_; } + value_type *operator->() const { return &node_->obj; } + + friend bool operator==(Iterator const &lhs, Iterator const &rhs) { return lhs.node_ == rhs.node_; } Iterator &operator++() { while (true) { @@ -610,8 +618,14 @@ class SkipList final : detail::SkipListNode_base { } } + Iterator operator++(int) { + Iterator old = *this; + ++(*this); + return old; + } + private: - TNode *node_; + TNode *node_{nullptr}; }; class ConstIterator final { @@ -621,15 +635,22 @@ class SkipList final : detail::SkipListNode_base { ConstIterator(TNode *node) : node_(node) {} public: + using value_type = TObj const; + using difference_type = std::ptrdiff_t; + + ConstIterator() = default; + ConstIterator(ConstIterator const &) = default; + ConstIterator(ConstIterator &&) = default; + ConstIterator &operator=(ConstIterator const &) = default; + ConstIterator &operator=(ConstIterator &&) = default; + ConstIterator(const Iterator &it) : node_(it.node_) {} - const TObj &operator*() const { return node_->obj; } + value_type &operator*() const { return node_->obj; } - const TObj *operator->() const { return &node_->obj; } + value_type *operator->() const { return &node_->obj; } - bool operator==(const ConstIterator &other) const { return node_ == other.node_; } - - bool operator!=(const ConstIterator &other) const { return node_ != other.node_; } + friend bool operator==(ConstIterator const &lhs, ConstIterator const &rhs) { return lhs.node_ == rhs.node_; } ConstIterator &operator++() { while (true) { @@ -642,6 +663,12 @@ class SkipList final : detail::SkipListNode_base { } } + ConstIterator operator++(int) { + ConstIterator old = *this; + ++(*this); + return old; + } + private: TNode *node_; }; @@ -653,6 +680,10 @@ class SkipList final : detail::SkipListNode_base { explicit Accessor(SkipList *skiplist) : skiplist_(skiplist), id_(skiplist->gc_.AllocateId()) {} public: + using value_type = TObj; + using iterator = Iterator; + using const_iterator = ConstIterator; + ~Accessor() { if (skiplist_ != nullptr) skiplist_->gc_.ReleaseId(id_); } @@ -695,7 +726,7 @@ class SkipList final : detail::SkipListNode_base { /// @return bool indicating whether the item exists template bool contains(const TKey &key) const { - return skiplist_->template contains(key); + return skiplist_->contains(key); } /// Finds the key in the list and returns an iterator to the item. @@ -703,8 +734,17 @@ class SkipList final : detail::SkipListNode_base { /// @return Iterator to the item in the list, will be equal to `end()` when /// the key isn't found template - Iterator find(const TKey &key) const { - return skiplist_->template find(key); + Iterator find(const TKey &key) { + return skiplist_->find(key); + } + + /// Finds the key in the list and returns an iterator to the item. 
+ /// + /// @return ConstIterator to the item in the list, will be equal to `cend()` when + /// the key isn't found + template + ConstIterator find(const TKey &key) const { + return skiplist_->find(key); } /// Finds the key or the first larger key in the list and returns an @@ -713,8 +753,18 @@ class SkipList final : detail::SkipListNode_base { /// @return Iterator to the item in the list, will be equal to `end()` when /// no items match the search template - Iterator find_equal_or_greater(const TKey &key) const { - return skiplist_->template find_equal_or_greater(key); + Iterator find_equal_or_greater(const TKey &key) { + return skiplist_->find_equal_or_greater(key); + } + + /// Finds the key or the first larger key in the list and returns an + /// iterator to the item. + /// + /// @return ConstIterator to the item in the list, will be equal to `end()` when + /// no items match the search + template + ConstIterator find_equal_or_greater(const TKey &key) const { + return skiplist_->find_equal_or_greater(key); } /// Estimates the number of items that are contained in the list that are @@ -727,7 +777,7 @@ class SkipList final : detail::SkipListNode_base { /// @return uint64_t estimated count of identical items in the list template uint64_t estimate_count(const TKey &key, int max_layer_for_estimation = kSkipListCountEstimateDefaultLayer) const { - return skiplist_->template estimate_count(key, max_layer_for_estimation); + return skiplist_->estimate_count(key, max_layer_for_estimation); } /// Estimates the number of items that are contained in the list that are @@ -742,7 +792,7 @@ class SkipList final : detail::SkipListNode_base { uint64_t estimate_range_count(const std::optional> &lower, const std::optional> &upper, int max_layer_for_estimation = kSkipListCountEstimateDefaultLayer) const { - return skiplist_->template estimate_range_count(lower, upper, max_layer_for_estimation); + return skiplist_->estimate_range_count(lower, upper, max_layer_for_estimation); } /// Estimates the average number of objects in the list that have the same @@ -759,7 +809,7 @@ class SkipList final : detail::SkipListNode_base { template uint64_t estimate_average_number_of_equals( const TCallable &equal_cmp, int max_layer_for_estimation = kSkipListCountEstimateDefaultLayer) const { - return skiplist_->template estimate_average_number_of_equals(equal_cmp, max_layer_for_estimation); + return skiplist_->estimate_average_number_of_equals(equal_cmp, max_layer_for_estimation); } /// Removes the key from the list. @@ -767,7 +817,7 @@ class SkipList final : detail::SkipListNode_base { /// @return bool indicating whether the removal was successful template bool remove(const TKey &key) { - return skiplist_->template remove(key); + return skiplist_->remove(key); } /// Returns the number of items contained in the list. 
@@ -787,6 +837,10 @@ class SkipList final : detail::SkipListNode_base { explicit ConstAccessor(const SkipList *skiplist) : skiplist_(skiplist), id_(skiplist->gc_.AllocateId()) {} public: + using value_type = TObj; + using iterator = ConstIterator; + using const_iterator = ConstIterator; + ~ConstAccessor() { if (skiplist_ != nullptr) skiplist_->gc_.ReleaseId(id_); } @@ -812,35 +866,35 @@ class SkipList final : detail::SkipListNode_base { template bool contains(const TKey &key) const { - return skiplist_->template contains(key); + return skiplist_->contains(key); } template ConstIterator find(const TKey &key) const { - return skiplist_->template find(key); + return skiplist_->find(key); } template ConstIterator find_equal_or_greater(const TKey &key) const { - return skiplist_->template find_equal_or_greater(key); + return skiplist_->find_equal_or_greater(key); } template uint64_t estimate_count(const TKey &key, int max_layer_for_estimation = kSkipListCountEstimateDefaultLayer) const { - return skiplist_->template estimate_count(key, max_layer_for_estimation); + return skiplist_->estimate_count(key, max_layer_for_estimation); } template uint64_t estimate_range_count(const std::optional> &lower, const std::optional> &upper, int max_layer_for_estimation = kSkipListCountEstimateDefaultLayer) const { - return skiplist_->template estimate_range_count(lower, upper, max_layer_for_estimation); + return skiplist_->estimate_range_count(lower, upper, max_layer_for_estimation); } template uint64_t estimate_average_number_of_equals( const TCallable &equal_cmp, int max_layer_for_estimation = kSkipListCountEstimateDefaultLayer) const { - return skiplist_->template estimate_average_number_of_equals(equal_cmp, max_layer_for_estimation); + return skiplist_->estimate_average_number_of_equals(equal_cmp, max_layer_for_estimation); } uint64_t size() const { return skiplist_->size(); } @@ -975,40 +1029,46 @@ class SkipList final : detail::SkipListNode_base { continue; } - std::unique_lock guards[kSkipListMaxHeight]; - TNode *pred, *succ, *prev_pred = nullptr; - bool valid = true; - // The paper has a wrong condition here. In the paper it states that this - // loop should have `(layer <= top_layer)`, but that isn't correct. - for (int layer = 0; valid && (layer < top_layer); ++layer) { - pred = preds[layer]; - succ = succs[layer]; - if (pred != prev_pred) { - guards[layer] = std::unique_lock(pred->lock); - prev_pred = pred; + TNode *new_node; + { + TNode *prev_pred = nullptr; + bool valid = true; + std::unique_lock guards[kSkipListMaxHeight]; + // The paper has a wrong condition here. In the paper it states that this + // loop should have `(layer <= top_layer)`, but that isn't correct. + for (int layer = 0; valid && (layer < top_layer); ++layer) { + TNode *pred = preds[layer]; + TNode *succ = succs[layer]; + if (pred != prev_pred) { + guards[layer] = std::unique_lock(pred->lock); + prev_pred = pred; + } + // Existence test is missing in the paper. + valid = !pred->marked.load(std::memory_order_acquire) && + pred->nexts[layer].load(std::memory_order_acquire) == succ && + (succ == nullptr || !succ->marked.load(std::memory_order_acquire)); } - // Existence test is missing in the paper. 
- valid = !pred->marked.load(std::memory_order_acquire) && - pred->nexts[layer].load(std::memory_order_acquire) == succ && - (succ == nullptr || !succ->marked.load(std::memory_order_acquire)); - } - if (!valid) continue; + if (!valid) continue; - size_t node_bytes = sizeof(TNode) + top_layer * sizeof(std::atomic); - void *ptr = GetMemoryResource()->Allocate(node_bytes); - // `calloc` would be faster, but the API has no such call. - memset(ptr, 0, node_bytes); - auto *new_node = static_cast(ptr); - // Construct through allocator so it propagates if needed. - Allocator allocator(GetMemoryResource()); - allocator.construct(new_node, top_layer, std::forward(object)); + size_t node_bytes = sizeof(TNode) + top_layer * sizeof(std::atomic); - // The paper is also wrong here. It states that the loop should go up to - // `top_layer` which is wrong. - for (int layer = 0; layer < top_layer; ++layer) { - new_node->nexts[layer].store(succs[layer], std::memory_order_release); - preds[layer]->nexts[layer].store(new_node, std::memory_order_release); + MemoryResource *memoryResource = GetMemoryResource(); + void *ptr = memoryResource->Allocate(node_bytes); + // `calloc` would be faster, but the API has no such call. + memset(ptr, 0, node_bytes); + new_node = static_cast(ptr); + + // Construct through allocator so it propagates if needed. + Allocator allocator(memoryResource); + allocator.construct(new_node, top_layer, std::forward(object)); + + // The paper is also wrong here. It states that the loop should go up to + // `top_layer` which is wrong. + for (int layer = 0; layer < top_layer; ++layer) { + new_node->nexts[layer].store(succs[layer], std::memory_order_release); + preds[layer]->nexts[layer].store(new_node, std::memory_order_release); + } } new_node->fully_linked.store(true, std::memory_order_release); @@ -1018,26 +1078,33 @@ class SkipList final : detail::SkipListNode_base { } template - bool contains(const TKey &key) const { - TNode *preds[kSkipListMaxHeight], *succs[kSkipListMaxHeight]; - int layer_found = find_node(key, preds, succs); - return (layer_found != -1 && succs[layer_found]->fully_linked.load(std::memory_order_acquire) && - !succs[layer_found]->marked.load(std::memory_order_acquire)); - } - - template - Iterator find(const TKey &key) const { + SkipListNode *find_(const TKey &key) const { TNode *preds[kSkipListMaxHeight], *succs[kSkipListMaxHeight]; int layer_found = find_node(key, preds, succs); if (layer_found != -1 && succs[layer_found]->fully_linked.load(std::memory_order_acquire) && !succs[layer_found]->marked.load(std::memory_order_acquire)) { - return Iterator{succs[layer_found]}; + return succs[layer_found]; } - return Iterator{nullptr}; + return nullptr; } template - Iterator find_equal_or_greater(const TKey &key) const { + bool contains(const TKey &key) const { + return find_(key) != nullptr; + } + + template + Iterator find(const TKey &key) { + return {find_(key)}; + } + + template + ConstIterator find(const TKey &key) const { + return {find_(key)}; + } + + template + Iterator find_equal_or_greater_(const TKey &key) const { TNode *preds[kSkipListMaxHeight], *succs[kSkipListMaxHeight]; find_node(key, preds, succs); if (succs[0] && succs[0]->fully_linked.load(std::memory_order_acquire) && @@ -1047,6 +1114,16 @@ class SkipList final : detail::SkipListNode_base { return Iterator{nullptr}; } + template + Iterator find_equal_or_greater(const TKey &key) { + return {find_equal_or_greater_(key)}; + } + + template + ConstIterator find_equal_or_greater(const TKey &key) const { + return 
{find_equal_or_greater_(key)}; + } + template uint64_t estimate_count(const TKey &key, int max_layer_for_estimation) const { MG_ASSERT(max_layer_for_estimation >= 1 && max_layer_for_estimation <= kSkipListMaxHeight, diff --git a/src/utils/thread_pool.hpp b/src/utils/thread_pool.hpp index 8597a78a1..c6f0bb6ee 100644 --- a/src/utils/thread_pool.hpp +++ b/src/utils/thread_pool.hpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -23,6 +23,16 @@ namespace memgraph::utils { +template +struct CopyMovableFunctionWrapper { + CopyMovableFunctionWrapper(Func &&func) : func_{std::make_shared(std::move(func))} {} + + void operator()() { (*func_)(); } + + private: + std::shared_ptr func_; +}; + class ThreadPool { using TaskSignature = std::function; diff --git a/src/utils/typeinfo.hpp b/src/utils/typeinfo.hpp index 944d35fab..fd0d1fdeb 100644 --- a/src/utils/typeinfo.hpp +++ b/src/utils/typeinfo.hpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -17,9 +17,10 @@ namespace memgraph::utils { enum class TypeId : uint64_t { + UNKNOWN = 0, + // Operators - UNKNOWN, - LOGICAL_OPERATOR, + LOGICAL_OPERATOR = 1000, ONCE, NODE_CREATION_INFO, CREATE_NODE, @@ -69,7 +70,8 @@ enum class TypeId : uint64_t { HASH_JOIN, // Replication - REP_APPEND_DELTAS_REQ, + // NOTE: these NEED to be stable in the 2000+ range (see rpc version) + REP_APPEND_DELTAS_REQ = 2000, REP_APPEND_DELTAS_RES, REP_HEARTBEAT_REQ, REP_HEARTBEAT_RES, @@ -83,9 +85,23 @@ enum class TypeId : uint64_t { REP_CURRENT_WAL_RES, REP_TIMESTAMP_REQ, REP_TIMESTAMP_RES, + REP_CREATE_DATABASE_REQ, + REP_CREATE_DATABASE_RES, + REP_DROP_DATABASE_REQ, + REP_DROP_DATABASE_RES, + REP_SYSTEM_HEARTBEAT_REQ, + REP_SYSTEM_HEARTBEAT_RES, + REP_SYSTEM_RECOVERY_REQ, + REP_SYSTEM_RECOVERY_RES, + + // Coordinator + COORD_FAILOVER_REQ, + COORD_FAILOVER_RES, + COORD_SET_REPL_MAIN_REQ, + COORD_SET_REPL_MAIN_RES, // AST - AST_LABELIX, + AST_LABELIX = 3000, AST_PROPERTYIX, AST_EDGETYPEIX, AST_TREE, @@ -191,8 +207,10 @@ enum class TypeId : uint64_t { AST_SHOW_DATABASES, AST_EDGE_IMPORT_MODE_QUERY, AST_PATTERN_COMPREHENSION, + AST_COORDINATOR_QUERY, + // Symbol - SYMBOL, + SYMBOL = 4000, }; /// Type information on a C++ type. diff --git a/src/utils/uuid.cpp b/src/utils/uuid.cpp index 9b13b8965..fbcf662de 100644 --- a/src/utils/uuid.cpp +++ b/src/utils/uuid.cpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2023 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -10,8 +10,8 @@ // licenses/APL.txt. 
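CopyMovableFunctionWrapper in thread_pool.hpp above addresses a std::function limitation: std::function requires a copy-constructible callable, while tasks that own resources (say, a unique_ptr capture) are move-only. The wrapper parks the callable behind a shared_ptr, so copies of the wrapper share one underlying task and std::function is satisfied. A sketch with an illustrative task body; how the thread pool itself ends up using the wrapper is not shown in this diff.

#include <functional>
#include <memory>

#include "utils/thread_pool.hpp"

void WrapMoveOnlyTaskSketch() {
  auto payload = std::make_unique<int>(42);

  // Move-only: the capture owns a unique_ptr, so the lambda cannot be copied and
  // therefore cannot be stored in std::function<void()> directly.
  auto task = [p = std::move(payload)]() { /* ... use *p ... */ };

  // The wrapper is copyable (copies share the same shared_ptr-owned callable),
  // which satisfies std::function's copy-constructible requirement.
  std::function<void()> fn{memgraph::utils::CopyMovableFunctionWrapper{std::move(task)}};
  fn();
}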
#include "utils/uuid.hpp" - #include +#include "slk/serialization.hpp" namespace memgraph::utils { @@ -24,3 +24,13 @@ std::string GenerateUUID() { } } // namespace memgraph::utils + +// Serialize UUID +namespace memgraph::slk { +void Save(const memgraph::utils::UUID &self, memgraph::slk::Builder *builder) { + const auto &arr = static_cast(self); + memgraph::slk::Save(arr, builder); +} + +void Load(memgraph::utils::UUID *self, memgraph::slk::Reader *reader) { memgraph::slk::Load(&self->uuid, reader); } +} // namespace memgraph::slk diff --git a/src/utils/uuid.hpp b/src/utils/uuid.hpp index 8bbb1a1a1..bca55d73b 100644 --- a/src/utils/uuid.hpp +++ b/src/utils/uuid.hpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2023 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -11,8 +11,22 @@ #pragma once +#include +#include +#include #include +namespace memgraph::utils { +struct UUID; +} + +namespace memgraph::slk { +class Reader; +class Builder; +void Save(const ::memgraph::utils::UUID &self, Builder *builder); +void Load(::memgraph::utils::UUID *self, Reader *reader); +} // namespace memgraph::slk + namespace memgraph::utils { /** @@ -20,4 +34,35 @@ namespace memgraph::utils { */ std::string GenerateUUID(); +struct UUID { + using arr_t = std::array; + + UUID() { uuid_generate(uuid.data()); } + explicit operator std::string() const { + auto decoded = std::array{}; + uuid_unparse(uuid.data(), decoded.data()); + return std::string{decoded.data(), UUID_STR_LEN - 1}; + } + + explicit operator arr_t() const { return uuid; } + + friend bool operator==(UUID const &, UUID const &) = default; + + private: + friend void to_json(nlohmann::json &j, const UUID &uuid); + friend void from_json(const nlohmann::json &j, UUID &uuid); + friend void ::memgraph::slk::Load(UUID *self, slk::Reader *reader); + explicit UUID(arr_t const &arr) : uuid(arr) {} + + arr_t uuid; +}; + +inline void to_json(nlohmann::json &j, const UUID &uuid) { j = nlohmann::json(uuid.uuid); } + +inline void from_json(const nlohmann::json &j, UUID &uuid) { + auto arr = UUID::arr_t{}; + j.get_to(arr); + uuid = UUID(arr); +} + } // namespace memgraph::utils diff --git a/tests/benchmark/query/eval.cpp b/tests/benchmark/query/eval.cpp index 09e789137..92ba67cd6 100644 --- a/tests/benchmark/query/eval.cpp +++ b/tests/benchmark/query/eval.cpp @@ -17,7 +17,7 @@ #include "storage/v2/inmemory/storage.hpp" #include "storage/v2/storage.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; // The following classes are wrappers for memgraph::utils::MemoryResource, so that we can // use BENCHMARK_TEMPLATE diff --git a/tests/benchmark/query/execution.cpp b/tests/benchmark/query/execution.cpp index 750dd5564..d49b14fc3 100644 --- a/tests/benchmark/query/execution.cpp +++ b/tests/benchmark/query/execution.cpp @@ -33,7 +33,7 @@ #include "query/interpreter.hpp" #include "storage/v2/inmemory/storage.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; // The following classes are wrappers for memgraph::utils::MemoryResource, so that we can // use BENCHMARK_TEMPLATE diff --git a/tests/benchmark/query/planner.cpp b/tests/benchmark/query/planner.cpp index b64c4c39f..c70de0869 100644 --- a/tests/benchmark/query/planner.cpp +++ b/tests/benchmark/query/planner.cpp @@ -20,7 +20,7 
@@ #include "query/plan/vertex_count_cache.hpp" #include "storage/v2/inmemory/storage.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; // Add chained MATCH (node1) -- (node2), MATCH (node2) -- (node3) ... clauses. static memgraph::query::CypherQuery *AddChainedMatches(int num_matches, memgraph::query::AstStorage &storage) { diff --git a/tests/benchmark/storage_v2_gc.cpp b/tests/benchmark/storage_v2_gc.cpp index 246df09f6..6f0e5712d 100644 --- a/tests/benchmark/storage_v2_gc.cpp +++ b/tests/benchmark/storage_v2_gc.cpp @@ -17,7 +17,7 @@ #include "storage/v2/storage.hpp" #include "utils/timer.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; // This benchmark should be run for a fixed amount of time that is // large compared to GC interval to make the output relevant. diff --git a/tests/benchmark/storage_v2_gc2.cpp b/tests/benchmark/storage_v2_gc2.cpp index 52aa01870..f3986edd3 100644 --- a/tests/benchmark/storage_v2_gc2.cpp +++ b/tests/benchmark/storage_v2_gc2.cpp @@ -17,7 +17,7 @@ #include "storage/v2/storage.hpp" #include "utils/timer.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; // This benchmark should be run for a fixed amount of time that is // large compared to GC interval to make the output relevant. diff --git a/tests/concurrent/storage_indices.cpp b/tests/concurrent/storage_indices.cpp index 967e98c91..fc4d75a76 100644 --- a/tests/concurrent/storage_indices.cpp +++ b/tests/concurrent/storage_indices.cpp @@ -19,7 +19,7 @@ #include "storage/v2/storage_error.hpp" #include "utils/thread.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; const uint64_t kNumVerifiers = 5; const uint64_t kNumMutators = 1; diff --git a/tests/concurrent/storage_unique_constraints.cpp b/tests/concurrent/storage_unique_constraints.cpp index 7defb4211..dc3b30146 100644 --- a/tests/concurrent/storage_unique_constraints.cpp +++ b/tests/concurrent/storage_unique_constraints.cpp @@ -16,7 +16,7 @@ #include "storage/v2/constraints/constraints.hpp" #include "storage/v2/inmemory/storage.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; const int kNumThreads = 8; diff --git a/tests/e2e/CMakeLists.txt b/tests/e2e/CMakeLists.txt index fcf7f45b6..a95297301 100644 --- a/tests/e2e/CMakeLists.txt +++ b/tests/e2e/CMakeLists.txt @@ -77,6 +77,15 @@ add_subdirectory(query_modules_storage_modes) add_subdirectory(garbage_collection) add_subdirectory(query_planning) +if (MG_EXPERIMENTAL_HIGH_AVAILABILITY) + add_subdirectory(high_availability_experimental) +endif () + + +if (MG_EXPERIMENTAL_REPLICATION_MULTITENANCY) + add_subdirectory(replication_experimental) +endif () + copy_e2e_python_files(pytest_runner pytest_runner.sh "") copy_e2e_python_files(x x.sh "") file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/memgraph-selfsigned.crt DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) diff --git a/tests/e2e/analytical_mode/CMakeLists.txt b/tests/e2e/analytical_mode/CMakeLists.txt index e22830770..1756c9980 100644 --- a/tests/e2e/analytical_mode/CMakeLists.txt +++ b/tests/e2e/analytical_mode/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_analytical_mode_e2e_python_files(common.py) copy_analytical_mode_e2e_python_files(free_memory.py) + +copy_e2e_files(analytical_mode workloads.yaml) diff --git 
a/tests/e2e/analyze_graph/CMakeLists.txt b/tests/e2e/analyze_graph/CMakeLists.txt index 1b96eb960..0faa37caa 100644 --- a/tests/e2e/analyze_graph/CMakeLists.txt +++ b/tests/e2e/analyze_graph/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_analyze_graph_e2e_python_files(common.py) copy_analyze_graph_e2e_python_files(optimize_indexes.py) + +copy_e2e_files(analyze_graph workloads.yaml) diff --git a/tests/e2e/batched_procedures/CMakeLists.txt b/tests/e2e/batched_procedures/CMakeLists.txt index 19aacb15c..28eb46bee 100644 --- a/tests/e2e/batched_procedures/CMakeLists.txt +++ b/tests/e2e/batched_procedures/CMakeLists.txt @@ -7,3 +7,5 @@ copy_batched_procedures_e2e_python_files(conftest.py) copy_batched_procedures_e2e_python_files(simple_read.py) add_subdirectory(procedures) + +copy_e2e_files(batched_procedures workloads.yaml) diff --git a/tests/e2e/concurrent_query_modules/CMakeLists.txt b/tests/e2e/concurrent_query_modules/CMakeLists.txt index 6c92387cb..9f3996585 100644 --- a/tests/e2e/concurrent_query_modules/CMakeLists.txt +++ b/tests/e2e/concurrent_query_modules/CMakeLists.txt @@ -6,3 +6,5 @@ copy_concurrent_query_modules_e2e_python_files(client.py) copy_concurrent_query_modules_e2e_python_files(con_query_modules.py) add_subdirectory(test_query_modules) + +copy_e2e_files(concurrent_query_modules workloads.yaml) diff --git a/tests/e2e/configuration/CMakeLists.txt b/tests/e2e/configuration/CMakeLists.txt index 0411c70e3..b02f69639 100644 --- a/tests/e2e/configuration/CMakeLists.txt +++ b/tests/e2e/configuration/CMakeLists.txt @@ -1,7 +1,9 @@ function(copy_configuration_check_e2e_python_files FILE_NAME) - copy_e2e_python_files(write_procedures ${FILE_NAME}) + copy_e2e_python_files(configuration ${FILE_NAME}) endfunction() copy_configuration_check_e2e_python_files(default_config.py) copy_configuration_check_e2e_python_files(configuration_check.py) copy_configuration_check_e2e_python_files(storage_info.py) + +copy_e2e_files(configuration workloads.yaml) diff --git a/tests/e2e/configuration/default_config.py b/tests/e2e/configuration/default_config.py index d0029bbd7..915a14d14 100644 --- a/tests/e2e/configuration/default_config.py +++ b/tests/e2e/configuration/default_config.py @@ -66,6 +66,8 @@ startup_config_dict = { "Time in seconds after which inactive Bolt sessions will be closed.", ), "cartesian_product_enabled": ("true", "true", "Enable cartesian product expansion."), + "coordinator": ("false", "false", "Controls whether the instance is a replication coordinator."), + "coordinator_server_port": ("0", "0", "Port on which coordinator servers will be started."), "data_directory": ("mg_data", "mg_data", "Path to directory in which to save all permanent data."), "data_recovery_on_startup": ( "false", @@ -174,11 +176,6 @@ startup_config_dict = { "Default storage mode Memgraph uses. 
Allowed values: IN_MEMORY_TRANSACTIONAL, IN_MEMORY_ANALYTICAL, ON_DISK_TRANSACTIONAL", ), "storage_wal_file_size_kib": ("20480", "20480", "Minimum file size of each WAL file."), - "storage_delete_on_drop": ( - "true", - "true", - "If set to true the query 'DROP DATABASE x' will delete the underlying storage as well.", - ), "stream_transaction_conflict_retries": ( "30", "30", diff --git a/tests/e2e/configuration/storage_info.py b/tests/e2e/configuration/storage_info.py index 042a57b08..8af294eca 100644 --- a/tests/e2e/configuration/storage_info.py +++ b/tests/e2e/configuration/storage_info.py @@ -11,7 +11,6 @@ import sys -import default_config import mgclient import pytest diff --git a/tests/e2e/constraints/CMakeLists.txt b/tests/e2e/constraints/CMakeLists.txt index 0c4ff72d9..b099dab4d 100644 --- a/tests/e2e/constraints/CMakeLists.txt +++ b/tests/e2e/constraints/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_constraint_validation_e2e_python_files(common.py) copy_constraint_validation_e2e_python_files(constraints_validation.py) + +copy_e2e_files(constraint_validation workloads.yaml) diff --git a/tests/e2e/disk_storage/CMakeLists.txt b/tests/e2e/disk_storage/CMakeLists.txt index 777277178..5e0822ef8 100644 --- a/tests/e2e/disk_storage/CMakeLists.txt +++ b/tests/e2e/disk_storage/CMakeLists.txt @@ -13,3 +13,5 @@ copy_disk_storage_e2e_python_files(snapshot_disabled.py) copy_disk_storage_e2e_python_files(lock_data_dir_disabled.py) copy_disk_storage_e2e_python_files(create_edge_from_indices.py) copy_disk_storage_e2e_python_files(storage_info.py) + +copy_e2e_files(disk_storage workloads.yaml) diff --git a/tests/e2e/fine_grained_access/CMakeLists.txt b/tests/e2e/fine_grained_access/CMakeLists.txt index 6b277694f..71a02cd4b 100644 --- a/tests/e2e/fine_grained_access/CMakeLists.txt +++ b/tests/e2e/fine_grained_access/CMakeLists.txt @@ -7,3 +7,5 @@ copy_fine_grained_access_e2e_python_files(create_delete_filtering_tests.py) copy_fine_grained_access_e2e_python_files(edge_type_filtering_tests.py) copy_fine_grained_access_e2e_python_files(path_filtering_tests.py) copy_fine_grained_access_e2e_python_files(show_db.py) + +copy_e2e_files(fine_grained_access workloads.yaml) diff --git a/tests/e2e/fine_grained_access/show_db.py b/tests/e2e/fine_grained_access/show_db.py index 546d4e24a..c5378dca6 100644 --- a/tests/e2e/fine_grained_access/show_db.py +++ b/tests/e2e/fine_grained_access/show_db.py @@ -23,13 +23,20 @@ def test_show_databases_w_user(): user3_connection = common.connect(username="user3", password="test") assert common.execute_and_fetch_all(admin_connection.cursor(), "SHOW DATABASES") == [ - ("db1", ""), - ("db2", ""), - ("memgraph", "*"), + ("db1",), + ("db2",), + ("memgraph",), ] - assert common.execute_and_fetch_all(user_connection.cursor(), "SHOW DATABASES") == [("db1", ""), ("memgraph", "*")] - assert common.execute_and_fetch_all(user2_connection.cursor(), "SHOW DATABASES") == [("db2", "*")] - assert common.execute_and_fetch_all(user3_connection.cursor(), "SHOW DATABASES") == [("db1", "*"), ("db2", "")] + assert common.execute_and_fetch_all(admin_connection.cursor(), "SHOW DATABASE") == [("memgraph",)] + + assert common.execute_and_fetch_all(user_connection.cursor(), "SHOW DATABASES") == [("db1",), ("memgraph",)] + assert common.execute_and_fetch_all(user_connection.cursor(), "SHOW DATABASE") == [("memgraph",)] + + assert common.execute_and_fetch_all(user2_connection.cursor(), "SHOW DATABASES") == [("db2",)] + assert common.execute_and_fetch_all(user2_connection.cursor(), "SHOW DATABASE") == 
[("db2",)] + + assert common.execute_and_fetch_all(user3_connection.cursor(), "SHOW DATABASES") == [("db1",), ("db2",)] + assert common.execute_and_fetch_all(user3_connection.cursor(), "SHOW DATABASE") == [("db1",)] if __name__ == "__main__": diff --git a/tests/e2e/garbage_collection/CMakeLists.txt b/tests/e2e/garbage_collection/CMakeLists.txt index 690edf344..e515247ec 100644 --- a/tests/e2e/garbage_collection/CMakeLists.txt +++ b/tests/e2e/garbage_collection/CMakeLists.txt @@ -5,3 +5,5 @@ endfunction() garbage_collection_e2e_python_files(common.py) garbage_collection_e2e_python_files(conftest.py) garbage_collection_e2e_python_files(gc_periodic.py) + +copy_e2e_files(garbage_collection workloads.yaml) diff --git a/tests/e2e/graphql/CMakeLists.txt b/tests/e2e/graphql/CMakeLists.txt index 7ad1624b6..384d534d8 100644 --- a/tests/e2e/graphql/CMakeLists.txt +++ b/tests/e2e/graphql/CMakeLists.txt @@ -8,3 +8,5 @@ copy_graphql_e2e_python_files(callable_alias_mapping.json) add_subdirectory(graphql_library_config) add_subdirectory(temporary_procedures) + +copy_e2e_files(graphql workloads.yaml) diff --git a/tests/e2e/high_availability_experimental/CMakeLists.txt b/tests/e2e/high_availability_experimental/CMakeLists.txt new file mode 100644 index 000000000..76e1a6956 --- /dev/null +++ b/tests/e2e/high_availability_experimental/CMakeLists.txt @@ -0,0 +1,12 @@ +find_package(gflags REQUIRED) + +copy_e2e_python_files(ha_experimental coordinator.py) +copy_e2e_python_files(ha_experimental automatic_failover.py) +copy_e2e_python_files(ha_experimental manual_setting_replicas.py) +copy_e2e_python_files(ha_experimental common.py) +copy_e2e_python_files(ha_experimental conftest.py) +copy_e2e_python_files(ha_experimental workloads.yaml) + +copy_e2e_python_files_from_parent_folder(ha_experimental ".." memgraph.py) +copy_e2e_python_files_from_parent_folder(ha_experimental ".." interactive_mg_runner.py) +copy_e2e_python_files_from_parent_folder(ha_experimental ".." mg_utils.py) diff --git a/tests/e2e/high_availability_experimental/automatic_failover.py b/tests/e2e/high_availability_experimental/automatic_failover.py new file mode 100644 index 000000000..f3ffadfe8 --- /dev/null +++ b/tests/e2e/high_availability_experimental/automatic_failover.py @@ -0,0 +1,223 @@ +# Copyright 2024 Memgraph Ltd. +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +# License, and you may not use this file except in compliance with the Business Source License. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. 
+ +import os +import sys + +import interactive_mg_runner +import pytest +from common import execute_and_fetch_all +from mg_utils import mg_sleep_and_assert + +interactive_mg_runner.SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +interactive_mg_runner.PROJECT_DIR = os.path.normpath( + os.path.join(interactive_mg_runner.SCRIPT_DIR, "..", "..", "..", "..") +) +interactive_mg_runner.BUILD_DIR = os.path.normpath(os.path.join(interactive_mg_runner.PROJECT_DIR, "build")) +interactive_mg_runner.MEMGRAPH_BINARY = os.path.normpath(os.path.join(interactive_mg_runner.BUILD_DIR, "memgraph")) + +MEMGRAPH_INSTANCES_DESCRIPTION = { + "instance_1": { + "args": ["--bolt-port", "7688", "--log-level", "TRACE", "--coordinator-server-port", "10011"], + "log_file": "replica1.log", + "setup_queries": [], + }, + "instance_2": { + "args": ["--bolt-port", "7689", "--log-level", "TRACE", "--coordinator-server-port", "10012"], + "log_file": "replica2.log", + "setup_queries": [], + }, + "instance_3": { + "args": ["--bolt-port", "7687", "--log-level", "TRACE", "--coordinator-server-port", "10013"], + "log_file": "main.log", + "setup_queries": [], + }, + "coordinator": { + "args": ["--bolt-port", "7690", "--log-level=TRACE", "--coordinator"], + "log_file": "replica3.log", + "setup_queries": [ + "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001';", + "REGISTER INSTANCE instance_2 ON '127.0.0.1:10012' WITH '127.0.0.1:10002';", + "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003';", + "SET INSTANCE instance_3 TO MAIN", + ], + }, +} + + +def test_replication_works_on_failover(connection): + # Goal of this test is to check the replication works after failover command. + # 1. We start all replicas, main and coordinator manually: we want to be able to kill them ourselves without relying on external tooling to kill processes. + # 2. We check that main has correct state + # 3. We kill main + # 4. We check that coordinator and new main have correct state + # 5. We insert one vertex on new main + # 6. 
We check that vertex appears on new replica + + # 1 + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + + # 2 + main_cursor = connection(7687, "instance_3").cursor() + expected_data_on_main = [ + ("instance_1", "127.0.0.1:10001", "sync", 0, 0, "ready"), + ("instance_2", "127.0.0.1:10002", "sync", 0, 0, "ready"), + ] + actual_data_on_main = sorted(list(execute_and_fetch_all(main_cursor, "SHOW REPLICAS;"))) + assert actual_data_on_main == expected_data_on_main + + # 3 + interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_3") + + # 4 + coord_cursor = connection(7690, "coordinator").cursor() + + def retrieve_data_show_repl_cluster(): + return sorted(list(execute_and_fetch_all(coord_cursor, "SHOW REPLICATION CLUSTER;"))) + + expected_data_on_coord = [ + ("instance_1", "127.0.0.1:10011", True, "main"), + ("instance_2", "127.0.0.1:10012", True, "replica"), + ("instance_3", "127.0.0.1:10013", False, ""), + ] + mg_sleep_and_assert(expected_data_on_coord, retrieve_data_show_repl_cluster) + + new_main_cursor = connection(7688, "instance_1").cursor() + + def retrieve_data_show_replicas(): + return sorted(list(execute_and_fetch_all(new_main_cursor, "SHOW REPLICAS;"))) + + expected_data_on_new_main = [ + ("instance_2", "127.0.0.1:10002", "sync", 0, 0, "ready"), + ] + mg_sleep_and_assert(expected_data_on_new_main, retrieve_data_show_replicas) + + # 5 + execute_and_fetch_all(new_main_cursor, "CREATE ();") + + # 6 + alive_replica_cursror = connection(7689, "instance_2").cursor() + res = execute_and_fetch_all(alive_replica_cursror, "MATCH (n) RETURN count(n) as count;")[0][0] + assert res == 1, "Vertex should be replicated" + interactive_mg_runner.stop_all(MEMGRAPH_INSTANCES_DESCRIPTION) + + +def test_show_replication_cluster(connection): + # Goal of this test is to check the SHOW REPLICATION CLUSTER command. + # 1. We start all replicas, main and coordinator manually: we want to be able to kill them ourselves without relying on external tooling to kill processes. + # 2. We check that all replicas and main have the correct state: they should all be alive. + # 3. We kill one replica. It should not appear anymore in the SHOW REPLICATION CLUSTER command. + # 4. We kill main. It should not appear anymore in the SHOW REPLICATION CLUSTER command. + + # 1. + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + + cursor = connection(7690, "coordinator").cursor() + + # 2. + + # We leave some time for the coordinator to realise the replicas are down. + def retrieve_data(): + return sorted(list(execute_and_fetch_all(cursor, "SHOW REPLICATION CLUSTER;"))) + + expected_data = [ + ("instance_1", "127.0.0.1:10011", True, "replica"), + ("instance_2", "127.0.0.1:10012", True, "replica"), + ("instance_3", "127.0.0.1:10013", True, "main"), + ] + mg_sleep_and_assert(expected_data, retrieve_data) + + # 3. + interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_1") + + expected_data = [ + ("instance_1", "127.0.0.1:10011", False, ""), + ("instance_2", "127.0.0.1:10012", True, "replica"), + ("instance_3", "127.0.0.1:10013", True, "main"), + ] + mg_sleep_and_assert(expected_data, retrieve_data) + + # 4. 
+ interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_2") + + expected_data = [ + ("instance_1", "127.0.0.1:10011", False, ""), + ("instance_2", "127.0.0.1:10012", False, ""), + ("instance_3", "127.0.0.1:10013", True, "main"), + ] + mg_sleep_and_assert(expected_data, retrieve_data) + + +def test_simple_automatic_failover(connection): + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + + main_cursor = connection(7687, "instance_3").cursor() + expected_data_on_main = [ + ("instance_1", "127.0.0.1:10001", "sync", 0, 0, "ready"), + ("instance_2", "127.0.0.1:10002", "sync", 0, 0, "ready"), + ] + actual_data_on_main = sorted(list(execute_and_fetch_all(main_cursor, "SHOW REPLICAS;"))) + assert actual_data_on_main == expected_data_on_main + + interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, "instance_3") + + coord_cursor = connection(7690, "coordinator").cursor() + + def retrieve_data_show_repl_cluster(): + return sorted(list(execute_and_fetch_all(coord_cursor, "SHOW REPLICATION CLUSTER;"))) + + expected_data_on_coord = [ + ("instance_1", "127.0.0.1:10011", True, "main"), + ("instance_2", "127.0.0.1:10012", True, "replica"), + ("instance_3", "127.0.0.1:10013", False, ""), + ] + mg_sleep_and_assert(expected_data_on_coord, retrieve_data_show_repl_cluster) + + new_main_cursor = connection(7688, "instance_1").cursor() + + def retrieve_data_show_replicas(): + return sorted(list(execute_and_fetch_all(new_main_cursor, "SHOW REPLICAS;"))) + + expected_data_on_new_main = [ + ("instance_2", "127.0.0.1:10002", "sync", 0, 0, "ready"), + ] + mg_sleep_and_assert(expected_data_on_new_main, retrieve_data_show_replicas) + + +def test_registering_replica_fails_name_exists(connection): + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + + coord_cursor = connection(7690, "coordinator").cursor() + with pytest.raises(Exception) as e: + execute_and_fetch_all( + coord_cursor, + "REGISTER INSTANCE instance_1 ON '127.0.0.1:10051' WITH '127.0.0.1:10111';", + ) + assert str(e.value) == "Couldn't register replica instance since instance with such name already exists!" + + +def test_registering_replica_fails_endpoint_exists(connection): + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + + coord_cursor = connection(7690, "coordinator").cursor() + with pytest.raises(Exception) as e: + execute_and_fetch_all( + coord_cursor, + "REGISTER INSTANCE instance_5 ON '127.0.0.1:10001' WITH '127.0.0.1:10013';", + ) + assert ( + str(e.value) + == "Couldn't register replica because promotion on replica failed! Check logs on replica to find out more info!" + ) + + +if __name__ == "__main__": + sys.exit(pytest.main([__file__, "-rA"])) diff --git a/tests/e2e/high_availability_experimental/common.py b/tests/e2e/high_availability_experimental/common.py new file mode 100644 index 000000000..dc104d628 --- /dev/null +++ b/tests/e2e/high_availability_experimental/common.py @@ -0,0 +1,25 @@ +# Copyright 2022 Memgraph Ltd. +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +# License, and you may not use this file except in compliance with the Business Source License. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. 
+ +import typing + +import mgclient + + +def execute_and_fetch_all(cursor: mgclient.Cursor, query: str, params: dict = {}) -> typing.List[tuple]: + cursor.execute(query, params) + return cursor.fetchall() + + +def connect(**kwargs) -> mgclient.Connection: + connection = mgclient.connect(**kwargs) + connection.autocommit = True + return connection diff --git a/tests/e2e/high_availability_experimental/conftest.py b/tests/e2e/high_availability_experimental/conftest.py new file mode 100644 index 000000000..9100a63cf --- /dev/null +++ b/tests/e2e/high_availability_experimental/conftest.py @@ -0,0 +1,43 @@ +# Copyright 2022 Memgraph Ltd. +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +# License, and you may not use this file except in compliance with the Business Source License. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +import pytest +from common import connect, execute_and_fetch_all + + +# The fixture here is more complex because the connection has to be +# parameterized based on the test parameters (info has to be available on both +# sides). +# +# https://docs.pytest.org/en/latest/example/parametrize.html#indirect-parametrization +# is not an elegant/feasible solution here. +# +# The solution was independently developed and then I stumbled upon the same +# approach here https://stackoverflow.com/a/68286553/4888809 which I think is +# optimal. +@pytest.fixture(scope="function") +def connection(): + connection_holder = None + role_holder = None + + def inner_connection(port, role): + nonlocal connection_holder, role_holder + connection_holder = connect(host="localhost", port=port) + role_holder = role + return connection_holder + + yield inner_connection + + # Only main instance can be cleaned up because replicas do NOT accept + # writes. + if role_holder == "main": + cursor = connection_holder.cursor() + execute_and_fetch_all(cursor, "MATCH (n) DETACH DELETE n;") diff --git a/tests/e2e/high_availability_experimental/coordinator.py b/tests/e2e/high_availability_experimental/coordinator.py new file mode 100644 index 000000000..e34e9f069 --- /dev/null +++ b/tests/e2e/high_availability_experimental/coordinator.py @@ -0,0 +1,87 @@ +# Copyright 2024 Memgraph Ltd. +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +# License, and you may not use this file except in compliance with the Business Source License. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +import sys + +import pytest +from common import execute_and_fetch_all +from mg_utils import mg_sleep_and_assert + + +def test_disable_cypher_queries(connection): + cursor = connection(7690, "coordinator").cursor() + with pytest.raises(Exception) as e: + execute_and_fetch_all(cursor, "CREATE (n:TestNode {prop: 'test'})") + assert str(e.value) == "Coordinator can run only coordinator queries!" 
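The tests in this folder lean heavily on mg_sleep_and_assert from mg_utils.py, which is copied in from the parent folder and not shown in this diff: cluster state (SHOW REPLICATION CLUSTER, SHOW REPLICAS) is polled until it converges rather than asserted once. A minimal sketch of such a polling helper, assuming it simply retries a zero-argument callable until the expected value appears or a timeout expires (names and timeouts here are illustrative, not the actual implementation):

import time


def sleep_and_assert_sketch(expected, retrieve, timeout_s=10.0, interval_s=0.25):
    # Re-run retrieve() until it returns the expected value or the timeout expires.
    deadline = time.monotonic() + timeout_s
    last = None
    while time.monotonic() < deadline:
        last = retrieve()
        if last == expected:
            return
        time.sleep(interval_s)
    assert last == expected, f"state did not converge: got {last!r}, expected {expected!r}"

This is why the tests pass closures such as retrieve_data and retrieve_data_show_repl_cluster instead of a single query result: each retry re-executes the query, so transient states right after killing an instance do not fail the run.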
+ + +def test_coordinator_cannot_be_replica_role(connection): + cursor = connection(7690, "coordinator").cursor() + with pytest.raises(Exception) as e: + execute_and_fetch_all(cursor, "SET REPLICATION ROLE TO REPLICA WITH PORT 10001;") + assert str(e.value) == "Coordinator can run only coordinator queries!" + + +def test_coordinator_cannot_run_show_repl_role(connection): + cursor = connection(7690, "coordinator").cursor() + with pytest.raises(Exception) as e: + execute_and_fetch_all(cursor, "SHOW REPLICATION ROLE;") + assert str(e.value) == "Coordinator can run only coordinator queries!" + + +def test_coordinator_show_replication_cluster(connection): + cursor = connection(7690, "coordinator").cursor() + + def retrieve_data(): + return sorted(list(execute_and_fetch_all(cursor, "SHOW REPLICATION CLUSTER;"))) + + expected_data = [ + ("instance_1", "127.0.0.1:10011", True, "replica"), + ("instance_2", "127.0.0.1:10012", True, "replica"), + ("instance_3", "127.0.0.1:10013", True, "main"), + ] + mg_sleep_and_assert(expected_data, retrieve_data) + + +def test_coordinator_cannot_call_show_replicas(connection): + cursor = connection(7690, "coordinator").cursor() + with pytest.raises(Exception) as e: + execute_and_fetch_all(cursor, "SHOW REPLICAS;") + assert str(e.value) == "Coordinator can run only coordinator queries!" + + +@pytest.mark.parametrize( + "port, role", + [(7687, "main"), (7688, "replica"), (7689, "replica")], +) +def test_main_and_replicas_cannot_call_show_repl_cluster(port, role, connection): + cursor = connection(port, role).cursor() + with pytest.raises(Exception) as e: + execute_and_fetch_all(cursor, "SHOW REPLICATION CLUSTER;") + assert str(e.value) == "Only coordinator can run SHOW REPLICATION CLUSTER." + + +@pytest.mark.parametrize( + "port, role", + [(7687, "main"), (7688, "replica"), (7689, "replica")], +) +def test_main_and_replicas_cannot_register_coord_server(port, role, connection): + cursor = connection(port, role).cursor() + with pytest.raises(Exception) as e: + execute_and_fetch_all( + cursor, + "REGISTER INSTANCE instance_1 ON '127.0.0.1:10001' WITH '127.0.0.1:10011';", + ) + assert str(e.value) == "Only coordinator can register coordinator server!" + + +if __name__ == "__main__": + sys.exit(pytest.main([__file__, "-rA"])) diff --git a/tests/e2e/high_availability_experimental/manual_setting_replicas.py b/tests/e2e/high_availability_experimental/manual_setting_replicas.py new file mode 100644 index 000000000..f2d48ffd7 --- /dev/null +++ b/tests/e2e/high_availability_experimental/manual_setting_replicas.py @@ -0,0 +1,57 @@ +# Copyright 2024 Memgraph Ltd. +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +# License, and you may not use this file except in compliance with the Business Source License. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. 
+
+import os
+import sys
+
+import interactive_mg_runner
+import pytest
+from common import execute_and_fetch_all
+from mg_utils import mg_sleep_and_assert
+
+interactive_mg_runner.SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
+interactive_mg_runner.PROJECT_DIR = os.path.normpath(
+    os.path.join(interactive_mg_runner.SCRIPT_DIR, "..", "..", "..", "..")
+)
+interactive_mg_runner.BUILD_DIR = os.path.normpath(os.path.join(interactive_mg_runner.PROJECT_DIR, "build"))
+interactive_mg_runner.MEMGRAPH_BINARY = os.path.normpath(os.path.join(interactive_mg_runner.BUILD_DIR, "memgraph"))
+
+MEMGRAPH_INSTANCES_DESCRIPTION = {
+    "instance_3": {
+        "args": ["--bolt-port", "7687", "--log-level", "TRACE", "--coordinator-server-port", "10013"],
+        "log_file": "main.log",
+        "setup_queries": [],
+    },
+}
+
+
+def test_no_manual_setup_on_main(connection):
+    # Goal of this test is to check that all manual registration actions are disabled on instances started with a
+    # coordinator server port.
+
+    # 1
+    interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION)
+
+    any_main = connection(7687, "instance_3").cursor()
+    with pytest.raises(Exception) as e:
+        execute_and_fetch_all(any_main, "REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:10001';")
+    assert str(e.value) == "Can't register replica manually on instance with coordinator server port."
+
+    with pytest.raises(Exception) as e:
+        execute_and_fetch_all(any_main, "DROP REPLICA replica_1;")
+    assert str(e.value) == "Can't drop replica manually on instance with coordinator server port."
+
+    with pytest.raises(Exception) as e:
+        execute_and_fetch_all(any_main, "SET REPLICATION ROLE TO REPLICA WITH PORT 10002;")
+    assert str(e.value) == "Can't set role manually on instance with coordinator server port."
+
+
+if __name__ == "__main__":
+    sys.exit(pytest.main([__file__, "-rA"]))
diff --git a/tests/e2e/high_availability_experimental/workloads.yaml b/tests/e2e/high_availability_experimental/workloads.yaml
new file mode 100644
index 000000000..1d692084a
--- /dev/null
+++ b/tests/e2e/high_availability_experimental/workloads.yaml
@@ -0,0 +1,37 @@
+ha_cluster: &ha_cluster
+  cluster:
+    replica_1:
+      args: ["--bolt-port", "7688", "--log-level=TRACE", "--coordinator-server-port=10011"]
+      log_file: "replication-e2e-replica1.log"
+      setup_queries: []
+    replica_2:
+      args: ["--bolt-port", "7689", "--log-level=TRACE", "--coordinator-server-port=10012"]
+      log_file: "replication-e2e-replica2.log"
+      setup_queries: []
+    main:
+      args: ["--bolt-port", "7687", "--log-level=TRACE", "--coordinator-server-port=10013"]
+      log_file: "replication-e2e-main.log"
+      setup_queries: []
+    coordinator:
+      args: ["--bolt-port", "7690", "--log-level=TRACE", "--coordinator"]
+      log_file: "replication-e2e-coordinator.log"
+      setup_queries: [
+        "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001';",
+        "REGISTER INSTANCE instance_2 ON '127.0.0.1:10012' WITH '127.0.0.1:10002';",
+        "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003';",
+        "SET INSTANCE instance_3 TO MAIN;"
+      ]
+
+workloads:
+  - name: "Coordinator"
+    binary: "tests/e2e/pytest_runner.sh"
+    args: ["high_availability_experimental/coordinator.py"]
+    <<: *ha_cluster
+
+  - name: "Automatic failover"
+    binary: "tests/e2e/pytest_runner.sh"
+    args: ["high_availability_experimental/automatic_failover.py"]
+
+  - name: "Disabled manual setting of replication cluster"
+    binary: "tests/e2e/pytest_runner.sh"
+    args: ["high_availability_experimental/manual_setting_replicas.py"]
diff --git
a/tests/e2e/import_mode/CMakeLists.txt b/tests/e2e/import_mode/CMakeLists.txt index e316b7e82..b48b4f12d 100644 --- a/tests/e2e/import_mode/CMakeLists.txt +++ b/tests/e2e/import_mode/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_import_mode_e2e_python_files(common.py) copy_import_mode_e2e_python_files(test_command.py) + +copy_e2e_files(import_mode workloads.yaml) diff --git a/tests/e2e/index_hints/CMakeLists.txt b/tests/e2e/index_hints/CMakeLists.txt index 5261baacc..38b3baef0 100644 --- a/tests/e2e/index_hints/CMakeLists.txt +++ b/tests/e2e/index_hints/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_index_hints_e2e_python_files(common.py) copy_index_hints_e2e_python_files(index_hints.py) + +copy_e2e_files(index_hints workloads.yaml) diff --git a/tests/e2e/init_file_flags/CMakeLists.txt b/tests/e2e/init_file_flags/CMakeLists.txt index 8d98898e8..50ac5e8e5 100644 --- a/tests/e2e/init_file_flags/CMakeLists.txt +++ b/tests/e2e/init_file_flags/CMakeLists.txt @@ -10,3 +10,5 @@ copy_init_file_flags_e2e_python_files(init_file_setup.py) copy_init_file_flags_e2e_python_files(init_data_file_setup.py) copy_init_file_flags_e2e_files(init_file.cypherl) + +copy_e2e_files(init_file_flags workloads.yaml) diff --git a/tests/e2e/inspect_query/CMakeLists.txt b/tests/e2e/inspect_query/CMakeLists.txt index f0dbdb7cc..4b9b3d82d 100644 --- a/tests/e2e/inspect_query/CMakeLists.txt +++ b/tests/e2e/inspect_query/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_inspect_query_e2e_python_files(common.py) copy_inspect_query_e2e_python_files(inspect_query.py) + +copy_e2e_files(inspect_query workloads.yaml) diff --git a/tests/e2e/interactive_mg_runner.py b/tests/e2e/interactive_mg_runner.py old mode 100644 new mode 100755 index 13aa951db..f0e4e6da1 --- a/tests/e2e/interactive_mg_runner.py +++ b/tests/e2e/interactive_mg_runner.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 # Copyright 2022 Memgraph Ltd. # # Use of this software is governed by the Business Source License diff --git a/tests/e2e/isolation_levels/CMakeLists.txt b/tests/e2e/isolation_levels/CMakeLists.txt index a5f31a79d..1835d75df 100644 --- a/tests/e2e/isolation_levels/CMakeLists.txt +++ b/tests/e2e/isolation_levels/CMakeLists.txt @@ -2,3 +2,5 @@ find_package(gflags REQUIRED) add_executable(memgraph__e2e__isolation_levels isolation_levels.cpp) target_link_libraries(memgraph__e2e__isolation_levels gflags mgclient mg-utils mg-io Threads::Threads) + +copy_e2e_files(isolation_levels workloads.yaml) diff --git a/tests/e2e/isolation_levels/isolation_levels.cpp b/tests/e2e/isolation_levels/isolation_levels.cpp index 2ead05750..751e63594 100644 --- a/tests/e2e/isolation_levels/isolation_levels.cpp +++ b/tests/e2e/isolation_levels/isolation_levels.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -87,18 +87,13 @@ void SwitchToDB(const std::string &name, std::unique_ptr &client) { void SwitchToCleanDB(std::unique_ptr &client) { SwitchToDB("clean", client); } void SwitchToSameDB(std::unique_ptr &main, std::unique_ptr &client) { - MG_ASSERT(main->Execute("SHOW DATABASES;")); + MG_ASSERT(main->Execute("SHOW DATABASE;")); auto dbs = main->FetchAll(); MG_ASSERT(dbs, "Failed to show databases"); - for (const auto &elem : *dbs) { - MG_ASSERT(!elem.empty(), "Show databases wrong output"); - const auto &active = elem[1].ValueString(); - if (active == "*") { - const auto &name = elem[0].ValueString(); - SwitchToDB(std::string(name), client); - break; - } - } + MG_ASSERT(!dbs->empty(), "Show databases wrong output"); + MG_ASSERT(!(*dbs)[0].empty(), "Show databases wrong output"); + const auto &name = (*dbs)[0][0].ValueString(); + SwitchToDB(std::string(name), client); } void TestSnapshotIsolation(std::unique_ptr &client) { diff --git a/tests/e2e/lba_procedures/CMakeLists.txt b/tests/e2e/lba_procedures/CMakeLists.txt index 8e1ebb41b..9547ef430 100644 --- a/tests/e2e/lba_procedures/CMakeLists.txt +++ b/tests/e2e/lba_procedures/CMakeLists.txt @@ -11,3 +11,5 @@ copy_lba_procedures_e2e_python_files(read_permission_queries.py) copy_lba_procedures_e2e_python_files(update_permission_queries.py) add_subdirectory(procedures) + +copy_e2e_files(lba_procedures workloads.yaml) diff --git a/tests/e2e/lba_procedures/show_privileges.py b/tests/e2e/lba_procedures/show_privileges.py index 1021ea7be..247140a60 100644 --- a/tests/e2e/lba_procedures/show_privileges.py +++ b/tests/e2e/lba_procedures/show_privileges.py @@ -40,6 +40,7 @@ BASIC_PRIVILEGES = [ "STORAGE_MODE", "MULTI_DATABASE_EDIT", "MULTI_DATABASE_USE", + "COORDINATOR", ] @@ -63,7 +64,7 @@ def test_lba_procedures_show_privileges_first_user(): cursor = connect(username="Josip", password="").cursor() result = execute_and_fetch_all(cursor, "SHOW PRIVILEGES FOR Josip;") - assert len(result) == 34 + assert len(result) == 35 fine_privilege_results = [res for res in result if res[0] not in BASIC_PRIVILEGES] diff --git a/tests/e2e/load_csv/CMakeLists.txt b/tests/e2e/load_csv/CMakeLists.txt index 368915dbe..6c1ebc38a 100644 --- a/tests/e2e/load_csv/CMakeLists.txt +++ b/tests/e2e/load_csv/CMakeLists.txt @@ -11,3 +11,5 @@ copy_load_csv_e2e_files(simple.csv) copy_load_csv_e2e_python_files(load_csv_nullif.py) copy_load_csv_e2e_files(nullif.csv) + +copy_e2e_files(load_csv workloads.yaml) diff --git a/tests/e2e/magic_functions/CMakeLists.txt b/tests/e2e/magic_functions/CMakeLists.txt index 3ab627e22..0f009f635 100644 --- a/tests/e2e/magic_functions/CMakeLists.txt +++ b/tests/e2e/magic_functions/CMakeLists.txt @@ -8,3 +8,5 @@ copy_magic_functions_e2e_python_files(conftest.py) copy_magic_functions_e2e_python_files(function_example.py) add_subdirectory(functions) + +copy_e2e_files(functions workloads.yaml) diff --git a/tests/e2e/memory/CMakeLists.txt b/tests/e2e/memory/CMakeLists.txt index 3c4cdc279..256107724 100644 --- a/tests/e2e/memory/CMakeLists.txt +++ b/tests/e2e/memory/CMakeLists.txt @@ -49,3 +49,5 @@ target_link_libraries(memgraph__e2e__procedure_memory_limit gflags mgclient mg-u add_executable(memgraph__e2e__procedure_memory_limit_multi_proc procedure_memory_limit_multi_proc.cpp) target_link_libraries(memgraph__e2e__procedure_memory_limit_multi_proc gflags mgclient 
mg-utils mg-io) + +copy_e2e_files(memory workloads.yaml) diff --git a/tests/e2e/mock_api/CMakeLists.txt b/tests/e2e/mock_api/CMakeLists.txt index aa170dc62..ef5845b26 100644 --- a/tests/e2e/mock_api/CMakeLists.txt +++ b/tests/e2e/mock_api/CMakeLists.txt @@ -6,3 +6,5 @@ add_subdirectory(procedures) copy_mock_python_api_e2e_files(common.py) copy_mock_python_api_e2e_files(test_compare_mock.py) + +copy_e2e_files(mock_python_api workloads.yaml) diff --git a/tests/e2e/module_file_manager/CMakeLists.txt b/tests/e2e/module_file_manager/CMakeLists.txt index 84d8845ff..d8eea3f9b 100644 --- a/tests/e2e/module_file_manager/CMakeLists.txt +++ b/tests/e2e/module_file_manager/CMakeLists.txt @@ -2,3 +2,5 @@ find_package(gflags REQUIRED) add_executable(memgraph__e2e__module_file_manager module_file_manager.cpp) target_link_libraries(memgraph__e2e__module_file_manager gflags mgclient mg-utils mg-io Threads::Threads) + +copy_e2e_files(module_file_manager workloads.yaml) diff --git a/tests/e2e/monitoring_server/CMakeLists.txt b/tests/e2e/monitoring_server/CMakeLists.txt index 4c2c441e2..7062e978d 100644 --- a/tests/e2e/monitoring_server/CMakeLists.txt +++ b/tests/e2e/monitoring_server/CMakeLists.txt @@ -6,3 +6,5 @@ target_link_libraries(memgraph__e2e__monitoring_server mgclient mg-utils json gf add_executable(memgraph__e2e__monitoring_server_ssl monitoring_ssl.cpp) target_link_libraries(memgraph__e2e__monitoring_server_ssl mgclient mg-utils json gflags Boost::headers) + +copy_e2e_files(monitoring_server workloads.yaml) diff --git a/tests/e2e/python_query_modules_reloading/CMakeLists.txt b/tests/e2e/python_query_modules_reloading/CMakeLists.txt index ee8f29f90..27320e91b 100644 --- a/tests/e2e/python_query_modules_reloading/CMakeLists.txt +++ b/tests/e2e/python_query_modules_reloading/CMakeLists.txt @@ -6,3 +6,5 @@ copy_query_modules_reloading_procedures_e2e_python_files(common.py) copy_query_modules_reloading_procedures_e2e_python_files(test_reload_query_module.py) add_subdirectory(procedures) + +copy_e2e_files(python_query_modules_reloading workloads.yaml) diff --git a/tests/e2e/queries/CMakeLists.txt b/tests/e2e/queries/CMakeLists.txt index f672b8591..720599a18 100644 --- a/tests/e2e/queries/CMakeLists.txt +++ b/tests/e2e/queries/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_queries_e2e_python_files(common.py) copy_queries_e2e_python_files(queries.py) + +copy_e2e_files(queries workloads.yaml) diff --git a/tests/e2e/query_modules/CMakeLists.txt b/tests/e2e/query_modules/CMakeLists.txt index a97bbf1a5..3af2b80b6 100644 --- a/tests/e2e/query_modules/CMakeLists.txt +++ b/tests/e2e/query_modules/CMakeLists.txt @@ -7,3 +7,5 @@ copy_query_modules_e2e_python_files(conftest.py) copy_query_modules_e2e_python_files(convert_test.py) copy_query_modules_e2e_python_files(mgps_test.py) copy_query_modules_e2e_python_files(schema_test.py) + +copy_e2e_files(query_modules workloads.yaml) diff --git a/tests/e2e/replication/CMakeLists.txt b/tests/e2e/replication/CMakeLists.txt index 39f179a3d..4abd10278 100644 --- a/tests/e2e/replication/CMakeLists.txt +++ b/tests/e2e/replication/CMakeLists.txt @@ -9,11 +9,13 @@ target_link_libraries(memgraph__e2e__replication__indices gflags mgclient mg-uti add_executable(memgraph__e2e__replication__read_write_benchmark read_write_benchmark.cpp) target_link_libraries(memgraph__e2e__replication__read_write_benchmark gflags json mgclient mg-utils mg-io Threads::Threads) -copy_e2e_python_files(replication_show common.py) -copy_e2e_python_files(replication_show conftest.py) 
-copy_e2e_python_files(replication_show show.py) -copy_e2e_python_files(replication_show show_while_creating_invalid_state.py) -copy_e2e_python_files(replication_show edge_delete.py) -copy_e2e_python_files_from_parent_folder(replication_show ".." memgraph.py) -copy_e2e_python_files_from_parent_folder(replication_show ".." interactive_mg_runner.py) -copy_e2e_python_files_from_parent_folder(replication_show ".." mg_utils.py) +copy_e2e_python_files(replication common.py) +copy_e2e_python_files(replication conftest.py) +copy_e2e_python_files(replication show.py) +copy_e2e_python_files(replication show_while_creating_invalid_state.py) +copy_e2e_python_files(replication edge_delete.py) +copy_e2e_python_files_from_parent_folder(replication ".." memgraph.py) +copy_e2e_python_files_from_parent_folder(replication ".." interactive_mg_runner.py) +copy_e2e_python_files_from_parent_folder(replication ".." mg_utils.py) + +copy_e2e_files(replication workloads.yaml) diff --git a/tests/e2e/replication/constraints.cpp b/tests/e2e/replication/constraints.cpp index 01c1217f2..6f7e2991a 100644 --- a/tests/e2e/replication/constraints.cpp +++ b/tests/e2e/replication/constraints.cpp @@ -49,7 +49,7 @@ int main(int argc, char **argv) { const auto label_name = (*data)[0][1].ValueString(); const auto property_name = (*data)[0][2].ValueList()[0].ValueString(); if (label_name != "Node" || property_name != "id") { - LOG_FATAL("{} does NOT hava valid constraint created.", database_endpoint); + LOG_FATAL("{} does NOT have a valid constraint created.", database_endpoint); } } else { LOG_FATAL("Unable to get CONSTRAINT INFO from {}", database_endpoint); diff --git a/tests/e2e/replication/show_while_creating_invalid_state.py b/tests/e2e/replication/show_while_creating_invalid_state.py index a94310f0a..8da0c560a 100644 --- a/tests/e2e/replication/show_while_creating_invalid_state.py +++ b/tests/e2e/replication/show_while_creating_invalid_state.py @@ -308,7 +308,7 @@ def test_basic_recovery(connection): "--bolt-port", "7687", "--log-level=TRACE", - "--storage-recover-on-startup=true", + "--data-recovery-on-startup=true", "--replication-restore-state-on-startup=true", ], "log_file": "main.log", diff --git a/tests/e2e/replication/workloads.yaml b/tests/e2e/replication/workloads.yaml index fc239b221..c455ccc76 100644 --- a/tests/e2e/replication/workloads.yaml +++ b/tests/e2e/replication/workloads.yaml @@ -11,7 +11,7 @@ template_validation_queries: &template_validation_queries template_simple_cluster: &template_simple_cluster cluster: replica_1: - args: [ "--bolt-port", "7688", "--log-level=TRACE" ] + args: [ "--bolt-port", "7688", "--log-level=TRACE"] log_file: "replication-e2e-replica1.log" setup_queries: [ "SET REPLICATION ROLE TO REPLICA WITH PORT 10001;" ] replica_2: @@ -25,6 +25,7 @@ template_simple_cluster: &template_simple_cluster "REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:10001'", "REGISTER REPLICA replica_2 ASYNC TO '127.0.0.1:10002'", ] + template_cluster: &template_cluster cluster: replica_1: @@ -50,7 +51,6 @@ template_cluster: &template_cluster "REGISTER REPLICA replica_2 SYNC TO '127.0.0.1:10002'", "REGISTER REPLICA replica_3 ASYNC TO '127.0.0.1:10003'" ] - <<: *template_validation_queries workloads: - name: "Constraints" diff --git a/tests/e2e/replication_experimental/CMakeLists.txt b/tests/e2e/replication_experimental/CMakeLists.txt new file mode 100644 index 000000000..cd6e09f38 --- /dev/null +++ b/tests/e2e/replication_experimental/CMakeLists.txt @@ -0,0 +1,10 @@ +find_package(gflags REQUIRED) + 
+copy_e2e_python_files(replication_experiment common.py) +copy_e2e_python_files(replication_experiment conftest.py) +copy_e2e_python_files(replication_experiment multitenancy.py) +copy_e2e_python_files_from_parent_folder(replication_experiment ".." memgraph.py) +copy_e2e_python_files_from_parent_folder(replication_experiment ".." interactive_mg_runner.py) +copy_e2e_python_files_from_parent_folder(replication_experiment ".." mg_utils.py) + +copy_e2e_files(replication_experiment workloads.yaml) diff --git a/tests/e2e/replication_experimental/common.py b/tests/e2e/replication_experimental/common.py new file mode 100644 index 000000000..dc104d628 --- /dev/null +++ b/tests/e2e/replication_experimental/common.py @@ -0,0 +1,25 @@ +# Copyright 2022 Memgraph Ltd. +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +# License, and you may not use this file except in compliance with the Business Source License. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +import typing + +import mgclient + + +def execute_and_fetch_all(cursor: mgclient.Cursor, query: str, params: dict = {}) -> typing.List[tuple]: + cursor.execute(query, params) + return cursor.fetchall() + + +def connect(**kwargs) -> mgclient.Connection: + connection = mgclient.connect(**kwargs) + connection.autocommit = True + return connection diff --git a/tests/e2e/replication_experimental/conftest.py b/tests/e2e/replication_experimental/conftest.py new file mode 100644 index 000000000..f91333cbf --- /dev/null +++ b/tests/e2e/replication_experimental/conftest.py @@ -0,0 +1,33 @@ +# Copyright 2022 Memgraph Ltd. +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +# License, and you may not use this file except in compliance with the Business Source License. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +import pytest +from common import connect, execute_and_fetch_all + + +@pytest.fixture(scope="function") +def connection(): + connection_holder = None + role_holder = None + + def inner_connection(port, role): + nonlocal connection_holder, role_holder + connection_holder = connect(host="localhost", port=port) + role_holder = role + return connection_holder + + yield inner_connection + + # Only main instance can be cleaned up because replicas do NOT accept + # writes. + if role_holder == "main": + cursor = connection_holder.cursor() + execute_and_fetch_all(cursor, "MATCH (n) DETACH DELETE n;") diff --git a/tests/e2e/replication_experimental/multitenancy.py b/tests/e2e/replication_experimental/multitenancy.py new file mode 100644 index 000000000..7eb699341 --- /dev/null +++ b/tests/e2e/replication_experimental/multitenancy.py @@ -0,0 +1,1046 @@ +# Copyright 2022 Memgraph Ltd. 
+# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +# License, and you may not use this file except in compliance with the Business Source License. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +import atexit +import os +import shutil +import sys +import tempfile +import time +from functools import partial + +import interactive_mg_runner +import mgclient +import pytest +from common import execute_and_fetch_all +from mg_utils import mg_sleep_and_assert + +interactive_mg_runner.SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +interactive_mg_runner.PROJECT_DIR = os.path.normpath( + os.path.join(interactive_mg_runner.SCRIPT_DIR, "..", "..", "..", "..") +) +interactive_mg_runner.BUILD_DIR = os.path.normpath(os.path.join(interactive_mg_runner.PROJECT_DIR, "build")) +interactive_mg_runner.MEMGRAPH_BINARY = os.path.normpath(os.path.join(interactive_mg_runner.BUILD_DIR, "memgraph")) + +BOLT_PORTS = {"main": 7687, "replica_1": 7688, "replica_2": 7689} +REPLICATION_PORTS = {"replica_1": 10001, "replica_2": 10002} + +MEMGRAPH_INSTANCES_DESCRIPTION = { + "replica_1": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_1']}", "--log-level=TRACE"], + "log_file": "replica1.log", + "setup_queries": [f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};"], + }, + "replica_2": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_2']}", "--log-level=TRACE"], + "log_file": "replica2.log", + "setup_queries": [f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_2']};"], + }, + "main": { + "args": ["--bolt-port", f"{BOLT_PORTS['main']}", "--log-level=TRACE"], + "log_file": "main.log", + "setup_queries": [ + f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';", + f"REGISTER REPLICA replica_2 ASYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_2']}';", + ], + }, +} + +TEMP_DIR = tempfile.TemporaryDirectory().name + +MEMGRAPH_INSTANCES_DESCRIPTION_WITH_RECOVERY = { + "replica_1": { + "args": [ + "--bolt-port", + f"{BOLT_PORTS['replica_1']}", + "--log-level=TRACE", + "--replication-restore-state-on-startup", + "--data-recovery-on-startup", + ], + "log_file": "replica1.log", + "data_directory": TEMP_DIR + "/replica1", + }, + "replica_2": { + "args": [ + "--bolt-port", + f"{BOLT_PORTS['replica_2']}", + "--log-level=TRACE", + "--replication-restore-state-on-startup", + "--data-recovery-on-startup", + ], + "log_file": "replica2.log", + "data_directory": TEMP_DIR + "/replica2", + }, + "main": { + "args": [ + "--bolt-port", + f"{BOLT_PORTS['main']}", + "--log-level=TRACE", + "--replication-restore-state-on-startup", + "--data-recovery-on-startup", + ], + "log_file": "main.log", + "data_directory": TEMP_DIR + "/main", + }, +} + + +def safe_execute(function, *args): + try: + function(*args) + except: + pass + + +def setup_replication(connection): + # Setup replica1 + cursor = connection(BOLT_PORTS["replica_1"], "replica").cursor() + execute_and_fetch_all(cursor, f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};") + # Setup replica2 + cursor = connection(BOLT_PORTS["replica_2"], "replica").cursor() + execute_and_fetch_all(cursor, f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_2']};") + # 
Setup main + cursor = connection(BOLT_PORTS["main"], "main").cursor() + execute_and_fetch_all(cursor, f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';") + execute_and_fetch_all(cursor, f"REGISTER REPLICA replica_2 ASYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_2']}';") + + +def setup_main(main_cursor): + execute_and_fetch_all(main_cursor, "USE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'A'});") + execute_and_fetch_all(main_cursor, "CREATE (:Node)-[:EDGE]->(:Node)") + execute_and_fetch_all(main_cursor, "CREATE (:Node)-[:EDGE]->(:Node)") + execute_and_fetch_all(main_cursor, "CREATE (:Node)-[:EDGE]->(:Node)") + + execute_and_fetch_all(main_cursor, "USE DATABASE B;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'B'});") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'B'});") + + +def show_replicas_func(cursor, db_name): + def func(): + execute_and_fetch_all(cursor, f"USE DATABASE {db_name};") + return set(execute_and_fetch_all(cursor, "SHOW REPLICAS;")) + + return func + + +def show_databases_func(cursor): + def func(): + return execute_and_fetch_all(cursor, "SHOW DATABASES;") + + return func + + +def get_number_of_nodes_func(cursor, db_name): + def func(): + execute_and_fetch_all(cursor, f"USE DATABASE {db_name};") + return execute_and_fetch_all(cursor, "MATCH (n) RETURN count(*);")[0][0] + + return func + + +def get_number_of_edges_func(cursor, db_name): + def func(): + execute_and_fetch_all(cursor, f"USE DATABASE {db_name};") + return execute_and_fetch_all(cursor, "MATCH ()-[r]->() RETURN count(*);")[0][0] + + return func + + +def test_manual_databases_create_multitenancy_replication(connection): + # Goal: to show that replication can be established against REPLICA which already + # has the clean databases we need + # 0/ MAIN CREATE DATABASE A + B + # REPLICA CREATE DATABASE A + B + # Setup replication + # 1/ Write to MAIN A, Write to MAIN B + # 2/ Validate replication of changes to A + B have arrived at REPLICA + + MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL = { + "replica_1": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_1']}", "--log-level=TRACE"], + "log_file": "replica1.log", + "setup_queries": [ + "CREATE DATABASE A;", + "CREATE DATABASE B;", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};", + ], + }, + "replica_2": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_2']}", "--log-level=TRACE"], + "log_file": "replica2.log", + "setup_queries": [ + "CREATE DATABASE A;", + "CREATE DATABASE B;", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_2']};", + ], + }, + "main": { + "args": ["--bolt-port", f"{BOLT_PORTS['main']}", "--log-level=TRACE"], + "log_file": "main.log", + "setup_queries": [ + "CREATE DATABASE A;", + "CREATE DATABASE B;", + f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';", + f"REGISTER REPLICA replica_2 ASYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_2']}';", + ], + }, + } + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL) + cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(cursor, "USE DATABASE A;") + execute_and_fetch_all(cursor, "CREATE ();") + execute_and_fetch_all(cursor, "USE DATABASE B;") + execute_and_fetch_all(cursor, "CREATE ()-[:EDGE]->();") + + # 2/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 
1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(cursor, "A")) + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(cursor, "B")) + + cursor_replica = connection(BOLT_PORTS["replica_1"], "replica").cursor() + assert get_number_of_nodes_func(cursor_replica, "A")() == 1 + assert get_number_of_edges_func(cursor_replica, "A")() == 0 + assert get_number_of_nodes_func(cursor_replica, "B")() == 2 + assert get_number_of_edges_func(cursor_replica, "B")() == 1 + + cursor_replica2 = connection(BOLT_PORTS["replica_1"], "replica_2").cursor() + assert get_number_of_nodes_func(cursor_replica2, "A")() == 1 + assert get_number_of_edges_func(cursor_replica2, "A")() == 0 + assert get_number_of_nodes_func(cursor_replica2, "B")() == 2 + assert get_number_of_edges_func(cursor_replica2, "B")() == 1 + + +def test_manual_databases_create_multitenancy_replication_branching(connection): + # Goal: to show that replication can be established against REPLICA which already + # has all the databases and the same data + # 0/ MAIN CREATE DATABASE A + B and fill with data + # REPLICA CREATE DATABASE A + B and fil with exact data + # Setup REPLICA + # 1/ Registering REPLICA on MAIN should not fail due to tenant branching + + MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL = { + "replica_1": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_1']}", "--log-level=TRACE"], + "log_file": "replica1.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE ()", + "CREATE DATABASE B;", + "USE DATABASE B;", + "CREATE ()-[:EDGE]->()", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};", + ], + }, + "replica_2": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_2']}", "--log-level=TRACE"], + "log_file": "replica2.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE ()", + "CREATE DATABASE B;", + "USE DATABASE B;", + "CREATE ()-[:EDGE]->()", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_2']};", + ], + }, + "main": { + "args": ["--bolt-port", f"{BOLT_PORTS['main']}", "--log-level=TRACE"], + "log_file": "main.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE ()", + "CREATE DATABASE B;", + "USE DATABASE B;", + "CREATE ()-[:EDGE]->()", + ], + }, + } + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL) + cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + failed = False + try: + execute_and_fetch_all( + cursor, f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';" + ) + except mgclient.DatabaseError: + failed = True + assert not failed + + try: + execute_and_fetch_all( + cursor, f"REGISTER REPLICA replica_2 ASYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_2']}';" + ) + except mgclient.DatabaseError: + failed = True + assert not failed + + +def test_manual_databases_create_multitenancy_replication_dirty_replica(connection): + # Goal: to show that replication can be established against REPLICA which already + # has all the databases we need, even when they branched + # 0/ MAIN CREATE DATABASE A + # REPLICA CREATE DATABASE A + # REPLICA write to A + # Setup REPLICA + # 1/ Register replica; should fail + + MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL = { + "replica_1": { + "args": ["--bolt-port", 
f"{BOLT_PORTS['replica_1']}", "--log-level=TRACE"], + "log_file": "replica1.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE (:Node{from:'A'})", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};", + ], + }, + "replica_2": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_2']}", "--log-level=TRACE"], + "log_file": "replica2.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE (:Node{from:'A'})", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_2']};", + ], + }, + "main": { + "args": ["--bolt-port", f"{BOLT_PORTS['main']}", "--log-level=TRACE"], + "log_file": "main.log", + "setup_queries": [ + "CREATE DATABASE A;", + ], + }, + } + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL) + cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + failed = False + try: + execute_and_fetch_all( + cursor, f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';" + ) + except mgclient.DatabaseError: + failed = True + assert not failed + + try: + execute_and_fetch_all( + cursor, f"REGISTER REPLICA replica_2 ASYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_2']}';" + ) + except mgclient.DatabaseError: + failed = True + assert not failed + + +def test_manual_databases_create_multitenancy_replication_main_behind(connection): + # Goal: to show that replication can be established against REPLICA which has + # different branched databases + # 0/ REPLICA CREATE DATABASE A + # REPLICA write to A + # Setup replication + # 1/ MAIN CREATE DATABASE A + # 2/ Check that database has been replicated + + MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL = { + "replica_1": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_1']}", "--log-level=TRACE"], + "log_file": "replica1.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE (:Node{from:'A'})", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};", + ], + }, + "replica_2": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_2']}", "--log-level=TRACE"], + "log_file": "replica2.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE (:Node{from:'A'})", + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_2']};", + ], + }, + "main": { + "args": ["--bolt-port", f"{BOLT_PORTS['main']}", "--log-level=TRACE"], + "log_file": "main.log", + "setup_queries": [ + f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';", + f"REGISTER REPLICA replica_2 ASYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_2']}';", + ], + }, + } + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + + # 2/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 0, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 0, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + databases_on_main = show_databases_func(main_cursor)() + + replica_cursor = connection(BOLT_PORTS["replica_1"], "replica").cursor() + mg_sleep_and_assert(databases_on_main, show_databases_func(replica_cursor)) + + replica_cursor = connection(BOLT_PORTS["replica_2"], "replica").cursor() + mg_sleep_and_assert(databases_on_main, show_databases_func(replica_cursor)) + + +def 
test_automatic_databases_create_multitenancy_replication(connection): + # Goal: to show that replication can be established against REPLICA where a new databases + # needs replication + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A + # 2/ Write to MAIN A + # 3/ Validate replication of changes to A have arrived at REPLICA + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + execute_and_fetch_all(main_cursor, "USE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'A'});") + execute_and_fetch_all(main_cursor, "CREATE (:Node)-[:EDGE]->(:Node)") + execute_and_fetch_all(main_cursor, "CREATE (:Node)-[:EDGE]->(:Node)") + execute_and_fetch_all(main_cursor, "CREATE (:Node)-[:EDGE]->(:Node)") + + # 3/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 7, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 7, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 0, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 0, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "B")) + + cursor_replica = connection(BOLT_PORTS["replica_1"], "replica").cursor() + assert get_number_of_nodes_func(cursor_replica, "A")() == 7 + assert get_number_of_edges_func(cursor_replica, "A")() == 3 + assert get_number_of_nodes_func(cursor_replica, "B")() == 0 + assert get_number_of_edges_func(cursor_replica, "B")() == 0 + + cursor_replica = connection(BOLT_PORTS["replica_2"], "replica").cursor() + assert get_number_of_nodes_func(cursor_replica, "A")() == 7 + assert get_number_of_edges_func(cursor_replica, "A")() == 3 + assert get_number_of_nodes_func(cursor_replica, "B")() == 0 + assert get_number_of_edges_func(cursor_replica, "B")() == 0 + + +def test_automatic_databases_multitenancy_replication_predefined(connection): + # Goal: to show that replication can be established against REPLICA which doesn't + # have any additional databases; MAIN's database clean at registration time + # 0/ MAIN CREATE DATABASE A + B + # Setup replication + # 1/ Write to MAIN A, Write to MAIN B + # 2/ Validate replication of changes to A + B have arrived at REPLICA + + MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL = { + "replica_1": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_1']}", "--log-level=TRACE"], + "log_file": "replica1.log", + "setup_queries": [ + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};", + ], + }, + "main": { + "args": ["--bolt-port", f"{BOLT_PORTS['main']}", "--log-level=TRACE"], + "log_file": "main.log", + "setup_queries": [ + "CREATE DATABASE A;", + "CREATE DATABASE B;", + f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';", + ], + }, + } + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL) + cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(cursor, "USE DATABASE A;") + execute_and_fetch_all(cursor, "CREATE ();") + execute_and_fetch_all(cursor, "USE DATABASE B;") + execute_and_fetch_all(cursor, "CREATE ()-[:EDGE]->();") + + # 2/ + expected_data = { + ("replica_1", 
f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(cursor, "A")) + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(cursor, "B")) + + cursor_replica = connection(BOLT_PORTS["replica_1"], "replica").cursor() + assert get_number_of_nodes_func(cursor_replica, "A")() == 1 + assert get_number_of_edges_func(cursor_replica, "A")() == 0 + + +def test_automatic_databases_create_multitenancy_replication_dirty_main(connection): + # Goal: to show that replication can be established against REPLICA which doesn't + # have any additional databases; MAIN's database dirty at registration time + # 0/ MAIN CREATE DATABASE A + # MAIN write to A + # Setup replication + # 1/ Validate + + MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL = { + "replica_1": { + "args": ["--bolt-port", f"{BOLT_PORTS['replica_1']}", "--log-level=TRACE"], + "log_file": "replica1.log", + "setup_queries": [ + f"SET REPLICATION ROLE TO REPLICA WITH PORT {REPLICATION_PORTS['replica_1']};", + ], + }, + "main": { + "args": ["--bolt-port", f"{BOLT_PORTS['main']}", "--log-level=TRACE"], + "log_file": "main.log", + "setup_queries": [ + "CREATE DATABASE A;", + "USE DATABASE A;", + "CREATE (:Node{from:'A'})", + f"REGISTER REPLICA replica_1 SYNC TO '127.0.0.1:{REPLICATION_PORTS['replica_1']}';", + ], + }, + } + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_MANUAL) + cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(cursor, "A")) + + cursor_replica = connection(BOLT_PORTS["replica_1"], "replica").cursor() + execute_and_fetch_all(cursor_replica, "USE DATABASE A;") + actual_data = execute_and_fetch_all(cursor_replica, "MATCH (n) RETURN count(*);") + assert actual_data[0][0] == 1 # one node + actual_data = execute_and_fetch_all(cursor_replica, "MATCH ()-[r]->() RETURN count(*);") + assert actual_data[0][0] == 0 # zero relationships + + +@pytest.mark.parametrize("replica_name", [("replica_1"), ("replica_2")]) +def test_multitenancy_replication_restart_replica_w_fc(connection, replica_name): + # Goal: show that a replica can be recovered with the frequent checker + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A and B + # 2/ Write on MAIN to A and B + # 3/ Restart replica + # 4/ Validate data on replica + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + setup_main(main_cursor) + + # 3/ + interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, replica_name) + time.sleep(3) # In order for the frequent check to run + # Check that the FC did invalidate + expected_data = { + "replica_1": { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 0, 0, "invalid"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 7, 0, "ready"), + }, + "replica_2": { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 7, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 0, 0, "invalid"), + }, + } + assert expected_data[replica_name] == show_replicas_func(main_cursor, 
"A")() + # Restart + interactive_mg_runner.start(MEMGRAPH_INSTANCES_DESCRIPTION, replica_name) + + # 4/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 7, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 7, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 3, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 3, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "B")) + + cursor_replica = connection(BOLT_PORTS[replica_name], "replica").cursor() + + assert get_number_of_nodes_func(cursor_replica, "A")() == 7 + assert get_number_of_edges_func(cursor_replica, "A")() == 3 + assert get_number_of_nodes_func(cursor_replica, "B")() == 2 + assert get_number_of_edges_func(cursor_replica, "B")() == 0 + + +@pytest.mark.parametrize("replica_name", [("replica_1"), ("replica_2")]) +def test_multitenancy_replication_restart_replica_wo_fc(connection, replica_name): + # Goal: show that a replica can be recovered without the frequent checker detecting it being down + # needs replicating over + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A and B + # 2/ Write on MAIN to A and B + # 3/ Restart replica + # 4/ Validate data on replica + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + setup_main(main_cursor) + + # 3/ + interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, replica_name) + interactive_mg_runner.start(MEMGRAPH_INSTANCES_DESCRIPTION, replica_name) + + # 4/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 7, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 7, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 3, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 3, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "B")) + + cursor_replica = connection(BOLT_PORTS[replica_name], "replica").cursor() + assert get_number_of_nodes_func(cursor_replica, "A")() == 7 + assert get_number_of_edges_func(cursor_replica, "A")() == 3 + assert get_number_of_nodes_func(cursor_replica, "B")() == 2 + assert get_number_of_edges_func(cursor_replica, "B")() == 0 + + +@pytest.mark.parametrize("replica_name", [("replica_1"), ("replica_2")]) +def test_multitenancy_replication_restart_replica_w_fc_w_rec(connection, replica_name): + # Goal: show that a replica recovers data on reconnect + # needs replicating over + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A and B + # 2/ Write on MAIN to A and B + # 3/ Restart replica + # 4/ Validate data on replica + + # 0/ + # Tmp dir should already be removed, but sometimes its not... 
+ safe_execute(shutil.rmtree, TEMP_DIR) + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_WITH_RECOVERY) + setup_replication(connection) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + setup_main(main_cursor) + + # 3/ + interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION_WITH_RECOVERY, replica_name) + safe_execute(execute_and_fetch_all, main_cursor, "USE DATABASE A;") + safe_execute(execute_and_fetch_all, main_cursor, "CREATE (:Node{on:'A'});") + safe_execute(execute_and_fetch_all, main_cursor, "USE DATABASE B;") + safe_execute(execute_and_fetch_all, main_cursor, "CREATE (:Node{on:'B'});") + interactive_mg_runner.start(MEMGRAPH_INSTANCES_DESCRIPTION_WITH_RECOVERY, replica_name) + + # 4/ + cursor_replica = connection(BOLT_PORTS[replica_name], "replica").cursor() + + mg_sleep_and_assert(8, get_number_of_nodes_func(cursor_replica, "A")) + mg_sleep_and_assert(3, get_number_of_edges_func(cursor_replica, "A")) + + mg_sleep_and_assert(3, get_number_of_nodes_func(cursor_replica, "B")) + mg_sleep_and_assert(0, get_number_of_edges_func(cursor_replica, "B")) + + +@pytest.mark.parametrize("replica_name", [("replica_1"), ("replica_2")]) +def test_multitenancy_replication_drop_replica(connection, replica_name): + # Goal: show that the cluster can recover if a replica is dropped and registered again + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A and B + # 2/ Write on MAIN to A and B + # 3/ Drop and add the same replica + # 4/ Validate data on replica + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + setup_main(main_cursor) + + # 3/ + execute_and_fetch_all(main_cursor, f"DROP REPLICA {replica_name};") + sync = {"replica_1": "SYNC", "replica_2": "ASYNC"} + execute_and_fetch_all( + main_cursor, + f"REGISTER REPLICA {replica_name} {sync[replica_name]} TO '127.0.0.1:{REPLICATION_PORTS[replica_name]}';", + ) + + # 4/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 7, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 7, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 3, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 3, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "B")) + + cursor_replica = connection(BOLT_PORTS[replica_name], "replica").cursor() + assert get_number_of_nodes_func(cursor_replica, "A")() == 7 + assert get_number_of_edges_func(cursor_replica, "A")() == 3 + assert get_number_of_nodes_func(cursor_replica, "B")() == 2 + assert get_number_of_edges_func(cursor_replica, "B")() == 0 + + +def test_multitenancy_replication_restart_main(connection): + # Goal: show that the cluster can restore to a correct state if the MAIN restarts + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A and B + # 2/ Write on MAIN to A and B + # 3/ Restart main and write new data + # 4/ Validate data on replica + + # 0/ + # Tmp dir should already be removed, but sometimes its not... 
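# The data checks above go through small per-database closures so that mg_sleep_and_assert
# can re-run them while polling. A plausible shape for these helpers is sketched here,
# assuming the common execute_and_fetch_all helper imported by this test module; the exact
# projection of SHOW REPLICAS output into (name, socket_address, sync_mode, timestamp,
# behind, status) tuples is inferred from the assertions and may differ from the
# repository's actual implementation.
def show_replicas_func(cursor, db_name):
    def func():
        execute_and_fetch_all(cursor, f"USE DATABASE {db_name};")
        return set(execute_and_fetch_all(cursor, "SHOW REPLICAS;"))

    return func


def get_number_of_nodes_func(cursor, db_name):
    def func():
        execute_and_fetch_all(cursor, f"USE DATABASE {db_name};")
        return execute_and_fetch_all(cursor, "MATCH (n) RETURN count(*);")[0][0]

    return func


def get_number_of_edges_func(cursor, db_name):
    def func():
        execute_and_fetch_all(cursor, f"USE DATABASE {db_name};")
        return execute_and_fetch_all(cursor, "MATCH ()-[r]->() RETURN count(*);")[0][0]

    return func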
+ safe_execute(shutil.rmtree, TEMP_DIR) + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION_WITH_RECOVERY) + setup_replication(connection) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + setup_main(main_cursor) + + # 3/ + interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION_WITH_RECOVERY, "main") + interactive_mg_runner.start(MEMGRAPH_INSTANCES_DESCRIPTION_WITH_RECOVERY, "main") + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + execute_and_fetch_all(main_cursor, "USE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'A'});") + execute_and_fetch_all(main_cursor, "USE DATABASE B;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'B'});") + + # 4/ + cursor_replica = connection(BOLT_PORTS["replica_1"], "replica").cursor() + execute_and_fetch_all(cursor_replica, "USE DATABASE A;") + assert get_number_of_nodes_func(cursor_replica, "A")() == 8 + assert get_number_of_edges_func(cursor_replica, "A")() == 3 + assert get_number_of_nodes_func(cursor_replica, "B")() == 3 + assert get_number_of_edges_func(cursor_replica, "B")() == 0 + + cursor_replica = connection(BOLT_PORTS["replica_2"], "replica").cursor() + execute_and_fetch_all(cursor_replica, "USE DATABASE A;") + assert get_number_of_nodes_func(cursor_replica, "A")() == 8 + assert get_number_of_edges_func(cursor_replica, "A")() == 3 + assert get_number_of_nodes_func(cursor_replica, "B")() == 3 + assert get_number_of_edges_func(cursor_replica, "B")() == 0 + + +def test_automatic_databases_drop_multitenancy_replication(connection): + # Goal: show that drop database can be replicated + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A + # 2/ Write to MAIN A + # 3/ Validate replication of changes to A have arrived at REPLICA + # 4/ DROP DATABASE A/B + # 5/ Check that the drop replicated + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + execute_and_fetch_all(main_cursor, "USE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'A'});") + + # 3/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 0, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 0, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "B")) + + # 4/ + execute_and_fetch_all(main_cursor, "USE DATABASE memgraph;") + execute_and_fetch_all(main_cursor, "DROP DATABASE A;") + execute_and_fetch_all(main_cursor, "DROP DATABASE B;") + + # 5/ + databases_on_main = show_databases_func(main_cursor)() + + replica_cursor = connection(BOLT_PORTS["replica_1"], "replica").cursor() + mg_sleep_and_assert(databases_on_main, show_databases_func(replica_cursor)) + + replica_cursor = connection(BOLT_PORTS["replica_2"], "replica").cursor() + mg_sleep_and_assert(databases_on_main, show_databases_func(replica_cursor)) + + +@pytest.mark.parametrize("replica_name", [("replica_1"), 
("replica_2")]) +def test_drop_multitenancy_replication_restart_replica(connection, replica_name): + # Goal: show that the drop database can be restored + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A and B + # 2/ Write on MAIN to A and B + # 3/ Restart SYNC replica and drop database + # 4/ Validate data on replica + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + + # 2/ + setup_main(main_cursor) + + # 3/ + interactive_mg_runner.kill(MEMGRAPH_INSTANCES_DESCRIPTION, replica_name) + execute_and_fetch_all(main_cursor, "USE DATABASE memgraph;") + execute_and_fetch_all(main_cursor, "DROP DATABASE B;") + interactive_mg_runner.start(MEMGRAPH_INSTANCES_DESCRIPTION, replica_name) + + # 4/ + databases_on_main = show_databases_func(main_cursor)() + + replica_cursor = connection(BOLT_PORTS["replica_1"], "replica").cursor() + mg_sleep_and_assert(databases_on_main, show_databases_func(replica_cursor)) + + replica_cursor = connection(BOLT_PORTS["replica_2"], "replica").cursor() + mg_sleep_and_assert(databases_on_main, show_databases_func(replica_cursor)) + + +def test_multitenancy_drop_while_replica_using(connection): + # Goal: show that the replica can handle a transaction on a database being dropped (will persist until tx finishes) + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A + # 2/ Write to MAIN A + # 3/ Validate replication of changes to A have arrived at REPLICA + # 4/ Start A transaction on replica 1, Use A on replica2 + # 5/ Check that the drop replicated + # 6/ Validate that the transaction is still active and working and that the replica2 is not pointing to anything + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + + # 2/ + execute_and_fetch_all(main_cursor, "USE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'A'});") + + # 3/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + # 4/ + replica1_cursor = connection(BOLT_PORTS["replica_1"], "replica").cursor() + replica2_cursor = connection(BOLT_PORTS["replica_2"], "replica").cursor() + + execute_and_fetch_all(replica1_cursor, "USE DATABASE A;") + execute_and_fetch_all(replica1_cursor, "BEGIN") + execute_and_fetch_all(replica2_cursor, "USE DATABASE A;") + + execute_and_fetch_all(main_cursor, "USE DATABASE memgraph;") + execute_and_fetch_all(main_cursor, "DROP DATABASE A;") + + # 5/ + # TODO Remove this once there is a replica state for the system + execute_and_fetch_all(main_cursor, "CREATE DATABASE B;") + execute_and_fetch_all(main_cursor, "USE DATABASE B;") + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 0, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 0, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "B")) + + # 6/ + assert execute_and_fetch_all(replica1_cursor, "MATCH(n) RETURN count(*);")[0][0] == 1 + execute_and_fetch_all(replica1_cursor, "COMMIT") + failed = False + try: + 
execute_and_fetch_all(replica1_cursor, "MATCH(n) RETURN n;") + except mgclient.DatabaseError: + failed = True + assert failed + + failed = False + try: + execute_and_fetch_all(replica2_cursor, "MATCH(n) RETURN n;") + except mgclient.DatabaseError: + failed = True + assert failed + + +def test_multitenancy_drop_and_recreate_while_replica_using(connection): + # Goal: show that the replica can handle a transaction on a database being dropped and the same name reused + # Original storage should persist in a nameless state until tx is over + # needs replicating over + # 0/ Setup replication + # 1/ MAIN CREATE DATABASE A + # 2/ Write to MAIN A + # 3/ Validate replication of changes to A have arrived at REPLICA + # 4/ Start A transaction on replica 1, Use A on replica2 + # 5/ Check that the drop/create replicated + # 6/ Validate that the transaction is still active and working and that the replica2 is not pointing to anything + + # 0/ + interactive_mg_runner.start_all(MEMGRAPH_INSTANCES_DESCRIPTION) + main_cursor = connection(BOLT_PORTS["main"], "main").cursor() + + # 1/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + + # 2/ + execute_and_fetch_all(main_cursor, "USE DATABASE A;") + execute_and_fetch_all(main_cursor, "CREATE (:Node{on:'A'});") + + # 3/ + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 1, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 1, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + # 4/ + replica1_cursor = connection(BOLT_PORTS["replica_1"], "replica").cursor() + replica2_cursor = connection(BOLT_PORTS["replica_2"], "replica").cursor() + + execute_and_fetch_all(replica1_cursor, "USE DATABASE A;") + execute_and_fetch_all(replica1_cursor, "BEGIN") + execute_and_fetch_all(replica2_cursor, "USE DATABASE A;") + + execute_and_fetch_all(main_cursor, "USE DATABASE memgraph;") + execute_and_fetch_all(main_cursor, "DROP DATABASE A;") + + # 5/ + execute_and_fetch_all(main_cursor, "CREATE DATABASE A;") + execute_and_fetch_all(main_cursor, "USE DATABASE A;") + + expected_data = { + ("replica_1", f"127.0.0.1:{REPLICATION_PORTS['replica_1']}", "sync", 0, 0, "ready"), + ("replica_2", f"127.0.0.1:{REPLICATION_PORTS['replica_2']}", "async", 0, 0, "ready"), + } + mg_sleep_and_assert(expected_data, show_replicas_func(main_cursor, "A")) + + # 6/ + assert execute_and_fetch_all(replica1_cursor, "MATCH(n) RETURN count(*);")[0][0] == 1 + execute_and_fetch_all(replica1_cursor, "COMMIT") + failed = False + try: + execute_and_fetch_all(replica1_cursor, "MATCH(n) RETURN n;") + except mgclient.DatabaseError: + failed = True + assert failed + + failed = False + try: + execute_and_fetch_all(replica2_cursor, "MATCH(n) RETURN n;") + except mgclient.DatabaseError: + failed = True + assert failed + + +if __name__ == "__main__": + interactive_mg_runner.cleanup_directories_on_exit() + sys.exit(pytest.main([__file__, "-rA"])) diff --git a/tests/e2e/replication_experimental/workloads.yaml b/tests/e2e/replication_experimental/workloads.yaml new file mode 100644 index 000000000..e48515f4f --- /dev/null +++ b/tests/e2e/replication_experimental/workloads.yaml @@ -0,0 +1,4 @@ +workloads: + - name: "Replicate multitenancy" + binary: "tests/e2e/pytest_runner.sh" + args: ["replication_experimental/multitenancy.py"] diff --git a/tests/e2e/run.sh b/tests/e2e/run.sh index 1aba6a517..88b70ae32 100755 --- a/tests/e2e/run.sh +++ b/tests/e2e/run.sh @@ -25,7 +25,7 @@ if [ "$#" -eq 0 ]; then # 
NOTE: If you want to run all tests under specific folder/section just # replace the dot (root directory below) with the folder name, e.g. # `--workloads-root-directory replication`. - python3 runner.py --workloads-root-directory . + python3 runner.py --workloads-root-directory "$SCRIPT_DIR/../../build" elif [ "$#" -eq 1 ]; then if [ "$1" == "-h" ] || [ "$1" == "--help" ]; then print_help @@ -34,7 +34,7 @@ elif [ "$#" -eq 1 ]; then # NOTE: --workload-name comes from each individual folder/section # workloads.yaml file. E.g. `streams/workloads.yaml` has a list of # `workloads:` and each workload has it's `-name`. - python3 runner.py --workloads-root-directory . --workload-name "$1" + python3 runner.py --workloads-root-directory "$SCRIPT_DIR/../../build" --workload-name "$1" else print_help fi diff --git a/tests/e2e/runner.py b/tests/e2e/runner.py index 949670d43..ae022d4d8 100755 --- a/tests/e2e/runner.py +++ b/tests/e2e/runner.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + # Copyright 2021 Memgraph Ltd. # # Use of this software is governed by the Business Source License diff --git a/tests/e2e/server/CMakeLists.txt b/tests/e2e/server/CMakeLists.txt index a408f4a2e..2e62f2035 100644 --- a/tests/e2e/server/CMakeLists.txt +++ b/tests/e2e/server/CMakeLists.txt @@ -6,3 +6,5 @@ target_link_libraries(memgraph__e2e__server_connection mgclient mg-utils gflags) add_executable(memgraph__e2e__server_ssl_connection server_ssl_connection.cpp) target_link_libraries(memgraph__e2e__server_ssl_connection mgclient mg-utils gflags) + +copy_e2e_files(server workloads.yaml) diff --git a/tests/e2e/set_properties/CMakeLists.txt b/tests/e2e/set_properties/CMakeLists.txt index 10cc03584..66a8039b7 100644 --- a/tests/e2e/set_properties/CMakeLists.txt +++ b/tests/e2e/set_properties/CMakeLists.txt @@ -6,3 +6,5 @@ copy_set_properties_e2e_python_files(common.py) copy_set_properties_e2e_python_files(set_properties.py) add_subdirectory(procedures) + +copy_e2e_files(set_properties workloads.yaml) diff --git a/tests/e2e/show_index_info/CMakeLists.txt b/tests/e2e/show_index_info/CMakeLists.txt index dd9bd28bb..b5d154355 100644 --- a/tests/e2e/show_index_info/CMakeLists.txt +++ b/tests/e2e/show_index_info/CMakeLists.txt @@ -4,3 +4,5 @@ endfunction() copy_show_index_info_e2e_python_files(common.py) copy_show_index_info_e2e_python_files(test_show_index_info.py) + +copy_e2e_files(show_index_info workloads.yaml) diff --git a/tests/e2e/streams/CMakeLists.txt b/tests/e2e/streams/CMakeLists.txt index 3c0ffac98..cbca225f7 100644 --- a/tests/e2e/streams/CMakeLists.txt +++ b/tests/e2e/streams/CMakeLists.txt @@ -11,3 +11,5 @@ copy_streams_e2e_python_files(pulsar_streams_tests.py) add_subdirectory(transformations) copy_e2e_python_files_from_parent_folder(streams ".." 
mg_utils.py) + +copy_e2e_files(streams workloads.yaml) diff --git a/tests/e2e/temporal_types/CMakeLists.txt b/tests/e2e/temporal_types/CMakeLists.txt index aad9561fe..dac9c2000 100644 --- a/tests/e2e/temporal_types/CMakeLists.txt +++ b/tests/e2e/temporal_types/CMakeLists.txt @@ -4,3 +4,4 @@ find_package(gflags REQUIRED) add_executable(memgraph__e2e__temporal_roundtrip roundtrip.cpp) target_link_libraries(memgraph__e2e__temporal_roundtrip PUBLIC mgclient mg-utils gflags) +copy_e2e_files(temporal_roundtrip workloads.yaml) diff --git a/tests/e2e/transaction_queue/CMakeLists.txt b/tests/e2e/transaction_queue/CMakeLists.txt index 574c46bfd..f2e7db170 100644 --- a/tests/e2e/transaction_queue/CMakeLists.txt +++ b/tests/e2e/transaction_queue/CMakeLists.txt @@ -6,3 +6,5 @@ copy_query_modules_reloading_procedures_e2e_python_files(common.py) copy_query_modules_reloading_procedures_e2e_python_files(test_transaction_queue.py) add_subdirectory(procedures) + +copy_e2e_files(transaction_queue workloads.yaml) diff --git a/tests/e2e/transaction_rollback/CMakeLists.txt b/tests/e2e/transaction_rollback/CMakeLists.txt index a64d3bfeb..4b9fd289f 100644 --- a/tests/e2e/transaction_rollback/CMakeLists.txt +++ b/tests/e2e/transaction_rollback/CMakeLists.txt @@ -7,3 +7,5 @@ transaction_rollback_e2e_python_files(conftest.py) transaction_rollback_e2e_python_files(transaction.py) add_subdirectory(procedures) + +copy_e2e_files(transaction_rollback workloads.yaml) diff --git a/tests/e2e/triggers/CMakeLists.txt b/tests/e2e/triggers/CMakeLists.txt index 7b540d59f..8f5fe7676 100644 --- a/tests/e2e/triggers/CMakeLists.txt +++ b/tests/e2e/triggers/CMakeLists.txt @@ -27,3 +27,5 @@ endfunction() copy_triggers_e2e_python_files(common.py) copy_triggers_e2e_python_files(triggers_properties_false.py) + +copy_e2e_files(triggers workloads.yaml) diff --git a/tests/e2e/write_procedures/CMakeLists.txt b/tests/e2e/write_procedures/CMakeLists.txt index 27a9a73e2..f7dc2d8b3 100644 --- a/tests/e2e/write_procedures/CMakeLists.txt +++ b/tests/e2e/write_procedures/CMakeLists.txt @@ -8,3 +8,5 @@ copy_write_procedures_e2e_python_files(simple_write.py) copy_write_procedures_e2e_python_files(read_subgraph.py) add_subdirectory(procedures) + +copy_e2e_files(write_procedures workloads.yaml) diff --git a/tests/gql_behave/tests/memgraph_V1/features/parameters.feature b/tests/gql_behave/tests/memgraph_V1/features/parameters.feature index 288f93206..d9b25ff8c 100644 --- a/tests/gql_behave/tests/memgraph_V1/features/parameters.feature +++ b/tests/gql_behave/tests/memgraph_V1/features/parameters.feature @@ -153,3 +153,20 @@ Feature: Parameters Then the result should be: | a | | (:Label1 {x: 10}) | + + Scenario: Parameters for limit in return returnBody + Given an empty graph + And having executed: + """ + FOREACH (id IN range(1, 10) | CREATE (:Node {id: id})) + """ + And parameters are: + | limit | 2 | + When executing query: + """ + MATCH (n) RETURN n LIMIT $limit + """ + Then the result should be: + | n | + | (:Node {id: 1}) | + | (:Node {id: 2}) | diff --git a/tests/integration/telemetry/client.cpp b/tests/integration/telemetry/client.cpp index 34e1c2a67..b93b1ada5 100644 --- a/tests/integration/telemetry/client.cpp +++ b/tests/integration/telemetry/client.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
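# The copy_e2e_files(... workloads.yaml) additions above pair with the run.sh change that
# points runner.py at the build directory: the runner discovers workloads by scanning the
# workloads-root-directory for workloads.yaml files, so each file now has to be copied into
# the build output. A minimal sketch of that discovery step, assuming PyYAML and the yaml
# layout shown earlier (name/binary/args); the real runner.py contains more logic.
import os

import yaml


def load_workloads(root_directory):
    workloads = []
    for dirpath, _, filenames in os.walk(root_directory):
        if "workloads.yaml" in filenames:
            with open(os.path.join(dirpath, "workloads.yaml")) as f:
                workloads.extend(yaml.safe_load(f)["workloads"])
    return workloads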
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -13,6 +13,7 @@ #include "dbms/dbms_handler.hpp" #include "glue/auth_checker.hpp" +#include "glue/auth_global.hpp" #include "glue/auth_handler.hpp" #include "requests/requests.hpp" #include "storage/v2/config.hpp" @@ -32,9 +33,10 @@ int main(int argc, char **argv) { // Memgraph backend std::filesystem::path data_directory{std::filesystem::temp_directory_path() / "MG_telemetry_integration_test"}; - memgraph::utils::Synchronized auth_{data_directory / - "auth"}; - memgraph::glue::AuthQueryHandler auth_handler(&auth_, ""); + memgraph::utils::Synchronized auth_{ + data_directory / "auth", + memgraph::auth::Auth::Config{std::string{memgraph::glue::kDefaultUserRoleRegex}, "", true}}; + memgraph::glue::AuthQueryHandler auth_handler(&auth_); memgraph::glue::AuthChecker auth_checker(&auth_); memgraph::storage::Config db_config; @@ -44,7 +46,7 @@ int main(int argc, char **argv) { memgraph::dbms::DbmsHandler dbms_handler(db_config #ifdef MG_ENTERPRISE , - &auth_, false, false + &auth_, false #endif ); memgraph::query::InterpreterContext interpreter_context_({}, &dbms_handler, &repl_state, &auth_handler, diff --git a/tests/jepsen/run.sh b/tests/jepsen/run.sh index b366e7846..a1587c8a1 100755 --- a/tests/jepsen/run.sh +++ b/tests/jepsen/run.sh @@ -24,7 +24,7 @@ PRINT_CONTEXT() { HELP_EXIT() { echo "" - echo "HELP: $0 help|cluster-up|cluster-cleanup|cluster-dealloc|mgbuild|test|test-all-individually [args]" + echo "HELP: $0 help|cluster-up|cluster-refresh|cluster-cleanup|cluster-dealloc|mgbuild|test|test-all-individually [args]" echo "" echo " test args --binary MEMGRAPH_BINARY_PATH" echo " --ignore-run-stdout-logs Ignore lein run stdout logs." @@ -184,6 +184,37 @@ PROCESS_RESULTS() { INFO "Result processing (printing and packing) DONE." } +CLUSTER_UP() { + PRINT_CONTEXT + "$script_dir/jepsen/docker/bin/up" --daemon + sleep 10 + # Ensure all SSH connections between Jepsen containers work + for node in $(docker ps --filter name=jepsen* --filter status=running --format "{{.Names}}"); do + if [ "$node" == "jepsen-control" ]; then + continue + fi + node_hostname="${node##jepsen-}" + docker exec jepsen-control bash -c "ssh -oStrictHostKeyChecking=no -t $node_hostname exit" + done +} + +CLUSTER_DEALLOC() { + ps=$(docker ps --filter name=jepsen* --filter status=running -q) + if [[ ! -z ${ps} ]]; then + echo "Killing ${ps}" + docker rm -f ${ps} + imgs=$(docker images "jepsen*" -q) + if [[ ! -z ${imgs} ]]; then + echo "Removing ${imgs}" + docker images "jepsen*" -q | xargs docker image rmi -f + else + echo "No Jepsen images detected!" + fi + else + echo "No Jepsen containers detected!" + fi +} + # Initialize testing context by copying source/binary files. Inside CI, # Memgraph is tested on a single machine cluster based on Docker containers. # Once these tests will be part of the official Jepsen repo, the majority of @@ -196,8 +227,16 @@ case $1 in # the current cluster is broken because it relies on the folder. That can # happen easiliy because the jepsen folder is git ignored. cluster-up) - PRINT_CONTEXT - "$script_dir/jepsen/docker/bin/up" --daemon + CLUSTER_UP + ;; + + cluster-refresh) + CLUSTER_DEALLOC + CLUSTER_UP + ;; + + cluster-dealloc) + CLUSTER_DEALLOC ;; cluster-cleanup) @@ -212,23 +251,6 @@ case $1 in done ;; - cluster-dealloc) - ps=$(docker ps --filter name=jepsen* --filter status=running -q) - if [[ ! 
-z ${ps} ]]; then - echo "Killing ${ps}" - docker rm -f ${ps} - imgs=$(docker images "jepsen*" -q) - if [[ ! -z ${imgs} ]]; then - echo "Removing ${imgs}" - docker images "jepsen*" -q | xargs docker image rmi -f - else - echo "No Jepsen images detected!" - fi - else - echo "No Jepsen containers detected!" - fi - ;; - mgbuild) PRINT_CONTEXT echo "" diff --git a/tests/manual/query_planner.cpp b/tests/manual/query_planner.cpp index f455bf716..8f2c107bc 100644 --- a/tests/manual/query_planner.cpp +++ b/tests/manual/query_planner.cpp @@ -14,7 +14,7 @@ #include #include "storage/v2/inmemory/storage.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; DECLARE_int32(min_log_level); int main(int argc, char *argv[]) { diff --git a/tests/property_based/random_graph.cpp b/tests/property_based/random_graph.cpp index ae71a68c8..097c2dc0e 100644 --- a/tests/property_based/random_graph.cpp +++ b/tests/property_based/random_graph.cpp @@ -23,7 +23,7 @@ #include "storage/v2/inmemory/storage.hpp" #include "storage/v2/storage.hpp" #include "storage/v2/vertex_accessor.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; /** * It is possible to run test with custom seed with: * RC_PARAMS="seed=1" ./random_graph diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index 956cba781..6f7b3bbef 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -350,7 +350,7 @@ add_unit_test(storage_v2_wal_file.cpp) target_link_libraries(${test_prefix}storage_v2_wal_file mg-storage-v2 storage_test_utils fmt) add_unit_test(storage_v2_replication.cpp) -target_link_libraries(${test_prefix}storage_v2_replication mg-storage-v2 mg-dbms fmt) +target_link_libraries(${test_prefix}storage_v2_replication mg-storage-v2 mg-dbms fmt mg-repl_coord_glue) add_unit_test(storage_v2_isolation_level.cpp) target_link_libraries(${test_prefix}storage_v2_isolation_level mg-storage-v2) @@ -368,7 +368,7 @@ add_unit_test(storage_v2_storage_mode.cpp) target_link_libraries(${test_prefix}storage_v2_storage_mode mg-storage-v2 storage_test_utils mg-query mg-glue) add_unit_test(replication_persistence_helper.cpp) -target_link_libraries(${test_prefix}replication_persistence_helper mg-storage-v2) +target_link_libraries(${test_prefix}replication_persistence_helper mg-storage-v2 mg-repl_coord_glue) add_unit_test(auth_checker.cpp) target_link_libraries(${test_prefix}auth_checker mg-glue mg-auth) @@ -389,7 +389,7 @@ endif() # Test mg-slk if(MG_ENTERPRISE) add_unit_test(slk_advanced.cpp) - target_link_libraries(${test_prefix}slk_advanced mg-storage-v2) + target_link_libraries(${test_prefix}slk_advanced mg-storage-v2 mg-replication mg-coordination mg-repl_coord_glue) endif() add_unit_test(slk_core.cpp) @@ -415,6 +415,9 @@ if(MG_ENTERPRISE) add_unit_test_with_custom_main(dbms_handler.cpp) target_link_libraries(${test_prefix}dbms_handler mg-query mg-auth mg-glue mg-dbms) + + add_unit_test(multi_tenancy.cpp) + target_link_libraries(${test_prefix}multi_tenancy mg-query mg-auth mg-glue mg-dbms) else() add_unit_test_with_custom_main(dbms_handler_community.cpp) target_link_libraries(${test_prefix}dbms_handler_community mg-query mg-auth mg-glue mg-dbms) diff --git a/tests/unit/auth.cpp b/tests/unit/auth.cpp index 6dbe20914..bc2947a12 100644 --- a/tests/unit/auth.cpp +++ b/tests/unit/auth.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -19,6 +19,7 @@ #include "auth/auth.hpp" #include "auth/crypto.hpp" #include "auth/models.hpp" +#include "glue/auth_global.hpp" #include "license/license.hpp" #include "utils/cast.hpp" #include "utils/file.hpp" @@ -26,90 +27,70 @@ using namespace memgraph::auth; namespace fs = std::filesystem; -DECLARE_bool(auth_password_permit_null); -DECLARE_string(auth_password_strength_regex); DECLARE_string(password_encryption_algorithm); class AuthWithStorage : public ::testing::Test { protected: void SetUp() override { memgraph::utils::EnsureDir(test_folder_); - FLAGS_auth_password_permit_null = true; - FLAGS_auth_password_strength_regex = ".+"; - memgraph::license::global_license_checker.EnableTesting(); + auth.emplace(test_folder_ / ("unit_auth_test_" + std::to_string(static_cast(getpid()))), auth_config); } void TearDown() override { fs::remove_all(test_folder_); } fs::path test_folder_{fs::temp_directory_path() / "MG_tests_unit_auth"}; - - Auth auth{test_folder_ / ("unit_auth_test_" + std::to_string(static_cast(getpid())))}; + Auth::Config auth_config{}; + std::optional auth{}; }; TEST_F(AuthWithStorage, AddRole) { - ASSERT_TRUE(auth.AddRole("admin")); - ASSERT_TRUE(auth.AddRole("user")); - ASSERT_FALSE(auth.AddRole("admin")); + ASSERT_TRUE(auth->AddRole("admin")); + ASSERT_TRUE(auth->AddRole("user")); + ASSERT_FALSE(auth->AddRole("admin")); } TEST_F(AuthWithStorage, RemoveRole) { - ASSERT_TRUE(auth.AddRole("admin")); - ASSERT_TRUE(auth.RemoveRole("admin")); - class AuthWithStorage : public ::testing::Test { - protected: - void SetUp() override { - memgraph::utils::EnsureDir(test_folder_); - FLAGS_auth_password_permit_null = true; - FLAGS_auth_password_strength_regex = ".+"; - - memgraph::license::global_license_checker.EnableTesting(); - } - - void TearDown() override { fs::remove_all(test_folder_); } - - fs::path test_folder_{fs::temp_directory_path() / "MG_tests_unit_auth"}; - - Auth auth{test_folder_ / ("unit_auth_test_" + std::to_string(static_cast(getpid())))}; - }; - ASSERT_FALSE(auth.HasUsers()); - ASSERT_FALSE(auth.RemoveUser("test2")); - ASSERT_FALSE(auth.RemoveUser("test")); - ASSERT_FALSE(auth.HasUsers()); + ASSERT_TRUE(auth->AddRole("admin")); + ASSERT_TRUE(auth->RemoveRole("admin")); + ASSERT_FALSE(auth->HasUsers()); + ASSERT_FALSE(auth->RemoveUser("test2")); + ASSERT_FALSE(auth->RemoveUser("test")); + ASSERT_FALSE(auth->HasUsers()); } TEST_F(AuthWithStorage, Authenticate) { - ASSERT_FALSE(auth.HasUsers()); + ASSERT_FALSE(auth->HasUsers()); - auto user = auth.AddUser("test"); + auto user = auth->AddUser("test"); ASSERT_NE(user, std::nullopt); - ASSERT_TRUE(auth.HasUsers()); + ASSERT_TRUE(auth->HasUsers()); - ASSERT_TRUE(auth.Authenticate("test", "123")); + ASSERT_TRUE(auth->Authenticate("test", "123")); user->UpdatePassword("123"); - auth.SaveUser(*user); + auth->SaveUser(*user); - ASSERT_NE(auth.Authenticate("test", "123"), std::nullopt); + ASSERT_NE(auth->Authenticate("test", "123"), std::nullopt); - ASSERT_EQ(auth.Authenticate("test", "456"), std::nullopt); - ASSERT_NE(auth.Authenticate("test", "123"), std::nullopt); + ASSERT_EQ(auth->Authenticate("test", "456"), std::nullopt); + ASSERT_NE(auth->Authenticate("test", "123"), std::nullopt); user->UpdatePassword(); - auth.SaveUser(*user); + auth->SaveUser(*user); - ASSERT_NE(auth.Authenticate("test", "123"), std::nullopt); - 
ASSERT_NE(auth.Authenticate("test", "456"), std::nullopt); + ASSERT_NE(auth->Authenticate("test", "123"), std::nullopt); + ASSERT_NE(auth->Authenticate("test", "456"), std::nullopt); - ASSERT_EQ(auth.Authenticate("nonexistant", "123"), std::nullopt); + ASSERT_EQ(auth->Authenticate("nonexistant", "123"), std::nullopt); } TEST_F(AuthWithStorage, UserRolePermissions) { - ASSERT_FALSE(auth.HasUsers()); - ASSERT_TRUE(auth.AddUser("test")); - ASSERT_TRUE(auth.HasUsers()); + ASSERT_FALSE(auth->HasUsers()); + ASSERT_TRUE(auth->AddUser("test")); + ASSERT_TRUE(auth->HasUsers()); - auto user = auth.GetUser("test"); + auto user = auth->GetUser("test"); ASSERT_NE(user, std::nullopt); // Test initial user permissions. @@ -130,8 +111,8 @@ TEST_F(AuthWithStorage, UserRolePermissions) { ASSERT_EQ(user->permissions(), user->GetPermissions()); // Create role. - ASSERT_TRUE(auth.AddRole("admin")); - auto role = auth.GetRole("admin"); + ASSERT_TRUE(auth->AddRole("admin")); + auto role = auth->GetRole("admin"); ASSERT_NE(role, std::nullopt); // Assign permissions to role and role to user. @@ -163,11 +144,11 @@ TEST_F(AuthWithStorage, UserRolePermissions) { #ifdef MG_ENTERPRISE TEST_F(AuthWithStorage, UserRoleFineGrainedAccessHandler) { - ASSERT_FALSE(auth.HasUsers()); - ASSERT_TRUE(auth.AddUser("test")); - ASSERT_TRUE(auth.HasUsers()); + ASSERT_FALSE(auth->HasUsers()); + ASSERT_TRUE(auth->AddUser("test")); + ASSERT_TRUE(auth->HasUsers()); - auto user = auth.GetUser("test"); + auto user = auth->GetUser("test"); ASSERT_NE(user, std::nullopt); // Test initial user fine grained access permissions. @@ -204,8 +185,8 @@ TEST_F(AuthWithStorage, UserRoleFineGrainedAccessHandler) { user->GetFineGrainedAccessEdgeTypePermissions()); // Create role. - ASSERT_TRUE(auth.AddRole("admin")); - auto role = auth.GetRole("admin"); + ASSERT_TRUE(auth->AddRole("admin")); + auto role = auth->GetRole("admin"); ASSERT_NE(role, std::nullopt); // Grant label and edge type to role and role to user. 
@@ -236,44 +217,44 @@ TEST_F(AuthWithStorage, UserRoleFineGrainedAccessHandler) { TEST_F(AuthWithStorage, RoleManipulations) { { - auto user1 = auth.AddUser("user1"); + auto user1 = auth->AddUser("user1"); ASSERT_TRUE(user1); - auto role1 = auth.AddRole("role1"); + auto role1 = auth->AddRole("role1"); ASSERT_TRUE(role1); user1->SetRole(*role1); - auth.SaveUser(*user1); + auth->SaveUser(*user1); - auto user2 = auth.AddUser("user2"); + auto user2 = auth->AddUser("user2"); ASSERT_TRUE(user2); - auto role2 = auth.AddRole("role2"); + auto role2 = auth->AddRole("role2"); ASSERT_TRUE(role2); user2->SetRole(*role2); - auth.SaveUser(*user2); + auth->SaveUser(*user2); } { - auto user1 = auth.GetUser("user1"); + auto user1 = auth->GetUser("user1"); ASSERT_TRUE(user1); const auto *role1 = user1->role(); ASSERT_NE(role1, nullptr); ASSERT_EQ(role1->rolename(), "role1"); - auto user2 = auth.GetUser("user2"); + auto user2 = auth->GetUser("user2"); ASSERT_TRUE(user2); const auto *role2 = user2->role(); ASSERT_NE(role2, nullptr); ASSERT_EQ(role2->rolename(), "role2"); } - ASSERT_TRUE(auth.RemoveRole("role1")); + ASSERT_TRUE(auth->RemoveRole("role1")); { - auto user1 = auth.GetUser("user1"); + auto user1 = auth->GetUser("user1"); ASSERT_TRUE(user1); const auto *role = user1->role(); ASSERT_EQ(role, nullptr); - auto user2 = auth.GetUser("user2"); + auto user2 = auth->GetUser("user2"); ASSERT_TRUE(user2); const auto *role2 = user2->role(); ASSERT_NE(role2, nullptr); @@ -281,17 +262,17 @@ TEST_F(AuthWithStorage, RoleManipulations) { } { - auto role1 = auth.AddRole("role1"); + auto role1 = auth->AddRole("role1"); ASSERT_TRUE(role1); } { - auto user1 = auth.GetUser("user1"); + auto user1 = auth->GetUser("user1"); ASSERT_TRUE(user1); const auto *role1 = user1->role(); ASSERT_EQ(role1, nullptr); - auto user2 = auth.GetUser("user2"); + auto user2 = auth->GetUser("user2"); ASSERT_TRUE(user2); const auto *role2 = user2->role(); ASSERT_NE(role2, nullptr); @@ -299,7 +280,7 @@ TEST_F(AuthWithStorage, RoleManipulations) { } { - auto users = auth.AllUsers(); + auto users = auth->AllUsers(); std::sort(users.begin(), users.end(), [](const User &a, const User &b) { return a.username() < b.username(); }); ASSERT_EQ(users.size(), 2); ASSERT_EQ(users[0].username(), "user1"); @@ -307,7 +288,7 @@ TEST_F(AuthWithStorage, RoleManipulations) { } { - auto roles = auth.AllRoles(); + auto roles = auth->AllRoles(); std::sort(roles.begin(), roles.end(), [](const Role &a, const Role &b) { return a.rolename() < b.rolename(); }); ASSERT_EQ(roles.size(), 2); ASSERT_EQ(roles[0].rolename(), "role1"); @@ -315,7 +296,7 @@ TEST_F(AuthWithStorage, RoleManipulations) { } { - auto users = auth.AllUsersForRole("role2"); + auto users = auth->AllUsersForRole("role2"); ASSERT_EQ(users.size(), 1); ASSERT_EQ(users[0].username(), "user2"); } @@ -323,16 +304,16 @@ TEST_F(AuthWithStorage, RoleManipulations) { TEST_F(AuthWithStorage, UserRoleLinkUnlink) { { - auto user = auth.AddUser("user"); + auto user = auth->AddUser("user"); ASSERT_TRUE(user); - auto role = auth.AddRole("role"); + auto role = auth->AddRole("role"); ASSERT_TRUE(role); user->SetRole(*role); - auth.SaveUser(*user); + auth->SaveUser(*user); } { - auto user = auth.GetUser("user"); + auto user = auth->GetUser("user"); ASSERT_TRUE(user); const auto *role = user->role(); ASSERT_NE(role, nullptr); @@ -340,14 +321,14 @@ TEST_F(AuthWithStorage, UserRoleLinkUnlink) { } { - auto user = auth.GetUser("user"); + auto user = auth->GetUser("user"); ASSERT_TRUE(user); user->ClearRole(); - 
auth.SaveUser(*user); + auth->SaveUser(*user); } { - auto user = auth.GetUser("user"); + auto user = auth->GetUser("user"); ASSERT_TRUE(user); ASSERT_EQ(user->role(), nullptr); } @@ -355,19 +336,19 @@ TEST_F(AuthWithStorage, UserRoleLinkUnlink) { TEST_F(AuthWithStorage, UserPasswordCreation) { { - auto user = auth.AddUser("test"); + auto user = auth->AddUser("test"); ASSERT_TRUE(user); - ASSERT_TRUE(auth.Authenticate("test", "123")); - ASSERT_TRUE(auth.Authenticate("test", "456")); - ASSERT_TRUE(auth.RemoveUser(user->username())); + ASSERT_TRUE(auth->Authenticate("test", "123")); + ASSERT_TRUE(auth->Authenticate("test", "456")); + ASSERT_TRUE(auth->RemoveUser(user->username())); } { - auto user = auth.AddUser("test", "123"); + auto user = auth->AddUser("test", "123"); ASSERT_TRUE(user); - ASSERT_TRUE(auth.Authenticate("test", "123")); - ASSERT_FALSE(auth.Authenticate("test", "456")); - ASSERT_TRUE(auth.RemoveUser(user->username())); + ASSERT_TRUE(auth->Authenticate("test", "123")); + ASSERT_FALSE(auth->Authenticate("test", "456")); + ASSERT_TRUE(auth->RemoveUser(user->username())); } } @@ -382,36 +363,53 @@ TEST_F(AuthWithStorage, PasswordStrength) { const std::string kAlmostStrongPassword = "ThisPasswordMeetsAllButOneCriterion1234"; const std::string kStrongPassword = "ThisIsAVeryStrongPassword123$"; - auto user = auth.AddUser("user"); - ASSERT_TRUE(user); + { + auth.reset(); + auth.emplace(test_folder_ / ("unit_auth_test_" + std::to_string(static_cast(getpid()))), + Auth::Config{std::string{memgraph::glue::kDefaultUserRoleRegex}, kWeakRegex, true}); + auto user = auth->AddUser("user1"); + ASSERT_TRUE(user); + ASSERT_NO_THROW(auth->UpdatePassword(*user, std::nullopt)); + ASSERT_NO_THROW(auth->UpdatePassword(*user, kWeakPassword)); + ASSERT_NO_THROW(auth->UpdatePassword(*user, kAlmostStrongPassword)); + ASSERT_NO_THROW(auth->UpdatePassword(*user, kStrongPassword)); + } - FLAGS_auth_password_permit_null = true; - FLAGS_auth_password_strength_regex = kWeakRegex; - ASSERT_NO_THROW(user->UpdatePassword()); - ASSERT_NO_THROW(user->UpdatePassword(kWeakPassword)); - ASSERT_NO_THROW(user->UpdatePassword(kAlmostStrongPassword)); - ASSERT_NO_THROW(user->UpdatePassword(kStrongPassword)); + { + auth.reset(); + auth.emplace(test_folder_ / ("unit_auth_test_" + std::to_string(static_cast(getpid()))), + Auth::Config{std::string{memgraph::glue::kDefaultUserRoleRegex}, kWeakRegex, false}); + ASSERT_THROW(auth->AddUser("user2", std::nullopt), AuthException); + auto user = auth->AddUser("user2", kWeakPassword); + ASSERT_TRUE(user); + ASSERT_NO_THROW(auth->UpdatePassword(*user, kWeakPassword)); + ASSERT_NO_THROW(auth->UpdatePassword(*user, kAlmostStrongPassword)); + ASSERT_NO_THROW(auth->UpdatePassword(*user, kStrongPassword)); + } - FLAGS_auth_password_permit_null = false; - FLAGS_auth_password_strength_regex = kWeakRegex; - ASSERT_THROW(user->UpdatePassword(), AuthException); - ASSERT_NO_THROW(user->UpdatePassword(kWeakPassword)); - ASSERT_NO_THROW(user->UpdatePassword(kAlmostStrongPassword)); - ASSERT_NO_THROW(user->UpdatePassword(kStrongPassword)); + { + auth.reset(); + auth.emplace(test_folder_ / ("unit_auth_test_" + std::to_string(static_cast(getpid()))), + Auth::Config{std::string{memgraph::glue::kDefaultUserRoleRegex}, kStrongRegex, true}); + auto user = auth->AddUser("user3"); + ASSERT_TRUE(user); + ASSERT_NO_THROW(auth->UpdatePassword(*user, std::nullopt)); + ASSERT_THROW(auth->UpdatePassword(*user, kWeakPassword), AuthException); + ASSERT_THROW(auth->UpdatePassword(*user, kAlmostStrongPassword), 
AuthException); + ASSERT_NO_THROW(auth->UpdatePassword(*user, kStrongPassword)); + } - FLAGS_auth_password_permit_null = true; - FLAGS_auth_password_strength_regex = kStrongRegex; - ASSERT_NO_THROW(user->UpdatePassword()); - ASSERT_THROW(user->UpdatePassword(kWeakPassword), AuthException); - ASSERT_THROW(user->UpdatePassword(kAlmostStrongPassword), AuthException); - ASSERT_NO_THROW(user->UpdatePassword(kStrongPassword)); - - FLAGS_auth_password_permit_null = false; - FLAGS_auth_password_strength_regex = kStrongRegex; - ASSERT_THROW(user->UpdatePassword(), AuthException); - ASSERT_THROW(user->UpdatePassword(kWeakPassword), AuthException); - ASSERT_THROW(user->UpdatePassword(kAlmostStrongPassword), AuthException); - ASSERT_NO_THROW(user->UpdatePassword(kStrongPassword)); + { + auth.reset(); + auth.emplace(test_folder_ / ("unit_auth_test_" + std::to_string(static_cast(getpid()))), + Auth::Config{std::string{memgraph::glue::kDefaultUserRoleRegex}, kStrongRegex, false}); + ASSERT_THROW(auth->AddUser("user4", std::nullopt);, AuthException); + ASSERT_THROW(auth->AddUser("user4", kWeakPassword);, AuthException); + ASSERT_THROW(auth->AddUser("user4", kAlmostStrongPassword);, AuthException); + auto user = auth->AddUser("user4", kStrongPassword); + ASSERT_TRUE(user); + ASSERT_NO_THROW(auth->UpdatePassword(*user, kStrongPassword)); + } } TEST(AuthWithoutStorage, Permissions) { @@ -668,6 +666,17 @@ TEST(AuthWithoutStorage, UserSerializeDeserialize) { ASSERT_EQ(user, output); } +TEST(AuthWithoutStorage, UserSerializeDeserializeWithOutPassword) { + auto user = User("test"); + user.permissions().Grant(Permission::MATCH); + user.permissions().Deny(Permission::MERGE); + + auto data = user.Serialize(); + + auto output = User::Deserialize(data); + ASSERT_EQ(user, output); +} + TEST(AuthWithoutStorage, RoleSerializeDeserialize) { auto role = Role("test"); role.permissions().Grant(Permission::MATCH); @@ -680,30 +689,30 @@ TEST(AuthWithoutStorage, RoleSerializeDeserialize) { } TEST_F(AuthWithStorage, UserWithRoleSerializeDeserialize) { - auto role = auth.AddRole("role"); + auto role = auth->AddRole("role"); ASSERT_TRUE(role); role->permissions().Grant(Permission::MATCH); role->permissions().Deny(Permission::MERGE); - auth.SaveRole(*role); + auth->SaveRole(*role); - auto user = auth.AddUser("user"); + auto user = auth->AddUser("user"); ASSERT_TRUE(user); user->permissions().Grant(Permission::MATCH); user->permissions().Deny(Permission::MERGE); user->UpdatePassword("world"); user->SetRole(*role); - auth.SaveUser(*user); + auth->SaveUser(*user); - auto new_user = auth.GetUser("user"); + auto new_user = auth->GetUser("user"); ASSERT_TRUE(new_user); ASSERT_EQ(*user, *new_user); } TEST_F(AuthWithStorage, UserRoleUniqueName) { - ASSERT_TRUE(auth.AddUser("user")); - ASSERT_TRUE(auth.AddRole("role")); - ASSERT_FALSE(auth.AddRole("user")); - ASSERT_FALSE(auth.AddUser("role")); + ASSERT_TRUE(auth->AddUser("user")); + ASSERT_TRUE(auth->AddRole("role")); + ASSERT_FALSE(auth->AddRole("user")); + ASSERT_FALSE(auth->AddUser("role")); } TEST(AuthWithoutStorage, CaseInsensitivity) { @@ -718,8 +727,9 @@ TEST(AuthWithoutStorage, CaseInsensitivity) { { auto perms = Permissions(); auto fine_grained_access_handler = FineGrainedAccessHandler(); - auto user1 = User("test", "pw", perms, fine_grained_access_handler); - auto user2 = User("Test", "pw", perms, fine_grained_access_handler); + auto passwordHash = HashPassword("pw"); + auto user1 = User("test", passwordHash, perms, fine_grained_access_handler); + auto user2 = User("Test", 
passwordHash, perms, fine_grained_access_handler); ASSERT_EQ(user1, user2); ASSERT_EQ(user1.username(), user2.username()); ASSERT_EQ(user1.username(), "test"); @@ -748,58 +758,58 @@ TEST(AuthWithoutStorage, CaseInsensitivity) { TEST_F(AuthWithStorage, CaseInsensitivity) { // AddUser { - auto user = auth.AddUser("Alice", "alice"); + auto user = auth->AddUser("Alice", "alice"); ASSERT_TRUE(user); ASSERT_EQ(user->username(), "alice"); - ASSERT_FALSE(auth.AddUser("alice")); - ASSERT_FALSE(auth.AddUser("alicE")); + ASSERT_FALSE(auth->AddUser("alice")); + ASSERT_FALSE(auth->AddUser("alicE")); } { - auto user = auth.AddUser("BoB", "bob"); + auto user = auth->AddUser("BoB", "bob"); ASSERT_TRUE(user); ASSERT_EQ(user->username(), "bob"); - ASSERT_FALSE(auth.AddUser("bob")); - ASSERT_FALSE(auth.AddUser("bOb")); + ASSERT_FALSE(auth->AddUser("bob")); + ASSERT_FALSE(auth->AddUser("bOb")); } // Authenticate { - auto user = auth.Authenticate("alice", "alice"); + auto user = auth->Authenticate("alice", "alice"); ASSERT_TRUE(user); ASSERT_EQ(user->username(), "alice"); } { - auto user = auth.Authenticate("alICe", "alice"); + auto user = auth->Authenticate("alICe", "alice"); ASSERT_TRUE(user); ASSERT_EQ(user->username(), "alice"); } // GetUser { - auto user = auth.GetUser("alice"); + auto user = auth->GetUser("alice"); ASSERT_TRUE(user); ASSERT_EQ(user->username(), "alice"); } { - auto user = auth.GetUser("aLicE"); + auto user = auth->GetUser("aLicE"); ASSERT_TRUE(user); ASSERT_EQ(user->username(), "alice"); } - ASSERT_FALSE(auth.GetUser("carol")); + ASSERT_FALSE(auth->GetUser("carol")); // RemoveUser { - auto user = auth.AddUser("caRol", "carol"); + auto user = auth->AddUser("caRol", "carol"); ASSERT_TRUE(user); ASSERT_EQ(user->username(), "carol"); - ASSERT_TRUE(auth.RemoveUser("cAROl")); - ASSERT_FALSE(auth.RemoveUser("carol")); - ASSERT_FALSE(auth.GetUser("CAROL")); + ASSERT_TRUE(auth->RemoveUser("cAROl")); + ASSERT_FALSE(auth->RemoveUser("carol")); + ASSERT_FALSE(auth->GetUser("CAROL")); } // AllUsers { - auto users = auth.AllUsers(); + auto users = auth->AllUsers(); ASSERT_EQ(users.size(), 2); std::sort(users.begin(), users.end(), [](const auto &a, const auto &b) { return a.username() < b.username(); }); ASSERT_EQ(users[0].username(), "alice"); @@ -808,48 +818,48 @@ TEST_F(AuthWithStorage, CaseInsensitivity) { // AddRole { - auto role = auth.AddRole("Moderator"); + auto role = auth->AddRole("Moderator"); ASSERT_TRUE(role); ASSERT_EQ(role->rolename(), "moderator"); - ASSERT_FALSE(auth.AddRole("moderator")); - ASSERT_FALSE(auth.AddRole("MODERATOR")); + ASSERT_FALSE(auth->AddRole("moderator")); + ASSERT_FALSE(auth->AddRole("MODERATOR")); } { - auto role = auth.AddRole("adMIN"); + auto role = auth->AddRole("adMIN"); ASSERT_TRUE(role); ASSERT_EQ(role->rolename(), "admin"); - ASSERT_FALSE(auth.AddRole("Admin")); - ASSERT_FALSE(auth.AddRole("ADMIn")); + ASSERT_FALSE(auth->AddRole("Admin")); + ASSERT_FALSE(auth->AddRole("ADMIn")); } - ASSERT_FALSE(auth.AddRole("ALICE")); - ASSERT_FALSE(auth.AddUser("ModeRAtor")); + ASSERT_FALSE(auth->AddRole("ALICE")); + ASSERT_FALSE(auth->AddUser("ModeRAtor")); // GetRole { - auto role = auth.GetRole("moderator"); + auto role = auth->GetRole("moderator"); ASSERT_TRUE(role); ASSERT_EQ(role->rolename(), "moderator"); } { - auto role = auth.GetRole("MoDERATOR"); + auto role = auth->GetRole("MoDERATOR"); ASSERT_TRUE(role); ASSERT_EQ(role->rolename(), "moderator"); } - ASSERT_FALSE(auth.GetRole("root")); + ASSERT_FALSE(auth->GetRole("root")); // RemoveRole { - auto role = 
auth.AddRole("RooT"); + auto role = auth->AddRole("RooT"); ASSERT_TRUE(role); ASSERT_EQ(role->rolename(), "root"); - ASSERT_TRUE(auth.RemoveRole("rOOt")); - ASSERT_FALSE(auth.RemoveRole("RoOt")); - ASSERT_FALSE(auth.GetRole("RoOt")); + ASSERT_TRUE(auth->RemoveRole("rOOt")); + ASSERT_FALSE(auth->RemoveRole("RoOt")); + ASSERT_FALSE(auth->GetRole("RoOt")); } // AllRoles { - auto roles = auth.AllRoles(); + auto roles = auth->AllRoles(); ASSERT_EQ(roles.size(), 2); std::sort(roles.begin(), roles.end(), [](const auto &a, const auto &b) { return a.rolename() < b.rolename(); }); ASSERT_EQ(roles[0].rolename(), "admin"); @@ -858,14 +868,14 @@ TEST_F(AuthWithStorage, CaseInsensitivity) { // SaveRole { - auto role = auth.GetRole("MODErator"); + auto role = auth->GetRole("MODErator"); ASSERT_TRUE(role); ASSERT_EQ(role->rolename(), "moderator"); role->permissions().Grant(memgraph::auth::Permission::MATCH); - auth.SaveRole(*role); + auth->SaveRole(*role); } { - auto role = auth.GetRole("modeRATOR"); + auto role = auth->GetRole("modeRATOR"); ASSERT_TRUE(role); ASSERT_EQ(role->rolename(), "moderator"); ASSERT_EQ(role->permissions().Has(memgraph::auth::Permission::MATCH), memgraph::auth::PermissionLevel::GRANT); @@ -873,17 +883,17 @@ TEST_F(AuthWithStorage, CaseInsensitivity) { // SaveUser { - auto user = auth.GetUser("aLice"); + auto user = auth->GetUser("aLice"); ASSERT_TRUE(user); ASSERT_EQ(user->username(), "alice"); - auto role = auth.GetRole("moderAtor"); + auto role = auth->GetRole("moderAtor"); ASSERT_TRUE(role); ASSERT_EQ(role->rolename(), "moderator"); user->SetRole(*role); - auth.SaveUser(*user); + auth->SaveUser(*user); } { - auto user = auth.GetUser("aLIce"); + auto user = auth->GetUser("aLIce"); ASSERT_TRUE(user); ASSERT_EQ(user->username(), "alice"); const auto *role = user->role(); @@ -893,27 +903,27 @@ TEST_F(AuthWithStorage, CaseInsensitivity) { // AllUsersForRole { - auto carol = auth.AddUser("caROl"); + auto carol = auth->AddUser("caROl"); ASSERT_TRUE(carol); ASSERT_EQ(carol->username(), "carol"); - auto dave = auth.AddUser("daVe"); + auto dave = auth->AddUser("daVe"); ASSERT_TRUE(dave); ASSERT_EQ(dave->username(), "dave"); - auto admin = auth.GetRole("aDMin"); + auto admin = auth->GetRole("aDMin"); ASSERT_TRUE(admin); ASSERT_EQ(admin->rolename(), "admin"); carol->SetRole(*admin); - auth.SaveUser(*carol); + auth->SaveUser(*carol); dave->SetRole(*admin); - auth.SaveUser(*dave); + auth->SaveUser(*dave); } { - auto users = auth.AllUsersForRole("modeRAtoR"); + auto users = auth->AllUsersForRole("modeRAtoR"); ASSERT_EQ(users.size(), 1); ASSERT_EQ(users[0].username(), "alice"); } { - auto users = auth.AllUsersForRole("AdmiN"); + auto users = auth->AllUsersForRole("AdmiN"); ASSERT_EQ(users.size(), 2); std::sort(users.begin(), users.end(), [](const auto &a, const auto &b) { return a.username() < b.username(); }); ASSERT_EQ(users[0].username(), "carol"); @@ -922,53 +932,49 @@ TEST_F(AuthWithStorage, CaseInsensitivity) { } TEST(AuthWithoutStorage, Crypto) { - auto hash = EncryptPassword("hello"); - ASSERT_TRUE(VerifyPassword("hello", hash)); - ASSERT_FALSE(VerifyPassword("hello1", hash)); + auto hash = HashPassword("hello"); + ASSERT_TRUE(hash.VerifyPassword("hello")); + ASSERT_FALSE(hash.VerifyPassword("hello1")); } class AuthWithVariousEncryptionAlgorithms : public ::testing::Test { protected: - void SetUp() override { FLAGS_password_encryption_algorithm = "bcrypt"; } + void SetUp() override { SetHashAlgorithm("bcrypt"); } }; TEST_F(AuthWithVariousEncryptionAlgorithms, VerifyPasswordDefault) { - 
auto hash = EncryptPassword("hello"); - ASSERT_TRUE(VerifyPassword("hello", hash)); - ASSERT_FALSE(VerifyPassword("hello1", hash)); + auto hash = HashPassword("hello"); + ASSERT_TRUE(hash.VerifyPassword("hello")); + ASSERT_FALSE(hash.VerifyPassword("hello1")); } TEST_F(AuthWithVariousEncryptionAlgorithms, VerifyPasswordSHA256) { - FLAGS_password_encryption_algorithm = "sha256"; - auto hash = EncryptPassword("hello"); - ASSERT_TRUE(VerifyPassword("hello", hash)); - ASSERT_FALSE(VerifyPassword("hello1", hash)); + SetHashAlgorithm("sha256"); + auto hash = HashPassword("hello"); + ASSERT_TRUE(hash.VerifyPassword("hello")); + ASSERT_FALSE(hash.VerifyPassword("hello1")); } TEST_F(AuthWithVariousEncryptionAlgorithms, VerifyPasswordSHA256_1024) { - FLAGS_password_encryption_algorithm = "sha256-multiple"; - auto hash = EncryptPassword("hello"); - ASSERT_TRUE(VerifyPassword("hello", hash)); - ASSERT_FALSE(VerifyPassword("hello1", hash)); + SetHashAlgorithm("sha256-multiple"); + auto hash = HashPassword("hello"); + ASSERT_TRUE(hash.VerifyPassword("hello")); + ASSERT_FALSE(hash.VerifyPassword("hello1")); } -TEST_F(AuthWithVariousEncryptionAlgorithms, VerifyPasswordThrow) { - FLAGS_password_encryption_algorithm = "abcd"; - ASSERT_THROW(EncryptPassword("hello"), AuthException); +TEST_F(AuthWithVariousEncryptionAlgorithms, SetEncryptionAlgorithmNonsenseThrow) { + ASSERT_THROW(SetHashAlgorithm("abcd"), AuthException); } -TEST_F(AuthWithVariousEncryptionAlgorithms, VerifyPasswordEmptyEncryptionThrow) { - FLAGS_password_encryption_algorithm = ""; - ASSERT_THROW(EncryptPassword("hello"), AuthException); +TEST_F(AuthWithVariousEncryptionAlgorithms, SetEncryptionAlgorithmEmptyThrow) { + ASSERT_THROW(SetHashAlgorithm(""), AuthException); } class AuthWithStorageWithVariousEncryptionAlgorithms : public ::testing::Test { protected: void SetUp() override { memgraph::utils::EnsureDir(test_folder_); - FLAGS_auth_password_permit_null = true; - FLAGS_auth_password_strength_regex = ".+"; - FLAGS_password_encryption_algorithm = "bcrypt"; + SetHashAlgorithm("bcrypt"); memgraph::license::global_license_checker.EnableTesting(); } @@ -976,8 +982,8 @@ class AuthWithStorageWithVariousEncryptionAlgorithms : public ::testing::Test { void TearDown() override { fs::remove_all(test_folder_); } fs::path test_folder_{fs::temp_directory_path() / "MG_tests_unit_auth"}; - - Auth auth{test_folder_ / ("unit_auth_test_" + std::to_string(static_cast(getpid())))}; + Auth::Config auth_config{}; + Auth auth{test_folder_ / ("unit_auth_test_" + std::to_string(static_cast(getpid()))), auth_config}; }; TEST_F(AuthWithStorageWithVariousEncryptionAlgorithms, AddUserDefault) { @@ -987,25 +993,26 @@ TEST_F(AuthWithStorageWithVariousEncryptionAlgorithms, AddUserDefault) { } TEST_F(AuthWithStorageWithVariousEncryptionAlgorithms, AddUserSha256) { - FLAGS_password_encryption_algorithm = "sha256"; + SetHashAlgorithm("sha256"); auto user = auth.AddUser("Alice", "alice"); ASSERT_TRUE(user); ASSERT_EQ(user->username(), "alice"); } TEST_F(AuthWithStorageWithVariousEncryptionAlgorithms, AddUserSha256_1024) { - FLAGS_password_encryption_algorithm = "sha256-multiple"; + SetHashAlgorithm("sha256-multiple"); auto user = auth.AddUser("Alice", "alice"); ASSERT_TRUE(user); ASSERT_EQ(user->username(), "alice"); } -TEST_F(AuthWithStorageWithVariousEncryptionAlgorithms, AddUserThrow) { - FLAGS_password_encryption_algorithm = "abcd"; - ASSERT_THROW(auth.AddUser("Alice", "alice"), AuthException); -} - -TEST_F(AuthWithStorageWithVariousEncryptionAlgorithms, 
AddUserEmptyPasswordEncryptionThrow) { - FLAGS_password_encryption_algorithm = ""; - ASSERT_THROW(auth.AddUser("Alice", "alice"), AuthException); +TEST(Serialize, HashedPassword) { + for (auto algo : + {PasswordHashAlgorithm::BCRYPT, PasswordHashAlgorithm::SHA256, PasswordHashAlgorithm::SHA256_MULTIPLE}) { + auto sut = HashPassword("password", algo); + nlohmann::json j = sut; + auto ret = j.get(); + ASSERT_EQ(sut, ret); + ASSERT_TRUE(ret.VerifyPassword("password")); + } } diff --git a/tests/unit/auth_checker.cpp b/tests/unit/auth_checker.cpp index 0122a4440..f4c499cd7 100644 --- a/tests/unit/auth_checker.cpp +++ b/tests/unit/auth_checker.cpp @@ -22,7 +22,7 @@ #include "storage/v2/disk/storage.hpp" #include "storage/v2/inmemory/storage.hpp" #include "storage/v2/view.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; #ifdef MG_ENTERPRISE template class FineGrainedAuthCheckerFixture : public testing::Test { diff --git a/tests/unit/auth_handler.cpp b/tests/unit/auth_handler.cpp index 6537575fd..a162d1838 100644 --- a/tests/unit/auth_handler.cpp +++ b/tests/unit/auth_handler.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -26,8 +26,9 @@ class AuthQueryHandlerFixture : public testing::Test { protected: std::filesystem::path test_folder_{std::filesystem::temp_directory_path() / "MG_tests_unit_auth_handler"}; memgraph::utils::Synchronized auth{ - test_folder_ / ("unit_auth_handler_test_" + std::to_string(static_cast(getpid())))}; - memgraph::glue::AuthQueryHandler auth_handler{&auth, memgraph::glue::kDefaultUserRoleRegex.data()}; + test_folder_ / ("unit_auth_handler_test_" + std::to_string(static_cast(getpid()))), + memgraph::auth::Auth::Config{/* default */}}; + memgraph::glue::AuthQueryHandler auth_handler{&auth}; std::string user_name = "Mate"; std::string edge_type_repr = "EdgeType1"; @@ -56,7 +57,7 @@ TEST_F(AuthQueryHandlerFixture, GivenAuthQueryHandlerWhenInitializedHaveNoUserna } TEST_F(AuthQueryHandlerFixture, GivenUserWhenNoDeniesOrGrantsThenNothingIsReturned) { - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms}; auth->SaveUser(user); { ASSERT_EQ(auth_handler.GetUsernames().size(), 1); } @@ -70,7 +71,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenNoDeniesOrGrantsThenNothingIsReturn TEST_F(AuthQueryHandlerFixture, GivenUserWhenAddedGrantPermissionThenItIsReturned) { perms.Grant(memgraph::auth::Permission::MATCH); - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -91,7 +92,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenAddedGrantPermissionThenItIsReturne TEST_F(AuthQueryHandlerFixture, GivenUserWhenAddedDenyPermissionThenItIsReturned) { perms.Deny(memgraph::auth::Permission::MATCH); - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -113,7 +114,7 @@ TEST_F(AuthQueryHandlerFixture, 
GivenUserWhenAddedDenyPermissionThenItIsReturned TEST_F(AuthQueryHandlerFixture, GivenUserWhenPrivilegeRevokedThenNothingIsReturned) { perms.Deny(memgraph::auth::Permission::MATCH); perms.Revoke(memgraph::auth::Permission::MATCH); - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -179,7 +180,7 @@ TEST_F(AuthQueryHandlerFixture, GivenRoleWhenPrivilegeRevokedThenNothingIsReturn TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedTwoPrivilegesThenBothAreReturned) { perms.Grant(memgraph::auth::Permission::MATCH); perms.Grant(memgraph::auth::Permission::CREATE); - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -190,7 +191,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserAndRoleWhenOneGrantedAndOtherGrantedThe perms.Grant(memgraph::auth::Permission::MATCH); memgraph::auth::Role role = memgraph::auth::Role{"Mates_role", perms}; auth->SaveRole(role); - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms}; user.SetRole(role); auth->SaveUser(user); @@ -214,7 +215,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserAndRoleWhenOneDeniedAndOtherDeniedThenB perms.Deny(memgraph::auth::Permission::MATCH); memgraph::auth::Role role = memgraph::auth::Role{"Mates_role", perms}; auth->SaveRole(role); - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms}; user.SetRole(role); auth->SaveUser(user); @@ -244,7 +245,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserAndRoleWhenOneGrantedAndOtherDeniedThen user_perms.Grant(memgraph::auth::Permission::MATCH); memgraph::auth::User user = memgraph::auth::User{ user_name, - "", + std::nullopt, user_perms, }; user.SetRole(role); @@ -274,7 +275,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserAndRoleWhenOneDeniedAndOtherGrantedThen memgraph::auth::Permissions user_perms{}; user_perms.Deny(memgraph::auth::Permission::MATCH); - memgraph::auth::User user = memgraph::auth::User{user_name, "", user_perms}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, user_perms}; user.SetRole(role); auth->SaveUser(user); @@ -304,7 +305,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedPrivilegeOnLabelThenIsDispla memgraph::auth::FineGrainedAccessPermissions{}, }; - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -333,7 +334,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedMultiplePrivilegesOnLabelThe memgraph::auth::FineGrainedAccessPermissions{}, }; - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -363,7 +364,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedAllPrivilegesOnLabelThenTopO memgraph::auth::FineGrainedAccessPermissions{}, }; - memgraph::auth::User user = 
memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -391,7 +392,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedGlobalPrivilegeOnLabelThenIs memgraph::auth::FineGrainedAccessPermissions{}, }; - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -420,7 +421,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedGlobalMultiplePrivilegesOnLa memgraph::auth::FineGrainedAccessPermissions{}, }; - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -450,7 +451,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedGlobalAllPrivilegesOnLabelTh memgraph::auth::FineGrainedAccessPermissions{}, }; - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -479,7 +480,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedPrivilegeOnEdgeTypeThenIsDis memgraph::auth::FineGrainedAccessPermissions{read_permission}, }; - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -508,7 +509,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedMultiplePrivilegesOnEdgeType memgraph::auth::FineGrainedAccessPermissions{read_permission}, }; - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -538,7 +539,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedAllPrivilegesOnEdgeTypeThenT memgraph::auth::FineGrainedAccessPermissions{read_permission}, }; - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -566,7 +567,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedGlobalPrivilegeOnEdgeTypeThe memgraph::auth::FineGrainedAccessPermissions{read_permission}, }; - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -596,7 +597,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedGlobalMultiplePrivilegesOnEd }; - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -626,7 +627,7 @@ TEST_F(AuthQueryHandlerFixture, 
GivenUserWhenGrantedGlobalAllPrivilegesOnEdgeTyp memgraph::auth::FineGrainedAccessPermissions{read_permission}, }; - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -655,7 +656,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedAndDeniedOnLabelThenNoPermis memgraph::auth::FineGrainedAccessPermissions{}, }; - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -684,7 +685,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedAndDeniedOnEdgeTypeThenNoPer memgraph::auth::FineGrainedAccessPermissions{read_permission}, }; - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); @@ -713,7 +714,7 @@ TEST_F(AuthQueryHandlerFixture, GivenUserWhenGrantedReadAndDeniedUpdateThenOneIs memgraph::auth::FineGrainedAccessPermissions{read_permission}, }; - memgraph::auth::User user = memgraph::auth::User{user_name, "", perms, handler}; + memgraph::auth::User user = memgraph::auth::User{user_name, std::nullopt, perms, handler}; auth->SaveUser(user); auto privileges = auth_handler.GetPrivileges(user_name); diff --git a/tests/unit/bfs_fine_grained.cpp b/tests/unit/bfs_fine_grained.cpp index 1557910fe..568206dfd 100644 --- a/tests/unit/bfs_fine_grained.cpp +++ b/tests/unit/bfs_fine_grained.cpp @@ -43,7 +43,7 @@ class VertexDb : public Database { } std::unique_ptr Access() override { - return db_->Access(memgraph::replication::ReplicationRole::MAIN); + return db_->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN); } std::unique_ptr MakeBfsOperator(Symbol source_sym, Symbol sink_sym, Symbol edge_sym, diff --git a/tests/unit/bfs_single_node.cpp b/tests/unit/bfs_single_node.cpp index a518a7729..a6816242d 100644 --- a/tests/unit/bfs_single_node.cpp +++ b/tests/unit/bfs_single_node.cpp @@ -32,7 +32,7 @@ class SingleNodeDb : public Database { } std::unique_ptr Access() override { - return db_->Access(memgraph::replication::ReplicationRole::MAIN); + return db_->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN); } std::unique_ptr MakeBfsOperator(Symbol source_sym, Symbol sink_sym, Symbol edge_sym, diff --git a/tests/unit/bolt_encoder.cpp b/tests/unit/bolt_encoder.cpp index 83add3cd3..19a958118 100644 --- a/tests/unit/bolt_encoder.cpp +++ b/tests/unit/bolt_encoder.cpp @@ -182,7 +182,7 @@ void TestVertexAndEdgeWithDifferentStorages(std::unique_ptrAccess(memgraph::replication::ReplicationRole::MAIN); + auto dba = db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN); auto va1 = dba->CreateVertex(); auto va2 = dba->CreateVertex(); auto l1 = dba->NameToLabel("label1"); @@ -206,11 +206,11 @@ void TestVertexAndEdgeWithDifferentStorages(std::unique_ptr vals; - vals.push_back(*memgraph::glue::ToBoltValue(memgraph::query::TypedValue(memgraph::query::VertexAccessor(va1)), *db, - memgraph::storage::View::NEW)); - vals.push_back(*memgraph::glue::ToBoltValue(memgraph::query::TypedValue(memgraph::query::VertexAccessor(va2)), *db, - memgraph::storage::View::NEW)); - 
vals.push_back(*memgraph::glue::ToBoltValue(memgraph::query::TypedValue(memgraph::query::EdgeAccessor(ea)), *db, + vals.push_back(*memgraph::glue::ToBoltValue(memgraph::query::TypedValue(memgraph::query::VertexAccessor(va1)), + db.get(), memgraph::storage::View::NEW)); + vals.push_back(*memgraph::glue::ToBoltValue(memgraph::query::TypedValue(memgraph::query::VertexAccessor(va2)), + db.get(), memgraph::storage::View::NEW)); + vals.push_back(*memgraph::glue::ToBoltValue(memgraph::query::TypedValue(memgraph::query::EdgeAccessor(ea)), db.get(), memgraph::storage::View::NEW)); bolt_encoder.MessageRecord(vals); diff --git a/tests/unit/clearing_old_disk_data.cpp b/tests/unit/clearing_old_disk_data.cpp index 76315115a..395391e12 100644 --- a/tests/unit/clearing_old_disk_data.cpp +++ b/tests/unit/clearing_old_disk_data.cpp @@ -20,7 +20,7 @@ #include "storage/v2/property_value.hpp" #include "storage/v2/view.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; class ClearingOldDiskDataTest : public ::testing::Test { public: diff --git a/tests/unit/cpp_api.cpp b/tests/unit/cpp_api.cpp index ce968b13e..84ca0b195 100644 --- a/tests/unit/cpp_api.cpp +++ b/tests/unit/cpp_api.cpp @@ -43,7 +43,8 @@ struct CppApiTestFixture : public ::testing::Test { } memgraph::query::DbAccessor &CreateDbAccessor(const memgraph::storage::IsolationLevel isolationLevel) { - accessors_.push_back(storage->Access(memgraph::replication::ReplicationRole::MAIN, isolationLevel)); + accessors_.push_back( + storage->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN, isolationLevel)); db_accessors_.emplace_back(accessors_.back().get()); return db_accessors_.back(); } diff --git a/tests/unit/cypher_main_visitor.cpp b/tests/unit/cypher_main_visitor.cpp index 9cb33589c..1353a56dd 100644 --- a/tests/unit/cypher_main_visitor.cpp +++ b/tests/unit/cypher_main_visitor.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -43,6 +43,7 @@ #include "query/procedure/module.hpp" #include "query/typed_value.hpp" +#include "utils/logging.hpp" #include "utils/string.hpp" #include "utils/variant_helpers.hpp" @@ -2550,7 +2551,7 @@ TEST_P(CypherMainVisitorTest, ShowUsersForRole) { void check_replication_query(Base *ast_generator, const ReplicationQuery *query, const std::string name, const std::optional socket_address, const ReplicationQuery::SyncMode sync_mode, const std::optional port = {}) { - EXPECT_EQ(query->replica_name_, name); + EXPECT_EQ(query->instance_name_, name); EXPECT_EQ(query->sync_mode_, sync_mode); ASSERT_EQ(static_cast(query->socket_address_), static_cast(socket_address)); if (socket_address) { @@ -2597,7 +2598,7 @@ TEST_P(CypherMainVisitorTest, TestSetReplicationMode) { } { - const std::string query = "SET REPLICATION ROLE TO MAIN WITH PORT 10000"; + const std::string query = "SET REPLICATION ROLE TO REPLICA"; ASSERT_THROW(ast_generator.ParseQuery(query), SemanticException); } @@ -2640,7 +2641,7 @@ TEST_P(CypherMainVisitorTest, TestDeleteReplica) { std::string correct_query = "DROP REPLICA replica1"; auto *correct_query_parsed = dynamic_cast(ast_generator.ParseQuery(correct_query)); ASSERT_TRUE(correct_query_parsed); - EXPECT_EQ(correct_query_parsed->replica_name_, "replica1"); + EXPECT_EQ(correct_query_parsed->instance_name_, "replica1"); } TEST_P(CypherMainVisitorTest, TestExplainRegularQuery) { @@ -3645,7 +3646,7 @@ TEST_P(CypherMainVisitorTest, MemoryLimit) { ASSERT_TRUE(query->single_query_); auto *single_query = query->single_query_; ASSERT_EQ(single_query->clauses_.size(), 2U); - auto *call_proc = dynamic_cast(single_query->clauses_[0]); + [[maybe_unused]] auto *call_proc = dynamic_cast(single_query->clauses_[0]); } { @@ -3705,7 +3706,7 @@ TEST_P(CypherMainVisitorTest, MemoryLimit) { ASSERT_TRUE(query->single_query_); auto *single_query = query->single_query_; ASSERT_EQ(single_query->clauses_.size(), 1U); - auto *call_proc = dynamic_cast(single_query->clauses_[0]); + [[maybe_unused]] auto *call_proc = dynamic_cast(single_query->clauses_[0]); } } diff --git a/tests/unit/database_get_info.cpp b/tests/unit/database_get_info.cpp index 95ed3f326..a8a275a61 100644 --- a/tests/unit/database_get_info.cpp +++ b/tests/unit/database_get_info.cpp @@ -108,7 +108,7 @@ TYPED_TEST(InfoTest, InfoCheck) { auto v2 = acc->CreateVertex(); auto v3 = acc->CreateVertex(); auto v4 = acc->CreateVertex(); - auto v5 = acc->CreateVertex(); + [[maybe_unused]] auto v5 = acc->CreateVertex(); ASSERT_FALSE(v2.AddLabel(lbl).HasError()); ASSERT_FALSE(v3.AddLabel(lbl).HasError()); @@ -165,8 +165,8 @@ TYPED_TEST(InfoTest, InfoCheck) { ASSERT_FALSE(unique_acc->Commit().HasError()); } - const auto &info = - db_acc->GetInfo(true, memgraph::replication::ReplicationRole::MAIN); // force to use configured directory + const auto &info = db_acc->GetInfo( + true, memgraph::replication_coordination_glue::ReplicationRole::MAIN); // force to use configured directory ASSERT_EQ(info.storage_info.vertex_count, 5); ASSERT_EQ(info.storage_info.edge_count, 2); diff --git a/tests/unit/dbms_database.cpp b/tests/unit/dbms_database.cpp index 20e1f55ac..535c0c055 100644 --- a/tests/unit/dbms_database.cpp +++ b/tests/unit/dbms_database.cpp @@ -29,7 +29,8 @@ memgraph::storage::Config default_conf(std::string name = "") { return {.durability = {.storage_directory = 
storage_directory / name, .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL}, - .disk = {.main_storage_directory = storage_directory / name / "disk"}}; + .disk = {.main_storage_directory = storage_directory / name / "disk"}, + .salient.name = name.empty() ? std::string{"memgraph"} : name}; } class DBMS_Database : public ::testing::Test { @@ -55,20 +56,21 @@ TEST_F(DBMS_Database, New) { .durability = {.storage_directory = storage_directory / "db2", .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL}, - .disk = {.main_storage_directory = storage_directory / "disk"}}; - auto db2 = db_handler.New("db2", db_config, generic_repl_state); + .disk = {.main_storage_directory = storage_directory / "disk"}, + .salient.name = "db2"}; + auto db2 = db_handler.New(db_config, generic_repl_state); ASSERT_TRUE(db2.HasValue() && db2.GetValue()); ASSERT_TRUE(std::filesystem::exists(storage_directory / "db2")); } { // With default config - auto db3 = db_handler.New("db3", default_conf("db3"), generic_repl_state); + auto db3 = db_handler.New(default_conf("db3"), generic_repl_state); ASSERT_TRUE(db3.HasValue() && db3.GetValue()); ASSERT_TRUE(std::filesystem::exists(storage_directory / "db3")); - auto db4 = db_handler.New("db4", default_conf("four"), generic_repl_state); + auto db4 = db_handler.New(default_conf("four"), generic_repl_state); ASSERT_TRUE(db4.HasValue() && db4.GetValue()); ASSERT_TRUE(std::filesystem::exists(storage_directory / "four")); - auto db5 = db_handler.New("db5", default_conf("db3"), generic_repl_state); + auto db5 = db_handler.New(default_conf("db3"), generic_repl_state); ASSERT_TRUE(db5.HasError() && db5.GetError() == memgraph::dbms::NewError::EXISTS); } @@ -77,15 +79,15 @@ TEST_F(DBMS_Database, New) { ASSERT_EQ(all.size(), 3); ASSERT_EQ(all[0], "db2"); ASSERT_EQ(all[1], "db3"); - ASSERT_EQ(all[2], "db4"); + ASSERT_EQ(all[2], "four"); } TEST_F(DBMS_Database, Get) { memgraph::dbms::DatabaseHandler db_handler; - auto db1 = db_handler.New("db1", default_conf("db1"), generic_repl_state); - auto db2 = db_handler.New("db2", default_conf("db2"), generic_repl_state); - auto db3 = db_handler.New("db3", default_conf("db3"), generic_repl_state); + auto db1 = db_handler.New(default_conf("db1"), generic_repl_state); + auto db2 = db_handler.New(default_conf("db2"), generic_repl_state); + auto db3 = db_handler.New(default_conf("db3"), generic_repl_state); ASSERT_TRUE(db1.HasValue()); ASSERT_TRUE(db2.HasValue()); @@ -107,9 +109,9 @@ TEST_F(DBMS_Database, Get) { TEST_F(DBMS_Database, Delete) { memgraph::dbms::DatabaseHandler db_handler; - auto db1 = db_handler.New("db1", default_conf("db1"), generic_repl_state); - auto db2 = db_handler.New("db2", default_conf("db2"), generic_repl_state); - auto db3 = db_handler.New("db3", default_conf("db3"), generic_repl_state); + auto db1 = db_handler.New(default_conf("db1"), generic_repl_state); + auto db2 = db_handler.New(default_conf("db2"), generic_repl_state); + auto db3 = db_handler.New(default_conf("db3"), generic_repl_state); ASSERT_TRUE(db1.HasValue()); ASSERT_TRUE(db2.HasValue()); @@ -119,7 +121,7 @@ TEST_F(DBMS_Database, Delete) { // Release accessor to storage db1.GetValue().reset(); // Delete from handler - ASSERT_TRUE(db_handler.Delete("db1")); + ASSERT_TRUE(db_handler.TryDelete("db1")); ASSERT_FALSE(db_handler.Get("db1")); auto all = db_handler.All(); std::sort(all.begin(), all.end()); @@ -129,8 +131,8 @@ TEST_F(DBMS_Database, Delete) { } { - 
ASSERT_THROW(db_handler.Delete("db0"), memgraph::utils::BasicException); - ASSERT_THROW(db_handler.Delete("db1"), memgraph::utils::BasicException); + ASSERT_THROW(db_handler.TryDelete("db0"), memgraph::utils::BasicException); + ASSERT_THROW(db_handler.TryDelete("db1"), memgraph::utils::BasicException); auto all = db_handler.All(); std::sort(all.begin(), all.end()); ASSERT_EQ(all.size(), 2); @@ -144,17 +146,18 @@ TEST_F(DBMS_Database, DeleteAndRecover) { memgraph::dbms::DatabaseHandler db_handler; { - auto db1 = db_handler.New("db1", default_conf("db1"), generic_repl_state); - auto db2 = db_handler.New("db2", default_conf("db2"), generic_repl_state); + auto db1 = db_handler.New(default_conf("db1"), generic_repl_state); + auto db2 = db_handler.New(default_conf("db2"), generic_repl_state); memgraph::storage::Config conf_w_snap{ .durability = {.storage_directory = storage_directory / "db3", .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, .snapshot_on_exit = true}, - .disk = {.main_storage_directory = storage_directory / "db3" / "disk"}}; + .disk = {.main_storage_directory = storage_directory / "db3" / "disk"}, + .salient.name = "db3"}; - auto db3 = db_handler.New("db3", conf_w_snap, generic_repl_state); + auto db3 = db_handler.New(conf_w_snap, generic_repl_state); ASSERT_TRUE(db1.HasValue()); ASSERT_TRUE(db2.HasValue()); @@ -184,23 +187,24 @@ TEST_F(DBMS_Database, DeleteAndRecover) { } // Delete from handler - ASSERT_TRUE(db_handler.Delete("db1")); - ASSERT_TRUE(db_handler.Delete("db2")); - ASSERT_TRUE(db_handler.Delete("db3")); + ASSERT_TRUE(db_handler.TryDelete("db1")); + ASSERT_TRUE(db_handler.TryDelete("db2")); + ASSERT_TRUE(db_handler.TryDelete("db3")); { // Recover graphs (only db3) - auto db1 = db_handler.New("db1", default_conf("db1"), generic_repl_state); - auto db2 = db_handler.New("db2", default_conf("db2"), generic_repl_state); + auto db1 = db_handler.New(default_conf("db1"), generic_repl_state); + auto db2 = db_handler.New(default_conf("db2"), generic_repl_state); memgraph::storage::Config conf_w_rec{ .durability = {.storage_directory = storage_directory / "db3", .recover_on_startup = true, .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL}, - .disk = {.main_storage_directory = storage_directory / "db3" / "disk"}}; + .disk = {.main_storage_directory = storage_directory / "db3" / "disk"}, + .salient.name = "db3"}; - auto db3 = db_handler.New("db3", conf_w_rec, generic_repl_state); + auto db3 = db_handler.New(conf_w_rec, generic_repl_state); // Check content { diff --git a/tests/unit/dbms_handler.cpp b/tests/unit/dbms_handler.cpp index 0ea4197fb..2abe0b77d 100644 --- a/tests/unit/dbms_handler.cpp +++ b/tests/unit/dbms_handler.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -25,8 +25,23 @@ #include "query/config.hpp" #include "query/interpreter.hpp" +namespace { +std::set GetDirs(auto path) { + std::set dirs; + // Clean the unused directories + for (const auto &entry : std::filesystem::directory_iterator(path)) { + const auto &name = entry.path().filename().string(); + if (entry.is_directory() && !name.empty() && name.front() != '.') { + dirs.emplace(name); + } + } + return dirs; +} +} // namespace + // Global std::filesystem::path storage_directory{std::filesystem::temp_directory_path() / "MG_test_unit_dbms_handler"}; +std::filesystem::path db_dir{storage_directory / "databases"}; static memgraph::storage::Config storage_conf; std::unique_ptr> auth; @@ -51,8 +66,8 @@ class TestEnvironment : public ::testing::Environment { } auth = std::make_unique>( - storage_directory / "auth"); - ptr_ = std::make_unique(storage_conf, auth.get(), false, true); + storage_directory / "auth", memgraph::auth::Auth::Config{/* default */}); + ptr_ = std::make_unique(storage_conf, auth.get(), false); } void TearDown() override { @@ -74,7 +89,7 @@ TEST(DBMS_Handler, Init) { std::vector dirs = {"snapshots", "streams", "triggers", "wal"}; for (const auto &dir : dirs) ASSERT_TRUE(std::filesystem::exists(storage_directory / dir)) << (storage_directory / dir); - const auto db_path = storage_directory / "databases" / memgraph::dbms::kDefaultDB; + const auto db_path = db_dir / memgraph::dbms::kDefaultDB; ASSERT_TRUE(std::filesystem::exists(db_path)); for (const auto &dir : dirs) { std::error_code ec; @@ -92,10 +107,14 @@ TEST(DBMS_Handler, New) { ASSERT_EQ(all[0], memgraph::dbms::kDefaultDB); } { + const auto dirs = GetDirs(db_dir); auto db1 = dbms.New("db1"); ASSERT_TRUE(db1.HasValue()); ASSERT_TRUE(db1.GetValue()); - ASSERT_TRUE(std::filesystem::exists(storage_directory / "databases" / "db1")); + // New flow doesn't make db named directories + ASSERT_FALSE(std::filesystem::exists(db_dir / "db1")); + const auto dirs_w_db1 = GetDirs(db_dir); + ASSERT_EQ(dirs_w_db1.size(), dirs.size() + 1); ASSERT_TRUE(db1.GetValue()->storage() != nullptr); ASSERT_TRUE(db1.GetValue()->streams() != nullptr); ASSERT_TRUE(db1.GetValue()->trigger_store() != nullptr); @@ -111,9 +130,13 @@ TEST(DBMS_Handler, New) { ASSERT_TRUE(db2.HasError() && db2.GetError() == memgraph::dbms::NewError::EXISTS); } { + const auto dirs = GetDirs(db_dir); auto db3 = dbms.New("db3"); ASSERT_TRUE(db3.HasValue()); - ASSERT_TRUE(std::filesystem::exists(storage_directory / "databases" / "db3")); + // New flow doesn't make db named directories + ASSERT_FALSE(std::filesystem::exists(db_dir / "db3")); + const auto dirs_w_db3 = GetDirs(db_dir); + ASSERT_EQ(dirs_w_db3.size(), dirs.size() + 1); ASSERT_TRUE(db3.GetValue()->storage() != nullptr); ASSERT_TRUE(db3.GetValue()->streams() != nullptr); ASSERT_TRUE(db3.GetValue()->trigger_store() != nullptr); @@ -156,16 +179,16 @@ TEST(DBMS_Handler, Delete) { auto db1_acc = dbms.Get("db1"); // Holds access to database { - auto del = dbms.Delete(memgraph::dbms::kDefaultDB); + auto del = dbms.TryDelete(memgraph::dbms::kDefaultDB); ASSERT_TRUE(del.HasError() && del.GetError() == memgraph::dbms::DeleteError::DEFAULT_DB); } { - auto del = dbms.Delete("non-existent"); + auto del = dbms.TryDelete("non-existent"); ASSERT_TRUE(del.HasError() && del.GetError() == memgraph::dbms::DeleteError::NON_EXISTENT); } { // db1_acc 
is using db1 - auto del = dbms.Delete("db1"); + auto del = dbms.TryDelete("db1"); ASSERT_TRUE(del.HasError()); ASSERT_TRUE(del.GetError() == memgraph::dbms::DeleteError::USING); } @@ -173,15 +196,17 @@ TEST(DBMS_Handler, Delete) { // Reset db1_acc (releases access) so delete will succeed db1_acc.reset(); ASSERT_FALSE(db1_acc); - auto del = dbms.Delete("db1"); + auto del = dbms.TryDelete("db1"); ASSERT_FALSE(del.HasError()) << (int)del.GetError(); - auto del2 = dbms.Delete("db1"); + auto del2 = dbms.TryDelete("db1"); ASSERT_TRUE(del2.HasError() && del2.GetError() == memgraph::dbms::DeleteError::NON_EXISTENT); } { - auto del = dbms.Delete("db3"); + const auto dirs = GetDirs(db_dir); + auto del = dbms.TryDelete("db3"); ASSERT_FALSE(del.HasError()); - ASSERT_FALSE(std::filesystem::exists(storage_directory / "databases" / "db3")); + const auto dirs_wo_db3 = GetDirs(db_dir); + ASSERT_EQ(dirs_wo_db3.size(), dirs.size() - 1); } } diff --git a/tests/unit/dbms_handler_community.cpp b/tests/unit/dbms_handler_community.cpp index 860f70ba0..4a47e018b 100644 --- a/tests/unit/dbms_handler_community.cpp +++ b/tests/unit/dbms_handler_community.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -51,7 +51,7 @@ class TestEnvironment : public ::testing::Environment { } auth = std::make_unique>( - storage_directory / "auth"); + storage_directory / "auth", memgraph::auth::Auth::Config{/* default */}); ptr_ = std::make_unique(storage_conf); } @@ -90,9 +90,9 @@ TEST(DBMS_Handler, Get) { ASSERT_TRUE(default_db->streams() != nullptr); ASSERT_TRUE(default_db->trigger_store() != nullptr); ASSERT_TRUE(default_db->thread_pool() != nullptr); - ASSERT_EQ(default_db->storage()->id(), memgraph::dbms::kDefaultDB); + ASSERT_EQ(default_db->storage()->name(), memgraph::dbms::kDefaultDB); auto conf = storage_conf; - conf.name = memgraph::dbms::kDefaultDB; + conf.salient.name = memgraph::dbms::kDefaultDB; ASSERT_EQ(default_db->storage()->config_, conf); } diff --git a/tests/unit/interpreter_faker.hpp b/tests/unit/interpreter_faker.hpp index 5823c6a87..3b6075911 100644 --- a/tests/unit/interpreter_faker.hpp +++ b/tests/unit/interpreter_faker.hpp @@ -21,8 +21,9 @@ struct InterpreterFaker { } auto Prepare(const std::string &query, const std::map ¶ms = {}) { - ResultStreamFaker stream(interpreter.current_db_.db_acc_->get()->storage()); const auto [header, _1, qid, _2] = interpreter.Prepare(query, params, {}); + auto &db = interpreter.current_db_.db_acc_; + ResultStreamFaker stream(db ? db->get()->storage() : nullptr); stream.Header(header); return std::make_pair(std::move(stream), qid); } diff --git a/tests/unit/multi_tenancy.cpp b/tests/unit/multi_tenancy.cpp new file mode 100644 index 000000000..59364776a --- /dev/null +++ b/tests/unit/multi_tenancy.cpp @@ -0,0 +1,377 @@ +// Copyright 2024 Memgraph Ltd. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source +// License, and you may not use this file except in compliance with the Business Source License. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +#include +#include +#include +#include + +#include "communication/bolt/v1/value.hpp" +#include "communication/result_stream_faker.hpp" +#include "csv/parsing.hpp" +#include "dbms/dbms_handler.hpp" +#include "disk_test_utils.hpp" +#include "flags/run_time_configurable.hpp" +#include "glue/communication.hpp" +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "interpreter_faker.hpp" +#include "license/license.hpp" +#include "query/auth_checker.hpp" +#include "query/config.hpp" +#include "query/exceptions.hpp" +#include "query/interpreter.hpp" +#include "query/interpreter_context.hpp" +#include "query/metadata.hpp" +#include "query/stream.hpp" +#include "query/typed_value.hpp" +#include "query_common.hpp" +#include "replication/state.hpp" +#include "storage/v2/inmemory/storage.hpp" +#include "storage/v2/isolation_level.hpp" +#include "storage/v2/property_value.hpp" +#include "storage/v2/storage_mode.hpp" +#include "utils/logging.hpp" +#include "utils/lru_cache.hpp" +#include "utils/synchronized.hpp" + +namespace { +std::set GetDirs(auto path) { + std::set dirs; + // Clean the unused directories + for (const auto &entry : std::filesystem::directory_iterator(path)) { + const auto &name = entry.path().filename().string(); + if (entry.is_directory() && !name.empty() && name.front() != '.') { + dirs.emplace(name); + } + } + return dirs; +} + +auto RunMtQuery(auto &interpreter, const std::string &query, std::string_view res) { + auto [stream, qid] = interpreter.Prepare(query); + ASSERT_EQ(stream.GetHeader().size(), 1U); + EXPECT_EQ(stream.GetHeader()[0], "STATUS"); + interpreter.Pull(&stream, 1); + ASSERT_EQ(stream.GetSummary().count("has_more"), 1); + ASSERT_FALSE(stream.GetSummary().at("has_more").ValueBool()); + ASSERT_EQ(stream.GetResults()[0].size(), 1U); + ASSERT_EQ(stream.GetResults()[0][0].ValueString(), res); +} + +auto RunQuery(auto &interpreter, const std::string &query) { + auto [stream, qid] = interpreter.Prepare(query); + interpreter.Pull(&stream, 1); + return stream.GetResults(); +} + +void UseDatabase(auto &interpreter, const std::string &name, std::string_view res) { + RunMtQuery(interpreter, "USE DATABASE " + name, res); +} + +void DropDatabase(auto &interpreter, const std::string &name, std::string_view res) { + RunMtQuery(interpreter, "DROP DATABASE " + name, res); +} +} // namespace + +class MultiTenantTest : public ::testing::Test { + public: + std::filesystem::path data_directory = std::filesystem::temp_directory_path() / "MG_tests_unit_multi_tenancy"; + + MultiTenantTest() = default; + + memgraph::storage::Config config{ + [&]() { + memgraph::storage::Config config{}; + UpdatePaths(config, data_directory); + return config; + }() // iile + }; + + struct MinMemgraph { + explicit MinMemgraph(const memgraph::storage::Config &conf) + : auth{conf.durability.storage_directory / "auth", memgraph::auth::Auth::Config{/* default */}}, + dbms{conf, &auth, true}, + interpreter_context{{}, &dbms, &dbms.ReplicationState()} { + memgraph::utils::global_settings.Initialize(conf.durability.storage_directory / "settings"); + memgraph::license::RegisterLicenseSettings(memgraph::license::global_license_checker, + memgraph::utils::global_settings); + memgraph::flags::run_time::Initialize(); + memgraph::license::global_license_checker.CheckEnvLicense(); + } + + 
~MinMemgraph() { memgraph::utils::global_settings.Finalize(); } + + auto NewInterpreter() { return InterpreterFaker{&interpreter_context, dbms.Get()}; } + + memgraph::utils::Synchronized auth; + memgraph::dbms::DbmsHandler dbms; + memgraph::query::InterpreterContext interpreter_context; + }; + + void SetUp() override { + TearDown(); + min_mg.emplace(config); + } + + void TearDown() override { + min_mg.reset(); + if (std::filesystem::exists(data_directory)) std::filesystem::remove_all(data_directory); + } + + auto NewInterpreter() { return min_mg->NewInterpreter(); } + + auto &DBMS() { return min_mg->dbms; } + + std::optional min_mg; +}; + +TEST_F(MultiTenantTest, SimpleCreateDrop) { + // 1) Create multiple interpreters with the default db + // 2) Create multiple databases using both + // 3) Drop databases while the other is using + + // 1 + auto interpreter1 = this->NewInterpreter(); + auto interpreter2 = this->NewInterpreter(); + + // 2 + auto create = [&](auto &interpreter, const std::string &name, bool success) { + RunMtQuery(interpreter, "CREATE DATABASE " + name, + success ? ("Successfully created database " + name) : (name + " already exists.")); + }; + + create(interpreter1, "db1", true); + create(interpreter1, "db1", false); + create(interpreter2, "db1", false); + create(interpreter2, "db2", true); + create(interpreter1, "db2", false); + create(interpreter2, "db3", true); + create(interpreter2, "db4", true); + + // 3 + UseDatabase(interpreter1, "db2", "Using db2"); + UseDatabase(interpreter1, "db2", "Already using db2"); + UseDatabase(interpreter2, "db2", "Using db2"); + UseDatabase(interpreter1, "db4", "Using db4"); + + ASSERT_THROW(DropDatabase(interpreter1, memgraph::dbms::kDefaultDB.data(), ""), + memgraph::query::QueryRuntimeException); // default db + + DropDatabase(interpreter1, "db1", "Successfully deleted db1"); + ASSERT_THROW(DropDatabase(interpreter2, "db1", ""), memgraph::query::QueryRuntimeException); // No db1 + ASSERT_THROW(DropDatabase(interpreter1, "db1", ""), memgraph::query::QueryRuntimeException); // No db1 + + ASSERT_THROW(DropDatabase(interpreter1, "db2", ""), memgraph::query::QueryRuntimeException); // i2 using db2 + ASSERT_THROW(DropDatabase(interpreter1, "db4", ""), memgraph::query::QueryRuntimeException); // i1 using db4 +} + +TEST_F(MultiTenantTest, DbmsNewTryDelete) { + // 1) Create multiple interpreters with the default db + // 2) Create multiple databases using dbms + // 3) Try delete databases while the interpreters are using them + + // 1 + auto interpreter1 = this->NewInterpreter(); + auto interpreter2 = this->NewInterpreter(); + + // 2 + auto &dbms = DBMS(); + ASSERT_FALSE(dbms.New("db1").HasError()); + ASSERT_FALSE(dbms.New("db2").HasError()); + ASSERT_FALSE(dbms.New("db3").HasError()); + ASSERT_FALSE(dbms.New("db4").HasError()); + + // 3 + UseDatabase(interpreter2, "db2", "Using db2"); + UseDatabase(interpreter1, "db4", "Using db4"); + + ASSERT_FALSE(dbms.TryDelete("db1").HasError()); + ASSERT_TRUE(dbms.TryDelete("db2").HasError()); + ASSERT_FALSE(dbms.TryDelete("db3").HasError()); + ASSERT_TRUE(dbms.TryDelete("db4").HasError()); +} + +TEST_F(MultiTenantTest, DbmsUpdate) { + // 1) Create multiple interpreters with the default db + // 2) Create multiple databases using dbms + // 3) Try to update databases + + auto &dbms = DBMS(); + auto interpreter1 = this->NewInterpreter(); + + // Update clean default db + auto default_db = dbms.Get(); + const auto old_uuid = default_db->config().salient.uuid; + const memgraph::utils::UUID new_uuid{/* random */}; 
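(Aside, illustrative only: the sketch below condenses the DbmsHandler::Update() behaviour that the assertions in this test exercise. It assumes the MultiTenantTest fixture defined above and reuses only calls that already appear in this file; the "hypothetical test body" framing and the variable names are mine, not part of the patch.)

    // Hypothetical test body, assuming the MultiTenantTest fixture above.
    auto &dbms = DBMS();
    auto default_db = dbms.Get();  // accessor to the default "memgraph" database

    // Same name, fresh UUID, no data yet: the update succeeds and the existing
    // storage object is reused rather than replaced.
    auto clean_update = dbms.Update(
        memgraph::storage::SalientConfig{.name = "memgraph", .uuid = memgraph::utils::UUID{/* random */}});
    ASSERT_TRUE(clean_update.HasValue());
    ASSERT_EQ(default_db->storage(), clean_update.GetValue()->storage());

    // Once the database holds data, a UUID change is rejected (HasError()),
    // while re-sending the currently active config is still accepted; see the
    // "dirty default db" and "same config" assertions that follow.
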
+ const memgraph::storage::SalientConfig &config{.name = "memgraph", .uuid = new_uuid}; + auto new_default = dbms.Update(config); + ASSERT_TRUE(new_default.HasValue()); + ASSERT_NE(new_uuid, old_uuid); + ASSERT_EQ(default_db->storage(), new_default.GetValue()->storage()); + + // Add node to default + RunQuery(interpreter1, "CREATE (:Node)"); + + // Fail to update dirty default db + const memgraph::storage::SalientConfig &failing_config{.name = "memgraph", .uuid = {}}; + auto failed_update = dbms.Update(failing_config); + ASSERT_TRUE(failed_update.HasError()); + + // Succeed when updating with the same config + auto same_update = dbms.Update(config); + ASSERT_TRUE(same_update.HasValue()); + ASSERT_EQ(new_default.GetValue()->storage(), same_update.GetValue()->storage()); + + // Create new db + auto db1 = dbms.New("db1"); + ASSERT_FALSE(db1.HasError()); + RunMtQuery(interpreter1, "USE DATABASE db1", "Using db1"); + RunQuery(interpreter1, "CREATE (:NewNode)"); + RunQuery(interpreter1, "CREATE (:NewNode)"); + const auto db1_config_old = db1.GetValue()->config(); + + // Begin a transaction on db1 + auto interpreter2 = this->NewInterpreter(); + RunMtQuery(interpreter2, "USE DATABASE db1", "Using db1"); + ASSERT_EQ(RunQuery(interpreter2, "SHOW DATABASE")[0][0].ValueString(), "db1"); + RunQuery(interpreter2, "BEGIN"); + + // Update and check the new db in clean + auto interpreter3 = this->NewInterpreter(); + const memgraph::storage::SalientConfig &db1_config_new{.name = "db1", .uuid = {}}; + auto new_db1 = dbms.Update(db1_config_new); + ASSERT_TRUE(new_db1.HasValue()); + ASSERT_NE(db1_config_new.uuid, db1_config_old.salient.uuid); + RunMtQuery(interpreter3, "USE DATABASE db1", "Using db1"); + ASSERT_EQ(RunQuery(interpreter3, "MATCH(n) RETURN count(*)")[0][0].ValueInt(), 0); + + // Check that the interpreter1 is still valid, but lacking a db + ASSERT_THROW(RunQuery(interpreter1, "CREATE (:Node)"), memgraph::query::DatabaseContextRequiredException); + + // Check that the interpreter2 is still valid and pointing to the old db1 (until commit) + RunQuery(interpreter2, "CREATE (:NewNode)"); + ASSERT_EQ(RunQuery(interpreter2, "MATCH(n) RETURN count(*)")[0][0].ValueInt(), 3); + RunQuery(interpreter2, "COMMIT"); + ASSERT_THROW(RunQuery(interpreter2, "MATCH(n) RETURN n"), memgraph::query::DatabaseContextRequiredException); +} + +TEST_F(MultiTenantTest, DbmsNewDelete) { + // 1) Create multiple interpreters with the default db + // 2) Create multiple databases using dbms + // 3) Defer delete databases while the interpreters are using them + // 4) Database should be a zombie until the using interpreter retries to query it + // 5) Check it is deleted from disk + + // 1 + auto interpreter1 = this->NewInterpreter(); + auto interpreter2 = this->NewInterpreter(); + + // 2 + auto &dbms = DBMS(); + ASSERT_FALSE(dbms.New("db1").HasError()); + ASSERT_FALSE(dbms.New("db2").HasError()); + ASSERT_FALSE(dbms.New("db3").HasError()); + ASSERT_FALSE(dbms.New("db4").HasError()); + + // 3 + UseDatabase(interpreter2, "db2", "Using db2"); + UseDatabase(interpreter1, "db4", "Using db4"); + + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter2, "CREATE (:Node{on:\"db2\"})"); + RunQuery(interpreter2, "CREATE (:Node{on:\"db2\"})"); + + ASSERT_FALSE(dbms.Delete("db1").HasError()); + ASSERT_FALSE(dbms.Delete("db2").HasError()); + 
ASSERT_FALSE(dbms.Delete("db3").HasError()); + ASSERT_FALSE(dbms.Delete("db4").HasError()); + + // 4 + ASSERT_EQ(dbms.All().size(), 1); + ASSERT_EQ(GetDirs(data_directory / "databases").size(), 3); // All used databases remain on disk, but unusable + ASSERT_THROW(RunQuery(interpreter1, "MATCH(:Node{on:db4}) RETURN count(*)"), + memgraph::query::DatabaseContextRequiredException); + ASSERT_THROW(RunQuery(interpreter2, "MATCH(:Node{on:db2}) RETURN count(*)"), + memgraph::query::DatabaseContextRequiredException); + + // 5 + using namespace std::chrono_literals; + std::this_thread::sleep_for(100ms); // Wait for the filesystem to be updated + ASSERT_EQ(GetDirs(data_directory / "databases").size(), 1); // Databases deleted from disk + ASSERT_THROW(RunQuery(interpreter1, "MATCH(n) RETURN n"), memgraph::query::DatabaseContextRequiredException); + ASSERT_THROW(RunQuery(interpreter2, "MATCH(n) RETURN n"), memgraph::query::DatabaseContextRequiredException); +} + +TEST_F(MultiTenantTest, DbmsNewDeleteWTx) { + // 1) Create multiple interpreters with the default db + // 2) Create multiple databases using dbms + // 3) Defer delete databases while the interpreters are using them + // 4) Interpreters that had an open transaction before should still be working + // 5) New transactions on deleted databases should throw + // 6) Switching databases should still be possible + + // 1 + auto interpreter1 = this->NewInterpreter(); + auto interpreter2 = this->NewInterpreter(); + + // 2 + auto &dbms = DBMS(); + ASSERT_FALSE(dbms.New("db1").HasError()); + ASSERT_FALSE(dbms.New("db2").HasError()); + ASSERT_FALSE(dbms.New("db3").HasError()); + ASSERT_FALSE(dbms.New("db4").HasError()); + + // 3 + UseDatabase(interpreter2, "db2", "Using db2"); + UseDatabase(interpreter1, "db4", "Using db4"); + + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter1, "CREATE (:Node{on:\"db4\"})"); + RunQuery(interpreter2, "CREATE (:Node{on:\"db2\"})"); + RunQuery(interpreter2, "CREATE (:Node{on:\"db2\"})"); + + RunQuery(interpreter1, "BEGIN"); + RunQuery(interpreter2, "BEGIN"); + + ASSERT_FALSE(dbms.Delete("db1").HasError()); + ASSERT_FALSE(dbms.Delete("db2").HasError()); + ASSERT_FALSE(dbms.Delete("db3").HasError()); + ASSERT_FALSE(dbms.Delete("db4").HasError()); + + // 4 + ASSERT_EQ(dbms.All().size(), 1); + ASSERT_EQ(GetDirs(data_directory / "databases").size(), 3); // All used databases remain on disk, and usable + ASSERT_EQ(RunQuery(interpreter1, "MATCH(:Node{on:\"db4\"}) RETURN count(*)")[0][0].ValueInt(), 4); + ASSERT_EQ(RunQuery(interpreter2, "MATCH(:Node{on:\"db2\"}) RETURN count(*)")[0][0].ValueInt(), 2); + RunQuery(interpreter1, "MATCH(n:Node{on:\"db4\"}) DELETE n"); + RunQuery(interpreter2, "CREATE(:Node{on:\"db2\"})"); + ASSERT_EQ(RunQuery(interpreter1, "MATCH(:Node{on:\"db4\"}) RETURN count(*)")[0][0].ValueInt(), 0); + ASSERT_EQ(RunQuery(interpreter2, "MATCH(:Node{on:\"db2\"}) RETURN count(*)")[0][0].ValueInt(), 3); + RunQuery(interpreter1, "COMMIT"); + RunQuery(interpreter2, "COMMIT"); + + // 5 + using namespace std::chrono_literals; + std::this_thread::sleep_for(100ms); // Wait for the filesystem to be updated + ASSERT_EQ(GetDirs(data_directory / "databases").size(), 1); // Only the active databases remain + ASSERT_THROW(RunQuery(interpreter1, "MATCH(n) RETURN n"), memgraph::query::DatabaseContextRequiredException); + ASSERT_THROW(RunQuery(interpreter2, "MATCH(n) RETURN n"), 
memgraph::query::DatabaseContextRequiredException); + + // 6 + UseDatabase(interpreter2, memgraph::dbms::kDefaultDB.data(), "Using memgraph"); + UseDatabase(interpreter1, memgraph::dbms::kDefaultDB.data(), "Using memgraph"); +} diff --git a/tests/unit/plan_pretty_print.cpp b/tests/unit/plan_pretty_print.cpp index 4a513e82c..ef2395931 100644 --- a/tests/unit/plan_pretty_print.cpp +++ b/tests/unit/plan_pretty_print.cpp @@ -43,7 +43,7 @@ class PrintToJsonTest : public ::testing::Test { PrintToJsonTest() : config(disk_test_utils::GenerateOnDiskConfig(testSuite)), db(new StorageType(config)), - dba_storage(db->Access(memgraph::replication::ReplicationRole::MAIN)), + dba_storage(db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)), dba(dba_storage.get()) {} ~PrintToJsonTest() override { diff --git a/tests/unit/query_cost_estimator.cpp b/tests/unit/query_cost_estimator.cpp index 631d17414..702b6e759 100644 --- a/tests/unit/query_cost_estimator.cpp +++ b/tests/unit/query_cost_estimator.cpp @@ -23,7 +23,7 @@ using namespace memgraph::query; using namespace memgraph::query::plan; -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; using CardParam = CostEstimator::CardParam; using CostParam = CostEstimator::CostParam; using MiscParam = CostEstimator::MiscParam; diff --git a/tests/unit/query_dump.cpp b/tests/unit/query_dump.cpp index a1165789b..23eab17e0 100644 --- a/tests/unit/query_dump.cpp +++ b/tests/unit/query_dump.cpp @@ -141,7 +141,7 @@ DatabaseState GetState(memgraph::storage::Storage *db) { // Capture all vertices std::map gid_mapping; std::set vertices; - auto dba = db->Access(memgraph::replication::ReplicationRole::MAIN); + auto dba = db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN); for (const auto &vertex : dba->Vertices(memgraph::storage::View::NEW)) { std::set> labels; auto maybe_labels = vertex.Labels(memgraph::storage::View::NEW); @@ -267,7 +267,7 @@ memgraph::storage::EdgeAccessor CreateEdge(memgraph::storage::Storage::Accessor } template -void VerifyQueries(const std::vector> &results, TArgs &&...args) { +void VerifyQueries(const std::vector> &results, TArgs &&... 
args) { std::vector expected{std::forward(args)...}; std::vector got; got.reserve(results.size()); @@ -704,11 +704,13 @@ TYPED_TEST(DumpTest, CheckStateVertexWithMultipleProperties) { config.disk = disk_test_utils::GenerateOnDiskConfig("query-dump-s1").disk; config.force_on_disk = true; } - auto on_exit_s1 = memgraph::utils::OnScopeExit{[&]() { - if constexpr (std::is_same_v) { + auto clean_up_s1 = memgraph::utils::OnScopeExit{[&] { + if (std::is_same::value) { disk_test_utils::RemoveRocksDbDirs("query-dump-s1"); } + std::filesystem::remove_all(config.durability.storage_directory); }}; + memgraph::replication::ReplicationState repl_state(ReplicationStateRootPath(config)); memgraph::utils::Gatekeeper db_gk(config, repl_state); @@ -823,11 +825,13 @@ TYPED_TEST(DumpTest, CheckStateSimpleGraph) { config.disk = disk_test_utils::GenerateOnDiskConfig("query-dump-s2").disk; config.force_on_disk = true; } - auto on_exit_s2 = memgraph::utils::OnScopeExit{[&]() { - if constexpr (std::is_same_v) { + auto clean_up_s2 = memgraph::utils::OnScopeExit{[&] { + if (std::is_same::value) { disk_test_utils::RemoveRocksDbDirs("query-dump-s2"); } + std::filesystem::remove_all(config.durability.storage_directory); }}; + memgraph::replication::ReplicationState repl_state{ReplicationStateRootPath(config)}; memgraph::utils::Gatekeeper db_gk{config, repl_state}; auto db_acc_opt = db_gk.access(); @@ -1101,7 +1105,7 @@ TYPED_TEST(DumpTest, MultiplePartialPulls) { } TYPED_TEST(DumpTest, DumpDatabaseWithTriggers) { - auto acc = this->db->storage()->Access(memgraph::replication::ReplicationRole::MAIN); + auto acc = this->db->storage()->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN); memgraph::query::DbAccessor dba(acc.get()); { auto trigger_store = this->db.get()->trigger_store(); diff --git a/tests/unit/query_expression_evaluator.cpp b/tests/unit/query_expression_evaluator.cpp index b2a7c1f7a..c725d7e54 100644 --- a/tests/unit/query_expression_evaluator.cpp +++ b/tests/unit/query_expression_evaluator.cpp @@ -67,7 +67,7 @@ class ExpressionEvaluatorTest : public ::testing::Test { ExpressionEvaluatorTest() : config(disk_test_utils::GenerateOnDiskConfig(testSuite)), db(new StorageType(config)), - storage_dba(db->Access(memgraph::replication::ReplicationRole::MAIN)), + storage_dba(db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)), dba(storage_dba.get()) {} ~ExpressionEvaluatorTest() override { diff --git a/tests/unit/query_hint_provider.cpp b/tests/unit/query_hint_provider.cpp index 5510812f1..4165ef9d2 100644 --- a/tests/unit/query_hint_provider.cpp +++ b/tests/unit/query_hint_provider.cpp @@ -39,7 +39,7 @@ class HintProviderSuite : public ::testing::Test { int symbol_count = 0; void SetUp() { - storage_dba.emplace(db->Access(memgraph::replication::ReplicationRole::MAIN)); + storage_dba.emplace(db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)); dba.emplace(storage_dba->get()); } diff --git a/tests/unit/query_plan_accumulate_aggregate.cpp b/tests/unit/query_plan_accumulate_aggregate.cpp index 68498cc40..c8f1c30c9 100644 --- a/tests/unit/query_plan_accumulate_aggregate.cpp +++ b/tests/unit/query_plan_accumulate_aggregate.cpp @@ -25,7 +25,7 @@ #include "storage/v2/disk/storage.hpp" #include "storage/v2/inmemory/storage.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; using namespace memgraph::query; using namespace memgraph::query::plan; diff --git 
a/tests/unit/query_plan_bag_semantics.cpp b/tests/unit/query_plan_bag_semantics.cpp index 1bdaf68c1..4f3bd5256 100644 --- a/tests/unit/query_plan_bag_semantics.cpp +++ b/tests/unit/query_plan_bag_semantics.cpp @@ -31,7 +31,7 @@ #include "query_plan_common.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; using namespace memgraph::query; using namespace memgraph::query::plan; diff --git a/tests/unit/query_plan_create_set_remove_delete.cpp b/tests/unit/query_plan_create_set_remove_delete.cpp index fcb98cbd9..1fa400940 100644 --- a/tests/unit/query_plan_create_set_remove_delete.cpp +++ b/tests/unit/query_plan_create_set_remove_delete.cpp @@ -38,7 +38,7 @@ using namespace memgraph::query; using namespace memgraph::query::plan; -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; template class QueryPlanTest : public testing::Test { diff --git a/tests/unit/query_plan_match_filter_return.cpp b/tests/unit/query_plan_match_filter_return.cpp index e97b10742..d5468b6b5 100644 --- a/tests/unit/query_plan_match_filter_return.cpp +++ b/tests/unit/query_plan_match_filter_return.cpp @@ -42,7 +42,7 @@ using namespace memgraph::query; using namespace memgraph::query::plan; -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; const std::string testSuite = "query_plan_match_filter_return"; diff --git a/tests/unit/query_plan_operator_to_string.cpp b/tests/unit/query_plan_operator_to_string.cpp index 4430e6d23..694552cf0 100644 --- a/tests/unit/query_plan_operator_to_string.cpp +++ b/tests/unit/query_plan_operator_to_string.cpp @@ -37,7 +37,7 @@ class OperatorToStringTest : public ::testing::Test { OperatorToStringTest() : config(disk_test_utils::GenerateOnDiskConfig(testSuite)), db(new StorageType(config)), - dba_storage(db->Access(memgraph::replication::ReplicationRole::MAIN)), + dba_storage(db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)), dba(dba_storage.get()) {} ~OperatorToStringTest() override { diff --git a/tests/unit/query_plan_read_write_typecheck.cpp b/tests/unit/query_plan_read_write_typecheck.cpp index 99b3c3da7..f9f14902b 100644 --- a/tests/unit/query_plan_read_write_typecheck.cpp +++ b/tests/unit/query_plan_read_write_typecheck.cpp @@ -37,7 +37,7 @@ class ReadWriteTypeCheckTest : public ::testing::Test { memgraph::storage::Config config = disk_test_utils::GenerateOnDiskConfig(testSuite); std::unique_ptr db{new StorageType(config)}; std::unique_ptr dba_storage{ - db->Access(memgraph::replication::ReplicationRole::MAIN)}; + db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)}; memgraph::query::DbAccessor dba{dba_storage.get()}; void TearDown() override { diff --git a/tests/unit/query_plan_v2_create_set_remove_delete.cpp b/tests/unit/query_plan_v2_create_set_remove_delete.cpp index c6a7b3627..b82454682 100644 --- a/tests/unit/query_plan_v2_create_set_remove_delete.cpp +++ b/tests/unit/query_plan_v2_create_set_remove_delete.cpp @@ -18,7 +18,7 @@ #include "query/plan/operator.hpp" #include "storage/v2/disk/storage.hpp" #include "storage/v2/inmemory/storage.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; template class QueryPlan : public testing::Test { diff --git a/tests/unit/query_pretty_print.cpp b/tests/unit/query_pretty_print.cpp index 4382176be..ac789b1da 100644 --- 
a/tests/unit/query_pretty_print.cpp +++ b/tests/unit/query_pretty_print.cpp @@ -37,7 +37,7 @@ class ExpressionPrettyPrinterTest : public ::testing::Test { memgraph::storage::Config config = disk_test_utils::GenerateOnDiskConfig(testSuite); std::unique_ptr db{new StorageType(config)}; std::unique_ptr storage_dba{ - db->Access(memgraph::replication::ReplicationRole::MAIN)}; + db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)}; memgraph::query::DbAccessor dba{storage_dba.get()}; AstStorage storage; diff --git a/tests/unit/query_procedure_mgp_type.cpp b/tests/unit/query_procedure_mgp_type.cpp index 9018d5997..e12a61f28 100644 --- a/tests/unit/query_procedure_mgp_type.cpp +++ b/tests/unit/query_procedure_mgp_type.cpp @@ -23,7 +23,7 @@ #include "disk_test_utils.hpp" #include "test_utils.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; template class CypherType : public testing::Test { diff --git a/tests/unit/query_procedure_py_module.cpp b/tests/unit/query_procedure_py_module.cpp index 90d9cb669..baef2e1c8 100644 --- a/tests/unit/query_procedure_py_module.cpp +++ b/tests/unit/query_procedure_py_module.cpp @@ -21,7 +21,7 @@ #include "storage/v2/inmemory/storage.hpp" #include "test_utils.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; template class PyModule : public testing::Test { diff --git a/tests/unit/query_procedures_mgp_graph.cpp b/tests/unit/query_procedures_mgp_graph.cpp index 22ea64cfd..cf3b5a137 100644 --- a/tests/unit/query_procedures_mgp_graph.cpp +++ b/tests/unit/query_procedures_mgp_graph.cpp @@ -34,7 +34,7 @@ #include "utils/memory.hpp" #include "utils/variant_helpers.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; #define EXPECT_SUCCESS(...) 
EXPECT_EQ(__VA_ARGS__, mgp_error::MGP_ERROR_NO_ERROR) diff --git a/tests/unit/query_semantic.cpp b/tests/unit/query_semantic.cpp index 4f27fa1e2..c4bb966eb 100644 --- a/tests/unit/query_semantic.cpp +++ b/tests/unit/query_semantic.cpp @@ -35,7 +35,7 @@ class TestSymbolGenerator : public ::testing::Test { memgraph::storage::Config config = disk_test_utils::GenerateOnDiskConfig(testSuite); std::unique_ptr db{new StorageType(config)}; std::unique_ptr storage_dba{ - db->Access(memgraph::replication::ReplicationRole::MAIN)}; + db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)}; memgraph::query::DbAccessor dba{storage_dba.get()}; AstStorage storage; diff --git a/tests/unit/query_trigger.cpp b/tests/unit/query_trigger.cpp index 040af0a22..1b2ca5e9c 100644 --- a/tests/unit/query_trigger.cpp +++ b/tests/unit/query_trigger.cpp @@ -29,7 +29,7 @@ #include "utils/exceptions.hpp" #include "utils/memory.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; namespace { const std::unordered_set kAllEventTypes{ diff --git a/tests/unit/query_variable_start_planner.cpp b/tests/unit/query_variable_start_planner.cpp index ef08e8cca..df7173db2 100644 --- a/tests/unit/query_variable_start_planner.cpp +++ b/tests/unit/query_variable_start_planner.cpp @@ -28,7 +28,7 @@ #include "formatters.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; using namespace memgraph::query::plan; using memgraph::query::AstStorage; using Type = memgraph::query::EdgeAtom::Type; diff --git a/tests/unit/replication_persistence_helper.cpp b/tests/unit/replication_persistence_helper.cpp index ade9ef638..ef3ba254d 100644 --- a/tests/unit/replication_persistence_helper.cpp +++ b/tests/unit/replication_persistence_helper.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -22,12 +22,7 @@ using namespace memgraph::replication::durability; using namespace memgraph::replication; - -static_assert(sizeof(ReplicationRoleEntry) == 168, - "Most likely you modified ReplicationRoleEntry without updating the tests. "); - -static_assert(sizeof(ReplicationReplicaEntry) == 160, - "Most likely you modified ReplicationReplicaEntry without updating the tests."); +using namespace memgraph::replication_coordination_glue; TEST(ReplicationDurability, V1Main) { auto const role_entry = ReplicationRoleEntry{.version = DurabilityVersion::V1, diff --git a/tests/unit/skip_list.cpp b/tests/unit/skip_list.cpp index 86fb4ae57..bdebacc1e 100644 --- a/tests/unit/skip_list.cpp +++ b/tests/unit/skip_list.cpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. 
// // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -18,6 +18,20 @@ #include "utils/skip_list.hpp" #include "utils/timer.hpp" +#include + +template +concept CompatibleIterators = std::forward_iterator && std::forward_iterator && + requires(It it, ConstIt cit) { + { it == cit } -> std::same_as; + { it != cit } -> std::same_as; + { cit == it } -> std::same_as; + { cit != it } -> std::same_as; +}; + +using sut_t = memgraph::utils::SkipList::Accessor; +static_assert(CompatibleIterators); + TEST(SkipList, Int) { memgraph::utils::SkipList list; { diff --git a/tests/unit/slk_advanced.cpp b/tests/unit/slk_advanced.cpp index fab936fe0..f41946388 100644 --- a/tests/unit/slk_advanced.cpp +++ b/tests/unit/slk_advanced.cpp @@ -1,4 +1,4 @@ -// Copyright 2022 Memgraph Ltd. +// Copyright 2024 Memgraph Ltd. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source @@ -11,10 +11,13 @@ #include +#include "coordination/coordinator_config.hpp" +#include "coordination/coordinator_slk.hpp" +#include "replication/config.hpp" +#include "replication_coordination_glue/mode.hpp" +#include "slk_common.hpp" #include "storage/v2/property_value.hpp" #include "storage/v2/replication/slk.hpp" - -#include "slk_common.hpp" #include "storage/v2/temporal.hpp" TEST(SlkAdvanced, PropertyValueList) { @@ -114,3 +117,34 @@ TEST(SlkAdvanced, PropertyValueComplex) { ASSERT_EQ(original, decoded); } + +TEST(SlkAdvanced, ReplicationClientConfigs) { + using ReplicationClientInfo = memgraph::coordination::CoordinatorClientConfig::ReplicationClientInfo; + using ReplicationClientInfoVec = std::vector; + using ReplicationMode = memgraph::replication_coordination_glue::ReplicationMode; + + ReplicationClientInfoVec original{ReplicationClientInfo{.instance_name = "replica1", + .replication_mode = ReplicationMode::SYNC, + .replication_ip_address = "127.0.0.1", + .replication_port = 10000}, + ReplicationClientInfo{.instance_name = "replica2", + .replication_mode = ReplicationMode::ASYNC, + .replication_ip_address = "127.0.1.1", + .replication_port = 10010}, + ReplicationClientInfo{ + .instance_name = "replica3", + .replication_mode = ReplicationMode::ASYNC, + .replication_ip_address = "127.1.1.1", + .replication_port = 1110, + }}; + + memgraph::slk::Loopback loopback; + auto builder = loopback.GetBuilder(); + memgraph::slk::Save(original, builder); + + ReplicationClientInfoVec decoded; + auto reader = loopback.GetReader(); + memgraph::slk::Load(&decoded, reader); + + ASSERT_EQ(original, decoded); +} diff --git a/tests/unit/storage_rocks.cpp b/tests/unit/storage_rocks.cpp index 365f46ad3..5cdaf4691 100644 --- a/tests/unit/storage_rocks.cpp +++ b/tests/unit/storage_rocks.cpp @@ -30,7 +30,7 @@ #include "storage/v2/view.hpp" #include "utils/rocksdb_serialization.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; // NOLINTNEXTLINE(google-build-using-namespace) using namespace memgraph::storage; diff --git a/tests/unit/storage_v2.cpp b/tests/unit/storage_v2.cpp index dc3ec1512..7db51ddd4 100644 --- a/tests/unit/storage_v2.cpp +++ b/tests/unit/storage_v2.cpp @@ -23,7 +23,7 @@ #include "storage/v2/vertex_accessor.hpp" #include "storage_test_utils.hpp" -using memgraph::replication::ReplicationRole; +using 
memgraph::replication_coordination_glue::ReplicationRole; using testing::Types; using testing::UnorderedElementsAre; diff --git a/tests/unit/storage_v2_constraints.cpp b/tests/unit/storage_v2_constraints.cpp index b36bc59c9..7f03f40d1 100644 --- a/tests/unit/storage_v2_constraints.cpp +++ b/tests/unit/storage_v2_constraints.cpp @@ -23,7 +23,7 @@ #include "disk_test_utils.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; // NOLINTNEXTLINE(google-build-using-namespace) using namespace memgraph::storage; diff --git a/tests/unit/storage_v2_durability_inmemory.cpp b/tests/unit/storage_v2_durability_inmemory.cpp index bdec38c00..54671077f 100644 --- a/tests/unit/storage_v2_durability_inmemory.cpp +++ b/tests/unit/storage_v2_durability_inmemory.cpp @@ -48,7 +48,7 @@ #include "utils/timer.hpp" #include "utils/uuid.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; using testing::Contains; using testing::UnorderedElementsAre; @@ -803,8 +803,8 @@ INSTANTIATE_TEST_CASE_P(EdgesWithoutProperties, DurabilityTest, ::testing::Value TEST_P(DurabilityTest, SnapshotOnExit) { // Create snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{.durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}}; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -819,8 +819,10 @@ TEST_P(DurabilityTest, SnapshotOnExit) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -840,10 +842,12 @@ TEST_P(DurabilityTest, SnapshotPeriodic) { // Create snapshot. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, + .durability = {.storage_directory = storage_directory, .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT, - .snapshot_interval = std::chrono::milliseconds(2000)}}; + .snapshot_interval = std::chrono::milliseconds(2000)}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -856,8 +860,10 @@ TEST_P(DurabilityTest, SnapshotPeriodic) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot. 
- memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam()); @@ -882,13 +888,16 @@ TEST_P(DurabilityTest, SnapshotFallback) { auto const snapshot_interval = std::chrono::milliseconds(3000); memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT, - .snapshot_interval = snapshot_interval, - .snapshot_retention_count = 10, // We don't anticipate that we make this many - }}; + + .durability = + { + .storage_directory = storage_directory, + .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT, + .snapshot_interval = snapshot_interval, + .snapshot_retention_count = 10, // We don't anticipate that we make this many + }, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; @@ -926,8 +935,10 @@ TEST_P(DurabilityTest, SnapshotFallback) { } // Recover snapshot. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam()); @@ -946,8 +957,10 @@ TEST_P(DurabilityTest, SnapshotFallback) { TEST_P(DurabilityTest, SnapshotEverythingCorrupt) { // Create unrelated snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; @@ -975,10 +988,12 @@ TEST_P(DurabilityTest, SnapshotEverythingCorrupt) { // Create snapshot. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, + .durability = {.storage_directory = storage_directory, .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT, - .snapshot_interval = std::chrono::milliseconds(2000)}}; + .snapshot_interval = std::chrono::milliseconds(2000)}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; @@ -1019,8 +1034,10 @@ TEST_P(DurabilityTest, SnapshotEverythingCorrupt) { ASSERT_DEATH( ([&]() { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; }()) // iile @@ -1032,8 +1049,10 @@ TEST_P(DurabilityTest, SnapshotEverythingCorrupt) { TEST_P(DurabilityTest, SnapshotRetention) { // Create unrelated snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -1051,11 +1070,13 @@ TEST_P(DurabilityTest, SnapshotRetention) { // Create snapshot. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, + .durability = {.storage_directory = storage_directory, .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT, .snapshot_interval = std::chrono::milliseconds(2000), - .snapshot_retention_count = 3}}; + .snapshot_retention_count = 3}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; // Restore unrelated snapshots after the database has been started. @@ -1089,8 +1110,10 @@ TEST_P(DurabilityTest, SnapshotRetention) { } // Recover snapshot. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam()); @@ -1109,8 +1132,10 @@ TEST_P(DurabilityTest, SnapshotRetention) { TEST_P(DurabilityTest, SnapshotMixedUUID) { // Create snapshot. 
{ - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -1127,8 +1152,10 @@ TEST_P(DurabilityTest, SnapshotMixedUUID) { // Recover snapshot. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -1136,8 +1163,10 @@ TEST_P(DurabilityTest, SnapshotMixedUUID) { // Create another snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -1158,8 +1187,10 @@ TEST_P(DurabilityTest, SnapshotMixedUUID) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam()); @@ -1178,8 +1209,10 @@ TEST_P(DurabilityTest, SnapshotMixedUUID) { TEST_P(DurabilityTest, SnapshotBackup) { // Create snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -1197,10 +1230,12 @@ TEST_P(DurabilityTest, SnapshotBackup) { // Start storage without recovery. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, + .durability = {.storage_directory = storage_directory, .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT, - .snapshot_interval = std::chrono::minutes(20)}}; + .snapshot_interval = std::chrono::minutes(20)}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; } @@ -1215,8 +1250,10 @@ TEST_P(DurabilityTest, SnapshotBackup) { TEST_F(DurabilityTest, SnapshotWithoutPropertiesOnEdgesRecoveryWithPropertiesOnEdges) { // Create snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = false}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = false}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), false); @@ -1231,8 +1268,10 @@ TEST_F(DurabilityTest, SnapshotWithoutPropertiesOnEdgesRecoveryWithPropertiesOnE ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot. - memgraph::storage::Config config{.items = {.properties_on_edges = true}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = true}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, false); @@ -1251,8 +1290,10 @@ TEST_F(DurabilityTest, SnapshotWithoutPropertiesOnEdgesRecoveryWithPropertiesOnE TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesRecoveryWithoutPropertiesOnEdges) { // Create snapshot. 
{ - memgraph::storage::Config config{.items = {.properties_on_edges = true}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = true}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), true); @@ -1270,8 +1311,10 @@ TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesRecoveryWithoutPropertiesOnE ASSERT_DEATH( ([&]() { memgraph::storage::Config config{ - .items = {.properties_on_edges = false}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = false}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; }()) // iile @@ -1283,8 +1326,10 @@ TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesRecoveryWithoutPropertiesOnE TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesButUnusedRecoveryWithoutPropertiesOnEdges) { // Create snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = true}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = true}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), true); @@ -1326,8 +1371,10 @@ TEST_F(DurabilityTest, SnapshotWithPropertiesOnEdgesButUnusedRecoveryWithoutProp ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot. - memgraph::storage::Config config{.items = {.properties_on_edges = false}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = false}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, false); @@ -1347,12 +1394,14 @@ TEST_P(DurabilityTest, WalBasic) { // Create WALs. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -1365,8 +1414,10 @@ TEST_P(DurabilityTest, WalBasic) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -1386,13 +1437,15 @@ TEST_P(DurabilityTest, WalBackup) { // Create WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -1411,11 +1464,13 @@ TEST_P(DurabilityTest, WalBackup) { // Start storage without recovery. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20)}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20)}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; } @@ -1431,12 +1486,14 @@ TEST_P(DurabilityTest, WalAppendToExisting) { // Create WALs. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -1450,8 +1507,10 @@ TEST_P(DurabilityTest, WalAppendToExisting) { // Recover WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam()); @@ -1460,13 +1519,15 @@ TEST_P(DurabilityTest, WalAppendToExisting) { // Recover WALs and create more WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .recover_on_startup = true, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .recover_on_startup = true, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateExtendedDataset(db.storage()); @@ -1478,8 +1539,10 @@ TEST_P(DurabilityTest, WalAppendToExisting) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -1502,12 +1565,14 @@ TEST_P(DurabilityTest, WalCreateInSingleTransaction) { // Create WALs. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -1541,8 +1606,10 @@ TEST_P(DurabilityTest, WalCreateInSingleTransaction) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; { @@ -1642,12 +1709,14 @@ TEST_P(DurabilityTest, WalCreateAndRemoveEverything) { // Create WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -1655,7 +1724,7 @@ TEST_P(DurabilityTest, WalCreateAndRemoveEverything) { auto indices = [&] { auto acc = db.Access(); auto res = acc->ListAllIndices(); - acc->Commit(); + (void)acc->Commit(); return res; }(); // iile for (const auto &index : indices.label) { @@ -1671,7 +1740,7 @@ TEST_P(DurabilityTest, WalCreateAndRemoveEverything) { auto constraints = [&] { auto acc = db.Access(); auto res = acc->ListAllConstraints(); - acc->Commit(); + (void)acc->Commit(); return res; }(); // iile for (const auto &constraint : constraints.existence) { @@ -1698,8 +1767,10 @@ TEST_P(DurabilityTest, WalCreateAndRemoveEverything) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover WALs. 
- memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; { @@ -1736,14 +1807,17 @@ TEST_P(DurabilityTest, WalTransactionOrdering) { // Create WAL. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 100000, - .wal_file_flush_every_n_tx = kFlushWalEvery, - }}; + + .durability = + { + .storage_directory = storage_directory, + .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 100000, + .wal_file_flush_every_n_tx = kFlushWalEvery, + }, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc1 = db.Access(); @@ -1835,8 +1909,10 @@ TEST_P(DurabilityTest, WalTransactionOrdering) { } // Recover WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; { @@ -1869,12 +1945,14 @@ TEST_P(DurabilityTest, WalCreateAndRemoveOnlyBaseDataset) { // Create WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -1898,8 +1976,10 @@ TEST_P(DurabilityTest, WalCreateAndRemoveOnlyBaseDataset) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover WALs. 
- memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_EXTENDED_WITH_BASE_INDICES_AND_CONSTRAINTS, GetParam()); @@ -1921,12 +2001,14 @@ TEST_P(DurabilityTest, WalDeathResilience) { // Create WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; // Create one million vertices. @@ -1958,14 +2040,17 @@ TEST_P(DurabilityTest, WalDeathResilience) { uint64_t count = 0; { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .recover_on_startup = true, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery, - }}; + + .durability = + { + .storage_directory = storage_directory, + .recover_on_startup = true, + .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery, + }, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; { @@ -1992,8 +2077,10 @@ TEST_P(DurabilityTest, WalDeathResilience) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; { @@ -2021,13 +2108,15 @@ TEST_P(DurabilityTest, WalMissingSecond) { // Create unrelated WALs. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -2047,13 +2136,15 @@ TEST_P(DurabilityTest, WalMissingSecond) { // Create WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; const uint64_t kNumVertices = 1000; @@ -2102,8 +2193,10 @@ TEST_P(DurabilityTest, WalMissingSecond) { ASSERT_DEATH( ([&]() { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; }()) // iile @@ -2116,13 +2209,15 @@ TEST_P(DurabilityTest, WalCorruptSecond) { // Create unrelated WALs. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -2142,13 +2237,15 @@ TEST_P(DurabilityTest, WalCorruptSecond) { // Create WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; const uint64_t kNumVertices = 1000; @@ -2196,8 +2293,10 @@ TEST_P(DurabilityTest, WalCorruptSecond) { ASSERT_DEATH( ([&]() { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; }()) // iile @@ -2210,13 +2309,15 @@ TEST_P(DurabilityTest, WalCorruptLastTransaction) { // Create WALs { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ 
-2237,8 +2338,10 @@ TEST_P(DurabilityTest, WalCorruptLastTransaction) { } // Recover WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; // The extended dataset shouldn't be recovered because its WAL transaction was @@ -2260,13 +2363,15 @@ TEST_P(DurabilityTest, WalAllOperationsInSingleTransaction) { // Create WALs { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -2310,8 +2415,10 @@ TEST_P(DurabilityTest, WalAllOperationsInSingleTransaction) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; { @@ -2339,12 +2446,14 @@ TEST_P(DurabilityTest, WalAndSnapshot) { // Create snapshot and WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::milliseconds(2000), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::milliseconds(2000), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -2358,8 +2467,10 @@ TEST_P(DurabilityTest, WalAndSnapshot) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot and WALs. 
- memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -2378,8 +2489,10 @@ TEST_P(DurabilityTest, WalAndSnapshot) { TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshot) { // Create snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -2393,8 +2506,10 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshot) { // Recover snapshot. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam()); @@ -2403,13 +2518,15 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshot) { // Recover snapshot and create WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .recover_on_startup = true, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .recover_on_startup = true, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateExtendedDataset(db.storage()); @@ -2421,8 +2538,10 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshot) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot and WALs. 
- memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -2441,8 +2560,10 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshot) { TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) { // Create snapshot. { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -2456,8 +2577,10 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) { // Recover snapshot. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::ONLY_BASE, GetParam()); @@ -2466,13 +2589,15 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) { // Recover snapshot and create WALs. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .recover_on_startup = true, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .recover_on_startup = true, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateExtendedDataset(db.storage()); @@ -2487,13 +2612,15 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) { memgraph::storage::Gid vertex_gid; { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .recover_on_startup = true, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .recover_on_startup = true, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -2513,8 +2640,10 @@ TEST_P(DurabilityTest, WalAndSnapshotAppendToExistingSnapshotAndWal) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot and WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam(), @@ -2551,13 +2680,15 @@ TEST_P(DurabilityTest, WalAndSnapshotWalRetention) { // Create unrelated WALs. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::minutes(20), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = kFlushWalEvery}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::minutes(20), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = kFlushWalEvery}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -2579,13 +2710,15 @@ TEST_P(DurabilityTest, WalAndSnapshotWalRetention) { // Create snapshot and WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::seconds(2), - .wal_file_size_kibibytes = 1, - .wal_file_flush_every_n_tx = 1}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::seconds(2), + .wal_file_size_kibibytes = 1, + .wal_file_flush_every_n_tx = 1}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; // Restore unrelated snapshots after the database has been started. @@ -2614,8 +2747,10 @@ TEST_P(DurabilityTest, WalAndSnapshotWalRetention) { // Recover and verify data. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -2634,8 +2769,10 @@ TEST_P(DurabilityTest, WalAndSnapshotWalRetention) { ASSERT_DEATH( ([&]() { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; }()) // iile @@ -2648,11 +2785,13 @@ TEST_P(DurabilityTest, SnapshotAndWalMixedUUID) { // Create unrelated snapshot and WALs. 
{ memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::seconds(2)}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::seconds(2)}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; auto acc = db.Access(); @@ -2671,11 +2810,13 @@ TEST_P(DurabilityTest, SnapshotAndWalMixedUUID) { // Create snapshot and WALs. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = { - .storage_directory = storage_directory, - .snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - .snapshot_interval = std::chrono::seconds(2)}}; + + .durability = {.storage_directory = storage_directory, + .snapshot_wal_mode = + memgraph::storage::Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + .snapshot_interval = std::chrono::seconds(2)}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -2698,8 +2839,10 @@ TEST_P(DurabilityTest, SnapshotAndWalMixedUUID) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot and WALs. - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .recover_on_startup = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, .recover_on_startup = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -2719,8 +2862,10 @@ TEST_P(DurabilityTest, ParallelConstraintsRecovery) { // Create snapshot. { memgraph::storage::Config config{ - .items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true, .items_per_batch = 13}}; + + .durability = {.storage_directory = storage_directory, .snapshot_on_exit = true, .items_per_batch = 13}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; CreateBaseDataset(db.storage(), GetParam()); @@ -2735,12 +2880,14 @@ TEST_P(DurabilityTest, ParallelConstraintsRecovery) { ASSERT_EQ(GetBackupWalsList().size(), 0); // Recover snapshot. 
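The change repeated throughout these durability.cpp hunks is purely structural: the items options (here only properties_on_edges) move from the top level of memgraph::storage::Config into a new salient member, while the durability block stays where it was. A minimal before/after sketch of that initializer shape, assuming only the member names visible in the hunks above; the MakeRecoveryConfig helper, its dir parameter, and the header path are illustrative, not taken from the tests:

    #include <filesystem>
    #include "storage/v2/config.hpp"  // assumed header providing memgraph::storage::Config

    // Old shape (removed lines): items sat at the top level of Config.
    //   memgraph::storage::Config{.items = {.properties_on_edges = true},
    //                             .durability = {.storage_directory = dir, .recover_on_startup = true}};
    //
    // New shape (added lines): the same knob now lives under config.salient.items.
    memgraph::storage::Config MakeRecoveryConfig(const std::filesystem::path &dir, bool properties_on_edges) {
      return memgraph::storage::Config{
          .durability = {.storage_directory = dir, .recover_on_startup = true},
          .salient = {.items = {.properties_on_edges = properties_on_edges}},
      };
    }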
- memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, - .recover_on_startup = true, - .snapshot_on_exit = false, - .items_per_batch = 13, - .allow_parallel_index_creation = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, + .recover_on_startup = true, + .snapshot_on_exit = false, + .items_per_batch = 13, + .allow_parallel_index_creation = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)}; memgraph::dbms::Database db{config, repl_state}; VerifyDataset(db.storage(), DatasetType::BASE_WITH_EXTENDED, GetParam()); @@ -2755,12 +2902,14 @@ TEST_P(DurabilityTest, ParallelConstraintsRecovery) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(DurabilityTest, ConstraintsRecoveryFunctionSetting) { - memgraph::storage::Config config{.items = {.properties_on_edges = GetParam()}, - .durability = {.storage_directory = storage_directory, - .recover_on_startup = true, - .snapshot_on_exit = false, - .items_per_batch = 13, - .allow_parallel_schema_creation = true}}; + memgraph::storage::Config config{ + .durability = {.storage_directory = storage_directory, + .recover_on_startup = true, + .snapshot_on_exit = false, + .items_per_batch = 13, + .allow_parallel_schema_creation = true}, + .salient = {.items = {.properties_on_edges = GetParam()}}, + }; // Create snapshot. { config.durability.recover_on_startup = false; diff --git a/tests/unit/storage_v2_edge_inmemory.cpp b/tests/unit/storage_v2_edge_inmemory.cpp index befa52462..50ae1f14f 100644 --- a/tests/unit/storage_v2_edge_inmemory.cpp +++ b/tests/unit/storage_v2_edge_inmemory.cpp @@ -17,7 +17,7 @@ #include "storage/v2/inmemory/storage.hpp" #include "storage/v2/storage.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; using testing::UnorderedElementsAre; class StorageEdgeTest : public ::testing::TestWithParam {}; @@ -28,7 +28,7 @@ INSTANTIATE_TEST_CASE_P(EdgesWithoutProperties, StorageEdgeTest, ::testing::Valu // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSmallerCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -219,7 +219,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSmallerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromLargerCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -392,7 +392,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromLargerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSameCommit) { std::unique_ptr store( - new 
memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); // Create vertex @@ -538,7 +538,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSameCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSmallerAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -808,7 +808,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSmallerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromLargerAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -1078,7 +1078,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromLargerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSameAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); // Create vertex @@ -1305,7 +1305,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSameAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -1574,7 +1574,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromLargerCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -1843,7 +1843,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromLargerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSameCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); 
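Aside from the Config nesting, the only functional edit in storage_v2_edge_inmemory.cpp is the using-declaration at the top: ReplicationRole is now taken from memgraph::replication_coordination_glue instead of memgraph::replication. A short sketch of how the test setup reads after both changes; the unique_ptr element type, memgraph::storage::Storage, is an assumption here, and the includes are the ones this test file already uses:

    #include "storage/v2/inmemory/storage.hpp"
    #include "storage/v2/storage.hpp"

    using memgraph::replication_coordination_glue::ReplicationRole;  // was memgraph::replication::ReplicationRole

    std::unique_ptr<memgraph::storage::Storage> store(
        new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = true}}}));

    // Accessor creation itself is untouched; only the namespace providing ReplicationRole moved.
    auto acc = store->Access(ReplicationRole::MAIN);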
memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); // Create vertex @@ -2069,7 +2069,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSameCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -2492,7 +2492,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromLargerAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -2916,7 +2916,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromLargerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSameAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); // Create vertex @@ -3276,7 +3276,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSameAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteSingleCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -3416,7 +3416,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteSingleCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_vertex1 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_vertex2 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -3746,7 +3746,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteSingleAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = 
memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -3991,7 +3991,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteSingleAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = GetParam()}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = GetParam()}}})); memgraph::storage::Gid gid_vertex1 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_vertex2 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -4637,7 +4637,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithProperties, EdgePropertyCommit) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = true}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = true}}})); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); { auto acc = store->Access(ReplicationRole::MAIN); @@ -4768,7 +4768,7 @@ TEST(StorageWithProperties, EdgePropertyCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithProperties, EdgePropertyAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = true}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = true}}})); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); // Create the vertex. @@ -5060,7 +5060,7 @@ TEST(StorageWithProperties, EdgePropertyAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithProperties, EdgePropertySerializationError) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = true}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = true}}})); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); { auto acc = store->Access(ReplicationRole::MAIN); @@ -5170,7 +5170,7 @@ TEST(StorageWithProperties, EdgePropertySerializationError) { TEST(StorageWithProperties, EdgePropertyClear) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = true}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = true}}})); memgraph::storage::Gid gid; auto property1 = store->NameToProperty("property1"); auto property2 = store->NameToProperty("property2"); @@ -5286,7 +5286,7 @@ TEST(StorageWithProperties, EdgePropertyClear) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithoutProperties, EdgePropertyAbort) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = false}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = false}}})); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); { auto acc = store->Access(ReplicationRole::MAIN); @@ -5355,7 +5355,7 @@ TEST(StorageWithoutProperties, EdgePropertyAbort) { TEST(StorageWithoutProperties, EdgePropertyClear) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = false}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = false}}})); 
memgraph::storage::Gid gid; { auto acc = store->Access(ReplicationRole::MAIN); @@ -5382,7 +5382,7 @@ TEST(StorageWithoutProperties, EdgePropertyClear) { TEST(StorageWithProperties, EdgeNonexistentPropertyAPI) { std::unique_ptr store( - new memgraph::storage::InMemoryStorage({.items = {.properties_on_edges = true}})); + new memgraph::storage::InMemoryStorage({.salient = {.items = {.properties_on_edges = true}}})); auto property = store->NameToProperty("property"); diff --git a/tests/unit/storage_v2_edge_ondisk.cpp b/tests/unit/storage_v2_edge_ondisk.cpp index 57ba1a562..7f3357b10 100644 --- a/tests/unit/storage_v2_edge_ondisk.cpp +++ b/tests/unit/storage_v2_edge_ondisk.cpp @@ -18,7 +18,7 @@ #include "storage/v2/disk/storage.hpp" #include "storage/v2/storage.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; using testing::UnorderedElementsAre; class StorageEdgeTest : public ::testing::TestWithParam {}; @@ -31,7 +31,7 @@ const std::string testSuite = "storage_v2_edge_ondisk"; // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSmallerCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -224,7 +224,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSmallerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromLargerCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -399,7 +399,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromLargerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSameCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -547,7 +547,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSameCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSmallerAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -819,7 +819,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSmallerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromLargerAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - 
config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -1091,7 +1091,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromLargerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeCreateFromSameAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -1320,7 +1320,7 @@ TEST_P(StorageEdgeTest, EdgeCreateFromSameAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -1591,7 +1591,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromLargerCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -1862,7 +1862,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromLargerCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSameCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -2090,7 +2090,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSameCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -2515,7 +2515,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSmallerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromLargerAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); 
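In the on-disk variant of the same tests the relocation shows up as a field-path change on a config object that already exists, rather than a new initializer: the tests keep calling disk_test_utils::GenerateOnDiskConfig and then flip the flag through the new salient path before handing the config to DiskStorage. Roughly, with the same assumption about the unique_ptr element type:

    auto config = disk_test_utils::GenerateOnDiskConfig(testSuite);
    // Before this diff: config.items.properties_on_edges = ...;
    config.salient.items.properties_on_edges = true;  // GetParam() in the parameterized tests above
    std::unique_ptr<memgraph::storage::Storage> store(new memgraph::storage::DiskStorage(config));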
std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -2941,7 +2941,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromLargerAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, EdgeDeleteFromSameAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_vertex = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -3303,7 +3303,7 @@ TEST_P(StorageEdgeTest, EdgeDeleteFromSameAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteSingleCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -3445,7 +3445,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteSingleCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_vertex1 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_vertex2 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -3777,7 +3777,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteSingleAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_from = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_to = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -4024,7 +4024,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteSingleAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = GetParam(); + config.salient.items.properties_on_edges = GetParam(); std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid_vertex1 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); memgraph::storage::Gid gid_vertex2 = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -4672,7 +4672,7 @@ TEST_P(StorageEdgeTest, VertexDetachDeleteMultipleAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithProperties, EdgePropertyCommit) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = true; + 
config.salient.items.properties_on_edges = true; std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); { @@ -4808,7 +4808,7 @@ TEST(StorageWithProperties, EdgePropertyCommit) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithProperties, EdgePropertyAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = true; + config.salient.items.properties_on_edges = true; std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); @@ -5109,7 +5109,7 @@ TEST(StorageWithProperties, EdgePropertyAbort) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithProperties, EdgePropertySerializationError) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = true; + config.salient.items.properties_on_edges = true; std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); { @@ -5228,7 +5228,7 @@ TEST(StorageWithProperties, EdgePropertySerializationError) { TEST(StorageWithProperties, EdgePropertyClear) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = true; + config.salient.items.properties_on_edges = true; std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid; auto property1 = store->NameToProperty("property1"); @@ -5350,7 +5350,7 @@ TEST(StorageWithProperties, EdgePropertyClear) { // NOLINTNEXTLINE(hicpp-special-member-functions) TEST(StorageWithoutProperties, EdgePropertyAbort) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = false; + config.salient.items.properties_on_edges = false; std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid = memgraph::storage::Gid::FromUint(std::numeric_limits::max()); { @@ -5424,7 +5424,7 @@ TEST(StorageWithoutProperties, EdgePropertyAbort) { TEST(StorageWithoutProperties, EdgePropertyClear) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = false; + config.salient.items.properties_on_edges = false; std::unique_ptr store(new memgraph::storage::DiskStorage(config)); memgraph::storage::Gid gid; { @@ -5454,7 +5454,7 @@ TEST(StorageWithoutProperties, EdgePropertyClear) { TEST(StorageWithProperties, EdgeNonexistentPropertyAPI) { auto config = disk_test_utils::GenerateOnDiskConfig(testSuite); - config.items.properties_on_edges = true; + config.salient.items.properties_on_edges = true; std::unique_ptr store(new memgraph::storage::DiskStorage(config)); auto property = store->NameToProperty("property"); diff --git a/tests/unit/storage_v2_gc.cpp b/tests/unit/storage_v2_gc.cpp index 770d570bc..e619f3723 100644 --- a/tests/unit/storage_v2_gc.cpp +++ b/tests/unit/storage_v2_gc.cpp @@ -14,7 +14,7 @@ #include "storage/v2/inmemory/storage.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; using testing::UnorderedElementsAre; // TODO: The point of these is not to test GC fully, these are just simple diff --git a/tests/unit/storage_v2_get_info.cpp b/tests/unit/storage_v2_get_info.cpp index aa864d7cd..c0f7e2dbc 100644 --- a/tests/unit/storage_v2_get_info.cpp +++ 
b/tests/unit/storage_v2_get_info.cpp @@ -22,7 +22,7 @@ // NOLINTNEXTLINE(google-build-using-namespace) using namespace memgraph::storage; -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; constexpr auto testSuite = "storage_v2_get_info"; const std::filesystem::path storage_directory{std::filesystem::temp_directory_path() / testSuite}; diff --git a/tests/unit/storage_v2_indices.cpp b/tests/unit/storage_v2_indices.cpp index 10ccb7660..8ee053087 100644 --- a/tests/unit/storage_v2_indices.cpp +++ b/tests/unit/storage_v2_indices.cpp @@ -25,7 +25,7 @@ // NOLINTNEXTLINE(google-build-using-namespace) using namespace memgraph::storage; -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; using testing::IsEmpty; using testing::Types; using testing::UnorderedElementsAre; diff --git a/tests/unit/storage_v2_isolation_level.cpp b/tests/unit/storage_v2_isolation_level.cpp index 5efedf7f9..39d7a92ec 100644 --- a/tests/unit/storage_v2_isolation_level.cpp +++ b/tests/unit/storage_v2_isolation_level.cpp @@ -16,7 +16,7 @@ #include "storage/v2/inmemory/storage.hpp" #include "storage/v2/isolation_level.hpp" #include "utils/on_scope_exit.hpp" -using memgraph::replication::ReplicationRole; +using memgraph::replication_coordination_glue::ReplicationRole; namespace { int64_t VerticesCount(memgraph::storage::Storage::Accessor *accessor) { diff --git a/tests/unit/storage_v2_replication.cpp b/tests/unit/storage_v2_replication.cpp index 9399b7ba0..e572440ca 100644 --- a/tests/unit/storage_v2_replication.cpp +++ b/tests/unit/storage_v2_replication.cpp @@ -22,6 +22,7 @@ #include #include #include +#include "auth/auth.hpp" #include "dbms/database.hpp" #include "dbms/dbms_handler.hpp" #include "dbms/replication_handler.hpp" @@ -31,6 +32,7 @@ #include "storage/v2/indices/label_index_stats.hpp" #include "storage/v2/storage.hpp" #include "storage/v2/view.hpp" +#include "utils/rw_lock.hpp" #include "utils/synchronized.hpp" using testing::UnorderedElementsAre; @@ -39,9 +41,9 @@ using memgraph::dbms::RegisterReplicaError; using memgraph::dbms::ReplicationHandler; using memgraph::dbms::UnregisterReplicaResult; using memgraph::replication::ReplicationClientConfig; -using memgraph::replication::ReplicationMode; -using memgraph::replication::ReplicationRole; using memgraph::replication::ReplicationServerConfig; +using memgraph::replication_coordination_glue::ReplicationMode; +using memgraph::replication_coordination_glue::ReplicationRole; using memgraph::storage::Config; using memgraph::storage::EdgeAccessor; using memgraph::storage::Gid; @@ -64,26 +66,35 @@ class ReplicationTest : public ::testing::Test { void TearDown() override { Clear(); } Config main_conf = [&] { - Config config{.items = {.properties_on_edges = true}, - .durability = { - .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - }}; + Config config{ + .durability = + { + .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + }, + .salient.items = {.properties_on_edges = true}, + }; UpdatePaths(config, storage_directory); return config; }(); Config repl_conf = [&] { - Config config{.items = {.properties_on_edges = true}, - .durability = { - .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - }}; + Config config{ + .durability = + { + .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + }, + .salient.items = 
{.properties_on_edges = true}, + }; UpdatePaths(config, repl_storage_directory); return config; }(); Config repl2_conf = [&] { - Config config{.items = {.properties_on_edges = true}, - .durability = { - .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, - }}; + Config config{ + .durability = + { + .snapshot_wal_mode = Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL, + }, + .salient.items = {.properties_on_edges = true}, + }; UpdatePaths(config, repl2_storage_directory); return config; }(); @@ -102,20 +113,23 @@ class ReplicationTest : public ::testing::Test { struct MinMemgraph { MinMemgraph(const memgraph::storage::Config &conf) - : dbms{conf + : auth{conf.durability.storage_directory / "auth", memgraph::auth::Auth::Config{/* default */}}, + dbms{conf #ifdef MG_ENTERPRISE , - reinterpret_cast< - memgraph::utils::Synchronized *>(0), - true, false + &auth, true #endif }, repl_state{dbms.ReplicationState()}, - db{*dbms.Get().get()}, + db_acc{dbms.Get()}, + db{*db_acc.get()}, repl_handler(dbms) { } + + memgraph::utils::Synchronized auth; memgraph::dbms::DbmsHandler dbms; memgraph::replication::ReplicationState &repl_state; + memgraph::dbms::DatabaseAccess db_acc; memgraph::dbms::Database &db; ReplicationHandler repl_handler; }; @@ -152,7 +166,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { ASSERT_TRUE(v.AddLabel(main.db.storage()->NameToLabel(vertex_label)).HasValue()); ASSERT_TRUE(v.SetProperty(main.db.storage()->NameToProperty(vertex_property), PropertyValue(vertex_property_value)) .HasValue()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { @@ -178,7 +192,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { auto v = acc->FindVertex(*vertex_gid, View::OLD); ASSERT_TRUE(v); ASSERT_TRUE(v->RemoveLabel(main.db.storage()->NameToLabel(vertex_label)).HasValue()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { @@ -197,7 +211,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { auto v = acc->FindVertex(*vertex_gid, View::OLD); ASSERT_TRUE(v); ASSERT_TRUE(acc->DeleteVertex(&*v).HasValue()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { @@ -224,7 +238,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { ASSERT_TRUE(edge.SetProperty(main.db.storage()->NameToProperty(edge_property), PropertyValue(edge_property_value)) .HasValue()); edge_gid.emplace(edge.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } const auto find_edge = [&](const auto &edges, const Gid edge_gid) -> std::optional { @@ -261,7 +275,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { auto edge = find_edge(out_edges->edges, *edge_gid); ASSERT_TRUE(edge); ASSERT_TRUE(acc->DeleteEdge(&*edge).HasValue()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { @@ -287,25 +301,25 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { { auto unique_acc = main.db.UniqueAccess(); ASSERT_FALSE(unique_acc->CreateIndex(main.db.storage()->NameToLabel(label)).HasError()); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); unique_acc->SetIndexStats(main.db.storage()->NameToLabel(label), l_stats); - ASSERT_FALSE(unique_acc->Commit().HasError()); + 
ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); ASSERT_FALSE( unique_acc->CreateIndex(main.db.storage()->NameToLabel(label), main.db.storage()->NameToProperty(property)) .HasError()); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); unique_acc->SetIndexStats(main.db.storage()->NameToLabel(label), main.db.storage()->NameToProperty(property), lp_stats); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); @@ -313,7 +327,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { ->CreateExistenceConstraint(main.db.storage()->NameToLabel(label), main.db.storage()->NameToProperty(property)) .HasError()); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); @@ -322,7 +336,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { {main.db.storage()->NameToProperty(property), main.db.storage()->NameToProperty(property_extra)}) .HasError()); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { @@ -360,24 +374,24 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { { auto unique_acc = main.db.UniqueAccess(); unique_acc->DeleteLabelIndexStats(main.db.storage()->NameToLabel(label)); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); ASSERT_FALSE(unique_acc->DropIndex(main.db.storage()->NameToLabel(label)).HasError()); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); unique_acc->DeleteLabelPropertyIndexStats(main.db.storage()->NameToLabel(label)); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); ASSERT_FALSE( unique_acc->DropIndex(main.db.storage()->NameToLabel(label), main.db.storage()->NameToProperty(property)) .HasError()); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); @@ -385,7 +399,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { ->DropExistenceConstraint(main.db.storage()->NameToLabel(label), main.db.storage()->NameToProperty(property)) .HasError()); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { auto unique_acc = main.db.UniqueAccess(); @@ -393,7 +407,7 @@ TEST_F(ReplicationTest, BasicSynchronousReplicationTest) { main.db.storage()->NameToLabel(label), {main.db.storage()->NameToProperty(property), main.db.storage()->NameToProperty(property_extra)}), memgraph::storage::UniqueConstraints::DeletionStatus::SUCCESS); - ASSERT_FALSE(unique_acc->Commit().HasError()); + ASSERT_FALSE(unique_acc->Commit({}, main.db_acc).HasError()); } { @@ -455,7 +469,7 @@ TEST_F(ReplicationTest, MultipleSynchronousReplicationTest) { ASSERT_TRUE(v.SetProperty(main.db.storage()->NameToProperty(vertex_property), PropertyValue(vertex_property_value)) .HasValue()); vertex_gid.emplace(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + 
ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } const auto check_replica = [&](memgraph::dbms::Database &replica_database) { @@ -477,7 +491,7 @@ TEST_F(ReplicationTest, MultipleSynchronousReplicationTest) { auto acc = main.db.Access(); auto v = acc->CreateVertex(); vertex_gid.emplace(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } // REPLICA1 should contain the new vertex @@ -515,7 +529,7 @@ TEST_F(ReplicationTest, RecoveryProcess) { // Create the vertex before registering a replica auto v = acc->CreateVertex(); vertex_gids.emplace_back(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } } @@ -531,13 +545,13 @@ TEST_F(ReplicationTest, RecoveryProcess) { auto acc = main.db.Access(); auto v = acc->CreateVertex(); vertex_gids.emplace_back(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { auto acc = main.db.Access(); auto v = acc->CreateVertex(); vertex_gids.emplace_back(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } } @@ -560,7 +574,7 @@ TEST_F(ReplicationTest, RecoveryProcess) { ASSERT_TRUE( v->SetProperty(main.db.storage()->NameToProperty(property_name), PropertyValue(property_value)).HasValue()); } - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } static constexpr const auto *vertex_label = "vertex_label"; @@ -594,7 +608,7 @@ TEST_F(ReplicationTest, RecoveryProcess) { ASSERT_TRUE(v); ASSERT_TRUE(v->AddLabel(main.db.storage()->NameToLabel(vertex_label)).HasValue()); } - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { auto acc = replica.db.Access(); @@ -663,7 +677,7 @@ TEST_F(ReplicationTest, BasicAsynchronousReplicationTest) { auto acc = main.db.Access(); auto v = acc->CreateVertex(); created_vertices.push_back(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); if (i == 0) { ASSERT_EQ(main.db.storage()->GetReplicaState("REPLICA_ASYNC"), ReplicaState::REPLICATING); @@ -723,13 +737,13 @@ TEST_F(ReplicationTest, EpochTest) { auto acc = main.db.Access(); const auto v = acc->CreateVertex(); vertex_gid.emplace(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { auto acc = replica1.db.Access(); const auto v = acc->FindVertex(*vertex_gid, View::OLD); ASSERT_TRUE(v); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { auto acc = replica2.db.Access(); @@ -756,13 +770,13 @@ TEST_F(ReplicationTest, EpochTest) { { auto acc = main.db.Access(); acc->CreateVertex(); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } { auto acc = replica1.db.Access(); auto v = acc->CreateVertex(); vertex_gid.emplace(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, replica1.db_acc).HasError()); } // Replica1 should forward it's vertex to Replica2 { @@ -790,7 +804,7 @@ TEST_F(ReplicationTest, EpochTest) { auto acc = main.db.Access(); const auto v = acc->CreateVertex(); vertex_gid.emplace(v.Gid()); - ASSERT_FALSE(acc->Commit().HasError()); + ASSERT_FALSE(acc->Commit({}, main.db_acc).HasError()); } // Replica1 is not compatible with the main so it shouldn't contain // it's newest vertex @@ -926,7 +940,7 @@ 
TEST_F(ReplicationTest, ReplicationReplicaWithExistingEndPoint) { .ip_address = local_host, .port = common_port, }) - .GetError() == RegisterReplicaError::END_POINT_EXISTS); + .GetError() == RegisterReplicaError::ENDPOINT_EXISTS); } TEST_F(ReplicationTest, RestoringReplicationAtStartupAfterDroppingReplica) { diff --git a/tests/unit/storage_v2_show_storage_info.cpp b/tests/unit/storage_v2_show_storage_info.cpp index 2fb750eb8..73d33a77d 100644 --- a/tests/unit/storage_v2_show_storage_info.cpp +++ b/tests/unit/storage_v2_show_storage_info.cpp @@ -44,7 +44,7 @@ class ShowStorageInfoTest : public testing::Test { }; TEST_F(ShowStorageInfoTest, CountOnAbort) { - auto acc = this->storage->Access(memgraph::replication::ReplicationRole::MAIN); + auto acc = this->storage->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN); auto src_vertex = acc->CreateVertex(); auto dest_vertex = acc->CreateVertex(); auto et = acc->NameToEdgeType("et5"); diff --git a/tests/unit/storage_v2_storage_mode.cpp b/tests/unit/storage_v2_storage_mode.cpp index dbf3394d3..487319d3c 100644 --- a/tests/unit/storage_v2_storage_mode.cpp +++ b/tests/unit/storage_v2_storage_mode.cpp @@ -44,8 +44,8 @@ TEST_P(StorageModeTest, Mode) { .transaction{.isolation_level = memgraph::storage::IsolationLevel::SNAPSHOT_ISOLATION}}); static_cast(storage.get())->SetStorageMode(storage_mode); - auto creator = storage->Access(memgraph::replication::ReplicationRole::MAIN); - auto other_analytics_mode_reader = storage->Access(memgraph::replication::ReplicationRole::MAIN); + auto creator = storage->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN); + auto other_analytics_mode_reader = storage->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN); ASSERT_EQ(CountVertices(*creator, memgraph::storage::View::OLD), 0); ASSERT_EQ(CountVertices(*other_analytics_mode_reader, memgraph::storage::View::OLD), 0); diff --git a/tests/unit/typed_value.cpp b/tests/unit/typed_value.cpp index fa2d3cb95..41dd6e3ba 100644 --- a/tests/unit/typed_value.cpp +++ b/tests/unit/typed_value.cpp @@ -38,7 +38,7 @@ class AllTypesFixture : public testing::Test { memgraph::storage::Config config_{disk_test_utils::GenerateOnDiskConfig(testSuite)}; std::unique_ptr db{new StorageType(config_)}; std::unique_ptr storage_dba{ - db->Access(memgraph::replication::ReplicationRole::MAIN)}; + db->Access(memgraph::replication_coordination_glue::ReplicationRole::MAIN)}; memgraph::query::DbAccessor dba{storage_dba.get()}; void SetUp() override {