Merge branch 'master' into text-search-integration-poc
commit cdf7b53aa7

.github/workflows/daily_benchmark.yaml (vendored, 2 changes)

@@ -16,7 +16,7 @@ jobs:

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
.github/workflows/diff.yaml (vendored, 153 changes)

@@ -27,7 +27,7 @@ jobs:

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)

@@ -65,7 +65,7 @@ jobs:

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)

@@ -96,7 +96,7 @@ jobs:

- name: Python code analysis
run: |
CHANGED_FILES=$(git diff -U0 ${{ env.BASE_BRANCH }}... --name-only)
CHANGED_FILES=$(git diff -U0 ${{ env.BASE_BRANCH }}... --name-only --diff-filter=d)
for file in ${CHANGED_FILES}; do
echo ${file}
if [[ ${file} == *.py ]]; then

@@ -137,9 +137,9 @@ jobs:

tar -czf code_coverage.tar.gz coverage.json html report.json summary.rmu

- name: Save code coverage
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: "Code coverage"
name: "Code coverage(Code analysis)"
path: tools/github/generated/code_coverage.tar.gz

- name: Run clang-tidy

@@ -162,7 +162,7 @@ jobs:

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)

@@ -208,9 +208,9 @@ jobs:

./cppcheck_and_clang_format diff

- name: Save cppcheck and clang-format errors
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: "Code coverage"
name: "Code coverage(Debug build)"
path: tools/github/cppcheck_and_clang_format.txt

release_build:

@@ -223,7 +223,7 @@ jobs:

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)

@@ -251,7 +251,7 @@ jobs:

./continuous_integration

- name: Save quality assurance status
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: "GQL Behave Status"
path: |

@@ -268,6 +268,7 @@ jobs:

ctest -R memgraph__unit --output-on-failure -j$THREADS

- name: Ensure Kafka and Pulsar are up
if: false
run: |
cd tests/e2e/streams/kafka
docker-compose up -d

@@ -275,6 +276,7 @@ jobs:

docker-compose up -d

- name: Run e2e tests
if: false
run: |
cd tests
./setup.sh /opt/toolchain-v4/activate

@@ -283,7 +285,7 @@ jobs:

./run.sh

- name: Ensure Kafka and Pulsar are down
if: always()
if: false
run: |
cd tests/e2e/streams/kafka
docker-compose down

@@ -323,16 +325,128 @@ jobs:

cpack -G DEB --config ../CPackConfig.cmake

- name: Save enterprise DEB package
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: "Enterprise DEB package"
path: build/output/memgraph*.deb

- name: Save test data
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
if: always()
with:
name: "Test data"
name: "Test data(Release build)"
path: |
# multiple paths could be defined
build/logs

experimental_build_ha:
name: "High availability build"
runs-on: [self-hosted, Linux, X64, Diff]
env:
THREADS: 24
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}

steps:
- name: Set up repository
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
fetch-depth: 0

- name: Build release binaries
run: |
source /opt/toolchain-v4/activate
./init
cd build
cmake -DCMAKE_BUILD_TYPE=Release -DMG_EXPERIMENTAL_HIGH_AVAILABILITY=ON ..
make -j$THREADS
- name: Run unit tests
run: |
source /opt/toolchain-v4/activate
cd build
ctest -R memgraph__unit --output-on-failure -j$THREADS
- name: Run e2e tests
if: false
run: |
cd tests
./setup.sh /opt/toolchain-v4/activate
source ve3/bin/activate_e2e
cd e2e
./run.sh "Coordinator"
./run.sh "Client initiated failover"
./run.sh "Uninitialized cluster"
- name: Save test data
uses: actions/upload-artifact@v4
if: always()
with:
name: "Test data(High availability build)"
path: |
# multiple paths could be defined
build/logs

experimental_build_mt:
name: "MultiTenancy replication build"
runs-on: [self-hosted, Linux, X64, Diff]
env:
THREADS: 24
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}

steps:
- name: Set up repository
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
fetch-depth: 0

- name: Build release binaries
run: |
# Activate toolchain.
source /opt/toolchain-v4/activate

# Initialize dependencies.
./init

# Build MT replication experimental binaries.
cd build
cmake -DCMAKE_BUILD_TYPE=Release -D MG_EXPERIMENTAL_REPLICATION_MULTITENANCY=ON ..
make -j$THREADS

- name: Run unit tests
run: |
# Activate toolchain.
source /opt/toolchain-v4/activate

# Run unit tests.
cd build
ctest -R memgraph__unit --output-on-failure -j$THREADS

- name: Run e2e tests
if: false
run: |
cd tests
./setup.sh /opt/toolchain-v4/activate
source ve3/bin/activate_e2e
cd e2e

# Just the replication based e2e tests
./run.sh "Replicate multitenancy"
./run.sh "Show"
./run.sh "Show while creating invalid state"
./run.sh "Delete edge replication"
./run.sh "Read-write benchmark"
./run.sh "Index replication"
./run.sh "Constraints"

- name: Save test data
uses: actions/upload-artifact@v4
if: always()
with:
name: "Test data(MultiTenancy replication build)"
path: |
# multiple paths could be defined
build/logs

@@ -348,7 +462,7 @@ jobs:

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)

@@ -365,13 +479,18 @@ jobs:

cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
make -j$THREADS memgraph

- name: Refresh Jepsen Cluster
run: |
cd tests/jepsen
./run.sh cluster-refresh

- name: Run Jepsen tests
run: |
cd tests/jepsen
./run.sh test-all-individually --binary ../../build/memgraph --ignore-run-stdout-logs --ignore-run-stderr-logs

- name: Save Jepsen report
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
if: ${{ always() }}
with:
name: "Jepsen Report"

@@ -387,7 +506,7 @@ jobs:

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
.github/workflows/full_clang_tidy.yaml (vendored, 2 changes)

@@ -14,7 +14,7 @@ jobs:

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
.github/workflows/package_memgraph.yaml (vendored, 54 changes)

@@ -42,14 +42,14 @@ jobs:

timeout-minutes: 60
steps:
- name: "Set up repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0 # Required because of release/get_version.py
- name: "Build package"
run: |
./release/package/run.sh package amzn-2 ${{ github.event.inputs.build_type }}
- name: "Upload package"
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: amzn-2
path: build/output/amzn-2/memgraph*.rpm

@@ -60,14 +60,14 @@ jobs:

timeout-minutes: 60
steps:
- name: "Set up repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0 # Required because of release/get_version.py
- name: "Build package"
run: |
./release/package/run.sh package centos-7 ${{ github.event.inputs.build_type }}
- name: "Upload package"
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: centos-7
path: build/output/centos-7/memgraph*.rpm

@@ -78,14 +78,14 @@ jobs:

timeout-minutes: 60
steps:
- name: "Set up repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0 # Required because of release/get_version.py
- name: "Build package"
run: |
./release/package/run.sh package centos-9 ${{ github.event.inputs.build_type }}
- name: "Upload package"
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: centos-9
path: build/output/centos-9/memgraph*.rpm

@@ -96,14 +96,14 @@ jobs:

timeout-minutes: 60
steps:
- name: "Set up repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0 # Required because of release/get_version.py
- name: "Build package"
run: |
./release/package/run.sh package debian-10 ${{ github.event.inputs.build_type }}
- name: "Upload package"
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: debian-10
path: build/output/debian-10/memgraph*.deb

@@ -114,14 +114,14 @@ jobs:

timeout-minutes: 60
steps:
- name: "Set up repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0 # Required because of release/get_version.py
- name: "Build package"
run: |
./release/package/run.sh package debian-11 ${{ github.event.inputs.build_type }}
- name: "Upload package"
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: debian-11
path: build/output/debian-11/memgraph*.deb

@@ -132,14 +132,14 @@ jobs:

timeout-minutes: 120
steps:
- name: "Set up repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0 # Required because of release/get_version.py
- name: "Build package"
run: |
./release/package/run.sh package debian-11-arm ${{ github.event.inputs.build_type }}
- name: "Upload package"
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: debian-11-aarch64
path: build/output/debian-11-arm/memgraph*.deb

@@ -150,14 +150,14 @@ jobs:

timeout-minutes: 60
steps:
- name: "Set up repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0 # Required because of release/get_version.py
- name: "Build package"
run: |
./release/package/run.sh package debian-11 ${{ github.event.inputs.build_type }} --for-platform
- name: "Upload package"
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: debian-11-platform
path: build/output/debian-11/memgraph*.deb

@@ -168,7 +168,7 @@ jobs:

timeout-minutes: 60
steps:
- name: "Set up repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0 # Required because of release/get_version.py
- name: "Build package"

@@ -177,7 +177,7 @@ jobs:

./run.sh package debian-11 ${{ github.event.inputs.build_type }} --for-docker
./run.sh docker
- name: "Upload package"
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: docker
path: build/output/docker/memgraph*.tar.gz

@@ -188,14 +188,14 @@ jobs:

timeout-minutes: 60
steps:
- name: "Set up repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0 # Required because of release/get_version.py
- name: "Build package"
run: |
./release/package/run.sh package fedora-36 ${{ github.event.inputs.build_type }}
- name: "Upload package"
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: fedora-36
path: build/output/fedora-36/memgraph*.rpm

@@ -206,14 +206,14 @@ jobs:

timeout-minutes: 60
steps:
- name: "Set up repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0 # Required because of release/get_version.py
- name: "Build package"
run: |
./release/package/run.sh package ubuntu-18.04 ${{ github.event.inputs.build_type }}
- name: "Upload package"
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: ubuntu-18.04
path: build/output/ubuntu-18.04/memgraph*.deb

@@ -224,14 +224,14 @@ jobs:

timeout-minutes: 60
steps:
- name: "Set up repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0 # Required because of release/get_version.py
- name: "Build package"
run: |
./release/package/run.sh package ubuntu-20.04 ${{ github.event.inputs.build_type }}
- name: "Upload package"
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: ubuntu-20.04
path: build/output/ubuntu-20.04/memgraph*.deb

@@ -242,14 +242,14 @@ jobs:

timeout-minutes: 60
steps:
- name: "Set up repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0 # Required because of release/get_version.py
- name: "Build package"
run: |
./release/package/run.sh package ubuntu-22.04 ${{ github.event.inputs.build_type }}
- name: "Upload package"
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: ubuntu-22.04
path: build/output/ubuntu-22.04/memgraph*.deb

@@ -260,14 +260,14 @@ jobs:

timeout-minutes: 120
steps:
- name: "Set up repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0 # Required because of release/get_version.py
- name: "Build package"
run: |
./release/package/run.sh package ubuntu-22.04-arm ${{ github.event.inputs.build_type }}
- name: "Upload package"
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: ubuntu-22.04-aarch64
path: build/output/ubuntu-22.04-arm/memgraph*.deb

@@ -279,7 +279,7 @@ jobs:

runs-on: ubuntu-latest
steps:
- name: Download artifacts
uses: actions/download-artifact@v3
uses: actions/download-artifact@v4
with:
# name: # if name input parameter is not provided, all artifacts are downloaded
# and put in directories named after each one.
@@ -14,7 +14,7 @@ jobs:

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
.github/workflows/release_debian10.yaml (vendored, 221 changes)

@@ -14,19 +14,21 @@ on:

schedule:
- cron: "0 22 * * *"

env:
THREADS: 24
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
BUILD_TYPE: ${{ github.event.inputs.build_type || 'Release' }}

jobs:
community_build:
name: "Community build"
runs-on: [self-hosted, Linux, X64, Debian10]
env:
THREADS: 24
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
timeout-minutes: 960
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)

@@ -40,10 +42,6 @@ jobs:

# Initialize dependencies.
./init

# Set default build_type to Release
INPUT_BUILD_TYPE=${{ github.event.inputs.build_type }}
BUILD_TYPE=${INPUT_BUILD_TYPE:-"Release"}

# Build community binaries.
cd build
cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DMG_ENTERPRISE=OFF ..

@@ -65,10 +63,11 @@ jobs:

THREADS: 24
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)

@@ -110,22 +109,19 @@ jobs:

tar -czf code_coverage.tar.gz coverage.json html report.json summary.rmu

- name: Save code coverage
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: "Code coverage"
name: "Code coverage(Coverage build)"
path: tools/github/generated/code_coverage.tar.gz

debug_build:
name: "Debug build"
runs-on: [self-hosted, Linux, X64, Debian10]
env:
THREADS: 24
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)

@@ -157,10 +153,6 @@ jobs:

run: |
./tests/drivers/run.sh

- name: Run integration tests
run: |
tests/integration/run.sh

- name: Run cppcheck and clang-format
run: |
# Activate toolchain.

@@ -171,23 +163,49 @@ jobs:

./cppcheck_and_clang_format diff

- name: Save cppcheck and clang-format errors
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: "Code coverage"
name: "Code coverage(Debug build)"
path: tools/github/cppcheck_and_clang_format.txt

release_build:
name: "Release build"
runs-on: [self-hosted, Linux, X64, Debian10, BigMemory]
env:
THREADS: 24
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
timeout-minutes: 960
debug_integration_test:
name: "Debug integration tests"
runs-on: [self-hosted, Linux, X64, Debian10]
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
fetch-depth: 0

- name: Build debug binaries
run: |
# Activate toolchain.
source /opt/toolchain-v4/activate

# Initialize dependencies.
./init

# Build debug binaries.
cd build
cmake ..
make -j$THREADS

- name: Run integration tests
run: |
tests/integration/run.sh

release_build:
name: "Release build"
runs-on: [self-hosted, Linux, X64, Debian10]
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)

@@ -201,10 +219,6 @@ jobs:

# Initialize dependencies.
./init

# Set default build_type to Release
INPUT_BUILD_TYPE=${{ github.event.inputs.build_type }}
BUILD_TYPE=${INPUT_BUILD_TYPE:-"Release"}

# Build release binaries.
cd build
cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE ..

@@ -226,11 +240,60 @@ jobs:

cpack -G DEB --config ../CPackConfig.cmake

- name: Save enterprise DEB package
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: "Enterprise DEB package"
path: build/output/memgraph*.deb

- name: Run GQL Behave tests
run: |
cd tests
./setup.sh /opt/toolchain-v4/activate
cd gql_behave
./continuous_integration

- name: Save quality assurance status
uses: actions/upload-artifact@v4
with:
name: "GQL Behave Status"
path: |
tests/gql_behave/gql_behave_status.csv
tests/gql_behave/gql_behave_status.html

- name: Run unit tests
run: |
# Activate toolchain.
source /opt/toolchain-v4/activate

# Run unit tests.
cd build
ctest -R memgraph__unit --output-on-failure

release_benchmark_tests:
name: "Release Benchmark Tests"
runs-on: [self-hosted, Linux, X64, Debian10]
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
fetch-depth: 0

- name: Build release binaries
run: |
# Activate toolchain.
source /opt/toolchain-v4/activate
# Initialize dependencies.
./init

# Build release binaries
cd build
cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE ..
make -j$THREADS

- name: Run micro benchmark tests
run: |
# Activate toolchain.

@@ -257,29 +320,31 @@ jobs:

--num-database-workers 9 --num-clients-workers 30 \
--no-strict

- name: Run GQL Behave tests
run: |
cd tests
./setup.sh /opt/toolchain-v4/activate
cd gql_behave
./continuous_integration
release_e2e_test:
if: false
name: "Release End-to-end Test"
runs-on: [self-hosted, Linux, X64, Debian10]
timeout-minutes: 90

- name: Save quality assurance status
uses: actions/upload-artifact@v3
steps:
- name: Set up repository
uses: actions/checkout@v4
with:
name: "GQL Behave Status"
path: |
tests/gql_behave/gql_behave_status.csv
tests/gql_behave/gql_behave_status.html
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
fetch-depth: 0

- name: Run unit tests
- name: Build release binaries
run: |
# Activate toolchain.
source /opt/toolchain-v4/activate
# Initialize dependencies.
./init

# Run unit tests.
# Build release binaries
cd build
ctest -R memgraph__unit --output-on-failure
cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE ..
make -j$THREADS

- name: Ensure Kafka and Pulsar are up
run: |

@@ -304,6 +369,32 @@ jobs:

cd ../pulsar
docker-compose down

release_durability_stress_tests:
name: "Release durability and stress tests"
runs-on: [self-hosted, Linux, X64, Debian10]
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
fetch-depth: 0

- name: Build release binaries
run: |
# Activate toolchain.
source /opt/toolchain-v4/activate

# Initialize dependencies.
./init

# Build release binaries.
cd build
cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE ..
make -j$THREADS

- name: Run stress test (plain)
run: |
cd tests/stress

@@ -314,11 +405,6 @@ jobs:

cd tests/stress
./continuous_integration --use-ssl

- name: Run stress test (large)
run: |
cd tests/stress
./continuous_integration --large-dataset

- name: Run durability test (plain)
run: |
cd tests/stress

@@ -334,15 +420,11 @@ jobs:

release_jepsen_test:
name: "Release Jepsen Test"
runs-on: [self-hosted, Linux, X64, Debian10, JepsenControl]
env:
THREADS: 24
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
timeout-minutes: 60
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)

@@ -355,22 +437,23 @@ jobs:

# Initialize dependencies.
./init

# Set default build_type to Release
INPUT_BUILD_TYPE=${{ github.event.inputs.build_type }}
BUILD_TYPE=${INPUT_BUILD_TYPE:-"Release"}

# Build only memgraph release binary.
cd build
cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE ..
make -j$THREADS memgraph

- name: Refresh Jepsen Cluster
run: |
cd tests/jepsen
./run.sh cluster-refresh

- name: Run Jepsen tests
run: |
cd tests/jepsen
./run.sh test-all-individually --binary ../../build/memgraph --ignore-run-stdout-logs --ignore-run-stderr-logs

- name: Save Jepsen report
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
if: ${{ always() }}
with:
name: "Jepsen Report"
.github/workflows/release_docker.yaml (vendored, 2 changes)

@@ -19,7 +19,7 @@ jobs:

DOCKER_REPOSITORY_NAME: memgraph
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4

- name: Set up QEMU
uses: docker/setup-qemu-action@v2
@@ -20,7 +20,7 @@ jobs:

steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4

- name: Set up QEMU
uses: docker/setup-qemu-action@v2
.github/workflows/release_ubuntu2004.yaml (vendored, 200 changes)

@@ -14,19 +14,21 @@ on:

schedule:
- cron: "0 22 * * *"

env:
THREADS: 24
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
BUILD_TYPE: ${{ github.event.inputs.build_type || 'Release' }}

jobs:
community_build:
name: "Community build"
runs-on: [self-hosted, Linux, X64, Ubuntu20.04]
env:
THREADS: 24
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
timeout-minutes: 960
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)

@@ -40,10 +42,6 @@ jobs:

# Initialize dependencies.
./init

# Set default build_type to Release
INPUT_BUILD_TYPE=${{ github.event.inputs.build_type }}
BUILD_TYPE=${INPUT_BUILD_TYPE:-"Release"}

# Build community binaries.
cd build
cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DMG_ENTERPRISE=OFF ..

@@ -61,14 +59,11 @@ jobs:

coverage_build:
name: "Coverage build"
runs-on: [self-hosted, Linux, X64, Ubuntu20.04]
env:
THREADS: 24
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)

@@ -110,22 +105,19 @@ jobs:

tar -czf code_coverage.tar.gz coverage.json html report.json summary.rmu

- name: Save code coverage
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: "Code coverage"
name: "Code coverage(Coverage build)"
path: tools/github/generated/code_coverage.tar.gz

debug_build:
name: "Debug build"
runs-on: [self-hosted, Linux, X64, Ubuntu20.04]
env:
THREADS: 24
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)

@@ -157,10 +149,6 @@ jobs:

run: |
./tests/drivers/run.sh

- name: Run integration tests
run: |
tests/integration/run.sh

- name: Run cppcheck and clang-format
run: |
# Activate toolchain.

@@ -171,23 +159,49 @@ jobs:

./cppcheck_and_clang_format diff

- name: Save cppcheck and clang-format errors
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: "Code coverage"
name: "Code coverage(Debug build)"
path: tools/github/cppcheck_and_clang_format.txt

debug_integration_test:
name: "Debug integration tests"
runs-on: [self-hosted, Linux, X64, Ubuntu20.04]
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
fetch-depth: 0

- name: Build debug binaries
run: |
# Activate toolchain.
source /opt/toolchain-v4/activate

# Initialize dependencies.
./init

# Build debug binaries.
cd build
cmake ..
make -j$THREADS

- name: Run integration tests
run: |
tests/integration/run.sh

release_build:
name: "Release build"
runs-on: [self-hosted, Linux, X64, Ubuntu20.04]
env:
THREADS: 24
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
timeout-minutes: 960
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)

@@ -201,10 +215,6 @@ jobs:

# Initialize dependencies.
./init

# Set default build_type to Release
INPUT_BUILD_TYPE=${{ github.event.inputs.build_type }}
BUILD_TYPE=${INPUT_BUILD_TYPE:-"Release"}

# Build release binaries.
cd build
cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE ..

@@ -226,11 +236,60 @@ jobs:

cpack -G DEB --config ../CPackConfig.cmake

- name: Save enterprise DEB package
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: "Enterprise DEB package"
path: build/output/memgraph*.deb

- name: Run GQL Behave tests
run: |
cd tests
./setup.sh /opt/toolchain-v4/activate
cd gql_behave
./continuous_integration

- name: Save quality assurance status
uses: actions/upload-artifact@v4
with:
name: "GQL Behave Status"
path: |
tests/gql_behave/gql_behave_status.csv
tests/gql_behave/gql_behave_status.html

- name: Run unit tests
run: |
# Activate toolchain.
source /opt/toolchain-v4/activate

# Run unit tests.
cd build
ctest -R memgraph__unit --output-on-failure

release_benchmark_tests:
name: "Release Benchmark Tests"
runs-on: [self-hosted, Linux, X64, Ubuntu20.04]
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
fetch-depth: 0

- name: Build release binaries
run: |
# Activate toolchain.
source /opt/toolchain-v4/activate
# Initialize dependencies.
./init

# Build release binaries
cd build
cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE ..
make -j$THREADS

- name: Run micro benchmark tests
run: |
# Activate toolchain.

@@ -257,29 +316,31 @@ jobs:

--num-database-workers 9 --num-clients-workers 30 \
--no-strict

- name: Run GQL Behave tests
run: |
cd tests
./setup.sh /opt/toolchain-v4/activate
cd gql_behave
./continuous_integration
release_e2e_test:
if: false
name: "Release End-to-end Test"
runs-on: [self-hosted, Linux, X64, Ubuntu20.04]
timeout-minutes: 90

- name: Save quality assurance status
uses: actions/upload-artifact@v3
steps:
- name: Set up repository
uses: actions/checkout@v4
with:
name: "GQL Behave Status"
path: |
tests/gql_behave/gql_behave_status.csv
tests/gql_behave/gql_behave_status.html
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
fetch-depth: 0

- name: Run unit tests
- name: Build release binaries
run: |
# Activate toolchain.
source /opt/toolchain-v4/activate
# Initialize dependencies.
./init

# Run unit tests.
# Build release binaries
cd build
ctest -R memgraph__unit --output-on-failure
cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE ..
make -j$THREADS

- name: Ensure Kafka and Pulsar are up
run: |

@@ -304,6 +365,32 @@ jobs:

cd ../pulsar
docker-compose down

release_durability_stress_tests:
name: "Release durability and stress tests"
runs-on: [self-hosted, Linux, X64, Ubuntu20.04]
timeout-minutes: 90

steps:
- name: Set up repository
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
fetch-depth: 0

- name: Build release binaries
run: |
# Activate toolchain.
source /opt/toolchain-v4/activate

# Initialize dependencies.
./init

# Build release binaries.
cd build
cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE ..
make -j$THREADS

- name: Run stress test (plain)
run: |
cd tests/stress

@@ -314,11 +401,6 @@ jobs:

cd tests/stress
./continuous_integration --use-ssl

- name: Run stress test (large)
run: |
cd tests/stress
./continuous_integration --large-dataset

- name: Run durability test (plain)
run: |
cd tests/stress
.github/workflows/stress_test_large.yaml (vendored, new file, 62 lines)

@@ -0,0 +1,62 @@

name: Stress test large

on:
workflow_dispatch:
inputs:
build_type:
type: choice
description: "Memgraph Build type. Default value is Release."
default: 'Release'
options:
- Release
- RelWithDebInfo

schedule:
- cron: "0 22 * * *"

env:
THREADS: 24
MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
BUILD_TYPE: ${{ github.event.inputs.build_type || 'Release' }}

jobs:
stress_test_large:
name: "Stress test large"
timeout-minutes: 600
strategy:
matrix:
os: [Debian10, Ubuntu20.04]
extra: [BigMemory, Gen8]
exclude:
- os: Debian10
extra: Gen8
- os: Ubuntu20.04
extra: BigMemory
runs-on: [self-hosted, Linux, X64, "${{ matrix.os }}", "${{ matrix.extra }}"]

steps:
- name: Set up repository
uses: actions/checkout@v4
with:
# Number of commits to fetch. `0` indicates all history for all
# branches and tags. (default: 1)
fetch-depth: 0

- name: Build release binaries
run: |
# Activate toolchain.
source /opt/toolchain-v4/activate

# Initialize dependencies.
./init

# Build release binaries.
cd build
cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE ..
make -j$THREADS

- name: Run stress test (large)
run: |
cd tests/stress
./continuous_integration --large-dataset
.github/workflows/upload_to_s3.yaml (vendored, 2 changes)

@@ -15,7 +15,7 @@ jobs:

runs-on: ubuntu-latest
steps:
- name: Download artifacts
uses: dawidd6/action-download-artifact@v2
uses: dawidd6/action-download-artifact@v4
with:
workflow: package_all.yaml
workflow_conclusion: success
@@ -271,6 +271,17 @@ endif()

set(libs_dir ${CMAKE_SOURCE_DIR}/libs)
add_subdirectory(libs EXCLUDE_FROM_ALL)

option(MG_EXPERIMENTAL_HIGH_AVAILABILITY "Feature flag for experimental high availability" OFF)

if (NOT MG_ENTERPRISE AND MG_EXPERIMENTAL_HIGH_AVAILABILITY)
set(MG_EXPERIMENTAL_HIGH_AVAILABILITY OFF)
message(FATAL_ERROR "MG_EXPERIMENTAL_HIGH_AVAILABILITY must be used with enterpise version of the code.")
endif ()

if (MG_EXPERIMENTAL_HIGH_AVAILABILITY)
add_compile_definitions(MG_EXPERIMENTAL_HIGH_AVAILABILITY)
endif ()

# Optional subproject configuration -------------------------------------------
option(TEST_COVERAGE "Generate coverage reports from running memgraph" OFF)
option(TOOLS "Build tools binaries" ON)

@@ -279,6 +290,18 @@ option(ASAN "Build with Address Sanitizer. To get a reasonable performance optio

option(TSAN "Build with Thread Sanitizer. To get a reasonable performance option should be used only in Release or RelWithDebInfo build " OFF)
option(UBSAN "Build with Undefined Behaviour Sanitizer" OFF)

# Build feature flags
option(MG_EXPERIMENTAL_REPLICATION_MULTITENANCY "Feature flag for experimental replicaition of multitenacy" OFF)

if (NOT MG_ENTERPRISE AND MG_EXPERIMENTAL_REPLICATION_MULTITENANCY)
set(MG_EXPERIMENTAL_REPLICATION_MULTITENANCY OFF)
message(FATAL_ERROR "MG_EXPERIMENTAL_REPLICATION_MULTITENANCY with community edition build isn't possible")
endif ()

if (MG_EXPERIMENTAL_REPLICATION_MULTITENANCY)
add_compile_definitions(MG_EXPERIMENTAL_REPLICATION_MULTITENANCY)
endif ()

if (TEST_COVERAGE)
string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type)
if (NOT lower_build_type STREQUAL "debug")
@@ -22,6 +22,8 @@ add_subdirectory(dbms)

add_subdirectory(flags)
add_subdirectory(distributed)
add_subdirectory(replication)
add_subdirectory(coordination)
add_subdirectory(replication_coordination_glue)

string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type)
@@ -1,4 +1,4 @@

// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Licensed as a Memgraph Enterprise file under the Memgraph Enterprise
// License (the "License"); by using this file, you agree to be bound by the terms of the License, and you may not use

@@ -8,17 +8,15 @@

#include "auth/auth.hpp"

#include <cstring>
#include <iostream>
#include <limits>
#include <utility>

#include <fmt/format.h>

#include "auth/crypto.hpp"
#include "auth/exceptions.hpp"
#include "license/license.hpp"
#include "utils/flag_validation.hpp"
#include "utils/logging.hpp"
#include "utils/message.hpp"
#include "utils/settings.hpp"
#include "utils/string.hpp"

@@ -46,6 +44,9 @@ namespace memgraph::auth {

const std::string kUserPrefix = "user:";
const std::string kRolePrefix = "role:";
const std::string kLinkPrefix = "link:";
const std::string kVersion = "version";

static constexpr auto kVersionV1 = "V1";

/**
 * All data stored in the `Auth` storage is stored in an underlying

@@ -64,7 +65,59 @@ const std::string kLinkPrefix = "link:";

 * key="link:<username>", value="<rolename>"
 */

Auth::Auth(const std::string &storage_directory) : storage_(storage_directory), module_(FLAGS_auth_module_executable) {}
namespace {
void MigrateVersions(kvstore::KVStore &store) {
static constexpr auto kPasswordHashV0V1 = "password_hash";
auto version_str = store.Get(kVersion);

if (!version_str) {
using namespace std::string_literals;

// pre versioning, add version to the store
auto puts = std::map<std::string, std::string>{{kVersion, kVersionV1}};

// also add hash kind into durability

auto it = store.begin(kUserPrefix);
auto const e = store.end(kUserPrefix);

if (it != e) {
const auto hash_algo = CurrentHashAlgorithm();
spdlog::info("Updating auth durability, assuming previously stored as {}", AsString(hash_algo));

for (; it != e; ++it) {
auto const &[key, value] = *it;
try {
auto user_data = nlohmann::json::parse(value);

auto password_hash = user_data[kPasswordHashV0V1];
if (!password_hash.is_string()) {
throw AuthException("Couldn't load user data!");
}
// upgrade the password_hash to include the hash algortihm
if (password_hash.empty()) {
user_data[kPasswordHashV0V1] = nullptr;
} else {
user_data[kPasswordHashV0V1] = HashedPassword{hash_algo, password_hash};
}
puts.emplace(key, user_data.dump());
} catch (const nlohmann::json::parse_error &e) {
throw AuthException("Couldn't load user data!");
}
}
}

// Perform migration to V1
store.PutMultiple(puts);
version_str = kVersionV1;
}
}
}; // namespace

Auth::Auth(std::string storage_directory, Config config)
    : storage_(std::move(storage_directory)), module_(FLAGS_auth_module_executable), config_{std::move(config)} {
MigrateVersions(storage_);
}

std::optional<User> Auth::Authenticate(const std::string &username, const std::string &password) {
if (module_.IsUsed()) {

@@ -113,7 +166,7 @@ std::optional<User> Auth::Authenticate(const std::string &username, const std::s

return std::nullopt;
}
} else {
user->UpdatePassword(password);
UpdatePassword(*user, password);
}
if (FLAGS_auth_module_manage_roles) {
if (!rolename.empty()) {

@@ -155,6 +208,10 @@ std::optional<User> Auth::Authenticate(const std::string &username, const std::s

username, "https://memgr.ph/auth"));
return std::nullopt;
}
if (user->UpgradeHash(password)) {
SaveUser(*user);
}

return user;
}
}

@@ -197,13 +254,46 @@ void Auth::SaveUser(const User &user) {

}
}

void Auth::UpdatePassword(auth::User &user, const std::optional<std::string> &password) {
// Check if null
if (!password) {
if (!config_.password_permit_null) {
throw AuthException("Null passwords aren't permitted!");
}
} else {
// Check if compliant with our filter
if (config_.custom_password_regex) {
if (const auto license_check_result = license::global_license_checker.IsEnterpriseValid(utils::global_settings);
license_check_result.HasError()) {
throw AuthException(
"Custom password regex is a Memgraph Enterprise feature. Please set the config "
"(\"--auth-password-strength-regex\") to its default value (\"{}\") or remove the flag.\n{}",
glue::kDefaultPasswordRegex,
license::LicenseCheckErrorToString(license_check_result.GetError(), "password regex"));
}
}
if (!std::regex_match(*password, config_.password_regex)) {
throw AuthException(
"The user password doesn't conform to the required strength! Regex: "
"\"{}\"",
config_.password_regex_str);
}
}

// All checks passed; update
user.UpdatePassword(password);
}

std::optional<User> Auth::AddUser(const std::string &username, const std::optional<std::string> &password) {
if (!NameRegexMatch(username)) {
throw AuthException("Invalid user name.");
}
auto existing_user = GetUser(username);
if (existing_user) return std::nullopt;
auto existing_role = GetRole(username);
if (existing_role) return std::nullopt;
auto new_user = User(username);
new_user.UpdatePassword(password);
UpdatePassword(new_user, password);
SaveUser(new_user);
return new_user;
}

@@ -255,10 +345,11 @@ void Auth::SaveRole(const Role &role) {

}

std::optional<Role> Auth::AddRole(const std::string &rolename) {
auto existing_role = GetRole(rolename);
if (existing_role) return std::nullopt;
auto existing_user = GetUser(rolename);
if (existing_user) return std::nullopt;
if (!NameRegexMatch(rolename)) {
throw AuthException("Invalid role name.");
}
if (auto existing_role = GetRole(rolename)) return std::nullopt;
if (auto existing_user = GetUser(rolename)) return std::nullopt;
auto new_role = Role(rolename);
SaveRole(new_role);
return new_role;

@@ -285,8 +376,7 @@ std::vector<auth::Role> Auth::AllRoles() const {

for (auto it = storage_.begin(kRolePrefix); it != storage_.end(kRolePrefix); ++it) {
auto rolename = it->first.substr(kRolePrefix.size());
if (rolename != utils::ToLowerCase(rolename)) continue;
auto role = GetRole(rolename);
if (role) {
if (auto role = GetRole(rolename)) {
ret.push_back(*role);
} else {
throw AuthException("Couldn't load role '{}'!", rolename);

@@ -296,15 +386,14 @@ std::vector<auth::Role> Auth::AllRoles() const {

}

std::vector<auth::User> Auth::AllUsersForRole(const std::string &rolename_orig) const {
auto rolename = utils::ToLowerCase(rolename_orig);
const auto rolename = utils::ToLowerCase(rolename_orig);
std::vector<auth::User> ret;
for (auto it = storage_.begin(kLinkPrefix); it != storage_.end(kLinkPrefix); ++it) {
auto username = it->first.substr(kLinkPrefix.size());
if (username != utils::ToLowerCase(username)) continue;
if (it->second != utils::ToLowerCase(it->second)) continue;
if (it->second == rolename) {
auto user = GetUser(username);
if (user) {
if (auto user = GetUser(username)) {
ret.push_back(std::move(*user));
} else {
throw AuthException("Couldn't load user '{}'!", username);

@@ -316,8 +405,7 @@ std::vector<auth::User> Auth::AllUsersForRole(const std::string &rolename_orig)

#ifdef MG_ENTERPRISE
bool Auth::GrantDatabaseToUser(const std::string &db, const std::string &name) {
auto user = GetUser(name);
if (user) {
if (auto user = GetUser(name)) {
if (db == kAllDatabases) {
user->db_access().GrantAll();
} else {

@@ -330,8 +418,7 @@ bool Auth::GrantDatabaseToUser(const std::string &db, const std::string &name) {

}

bool Auth::RevokeDatabaseFromUser(const std::string &db, const std::string &name) {
auto user = GetUser(name);
if (user) {
if (auto user = GetUser(name)) {
if (db == kAllDatabases) {
user->db_access().DenyAll();
} else {

@@ -346,17 +433,15 @@ bool Auth::RevokeDatabaseFromUser(const std::string &db, const std::string &name

void Auth::DeleteDatabase(const std::string &db) {
for (auto it = storage_.begin(kUserPrefix); it != storage_.end(kUserPrefix); ++it) {
auto username = it->first.substr(kUserPrefix.size());
auto user = GetUser(username);
if (user) {
if (auto user = GetUser(username)) {
user->db_access().Delete(db);
SaveUser(*user);
}
}
}

bool Auth::SetMainDatabase(const std::string &db, const std::string &name) {
auto user = GetUser(name);
if (user) {
bool Auth::SetMainDatabase(std::string_view db, const std::string &name) {
if (auto user = GetUser(name)) {
if (!user->db_access().SetDefault(db)) {
throw AuthException("Couldn't set default database '{}' for user '{}'!", db, name);
}

@@ -367,4 +452,19 @@ bool Auth::SetMainDatabase(const std::string &db, const std::string &name) {

}
#endif

bool Auth::NameRegexMatch(const std::string &user_or_role) const {
if (config_.custom_name_regex) {
if (const auto license_check_result =
        memgraph::license::global_license_checker.IsEnterpriseValid(memgraph::utils::global_settings);
    license_check_result.HasError()) {
throw memgraph::auth::AuthException(
"Custom user/role regex is a Memgraph Enterprise feature. Please set the config "
"(\"--auth-user-or-role-name-regex\") to its default value (\"{}\") or remove the flag.\n{}",
glue::kDefaultUserRoleRegex,
memgraph::license::LicenseCheckErrorToString(license_check_result.GetError(), "user/role regex"));
}
}
return std::regex_match(user_or_role, config_.name_regex);
}

} // namespace memgraph::auth
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Licensed as a Memgraph Enterprise file under the Memgraph Enterprise
|
||||
// License (the "License"); by using this file, you agree to be bound by the terms of the License, and you may not use
|
||||
@ -10,11 +10,13 @@
|
||||
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <regex>
|
||||
#include <vector>
|
||||
|
||||
#include "auth/exceptions.hpp"
|
||||
#include "auth/models.hpp"
|
||||
#include "auth/module.hpp"
|
||||
#include "glue/auth_global.hpp"
|
||||
#include "kvstore/kvstore.hpp"
|
||||
#include "utils/settings.hpp"
|
||||
|
||||
@ -31,7 +33,40 @@ static const constexpr char *const kAllDatabases = "*";
|
||||
*/
|
||||
class Auth final {
|
||||
public:
|
||||
explicit Auth(const std::string &storage_directory);
|
||||
struct Config {
|
||||
Config() {}
|
||||
Config(std::string name_regex, std::string password_regex, bool password_permit_null)
|
||||
: name_regex_str{std::move(name_regex)},
|
||||
password_regex_str{std::move(password_regex)},
|
||||
password_permit_null{password_permit_null},
|
||||
custom_name_regex{name_regex_str != glue::kDefaultUserRoleRegex},
|
||||
name_regex{name_regex_str},
|
||||
custom_password_regex{password_regex_str != glue::kDefaultPasswordRegex},
|
||||
password_regex{password_regex_str} {}
|
||||
|
||||
std::string name_regex_str{glue::kDefaultUserRoleRegex};
|
||||
std::string password_regex_str{glue::kDefaultPasswordRegex};
|
||||
bool password_permit_null{true};
|
||||
|
||||
private:
|
||||
friend class Auth;
|
||||
bool custom_name_regex{false};
|
||||
std::regex name_regex{name_regex_str};
|
||||
bool custom_password_regex{false};
|
||||
std::regex password_regex{password_regex_str};
|
||||
};
|
||||
|
||||
explicit Auth(std::string storage_directory, Config config);
|
||||
|
||||
/**
|
||||
* @brief Set the Config object
|
||||
*
|
||||
* @param config
|
||||
*/
|
||||
void SetConfig(Config config) {
|
||||
// NOTE: The Auth class itself is not thread-safe, higher-level code needs to synchronize it when using it.
|
||||
config_ = std::move(config);
|
||||
}
|
||||
|
||||
/**
|
||||
* Authenticates a user using his username and password.
|
||||
@ -85,6 +120,14 @@ class Auth final {
|
||||
*/
|
||||
bool RemoveUser(const std::string &username);
|
||||
|
||||
/**
|
||||
* @brief
|
||||
*
|
||||
* @param user
|
||||
* @param password
|
||||
*/
|
||||
void UpdatePassword(auth::User &user, const std::optional<std::string> &password);
|
||||
|
||||
/**
|
||||
* Gets all users from the storage.
|
||||
*
|
||||
@ -195,14 +238,24 @@ class Auth final {
|
||||
* @return true on success
|
||||
* @throw AuthException if unable to find or update the user
|
||||
*/
|
||||
bool SetMainDatabase(const std::string &db, const std::string &name);
|
||||
bool SetMainDatabase(std::string_view db, const std::string &name);
|
||||
#endif
|
||||
|
||||
private:
|
||||
/**
|
||||
 * @brief Check whether the given user or role name matches the configured name regex.
|
||||
*
|
||||
* @param user_or_role
|
||||
* @return true
|
||||
* @return false
|
||||
*/
|
||||
bool NameRegexMatch(const std::string &user_or_role) const;
|
||||
|
||||
// Even though the `kvstore::KVStore` class is guaranteed to be thread-safe,
|
||||
// Auth is not thread-safe because modifying users and roles might require
|
||||
// more than one operation on the storage.
|
||||
kvstore::KVStore storage_;
|
||||
auth::Module module_;
|
||||
Config config_;
|
||||
};
|
||||
} // namespace memgraph::auth
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Licensed as a Memgraph Enterprise file under the Memgraph Enterprise
|
||||
// License (the "License"); by using this file, you agree to be bound by the terms of the License, and you may not use
|
||||
@ -22,10 +22,14 @@
|
||||
|
||||
namespace {
|
||||
using namespace std::literals;
|
||||
inline constexpr std::array password_encryption_mappings{
|
||||
std::pair{"bcrypt"sv, memgraph::auth::PasswordEncryptionAlgorithm::BCRYPT},
|
||||
std::pair{"sha256"sv, memgraph::auth::PasswordEncryptionAlgorithm::SHA256},
|
||||
std::pair{"sha256-multiple"sv, memgraph::auth::PasswordEncryptionAlgorithm::SHA256_MULTIPLE}};
|
||||
|
||||
constexpr auto kHashAlgo = "hash_algo";
|
||||
constexpr auto kPasswordHash = "password_hash";
|
||||
|
||||
inline constexpr std::array password_hash_mappings{
|
||||
std::pair{"bcrypt"sv, memgraph::auth::PasswordHashAlgorithm::BCRYPT},
|
||||
std::pair{"sha256"sv, memgraph::auth::PasswordHashAlgorithm::SHA256},
|
||||
std::pair{"sha256-multiple"sv, memgraph::auth::PasswordHashAlgorithm::SHA256_MULTIPLE}};
|
||||
|
||||
inline constexpr uint64_t ONE_SHA_ITERATION = 1;
|
||||
inline constexpr uint64_t MULTIPLE_SHA_ITERATIONS = 1024;
|
||||
@ -35,7 +39,7 @@ inline constexpr uint64_t MULTIPLE_SHA_ITERATIONS = 1024;
|
||||
DEFINE_VALIDATED_string(password_encryption_algorithm, "bcrypt",
|
||||
"The password encryption algorithm used for authentication.", {
|
||||
if (const auto result =
|
||||
memgraph::utils::IsValidEnumValueString(value, password_encryption_mappings);
|
||||
memgraph::utils::IsValidEnumValueString(value, password_hash_mappings);
|
||||
result.HasError()) {
|
||||
const auto error = result.GetError();
|
||||
switch (error) {
|
||||
@ -45,7 +49,7 @@ DEFINE_VALIDATED_string(password_encryption_algorithm, "bcrypt",
|
||||
}
|
||||
case memgraph::utils::ValidationError::InvalidValue: {
|
||||
std::cout << "Invalid value for password encryption algorithm. Allowed values: "
|
||||
<< memgraph::utils::GetAllowedEnumValuesString(password_encryption_mappings)
|
||||
<< memgraph::utils::GetAllowedEnumValuesString(password_hash_mappings)
|
||||
<< std::endl;
|
||||
break;
|
||||
}
|
||||
@ -58,7 +62,7 @@ DEFINE_VALIDATED_string(password_encryption_algorithm, "bcrypt",
|
||||
|
||||
namespace memgraph::auth {
|
||||
namespace BCrypt {
|
||||
std::string EncryptPassword(const std::string &password) {
|
||||
std::string HashPassword(const std::string &password) {
|
||||
char salt[BCRYPT_HASHSIZE];
|
||||
char hash[BCRYPT_HASHSIZE];
|
||||
|
||||
@ -86,16 +90,30 @@ bool VerifyPassword(const std::string &password, const std::string &hash) {
|
||||
} // namespace BCrypt
|
||||
|
||||
namespace SHA {
|
||||
|
||||
namespace {
|
||||
|
||||
constexpr auto SHA_LENGTH = 64U;
|
||||
constexpr auto SALT_SIZE = 16U;
|
||||
constexpr auto SALT_SIZE_DURABLE = SALT_SIZE * 2;
|
||||
|
||||
#if OPENSSL_VERSION_MAJOR >= 3
|
||||
std::string EncryptPasswordOpenSSL3(const std::string &password, const uint64_t number_of_iterations) {
|
||||
std::string HashPasswordOpenSSL3(std::string_view password, const uint64_t number_of_iterations,
|
||||
std::string_view salt) {
|
||||
unsigned char hash[SHA256_DIGEST_LENGTH];
|
||||
|
||||
EVP_MD_CTX *ctx = EVP_MD_CTX_new();
|
||||
EVP_MD *md = EVP_MD_fetch(nullptr, "SHA2-256", nullptr);
|
||||
|
||||
EVP_DigestInit_ex(ctx, md, nullptr);
|
||||
|
||||
if (!salt.empty()) {
|
||||
DMG_ASSERT(salt.size() == SALT_SIZE);
|
||||
EVP_DigestUpdate(ctx, salt.data(), salt.size());
|
||||
}
|
||||
|
||||
for (auto i = 0; i < number_of_iterations; i++) {
|
||||
EVP_DigestUpdate(ctx, password.c_str(), password.size());
|
||||
EVP_DigestUpdate(ctx, password.data(), password.size());
|
||||
}
|
||||
EVP_DigestFinal_ex(ctx, hash, nullptr);
|
||||
|
||||
@ -103,6 +121,11 @@ std::string EncryptPasswordOpenSSL3(const std::string &password, const uint64_t
|
||||
EVP_MD_CTX_free(ctx);
|
||||
|
||||
std::stringstream result_stream;
|
||||
|
||||
for (unsigned char salt_char : salt) {
|
||||
result_stream << std::hex << std::setw(2) << std::setfill('0') << (((unsigned int)salt_char) & 0xFFU);
|
||||
}
|
||||
|
||||
for (auto hash_char : hash) {
|
||||
result_stream << std::hex << std::setw(2) << std::setfill('0') << (int)hash_char;
|
||||
}
|
||||
@ -110,17 +133,27 @@ std::string EncryptPasswordOpenSSL3(const std::string &password, const uint64_t
|
||||
return result_stream.str();
|
||||
}
|
||||
#else
|
||||
std::string EncryptPasswordOpenSSL1_1(const std::string &password, const uint64_t number_of_iterations) {
|
||||
std::string HashPasswordOpenSSL1_1(std::string_view password, const uint64_t number_of_iterations,
|
||||
std::string_view salt) {
|
||||
unsigned char hash[SHA256_DIGEST_LENGTH];
|
||||
|
||||
SHA256_CTX sha256;
|
||||
SHA256_Init(&sha256);
|
||||
|
||||
if (!salt.empty()) {
|
||||
DMG_ASSERT(salt.size() == SALT_SIZE);
|
||||
SHA256_Update(&sha256, salt.data(), salt.size());
|
||||
}
|
||||
|
||||
for (auto i = 0; i < number_of_iterations; i++) {
|
||||
SHA256_Update(&sha256, password.c_str(), password.size());
|
||||
SHA256_Update(&sha256, password.data(), password.size());
|
||||
}
|
||||
SHA256_Final(hash, &sha256);
|
||||
|
||||
std::stringstream ss;
|
||||
for (unsigned char salt_char : salt) {
|
||||
ss << std::hex << std::setw(2) << std::setfill('0') << (((unsigned int)salt_char) & 0xFFU);
|
||||
}
|
||||
for (auto hash_char : hash) {
|
||||
ss << std::hex << std::setw(2) << std::setfill('0') << (int)hash_char;
|
||||
}
|
||||
@ -129,55 +162,144 @@ std::string EncryptPasswordOpenSSL1_1(const std::string &password, const uint64_
|
||||
}
|
||||
#endif
|
||||
|
||||
std::string EncryptPassword(const std::string &password, const uint64_t number_of_iterations) {
|
||||
std::string HashPassword(std::string_view password, const uint64_t number_of_iterations, std::string_view salt) {
|
||||
#if OPENSSL_VERSION_MAJOR >= 3
|
||||
return EncryptPasswordOpenSSL3(password, number_of_iterations);
|
||||
return HashPasswordOpenSSL3(password, number_of_iterations, salt);
|
||||
#else
|
||||
return EncryptPasswordOpenSSL1_1(password, number_of_iterations);
|
||||
return HashPasswordOpenSSL1_1(password, number_of_iterations, salt);
|
||||
#endif
|
||||
}
|
||||
|
||||
bool VerifyPassword(const std::string &password, const std::string &hash, const uint64_t number_of_iterations) {
|
||||
auto password_hash = EncryptPassword(password, number_of_iterations);
|
||||
auto ExtractSalt(std::string_view salt_durable) -> std::array<char, SALT_SIZE> {
|
||||
static_assert(SALT_SIZE_DURABLE % 2 == 0);
|
||||
static_assert(SALT_SIZE_DURABLE / 2 == SALT_SIZE);
|
||||
|
||||
MG_ASSERT(salt_durable.size() == SALT_SIZE_DURABLE);
|
||||
auto const *b = salt_durable.cbegin();
|
||||
auto const *const e = salt_durable.cend();
|
||||
|
||||
auto salt = std::array<char, SALT_SIZE>{};
|
||||
auto *inserter = salt.begin();
|
||||
|
||||
auto const toval = [](char a) -> uint8_t {
|
||||
if ('0' <= a && a <= '9') {
|
||||
return a - '0';
|
||||
}
|
||||
if ('a' <= a && a <= 'f') {
|
||||
return 10 + (a - 'a');
|
||||
}
|
||||
MG_ASSERT(false, "Currupt hash, can't extract salt");
|
||||
__builtin_unreachable();
|
||||
};
|
||||
|
||||
for (; b != e; b += 2, ++inserter) {
|
||||
*inserter = static_cast<char>(static_cast<uint8_t>(toval(b[0]) << 4U) | toval(b[1]));
|
||||
}
|
||||
return salt;
|
||||
}
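ExtractSalt reverses the hex encoding used when the salt is prepended to the digest, so SALT_SIZE raw bytes always occupy SALT_SIZE_DURABLE hex characters. A standalone round-trip sketch of that encoding and decoding (not the project's helpers):

// Round-trip sketch of the hex salt encoding/decoding used above:
// SALT_SIZE raw bytes <-> SALT_SIZE * 2 lowercase hex characters.
#include <array>
#include <cassert>
#include <cstdint>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

constexpr std::size_t kSaltSize = 16;

std::string ToHex(const std::array<unsigned char, kSaltSize> &salt) {
  std::ostringstream os;
  for (unsigned char c : salt) os << std::hex << std::setw(2) << std::setfill('0') << static_cast<unsigned>(c);
  return os.str();
}

std::array<unsigned char, kSaltSize> FromHex(const std::string &hex) {
  assert(hex.size() == kSaltSize * 2);
  auto nibble = [](char a) -> std::uint8_t {
    if ('0' <= a && a <= '9') return a - '0';
    assert('a' <= a && a <= 'f');
    return 10 + (a - 'a');
  };
  std::array<unsigned char, kSaltSize> out{};
  for (std::size_t i = 0; i < kSaltSize; ++i) {
    out[i] = static_cast<unsigned char>((nibble(hex[2 * i]) << 4U) | nibble(hex[2 * i + 1]));
  }
  return out;
}

int main() {
  std::array<unsigned char, kSaltSize> salt{};
  for (std::size_t i = 0; i < kSaltSize; ++i) salt[i] = static_cast<unsigned char>(i * 7);
  assert(FromHex(ToHex(salt)) == salt);  // decoding recovers the original bytes
  std::cout << ToHex(salt) << '\n';
}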
|
||||
|
||||
bool IsSalted(std::string_view hash) { return hash.size() == SHA_LENGTH + SALT_SIZE_DURABLE; }
|
||||
|
||||
bool VerifyPassword(std::string_view password, std::string_view hash, const uint64_t number_of_iterations) {
|
||||
auto password_hash = std::invoke([&] {
|
||||
if (hash.size() == SHA_LENGTH) [[unlikely]] {
|
||||
// Just SHA256
|
||||
return HashPassword(password, number_of_iterations, {});
|
||||
} else {
|
||||
// SHA256 + SALT
|
||||
MG_ASSERT(IsSalted(hash));
|
||||
auto const salt_durable = std::string_view{hash.data(), SALT_SIZE_DURABLE};
|
||||
std::array<char, SALT_SIZE> salt = ExtractSalt(salt_durable);
|
||||
return HashPassword(password, number_of_iterations, {salt.data(), salt.size()});
|
||||
}
|
||||
});
|
||||
return password_hash == hash;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
} // namespace SHA
|
||||
|
||||
bool VerifyPassword(const std::string &password, const std::string &hash) {
|
||||
const auto password_encryption_algorithm = utils::StringToEnum<PasswordEncryptionAlgorithm>(
|
||||
FLAGS_password_encryption_algorithm, password_encryption_mappings);
|
||||
HashedPassword HashPassword(const std::string &password, std::optional<PasswordHashAlgorithm> override_algo) {
|
||||
auto const hash_algo = override_algo.value_or(CurrentHashAlgorithm());
|
||||
auto password_hash = std::invoke([&] {
|
||||
switch (hash_algo) {
|
||||
case PasswordHashAlgorithm::BCRYPT: {
|
||||
return BCrypt::HashPassword(password);
|
||||
}
|
||||
case PasswordHashAlgorithm::SHA256:
|
||||
case PasswordHashAlgorithm::SHA256_MULTIPLE: {
|
||||
auto gen = std::mt19937(std::random_device{}());
|
||||
auto salt = std::array<char, SHA::SALT_SIZE>{};
|
||||
auto dis = std::uniform_int_distribution<unsigned char>(0, 255);
|
||||
std::generate(salt.begin(), salt.end(), [&]() { return dis(gen); });
|
||||
auto iterations = (hash_algo == PasswordHashAlgorithm::SHA256) ? ONE_SHA_ITERATION : MULTIPLE_SHA_ITERATIONS;
|
||||
return SHA::HashPassword(password, iterations, {salt.data(), salt.size()});
|
||||
}
|
||||
}
|
||||
});
|
||||
return HashedPassword{hash_algo, std::move(password_hash)};
|
||||
};
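HashPassword above draws a fresh random salt for every SHA-based hash. A standalone sketch of that salt generation; it draws ints and narrows to bytes because std::uniform_int_distribution is, strictly speaking, only specified for the standard integer types:

// Standalone sketch of per-password salt generation: 16 random bytes drawn
// from std::mt19937 seeded by std::random_device.
#include <algorithm>
#include <array>
#include <iostream>
#include <random>

int main() {
  constexpr std::size_t kSaltSize = 16;
  std::mt19937 gen(std::random_device{}());
  std::uniform_int_distribution<int> dis(0, 255);

  std::array<unsigned char, kSaltSize> salt{};
  std::generate(salt.begin(), salt.end(), [&] { return static_cast<unsigned char>(dis(gen)); });

  for (unsigned char c : salt) std::cout << static_cast<unsigned>(c) << ' ';
  std::cout << '\n';
}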
|
||||
|
||||
if (!password_encryption_algorithm.has_value()) {
|
||||
throw AuthException("Invalid password encryption flag '{}'!", FLAGS_password_encryption_algorithm);
|
||||
namespace {
|
||||
|
||||
auto InternalParseHashAlgorithm(std::string_view algo) -> PasswordHashAlgorithm {
|
||||
auto maybe_parsed = utils::StringToEnum<PasswordHashAlgorithm>(algo, password_hash_mappings);
|
||||
if (!maybe_parsed) {
|
||||
throw AuthException("Invalid password encryption '{}'!", algo);
|
||||
}
|
||||
|
||||
switch (password_encryption_algorithm.value()) {
|
||||
case PasswordEncryptionAlgorithm::BCRYPT:
|
||||
return BCrypt::VerifyPassword(password, hash);
|
||||
case PasswordEncryptionAlgorithm::SHA256:
|
||||
return SHA::VerifyPassword(password, hash, ONE_SHA_ITERATION);
|
||||
case PasswordEncryptionAlgorithm::SHA256_MULTIPLE:
|
||||
return SHA::VerifyPassword(password, hash, MULTIPLE_SHA_ITERATIONS);
|
||||
}
|
||||
|
||||
throw AuthException("Invalid password encryption flag '{}'!", FLAGS_password_encryption_algorithm);
|
||||
return *maybe_parsed;
|
||||
}
|
||||
|
||||
std::string EncryptPassword(const std::string &password) {
|
||||
const auto password_encryption_algorithm = utils::StringToEnum<PasswordEncryptionAlgorithm>(
|
||||
FLAGS_password_encryption_algorithm, password_encryption_mappings);
|
||||
PasswordHashAlgorithm &InternalCurrentHashAlgorithm() {
|
||||
static auto current = PasswordHashAlgorithm::BCRYPT;
|
||||
static std::once_flag flag;
|
||||
std::call_once(flag, [] { current = InternalParseHashAlgorithm(FLAGS_password_encryption_algorithm); });
|
||||
return current;
|
||||
}
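InternalCurrentHashAlgorithm parses the flag exactly once via std::call_once and then hands out a mutable reference so SetHashAlgorithm can override it later. A standalone sketch of that parse-once, override-later pattern with a placeholder in place of the gflags value:

// Sketch of the parse-once / override-later pattern used for the current
// hash algorithm; the "flag" value here is a local placeholder.
#include <iostream>
#include <mutex>
#include <string_view>

enum class Algo { BCRYPT, SHA256 };

Algo Parse(std::string_view s) { return s == "sha256" ? Algo::SHA256 : Algo::BCRYPT; }

Algo &CurrentAlgo() {
  static Algo current = Algo::BCRYPT;
  static std::once_flag flag;
  std::call_once(flag, [] { current = Parse("sha256"); });  // placeholder for the flag value
  return current;
}

void SetAlgo(std::string_view s) { CurrentAlgo() = Parse(s); }

int main() {
  std::cout << static_cast<int>(CurrentAlgo()) << '\n';  // 1: initialized from the "flag"
  SetAlgo("bcrypt");
  std::cout << static_cast<int>(CurrentAlgo()) << '\n';  // 0: overridden afterwards
}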
|
||||
} // namespace
|
||||
|
||||
if (!password_encryption_algorithm.has_value()) {
|
||||
throw AuthException("Invalid password encryption flag '{}'!", FLAGS_password_encryption_algorithm);
|
||||
auto CurrentHashAlgorithm() -> PasswordHashAlgorithm { return InternalCurrentHashAlgorithm(); }
|
||||
|
||||
void SetHashAlgorithm(std::string_view algo) {
|
||||
auto &current = InternalCurrentHashAlgorithm();
|
||||
current = InternalParseHashAlgorithm(algo);
|
||||
}
|
||||
|
||||
auto AsString(PasswordHashAlgorithm hash_algo) -> std::string_view {
|
||||
return *utils::EnumToString<PasswordHashAlgorithm>(hash_algo, password_hash_mappings);
|
||||
}
|
||||
|
||||
bool HashedPassword::VerifyPassword(const std::string &password) {
|
||||
switch (hash_algo) {
|
||||
case PasswordHashAlgorithm::BCRYPT:
|
||||
return BCrypt::VerifyPassword(password, password_hash);
|
||||
case PasswordHashAlgorithm::SHA256:
|
||||
return SHA::VerifyPassword(password, password_hash, ONE_SHA_ITERATION);
|
||||
case PasswordHashAlgorithm::SHA256_MULTIPLE:
|
||||
return SHA::VerifyPassword(password, password_hash, MULTIPLE_SHA_ITERATIONS);
|
||||
}
|
||||
}
|
||||
|
||||
switch (password_encryption_algorithm.value()) {
|
||||
case PasswordEncryptionAlgorithm::BCRYPT:
|
||||
return BCrypt::EncryptPassword(password);
|
||||
case PasswordEncryptionAlgorithm::SHA256:
|
||||
return SHA::EncryptPassword(password, ONE_SHA_ITERATION);
|
||||
case PasswordEncryptionAlgorithm::SHA256_MULTIPLE:
|
||||
return SHA::EncryptPassword(password, MULTIPLE_SHA_ITERATIONS);
|
||||
void to_json(nlohmann::json &j, const HashedPassword &p) {
|
||||
j = nlohmann::json{{kHashAlgo, p.hash_algo}, {kPasswordHash, p.password_hash}};
|
||||
}
|
||||
|
||||
void from_json(const nlohmann::json &j, HashedPassword &p) {
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
|
||||
PasswordHashAlgorithm hash_algo;
|
||||
j.at(kHashAlgo).get_to(hash_algo);
|
||||
auto password_hash = j.value(kPasswordHash, std::string());
|
||||
p = HashedPassword{hash_algo, std::move(password_hash)};
|
||||
}
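The to_json/from_json pair above lets HashedPassword be converted implicitly through nlohmann::json. A minimal sketch of the same ADL-hook round trip with a stand-in struct (the include path follows the project's <json/json.hpp> convention):

// Minimal nlohmann::json ADL-hook sketch mirroring the HashedPassword
// serialization above; Stored is a stand-in type, not the real class.
#include <iostream>
#include <string>
#include <json/json.hpp>  // nlohmann::json, via the same header path the project uses

struct Stored {
  int algo{0};
  std::string hash{};
  friend bool operator==(const Stored &, const Stored &) = default;
};

void to_json(nlohmann::json &j, const Stored &s) {
  j = nlohmann::json{{"hash_algo", s.algo}, {"password_hash", s.hash}};
}
void from_json(const nlohmann::json &j, Stored &s) {
  j.at("hash_algo").get_to(s.algo);
  s.hash = j.value("password_hash", std::string());
}

int main() {
  Stored original{1, "abc123"};
  nlohmann::json j = original;      // calls to_json
  auto restored = j.get<Stored>();  // calls from_json
  std::cout << std::boolalpha << (original == restored) << '\n';  // true
}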
|
||||
|
||||
bool HashedPassword::IsSalted() const {
|
||||
switch (hash_algo) {
|
||||
case PasswordHashAlgorithm::BCRYPT:
|
||||
return true;
|
||||
case PasswordHashAlgorithm::SHA256:
|
||||
case PasswordHashAlgorithm::SHA256_MULTIPLE:
|
||||
return SHA::IsSalted(password_hash);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Licensed as a Memgraph Enterprise file under the Memgraph Enterprise
|
||||
// License (the "License"); by using this file, you agree to be bound by the terms of the License, and you may not use
|
||||
@ -8,14 +8,45 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <json/json.hpp>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
|
||||
namespace memgraph::auth {
|
||||
enum class PasswordEncryptionAlgorithm : uint8_t { BCRYPT, SHA256, SHA256_MULTIPLE };
|
||||
/// Needs to be stable, auth durability depends on this
|
||||
enum class PasswordHashAlgorithm : uint8_t { BCRYPT = 0, SHA256 = 1, SHA256_MULTIPLE = 2 };
|
||||
|
||||
/// @throw AuthException if unable to encrypt the password.
|
||||
std::string EncryptPassword(const std::string &password);
|
||||
void SetHashAlgorithm(std::string_view algo);
|
||||
|
||||
/// @throw AuthException if unable to verify the password.
|
||||
bool VerifyPassword(const std::string &password, const std::string &hash);
|
||||
auto CurrentHashAlgorithm() -> PasswordHashAlgorithm;
|
||||
|
||||
auto AsString(PasswordHashAlgorithm hash_algo) -> std::string_view;
|
||||
|
||||
struct HashedPassword {
|
||||
HashedPassword() = default;
|
||||
HashedPassword(PasswordHashAlgorithm hash_algo, std::string password_hash)
|
||||
: hash_algo{hash_algo}, password_hash{std::move(password_hash)} {}
|
||||
HashedPassword(HashedPassword const &) = default;
|
||||
HashedPassword(HashedPassword &&) = default;
|
||||
HashedPassword &operator=(HashedPassword const &) = default;
|
||||
HashedPassword &operator=(HashedPassword &&) = default;
|
||||
|
||||
friend bool operator==(HashedPassword const &, HashedPassword const &) = default;
|
||||
|
||||
bool VerifyPassword(const std::string &password);
|
||||
|
||||
bool IsSalted() const;
|
||||
|
||||
auto HashAlgo() const -> PasswordHashAlgorithm { return hash_algo; }
|
||||
|
||||
friend void to_json(nlohmann::json &j, const HashedPassword &p);
|
||||
friend void from_json(const nlohmann::json &j, HashedPassword &p);
|
||||
|
||||
private:
|
||||
PasswordHashAlgorithm hash_algo{PasswordHashAlgorithm::BCRYPT};
|
||||
std::string password_hash{};
|
||||
};
|
||||
|
||||
/// @throw AuthException if unable to hash the password.
|
||||
HashedPassword HashPassword(const std::string &password, std::optional<PasswordHashAlgorithm> override_algo = {});
|
||||
} // namespace memgraph::auth
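Taken together, the new interface hashes with whatever algorithm is currently configured and verifies with whatever algorithm the stored HashedPassword carries. A hedged usage sketch; it assumes the header path auth/crypto.hpp and being built inside Memgraph against the auth library, so it is an illustration rather than a standalone program:

// Hedged usage sketch of the crypto interface declared above; assumes this
// translation unit is built inside Memgraph and linked against the auth library.
#include <cassert>
#include "auth/crypto.hpp"  // assumed include path for the declarations above

void Example() {
  using namespace memgraph::auth;

  SetHashAlgorithm("sha256-multiple");             // pick the algorithm for new hashes
  HashedPassword stored = HashPassword("s3cret");  // hashed with CurrentHashAlgorithm()

  assert(stored.VerifyPassword("s3cret"));         // verified with the algorithm stored alongside the hash
  assert(stored.HashAlgo() == PasswordHashAlgorithm::SHA256_MULTIPLE);
  assert(stored.IsSalted());                       // new SHA hashes carry a salt prefix
}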
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Licensed as a Memgraph Enterprise file under the Memgraph Enterprise
|
||||
// License (the "License"); by using this file, you agree to be bound by the terms of the License, and you may not use
|
||||
@ -9,7 +9,6 @@
|
||||
#include "auth/models.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <regex>
|
||||
#include <utility>
|
||||
|
||||
#include <gflags/gflags.h>
|
||||
@ -21,22 +20,26 @@
|
||||
#include "query/constants.hpp"
|
||||
#include "spdlog/spdlog.h"
|
||||
#include "utils/cast.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
#include "utils/settings.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_bool(auth_password_permit_null, true, "Set to false to disable null passwords.");
|
||||
|
||||
inline constexpr std::string_view default_password_regex = ".+";
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_string(auth_password_strength_regex, default_password_regex.data(),
|
||||
"The regular expression that should be used to match the entire "
|
||||
"entered password to ensure its strength.");
|
||||
|
||||
namespace memgraph::auth {
|
||||
namespace {
|
||||
|
||||
constexpr auto kRoleName = "rolename";
|
||||
constexpr auto kPermissions = "permissions";
|
||||
constexpr auto kGrants = "grants";
|
||||
constexpr auto kDenies = "denies";
|
||||
constexpr auto kUsername = "username";
|
||||
constexpr auto kPasswordHash = "password_hash";
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
constexpr auto kGlobalPermission = "global_permission";
|
||||
constexpr auto kFineGrainedAccessHandler = "fine_grained_access_handler";
|
||||
constexpr auto kAllowAll = "allow_all";
|
||||
constexpr auto kDefault = "default";
|
||||
constexpr auto kDatabases = "databases";
|
||||
#endif
|
||||
|
||||
// Constant list of all available permissions.
|
||||
const std::vector<Permission> kPermissionsAll = {Permission::MATCH,
|
||||
Permission::CREATE,
|
||||
@ -62,7 +65,8 @@ const std::vector<Permission> kPermissionsAll = {Permission::MATCH,
|
||||
Permission::TRANSACTION_MANAGEMENT,
|
||||
Permission::STORAGE_MODE,
|
||||
Permission::MULTI_DATABASE_EDIT,
|
||||
Permission::MULTI_DATABASE_USE};
|
||||
Permission::MULTI_DATABASE_USE,
|
||||
Permission::COORDINATOR};
|
||||
|
||||
} // namespace
|
||||
|
||||
@ -118,6 +122,8 @@ std::string PermissionToString(Permission permission) {
|
||||
return "MULTI_DATABASE_EDIT";
|
||||
case Permission::MULTI_DATABASE_USE:
|
||||
return "MULTI_DATABASE_USE";
|
||||
case Permission::COORDINATOR:
|
||||
return "COORDINATOR";
|
||||
}
|
||||
}
|
||||
|
||||
@ -242,8 +248,9 @@ std::vector<Permission> Permissions::GetDenies() const {
|
||||
|
||||
nlohmann::json Permissions::Serialize() const {
|
||||
nlohmann::json data = nlohmann::json::object();
|
||||
data["grants"] = grants_;
|
||||
data["denies"] = denies_;
|
||||
|
||||
data[kGrants] = grants_;
|
||||
data[kDenies] = denies_;
|
||||
return data;
|
||||
}
|
||||
|
||||
@ -251,10 +258,10 @@ Permissions Permissions::Deserialize(const nlohmann::json &data) {
|
||||
if (!data.is_object()) {
|
||||
throw AuthException("Couldn't load permissions data!");
|
||||
}
|
||||
if (!data["grants"].is_number_unsigned() || !data["denies"].is_number_unsigned()) {
|
||||
if (!data[kGrants].is_number_unsigned() || !data[kDenies].is_number_unsigned()) {
|
||||
throw AuthException("Couldn't load permissions data!");
|
||||
}
|
||||
return Permissions{data["grants"], data["denies"]};
|
||||
return Permissions{data[kGrants], data[kDenies]};
|
||||
}
|
||||
|
||||
uint64_t Permissions::grants() const { return grants_; }
|
||||
@ -316,8 +323,8 @@ nlohmann::json FineGrainedAccessPermissions::Serialize() const {
|
||||
return {};
|
||||
}
|
||||
nlohmann::json data = nlohmann::json::object();
|
||||
data["permissions"] = permissions_;
|
||||
data["global_permission"] = global_permission_.has_value() ? global_permission_.value() : -1;
|
||||
data[kPermissions] = permissions_;
|
||||
data[kGlobalPermission] = global_permission_.has_value() ? global_permission_.value() : -1;
|
||||
return data;
|
||||
}
|
||||
|
||||
@ -330,13 +337,13 @@ FineGrainedAccessPermissions FineGrainedAccessPermissions::Deserialize(const nlo
|
||||
}
|
||||
std::optional<uint64_t> global_permission;
|
||||
|
||||
if (data["global_permission"].empty() || data["global_permission"] == -1) {
|
||||
if (data[kGlobalPermission].empty() || data[kGlobalPermission] == -1) {
|
||||
global_permission = std::nullopt;
|
||||
} else {
|
||||
global_permission = data["global_permission"];
|
||||
global_permission = data[kGlobalPermission];
|
||||
}
|
||||
|
||||
return FineGrainedAccessPermissions(data["permissions"], global_permission);
|
||||
return FineGrainedAccessPermissions(data[kPermissions], global_permission);
|
||||
}
|
||||
|
||||
const std::unordered_map<std::string, uint64_t> &FineGrainedAccessPermissions::GetPermissions() const {
|
||||
@ -442,13 +449,13 @@ const FineGrainedAccessPermissions &Role::GetFineGrainedAccessEdgeTypePermission
|
||||
|
||||
nlohmann::json Role::Serialize() const {
|
||||
nlohmann::json data = nlohmann::json::object();
|
||||
data["rolename"] = rolename_;
|
||||
data["permissions"] = permissions_.Serialize();
|
||||
data[kRoleName] = rolename_;
|
||||
data[kPermissions] = permissions_.Serialize();
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (memgraph::license::global_license_checker.IsEnterpriseValidFast()) {
|
||||
data["fine_grained_access_handler"] = fine_grained_access_handler_.Serialize();
|
||||
data[kFineGrainedAccessHandler] = fine_grained_access_handler_.Serialize();
|
||||
} else {
|
||||
data["fine_grained_access_handler"] = {};
|
||||
data[kFineGrainedAccessHandler] = {};
|
||||
}
|
||||
#endif
|
||||
return data;
|
||||
@ -458,21 +465,21 @@ Role Role::Deserialize(const nlohmann::json &data) {
|
||||
if (!data.is_object()) {
|
||||
throw AuthException("Couldn't load role data!");
|
||||
}
|
||||
if (!data["rolename"].is_string() || !data["permissions"].is_object()) {
|
||||
if (!data[kRoleName].is_string() || !data[kPermissions].is_object()) {
|
||||
throw AuthException("Couldn't load role data!");
|
||||
}
|
||||
auto permissions = Permissions::Deserialize(data["permissions"]);
|
||||
auto permissions = Permissions::Deserialize(data[kPermissions]);
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (memgraph::license::global_license_checker.IsEnterpriseValidFast()) {
|
||||
FineGrainedAccessHandler fine_grained_access_handler;
|
||||
// We can have an empty fine_grained if the user was created without a valid license
|
||||
if (data["fine_grained_access_handler"].is_object()) {
|
||||
fine_grained_access_handler = FineGrainedAccessHandler::Deserialize(data["fine_grained_access_handler"]);
|
||||
if (data[kFineGrainedAccessHandler].is_object()) {
|
||||
fine_grained_access_handler = FineGrainedAccessHandler::Deserialize(data[kFineGrainedAccessHandler]);
|
||||
}
|
||||
return {data["rolename"], permissions, std::move(fine_grained_access_handler)};
|
||||
return {data[kRoleName], permissions, std::move(fine_grained_access_handler)};
|
||||
}
|
||||
#endif
|
||||
return {data["rolename"], permissions};
|
||||
return {data[kRoleName], permissions};
|
||||
}
|
||||
|
||||
bool operator==(const Role &first, const Role &second) {
|
||||
@ -486,13 +493,13 @@ bool operator==(const Role &first, const Role &second) {
|
||||
}
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
void Databases::Add(const std::string &db) {
|
||||
void Databases::Add(std::string_view db) {
|
||||
if (allow_all_) {
|
||||
grants_dbs_.clear();
|
||||
allow_all_ = false;
|
||||
}
|
||||
grants_dbs_.emplace(db);
|
||||
denies_dbs_.erase(db);
|
||||
denies_dbs_.erase(std::string{db}); // TODO: C++23 use transparent key compare
|
||||
}
|
||||
|
||||
void Databases::Remove(const std::string &db) {
|
||||
@ -523,13 +530,13 @@ void Databases::DenyAll() {
|
||||
denies_dbs_.clear();
|
||||
}
|
||||
|
||||
bool Databases::SetDefault(const std::string &db) {
|
||||
bool Databases::SetDefault(std::string_view db) {
|
||||
if (!Contains(db)) return false;
|
||||
default_db_ = db;
|
||||
return true;
|
||||
}
|
||||
|
||||
[[nodiscard]] bool Databases::Contains(const std::string &db) const {
|
||||
[[nodiscard]] bool Databases::Contains(std::string_view db) const {
|
||||
return !denies_dbs_.contains(db) && (allow_all_ || grants_dbs_.contains(db));
|
||||
}
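grants_dbs_ and denies_dbs_ are std::set<std::string, std::less<>>, so contains()/find() accept a std::string_view without building a temporary string, while erase() still needs a std::string until C++23 (the TODO in Databases::Add above). A standalone sketch:

// Standalone sketch of heterogeneous lookup with std::less<>: contains()
// accepts a string_view directly, erase() still needs a std::string pre-C++23.
#include <iostream>
#include <set>
#include <string>
#include <string_view>

int main() {
  std::set<std::string, std::less<>> grants{"memgraph", "analytics"};

  std::string_view db = "analytics";
  std::cout << std::boolalpha << grants.contains(db) << '\n';  // true, no temporary std::string built

  grants.erase(std::string{db});  // transparent erase overloads only arrive with C++23
  std::cout << grants.contains(db) << '\n';  // false
}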
|
||||
|
||||
@ -542,10 +549,10 @@ const std::string &Databases::GetDefault() const {
|
||||
|
||||
nlohmann::json Databases::Serialize() const {
|
||||
nlohmann::json data = nlohmann::json::object();
|
||||
data["grants"] = grants_dbs_;
|
||||
data["denies"] = denies_dbs_;
|
||||
data["allow_all"] = allow_all_;
|
||||
data["default"] = default_db_;
|
||||
data[kGrants] = grants_dbs_;
|
||||
data[kDenies] = denies_dbs_;
|
||||
data[kAllowAll] = allow_all_;
|
||||
data[kDefault] = default_db_;
|
||||
return data;
|
||||
}
|
||||
|
||||
@ -553,22 +560,22 @@ Databases Databases::Deserialize(const nlohmann::json &data) {
|
||||
if (!data.is_object()) {
|
||||
throw AuthException("Couldn't load database data!");
|
||||
}
|
||||
if (!data["grants"].is_structured() || !data["denies"].is_structured() || !data["allow_all"].is_boolean() ||
|
||||
!data["default"].is_string()) {
|
||||
if (!data[kGrants].is_structured() || !data[kDenies].is_structured() || !data[kAllowAll].is_boolean() ||
|
||||
!data[kDefault].is_string()) {
|
||||
throw AuthException("Couldn't load database data!");
|
||||
}
|
||||
return {data["allow_all"], data["grants"], data["denies"], data["default"]};
|
||||
return {data[kAllowAll], data[kGrants], data[kDenies], data[kDefault]};
|
||||
}
|
||||
#endif
|
||||
|
||||
User::User() = default;
|
||||
|
||||
User::User(const std::string &username) : username_(utils::ToLowerCase(username)) {}
|
||||
User::User(const std::string &username, std::string password_hash, const Permissions &permissions)
|
||||
User::User(const std::string &username, std::optional<HashedPassword> password_hash, const Permissions &permissions)
|
||||
: username_(utils::ToLowerCase(username)), password_hash_(std::move(password_hash)), permissions_(permissions) {}
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
User::User(const std::string &username, std::string password_hash, const Permissions &permissions,
|
||||
User::User(const std::string &username, std::optional<HashedPassword> password_hash, const Permissions &permissions,
|
||||
FineGrainedAccessHandler fine_grained_access_handler, Databases db_access)
|
||||
: username_(utils::ToLowerCase(username)),
|
||||
password_hash_(std::move(password_hash)),
|
||||
@ -578,38 +585,16 @@ User::User(const std::string &username, std::string password_hash, const Permiss
|
||||
#endif
|
||||
|
||||
bool User::CheckPassword(const std::string &password) {
|
||||
if (password_hash_.empty()) return true;
|
||||
return VerifyPassword(password, password_hash_);
|
||||
return password_hash_ ? password_hash_->VerifyPassword(password) : true;
|
||||
}
|
||||
|
||||
void User::UpdatePassword(const std::optional<std::string> &password) {
|
||||
void User::UpdatePassword(const std::optional<std::string> &password,
|
||||
std::optional<PasswordHashAlgorithm> algo_override) {
|
||||
if (!password) {
|
||||
if (!FLAGS_auth_password_permit_null) {
|
||||
throw AuthException("Null passwords aren't permitted!");
|
||||
}
|
||||
password_hash_ = "";
|
||||
password_hash_.reset();
|
||||
return;
|
||||
}
|
||||
|
||||
if (FLAGS_auth_password_strength_regex != default_password_regex) {
|
||||
if (const auto license_check_result = license::global_license_checker.IsEnterpriseValid(utils::global_settings);
|
||||
license_check_result.HasError()) {
|
||||
throw AuthException(
|
||||
"Custom password regex is a Memgraph Enterprise feature. Please set the config "
|
||||
"(\"--auth-password-strength-regex\") to its default value (\"{}\") or remove the flag.\n{}",
|
||||
default_password_regex,
|
||||
license::LicenseCheckErrorToString(license_check_result.GetError(), "password regex"));
|
||||
}
|
||||
}
|
||||
std::regex re(FLAGS_auth_password_strength_regex);
|
||||
if (!std::regex_match(*password, re)) {
|
||||
throw AuthException(
|
||||
"The user password doesn't conform to the required strength! Regex: "
|
||||
"\"{}\"",
|
||||
FLAGS_auth_password_strength_regex);
|
||||
}
|
||||
|
||||
password_hash_ = EncryptPassword(*password);
|
||||
password_hash_ = HashPassword(*password, algo_override);
|
||||
}
|
||||
|
||||
void User::SetRole(const Role &role) { role_.emplace(role); }
|
||||
@ -668,16 +653,20 @@ const Role *User::role() const {
|
||||
|
||||
nlohmann::json User::Serialize() const {
|
||||
nlohmann::json data = nlohmann::json::object();
|
||||
data["username"] = username_;
|
||||
data["password_hash"] = password_hash_;
|
||||
data["permissions"] = permissions_.Serialize();
|
||||
data[kUsername] = username_;
|
||||
if (password_hash_.has_value()) {
|
||||
data[kPasswordHash] = *password_hash_;
|
||||
} else {
|
||||
data[kPasswordHash] = nullptr;
|
||||
}
|
||||
data[kPermissions] = permissions_.Serialize();
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (memgraph::license::global_license_checker.IsEnterpriseValidFast()) {
|
||||
data["fine_grained_access_handler"] = fine_grained_access_handler_.Serialize();
|
||||
data["databases"] = database_access_.Serialize();
|
||||
data[kFineGrainedAccessHandler] = fine_grained_access_handler_.Serialize();
|
||||
data[kDatabases] = database_access_.Serialize();
|
||||
} else {
|
||||
data["fine_grained_access_handler"] = {};
|
||||
data["databases"] = {};
|
||||
data[kFineGrainedAccessHandler] = {};
|
||||
data[kDatabases] = {};
|
||||
}
|
||||
#endif
|
||||
// The role shouldn't be serialized here, it is stored as a foreign key.
|
||||
@ -688,15 +677,23 @@ User User::Deserialize(const nlohmann::json &data) {
|
||||
if (!data.is_object()) {
|
||||
throw AuthException("Couldn't load user data!");
|
||||
}
|
||||
if (!data["username"].is_string() || !data["password_hash"].is_string() || !data["permissions"].is_object()) {
|
||||
auto password_hash_json = data[kPasswordHash];
|
||||
if (!data[kUsername].is_string() || !(password_hash_json.is_object() || password_hash_json.is_null()) ||
|
||||
!data[kPermissions].is_object()) {
|
||||
throw AuthException("Couldn't load user data!");
|
||||
}
|
||||
auto permissions = Permissions::Deserialize(data["permissions"]);
|
||||
|
||||
std::optional<HashedPassword> password_hash{};
|
||||
if (password_hash_json.is_object()) {
|
||||
password_hash = password_hash_json.get<HashedPassword>();
|
||||
}
|
||||
|
||||
auto permissions = Permissions::Deserialize(data[kPermissions]);
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (memgraph::license::global_license_checker.IsEnterpriseValidFast()) {
|
||||
Databases db_access;
|
||||
if (data["databases"].is_structured()) {
|
||||
db_access = Databases::Deserialize(data["databases"]);
|
||||
if (data[kDatabases].is_structured()) {
|
||||
db_access = Databases::Deserialize(data[kDatabases]);
|
||||
} else {
|
||||
// Back-compatibility
|
||||
spdlog::warn("User without specified database access. Given access to the default database.");
|
||||
@ -705,13 +702,13 @@ User User::Deserialize(const nlohmann::json &data) {
|
||||
}
|
||||
FineGrainedAccessHandler fine_grained_access_handler;
|
||||
// We can have an empty fine_grained if the user was created without a valid license
|
||||
if (data["fine_grained_access_handler"].is_object()) {
|
||||
fine_grained_access_handler = FineGrainedAccessHandler::Deserialize(data["fine_grained_access_handler"]);
|
||||
if (data[kFineGrainedAccessHandler].is_object()) {
|
||||
fine_grained_access_handler = FineGrainedAccessHandler::Deserialize(data[kFineGrainedAccessHandler]);
|
||||
}
|
||||
return {data["username"], data["password_hash"], permissions, std::move(fine_grained_access_handler), db_access};
|
||||
return {data[kUsername], std::move(password_hash), permissions, std::move(fine_grained_access_handler), db_access};
|
||||
}
|
||||
#endif
|
||||
return {data["username"], data["password_hash"], permissions};
|
||||
return {data[kUsername], std::move(password_hash), permissions};
|
||||
}
|
||||
|
||||
bool operator==(const User &first, const User &second) {
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Licensed as a Memgraph Enterprise file under the Memgraph Enterprise
|
||||
// License (the "License"); by using this file, you agree to be bound by the terms of the License, and you may not use
|
||||
@ -15,6 +15,7 @@
|
||||
|
||||
#include <json/json.hpp>
|
||||
#include <utility>
|
||||
#include "crypto.hpp"
|
||||
#include "dbms/constants.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
|
||||
@ -48,6 +49,7 @@ enum class Permission : uint64_t {
|
||||
STORAGE_MODE = 1U << 22U,
|
||||
MULTI_DATABASE_EDIT = 1U << 23U,
|
||||
MULTI_DATABASE_USE = 1U << 24U,
|
||||
COORDINATOR = 1U << 25U,
|
||||
};
|
||||
// clang-format on
|
||||
|
||||
@ -246,7 +248,7 @@ bool operator==(const Role &first, const Role &second);
|
||||
#ifdef MG_ENTERPRISE
|
||||
class Databases final {
|
||||
public:
|
||||
Databases() : grants_dbs_({dbms::kDefaultDB}), allow_all_(false), default_db_(dbms::kDefaultDB) {}
|
||||
Databases() : grants_dbs_{std::string{dbms::kDefaultDB}}, allow_all_(false), default_db_(dbms::kDefaultDB) {}
|
||||
|
||||
Databases(const Databases &) = default;
|
||||
Databases &operator=(const Databases &) = default;
|
||||
@ -259,7 +261,7 @@ class Databases final {
|
||||
*
|
||||
* @param db name of the database to grant access to
|
||||
*/
|
||||
void Add(const std::string &db);
|
||||
void Add(std::string_view db);
|
||||
|
||||
/**
|
||||
* @brief Remove database to the list of granted access.
|
||||
@ -291,7 +293,7 @@ class Databases final {
|
||||
/**
|
||||
* @brief Set the default database.
|
||||
*/
|
||||
bool SetDefault(const std::string &db);
|
||||
bool SetDefault(std::string_view db);
|
||||
|
||||
/**
|
||||
   * @brief Checks if access is granted to the database.
|
||||
@ -299,7 +301,7 @@ class Databases final {
|
||||
* @param db name of the database
|
||||
* @return true if allow_all and not denied or granted
|
||||
*/
|
||||
bool Contains(const std::string &db) const;
|
||||
bool Contains(std::string_view db) const;
|
||||
|
||||
bool GetAllowAll() const { return allow_all_; }
|
||||
const std::set<std::string, std::less<>> &GetGrants() const { return grants_dbs_; }
|
||||
@ -312,7 +314,7 @@ class Databases final {
|
||||
|
||||
private:
|
||||
Databases(bool allow_all, std::set<std::string, std::less<>> grant, std::set<std::string, std::less<>> deny,
|
||||
std::string default_db = dbms::kDefaultDB)
|
||||
std::string default_db = std::string{dbms::kDefaultDB})
|
||||
: grants_dbs_(std::move(grant)),
|
||||
denies_dbs_(std::move(deny)),
|
||||
allow_all_(allow_all),
|
||||
@ -331,9 +333,9 @@ class User final {
|
||||
User();
|
||||
|
||||
explicit User(const std::string &username);
|
||||
User(const std::string &username, std::string password_hash, const Permissions &permissions);
|
||||
User(const std::string &username, std::optional<HashedPassword> password_hash, const Permissions &permissions);
|
||||
#ifdef MG_ENTERPRISE
|
||||
User(const std::string &username, std::string password_hash, const Permissions &permissions,
|
||||
User(const std::string &username, std::optional<HashedPassword> password_hash, const Permissions &permissions,
|
||||
FineGrainedAccessHandler fine_grained_access_handler, Databases db_access = {});
|
||||
#endif
|
||||
User(const User &) = default;
|
||||
@ -345,8 +347,18 @@ class User final {
|
||||
/// @throw AuthException if unable to verify the password.
|
||||
bool CheckPassword(const std::string &password);
|
||||
|
||||
bool UpgradeHash(const std::string password) {
|
||||
if (!password_hash_) return false;
|
||||
if (password_hash_->IsSalted()) return false;
|
||||
|
||||
auto const algo = password_hash_->HashAlgo();
|
||||
UpdatePassword(password, algo);
|
||||
return true;
|
||||
}
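UpgradeHash is intended to run right after a successful password check, while the plaintext is still available, so an unsalted legacy hash can be re-hashed with a salt under the same algorithm. A hedged sketch of that login path; it assumes the User API above, and the persistence call is named only for illustration:

// Hedged sketch of the intended login-time hash upgrade; assumes the User API
// declared above, not a standalone program.
#include <string>
#include "auth/models.hpp"  // assumed include path

bool Login(memgraph::auth::User &user, const std::string &password) {
  if (!user.CheckPassword(password)) return false;  // verify against the stored (possibly unsalted) hash
  if (user.UpgradeHash(password)) {
    // The hash was re-computed with a salt using the same algorithm; the caller
    // is expected to persist the user afterwards (persistence call name assumed,
    // e.g. something like auth.SaveUser(user)).
  }
  return true;
}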
|
||||
|
||||
/// @throw AuthException if unable to set the password.
|
||||
void UpdatePassword(const std::optional<std::string> &password = std::nullopt);
|
||||
void UpdatePassword(const std::optional<std::string> &password = {},
|
||||
std::optional<PasswordHashAlgorithm> algo_override = std::nullopt);
|
||||
|
||||
void SetRole(const Role &role);
|
||||
|
||||
@ -381,7 +393,7 @@ class User final {
|
||||
|
||||
private:
|
||||
std::string username_;
|
||||
std::string password_hash_;
|
||||
std::optional<HashedPassword> password_hash_;
|
||||
Permissions permissions_;
|
||||
#ifdef MG_ENTERPRISE
|
||||
FineGrainedAccessHandler fine_grained_access_handler_;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -170,6 +170,7 @@ inline State HandleFailure(TSession &session, const std::exception &e) {
|
||||
spdlog::trace("Error trace: {}", p->trace());
|
||||
}
|
||||
session.encoder_buffer_.Clear();
|
||||
|
||||
auto code_message = ExceptionToErrorMessage(e);
|
||||
bool fail_sent = session.encoder_.MessageFailure({{"code", code_message.first}, {"message", code_message.second}});
|
||||
if (!fail_sent) {
|
||||
|
@ -44,7 +44,7 @@ class ResultStreamFaker {
|
||||
std::vector<memgraph::communication::bolt::Value> bvalues;
|
||||
bvalues.reserve(values.size());
|
||||
for (const auto &value : values) {
|
||||
auto maybe_value = memgraph::glue::ToBoltValue(value, *store_, memgraph::storage::View::NEW);
|
||||
auto maybe_value = memgraph::glue::ToBoltValue(value, store_, memgraph::storage::View::NEW);
|
||||
MG_ASSERT(maybe_value.HasValue());
|
||||
bvalues.push_back(std::move(*maybe_value));
|
||||
}
|
||||
@ -56,7 +56,7 @@ class ResultStreamFaker {
|
||||
void Summary(const std::map<std::string, memgraph::query::TypedValue> &summary) {
|
||||
std::map<std::string, memgraph::communication::bolt::Value> bsummary;
|
||||
for (const auto &item : summary) {
|
||||
auto maybe_value = memgraph::glue::ToBoltValue(item.second, *store_, memgraph::storage::View::NEW);
|
||||
auto maybe_value = memgraph::glue::ToBoltValue(item.second, store_, memgraph::storage::View::NEW);
|
||||
MG_ASSERT(maybe_value.HasValue());
|
||||
bsummary.insert({item.first, std::move(*maybe_value)});
|
||||
}
|
||||
|
29
src/coordination/CMakeLists.txt
Normal file
@ -0,0 +1,29 @@
|
||||
add_library(mg-coordination STATIC)
|
||||
add_library(mg::coordination ALIAS mg-coordination)
|
||||
target_sources(mg-coordination
|
||||
PUBLIC
|
||||
include/coordination/coordinator_client.hpp
|
||||
include/coordination/coordinator_state.hpp
|
||||
include/coordination/coordinator_rpc.hpp
|
||||
include/coordination/coordinator_server.hpp
|
||||
include/coordination/coordinator_config.hpp
|
||||
include/coordination/coordinator_exceptions.hpp
|
||||
include/coordination/coordinator_instance.hpp
|
||||
include/coordination/coordinator_slk.hpp
|
||||
include/coordination/coordinator_data.hpp
|
||||
include/coordination/constants.hpp
|
||||
include/coordination/failover_status.hpp
|
||||
include/coordination/coordinator_cluster_config.hpp
|
||||
|
||||
PRIVATE
|
||||
coordinator_client.cpp
|
||||
coordinator_state.cpp
|
||||
coordinator_rpc.cpp
|
||||
coordinator_server.cpp
|
||||
coordinator_data.cpp
|
||||
)
|
||||
target_include_directories(mg-coordination PUBLIC include)
|
||||
|
||||
target_link_libraries(mg-coordination
|
||||
PUBLIC mg::utils mg::rpc mg::slk mg::io mg::repl_coord_glue lib::rangev3
|
||||
)
|
109
src/coordination/coordinator_client.cpp
Normal file
@ -0,0 +1,109 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "coordination/coordinator_client.hpp"
|
||||
|
||||
#include "coordination/coordinator_config.hpp"
|
||||
#include "coordination/coordinator_rpc.hpp"
|
||||
#include "replication_coordination_glue/messages.hpp"
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
namespace {
|
||||
auto CreateClientContext(const memgraph::coordination::CoordinatorClientConfig &config)
|
||||
-> communication::ClientContext {
|
||||
return (config.ssl) ? communication::ClientContext{config.ssl->key_file, config.ssl->cert_file}
|
||||
: communication::ClientContext{};
|
||||
}
|
||||
} // namespace
|
||||
|
||||
CoordinatorClient::CoordinatorClient(CoordinatorData *coord_data, CoordinatorClientConfig config,
|
||||
HealthCheckCallback succ_cb, HealthCheckCallback fail_cb)
|
||||
: rpc_context_{CreateClientContext(config)},
|
||||
rpc_client_{io::network::Endpoint(io::network::Endpoint::needs_resolving, config.ip_address, config.port),
|
||||
&rpc_context_},
|
||||
config_{std::move(config)},
|
||||
coord_data_{coord_data},
|
||||
succ_cb_{std::move(succ_cb)},
|
||||
fail_cb_{std::move(fail_cb)} {}
|
||||
|
||||
auto CoordinatorClient::InstanceName() const -> std::string { return config_.instance_name; }
|
||||
auto CoordinatorClient::SocketAddress() const -> std::string { return rpc_client_.Endpoint().SocketAddress(); }
|
||||
|
||||
void CoordinatorClient::StartFrequentCheck() {
|
||||
MG_ASSERT(config_.health_check_frequency_sec > std::chrono::seconds(0),
|
||||
"Health check frequency must be greater than 0");
|
||||
|
||||
instance_checker_.Run(
|
||||
"Coord checker", config_.health_check_frequency_sec, [this, instance_name = config_.instance_name] {
|
||||
try {
|
||||
spdlog::trace("Sending frequent heartbeat to machine {} on {}", instance_name,
|
||||
rpc_client_.Endpoint().SocketAddress());
|
||||
auto stream{rpc_client_.Stream<memgraph::replication_coordination_glue::FrequentHeartbeatRpc>()};
|
||||
stream.AwaitResponse();
|
||||
succ_cb_(coord_data_, instance_name);
|
||||
} catch (const rpc::RpcFailedException &) {
|
||||
fail_cb_(coord_data_, instance_name);
|
||||
}
|
||||
});
|
||||
}
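StartFrequentCheck schedules a recurring heartbeat RPC and routes the outcome into succ_cb_ or fail_cb_. A standalone sketch of that schedule-and-dispatch loop using only the standard library; the project's scheduler and RPC stack are replaced by stand-ins:

// Standalone sketch of a periodic health check dispatching success/failure
// callbacks; std::jthread plus sleep stands in for the project's scheduler and
// the heartbeat RPC is simulated.
#include <chrono>
#include <functional>
#include <iostream>
#include <stop_token>
#include <string>
#include <thread>

class HealthChecker {
 public:
  using Callback = std::function<void(const std::string &)>;

  HealthChecker(std::string name, std::chrono::milliseconds period, Callback on_ok, Callback on_fail)
      : worker_{[=](std::stop_token st) {
          while (!st.stop_requested()) {
            if (SendHeartbeat()) on_ok(name); else on_fail(name);
            std::this_thread::sleep_for(period);
          }
        }} {}

 private:
  static bool SendHeartbeat() { return true; }  // stand-in for the FrequentHeartbeat RPC
  std::jthread worker_;
};

int main() {
  HealthChecker checker{"instance_1", std::chrono::milliseconds(100),
                        [](const std::string &n) { std::cout << n << " alive\n"; },
                        [](const std::string &n) { std::cout << n << " down\n"; }};
  std::this_thread::sleep_for(std::chrono::milliseconds(350));
}  // jthread requests stop and joins on destruction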
|
||||
|
||||
void CoordinatorClient::StopFrequentCheck() { instance_checker_.Stop(); }
|
||||
|
||||
void CoordinatorClient::PauseFrequentCheck() { instance_checker_.Pause(); }
|
||||
void CoordinatorClient::ResumeFrequentCheck() { instance_checker_.Resume(); }
|
||||
|
||||
auto CoordinatorClient::SetSuccCallback(HealthCheckCallback succ_cb) -> void { succ_cb_ = std::move(succ_cb); }
|
||||
auto CoordinatorClient::SetFailCallback(HealthCheckCallback fail_cb) -> void { fail_cb_ = std::move(fail_cb); }
|
||||
|
||||
auto CoordinatorClient::ReplicationClientInfo() const -> const CoordinatorClientConfig::ReplicationClientInfo & {
|
||||
return config_.replication_client_info;
|
||||
}
|
||||
|
||||
auto CoordinatorClient::ResetReplicationClientInfo() -> void {
|
||||
// TODO (antoniofilipovic) Sync with Andi on this one
|
||||
// config_.replication_client_info.reset();
|
||||
}
|
||||
|
||||
auto CoordinatorClient::SendPromoteReplicaToMainRpc(
|
||||
std::vector<CoordinatorClientConfig::ReplicationClientInfo> replication_clients_info) const -> bool {
|
||||
try {
|
||||
auto stream{rpc_client_.Stream<PromoteReplicaToMainRpc>(std::move(replication_clients_info))};
|
||||
if (!stream.AwaitResponse().success) {
|
||||
spdlog::error("Failed to receive successful RPC failover response!");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
} catch (const rpc::RpcFailedException &) {
|
||||
spdlog::error("RPC error occurred while sending failover RPC!");
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
auto CoordinatorClient::SendSetToReplicaRpc(CoordinatorClient::ReplClientInfo replication_client_info) const -> bool {
|
||||
try {
|
||||
auto stream{rpc_client_.Stream<SetMainToReplicaRpc>(std::move(replication_client_info))};
|
||||
if (!stream.AwaitResponse().success) {
|
||||
spdlog::error("Failed to set main to replica!");
|
||||
return false;
|
||||
}
|
||||
spdlog::info("Sent request RPC from coordinator to instance to set it as replica!");
|
||||
return true;
|
||||
} catch (const rpc::RpcFailedException &) {
|
||||
spdlog::error("Failed to send failover RPC from coordinator to new main!");
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
#endif
|
220
src/coordination/coordinator_data.cpp
Normal file
@ -0,0 +1,220 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "coordination/coordinator_instance.hpp"
|
||||
#include "coordination/register_main_replica_coordinator_status.hpp"
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "coordination/coordinator_data.hpp"
|
||||
|
||||
#include <range/v3/view.hpp>
|
||||
#include <shared_mutex>
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
CoordinatorData::CoordinatorData() {
|
||||
auto find_instance = [](CoordinatorData *coord_data, std::string_view instance_name) -> CoordinatorInstance & {
|
||||
auto instance = std::ranges::find_if(
|
||||
coord_data->registered_instances_,
|
||||
[instance_name](const CoordinatorInstance &instance) { return instance.InstanceName() == instance_name; });
|
||||
|
||||
MG_ASSERT(instance != coord_data->registered_instances_.end(), "Instance {} not found during callback!",
|
||||
instance_name);
|
||||
return *instance;
|
||||
};
|
||||
|
||||
replica_succ_cb_ = [find_instance](CoordinatorData *coord_data, std::string_view instance_name) -> void {
|
||||
auto lock = std::lock_guard{coord_data->coord_data_lock_};
|
||||
spdlog::trace("Instance {} performing replica successful callback", instance_name);
|
||||
auto &instance = find_instance(coord_data, instance_name);
|
||||
MG_ASSERT(instance.IsReplica(), "Instance {} is not a replica!", instance_name);
|
||||
instance.UpdateLastResponseTime();
|
||||
};
|
||||
|
||||
replica_fail_cb_ = [find_instance](CoordinatorData *coord_data, std::string_view instance_name) -> void {
|
||||
auto lock = std::lock_guard{coord_data->coord_data_lock_};
|
||||
spdlog::trace("Instance {} performing replica failure callback", instance_name);
|
||||
auto &instance = find_instance(coord_data, instance_name);
|
||||
MG_ASSERT(instance.IsReplica(), "Instance {} is not a replica!", instance_name);
|
||||
instance.UpdateInstanceStatus();
|
||||
};
|
||||
|
||||
main_succ_cb_ = [find_instance](CoordinatorData *coord_data, std::string_view instance_name) -> void {
|
||||
auto lock = std::lock_guard{coord_data->coord_data_lock_};
|
||||
spdlog::trace("Instance {} performing main successful callback", instance_name);
|
||||
auto &instance = find_instance(coord_data, instance_name);
|
||||
MG_ASSERT(instance.IsMain(), "Instance {} is not a main!", instance_name);
|
||||
instance.UpdateLastResponseTime();
|
||||
};
|
||||
|
||||
main_fail_cb_ = [this, find_instance](CoordinatorData *coord_data, std::string_view instance_name) -> void {
|
||||
auto lock = std::lock_guard{coord_data->coord_data_lock_};
|
||||
spdlog::trace("Instance {} performing main failure callback", instance_name);
|
||||
auto &instance = find_instance(coord_data, instance_name);
|
||||
MG_ASSERT(instance.IsMain(), "Instance {} is not a main!", instance_name);
|
||||
if (bool main_alive = instance.UpdateInstanceStatus(); !main_alive) {
|
||||
spdlog::info("Main instance {} is not alive, starting automatic failover", instance_name);
|
||||
switch (auto failover_status = DoFailover(); failover_status) {
|
||||
using enum DoFailoverStatus;
|
||||
case ALL_REPLICAS_DOWN:
|
||||
spdlog::warn("Failover aborted since all replicas are down!");
|
||||
break;
|
||||
case MAIN_ALIVE:
|
||||
spdlog::warn("Failover aborted since main is alive!");
|
||||
break;
|
||||
case RPC_FAILED:
|
||||
spdlog::warn("Failover aborted since promoting replica to main failed!");
|
||||
break;
|
||||
case SUCCESS:
|
||||
break;
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
auto CoordinatorData::DoFailover() -> DoFailoverStatus {
|
||||
using ReplicationClientInfo = CoordinatorClientConfig::ReplicationClientInfo;
|
||||
|
||||
auto replica_instances = registered_instances_ | ranges::views::filter(&CoordinatorInstance::IsReplica);
|
||||
|
||||
auto chosen_replica_instance = std::ranges::find_if(replica_instances, &CoordinatorInstance::IsAlive);
|
||||
if (chosen_replica_instance == replica_instances.end()) {
|
||||
return DoFailoverStatus::ALL_REPLICAS_DOWN;
|
||||
}
|
||||
|
||||
chosen_replica_instance->PrepareForFailover();
|
||||
|
||||
std::vector<ReplicationClientInfo> repl_clients_info;
|
||||
repl_clients_info.reserve(std::ranges::distance(replica_instances));
|
||||
|
||||
auto const not_chosen_replica_instance = [&chosen_replica_instance](const CoordinatorInstance &instance) {
|
||||
return instance != *chosen_replica_instance;
|
||||
};
|
||||
auto const not_main = [](const CoordinatorInstance &instance) { return !instance.IsMain(); };
|
||||
|
||||
// TODO (antoniofilipovic): Should we send also data on old MAIN???
|
||||
// TODO: (andi) Don't send replicas which aren't alive
|
||||
for (const auto &unchosen_replica_instance :
|
||||
replica_instances | ranges::views::filter(not_chosen_replica_instance) | ranges::views::filter(not_main)) {
|
||||
repl_clients_info.emplace_back(unchosen_replica_instance.client_.ReplicationClientInfo());
|
||||
}
|
||||
|
||||
if (!chosen_replica_instance->client_.SendPromoteReplicaToMainRpc(std::move(repl_clients_info))) {
|
||||
chosen_replica_instance->RestoreAfterFailedFailover();
|
||||
return DoFailoverStatus::RPC_FAILED;
|
||||
}
|
||||
|
||||
auto old_main = std::ranges::find_if(registered_instances_, &CoordinatorInstance::IsMain);
|
||||
// TODO: (andi) For performing restoration we will have to improve this
|
||||
old_main->client_.PauseFrequentCheck();
|
||||
|
||||
chosen_replica_instance->PostFailover(main_succ_cb_, main_fail_cb_);
|
||||
|
||||
return DoFailoverStatus::SUCCESS;
|
||||
}
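DoFailover narrows the registered instances to replicas with views::filter and then picks the first alive one with find_if. A standalone sketch of that selection step over a plain struct, using only C++20 ranges:

// Standalone sketch of the failover candidate selection: filter to replicas,
// then pick the first alive one.
#include <algorithm>
#include <iostream>
#include <ranges>
#include <string>
#include <vector>

struct Instance {
  std::string name;
  bool is_replica;
  bool is_alive;
};

int main() {
  std::vector<Instance> registered{
      {"main_1", false, false},  // dead main is what triggers the failover
      {"replica_1", true, false},
      {"replica_2", true, true},
  };

  auto replicas = registered | std::views::filter(&Instance::is_replica);
  auto chosen = std::ranges::find_if(replicas, &Instance::is_alive);

  if (chosen == replicas.end()) {
    std::cout << "all replicas down\n";
  } else {
    std::cout << "promoting " << chosen->name << '\n';  // promoting replica_2
  }
}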
|
||||
|
||||
auto CoordinatorData::ShowInstances() const -> std::vector<CoordinatorInstanceStatus> {
|
||||
std::vector<CoordinatorInstanceStatus> instances_status;
|
||||
instances_status.reserve(registered_instances_.size());
|
||||
|
||||
auto const stringify_repl_role = [](const CoordinatorInstance &instance) -> std::string {
|
||||
if (!instance.IsAlive()) return "";
|
||||
if (instance.IsMain()) return "main";
|
||||
return "replica";
|
||||
};
|
||||
|
||||
auto const instance_to_status =
|
||||
[&stringify_repl_role](const CoordinatorInstance &instance) -> CoordinatorInstanceStatus {
|
||||
return {.instance_name = instance.InstanceName(),
|
||||
.socket_address = instance.SocketAddress(),
|
||||
.replication_role = stringify_repl_role(instance),
|
||||
.is_alive = instance.IsAlive()};
|
||||
};
|
||||
|
||||
{
|
||||
auto lock = std::shared_lock{coord_data_lock_};
|
||||
std::ranges::transform(registered_instances_, std::back_inserter(instances_status), instance_to_status);
|
||||
}
|
||||
|
||||
return instances_status;
|
||||
}
|
||||
|
||||
auto CoordinatorData::SetInstanceToMain(std::string instance_name) -> SetInstanceToMainCoordinatorStatus {
|
||||
auto lock = std::lock_guard{coord_data_lock_};
|
||||
|
||||
// Find replica we already registered
|
||||
auto registered_replica = std::find_if(
|
||||
registered_instances_.begin(), registered_instances_.end(),
|
||||
[instance_name](const CoordinatorInstance &instance) { return instance.InstanceName() == instance_name; });
|
||||
|
||||
// if replica not found...
|
||||
if (registered_replica == registered_instances_.end()) {
|
||||
spdlog::error("You didn't register instance with given name {}", instance_name);
|
||||
return SetInstanceToMainCoordinatorStatus::NO_INSTANCE_WITH_NAME;
|
||||
}
|
||||
|
||||
registered_replica->client_.PauseFrequentCheck();
|
||||
|
||||
std::vector<CoordinatorClientConfig::ReplicationClientInfo> repl_clients_info;
|
||||
repl_clients_info.reserve(registered_instances_.size() - 1);
|
||||
std::ranges::for_each(registered_instances_,
|
||||
[registered_replica, &repl_clients_info](const CoordinatorInstance &replica) {
|
||||
if (replica != *registered_replica) {
|
||||
repl_clients_info.emplace_back(replica.client_.ReplicationClientInfo());
|
||||
}
|
||||
});
|
||||
|
||||
// PROMOTE REPLICA TO MAIN
|
||||
// THIS SHOULD FAIL HERE IF IT IS DOWN
|
||||
if (auto result = registered_replica->client_.SendPromoteReplicaToMainRpc(std::move(repl_clients_info)); !result) {
|
||||
registered_replica->client_.ResumeFrequentCheck();
|
||||
return SetInstanceToMainCoordinatorStatus::COULD_NOT_PROMOTE_TO_MAIN;
|
||||
}
|
||||
|
||||
registered_replica->client_.SetSuccCallback(main_succ_cb_);
|
||||
registered_replica->client_.SetFailCallback(main_fail_cb_);
|
||||
registered_replica->replication_role_ = replication_coordination_glue::ReplicationRole::MAIN;
|
||||
registered_replica->client_.ResumeFrequentCheck();
|
||||
|
||||
return SetInstanceToMainCoordinatorStatus::SUCCESS;
|
||||
}
|
||||
|
||||
auto CoordinatorData::RegisterInstance(CoordinatorClientConfig config) -> RegisterInstanceCoordinatorStatus {
|
||||
auto lock = std::lock_guard{coord_data_lock_};
|
||||
if (std::ranges::any_of(registered_instances_, [&config](const CoordinatorInstance &instance) {
|
||||
return instance.InstanceName() == config.instance_name;
|
||||
})) {
|
||||
return RegisterInstanceCoordinatorStatus::NAME_EXISTS;
|
||||
}
|
||||
|
||||
if (std::ranges::any_of(registered_instances_, [&config](const CoordinatorInstance &instance) {
|
||||
spdlog::trace("Comparing {} with {}", instance.SocketAddress(), config.SocketAddress());
|
||||
return instance.SocketAddress() == config.SocketAddress();
|
||||
})) {
|
||||
return RegisterInstanceCoordinatorStatus::END_POINT_EXISTS;
|
||||
}
|
||||
|
||||
CoordinatorClientConfig::ReplicationClientInfo replication_client_info_copy = config.replication_client_info;
|
||||
|
||||
// TODO (antoniofilipovic) create and then push back
|
||||
auto *instance = ®istered_instances_.emplace_back(this, std::move(config), replica_succ_cb_, replica_fail_cb_,
|
||||
replication_coordination_glue::ReplicationRole::REPLICA);
|
||||
if (auto res = instance->client_.SendSetToReplicaRpc(replication_client_info_copy); !res) {
|
||||
return RegisterInstanceCoordinatorStatus::RPC_FAILED;
|
||||
}
|
||||
|
||||
instance->client_.StartFrequentCheck();
|
||||
|
||||
return RegisterInstanceCoordinatorStatus::SUCCESS;
|
||||
}
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
#endif
|
107
src/coordination/coordinator_rpc.cpp
Normal file
@ -0,0 +1,107 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "coordination/coordinator_rpc.hpp"
|
||||
|
||||
#include "coordination/coordinator_slk.hpp"
|
||||
#include "slk/serialization.hpp"
|
||||
|
||||
namespace memgraph {
|
||||
|
||||
namespace coordination {
|
||||
|
||||
void PromoteReplicaToMainReq::Save(const PromoteReplicaToMainReq &self, memgraph::slk::Builder *builder) {
|
||||
memgraph::slk::Save(self, builder);
|
||||
}
|
||||
|
||||
void PromoteReplicaToMainReq::Load(PromoteReplicaToMainReq *self, memgraph::slk::Reader *reader) {
|
||||
memgraph::slk::Load(self, reader);
|
||||
}
|
||||
|
||||
void PromoteReplicaToMainRes::Save(const PromoteReplicaToMainRes &self, memgraph::slk::Builder *builder) {
|
||||
memgraph::slk::Save(self, builder);
|
||||
}
|
||||
|
||||
void PromoteReplicaToMainRes::Load(PromoteReplicaToMainRes *self, memgraph::slk::Reader *reader) {
|
||||
memgraph::slk::Load(self, reader);
|
||||
}
|
||||
|
||||
void SetMainToReplicaReq::Save(const SetMainToReplicaReq &self, memgraph::slk::Builder *builder) {
|
||||
memgraph::slk::Save(self, builder);
|
||||
}
|
||||
|
||||
void SetMainToReplicaReq::Load(SetMainToReplicaReq *self, memgraph::slk::Reader *reader) {
|
||||
memgraph::slk::Load(self, reader);
|
||||
}
|
||||
|
||||
void SetMainToReplicaRes::Save(const SetMainToReplicaRes &self, memgraph::slk::Builder *builder) {
|
||||
memgraph::slk::Save(self, builder);
|
||||
}
|
||||
|
||||
void SetMainToReplicaRes::Load(SetMainToReplicaRes *self, memgraph::slk::Reader *reader) {
|
||||
memgraph::slk::Load(self, reader);
|
||||
}
|
||||
|
||||
} // namespace coordination
|
||||
|
||||
constexpr utils::TypeInfo coordination::PromoteReplicaToMainReq::kType{utils::TypeId::COORD_FAILOVER_REQ,
|
||||
"CoordPromoteReplicaToMainReq", nullptr};
|
||||
|
||||
constexpr utils::TypeInfo coordination::PromoteReplicaToMainRes::kType{utils::TypeId::COORD_FAILOVER_RES,
|
||||
"CoordPromoteReplicaToMainRes", nullptr};
|
||||
|
||||
constexpr utils::TypeInfo coordination::SetMainToReplicaReq::kType{utils::TypeId::COORD_SET_REPL_MAIN_REQ,
|
||||
"CoordSetReplMainReq", nullptr};
|
||||
|
||||
constexpr utils::TypeInfo coordination::SetMainToReplicaRes::kType{utils::TypeId::COORD_SET_REPL_MAIN_RES,
|
||||
"CoordSetReplMainRes", nullptr};
|
||||
|
||||
namespace slk {
|
||||
|
||||
void Save(const memgraph::coordination::PromoteReplicaToMainRes &self, memgraph::slk::Builder *builder) {
|
||||
memgraph::slk::Save(self.success, builder);
|
||||
}
|
||||
|
||||
void Load(memgraph::coordination::PromoteReplicaToMainRes *self, memgraph::slk::Reader *reader) {
|
||||
memgraph::slk::Load(&self->success, reader);
|
||||
}
|
||||
|
||||
void Save(const memgraph::coordination::PromoteReplicaToMainReq &self, memgraph::slk::Builder *builder) {
|
||||
memgraph::slk::Save(self.replication_clients_info, builder);
|
||||
}
|
||||
|
||||
void Load(memgraph::coordination::PromoteReplicaToMainReq *self, memgraph::slk::Reader *reader) {
|
||||
memgraph::slk::Load(&self->replication_clients_info, reader);
|
||||
}
|
||||
|
||||
void Save(const memgraph::coordination::SetMainToReplicaReq &self, memgraph::slk::Builder *builder) {
|
||||
memgraph::slk::Save(self.replication_client_info, builder);
|
||||
}
|
||||
|
||||
void Load(memgraph::coordination::SetMainToReplicaReq *self, memgraph::slk::Reader *reader) {
|
||||
memgraph::slk::Load(&self->replication_client_info, reader);
|
||||
}
|
||||
|
||||
void Save(const memgraph::coordination::SetMainToReplicaRes &self, memgraph::slk::Builder *builder) {
|
||||
memgraph::slk::Save(self.success, builder);
|
||||
}
|
||||
|
||||
void Load(memgraph::coordination::SetMainToReplicaRes *self, memgraph::slk::Reader *reader) {
|
||||
memgraph::slk::Load(&self->success, reader);
|
||||
}
|
||||
|
||||
} // namespace slk
|
||||
|
||||
} // namespace memgraph
|
||||
|
||||
#endif
|
57
src/coordination/coordinator_server.cpp
Normal file
@ -0,0 +1,57 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_server.hpp"
#include "replication_coordination_glue/messages.hpp"

namespace memgraph::coordination {

namespace {

auto CreateServerContext(const memgraph::coordination::CoordinatorServerConfig &config)
    -> communication::ServerContext {
  return (config.ssl) ? communication::ServerContext{config.ssl->key_file, config.ssl->cert_file, config.ssl->ca_file,
                                                     config.ssl->verify_peer}
                      : communication::ServerContext{};
}

// NOTE: The coordinator server doesn't need more than 1 processing thread - each replica can
// have only a single coordinator server. Also, the single-threaded guarantee
// simplifies the rest of the implementation.
constexpr auto kCoordinatorServerThreads = 1;

} // namespace

CoordinatorServer::CoordinatorServer(const CoordinatorServerConfig &config)
    : rpc_server_context_{CreateServerContext(config)},
      rpc_server_{io::network::Endpoint{config.ip_address, config.port}, &rpc_server_context_,
                  kCoordinatorServerThreads} {
  rpc_server_.Register<replication_coordination_glue::FrequentHeartbeatRpc>([](auto *req_reader, auto *res_builder) {
    spdlog::debug("Received FrequentHeartbeatRpc on coordinator server");
    replication_coordination_glue::FrequentHeartbeatHandler(req_reader, res_builder);
  });
}

CoordinatorServer::~CoordinatorServer() {
  if (rpc_server_.IsRunning()) {
    auto const &endpoint = rpc_server_.endpoint();
    spdlog::trace("Closing coordinator server on {}:{}", endpoint.address, endpoint.port);
    rpc_server_.Shutdown();
  }
  rpc_server_.AwaitShutdown();
}

bool CoordinatorServer::Start() { return rpc_server_.Start(); }

} // namespace memgraph::coordination
#endif
89
src/coordination/coordinator_state.cpp
Normal file
@ -0,0 +1,89 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_state.hpp"

#include "coordination/coordinator_config.hpp"
#include "coordination/register_main_replica_coordinator_status.hpp"
#include "flags/replication.hpp"
#include "spdlog/spdlog.h"
#include "utils/logging.hpp"
#include "utils/variant_helpers.hpp"

#include <algorithm>

namespace memgraph::coordination {

CoordinatorState::CoordinatorState() {
  MG_ASSERT(!(FLAGS_coordinator && FLAGS_coordinator_server_port),
            "Instance cannot be a coordinator and have registered coordinator server.");

  spdlog::info("Executing coordinator constructor");
  if (FLAGS_coordinator_server_port) {
    spdlog::info("Coordinator server port set");
    auto const config = CoordinatorServerConfig{
        .ip_address = kDefaultReplicationServerIp,
        .port = static_cast<uint16_t>(FLAGS_coordinator_server_port),
    };
    spdlog::info("Executing coordinator constructor main replica");

    data_ = CoordinatorMainReplicaData{.coordinator_server_ = std::make_unique<CoordinatorServer>(config)};
  }
}

auto CoordinatorState::RegisterInstance(CoordinatorClientConfig config) -> RegisterInstanceCoordinatorStatus {
  MG_ASSERT(std::holds_alternative<CoordinatorData>(data_),
            "Coordinator cannot register replica since variant holds wrong alternative");

  return std::visit(
      memgraph::utils::Overloaded{
          [](const CoordinatorMainReplicaData & /*coordinator_main_replica_data*/) {
            return RegisterInstanceCoordinatorStatus::NOT_COORDINATOR;
          },
          [config](CoordinatorData &coordinator_data) { return coordinator_data.RegisterInstance(config); }},
      data_);
}

auto CoordinatorState::SetInstanceToMain(std::string instance_name) -> SetInstanceToMainCoordinatorStatus {
  MG_ASSERT(std::holds_alternative<CoordinatorData>(data_),
            "Coordinator cannot register replica since variant holds wrong alternative");

  return std::visit(
      memgraph::utils::Overloaded{[](const CoordinatorMainReplicaData & /*coordinator_main_replica_data*/) {
                                    return SetInstanceToMainCoordinatorStatus::NOT_COORDINATOR;
                                  },
                                  [&instance_name](CoordinatorData &coordinator_data) {
                                    return coordinator_data.SetInstanceToMain(instance_name);
                                  }},
      data_);
}

auto CoordinatorState::ShowInstances() const -> std::vector<CoordinatorInstanceStatus> {
  MG_ASSERT(std::holds_alternative<CoordinatorData>(data_),
            "Can't call show instances on data_, as variant holds wrong alternative");
  return std::get<CoordinatorData>(data_).ShowInstances();
}

[[nodiscard]] auto CoordinatorState::DoFailover() -> DoFailoverStatus {
  MG_ASSERT(std::holds_alternative<CoordinatorData>(data_), "Cannot do failover since variant holds wrong alternative");
  auto &coord_state = std::get<CoordinatorData>(data_);
  return coord_state.DoFailover();
}

auto CoordinatorState::GetCoordinatorServer() const -> CoordinatorServer & {
  MG_ASSERT(std::holds_alternative<CoordinatorMainReplicaData>(data_),
            "Cannot get coordinator server since variant holds wrong alternative");
  return *std::get<CoordinatorMainReplicaData>(data_).coordinator_server_;
}
} // namespace memgraph::coordination
#endif
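Editor's note: CoordinatorState dispatches every call through std::visit with memgraph::utils::Overloaded over the data_ variant. The following is a self-contained sketch of that dispatch pattern, with a local Overloaded helper standing in for utils/variant_helpers.hpp and placeholder role types standing in for CoordinatorData and CoordinatorMainReplicaData.

// Standalone illustration of the std::visit + Overloaded dispatch used above;
// the helper and role types here are stand-ins, not the real classes.
#include <iostream>
#include <string>
#include <variant>

template <class... Ts>
struct Overloaded : Ts... {
  using Ts::operator()...;
};
template <class... Ts>
Overloaded(Ts...) -> Overloaded<Ts...>;

struct CoordinatorRole {};   // plays the role of CoordinatorData
struct DataInstanceRole {};  // plays the role of CoordinatorMainReplicaData

int main() {
  std::variant<CoordinatorRole, DataInstanceRole> state = CoordinatorRole{};

  auto const answer = std::visit(
      Overloaded{[](CoordinatorRole const &) { return std::string{"handled by coordinator"}; },
                 [](DataInstanceRole const &) { return std::string{"NOT_COORDINATOR"}; }},
      state);

  std::cout << answer << '\n';  // prints "handled by coordinator"
}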
22
src/coordination/include/coordination/constants.hpp
Normal file
@ -0,0 +1,22 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

namespace memgraph::coordination {

#ifdef MG_EXPERIMENTAL_HIGH_AVAILABILITY
constexpr bool allow_ha = true;
#else
constexpr bool allow_ha = false;
#endif

} // namespace memgraph::coordination
77
src/coordination/include/coordination/coordinator_client.hpp
Normal file
@ -0,0 +1,77 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"
#include "rpc/client.hpp"
#include "utils/scheduler.hpp"

namespace memgraph::coordination {

class CoordinatorData;
using HealthCheckCallback = std::function<void(CoordinatorData *, std::string_view)>;

class CoordinatorClient {
 public:
  using ReplClientInfo = CoordinatorClientConfig::ReplicationClientInfo;
  using ReplicationClientsInfo = std::vector<ReplClientInfo>;

  explicit CoordinatorClient(CoordinatorData *coord_data_, CoordinatorClientConfig config, HealthCheckCallback succ_cb,
                             HealthCheckCallback fail_cb);

  ~CoordinatorClient() = default;

  CoordinatorClient(CoordinatorClient &) = delete;
  CoordinatorClient &operator=(CoordinatorClient const &) = delete;

  CoordinatorClient(CoordinatorClient &&) noexcept = delete;
  CoordinatorClient &operator=(CoordinatorClient &&) noexcept = delete;

  void StartFrequentCheck();
  void StopFrequentCheck();
  void PauseFrequentCheck();
  void ResumeFrequentCheck();

  auto InstanceName() const -> std::string;
  auto SocketAddress() const -> std::string;

  auto SendPromoteReplicaToMainRpc(ReplicationClientsInfo replication_clients_info) const -> bool;

  auto ReplicationClientInfo() const -> const ReplClientInfo &;
  auto ResetReplicationClientInfo() -> void;

  auto SendSetToReplicaRpc(ReplClientInfo replication_client_info) const -> bool;

  auto SetSuccCallback(HealthCheckCallback succ_cb) -> void;
  auto SetFailCallback(HealthCheckCallback fail_cb) -> void;

  friend bool operator==(CoordinatorClient const &first, CoordinatorClient const &second) {
    return first.config_ == second.config_;
  }

 private:
  utils::Scheduler instance_checker_;

  // TODO: (andi) Pimpl?
  communication::ClientContext rpc_context_;
  mutable rpc::Client rpc_client_;

  CoordinatorClientConfig config_;
  CoordinatorData *coord_data_;
  HealthCheckCallback succ_cb_;
  HealthCheckCallback fail_cb_;
};

} // namespace memgraph::coordination
#endif
@ -0,0 +1,22 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE
namespace memgraph::coordination {

struct CoordinatorClusterConfig {
  static constexpr int alive_response_time_difference_sec_{5};
};

} // namespace memgraph::coordination
#endif
78
src/coordination/include/coordination/coordinator_config.hpp
Normal file
@ -0,0 +1,78 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "replication_coordination_glue/mode.hpp"

#include <chrono>
#include <cstdint>
#include <optional>
#include <string>

namespace memgraph::coordination {

inline constexpr auto *kDefaultReplicationServerIp = "0.0.0.0";

struct CoordinatorClientConfig {
  std::string instance_name;
  std::string ip_address;
  uint16_t port{};
  std::chrono::seconds health_check_frequency_sec{1};

  auto SocketAddress() const -> std::string { return ip_address + ":" + std::to_string(port); }

  // Info which coordinator will send to new main when performing failover
  struct ReplicationClientInfo {
    // Must be the same as CoordinatorClientConfig's instance_name
    std::string instance_name;
    replication_coordination_glue::ReplicationMode replication_mode{};
    std::string replication_ip_address;
    uint16_t replication_port{};

    friend bool operator==(ReplicationClientInfo const &, ReplicationClientInfo const &) = default;
  };

  // Each instance has replication config in case it fails
  ReplicationClientInfo replication_client_info;

  struct SSL {
    std::string key_file;
    std::string cert_file;

    friend bool operator==(const SSL &, const SSL &) = default;
  };

  std::optional<SSL> ssl;

  friend bool operator==(CoordinatorClientConfig const &, CoordinatorClientConfig const &) = default;
};

struct CoordinatorServerConfig {
  std::string ip_address;
  uint16_t port{};
  struct SSL {
    std::string key_file;
    std::string cert_file;
    std::string ca_file;
    bool verify_peer{};
    friend bool operator==(SSL const &, SSL const &) = default;
  };

  std::optional<SSL> ssl;

  friend bool operator==(CoordinatorServerConfig const &, CoordinatorServerConfig const &) = default;
};

} // namespace memgraph::coordination
#endif
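Editor's note: a quick sketch of how these config structs compose, using only the fields and helpers declared above. The concrete addresses and ports are illustrative assumptions.

// Sketch only: demonstrates SocketAddress() and the optional SSL block; the
// values below are made up for illustration.
#include <cassert>
#include "coordination/coordinator_config.hpp"

void ExampleConfigs() {
  using namespace memgraph::coordination;

  CoordinatorClientConfig client_config{.instance_name = "instance_1", .ip_address = "10.0.0.5", .port = 10011};
  // SocketAddress() simply joins the ip and the port.
  assert(client_config.SocketAddress() == "10.0.0.5:10011");

  CoordinatorServerConfig server_config{.ip_address = kDefaultReplicationServerIp, .port = 10111};
  // ssl is std::optional; leaving it empty makes CreateServerContext() (see
  // coordinator_server.cpp above) build a non-TLS ServerContext.
  assert(!server_config.ssl.has_value());
}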
49
src/coordination/include/coordination/coordinator_data.hpp
Normal file
@ -0,0 +1,49 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_instance.hpp"
#include "coordination/coordinator_instance_status.hpp"
#include "coordination/coordinator_server.hpp"
#include "coordination/failover_status.hpp"
#include "coordination/register_main_replica_coordinator_status.hpp"
#include "utils/rw_lock.hpp"

#include <list>

namespace memgraph::coordination {
class CoordinatorData {
 public:
  CoordinatorData();

  [[nodiscard]] auto DoFailover() -> DoFailoverStatus;

  [[nodiscard]] auto RegisterInstance(CoordinatorClientConfig config) -> RegisterInstanceCoordinatorStatus;
  [[nodiscard]] auto SetInstanceToMain(std::string instance_name) -> SetInstanceToMainCoordinatorStatus;

  auto ShowInstances() const -> std::vector<CoordinatorInstanceStatus>;

 private:
  mutable utils::RWLock coord_data_lock_{utils::RWLock::Priority::READ};
  HealthCheckCallback main_succ_cb_, main_fail_cb_, replica_succ_cb_, replica_fail_cb_;
  // Must be std::list because we rely on pointer stability
  std::list<CoordinatorInstance> registered_instances_;
};

struct CoordinatorMainReplicaData {
  std::unique_ptr<CoordinatorServer> coordinator_server_;
};

} // namespace memgraph::coordination
#endif
@ -0,0 +1,32 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "utils/exceptions.hpp"

namespace memgraph::coordination {
class CoordinatorFailoverException final : public utils::BasicException {
 public:
  explicit CoordinatorFailoverException(const std::string_view what) noexcept
      : BasicException("Failover didn't complete successfully: " + std::string(what)) {}

  template <class... Args>
  explicit CoordinatorFailoverException(fmt::format_string<Args...> fmt, Args &&...args) noexcept
      : CoordinatorFailoverException(fmt::format(fmt, std::forward<Args>(args)...)) {}

  SPECIALIZE_GET_EXCEPTION_NAME(CoordinatorFailoverException)
};

} // namespace memgraph::coordination
#endif
@ -0,0 +1,77 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_client.hpp"
#include "coordination/coordinator_cluster_config.hpp"
#include "replication_coordination_glue/role.hpp"

namespace memgraph::coordination {

class CoordinatorData;

class CoordinatorInstance {
 public:
  CoordinatorInstance(CoordinatorData *data, CoordinatorClientConfig config, HealthCheckCallback succ_cb,
                      HealthCheckCallback fail_cb, replication_coordination_glue::ReplicationRole replication_role)
      : client_(data, std::move(config), std::move(succ_cb), std::move(fail_cb)),
        replication_role_(replication_role),
        is_alive_(true) {}

  CoordinatorInstance(CoordinatorInstance const &other) = delete;
  CoordinatorInstance &operator=(CoordinatorInstance const &other) = delete;
  CoordinatorInstance(CoordinatorInstance &&other) noexcept = delete;
  CoordinatorInstance &operator=(CoordinatorInstance &&other) noexcept = delete;
  ~CoordinatorInstance() = default;

  auto UpdateInstanceStatus() -> bool {
    is_alive_ = std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now() - last_response_time_)
                    .count() < CoordinatorClusterConfig::alive_response_time_difference_sec_;
    return is_alive_;
  }
  auto UpdateLastResponseTime() -> void { last_response_time_ = std::chrono::system_clock::now(); }

  auto InstanceName() const -> std::string { return client_.InstanceName(); }
  auto SocketAddress() const -> std::string { return client_.SocketAddress(); }
  auto IsAlive() const -> bool { return is_alive_; }

  auto IsReplica() const -> bool {
    return replication_role_ == replication_coordination_glue::ReplicationRole::REPLICA;
  }
  auto IsMain() const -> bool { return replication_role_ == replication_coordination_glue::ReplicationRole::MAIN; }

  auto PrepareForFailover() -> void { client_.PauseFrequentCheck(); }
  auto RestoreAfterFailedFailover() -> void { client_.ResumeFrequentCheck(); }

  auto PostFailover(HealthCheckCallback main_succ_cb, HealthCheckCallback main_fail_cb) -> void {
    replication_role_ = replication_coordination_glue::ReplicationRole::MAIN;
    client_.SetSuccCallback(std::move(main_succ_cb));
    client_.SetFailCallback(std::move(main_fail_cb));
    // Discussed with Andi: we shouldn't delete this - what if this MAIN fails again?
    // client_.ResetReplicationClientInfo();
    client_.ResumeFrequentCheck();
  }

  CoordinatorClient client_;
  replication_coordination_glue::ReplicationRole replication_role_;
  std::chrono::system_clock::time_point last_response_time_{};
  bool is_alive_{false};

  friend bool operator==(CoordinatorInstance const &first, CoordinatorInstance const &second) {
    return first.client_ == second.client_ && first.replication_role_ == second.replication_role_;
  }
};

} // namespace memgraph::coordination
#endif
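Editor's note: the liveness rule in UpdateInstanceStatus() above is just "last response seen within alive_response_time_difference_sec_ (5 s)". A standalone sketch of that arithmetic, with a local constant mirroring CoordinatorClusterConfig so it runs without the rest of the codebase:

// Standalone sketch of the liveness check; the constant mirrors
// CoordinatorClusterConfig::alive_response_time_difference_sec_.
#include <chrono>
#include <iostream>

int main() {
  using std::chrono::system_clock;
  constexpr int kAliveResponseTimeDifferenceSec = 5;  // same threshold as above

  // Pretend the last heartbeat response arrived 3 seconds ago.
  auto last_response_time = system_clock::now() - std::chrono::seconds(3);

  bool const is_alive =
      std::chrono::duration_cast<std::chrono::seconds>(system_clock::now() - last_response_time).count() <
      kAliveResponseTimeDifferenceSec;

  std::cout << std::boolalpha << is_alive << '\n';  // true: 3 s is within the 5 s window
}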
@ -0,0 +1,31 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "io/network/endpoint.hpp"

#include <string_view>

namespace memgraph::coordination {

struct CoordinatorInstanceStatus {
  std::string instance_name;
  std::string socket_address;
  std::string replication_role;
  bool is_alive;
};

} // namespace memgraph::coordination

#endif
104
src/coordination/include/coordination/coordinator_rpc.hpp
Normal file
@ -0,0 +1,104 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"
#include "rpc/messages.hpp"
#include "slk/serialization.hpp"

namespace memgraph::coordination {

struct PromoteReplicaToMainReq {
  static const utils::TypeInfo kType;
  static const utils::TypeInfo &GetTypeInfo() { return kType; }

  static void Load(PromoteReplicaToMainReq *self, memgraph::slk::Reader *reader);
  static void Save(const PromoteReplicaToMainReq &self, memgraph::slk::Builder *builder);

  explicit PromoteReplicaToMainReq(std::vector<CoordinatorClientConfig::ReplicationClientInfo> replication_clients_info)
      : replication_clients_info(std::move(replication_clients_info)) {}
  PromoteReplicaToMainReq() = default;

  std::vector<CoordinatorClientConfig::ReplicationClientInfo> replication_clients_info;
};

struct PromoteReplicaToMainRes {
  static const utils::TypeInfo kType;
  static const utils::TypeInfo &GetTypeInfo() { return kType; }

  static void Load(PromoteReplicaToMainRes *self, memgraph::slk::Reader *reader);
  static void Save(const PromoteReplicaToMainRes &self, memgraph::slk::Builder *builder);

  explicit PromoteReplicaToMainRes(bool success) : success(success) {}
  PromoteReplicaToMainRes() = default;

  bool success;
};

using PromoteReplicaToMainRpc = rpc::RequestResponse<PromoteReplicaToMainReq, PromoteReplicaToMainRes>;

struct SetMainToReplicaReq {
  static const utils::TypeInfo kType;
  static const utils::TypeInfo &GetTypeInfo() { return kType; }

  static void Load(SetMainToReplicaReq *self, memgraph::slk::Reader *reader);
  static void Save(const SetMainToReplicaReq &self, memgraph::slk::Builder *builder);

  explicit SetMainToReplicaReq(CoordinatorClientConfig::ReplicationClientInfo replication_client_info)
      : replication_client_info(std::move(replication_client_info)) {}

  SetMainToReplicaReq() = default;

  CoordinatorClientConfig::ReplicationClientInfo replication_client_info;
};

struct SetMainToReplicaRes {
  static const utils::TypeInfo kType;
  static const utils::TypeInfo &GetTypeInfo() { return kType; }

  static void Load(SetMainToReplicaRes *self, memgraph::slk::Reader *reader);
  static void Save(const SetMainToReplicaRes &self, memgraph::slk::Builder *builder);

  explicit SetMainToReplicaRes(bool success) : success(success) {}
  SetMainToReplicaRes() = default;

  bool success;
};

using SetMainToReplicaRpc = rpc::RequestResponse<SetMainToReplicaReq, SetMainToReplicaRes>;

} // namespace memgraph::coordination

// SLK serialization declarations
namespace memgraph::slk {

void Save(const memgraph::coordination::PromoteReplicaToMainRes &self, memgraph::slk::Builder *builder);

void Load(memgraph::coordination::PromoteReplicaToMainRes *self, memgraph::slk::Reader *reader);

void Save(const memgraph::coordination::PromoteReplicaToMainReq &self, memgraph::slk::Builder *builder);

void Load(memgraph::coordination::PromoteReplicaToMainReq *self, memgraph::slk::Reader *reader);

void Save(const memgraph::coordination::SetMainToReplicaRes &self, memgraph::slk::Builder *builder);

void Load(memgraph::coordination::SetMainToReplicaRes *self, memgraph::slk::Reader *reader);

void Save(const memgraph::coordination::SetMainToReplicaReq &self, memgraph::slk::Builder *builder);

void Load(memgraph::coordination::SetMainToReplicaReq *self, memgraph::slk::Reader *reader);

} // namespace memgraph::slk

#endif
44
src/coordination/include/coordination/coordinator_server.hpp
Normal file
@ -0,0 +1,44 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"
#include "rpc/server.hpp"

namespace memgraph::coordination {

class CoordinatorServer {
 public:
  explicit CoordinatorServer(const CoordinatorServerConfig &config);
  CoordinatorServer(const CoordinatorServer &) = delete;
  CoordinatorServer(CoordinatorServer &&) = delete;
  CoordinatorServer &operator=(const CoordinatorServer &) = delete;
  CoordinatorServer &operator=(CoordinatorServer &&) = delete;

  virtual ~CoordinatorServer();

  bool Start();

  template <typename TRequestResponse, typename F>
  void Register(F &&callback) {
    rpc_server_.Register<TRequestResponse>(std::forward<F>(callback));
  }

 private:
  communication::ServerContext rpc_server_context_;
  rpc::Server rpc_server_;
};

} // namespace memgraph::coordination
#endif
38
src/coordination/include/coordination/coordinator_slk.hpp
Normal file
@ -0,0 +1,38 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"
#include "slk/serialization.hpp"
#include "slk/streams.hpp"

namespace memgraph::slk {

using ReplicationClientInfo = coordination::CoordinatorClientConfig::ReplicationClientInfo;

inline void Save(const ReplicationClientInfo &obj, Builder *builder) {
  Save(obj.instance_name, builder);
  Save(obj.replication_mode, builder);
  Save(obj.replication_ip_address, builder);
  Save(obj.replication_port, builder);
}

inline void Load(ReplicationClientInfo *obj, Reader *reader) {
  Load(&obj->instance_name, reader);
  Load(&obj->replication_mode, reader);
  Load(&obj->replication_ip_address, reader);
  Load(&obj->replication_port, reader);
}
} // namespace memgraph::slk
#endif
53
src/coordination/include/coordination/coordinator_state.hpp
Normal file
@ -0,0 +1,53 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_data.hpp"
#include "coordination/coordinator_instance_status.hpp"
#include "coordination/coordinator_server.hpp"
#include "coordination/failover_status.hpp"
#include "coordination/register_main_replica_coordinator_status.hpp"

#include <variant>

namespace memgraph::coordination {

class CoordinatorState {
 public:
  CoordinatorState();
  ~CoordinatorState() = default;

  CoordinatorState(const CoordinatorState &) = delete;
  CoordinatorState &operator=(const CoordinatorState &) = delete;

  CoordinatorState(CoordinatorState &&) noexcept = delete;
  CoordinatorState &operator=(CoordinatorState &&) noexcept = delete;

  [[nodiscard]] auto RegisterInstance(CoordinatorClientConfig config) -> RegisterInstanceCoordinatorStatus;

  [[nodiscard]] auto SetInstanceToMain(std::string instance_name) -> SetInstanceToMainCoordinatorStatus;

  auto ShowInstances() const -> std::vector<CoordinatorInstanceStatus>;

  // The client code must check that the server exists before calling this method.
  auto GetCoordinatorServer() const -> CoordinatorServer &;

  [[nodiscard]] auto DoFailover() -> DoFailoverStatus;

 private:
  std::variant<CoordinatorData, CoordinatorMainReplicaData> data_;
};

} // namespace memgraph::coordination
#endif
21
src/coordination/include/coordination/failover_status.hpp
Normal file
@ -0,0 +1,21 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include <cstdint>

namespace memgraph::coordination {
enum class DoFailoverStatus : uint8_t { SUCCESS, ALL_REPLICAS_DOWN, MAIN_ALIVE, RPC_FAILED };
} // namespace memgraph::coordination
#endif
@ -0,0 +1,37 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include <cstdint>

namespace memgraph::coordination {

enum class RegisterInstanceCoordinatorStatus : uint8_t {
  NAME_EXISTS,
  END_POINT_EXISTS,
  COULD_NOT_BE_PERSISTED,
  NOT_COORDINATOR,
  RPC_FAILED,
  SUCCESS
};

enum class SetInstanceToMainCoordinatorStatus : uint8_t {
  NO_INSTANCE_WITH_NAME,
  NOT_COORDINATOR,
  SUCCESS,
  COULD_NOT_PROMOTE_TO_MAIN,
};

} // namespace memgraph::coordination
#endif
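Editor's note: callers are expected to branch on these status enums. The sketch below shows one hypothetical way to map SetInstanceToMainCoordinatorStatus to user-facing messages; the wording is illustrative and not taken from the patch.

// Hypothetical mapping from status codes to messages; uses only the enum
// declared above.
#include <string_view>
#include "coordination/register_main_replica_coordinator_status.hpp"

constexpr std::string_view ToString(memgraph::coordination::SetInstanceToMainCoordinatorStatus status) {
  using enum memgraph::coordination::SetInstanceToMainCoordinatorStatus;
  switch (status) {
    case NO_INSTANCE_WITH_NAME:
      return "No instance registered under that name.";
    case NOT_COORDINATOR:
      return "This instance is not a coordinator.";
    case COULD_NOT_PROMOTE_TO_MAIN:
      return "Promotion RPC failed; the instance stays a replica.";
    case SUCCESS:
      return "Instance promoted to MAIN.";
  }
  return "Unknown status.";
}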
@ -1,3 +1,2 @@
add_library(mg-dbms STATIC dbms_handler.cpp database.cpp replication_handler.cpp replication_client.cpp inmemory/replication_handlers.cpp)
target_link_libraries(mg-dbms mg-utils mg-storage-v2 mg-query)
add_library(mg-dbms STATIC dbms_handler.cpp database.cpp replication_handler.cpp coordinator_handler.cpp replication_client.cpp inmemory/replication_handlers.cpp coordinator_handlers.cpp)
target_link_libraries(mg-dbms mg-utils mg-storage-v2 mg-query mg-replication mg-coordination)
@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -13,7 +13,8 @@

namespace memgraph::dbms {

constexpr static const char *kDefaultDB = "memgraph";  //!< Name of the default database
constexpr std::string_view kDefaultDB = "memgraph";        //!< Name of the default database
constexpr std::string_view kMultiTenantDir = "databases";  //!< Name of the multi-tenant directory

#ifdef MG_EXPERIMENTAL_REPLICATION_MULTITENANCY
constexpr bool allow_mt_repl = true;
38
src/dbms/coordinator_handler.cpp
Normal file
@ -0,0 +1,38 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#include "coordination/register_main_replica_coordinator_status.hpp"
#ifdef MG_ENTERPRISE

#include "dbms/coordinator_handler.hpp"

#include "dbms/dbms_handler.hpp"

namespace memgraph::dbms {

CoordinatorHandler::CoordinatorHandler(DbmsHandler &dbms_handler) : dbms_handler_(dbms_handler) {}

auto CoordinatorHandler::RegisterInstance(memgraph::coordination::CoordinatorClientConfig config)
    -> coordination::RegisterInstanceCoordinatorStatus {
  return dbms_handler_.CoordinatorState().RegisterInstance(config);
}

auto CoordinatorHandler::SetInstanceToMain(std::string instance_name)
    -> coordination::SetInstanceToMainCoordinatorStatus {
  return dbms_handler_.CoordinatorState().SetInstanceToMain(std::move(instance_name));
}

auto CoordinatorHandler::ShowInstances() const -> std::vector<coordination::CoordinatorInstanceStatus> {
  return dbms_handler_.CoordinatorState().ShowInstances();
}
} // namespace memgraph::dbms

#endif
47
src/dbms/coordinator_handler.hpp
Normal file
@ -0,0 +1,47 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "utils/result.hpp"

#include "coordination/coordinator_config.hpp"
#include "coordination/coordinator_instance_status.hpp"
#include "coordination/failover_status.hpp"
#include "coordination/register_main_replica_coordinator_status.hpp"

#include <cstdint>
#include <optional>
#include <vector>

namespace memgraph::dbms {

class DbmsHandler;

class CoordinatorHandler {
 public:
  explicit CoordinatorHandler(DbmsHandler &dbms_handler);

  auto RegisterInstance(coordination::CoordinatorClientConfig config)
      -> coordination::RegisterInstanceCoordinatorStatus;

  auto SetInstanceToMain(std::string instance_name) -> coordination::SetInstanceToMainCoordinatorStatus;

  auto ShowInstances() const -> std::vector<coordination::CoordinatorInstanceStatus>;

 private:
  DbmsHandler &dbms_handler_;
};

} // namespace memgraph::dbms
#endif
153
src/dbms/coordinator_handlers.cpp
Normal file
@ -0,0 +1,153 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#ifdef MG_ENTERPRISE

#include "dbms/coordinator_handlers.hpp"
#include "dbms/utils.hpp"

#include "coordination/coordinator_exceptions.hpp"
#include "coordination/coordinator_rpc.hpp"
#include "dbms/dbms_handler.hpp"
#include "dbms/replication_client.hpp"

#include "range/v3/view.hpp"

namespace memgraph::dbms {

void CoordinatorHandlers::Register(DbmsHandler &dbms_handler) {
  auto &server = dbms_handler.CoordinatorState().GetCoordinatorServer();

  server.Register<coordination::PromoteReplicaToMainRpc>(
      [&dbms_handler](slk::Reader *req_reader, slk::Builder *res_builder) -> void {
        spdlog::info("Received PromoteReplicaToMainRpc");
        CoordinatorHandlers::PromoteReplicaToMainHandler(dbms_handler, req_reader, res_builder);
      });

  server.Register<coordination::SetMainToReplicaRpc>(
      [&dbms_handler](slk::Reader *req_reader, slk::Builder *res_builder) -> void {
        spdlog::info("Received SetMainToReplicaRpc from coordinator server");
        CoordinatorHandlers::SetMainToReplicaHandler(dbms_handler, req_reader, res_builder);
      });
}

void CoordinatorHandlers::SetMainToReplicaHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader,
                                                  slk::Builder *res_builder) {
  auto &repl_state = dbms_handler.ReplicationState();

  if (!repl_state.IsMain()) {
    spdlog::error("Setting to replica must be performed on main.");
    slk::Save(coordination::SetMainToReplicaRes{false}, res_builder);
    return;
  }

  coordination::SetMainToReplicaReq req;
  slk::Load(&req, req_reader);

  replication::ReplicationServerConfig clients_config{.ip_address = req.replication_client_info.replication_ip_address,
                                                      .port = req.replication_client_info.replication_port};

  if (bool success = memgraph::dbms::SetReplicationRoleReplica(dbms_handler, clients_config); !success) {
    spdlog::error("Setting main to replica failed!");
    slk::Save(coordination::PromoteReplicaToMainRes{false}, res_builder);
    return;
  }

  slk::Save(coordination::PromoteReplicaToMainRes{true}, res_builder);
}

void CoordinatorHandlers::PromoteReplicaToMainHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader,
                                                      slk::Builder *res_builder) {
  auto &repl_state = dbms_handler.ReplicationState();

  if (!repl_state.IsReplica()) {
    spdlog::error("Failover must be performed on replica!");
    slk::Save(coordination::PromoteReplicaToMainRes{false}, res_builder);
    return;
  }

  auto repl_server_config = std::get<replication::RoleReplicaData>(repl_state.ReplicationData()).config;

  // This can fail because of disk. If it does, the cluster state could get inconsistent.
  // We don't handle disk issues.
  if (bool success = memgraph::dbms::DoReplicaToMainPromotion(dbms_handler); !success) {
    spdlog::error("Promoting replica to main failed!");
    slk::Save(coordination::PromoteReplicaToMainRes{false}, res_builder);
    return;
  }

  coordination::PromoteReplicaToMainReq req;
  slk::Load(&req, req_reader);

  auto const converter = [](const auto &repl_info_config) {
    return replication::ReplicationClientConfig{
        .name = repl_info_config.instance_name,
        .mode = repl_info_config.replication_mode,
        .ip_address = repl_info_config.replication_ip_address,
        .port = repl_info_config.replication_port,
    };
  };

  MG_ASSERT(
      std::get<replication::RoleMainData>(repl_state.ReplicationData()).registered_replicas_.empty(),
      "No replicas should be registered after promoting replica to main and before registering replication clients!");

  // registering replicas
  for (auto const &config : req.replication_clients_info | ranges::views::transform(converter)) {
    auto instance_client = repl_state.RegisterReplica(config);
    if (instance_client.HasError()) {
      switch (instance_client.GetError()) {
        // Can't happen, we are already replica
        case memgraph::replication::RegisterReplicaError::NOT_MAIN:
          spdlog::error("Failover must be performed to main!");
          slk::Save(coordination::PromoteReplicaToMainRes{false}, res_builder);
          return;
        // Can't happen, checked on the coordinator side
        case memgraph::replication::RegisterReplicaError::NAME_EXISTS:
          spdlog::error("Replica with the same name already exists!");
          slk::Save(coordination::PromoteReplicaToMainRes{false}, res_builder);
          return;
        // Can't happen, checked on the coordinator side
        case memgraph::replication::RegisterReplicaError::ENDPOINT_EXISTS:
          spdlog::error("Replica with the same endpoint already exists!");
          slk::Save(coordination::PromoteReplicaToMainRes{false}, res_builder);
          return;
        // We don't handle disk issues
        case memgraph::replication::RegisterReplicaError::COULD_NOT_BE_PERSISTED:
          spdlog::error("Registered replica could not be persisted!");
          slk::Save(coordination::PromoteReplicaToMainRes{false}, res_builder);
          return;
        case memgraph::replication::RegisterReplicaError::SUCCESS:
          break;
      }
    }
    if (!allow_mt_repl && dbms_handler.All().size() > 1) {
      spdlog::warn("Multi-tenant replication is currently not supported!");
    }

    auto &instance_client_ref = *instance_client.GetValue();

    // Update system before enabling individual storage <-> replica clients
    dbms_handler.SystemRestore(instance_client_ref);

    // TODO: (andi) Policy for register all databases
    // Will be resolved after deciding about choosing new replica
    const bool all_clients_good = memgraph::dbms::RegisterAllDatabasesClients(dbms_handler, instance_client_ref);
    MG_ASSERT(all_clients_good, "Failed to register one or more databases to the REPLICA \"{}\".", config.name);

    StartReplicaClient(dbms_handler, instance_client_ref);
  }

  slk::Save(coordination::PromoteReplicaToMainRes{true}, res_builder);
}

} // namespace memgraph::dbms
#endif
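Editor's note: the handler above converts the wire-format ReplicationClientInfo entries into ReplicationClientConfig lazily via a converter lambda piped through views::transform. A generic, self-contained sketch of that pattern using the standard C++20 ranges library and placeholder structs (not the real replication types):

// Generic sketch of the converter + views::transform idiom used in
// PromoteReplicaToMainHandler; WireInfo/ClientConfig are stand-in types.
#include <cstdint>
#include <iostream>
#include <ranges>
#include <string>
#include <vector>

struct WireInfo {
  std::string instance_name;
  std::string ip;
  uint16_t port;
};

struct ClientConfig {
  std::string name;
  std::string endpoint;
};

int main() {
  std::vector<WireInfo> wire{{"instance_2", "10.0.0.2", 10002}, {"instance_3", "10.0.0.3", 10003}};

  auto const converter = [](WireInfo const &info) {
    return ClientConfig{.name = info.instance_name, .endpoint = info.ip + ":" + std::to_string(info.port)};
  };

  // Each entry is converted on the fly, mirroring how each RPC request entry
  // becomes a replication client config before registration.
  for (auto const &config : wire | std::views::transform(converter)) {
    std::cout << config.name << " -> " << config.endpoint << '\n';
  }
}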
34
src/dbms/coordinator_handlers.hpp
Normal file
@ -0,0 +1,34 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "slk/serialization.hpp"

namespace memgraph::dbms {

class DbmsHandler;

class CoordinatorHandlers {
 public:
  static void Register(DbmsHandler &dbms_handler);

 private:
  static void PromoteReplicaToMainHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader,
                                          slk::Builder *res_builder);
  static void SetMainToReplicaHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder);
};

} // namespace memgraph::dbms

#endif
@ -26,7 +26,7 @@ Database::Database(storage::Config config, replication::ReplicationState &repl_s
      streams_{config.durability.storage_directory / "streams"},
      plan_cache_{FLAGS_query_plan_cache_max_size},
      repl_state_(&repl_state) {
  if (config.storage_mode == memgraph::storage::StorageMode::ON_DISK_TRANSACTIONAL || config.force_on_disk ||
  if (config.salient.storage_mode == memgraph::storage::StorageMode::ON_DISK_TRANSACTIONAL || config.force_on_disk ||
      utils::DirExists(config.disk.main_storage_directory)) {
    storage_ = std::make_unique<storage::DiskStorage>(std::move(config));
  } else {
@ -81,7 +81,14 @@ class Database {
   *
   * @return const std::string&
   */
  const std::string &id() const { return storage_->id(); }
  const std::string &name() const { return storage_->name(); }

  /**
   * @brief Unique storage identified (uuid)
   *
   * @return const utils::UUID&
   */
  const utils::UUID &uuid() const { return storage_->uuid(); }

  /**
   * @brief Returns the storage configuration
@ -103,7 +110,7 @@ class Database {
   * @param force_directory Use the configured directory, do not try to decipher the multi-db version
   * @return DatabaseInfo
   */
  DatabaseInfo GetInfo(bool force_directory, replication::ReplicationRole replication_role) const {
  DatabaseInfo GetInfo(bool force_directory, replication_coordination_glue::ReplicationRole replication_role) const {
    DatabaseInfo info;
    info.storage_info = storage_->GetInfo(force_directory, replication_role);
    info.triggers = trigger_store_.GetTriggerInfo().size();
@ -51,7 +51,7 @@ class DatabaseHandler : public Handler<Database> {
   * @param config Storage configuration
   * @return HandlerT::NewResult
   */
  HandlerT::NewResult New(std::string_view name, storage::Config config, replication::ReplicationState &repl_state) {
  HandlerT::NewResult New(storage::Config config, replication::ReplicationState &repl_state) {
    // Control that no one is using the same data directory
    if (std::any_of(begin(), end(), [&](auto &elem) {
          auto db_acc = elem.second.access();
@ -61,8 +61,7 @@ class DatabaseHandler : public Handler<Database> {
          spdlog::info("Tried to generate new storage using a claimed directory.");
          return NewError::EXISTS;
        }
    config.name = name;  // Set storage id via config
    return HandlerT::New(std::piecewise_construct, name, config, repl_state);
    return HandlerT::New(std::piecewise_construct, config.salient.name, config, repl_state);
  }

  /**
@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@ -11,56 +11,205 @@

#include "dbms/dbms_handler.hpp"

#include "dbms/coordinator_handlers.hpp"
#include "flags/replication.hpp"

#include <cstdint>
#include <filesystem>

#include "dbms/constants.hpp"
#include "dbms/global.hpp"
#include "dbms/replication_client.hpp"
#include "spdlog/spdlog.h"
#include "utils/exceptions.hpp"
#include "utils/logging.hpp"
#include "utils/uuid.hpp"

namespace memgraph::dbms {

#ifdef MG_ENTERPRISE

namespace {
constexpr std::string_view kDBPrefix = "database:"; // Key prefix for database durability
constexpr std::string_view kLastCommitedSystemTsKey = "last_commited_system_ts"; // Key for timestamp durability
} // namespace

struct Durability {
enum class DurabilityVersion : uint8_t {
V0 = 0,
V1,
};

struct VersionException : public utils::BasicException {
VersionException() : utils::BasicException("Unsupported durability version!") {}
};

struct UnknownVersionException : public utils::BasicException {
UnknownVersionException() : utils::BasicException("Unable to parse the durability version!") {}
};

struct MigrationException : public utils::BasicException {
MigrationException() : utils::BasicException("Failed to migrate to the current durability version!") {}
};

static DurabilityVersion VersionCheck(std::optional<std::string_view> val) {
if (!val) {
return DurabilityVersion::V0;
}
if (val == "V1") {
return DurabilityVersion::V1;
}
throw UnknownVersionException();
};

static auto GenKey(std::string_view name) -> std::string { return fmt::format("{}{}", kDBPrefix, name); }

static auto GenVal(utils::UUID uuid, std::filesystem::path rel_dir) {
nlohmann::json json;
json["uuid"] = uuid;
json["rel_dir"] = rel_dir;
// TODO: Serialize the configuration
return json.dump();
}

static void Migrate(kvstore::KVStore *durability, const std::filesystem::path &root) {
const auto ver_val = durability->Get("version");
const auto ver = VersionCheck(ver_val);

std::map<std::string, std::string> to_put;
std::vector<std::string> to_delete;

// Update from V0 to V1
if (ver == DurabilityVersion::V0) {
for (const auto &[key, val] : *durability) {
if (key == "version") continue; // Reserved key
// Generate a UUID
auto const uuid = utils::UUID();
// New json values
auto new_key = GenKey(key);
auto path = root;
if (key != kDefaultDB) { // Special case for non-default DBs
// Move directory to new UUID dir
path = root / kMultiTenantDir / std::string{uuid};
std::filesystem::path old_dir(root / kMultiTenantDir / key);
std::error_code ec;
std::filesystem::rename(old_dir, path, ec);
MG_ASSERT(!ec, "Failed to upgrade durability: cannot move default directory.");
}
// Generate json and update value
auto new_data = GenVal(uuid, std::filesystem::relative(path, root));
to_put.emplace(std::move(new_key), std::move(new_data));
to_delete.emplace_back(key);
}
}

// Set version
durability->Put("version", "V1");
// Update to the new key-value pairs
if (!durability->PutAndDeleteMultiple(to_put, to_delete)) {
throw MigrationException();
}
}
};
DbmsHandler::DbmsHandler(
storage::Config config,
memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> *auth,
bool recovery_on_startup, bool delete_on_drop)
: default_config_{std::move(config)},
delete_on_drop_(delete_on_drop),
repl_state_{ReplicationStateRootPath(default_config_)} {
bool recovery_on_startup)
: default_config_{std::move(config)}, repl_state_{ReplicationStateRootPath(default_config_)} {
// TODO: Decouple storage config from dbms config
// TODO: Save individual db configs inside the kvstore and restore from there
storage::UpdatePaths(default_config_, default_config_.durability.storage_directory / "databases");
const auto &db_dir = default_config_.durability.storage_directory;

/*
* FILESYSTEM MANIPULATION
*/
const auto &root = default_config_.durability.storage_directory;
storage::UpdatePaths(default_config_, root);
const auto &db_dir = default_config_.durability.storage_directory / kMultiTenantDir;
// TODO: Unify durability and wal
const auto durability_dir = db_dir / ".durability";
utils::EnsureDirOrDie(db_dir);
utils::EnsureDirOrDie(durability_dir);
durability_ = std::make_unique<kvstore::KVStore>(durability_dir);

// Generate the default database
MG_ASSERT(!NewDefault_().HasError(), "Failed while creating the default DB.");
/*
* DURABILITY
*/
// Migrate durability
Durability::Migrate(durability_.get(), root);
auto directories = std::set{std::string{kDefaultDB}};

// Recover previous databases
if (recovery_on_startup) {
for (const auto &[name, _] : *durability_) {
if (name == kDefaultDB) continue; // Already set
spdlog::info("Restoring database {}.", name);
MG_ASSERT(!New_(name).HasError(), "Failed while creating database {}.", name);
auto it = durability_->begin(std::string(kDBPrefix));
auto end = durability_->end(std::string(kDBPrefix));
for (; it != end; ++it) {
const auto &[key, config_json] = *it;
const auto name = key.substr(kDBPrefix.size());
auto json = nlohmann::json::parse(config_json);
const auto uuid = json.at("uuid").get<utils::UUID>();
const auto rel_dir = json.at("rel_dir").get<std::filesystem::path>();
spdlog::info("Restoring database {} at {}.", name, rel_dir);
auto new_db = New_(name, uuid, rel_dir);
MG_ASSERT(!new_db.HasError(), "Failed while creating database {}.", name);
directories.emplace(rel_dir.filename());
spdlog::info("Database {} restored.", name);
}
// Read the last timestamp
auto lcst = durability_->Get(kLastCommitedSystemTsKey);
if (lcst) {
last_commited_system_timestamp_ = std::stoul(*lcst);
system_timestamp_ = last_commited_system_timestamp_;
}
} else { // Clear databases from the durability list and auth
auto locked_auth = auth->Lock();
for (const auto &[name, _] : *durability_) {
auto it = durability_->begin(std::string{kDBPrefix});
auto end = durability_->end(std::string{kDBPrefix});
for (; it != end; ++it) {
const auto &[key, _] = *it;
const auto name = key.substr(kDBPrefix.size());
if (name == kDefaultDB) continue;
locked_auth->DeleteDatabase(name);
durability_->Delete(name);
durability_->Delete(key);
}
// Delete the last timestamp
durability_->Delete(kLastCommitedSystemTsKey);
}

/*
* DATABASES CLEAN UP
*/
// Clean the unused directories
for (const auto &entry : std::filesystem::directory_iterator(db_dir)) {
const auto &name = entry.path().filename().string();
if (entry.is_directory() && !name.empty() && name.front() != '.') {
auto itr = directories.find(name);
if (itr == directories.end()) {
std::error_code dummy;
std::filesystem::remove_all(entry, dummy);
} else {
directories.erase(itr);
}
}
}

/*
* DEFAULT DB SETUP
*/
// Setup the default DB
SetupDefault_();

/*
* REPLICATION RECOVERY AND STARTUP
*/
// Startup replication state (if recovered at startup)
auto replica = [this](replication::RoleReplicaData const &data) {
// Register handlers
InMemoryReplicationHandlers::Register(this, *data.server);
if (!data.server->Start()) {
spdlog::error("Unable to start the replication server.");
return false;
}
return true;
};
// Replication frequent check start
auto replica = [this](replication::RoleReplicaData const &data) { return StartRpcServer(*this, data); };
// Replication recovery and frequent check start
auto main = [this](replication::RoleMainData &data) {
for (auto &client : data.registered_replicas_) {
SystemRestore(client);
}
ForEach([this](DatabaseAccess db) { RecoverReplication(db); });
for (auto &client : data.registered_replicas_) {
StartReplicaClient(*this, client);
}

@ -69,7 +218,232 @@ DbmsHandler::DbmsHandler(
// Startup proccess for main/replica
MG_ASSERT(std::visit(memgraph::utils::Overloaded{replica, main}, repl_state_.ReplicationData()),
"Replica recovery failure!");
}
#endif

// Warning
if (default_config_.durability.snapshot_wal_mode == storage::Config::Durability::SnapshotWalMode::DISABLED &&
repl_state_.IsMain()) {
spdlog::warn(
"The instance has the MAIN replication role, but durability logs and snapshots are disabled. Please "
"consider "
"enabling durability by using --storage-snapshot-interval-sec and --storage-wal-enabled flags because "
"without write-ahead logs this instance is not replicating any data.");
}

// MAIN or REPLICA instance
if (FLAGS_coordinator_server_port) {
CoordinatorHandlers::Register(*this);
MG_ASSERT(coordinator_state_.GetCoordinatorServer().Start(), "Failed to start coordinator server!");
}
}
DbmsHandler::DeleteResult DbmsHandler::TryDelete(std::string_view db_name) {
std::lock_guard<LockT> wr(lock_);
if (db_name == kDefaultDB) {
// MSG cannot delete the default db
return DeleteError::DEFAULT_DB;
}

// Get DB config for the UUID and disk clean up
const auto conf = db_handler_.GetConfig(db_name);
if (!conf) {
return DeleteError::NON_EXISTENT;
}
const auto &storage_path = conf->durability.storage_directory;
const auto &uuid = conf->salient.uuid;

// Check if db exists
try {
// Low level handlers
if (!db_handler_.TryDelete(db_name)) {
return DeleteError::USING;
}
} catch (utils::BasicException &) {
return DeleteError::NON_EXISTENT;
}

// Remove from durability list
if (durability_) durability_->Delete(Durability::GenKey(db_name));

// Delete disk storage
std::error_code ec;
(void)std::filesystem::remove_all(storage_path, ec);
if (ec) {
spdlog::error(R"(Failed to clean disk while deleting database "{}" stored in {})", db_name, storage_path);
}

// Success
// Save delta
if (system_transaction_) {
system_transaction_->delta.emplace(SystemTransaction::Delta::drop_database, uuid);
}
return {};
}

DbmsHandler::DeleteResult DbmsHandler::Delete(std::string_view db_name) {
auto wr = std::lock_guard(lock_);
return Delete_(db_name);
}

DbmsHandler::DeleteResult DbmsHandler::Delete(utils::UUID uuid) {
auto wr = std::lock_guard(lock_);
std::string db_name;
try {
const auto db = Get_(uuid);
db_name = db->name();
} catch (const UnknownDatabaseException &) {
return DeleteError::NON_EXISTENT;
}
return Delete_(db_name);
}

DbmsHandler::NewResultT DbmsHandler::New_(storage::Config storage_config) {
auto new_db = db_handler_.New(storage_config, repl_state_);

if (new_db.HasValue()) { // Success
// Save delta
if (system_transaction_) {
system_transaction_->delta.emplace(SystemTransaction::Delta::create_database, storage_config.salient);
}
UpdateDurability(storage_config);
return new_db.GetValue();
}
return new_db.GetError();
}

DbmsHandler::DeleteResult DbmsHandler::Delete_(std::string_view db_name) {
if (db_name == kDefaultDB) {
// MSG cannot delete the default db
return DeleteError::DEFAULT_DB;
}

const auto storage_path = StorageDir_(db_name);
if (!storage_path) return DeleteError::NON_EXISTENT;

{
auto db = db_handler_.Get(db_name);
if (!db) return DeleteError::NON_EXISTENT;
// TODO: ATM we assume REPLICA won't have streams,
// this is a best effort approach just in case they do
// there is still subtle data race we stream manipulation
// can occur while we are dropping the database
db->prepare_for_deletion();
auto &database = *db->get();
database.streams()->StopAll();
database.streams()->DropAll();
database.thread_pool()->Shutdown();
}

// Remove from durability list
if (durability_) durability_->Delete(Durability::GenKey(db_name));

// Check if db exists
// Low level handlers
db_handler_.DeferDelete(db_name, [storage_path = *storage_path, db_name = std::string{db_name}]() {
// Delete disk storage
std::error_code ec;
(void)std::filesystem::remove_all(storage_path, ec);
if (ec) {
spdlog::error(R"(Failed to clean disk while deleting database "{}" stored in {})", db_name, storage_path);
}
});

return {}; // Success
}

void DbmsHandler::UpdateDurability(const storage::Config &config, std::optional<std::filesystem::path> rel_dir) {
if (!durability_) return;
// Save database in a list of active databases
const auto &key = Durability::GenKey(config.salient.name);
if (rel_dir == std::nullopt)
rel_dir =
std::filesystem::relative(config.durability.storage_directory, default_config_.durability.storage_directory);
const auto &val = Durability::GenVal(config.salient.uuid, *rel_dir);
durability_->Put(key, val);
}

AllSyncReplicaStatus DbmsHandler::Commit() {
if (system_transaction_ == std::nullopt || system_transaction_->delta == std::nullopt)
return AllSyncReplicaStatus::AllCommitsConfirmed; // Nothing to commit
const auto &delta = *system_transaction_->delta;

auto sync_status = AllSyncReplicaStatus::AllCommitsConfirmed;
// TODO Create a system client that can handle all of this automatically
switch (delta.action) {
using enum SystemTransaction::Delta::Action;
case CREATE_DATABASE: {
// Replication
auto main_handler = [&](memgraph::replication::RoleMainData &main_data) {
// TODO: data race issue? registered_replicas_ access not protected
// This is sync in any case, as this is the startup
for (auto &client : main_data.registered_replicas_) {
bool completed = SteamAndFinalizeDelta<storage::replication::CreateDatabaseRpc>(
client,
[](const storage::replication::CreateDatabaseRes &response) {
return response.result != storage::replication::CreateDatabaseRes::Result::FAILURE;
},
std::string(main_data.epoch_.id()), last_commited_system_timestamp_,
system_transaction_->system_timestamp, delta.config);
// TODO: reduce duplicate code
if (!completed && client.mode_ == replication_coordination_glue::ReplicationMode::SYNC) {
sync_status = AllSyncReplicaStatus::SomeCommitsUnconfirmed;
}
}
// Sync database with REPLICAs
RecoverReplication(Get_(delta.config.name));
};
auto replica_handler = [](memgraph::replication::RoleReplicaData &) { /* Nothing to do */ };
std::visit(utils::Overloaded{main_handler, replica_handler}, repl_state_.ReplicationData());
} break;
case DROP_DATABASE: {
// Replication
auto main_handler = [&](memgraph::replication::RoleMainData &main_data) {
// TODO: data race issue? registered_replicas_ access not protected
// This is sync in any case, as this is the startup
for (auto &client : main_data.registered_replicas_) {
bool completed = SteamAndFinalizeDelta<storage::replication::DropDatabaseRpc>(
client,
[](const storage::replication::DropDatabaseRes &response) {
return response.result != storage::replication::DropDatabaseRes::Result::FAILURE;
},
std::string(main_data.epoch_.id()), last_commited_system_timestamp_,
system_transaction_->system_timestamp, delta.uuid);
// TODO: reduce duplicate code
if (!completed && client.mode_ == replication_coordination_glue::ReplicationMode::SYNC) {
sync_status = AllSyncReplicaStatus::SomeCommitsUnconfirmed;
}
}
};
auto replica_handler = [](memgraph::replication::RoleReplicaData &) { /* Nothing to do */ };
std::visit(utils::Overloaded{main_handler, replica_handler}, repl_state_.ReplicationData());
} break;
}

durability_->Put(kLastCommitedSystemTsKey, std::to_string(system_transaction_->system_timestamp));
last_commited_system_timestamp_ = system_transaction_->system_timestamp;
ResetSystemTransaction();
return sync_status;
}

#else // not MG_ENTERPRISE

AllSyncReplicaStatus DbmsHandler::Commit() {
if (system_transaction_ == std::nullopt || system_transaction_->delta == std::nullopt) {
return AllSyncReplicaStatus::AllCommitsConfirmed; // Nothing to commit
}
const auto &delta = *system_transaction_->delta;

switch (delta.action) {
using enum SystemTransaction::Delta::Action;
case CREATE_DATABASE:
case DROP_DATABASE:
/* Community edition doesn't support multi-tenant replication */
break;
}

last_commited_system_timestamp_ = system_transaction_->system_timestamp;
ResetSystemTransaction();
return AllSyncReplicaStatus::AllCommitsConfirmed;
}

#endif
} // namespace memgraph::dbms
@ -12,34 +12,37 @@
#pragma once

#include <algorithm>
#include <concepts>
#include <atomic>
#include <cstdint>
#include <filesystem>
#include <memory>
#include <mutex>
#include <optional>
#include <ostream>
#include <stdexcept>
#include <system_error>
#include <unordered_map>
#include <utility>

#include "auth/auth.hpp"
#include "constants.hpp"
#include "dbms/database.hpp"
#include "dbms/inmemory/replication_handlers.hpp"
#include "dbms/replication_handler.hpp"
#include "kvstore/kvstore.hpp"
#include "replication/replication_client.hpp"
#include "storage/v2/config.hpp"
#include "storage/v2/replication/enums.hpp"
#include "storage/v2/replication/rpc.hpp"
#include "storage/v2/transaction.hpp"
#include "utils/thread_pool.hpp"
#ifdef MG_ENTERPRISE
#include "coordination/coordinator_state.hpp"
#include "dbms/database_handler.hpp"
#endif
#include "dbms/replication_client.hpp"
#include "dbms/transaction.hpp"
#include "global.hpp"
#include "query/config.hpp"
#include "query/interpreter_context.hpp"
#include "spdlog/spdlog.h"
#include "storage/v2/durability/durability.hpp"
#include "storage/v2/durability/paths.hpp"
#include "storage/v2/isolation_level.hpp"
#include "utils/exceptions.hpp"
#include "utils/file.hpp"
#include "utils/logging.hpp"
#include "utils/result.hpp"
#include "utils/rw_lock.hpp"

@ -48,6 +51,11 @@

namespace memgraph::dbms {

enum class AllSyncReplicaStatus {
AllCommitsConfirmed,
SomeCommitsUnconfirmed,
};

struct Statistics {
uint64_t num_vertex; //!< Sum of vertexes in every database
uint64_t num_edges; //!< Sum of edges in every database

@ -102,11 +110,10 @@ class DbmsHandler {
* @param configs storage configuration
* @param auth pointer to the global authenticator
* @param recovery_on_startup restore databases (and its content) and authentication data
* @param delete_on_drop when dropping delete any associated directories on disk
*/
DbmsHandler(storage::Config config,
memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> *auth,
bool recovery_on_startup, bool delete_on_drop); // TODO If more arguments are added use a config strut
bool recovery_on_startup); // TODO If more arguments are added use a config struct
#else
/**
* @brief Initialize the handler. A single database is supported in community edition.

@ -116,10 +123,12 @@ class DbmsHandler {
DbmsHandler(storage::Config config)
: repl_state_{ReplicationStateRootPath(config)},
db_gatekeeper_{[&] {
config.name = kDefaultDB;
config.salient.name = kDefaultDB;
return std::move(config);
}(),
repl_state_} {}
repl_state_} {
RecoverReplication(Get());
}
#endif

#ifdef MG_ENTERPRISE

@ -131,9 +140,56 @@ class DbmsHandler {
*/
NewResultT New(const std::string &name) {
std::lock_guard<LockT> wr(lock_);
return New_(name, name);
const auto uuid = utils::UUID{};
return New_(name, uuid);
}

/**
* @brief Create new if name/uuid do not match any database. Drop and recreate if database already present.
* @note Default database is not dropped, only its UUID is updated and only if the database is clean.
*
* @param config desired salient config
* @return NewResultT context on success, error on failure
*/
NewResultT Update(const storage::SalientConfig &config) {
std::lock_guard<LockT> wr(lock_);
auto new_db = New_(config);
if (new_db.HasValue() || new_db.GetError() != NewError::EXISTS) {
// NOTE: If db already exists we retry below
return new_db;
}

spdlog::debug("Trying to create db '{}' on replica which already exists.", config.name);

auto db = Get_(config.name);
if (db->uuid() == config.uuid) { // Same db
return db;
}

spdlog::debug("Different UUIDs");

// TODO: Fix this hack
if (config.name == kDefaultDB) {
if (db->storage()->repl_storage_state_.last_commit_timestamp_ != storage::kTimestampInitialId) {
spdlog::debug("Default storage is not clean, cannot update UUID...");
return NewError::GENERIC; // Update error
}
spdlog::debug("Update default db's UUID");
// Default db cannot be deleted and remade, have to just update the UUID
db->storage()->config_.salient.uuid = config.uuid;
UpdateDurability(db->storage()->config_, ".");
return db;
}

spdlog::debug("Drop database and recreate with the correct UUID");
// Defer drop
(void)Delete_(db->name());
// Second attempt
return New_(config);
}

void UpdateDurability(const storage::Config &config, std::optional<std::filesystem::path> rel_dir = {});

/**
* @brief Get the context associated with the "name" database
*

@ -145,6 +201,19 @@ class DbmsHandler {
std::shared_lock<LockT> rd(lock_);
return Get_(name);
}

/**
* @brief Get the context associated with the UUID database
*
* @param uuid
* @return DatabaseAccess
* @throw UnknownDatabaseException if database not found
*/
DatabaseAccess Get(const utils::UUID &uuid) {
std::shared_lock<LockT> rd(lock_);
return Get_(uuid);
}

#else
/**
* @brief Get the context associated with the default database

@ -160,50 +229,28 @@ class DbmsHandler {

#ifdef MG_ENTERPRISE
/**
* @brief Delete database.
* @brief Attempt to delete database.
*
* @param db_name database name
* @return DeleteResult error on failure
*/
DeleteResult Delete(const std::string &db_name) {
std::lock_guard<LockT> wr(lock_);
if (db_name == kDefaultDB) {
// MSG cannot delete the default db
return DeleteError::DEFAULT_DB;
}
DeleteResult TryDelete(std::string_view db_name);

const auto storage_path = StorageDir_(db_name);
if (!storage_path) return DeleteError::NON_EXISTENT;
/**
* @brief Delete or defer deletion of database.
*
* @param db_name database name
* @return DeleteResult error on failure
*/
DeleteResult Delete(std::string_view db_name);

// Check if db exists
try {
// Low level handlers
if (!db_handler_.Delete(db_name)) {
return DeleteError::USING;
}
} catch (utils::BasicException &) {
return DeleteError::NON_EXISTENT;
}

// Remove from durability list
if (durability_) durability_->Delete(db_name);

// Delete disk storage
if (delete_on_drop_) {
std::error_code ec;
(void)std::filesystem::remove_all(*storage_path, ec);
if (ec) {
spdlog::error("Failed to clean disk while deleting database \"{}\".", db_name);
defunct_dbs_.emplace(db_name);
return DeleteError::DISK_FAIL;
}
}

// Delete from defunct_dbs_ (in case a second delete call was successful)
defunct_dbs_.erase(db_name);

return {}; // Success
}
/**
* @brief Delete or defer deletion of database.
*
* @param uuid database UUID
* @return DeleteResult error on failure
*/
DeleteResult Delete(utils::UUID uuid);
#endif

/**

@ -216,7 +263,7 @@ class DbmsHandler {
std::shared_lock<LockT> rd(lock_);
return db_handler_.All();
#else
return {db_gatekeeper_.access()->get()->id()};
return {db_gatekeeper_.access()->get()->name()};
#endif
}
@ -226,6 +273,10 @@ class DbmsHandler {
bool IsMain() const { return repl_state_.IsMain(); }
bool IsReplica() const { return repl_state_.IsReplica(); }

#ifdef MG_ENTERPRISE
coordination::CoordinatorState &CoordinatorState() { return coordinator_state_; }
#endif

/**
* @brief Return the statistics all databases.
*

@ -305,7 +356,7 @@ class DbmsHandler {
auto db_acc_opt = db_gk.access();
if (db_acc_opt) {
auto &db_acc = *db_acc_opt;
spdlog::debug("Restoring trigger for database \"{}\"", db_acc->id());
spdlog::debug("Restoring trigger for database \"{}\"", db_acc->name());
auto storage_accessor = db_acc->Access();
auto dba = memgraph::query::DbAccessor{storage_accessor.get()};
db_acc->trigger_store()->RestoreTriggers(&ic->ast_cache, &dba, ic->config.query, ic->auth_checker);

@ -330,7 +381,7 @@ class DbmsHandler {
auto db_acc = db_gk.access();
if (db_acc) {
auto *db = db_acc->get();
spdlog::debug("Restoring streams for database \"{}\"", db->id());
spdlog::debug("Restoring streams for database \"{}\"", db->name());
db->streams()->RestoreStreams(*db_acc, ic);
}
}

@ -341,7 +392,7 @@ class DbmsHandler {
*
* @param f
*/
void ForEach(auto f) {
void ForEach(std::invocable<DatabaseAccess> auto f) {
#ifdef MG_ENTERPRISE
std::shared_lock<LockT> rd(lock_);
for (auto &[_, db_gk] : db_handler_) {

@ -351,33 +402,103 @@ class DbmsHandler {
#endif
auto db_acc = db_gk.access();
if (db_acc) { // This isn't an error, just a defunct db
f(db_acc->get());
f(*db_acc);
}
}
}

/**
* @brief todo
*
* @param f
*/
void ForOne(auto f) {
void NewSystemTransaction() {
DMG_ASSERT(!system_transaction_, "Already running a system transaction");
system_transaction_.emplace(++system_timestamp_);
}

void ResetSystemTransaction() { system_transaction_.reset(); }

//! \tparam RPC An rpc::RequestResponse
//! \tparam Args the args type
//! \param client the client to use for rpc communication
//! \param check predicate to check response is ok
//! \param args arguments to forward to the rpc request
//! \return If replica stream is completed or enqueued
template <typename RPC, typename... Args>
bool SteamAndFinalizeDelta(auto &client, auto &&check, Args &&...args) {
try {
auto stream = client.rpc_client_.template Stream<RPC>(std::forward<Args>(args)...);
auto task = [&client, check = std::forward<decltype(check)>(check), stream = std::move(stream)]() mutable {
if (stream.IsDefunct()) {
client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; });
return false;
}
try {
if (check(stream.AwaitResponse())) {
return true;
}
} catch (memgraph::rpc::GenericRpcFailedException const &e) {
// swallow error, fallthrough to error handling
}
// This replica needs SYSTEM recovery
client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; });
return false;
};

if (client.mode_ == memgraph::replication_coordination_glue::ReplicationMode::ASYNC) {
client.thread_pool_.AddTask([task = utils::CopyMovableFunctionWrapper{std::move(task)}]() mutable { task(); });
return true;
}

return task();
} catch (memgraph::rpc::GenericRpcFailedException const &e) {
// This replica needs SYSTEM recovery
client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; });
return false;
}
};

AllSyncReplicaStatus Commit();

auto LastCommitedTS() const -> uint64_t { return last_commited_system_timestamp_; }
void SetLastCommitedTS(uint64_t new_ts) { last_commited_system_timestamp_.store(new_ts); }

#ifdef MG_ENTERPRISE
std::shared_lock<LockT> rd(lock_);
for (auto &[_, db_gk] : db_handler_) {
auto db_acc = db_gk.access();
if (db_acc) { // This isn't an error, just a defunct db
if (f(db_acc->get())) break; // Run until the first successful one
// When being called by intepreter no need to gain lock, it should already be under a system transaction
// But concurrently the FrequentCheck is running and will need to lock before reading last_commited_system_timestamp_
template <bool REQUIRE_LOCK = false>
void SystemRestore(replication::ReplicationClient &client) {
// Check if system is up to date
if (client.state_.WithLock(
[](auto &state) { return state == memgraph::replication::ReplicationClient::State::READY; }))
return;

// Try to recover...
{
auto [database_configs, last_commited_system_timestamp] = std::invoke([&] {
auto sys_guard =
std::unique_lock{system_lock_, std::defer_lock}; // ensure no other system transaction in progress
if constexpr (REQUIRE_LOCK) {
sys_guard.lock();
}
auto configs = std::vector<storage::SalientConfig>{};
ForEach([&configs](DatabaseAccess acc) { configs.emplace_back(acc->config().salient); });
return std::pair{configs, last_commited_system_timestamp_.load()};
});
try {
auto stream = client.rpc_client_.Stream<storage::replication::SystemRecoveryRpc>(last_commited_system_timestamp,
std::move(database_configs));
const auto response = stream.AwaitResponse();
if (response.result == storage::replication::SystemRecoveryRes::Result::FAILURE) {
client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; });
return;
}
} catch (memgraph::rpc::GenericRpcFailedException const &e) {
client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; });
return;
}
}
#else
{
auto db_acc = db_gatekeeper_.access();
MG_ASSERT(db_acc, "Should always have the database");
f(db_acc->get());
}
#endif

// Successfully recovered
client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::READY; });
}
#endif
private:
#ifdef MG_ENTERPRISE

@ -387,7 +508,7 @@ class DbmsHandler {
* @param name Database name
* @return std::optional<std::filesystem::path>
*/
std::optional<std::filesystem::path> StorageDir_(const std::string &name) {
std::optional<std::filesystem::path> StorageDir_(std::string_view name) {
const auto conf = db_handler_.GetConfig(name);
if (conf) {
return conf->durability.storage_directory;

@ -400,105 +521,108 @@ class DbmsHandler {
* @brief Create a new Database associated with the "name" database
*
* @param name name of the database
* @param uuid undelying RocksDB directory
* @return NewResultT context on success, error on failure
*/
NewResultT New_(const std::string &name) { return New_(name, name); }
NewResultT New_(std::string_view name, utils::UUID uuid, std::optional<std::filesystem::path> rel_dir = {}) {
auto config_copy = default_config_;
config_copy.salient.name = name;
config_copy.salient.uuid = uuid;
spdlog::debug("Creating database '{}' - '{}'", name, std::string{uuid});
if (rel_dir) {
storage::UpdatePaths(config_copy, default_config_.durability.storage_directory / *rel_dir);
} else {
storage::UpdatePaths(config_copy,
default_config_.durability.storage_directory / kMultiTenantDir / std::string{uuid});
}
return New_(std::move(config_copy));
}

/**
* @brief Create a new Database associated with the "name" database
* @brief Create a new Database using the passed configuration
*
* @param name name of the database
* @param storage_subdir undelying RocksDB directory
* @param config configuration to be used
* @return NewResultT context on success, error on failure
*/
NewResultT New_(const std::string &name, std::filesystem::path storage_subdir) {
NewResultT New_(const storage::SalientConfig &config) {
auto config_copy = default_config_;
storage::UpdatePaths(config_copy, default_config_.durability.storage_directory / storage_subdir);
return New_(name, config_copy);
config_copy.salient = config; // name, uuid, mode, etc
UpdatePaths(config_copy, config_copy.durability.storage_directory / kMultiTenantDir / std::string{config.uuid});
return New_(std::move(config_copy));
}

/**
* @brief Create a new Database associated with the "name" database
*
* @param name name of the database
* @param storage_config storage configuration
* @return NewResultT context on success, error on failure
*/
NewResultT New_(const std::string &name, storage::Config &storage_config) {
if (defunct_dbs_.contains(name)) {
spdlog::warn("Failed to generate database due to the unknown state of the previously defunct database \"{}\".",
name);
return NewError::DEFUNCT;
}
NewResultT New_(storage::Config storage_config);

auto new_db = db_handler_.New(name, storage_config, repl_state_);
if (new_db.HasValue()) {
// Success
if (durability_) durability_->Put(name, "ok"); // TODO: Serialize the configuration?
return new_db.GetValue();
}
return new_db.GetError();
}
// TODO: new overload of Delete_ with DatabaseAccess
DeleteResult Delete_(std::string_view db_name);

/**
* @brief Create a new Database associated with the default database
*
* @return NewResultT context on success, error on failure
*/
NewResultT NewDefault_() {
// Create the default DB in the root (this is how it was done pre multi-tenancy)
auto res = New_(kDefaultDB, "..");
if (res.HasValue()) {
// For back-compatibility...
// Recreate the dbms layout for the default db and symlink to the root
const auto dir = StorageDir_(kDefaultDB);
MG_ASSERT(dir, "Failed to find storage path.");
const auto main_dir = *dir / "databases" / kDefaultDB;
void SetupDefault_() {
try {
Get(kDefaultDB);
} catch (const UnknownDatabaseException &) {
// No default DB restored, create it
MG_ASSERT(New_(kDefaultDB, {/* random UUID */}, ".").HasValue(), "Failed while creating the default database");
}

if (!std::filesystem::exists(main_dir)) {
std::filesystem::create_directory(main_dir);
}
// For back-compatibility...
// Recreate the dbms layout for the default db and symlink to the root
const auto dir = StorageDir_(kDefaultDB);
MG_ASSERT(dir, "Failed to find storage path.");
const auto main_dir = *dir / kMultiTenantDir / kDefaultDB;

// Force link on-disk directories
const auto conf = db_handler_.GetConfig(kDefaultDB);
MG_ASSERT(conf, "No configuration for the default database.");
const auto &tmp_conf = conf->disk;
std::vector<std::filesystem::path> to_link{
tmp_conf.main_storage_directory, tmp_conf.label_index_directory,
tmp_conf.label_property_index_directory, tmp_conf.unique_constraints_directory,
tmp_conf.name_id_mapper_directory, tmp_conf.id_name_mapper_directory,
tmp_conf.durability_directory, tmp_conf.wal_directory,
};
if (!std::filesystem::exists(main_dir)) {
std::filesystem::create_directory(main_dir);
}

// Add in-memory paths
// Some directories are redundant (skip those)
const std::vector<std::string> skip{".lock", "audit_log", "auth", "databases", "internal_modules", "settings"};
for (auto const &item : std::filesystem::directory_iterator{*dir}) {
const auto dir_name = std::filesystem::relative(item.path(), item.path().parent_path());
if (std::find(skip.begin(), skip.end(), dir_name) != skip.end()) continue;
to_link.push_back(item.path());
}
// Force link on-disk directories
const auto conf = db_handler_.GetConfig(kDefaultDB);
MG_ASSERT(conf, "No configuration for the default database.");
const auto &tmp_conf = conf->disk;
std::vector<std::filesystem::path> to_link{
tmp_conf.main_storage_directory, tmp_conf.label_index_directory,
tmp_conf.label_property_index_directory, tmp_conf.unique_constraints_directory,
tmp_conf.name_id_mapper_directory, tmp_conf.id_name_mapper_directory,
tmp_conf.durability_directory, tmp_conf.wal_directory,
};

// Symlink to root dir
for (auto const &item : to_link) {
const auto dir_name = std::filesystem::relative(item, item.parent_path());
const auto link = main_dir / dir_name;
const auto to = std::filesystem::relative(item, main_dir);
if (!std::filesystem::is_symlink(link) && !std::filesystem::exists(link)) {
std::filesystem::create_directory_symlink(to, link);
} else { // Check existing link
std::error_code ec;
const auto test_link = std::filesystem::read_symlink(link, ec);
if (ec || test_link != to) {
MG_ASSERT(false,
"Memgraph storage directory incompatible with new version.\n"
"Please use a clean directory or remove \"{}\" and try again.",
link.string());
}
// Add in-memory paths
// Some directories are redundant (skip those)
const std::vector<std::string> skip{".lock", "audit_log", "auth", "databases", "internal_modules", "settings"};
for (auto const &item : std::filesystem::directory_iterator{*dir}) {
const auto dir_name = std::filesystem::relative(item.path(), item.path().parent_path());
if (std::find(skip.begin(), skip.end(), dir_name) != skip.end()) continue;
to_link.push_back(item.path());
}

// Symlink to root dir
for (auto const &item : to_link) {
const auto dir_name = std::filesystem::relative(item, item.parent_path());
const auto link = main_dir / dir_name;
const auto to = std::filesystem::relative(item, main_dir);
if (!std::filesystem::is_symlink(link) && !std::filesystem::exists(link)) {
std::filesystem::create_directory_symlink(to, link);
} else { // Check existing link
std::error_code ec;
const auto test_link = std::filesystem::read_symlink(link, ec);
if (ec || test_link != to) {
MG_ASSERT(false,
"Memgraph storage directory incompatible with new version.\n"
"Please use a clean directory or remove \"{}\" and try again.",
link.string());
}
}
}
return res;
}

/**

@ -516,17 +640,57 @@ class DbmsHandler {
throw UnknownDatabaseException("Tried to retrieve an unknown database \"{}\".", name);
}

/**
* @brief Get the context associated with the UUID database
*
* @param uuid
* @return DatabaseAccess
* @throw UnknownDatabaseException if database not found
*/
DatabaseAccess Get_(const utils::UUID &uuid) {
// TODO Speed up
for (auto &[_, db_gk] : db_handler_) {
auto acc = db_gk.access();
if (acc->get()->uuid() == uuid) {
return std::move(*acc);
}
}
throw UnknownDatabaseException("Tried to retrieve an unknown database with UUID \"{}\".", std::string{uuid});
}
#endif

void RecoverReplication(DatabaseAccess db_acc) {
if (allow_mt_repl || db_acc->name() == dbms::kDefaultDB) {
// Handle global replication state
spdlog::info("Replication configuration will be stored and will be automatically restored in case of a crash.");
// RECOVER REPLICA CONNECTIONS
memgraph::dbms::RestoreReplication(repl_state_, std::move(db_acc));
} else if (const ::memgraph::replication::RoleMainData *data =
std::get_if<::memgraph::replication::RoleMainData>(&repl_state_.ReplicationData());
data && !data->registered_replicas_.empty()) {
spdlog::warn("Multi-tenant replication is currently not supported!");
}
}

#ifdef MG_ENTERPRISE
mutable LockT lock_{utils::RWLock::Priority::READ}; //!< protective lock
storage::Config default_config_; //!< Storage configuration used when creating new databases
DatabaseHandler db_handler_; //!< multi-tenancy storage handler
std::unique_ptr<kvstore::KVStore> durability_; //!< list of active dbs (pointer so we can postpone its creation)
bool delete_on_drop_; //!< Flag defining if dropping storage also deletes its directory
std::set<std::string> defunct_dbs_; //!< Databases that are in an unknown state due to various failures
coordination::CoordinatorState coordinator_state_; //!< Replication coordinator
#endif
// TODO: Make an api
public:
utils::ResourceLock system_lock_{}; //!> Ensure exclusive access for system queries
private:
std::optional<SystemTransaction> system_transaction_; //!< Current system transaction (only one at a time)
uint64_t system_timestamp_{storage::kTimestampInitialId}; //!< System timestamp
std::atomic_uint64_t last_commited_system_timestamp_{
storage::kTimestampInitialId}; //!< Last commited system timestamp
replication::ReplicationState repl_state_; //!< Global replication state
#ifndef MG_ENTERPRISE
mutable utils::Gatekeeper<Database> db_gatekeeper_; //!< Single databases gatekeeper
#endif
};
}; // namespace memgraph::dbms

} // namespace memgraph::dbms
@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@ -21,6 +21,7 @@
#include "utils/exceptions.hpp"
#include "utils/gatekeeper.hpp"
#include "utils/result.hpp"
#include "utils/thread_pool.hpp"

namespace memgraph::dbms {

@ -82,7 +83,7 @@ class Handler {
* @return true on success
* @throw BasicException
*/
bool Delete(const std::string &name) {
bool TryDelete(std::string_view name) {
if (auto itr = items_.find(name); itr != items_.end()) {
auto db_acc = itr->second.access();
if (db_acc && db_acc->try_delete()) {

@ -92,9 +93,42 @@ class Handler {
}
return false;
}
// TODO: Change to return enum
throw utils::BasicException("Unknown item \"{}\".", name);
}

/**
* @brief Delete or defunct the context associated with the name.
*
* @param name Name associated with the context to delete
* @param post_delete_func What to do after deletion has happened
*/
template <typename Func>
void DeferDelete(std::string_view name, Func &&post_delete_func) {
auto itr = items_.find(name);
if (itr == items_.end()) return;

auto db_acc = itr->second.access();
if (!db_acc) return;

if (db_acc->try_delete()) {
// Delete the database now
db_acc->reset();
post_delete_func();
} else {
// Defer deletion
db_acc->reset();
// TODO: Make sure this shuts down correctly
auto task = [gk = std::move(itr->second), post_delete_func = std::forward<Func>(post_delete_func)]() mutable {
gk.~Gatekeeper<T>();
post_delete_func();
};
defer_pool_.AddTask(utils::CopyMovableFunctionWrapper{std::move(task)});
}
// In any case remove from handled map
items_.erase(itr);
}

/**
* @brief Check if a name is already used.
*

@ -120,6 +154,7 @@ class Handler {
private:
std::unordered_map<std::string, utils::Gatekeeper<T>, string_hash, std::equal_to<>>
items_; //!< map to all active items
utils::ThreadPool defer_pool_{1};
};

} // namespace memgraph::dbms
@ -10,6 +10,7 @@
// licenses/APL.txt.

#include "dbms/inmemory/replication_handlers.hpp"
#include <chrono>
#include <optional>
#include "dbms/constants.hpp"
#include "dbms/dbms_handler.hpp"

@ -22,7 +23,7 @@
#include "storage/v2/inmemory/storage.hpp"
#include "storage/v2/inmemory/unique_constraints.hpp"

using memgraph::replication::ReplicationRole;
using memgraph::replication_coordination_glue::ReplicationRole;
using memgraph::storage::Delta;
using memgraph::storage::EdgeAccessor;
using memgraph::storage::EdgeRef;

@ -49,29 +50,29 @@ std::pair<uint64_t, WalDeltaData> ReadDelta(storage::durability::BaseDecoder *de
}
};

std::optional<DatabaseAccess> GetDatabaseAccessor(dbms::DbmsHandler *dbms_handler, std::string_view db_name) {
std::optional<DatabaseAccess> GetDatabaseAccessor(dbms::DbmsHandler *dbms_handler, const utils::UUID &uuid) {
try {
#ifdef MG_ENTERPRISE
auto acc = dbms_handler->Get(db_name);
#else
if (db_name != dbms::kDefaultDB) {
spdlog::warn("Trying to replicate a non-default database on a community replica.");
return std::nullopt;
}
auto acc = dbms_handler->Get();
#endif
auto acc = dbms_handler->Get(uuid);
if (!acc) {
spdlog::error("Failed to get access to ", db_name);
spdlog::error("Failed to get access to UUID ", std::string{uuid});
return std::nullopt;
}
#else
auto acc = dbms_handler->Get();
if (!acc) {
spdlog::warn("Failed to get access to the default db.");
return std::nullopt;
}
#endif
auto *inmem_storage = dynamic_cast<storage::InMemoryStorage *>(acc.get()->storage());
if (!inmem_storage || inmem_storage->storage_mode_ != storage::StorageMode::IN_MEMORY_TRANSACTIONAL) {
spdlog::error("Database \"{}\" is not IN_MEMORY_TRANSACTIONAL.", db_name);
spdlog::error("Database is not IN_MEMORY_TRANSACTIONAL.");
return std::nullopt;
}
return std::optional{std::move(acc)};
} catch (const dbms::UnknownDatabaseException &e) {
spdlog::warn("No database \"{}\" on replica!", db_name);
spdlog::warn("No database with UUID \"{}\" on replica!", std::string{uuid});
return std::nullopt;
}
}

@ -109,13 +110,16 @@ void InMemoryReplicationHandlers::HeartbeatHandler(dbms::DbmsHandler *dbms_handl
slk::Builder *res_builder) {
storage::replication::HeartbeatReq req;
slk::Load(&req, req_reader);
auto const db_acc = GetDatabaseAccessor(dbms_handler, req.db_name);
if (!db_acc) return;
auto const db_acc = GetDatabaseAccessor(dbms_handler, req.uuid);
if (!db_acc) {
storage::replication::HeartbeatRes res{false, 0, ""};
slk::Save(res, res_builder);
return;
}

// TODO: this handler is agnostic of InMemory, move to be reused by on-disk
auto const *storage = db_acc->get()->storage();
storage::replication::HeartbeatRes res{storage->id(), true,
storage->repl_storage_state_.last_commit_timestamp_.load(),
storage::replication::HeartbeatRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load(),
std::string{storage->repl_storage_state_.epoch_.id()}};
slk::Save(res, res_builder);
}

@ -124,8 +128,12 @@ void InMemoryReplicationHandlers::AppendDeltasHandler(dbms::DbmsHandler *dbms_ha
slk::Builder *res_builder) {
storage::replication::AppendDeltasReq req;
slk::Load(&req, req_reader);
auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name);
if (!db_acc) return;
auto db_acc = GetDatabaseAccessor(dbms_handler, req.uuid);
if (!db_acc) {
storage::replication::AppendDeltasRes res{false, 0};
slk::Save(res, res_builder);
return;
}

storage::replication::Decoder decoder(req_reader);

@ -165,7 +173,7 @@ void InMemoryReplicationHandlers::AppendDeltasHandler(dbms::DbmsHandler *dbms_ha
storage::durability::kVersion); // TODO: Check if we are always using the latest version when replicating
}

storage::replication::AppendDeltasRes res{storage->id(), false, repl_storage_state.last_commit_timestamp_.load()};
storage::replication::AppendDeltasRes res{false, repl_storage_state.last_commit_timestamp_.load()};
slk::Save(res, res_builder);
return;
}

@ -174,7 +182,7 @@ void InMemoryReplicationHandlers::AppendDeltasHandler(dbms::DbmsHandler *dbms_ha
storage, &decoder,
storage::durability::kVersion); // TODO: Check if we are always using the latest version when replicating

storage::replication::AppendDeltasRes res{storage->id(), true, repl_storage_state.last_commit_timestamp_.load()};
storage::replication::AppendDeltasRes res{true, repl_storage_state.last_commit_timestamp_.load()};
slk::Save(res, res_builder);
spdlog::debug("Replication recovery from append deltas finished, replica is now up to date!");
}

@ -183,8 +191,12 @@ void InMemoryReplicationHandlers::SnapshotHandler(dbms::DbmsHandler *dbms_handle
slk::Builder *res_builder) {
storage::replication::SnapshotReq req;
slk::Load(&req, req_reader);
auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name);
if (!db_acc) return;
auto db_acc = GetDatabaseAccessor(dbms_handler, req.uuid);
if (!db_acc) {
storage::replication::SnapshotRes res{false, 0};
slk::Save(res, res_builder);
return;
}

storage::replication::Decoder decoder(req_reader);

@ -232,8 +244,7 @@ void InMemoryReplicationHandlers::SnapshotHandler(dbms::DbmsHandler *dbms_handle
}
storage_guard.unlock();

storage::replication::SnapshotRes res{storage->id(), true,
storage->repl_storage_state_.last_commit_timestamp_.load()};
storage::replication::SnapshotRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()};
slk::Save(res, res_builder);

spdlog::trace("Deleting old snapshot files due to snapshot recovery.");
@ -263,8 +274,12 @@ void InMemoryReplicationHandlers::WalFilesHandler(dbms::DbmsHandler *dbms_handle
|
||||
slk::Builder *res_builder) {
|
||||
storage::replication::WalFilesReq req;
|
||||
slk::Load(&req, req_reader);
|
||||
auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name);
|
||||
if (!db_acc) return;
|
||||
auto db_acc = GetDatabaseAccessor(dbms_handler, req.uuid);
|
||||
if (!db_acc) {
|
||||
storage::replication::WalFilesRes res{false, 0};
|
||||
slk::Save(res, res_builder);
|
||||
return;
|
||||
}
|
||||
|
||||
const auto wal_file_number = req.file_number;
|
||||
spdlog::debug("Received WAL files: {}", wal_file_number);
|
||||
@ -278,8 +293,7 @@ void InMemoryReplicationHandlers::WalFilesHandler(dbms::DbmsHandler *dbms_handle
|
||||
LoadWal(storage, &decoder);
|
||||
}
|
||||
|
||||
storage::replication::WalFilesRes res{storage->id(), true,
|
||||
storage->repl_storage_state_.last_commit_timestamp_.load()};
|
||||
storage::replication::WalFilesRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()};
|
||||
slk::Save(res, res_builder);
|
||||
spdlog::debug("Replication recovery from WAL files ended successfully, replica is now up to date!");
|
||||
}
|
@ -288,8 +302,12 @@ void InMemoryReplicationHandlers::CurrentWalHandler(dbms::DbmsHandler *dbms_hand
slk::Builder *res_builder) {
storage::replication::CurrentWalReq req;
slk::Load(&req, req_reader);
auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_name);
if (!db_acc) return;
auto db_acc = GetDatabaseAccessor(dbms_handler, req.uuid);
if (!db_acc) {
storage::replication::CurrentWalRes res{false, 0};
slk::Save(res, res_builder);
return;
}

storage::replication::Decoder decoder(req_reader);

@ -298,8 +316,7 @@ void InMemoryReplicationHandlers::CurrentWalHandler(dbms::DbmsHand

LoadWal(storage, &decoder);

storage::replication::CurrentWalRes res{storage->id(), true,
storage->repl_storage_state_.last_commit_timestamp_.load()};
storage::replication::CurrentWalRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()};
slk::Save(res, res_builder);
spdlog::debug("Replication recovery from current WAL ended successfully, replica is now up to date!");
}
@ -318,6 +335,8 @@ void InMemoryReplicationHandlers::LoadWal(storage::InMemoryStorage *storage, sto
}
auto &replica_epoch = storage->repl_storage_state_.epoch_;
if (wal_info.epoch_id != replica_epoch.id()) {
// Questionable behaviour: we trust that any change in epoch implies a change in who is MAIN;
// once we use high availability, this assumption needs to be checked.
auto prev_epoch = replica_epoch.SetEpoch(wal_info.epoch_id);
storage->repl_storage_state_.AddEpochToHistoryForce(prev_epoch);
}
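As context for the epoch handling above: the replica force-records the previous epoch whenever the incoming WAL announces a new epoch id. A minimal, self-contained sketch of that bookkeeping (standard C++ only; the class and member names are illustrative, not the Memgraph API):

#include <string>
#include <utility>
#include <vector>

// Illustrative stand-in for the replica's epoch state.
class EpochTracker {
 public:
  // Returns the previous epoch id so the caller can archive it.
  std::string SetEpoch(std::string new_id) { return std::exchange(current_, std::move(new_id)); }
  const std::string &id() const { return current_; }

 private:
  std::string current_;
};

int main() {
  EpochTracker epoch;
  std::vector<std::string> history;  // stand-in for AddEpochToHistoryForce
  epoch.SetEpoch("epoch-1");
  const std::string incoming = "epoch-2";  // epoch id read from the WAL header
  if (incoming != epoch.id()) {
    // Any epoch change is treated as a change of MAIN; archive the old epoch unconditionally.
    history.push_back(epoch.SetEpoch(incoming));
  }
  return history.size() == 1 ? 0 : 1;
}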
@ -355,13 +374,16 @@ void InMemoryReplicationHandlers::TimestampHandler(dbms::DbmsHandl
slk::Builder *res_builder) {
storage::replication::TimestampReq req;
slk::Load(&req, req_reader);
auto const db_acc = GetDatabaseAccessor(dbms_handler, req.db_name);
if (!db_acc) return;
auto const db_acc = GetDatabaseAccessor(dbms_handler, req.uuid);
if (!db_acc) {
storage::replication::TimestampRes res{false, 0};
slk::Save(res, res_builder);
return;
}

// TODO: this handler is agnostic of InMemory, move to be reused by on-disk
auto const *storage = db_acc->get()->storage();
storage::replication::TimestampRes res{storage->id(), true,
storage->repl_storage_state_.last_commit_timestamp_.load()};
storage::replication::TimestampRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()};
slk::Save(res, res_builder);
}

@ -508,7 +530,7 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
case WalDeltaData::Type::EDGE_SET_PROPERTY: {
spdlog::trace(" Edge {} set property {} to {}", delta.vertex_edge_set_property.gid.AsUint(),
delta.vertex_edge_set_property.property, delta.vertex_edge_set_property.value);
if (!storage->config_.items.properties_on_edges)
if (!storage->config_.salient.items.properties_on_edges)
throw utils::BasicException(
"Can't set properties on edges because properties on edges "
"are disabled!");
@ -575,8 +597,8 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
spdlog::trace(" Transaction end");
if (!commit_timestamp_and_accessor || commit_timestamp_and_accessor->first != timestamp)
throw utils::BasicException("Invalid commit data!");
auto ret =
commit_timestamp_and_accessor->second.Commit(commit_timestamp_and_accessor->first, false /* not main */);
auto ret = commit_timestamp_and_accessor->second.Commit(
{.desired_commit_timestamp = commit_timestamp_and_accessor->first, .is_main = false});
if (ret.HasError())
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
commit_timestamp_and_accessor = std::nullopt;
@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -12,7 +12,6 @@
#pragma once

#include "replication/replication_server.hpp"
#include "replication/state.hpp"
#include "storage/v2/replication/serialization.hpp"

namespace memgraph::storage {
@ -24,8 +24,7 @@ namespace memgraph::dbms {

inline std::unique_ptr<storage::Storage> CreateInMemoryStorage(storage::Config config,
::memgraph::replication::ReplicationState &repl_state) {
const auto wal_mode = config.durability.snapshot_wal_mode;
const auto name = config.name;
const auto name = config.salient.name;
auto storage = std::make_unique<storage::InMemoryStorage>(std::move(config));

// Connect replication state and storage
@ -34,24 +33,6 @@ inline std::unique_ptr<storage::Storage> CreateInMemoryStorage(storage::Config c
return storage->CreateSnapshot(repl_state.GetRole());
});

if (allow_mt_repl || name == dbms::kDefaultDB) {
// Handle global replication state
spdlog::info("Replication configuration will be stored and will be automatically restored in case of a crash.");
// RECOVER REPLICA CONNECTIONS
memgraph::dbms::RestoreReplication(repl_state, *storage);
} else if (const ::memgraph::replication::RoleMainData *data =
std::get_if<::memgraph::replication::RoleMainData>(&repl_state.ReplicationData());
data && !data->registered_replicas_.empty()) {
spdlog::warn("Multi-tenant replication is currently not supported!");
}

if (wal_mode == storage::Config::Durability::SnapshotWalMode::DISABLED && repl_state.IsMain()) {
spdlog::warn(
"The instance has the MAIN replication role, but durability logs and snapshots are disabled. Please consider "
"enabling durability by using --storage-snapshot-interval-sec and --storage-wal-enabled flags because "
"without write-ahead logs this instance is not replicating any data.");
}

return std::move(storage);
}

@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -10,6 +10,7 @@
// licenses/APL.txt.

#include "dbms/replication_client.hpp"
#include "replication/replication_client.hpp"

namespace memgraph::dbms {

@ -17,18 +18,26 @@ void StartReplicaClient(DbmsHandler &dbms_handler, replication::ReplicationClien
// No client error, start instance level client
auto const &endpoint = client.rpc_client_.Endpoint();
spdlog::trace("Replication client started at: {}:{}", endpoint.address, endpoint.port);
client.StartFrequentCheck([&dbms_handler](std::string_view name) {
// Working connection, check if any database has been left behind
dbms_handler.ForEach([name](dbms::Database *db) {
client.StartFrequentCheck([&dbms_handler](bool reconnect, replication::ReplicationClient &client) {
// Working connection
// Check if system needs restoration
if (reconnect) {
client.state_.WithLock([](auto &state) { state = memgraph::replication::ReplicationClient::State::BEHIND; });
}
#ifdef MG_ENTERPRISE
dbms_handler.SystemRestore<true>(client);
#endif
// Check if any database has been left behind
dbms_handler.ForEach([&name = client.name_, reconnect](dbms::DatabaseAccess db_acc) {
// Specific database <-> replica client
db->storage()->repl_storage_state_.WithClient(name, [&](storage::ReplicationStorageClient *client) {
if (client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) {
db_acc->storage()->repl_storage_state_.WithClient(name, [&](storage::ReplicationStorageClient *client) {
if (reconnect || client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) {
// Database <-> replica might be behind, check and recover
client->TryCheckReplicaStateAsync(db->storage());
client->TryCheckReplicaStateAsync(db_acc->storage(), db_acc);
}
});
});
});
}
} // namespace memgraph::dbms

} // namespace memgraph::dbms

@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -11,15 +11,21 @@
|
||||
|
||||
#include "dbms/replication_handler.hpp"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#include "dbms/constants.hpp"
|
||||
#include "dbms/dbms_handler.hpp"
|
||||
#include "dbms/global.hpp"
|
||||
#include "dbms/inmemory/replication_handlers.hpp"
|
||||
#include "dbms/inmemory/storage_helper.hpp"
|
||||
#include "dbms/replication_client.hpp"
|
||||
#include "dbms/utils.hpp"
|
||||
#include "replication/messages.hpp"
|
||||
#include "replication/state.hpp"
|
||||
#include "spdlog/spdlog.h"
|
||||
#include "storage/v2/config.hpp"
|
||||
#include "storage/v2/replication/rpc.hpp"
|
||||
#include "utils/on_scope_exit.hpp"
|
||||
|
||||
using memgraph::replication::ReplicationClientConfig;
|
||||
using memgraph::replication::ReplicationState;
|
||||
using memgraph::replication::RoleMainData;
|
||||
using memgraph::replication::RoleReplicaData;
|
||||
|
||||
@ -32,8 +38,8 @@ std::string RegisterReplicaErrorToString(RegisterReplicaError error) {
|
||||
using enum RegisterReplicaError;
|
||||
case NAME_EXISTS:
|
||||
return "NAME_EXISTS";
|
||||
case END_POINT_EXISTS:
|
||||
return "END_POINT_EXISTS";
|
||||
case ENDPOINT_EXISTS:
|
||||
return "ENDPOINT_EXISTS";
|
||||
case CONNECTION_FAILED:
|
||||
return "CONNECTION_FAILED";
|
||||
case COULD_NOT_BE_PERSISTED:
|
||||
@ -45,34 +51,13 @@ std::string RegisterReplicaErrorToString(RegisterReplicaError error) {
|
||||
ReplicationHandler::ReplicationHandler(DbmsHandler &dbms_handler) : dbms_handler_(dbms_handler) {}
|
||||
|
||||
bool ReplicationHandler::SetReplicationRoleMain() {
|
||||
auto const main_handler = [](RoleMainData const &) {
|
||||
auto const main_handler = [](RoleMainData &) {
|
||||
// If we are already MAIN, we don't want to change anything
|
||||
return false;
|
||||
};
|
||||
|
||||
auto const replica_handler = [this](RoleReplicaData const &) {
|
||||
// STEP 1) bring down all REPLICA servers
|
||||
dbms_handler_.ForEach([](Database *db) {
|
||||
auto *storage = db->storage();
|
||||
// Remember old epoch + storage timestamp association
|
||||
storage->PrepareForNewEpoch();
|
||||
});
|
||||
|
||||
// STEP 2) Change to MAIN
|
||||
// TODO: restore replication servers if false?
|
||||
if (!dbms_handler_.ReplicationState().SetReplicationRoleMain()) {
|
||||
// TODO: Handle recovery on failure???
|
||||
return false;
|
||||
}
|
||||
|
||||
// STEP 3) We are now MAIN, update storage local epoch
|
||||
const auto &epoch =
|
||||
std::get<RoleMainData>(std::as_const(dbms_handler_.ReplicationState()).ReplicationData()).epoch_;
|
||||
dbms_handler_.ForEach([&](Database *db) {
|
||||
auto *storage = db->storage();
|
||||
storage->repl_storage_state_.epoch_ = epoch;
|
||||
});
|
||||
|
||||
return true;
|
||||
return memgraph::dbms::DoReplicaToMainPromotion(dbms_handler_);
|
||||
};
|
||||
|
||||
// TODO: under lock
|
||||
@ -89,8 +74,8 @@ bool ReplicationHandler::SetReplicationRoleReplica(const memgraph::replication::
|
||||
// TODO StorageState needs to be synched. Could have a dangling reference if someone adds a database as we are
|
||||
// deleting the replica.
|
||||
// Remove database specific clients
|
||||
dbms_handler_.ForEach([&](Database *db) {
|
||||
auto *storage = db->storage();
|
||||
dbms_handler_.ForEach([&](DatabaseAccess db_acc) {
|
||||
auto *storage = db_acc->storage();
|
||||
storage->repl_storage_state_.replication_clients_.WithLock([](auto &clients) { clients.clear(); });
|
||||
});
|
||||
// Remove instance level clients
|
||||
@ -105,15 +90,7 @@ bool ReplicationHandler::SetReplicationRoleReplica(const memgraph::replication::
|
||||
// ASSERT
|
||||
return false;
|
||||
},
|
||||
[this](RoleReplicaData const &data) {
|
||||
// Register handlers
|
||||
InMemoryReplicationHandlers::Register(&dbms_handler_, *data.server);
|
||||
if (!data.server->Start()) {
|
||||
spdlog::error("Unable to start the replication server.");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}},
|
||||
[this](RoleReplicaData const &data) { return StartRpcServer(dbms_handler_, data); }},
|
||||
dbms_handler_.ReplicationState().ReplicationData());
|
||||
// TODO Handle error (restore to main?)
|
||||
return success;
|
||||
@ -123,60 +100,48 @@ auto ReplicationHandler::RegisterReplica(const memgraph::replication::Replicatio
|
||||
-> memgraph::utils::BasicResult<RegisterReplicaError> {
|
||||
MG_ASSERT(dbms_handler_.ReplicationState().IsMain(), "Only main instance can register a replica!");
|
||||
|
||||
auto instance_client = dbms_handler_.ReplicationState().RegisterReplica(config);
|
||||
if (instance_client.HasError()) switch (instance_client.GetError()) {
|
||||
auto maybe_client = dbms_handler_.ReplicationState().RegisterReplica(config);
|
||||
if (maybe_client.HasError()) {
|
||||
switch (maybe_client.GetError()) {
|
||||
case memgraph::replication::RegisterReplicaError::NOT_MAIN:
|
||||
MG_ASSERT(false, "Only main instance can register a replica!");
|
||||
return {};
|
||||
case memgraph::replication::RegisterReplicaError::NAME_EXISTS:
|
||||
return memgraph::dbms::RegisterReplicaError::NAME_EXISTS;
|
||||
case memgraph::replication::RegisterReplicaError::END_POINT_EXISTS:
|
||||
return memgraph::dbms::RegisterReplicaError::END_POINT_EXISTS;
|
||||
case memgraph::replication::RegisterReplicaError::ENDPOINT_EXISTS:
|
||||
return memgraph::dbms::RegisterReplicaError::ENDPOINT_EXISTS;
|
||||
case memgraph::replication::RegisterReplicaError::COULD_NOT_BE_PERSISTED:
|
||||
return memgraph::dbms::RegisterReplicaError::COULD_NOT_BE_PERSISTED;
|
||||
case memgraph::replication::RegisterReplicaError::SUCCESS:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!allow_mt_repl && dbms_handler_.All().size() > 1) {
|
||||
spdlog::warn("Multi-tenant replication is currently not supported!");
|
||||
}
|
||||
|
||||
bool all_clients_good = true;
|
||||
#ifdef MG_ENTERPRISE
|
||||
// Update system before enabling individual storage <-> replica clients
|
||||
dbms_handler_.SystemRestore(*maybe_client.GetValue());
|
||||
#endif
|
||||
|
||||
// Add database specific clients (NOTE Currently all databases are connected to each replica)
|
||||
dbms_handler_.ForEach([&](Database *db) {
|
||||
auto *storage = db->storage();
|
||||
if (!allow_mt_repl && storage->id() != kDefaultDB) {
|
||||
return;
|
||||
}
|
||||
// TODO: ATM only IN_MEMORY_TRANSACTIONAL, fix other modes
|
||||
if (storage->storage_mode_ != storage::StorageMode::IN_MEMORY_TRANSACTIONAL) return;
|
||||
|
||||
all_clients_good &=
|
||||
storage->repl_storage_state_.replication_clients_.WithLock([storage, &instance_client](auto &storage_clients) {
|
||||
auto client = std::make_unique<storage::ReplicationStorageClient>(*instance_client.GetValue());
|
||||
client->Start(storage);
|
||||
// After start the storage <-> replica state should be READY or RECOVERING (if correctly started)
|
||||
// MAYBE_BEHIND isn't a statement of the current state, this is the default value
|
||||
// Failed to start due to branching of MAIN and REPLICA
|
||||
if (client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) {
|
||||
return false;
|
||||
}
|
||||
storage_clients.push_back(std::move(client));
|
||||
return true;
|
||||
});
|
||||
});
|
||||
const auto dbms_error = memgraph::dbms::HandleRegisterReplicaStatus(maybe_client);
|
||||
if (dbms_error.has_value()) {
|
||||
return *dbms_error;
|
||||
}
|
||||
auto &instance_client_ptr = maybe_client.GetValue();
|
||||
const bool all_clients_good = memgraph::dbms::RegisterAllDatabasesClients(dbms_handler_, *instance_client_ptr);
|
||||
|
||||
// NOTE Currently if any databases fails, we revert back
|
||||
if (!all_clients_good) {
|
||||
spdlog::error("Failed to register all databases to the REPLICA \"{}\"", config.name);
|
||||
spdlog::error("Failed to register all databases on the REPLICA \"{}\"", config.name);
|
||||
UnregisterReplica(config.name);
|
||||
return RegisterReplicaError::CONNECTION_FAILED;
|
||||
}
|
||||
|
||||
// No client error, start instance level client
|
||||
StartReplicaClient(dbms_handler_, *instance_client.GetValue());
|
||||
StartReplicaClient(dbms_handler_, *instance_client_ptr);
|
||||
return {};
|
||||
}
|
||||
|
||||
@ -189,8 +154,8 @@ auto ReplicationHandler::UnregisterReplica(std::string_view name) -> UnregisterR
|
||||
return UnregisterReplicaResult::COULD_NOT_BE_PERSISTED;
|
||||
}
|
||||
// Remove database specific clients
|
||||
dbms_handler_.ForEach([name](Database *db) {
|
||||
db->storage()->repl_storage_state_.replication_clients_.WithLock([&name](auto &clients) {
|
||||
dbms_handler_.ForEach([name](DatabaseAccess db_acc) {
|
||||
db_acc->storage()->repl_storage_state_.replication_clients_.WithLock([&name](auto &clients) {
|
||||
std::erase_if(clients, [name](const auto &client) { return client->Name() == name; });
|
||||
});
|
||||
});
|
||||
@ -204,7 +169,7 @@ auto ReplicationHandler::UnregisterReplica(std::string_view name) -> UnregisterR
|
||||
dbms_handler_.ReplicationState().ReplicationData());
|
||||
}
|
||||
|
||||
auto ReplicationHandler::GetRole() const -> memgraph::replication::ReplicationRole {
|
||||
auto ReplicationHandler::GetRole() const -> memgraph::replication_coordination_glue::ReplicationRole {
|
||||
return dbms_handler_.ReplicationState().GetRole();
|
||||
}
|
||||
|
||||
@ -214,20 +179,20 @@ bool ReplicationHandler::IsReplica() const { return dbms_handler_.ReplicationSta
|
||||
|
||||
// Per storage
|
||||
// NOTE Storage will connect to all replicas. Future work might change this
|
||||
void RestoreReplication(replication::ReplicationState &repl_state, storage::Storage &storage) {
|
||||
void RestoreReplication(replication::ReplicationState &repl_state, DatabaseAccess db_acc) {
|
||||
spdlog::info("Restoring replication role.");
|
||||
|
||||
/// MAIN
|
||||
auto const recover_main = [&storage](RoleMainData &mainData) {
|
||||
auto const recover_main = [db_acc = std::move(db_acc)](RoleMainData &mainData) mutable { // NOLINT
|
||||
// Each individual client has already been restored and started. Here we just go through each database and start its
|
||||
// client
|
||||
for (auto &instance_client : mainData.registered_replicas_) {
|
||||
spdlog::info("Replica {} restoration started for {}.", instance_client.name_, storage.id());
|
||||
|
||||
const auto &ret = storage.repl_storage_state_.replication_clients_.WithLock(
|
||||
[&](auto &storage_clients) -> utils::BasicResult<RegisterReplicaError> {
|
||||
spdlog::info("Replica {} restoration started for {}.", instance_client.name_, db_acc->name());
|
||||
const auto &ret = db_acc->storage()->repl_storage_state_.replication_clients_.WithLock(
|
||||
[&, db_acc](auto &storage_clients) mutable -> utils::BasicResult<RegisterReplicaError> {
|
||||
auto client = std::make_unique<storage::ReplicationStorageClient>(instance_client);
|
||||
client->Start(&storage);
|
||||
auto *storage = db_acc->storage();
|
||||
client->Start(storage, std::move(db_acc));
|
||||
// After start the storage <-> replica state should be READY or RECOVERING (if correctly started)
|
||||
// MAYBE_BEHIND isn't a statement of the current state, this is the default value
|
||||
// Failed to start due to branching of MAIN and REPLICA
|
||||
@ -244,7 +209,7 @@ void RestoreReplication(replication::ReplicationState &repl_state, storage::Stor
|
||||
LOG_FATAL("Failure when restoring replica {}: {}.", instance_client.name_,
|
||||
RegisterReplicaErrorToString(ret.GetError()));
|
||||
}
|
||||
spdlog::info("Replica {} restored for {}.", instance_client.name_, storage.id());
|
||||
spdlog::info("Replica {} restored for {}.", instance_client.name_, db_acc->name());
|
||||
}
|
||||
spdlog::info("Replication role restored to MAIN.");
|
||||
};
|
||||
@ -259,4 +224,177 @@ void RestoreReplication(replication::ReplicationState &repl_state, storage::Stor
|
||||
},
|
||||
repl_state.ReplicationData());
|
||||
}
|
||||
|
||||
namespace system_replication {
#ifdef MG_ENTERPRISE
void SystemHeartbeatHandler(const uint64_t ts, slk::Reader *req_reader, slk::Builder *res_builder) {
replication::SystemHeartbeatReq req;
replication::SystemHeartbeatReq::Load(&req, req_reader);

replication::SystemHeartbeatRes res(ts);
memgraph::slk::Save(res, res_builder);
}

void CreateDatabaseHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder) {
memgraph::storage::replication::CreateDatabaseReq req;
memgraph::slk::Load(&req, req_reader);

using memgraph::storage::replication::CreateDatabaseRes;
CreateDatabaseRes res(CreateDatabaseRes::Result::FAILURE);

// Note: No need to check the epoch; recovery is done via a full, up-to-date snapshot
// of the set of databases, hence there is no epoch history to maintain.
// If MAIN has changed, we need to check that the new group_timestamp is consistent with
// what we have so far.

if (req.expected_group_timestamp != dbms_handler.LastCommitedTS()) {
spdlog::debug("CreateDatabaseHandler: bad expected timestamp {},{}", req.expected_group_timestamp,
dbms_handler.LastCommitedTS());
memgraph::slk::Save(res, res_builder);
return;
}

try {
// Create new
auto new_db = dbms_handler.Update(req.config);
if (new_db.HasValue()) {
// Successfully created the database
dbms_handler.SetLastCommitedTS(req.new_group_timestamp);
res = CreateDatabaseRes(CreateDatabaseRes::Result::SUCCESS);
spdlog::debug("CreateDatabaseHandler: SUCCESS updated LCTS to {}", req.new_group_timestamp);
}
} catch (...) {
// Failure
}

memgraph::slk::Save(res, res_builder);
}
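The handler above applies the system delta only when the replica's last committed group timestamp matches the one MAIN expected; otherwise it replies with FAILURE so a full recovery can follow. A minimal sketch of that check-then-apply pattern (standard C++ only; the names are illustrative, not the actual RPC types):

#include <cstdint>
#include <iostream>

enum class Result { SUCCESS, FAILURE };

struct SystemState {
  uint64_t last_committed_ts = 0;
};

// Apply an update only if the caller's expectation matches our state; bump the timestamp on success.
Result ApplyIfExpected(SystemState &state, uint64_t expected_ts, uint64_t new_ts) {
  if (expected_ts != state.last_committed_ts) {
    return Result::FAILURE;  // replica has diverged; the caller falls back to full recovery
  }
  // ... perform the actual change (e.g. create the database) here ...
  state.last_committed_ts = new_ts;
  return Result::SUCCESS;
}

int main() {
  SystemState state;
  std::cout << (ApplyIfExpected(state, 0, 1) == Result::SUCCESS) << '\n';  // 1: expectation met
  std::cout << (ApplyIfExpected(state, 0, 2) == Result::SUCCESS) << '\n';  // 0: stale expectation
}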
||||
|
||||
void DropDatabaseHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder) {
|
||||
memgraph::storage::replication::DropDatabaseReq req;
|
||||
memgraph::slk::Load(&req, req_reader);
|
||||
|
||||
using memgraph::storage::replication::DropDatabaseRes;
|
||||
DropDatabaseRes res(DropDatabaseRes::Result::FAILURE);
|
||||
|
||||
// Note: No need to check the epoch; recovery is done via a full, up-to-date snapshot
// of the set of databases, hence there is no epoch history to maintain.
// If MAIN has changed, we need to check that the new group_timestamp is consistent with
// what we have so far.
|
||||
|
||||
if (req.expected_group_timestamp != dbms_handler.LastCommitedTS()) {
|
||||
spdlog::debug("DropDatabaseHandler: bad expected timestamp {},{}", req.expected_group_timestamp,
|
||||
dbms_handler.LastCommitedTS());
|
||||
memgraph::slk::Save(res, res_builder);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
// NOTE: Single communication channel can exist at a time, no other database can be deleted/created at the moment.
|
||||
auto new_db = dbms_handler.Delete(req.uuid);
|
||||
if (new_db.HasError()) {
|
||||
if (new_db.GetError() == DeleteError::NON_EXISTENT) {
|
||||
// Nothing to drop
|
||||
dbms_handler.SetLastCommitedTS(req.new_group_timestamp);
|
||||
res = DropDatabaseRes(DropDatabaseRes::Result::NO_NEED);
|
||||
}
|
||||
} else {
|
||||
// Successfully drop db
|
||||
dbms_handler.SetLastCommitedTS(req.new_group_timestamp);
|
||||
res = DropDatabaseRes(DropDatabaseRes::Result::SUCCESS);
|
||||
spdlog::debug("DropDatabaseHandler: SUCCESS updated LCTS to {}", req.new_group_timestamp);
|
||||
}
|
||||
} catch (...) {
|
||||
// Failure
|
||||
}
|
||||
|
||||
memgraph::slk::Save(res, res_builder);
|
||||
}
|
||||
|
||||
void SystemRecoveryHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder) {
|
||||
// TODO Speed up
|
||||
memgraph::storage::replication::SystemRecoveryReq req;
|
||||
memgraph::slk::Load(&req, req_reader);
|
||||
|
||||
using memgraph::storage::replication::SystemRecoveryRes;
|
||||
SystemRecoveryRes res(SystemRecoveryRes::Result::FAILURE);
|
||||
|
||||
utils::OnScopeExit send_on_exit([&]() { memgraph::slk::Save(res, res_builder); });
|
||||
|
||||
// Get all current dbs
|
||||
auto old = dbms_handler.All();
|
||||
|
||||
// Check/create the incoming dbs
|
||||
for (const auto &config : req.database_configs) {
|
||||
// Missing db
|
||||
try {
|
||||
if (dbms_handler.Update(config).HasError()) {
|
||||
spdlog::debug("SystemRecoveryHandler: Failed to update database \"{}\".", config.name);
|
||||
return; // Send failure on exit
|
||||
}
|
||||
} catch (const UnknownDatabaseException &) {
|
||||
spdlog::debug("SystemRecoveryHandler: UnknownDatabaseException");
|
||||
return; // Send failure on exit
|
||||
}
|
||||
const auto it = std::find(old.begin(), old.end(), config.name);
|
||||
if (it != old.end()) old.erase(it);
|
||||
}
|
||||
|
||||
// Delete all the leftover old dbs
|
||||
for (const auto &remove_db : old) {
|
||||
const auto del = dbms_handler.Delete(remove_db);
|
||||
if (del.HasError()) {
|
||||
// Some errors are not terminal
|
||||
if (del.GetError() == DeleteError::DEFAULT_DB || del.GetError() == DeleteError::NON_EXISTENT) {
|
||||
spdlog::debug("SystemRecoveryHandler: Dropped database \"{}\".", remove_db);
|
||||
continue;
|
||||
}
|
||||
spdlog::debug("SystemRecoveryHandler: Failed to drop database \"{}\".", remove_db);
|
||||
return; // Send failure on exit
|
||||
}
|
||||
}
|
||||
// Successfully recovered
|
||||
dbms_handler.SetLastCommitedTS(req.forced_group_timestamp);
|
||||
spdlog::debug("SystemRecoveryHandler: SUCCESS updated LCTS to {}", req.forced_group_timestamp);
|
||||
res = SystemRecoveryRes(SystemRecoveryRes::Result::SUCCESS);
|
||||
}
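SystemRecoveryHandler above reconciles the replica's databases against the full list sent by MAIN: every incoming config is created or updated, and whatever is left over afterwards is dropped. A compact sketch of that reconciliation loop (standard C++ only; the container contents are illustrative):

#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
  std::set<std::string> existing{"memgraph", "stale_db"};          // databases currently on the replica
  const std::vector<std::string> incoming{"memgraph", "new_db"};   // databases MAIN says should exist

  // Ensure every incoming database exists; remove it from the "leftover" set.
  for (const auto &name : incoming) {
    existing.erase(name);  // already present -> not a leftover
    // (create or update the database here if it was missing)
  }
  // Whatever remains was not mentioned by MAIN, so it gets dropped.
  for (const auto &leftover : existing) {
    std::cout << "dropping " << leftover << '\n';  // prints: dropping stale_db
  }
}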
|
||||
#endif
|
||||
|
||||
void Register(replication::RoleReplicaData const &data, dbms::DbmsHandler &dbms_handler) {
|
||||
#ifdef MG_ENTERPRISE
|
||||
data.server->rpc_server_.Register<replication::SystemHeartbeatRpc>(
|
||||
[&dbms_handler](auto *req_reader, auto *res_builder) {
|
||||
spdlog::debug("Received SystemHeartbeatRpc");
|
||||
SystemHeartbeatHandler(dbms_handler.LastCommitedTS(), req_reader, res_builder);
|
||||
});
|
||||
data.server->rpc_server_.Register<storage::replication::CreateDatabaseRpc>(
|
||||
[&dbms_handler](auto *req_reader, auto *res_builder) {
|
||||
spdlog::debug("Received CreateDatabaseRpc");
|
||||
CreateDatabaseHandler(dbms_handler, req_reader, res_builder);
|
||||
});
|
||||
data.server->rpc_server_.Register<storage::replication::DropDatabaseRpc>(
|
||||
[&dbms_handler](auto *req_reader, auto *res_builder) {
|
||||
spdlog::debug("Received DropDatabaseRpc");
|
||||
DropDatabaseHandler(dbms_handler, req_reader, res_builder);
|
||||
});
|
||||
data.server->rpc_server_.Register<storage::replication::SystemRecoveryRpc>(
|
||||
[&dbms_handler](auto *req_reader, auto *res_builder) {
|
||||
spdlog::debug("Received SystemRecoveryRpc");
|
||||
SystemRecoveryHandler(dbms_handler, req_reader, res_builder);
|
||||
});
|
||||
#endif
|
||||
}
|
||||
} // namespace system_replication
|
||||
|
||||
bool StartRpcServer(DbmsHandler &dbms_handler, const replication::RoleReplicaData &data) {
|
||||
// Register handlers
|
||||
InMemoryReplicationHandlers::Register(&dbms_handler, *data.server);
|
||||
system_replication::Register(data, dbms_handler);
|
||||
// Start server
|
||||
if (!data.server->Start()) {
|
||||
spdlog::error("Unable to start the replication server.");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
} // namespace memgraph::dbms
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -11,11 +11,10 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "replication/role.hpp"
|
||||
#include "storage/v2/storage.hpp"
|
||||
#include "replication_coordination_glue/role.hpp"
|
||||
#include "dbms/database.hpp"
|
||||
#include "utils/result.hpp"
|
||||
|
||||
// BEGIN fwd declares
|
||||
namespace memgraph::replication {
|
||||
struct ReplicationState;
|
||||
struct ReplicationServerConfig;
|
||||
@ -23,9 +22,11 @@ struct ReplicationClientConfig;
|
||||
} // namespace memgraph::replication
|
||||
|
||||
namespace memgraph::dbms {
|
||||
|
||||
class DbmsHandler;
|
||||
|
||||
enum class RegisterReplicaError : uint8_t { NAME_EXISTS, END_POINT_EXISTS, CONNECTION_FAILED, COULD_NOT_BE_PERSISTED };
|
||||
enum class RegisterReplicaError : uint8_t { NAME_EXISTS, ENDPOINT_EXISTS, CONNECTION_FAILED, COULD_NOT_BE_PERSISTED };
|
||||
|
||||
enum class UnregisterReplicaResult : uint8_t {
|
||||
NOT_MAIN,
|
||||
COULD_NOT_BE_PERSISTED,
|
||||
@ -52,7 +53,7 @@ struct ReplicationHandler {
|
||||
auto UnregisterReplica(std::string_view name) -> UnregisterReplicaResult;
|
||||
|
||||
// Helper pass-through (TODO: remove)
|
||||
auto GetRole() const -> memgraph::replication::ReplicationRole;
|
||||
auto GetRole() const -> memgraph::replication_coordination_glue::ReplicationRole;
|
||||
bool IsMain() const;
|
||||
bool IsReplica() const;
|
||||
|
||||
@ -62,6 +63,20 @@ struct ReplicationHandler {
|
||||
|
||||
/// A handler type that keeps the current ReplicationState and the MAIN/REPLICA-ness of Storage in sync
|
||||
/// TODO: extend to do multiple storages
|
||||
void RestoreReplication(replication::ReplicationState &repl_state, storage::Storage &storage);
|
||||
void RestoreReplication(replication::ReplicationState &repl_state, DatabaseAccess db_acc);
|
||||
|
||||
namespace system_replication {
|
||||
// System handlers
|
||||
#ifdef MG_ENTERPRISE
|
||||
void CreateDatabaseHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder);
|
||||
void SystemHeartbeatHandler(uint64_t ts, slk::Reader *req_reader, slk::Builder *res_builder);
|
||||
void SystemRecoveryHandler(DbmsHandler &dbms_handler, slk::Reader *req_reader, slk::Builder *res_builder);
|
||||
#endif
|
||||
|
||||
/// Register all DBMS level RPC handlers
|
||||
void Register(replication::RoleReplicaData const &data, DbmsHandler &dbms_handler);
|
||||
} // namespace system_replication
|
||||
|
||||
bool StartRpcServer(DbmsHandler &dbms_handler, const replication::RoleReplicaData &data);
|
||||
|
||||
} // namespace memgraph::dbms
|
||||
|
64 src/dbms/transaction.hpp Normal file
@ -0,0 +1,64 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#include <memory>
#include "storage/v2/config.hpp"

namespace memgraph::dbms {
struct SystemTransaction {
struct Delta {
enum class Action {
CREATE_DATABASE,
DROP_DATABASE,
};

static constexpr struct CreateDatabase {
} create_database;
static constexpr struct DropDatabase {
} drop_database;

Delta(CreateDatabase /*tag*/, storage::SalientConfig config)
: action(Action::CREATE_DATABASE), config(std::move(config)) {}
Delta(DropDatabase /*tag*/, const utils::UUID &uuid) : action(Action::DROP_DATABASE), uuid(uuid) {}

Delta(const Delta &) = delete;
Delta(Delta &&) = delete;
Delta &operator=(const Delta &) = delete;
Delta &operator=(Delta &&) = delete;

~Delta() {
switch (action) {
case Action::CREATE_DATABASE:
std::destroy_at(&config);
break;
case Action::DROP_DATABASE:
break;
// Some deltas might have special destructor handling
}
}

Action action;
union {
storage::SalientConfig config;
utils::UUID uuid;
};
};

explicit SystemTransaction(uint64_t timestamp) : system_timestamp(timestamp) {}

// Currently system transactions support a single delta
std::optional<Delta> delta{};
uint64_t system_timestamp;
};

} // namespace memgraph::dbms
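The Delta above is a hand-rolled tagged union: a tag constructor picks the active union member and the destructor destroys it explicitly. A self-contained sketch of the same pattern (standard C++ only; the payload types are placeholders, not SalientConfig/UUID):

#include <cstdint>
#include <memory>   // std::destroy_at
#include <string>

struct Delta {
  enum class Action { CREATE, DROP };

  struct CreateTag {};
  struct DropTag {};
  static constexpr CreateTag create{};
  static constexpr DropTag drop{};

  Delta(CreateTag /*tag*/, std::string name) : action(Action::CREATE), config(std::move(name)) {}
  Delta(DropTag /*tag*/, uint64_t id) : action(Action::DROP), uuid(id) {}

  Delta(const Delta &) = delete;
  Delta &operator=(const Delta &) = delete;

  ~Delta() {
    // Only the non-trivial member needs an explicit destructor call.
    if (action == Action::CREATE) std::destroy_at(&config);
  }

  Action action;
  union {
    std::string config;  // placeholder for storage::SalientConfig
    uint64_t uuid;       // placeholder for utils::UUID
  };
};

int main() {
  Delta d1(Delta::create, "my_database");
  Delta d2(Delta::drop, 42);
  return (d1.action == Delta::Action::CREATE && d2.uuid == 42) ? 0 : 1;
}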
|
133 src/dbms/utils.hpp Normal file
@ -0,0 +1,133 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
#pragma once
|
||||
|
||||
#include "dbms/dbms_handler.hpp"
|
||||
#include "dbms/replication_handler.hpp"
|
||||
#include "replication/include/replication/state.hpp"
|
||||
#include "utils/result.hpp"
|
||||
|
||||
namespace memgraph::dbms {
|
||||
|
||||
inline bool DoReplicaToMainPromotion(dbms::DbmsHandler &dbms_handler) {
|
||||
auto &repl_state = dbms_handler.ReplicationState();
|
||||
// STEP 1) bring down all REPLICA servers
|
||||
dbms_handler.ForEach([](DatabaseAccess db_acc) {
|
||||
auto *storage = db_acc->storage();
|
||||
// Remember old epoch + storage timestamp association
|
||||
storage->PrepareForNewEpoch();
|
||||
});
|
||||
|
||||
// STEP 2) Change to MAIN
|
||||
// TODO: restore replication servers if false?
|
||||
if (!repl_state.SetReplicationRoleMain()) {
|
||||
// TODO: Handle recovery on failure???
|
||||
return false;
|
||||
}
|
||||
|
||||
// STEP 3) We are now MAIN, update storage local epoch
|
||||
const auto &epoch =
|
||||
std::get<replication::RoleMainData>(std::as_const(dbms_handler.ReplicationState()).ReplicationData()).epoch_;
|
||||
dbms_handler.ForEach([&](DatabaseAccess db_acc) {
|
||||
auto *storage = db_acc->storage();
|
||||
storage->repl_storage_state_.epoch_ = epoch;
|
||||
});
|
||||
|
||||
return true;
|
||||
};
|
||||
|
||||
inline bool SetReplicationRoleReplica(dbms::DbmsHandler &dbms_handler,
|
||||
const memgraph::replication::ReplicationServerConfig &config) {
|
||||
if (dbms_handler.ReplicationState().IsReplica()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// TODO StorageState needs to be synched. Could have a dangling reference if someone adds a database as we are
|
||||
// deleting the replica.
|
||||
// Remove database specific clients
|
||||
dbms_handler.ForEach([&](DatabaseAccess db_acc) {
|
||||
auto *storage = db_acc->storage();
|
||||
storage->repl_storage_state_.replication_clients_.WithLock([](auto &clients) { clients.clear(); });
|
||||
});
|
||||
// Remove instance level clients
|
||||
std::get<replication::RoleMainData>(dbms_handler.ReplicationState().ReplicationData()).registered_replicas_.clear();
|
||||
|
||||
// Creates the server
|
||||
dbms_handler.ReplicationState().SetReplicationRoleReplica(config);
|
||||
|
||||
// Start
|
||||
const auto success = std::visit(utils::Overloaded{[](replication::RoleMainData const &) {
|
||||
// ASSERT
|
||||
return false;
|
||||
},
|
||||
[&dbms_handler](replication::RoleReplicaData const &data) {
|
||||
return StartRpcServer(dbms_handler, data);
|
||||
}},
|
||||
dbms_handler.ReplicationState().ReplicationData());
|
||||
// TODO Handle error (restore to main?)
|
||||
return success;
|
||||
}
|
||||
|
||||
inline bool RegisterAllDatabasesClients(dbms::DbmsHandler &dbms_handler,
|
||||
replication::ReplicationClient &instance_client) {
|
||||
if (!allow_mt_repl && dbms_handler.All().size() > 1) {
|
||||
spdlog::warn("Multi-tenant replication is currently not supported!");
|
||||
}
|
||||
|
||||
bool all_clients_good = true;
|
||||
|
||||
// Add database specific clients (NOTE Currently all databases are connected to each replica)
|
||||
dbms_handler.ForEach([&](DatabaseAccess db_acc) {
|
||||
auto *storage = db_acc->storage();
|
||||
if (!allow_mt_repl && storage->name() != kDefaultDB) {
|
||||
return;
|
||||
}
|
||||
// TODO: ATM only IN_MEMORY_TRANSACTIONAL, fix other modes
|
||||
if (storage->storage_mode_ != storage::StorageMode::IN_MEMORY_TRANSACTIONAL) return;
|
||||
|
||||
all_clients_good &= storage->repl_storage_state_.replication_clients_.WithLock(
|
||||
[storage, &instance_client, db_acc = std::move(db_acc)](auto &storage_clients) mutable { // NOLINT
|
||||
auto client = std::make_unique<storage::ReplicationStorageClient>(instance_client);
|
||||
// All good, start replica client
|
||||
client->Start(storage, std::move(db_acc));
|
||||
// After start the storage <-> replica state should be READY or RECOVERING (if correctly started)
|
||||
// MAYBE_BEHIND isn't a statement of the current state, this is the default value
|
||||
// Failed to start due to an error such as branching of MAIN and REPLICA
|
||||
if (client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) {
|
||||
return false; // TODO: sometimes we need to still add to storage_clients
|
||||
}
|
||||
storage_clients.push_back(std::move(client));
|
||||
return true;
|
||||
});
|
||||
});
|
||||
|
||||
return all_clients_good;
|
||||
}
|
||||
|
||||
inline std::optional<RegisterReplicaError> HandleRegisterReplicaStatus(
|
||||
utils::BasicResult<replication::RegisterReplicaError, replication::ReplicationClient *> &instance_client) {
|
||||
if (instance_client.HasError()) switch (instance_client.GetError()) {
|
||||
case replication::RegisterReplicaError::NOT_MAIN:
|
||||
MG_ASSERT(false, "Only main instance can register a replica!");
|
||||
return {};
|
||||
case replication::RegisterReplicaError::NAME_EXISTS:
|
||||
return dbms::RegisterReplicaError::NAME_EXISTS;
|
||||
case replication::RegisterReplicaError::ENDPOINT_EXISTS:
|
||||
return dbms::RegisterReplicaError::ENDPOINT_EXISTS;
|
||||
case replication::RegisterReplicaError::COULD_NOT_BE_PERSISTED:
|
||||
return dbms::RegisterReplicaError::COULD_NOT_BE_PERSISTED;
|
||||
case replication::RegisterReplicaError::SUCCESS:
|
||||
break;
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
} // namespace memgraph::dbms
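Several functions in this file dispatch on the current replication role by visiting a variant of role-specific data with an overloaded lambda set (utils::Overloaded in the codebase). A minimal, self-contained version of that dispatch (standard C++17; RoleMain/RoleReplica are placeholders for the real role structs):

#include <iostream>
#include <variant>

// Same trick as utils::Overloaded: merge several lambdas into one visitor.
template <class... Ts>
struct Overloaded : Ts... {
  using Ts::operator()...;
};
template <class... Ts>
Overloaded(Ts...) -> Overloaded<Ts...>;

struct RoleMain {};     // placeholder for replication::RoleMainData
struct RoleReplica {};  // placeholder for replication::RoleReplicaData

bool StartIfReplica(const std::variant<RoleMain, RoleReplica> &role) {
  return std::visit(Overloaded{[](const RoleMain &) { return false; },     // MAIN: nothing to start
                               [](const RoleReplica &) { return true; }},  // REPLICA: start the RPC server
                    role);
}

int main() {
  std::cout << StartIfReplica(RoleMain{}) << ' ' << StartIfReplica(RoleReplica{}) << '\n';  // prints: 0 1
}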
|
@ -6,6 +6,7 @@ add_library(mg-flags STATIC audit.cpp
|
||||
memory_limit.cpp
|
||||
run_time_configurable.cpp
|
||||
storage_mode.cpp
|
||||
query.cpp)
|
||||
query.cpp
|
||||
replication.cpp)
|
||||
target_include_directories(mg-flags PUBLIC ${CMAKE_SOURCE_DIR}/include)
|
||||
target_link_libraries(mg-flags PUBLIC spdlog::spdlog mg-settings mg-utils)
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -17,5 +17,6 @@
|
||||
#include "flags/log_level.hpp"
|
||||
#include "flags/memory_limit.hpp"
|
||||
#include "flags/query.hpp"
|
||||
#include "flags/replication.hpp"
|
||||
#include "flags/run_time_configurable.hpp"
|
||||
#include "flags/storage_mode.hpp"
|
||||
|
@ -131,12 +131,6 @@ DEFINE_uint64(storage_recovery_thread_count,
|
||||
DEFINE_bool(storage_enable_schema_metadata, false,
|
||||
"Controls whether metadata should be collected about the resident labels and edge types.");
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_bool(storage_delete_on_drop, true,
|
||||
"If set to true the query 'DROP DATABASE x' will delete the underlying storage as well.");
|
||||
#endif
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_bool(telemetry_enabled, false,
|
||||
"Set to true to enable telemetry. We collect information about the "
|
||||
@ -162,13 +156,6 @@ DEFINE_string(pulsar_service_url, "", "Default URL used while connecting to Puls
|
||||
|
||||
// Query flags.
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_uint64(replication_replica_check_frequency_sec, 1,
|
||||
"The time duration between two replica checks/pings. If < 1, replicas will NOT be checked at all. NOTE: "
|
||||
"The MAIN instance allocates a new thread for each REPLICA.");
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_bool(replication_restore_state_on_startup, false, "Restore replication state on startup, e.g. recover replica");
|
||||
|
||||
DEFINE_VALIDATED_string(query_modules_directory, "",
|
||||
"Directory where modules with custom query procedures are stored. "
|
||||
"NOTE: Multiple comma-separated directories can be defined.",
|
||||
@ -208,3 +195,9 @@ DEFINE_HIDDEN_string(organization_name, "", "Organization name.");
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_string(auth_user_or_role_name_regex, memgraph::glue::kDefaultUserRoleRegex.data(),
|
||||
"Set to the regular expression that each user or role name must fulfill.");
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_bool(auth_password_permit_null, true, "Set to false to disable null passwords.");
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_string(auth_password_strength_regex, memgraph::glue::kDefaultPasswordRegex.data(),
|
||||
"The regular expression that should be used to match the entire "
|
||||
"entered password to ensure its strength.");
|
||||
|
@ -84,10 +84,6 @@ DECLARE_bool(storage_parallel_schema_recovery);
|
||||
DECLARE_uint64(storage_recovery_thread_count);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(storage_enable_schema_metadata);
|
||||
#ifdef MG_ENTERPRISE
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(storage_delete_on_drop);
|
||||
#endif
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(telemetry_enabled);
|
||||
@ -116,14 +112,13 @@ namespace memgraph::flags {
|
||||
auto ParseQueryModulesDirectory() -> std::vector<std::filesystem::path>;
|
||||
} // namespace memgraph::flags
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_uint64(replication_replica_check_frequency_sec);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(replication_restore_state_on_startup);
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_string(license_key);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_string(organization_name);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_string(auth_user_or_role_name_regex);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(auth_password_permit_null);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_string(auth_password_strength_regex);
|
||||
|
26 src/flags/replication.cpp Normal file
@ -0,0 +1,26 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#include "replication.hpp"

#ifdef MG_ENTERPRISE
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_bool(coordinator, false, "Controls whether the instance is a replication coordinator.");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_uint32(coordinator_server_port, 0, "Port on which coordinator servers will be started.");
#endif

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_uint64(replication_replica_check_frequency_sec, 1,
"The time duration between two replica checks/pings. If < 1, replicas will NOT be checked at all. NOTE: "
"The MAIN instance allocates a new thread for each REPLICA.");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_bool(replication_restore_state_on_startup, false, "Restore replication state on startup, e.g. recover replica");
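These flags follow the usual gflags pattern: DEFINE_* in exactly one .cpp, DECLARE_* in the header for other translation units, and the value is read through the FLAGS_ global after parsing. A minimal stand-alone sketch with a made-up flag name (not one of Memgraph's flags):

#include <iostream>

#include "gflags/gflags.h"

// Defined once; other files would use DECLARE_uint64(example_check_frequency_sec);
DEFINE_uint64(example_check_frequency_sec, 1, "How often (in seconds) to run the example check.");

int main(int argc, char *argv[]) {
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  std::cout << "check frequency: " << FLAGS_example_check_frequency_sec << "s\n";
  return 0;
}

Running the binary with --example_check_frequency_sec=5 overrides the default of 1.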
|
26 src/flags/replication.hpp Normal file
@ -0,0 +1,26 @@
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
// License, and you may not use this file except in compliance with the Business Source License.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "gflags/gflags.h"
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(coordinator);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_uint32(coordinator_server_port);
|
||||
#endif
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_uint64(replication_replica_check_frequency_sec);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(replication_restore_state_on_startup);
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -17,20 +17,14 @@
|
||||
|
||||
#include "gflags/gflags.h"
|
||||
|
||||
#include <array>
|
||||
|
||||
inline constexpr std::array storage_mode_mappings{
|
||||
std::pair{std::string_view{"IN_MEMORY_TRANSACTIONAL"}, memgraph::storage::StorageMode::IN_MEMORY_TRANSACTIONAL},
|
||||
std::pair{std::string_view{"IN_MEMORY_ANALYTICAL"}, memgraph::storage::StorageMode::IN_MEMORY_ANALYTICAL},
|
||||
std::pair{std::string_view{"ON_DISK_TRANSACTIONAL"}, memgraph::storage::StorageMode::ON_DISK_TRANSACTIONAL}};
|
||||
|
||||
const std::string storage_mode_help_string =
|
||||
fmt::format("Default storage mode Memgraph uses. Allowed values: {}",
|
||||
memgraph::utils::GetAllowedEnumValuesString(storage_mode_mappings));
|
||||
memgraph::utils::GetAllowedEnumValuesString(memgraph::storage::storage_mode_mappings));
|
||||
|
||||
// NOLINTNEXTLINE (cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_VALIDATED_string(storage_mode, "IN_MEMORY_TRANSACTIONAL", storage_mode_help_string.c_str(), {
|
||||
if (const auto result = memgraph::utils::IsValidEnumValueString(value, storage_mode_mappings); result.HasError()) {
|
||||
if (const auto result = memgraph::utils::IsValidEnumValueString(value, memgraph::storage::storage_mode_mappings);
|
||||
result.HasError()) {
|
||||
switch (result.GetError()) {
|
||||
case memgraph::utils::ValidationError::EmptyValue: {
|
||||
std::cout << "Storage mode cannot be empty." << std::endl;
|
||||
@ -38,7 +32,7 @@ DEFINE_VALIDATED_string(storage_mode, "IN_MEMORY_TRANSACTIONAL", storage_mode_he
|
||||
}
|
||||
case memgraph::utils::ValidationError::InvalidValue: {
|
||||
std::cout << "Invalid value for storage mode. Allowed values: "
|
||||
<< memgraph::utils::GetAllowedEnumValuesString(storage_mode_mappings) << std::endl;
|
||||
<< memgraph::utils::GetAllowedEnumValuesString(memgraph::storage::storage_mode_mappings) << std::endl;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -48,8 +42,8 @@ DEFINE_VALIDATED_string(storage_mode, "IN_MEMORY_TRANSACTIONAL", storage_mode_he
|
||||
});
|
||||
|
||||
memgraph::storage::StorageMode memgraph::flags::ParseStorageMode() {
|
||||
const auto storage_mode =
|
||||
memgraph::utils::StringToEnum<memgraph::storage::StorageMode>(FLAGS_storage_mode, storage_mode_mappings);
|
||||
const auto storage_mode = memgraph::utils::StringToEnum<memgraph::storage::StorageMode>(
|
||||
FLAGS_storage_mode, memgraph::storage::storage_mode_mappings);
|
||||
MG_ASSERT(storage_mode, "Invalid storage mode");
|
||||
return *storage_mode;
|
||||
}
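ParseStorageMode above resolves the flag string through a shared array of name/enum pairs (storage_mode_mappings), so the help text, the validator and the parser all use one source of truth. A small sketch of that lookup (standard C++; the enum and mapping here are illustrative stand-ins):

#include <array>
#include <optional>
#include <string_view>
#include <utility>

enum class StorageMode { IN_MEMORY_TRANSACTIONAL, IN_MEMORY_ANALYTICAL, ON_DISK_TRANSACTIONAL };

inline constexpr std::array storage_mode_mappings{
    std::pair{std::string_view{"IN_MEMORY_TRANSACTIONAL"}, StorageMode::IN_MEMORY_TRANSACTIONAL},
    std::pair{std::string_view{"IN_MEMORY_ANALYTICAL"}, StorageMode::IN_MEMORY_ANALYTICAL},
    std::pair{std::string_view{"ON_DISK_TRANSACTIONAL"}, StorageMode::ON_DISK_TRANSACTIONAL}};

// One lookup used both for validation ("is this a known value?") and parsing ("which enum is it?").
constexpr std::optional<StorageMode> StringToStorageMode(std::string_view value) {
  for (const auto &[name, mode] : storage_mode_mappings) {
    if (name == value) return mode;
  }
  return std::nullopt;
}

static_assert(*StringToStorageMode("IN_MEMORY_ANALYTICAL") == StorageMode::IN_MEMORY_ANALYTICAL);
static_assert(!StringToStorageMode("BOGUS"));

int main() { return StringToStorageMode("ON_DISK_TRANSACTIONAL").has_value() ? 0 : 1; }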
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -80,7 +80,7 @@ std::vector<memgraph::communication::bolt::Value> TypedValueResultStreamBase::De
|
||||
std::vector<memgraph::communication::bolt::Value> decoded_values;
|
||||
decoded_values.reserve(values.size());
|
||||
for (const auto &v : values) {
|
||||
auto maybe_value = memgraph::glue::ToBoltValue(v, *storage_, memgraph::storage::View::NEW);
|
||||
auto maybe_value = memgraph::glue::ToBoltValue(v, storage_, memgraph::storage::View::NEW);
|
||||
if (maybe_value.HasError()) {
|
||||
switch (maybe_value.GetError()) {
|
||||
case memgraph::storage::Error::DELETED_OBJECT:
|
||||
@ -112,14 +112,14 @@ std::string SessionHL::GetDefaultDB() {
|
||||
if (user_.has_value()) {
|
||||
return user_->db_access().GetDefault();
|
||||
}
|
||||
return memgraph::dbms::kDefaultDB;
|
||||
return std::string{memgraph::dbms::kDefaultDB};
|
||||
}
|
||||
#endif
|
||||
|
||||
std::string SessionHL::GetCurrentDB() const {
|
||||
if (!interpreter_.current_db_.db_acc_) return "";
|
||||
const auto *db = interpreter_.current_db_.db_acc_->get();
|
||||
return db->id();
|
||||
return db->name();
|
||||
}
|
||||
|
||||
std::optional<std::string> SessionHL::GetServerNameForInit() {
|
||||
@ -167,10 +167,10 @@ std::map<std::string, memgraph::communication::bolt::Value> SessionHL::Discard(s
|
||||
std::map<std::string, memgraph::communication::bolt::Value> SessionHL::Pull(SessionHL::TEncoder *encoder,
|
||||
std::optional<int> n,
|
||||
std::optional<int> qid) {
|
||||
// TODO: Update once interpreter can handle non-database queries (db_acc will be nullopt)
|
||||
auto *db = interpreter_.current_db_.db_acc_->get();
|
||||
try {
|
||||
TypedValueResultStream<TEncoder> stream(encoder, db->storage());
|
||||
auto &db = interpreter_.current_db_.db_acc_;
|
||||
auto *storage = db ? db->get()->storage() : nullptr;
|
||||
TypedValueResultStream<TEncoder> stream(encoder, storage);
|
||||
return DecodeSummary(interpreter_.Pull(&stream, n, qid));
|
||||
} catch (const memgraph::query::QueryException &e) {
|
||||
// Count the number of specific exceptions thrown
|
||||
@ -193,17 +193,17 @@ std::pair<std::vector<std::string>, std::optional<int>> SessionHL::Interpret(
|
||||
for (const auto &[key, bolt_param] : params) {
|
||||
params_pv.emplace(key, ToPropertyValue(bolt_param));
|
||||
}
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
const std::string *username{nullptr};
|
||||
if (user_) {
|
||||
username = &user_->username();
|
||||
}
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
// TODO: Update once interpreter can handle non-database queries (db_acc will be nullopt)
|
||||
auto *db = interpreter_.current_db_.db_acc_->get();
|
||||
if (memgraph::license::global_license_checker.IsEnterpriseValidFast()) {
|
||||
auto &db = interpreter_.current_db_.db_acc_;
|
||||
audit_log_->Record(endpoint_.address().to_string(), user_ ? *username : "", query,
|
||||
memgraph::storage::PropertyValue(params_pv), db->id());
|
||||
memgraph::storage::PropertyValue(params_pv), db ? db->get()->name() : "no known database");
|
||||
}
|
||||
#endif
|
||||
try {
|
||||
@ -351,11 +351,11 @@ SessionHL::~SessionHL() {
|
||||
|
||||
std::map<std::string, memgraph::communication::bolt::Value> SessionHL::DecodeSummary(
|
||||
const std::map<std::string, memgraph::query::TypedValue> &summary) {
|
||||
// TODO: Update once interpreter can handle non-database queries (db_acc will be nullopt)
|
||||
auto *db = interpreter_.current_db_.db_acc_->get();
|
||||
auto &db_acc = interpreter_.current_db_.db_acc_;
|
||||
auto *storage = db_acc ? db_acc->get()->storage() : nullptr;
|
||||
std::map<std::string, memgraph::communication::bolt::Value> decoded_summary;
|
||||
for (const auto &kv : summary) {
|
||||
auto maybe_value = ToBoltValue(kv.second, *db->storage(), memgraph::storage::View::NEW);
|
||||
auto maybe_value = ToBoltValue(kv.second, storage, memgraph::storage::View::NEW);
|
||||
if (maybe_value.HasError()) {
|
||||
switch (maybe_value.GetError()) {
|
||||
case memgraph::storage::Error::DELETED_OBJECT:
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -66,6 +66,8 @@ auth::Permission PrivilegeToPermission(query::AuthQuery::Privilege privilege) {
|
||||
return auth::Permission::MULTI_DATABASE_EDIT;
|
||||
case query::AuthQuery::Privilege::MULTI_DATABASE_USE:
|
||||
return auth::Permission::MULTI_DATABASE_USE;
|
||||
case query::AuthQuery::Privilege::COORDINATOR:
|
||||
return auth::Permission::COORDINATOR;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -13,4 +13,5 @@
|
||||
|
||||
namespace memgraph::glue {
|
||||
inline constexpr std::string_view kDefaultUserRoleRegex = "[a-zA-Z0-9_.+-@]+";
|
||||
static constexpr std::string_view kDefaultPasswordRegex = ".+";
|
||||
} // namespace memgraph::glue
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -249,25 +249,10 @@ std::vector<std::vector<memgraph::query::TypedValue>> ShowFineGrainedRolePrivile
|
||||
namespace memgraph::glue {
|
||||
|
||||
AuthQueryHandler::AuthQueryHandler(
|
||||
memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> *auth,
|
||||
std::string name_regex_string)
|
||||
: auth_(auth), name_regex_string_(std::move(name_regex_string)), name_regex_(name_regex_string_) {}
|
||||
memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> *auth)
|
||||
: auth_(auth) {}
|
||||
|
||||
bool AuthQueryHandler::CreateUser(const std::string &username, const std::optional<std::string> &password) {
|
||||
if (name_regex_string_ != kDefaultUserRoleRegex) {
|
||||
if (const auto license_check_result =
|
||||
memgraph::license::global_license_checker.IsEnterpriseValid(memgraph::utils::global_settings);
|
||||
license_check_result.HasError()) {
|
||||
throw memgraph::auth::AuthException(
|
||||
"Custom user/role regex is a Memgraph Enterprise feature. Please set the config "
|
||||
"(\"--auth-user-or-role-name-regex\") to its default value (\"{}\") or remove the flag.\n{}",
|
||||
kDefaultUserRoleRegex,
|
||||
memgraph::license::LicenseCheckErrorToString(license_check_result.GetError(), "user/role regex"));
|
||||
}
|
||||
}
|
||||
if (!std::regex_match(username, name_regex_)) {
|
||||
throw query::QueryRuntimeException("Invalid user name.");
|
||||
}
|
||||
try {
|
||||
const auto [first_user, user_added] = std::invoke([&, this] {
|
||||
auto locked_auth = auth_->Lock();
|
||||
@ -294,7 +279,7 @@ bool AuthQueryHandler::CreateUser(const std::string &username, const std::option
|
||||
);
|
||||
#ifdef MG_ENTERPRISE
|
||||
GrantDatabaseToUser(auth::kAllDatabases, username);
|
||||
SetMainDatabase(username, dbms::kDefaultDB);
|
||||
SetMainDatabase(dbms::kDefaultDB, username);
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -305,9 +290,6 @@ bool AuthQueryHandler::CreateUser(const std::string &username, const std::option
|
||||
}
|
||||
|
||||
bool AuthQueryHandler::DropUser(const std::string &username) {
|
||||
if (!std::regex_match(username, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid user name.");
|
||||
}
|
||||
try {
|
||||
auto locked_auth = auth_->Lock();
|
||||
auto user = locked_auth->GetUser(username);
|
||||
@ -319,16 +301,13 @@ bool AuthQueryHandler::DropUser(const std::string &username) {
|
||||
}
|
||||
|
||||
void AuthQueryHandler::SetPassword(const std::string &username, const std::optional<std::string> &password) {
|
||||
if (!std::regex_match(username, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid user name.");
|
||||
}
|
||||
try {
|
||||
auto locked_auth = auth_->Lock();
|
||||
auto user = locked_auth->GetUser(username);
|
||||
if (!user) {
|
||||
throw memgraph::query::QueryRuntimeException("User '{}' doesn't exist.", username);
|
||||
}
|
||||
user->UpdatePassword(password);
|
||||
locked_auth->UpdatePassword(*user, password);
|
||||
locked_auth->SaveUser(*user);
|
||||
} catch (const memgraph::auth::AuthException &e) {
|
||||
throw memgraph::query::QueryRuntimeException(e.what());
|
||||
@ -336,9 +315,6 @@ void AuthQueryHandler::SetPassword(const std::string &username, const std::optio
|
||||
}
|
||||
|
||||
bool AuthQueryHandler::CreateRole(const std::string &rolename) {
|
||||
if (!std::regex_match(rolename, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid role name.");
|
||||
}
|
||||
try {
|
||||
auto locked_auth = auth_->Lock();
|
||||
return locked_auth->AddRole(rolename).has_value();
|
||||
@ -349,9 +325,6 @@ bool AuthQueryHandler::CreateRole(const std::string &rolename) {
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
bool AuthQueryHandler::RevokeDatabaseFromUser(const std::string &db, const std::string &username) {
|
||||
if (!std::regex_match(username, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid user name.");
|
||||
}
|
||||
try {
|
||||
auto locked_auth = auth_->Lock();
|
||||
auto user = locked_auth->GetUser(username);
|
||||
@ -363,9 +336,6 @@ bool AuthQueryHandler::RevokeDatabaseFromUser(const std::string &db, const std::
|
||||
}
|
||||
|
||||
bool AuthQueryHandler::GrantDatabaseToUser(const std::string &db, const std::string &username) {
|
||||
if (!std::regex_match(username, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid user name.");
|
||||
}
|
||||
try {
|
||||
auto locked_auth = auth_->Lock();
|
||||
auto user = locked_auth->GetUser(username);
|
||||
@ -378,9 +348,6 @@ bool AuthQueryHandler::GrantDatabaseToUser(const std::string &db, const std::str
|
||||
|
||||
std::vector<std::vector<memgraph::query::TypedValue>> AuthQueryHandler::GetDatabasePrivileges(
|
||||
const std::string &username) {
|
||||
if (!std::regex_match(username, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid user or role name.");
|
||||
}
|
||||
try {
|
||||
auto locked_auth = auth_->ReadLock();
|
||||
auto user = locked_auth->GetUser(username);
|
||||
@ -393,10 +360,7 @@ std::vector<std::vector<memgraph::query::TypedValue>> AuthQueryHandler::GetDatab
|
||||
}
|
||||
}
|
||||
|
||||
bool AuthQueryHandler::SetMainDatabase(const std::string &db, const std::string &username) {
|
||||
if (!std::regex_match(username, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid user name.");
|
||||
}
|
||||
bool AuthQueryHandler::SetMainDatabase(std::string_view db, const std::string &username) {
|
||||
try {
|
||||
auto locked_auth = auth_->Lock();
|
||||
auto user = locked_auth->GetUser(username);
|
||||
@ -417,9 +381,6 @@ void AuthQueryHandler::DeleteDatabase(std::string_view db) {
|
||||
#endif
|
||||
|
||||
bool AuthQueryHandler::DropRole(const std::string &rolename) {
|
||||
if (!std::regex_match(rolename, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid role name.");
|
||||
}
|
||||
try {
|
||||
auto locked_auth = auth_->Lock();
|
||||
auto role = locked_auth->GetRole(rolename);
|
||||
@ -465,9 +426,6 @@ std::vector<memgraph::query::TypedValue> AuthQueryHandler::GetRolenames() {
|
||||
}
|
||||
|
||||
std::optional<std::string> AuthQueryHandler::GetRolenameForUser(const std::string &username) {
|
||||
if (!std::regex_match(username, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid user name.");
|
||||
}
|
||||
try {
|
||||
auto locked_auth = auth_->ReadLock();
|
||||
auto user = locked_auth->GetUser(username);
|
||||
@ -485,9 +443,6 @@ std::optional<std::string> AuthQueryHandler::GetRolenameForUser(const std::strin
|
||||
}
|
||||
|
||||
std::vector<memgraph::query::TypedValue> AuthQueryHandler::GetUsernamesForRole(const std::string &rolename) {
|
||||
if (!std::regex_match(rolename, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid role name.");
|
||||
}
|
||||
try {
|
||||
auto locked_auth = auth_->ReadLock();
|
||||
auto role = locked_auth->GetRole(rolename);
|
||||
@ -507,12 +462,6 @@ std::vector<memgraph::query::TypedValue> AuthQueryHandler::GetUsernamesForRole(c
|
||||
}
|
||||
|
||||
void AuthQueryHandler::SetRole(const std::string &username, const std::string &rolename) {
|
||||
if (!std::regex_match(username, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid user name.");
|
||||
}
|
||||
if (!std::regex_match(rolename, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid role name.");
|
||||
}
|
||||
try {
|
||||
auto locked_auth = auth_->Lock();
|
||||
auto user = locked_auth->GetUser(username);
|
||||
@ -535,9 +484,6 @@ void AuthQueryHandler::SetRole(const std::string &username, const std::string &r
|
||||
}
|
||||
|
||||
void AuthQueryHandler::ClearRole(const std::string &username) {
|
||||
if (!std::regex_match(username, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid user name.");
|
||||
}
|
||||
try {
|
||||
auto locked_auth = auth_->Lock();
|
||||
auto user = locked_auth->GetUser(username);
|
||||
@ -552,9 +498,6 @@ void AuthQueryHandler::ClearRole(const std::string &username) {
|
||||
}
|
||||
|
||||
std::vector<std::vector<memgraph::query::TypedValue>> AuthQueryHandler::GetPrivileges(const std::string &user_or_role) {
|
||||
if (!std::regex_match(user_or_role, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid user or role name.");
|
||||
}
|
||||
try {
|
||||
auto locked_auth = auth_->ReadLock();
|
||||
std::vector<std::vector<memgraph::query::TypedValue>> grants;
|
||||
@ -704,9 +647,6 @@ void AuthQueryHandler::EditPermissions(
|
||||
const TEditFineGrainedPermissionsFun &edit_fine_grained_permissions_fun
|
||||
#endif
|
||||
) {
|
||||
if (!std::regex_match(user_or_role, name_regex_)) {
|
||||
throw memgraph::query::QueryRuntimeException("Invalid user or role name.");
|
||||
}
|
||||
try {
|
||||
std::vector<memgraph::auth::Permission> permissions;
|
||||
permissions.reserve(privileges.size());
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -24,12 +24,9 @@ namespace memgraph::glue {
|
||||
|
||||
class AuthQueryHandler final : public memgraph::query::AuthQueryHandler {
|
||||
memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> *auth_;
|
||||
std::string name_regex_string_;
|
||||
std::regex name_regex_;
|
||||
|
||||
public:
|
||||
AuthQueryHandler(memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> *auth,
|
||||
std::string name_regex_string);
|
||||
AuthQueryHandler(memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> *auth);
|
||||
|
||||
bool CreateUser(const std::string &username, const std::optional<std::string> &password) override;
|
||||
|
||||
@ -44,7 +41,7 @@ class AuthQueryHandler final : public memgraph::query::AuthQueryHandler {
|
||||
|
||||
std::vector<std::vector<memgraph::query::TypedValue>> GetDatabasePrivileges(const std::string &username) override;
|
||||
|
||||
bool SetMainDatabase(const std::string &db, const std::string &username) override;
|
||||
bool SetMainDatabase(std::string_view db, const std::string &username) override;
|
||||
|
||||
void DeleteDatabase(std::string_view db) override;
|
||||
#endif
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -73,8 +73,14 @@ storage::Result<communication::bolt::Edge> ToBoltEdge(const query::EdgeAccessor
|
||||
return ToBoltEdge(edge.impl_, db, view);
|
||||
}
|
||||
|
||||
storage::Result<Value> ToBoltValue(const query::TypedValue &value, const storage::Storage &db, storage::View view) {
|
||||
storage::Result<Value> ToBoltValue(const query::TypedValue &value, const storage::Storage *db, storage::View view) {
|
||||
auto check_db = [db]() {
|
||||
if (db == nullptr) [[unlikely]]
|
||||
throw communication::bolt::ValueException("Database needed for TypedValue conversion.");
|
||||
};
|
||||
|
||||
switch (value.type()) {
|
||||
// No database needed
|
||||
case query::TypedValue::Type::Null:
|
||||
return Value();
|
||||
case query::TypedValue::Type::Bool:
|
||||
@ -85,16 +91,16 @@ storage::Result<Value> ToBoltValue(const query::TypedValue &value, const storage
|
||||
return Value(value.ValueDouble());
|
||||
case query::TypedValue::Type::String:
|
||||
return Value(std::string(value.ValueString()));
|
||||
case query::TypedValue::Type::List: {
|
||||
std::vector<Value> values;
|
||||
values.reserve(value.ValueList().size());
|
||||
for (const auto &v : value.ValueList()) {
|
||||
auto maybe_value = ToBoltValue(v, db, view);
|
||||
if (maybe_value.HasError()) return maybe_value.GetError();
|
||||
values.emplace_back(std::move(*maybe_value));
|
||||
}
|
||||
return Value(std::move(values));
|
||||
}
|
||||
case query::TypedValue::Type::Date:
|
||||
return Value(value.ValueDate());
|
||||
case query::TypedValue::Type::LocalTime:
|
||||
return Value(value.ValueLocalTime());
|
||||
case query::TypedValue::Type::LocalDateTime:
|
||||
return Value(value.ValueLocalDateTime());
|
||||
case query::TypedValue::Type::Duration:
|
||||
return Value(value.ValueDuration());
|
||||
|
||||
// Database potentially not required
|
||||
case query::TypedValue::Type::Map: {
|
||||
std::map<std::string, Value> map;
|
||||
for (const auto &kv : value.ValueMap()) {
|
||||
@ -104,35 +110,48 @@ storage::Result<Value> ToBoltValue(const query::TypedValue &value, const storage
|
||||
}
|
||||
return Value(std::move(map));
|
||||
}
|
||||
|
||||
// Database is required
|
||||
case query::TypedValue::Type::List: {
|
||||
check_db();
|
||||
std::vector<Value> values;
|
||||
values.reserve(value.ValueList().size());
|
||||
for (const auto &v : value.ValueList()) {
|
||||
auto maybe_value = ToBoltValue(v, db, view);
|
||||
if (maybe_value.HasError()) return maybe_value.GetError();
|
||||
values.emplace_back(std::move(*maybe_value));
|
||||
}
|
||||
return Value(std::move(values));
|
||||
}
|
||||
case query::TypedValue::Type::Vertex: {
|
||||
auto maybe_vertex = ToBoltVertex(value.ValueVertex(), db, view);
|
||||
check_db();
|
||||
auto maybe_vertex = ToBoltVertex(value.ValueVertex(), *db, view);
|
||||
if (maybe_vertex.HasError()) return maybe_vertex.GetError();
|
||||
return Value(std::move(*maybe_vertex));
|
||||
}
|
||||
case query::TypedValue::Type::Edge: {
|
||||
auto maybe_edge = ToBoltEdge(value.ValueEdge(), db, view);
|
||||
check_db();
|
||||
auto maybe_edge = ToBoltEdge(value.ValueEdge(), *db, view);
|
||||
if (maybe_edge.HasError()) return maybe_edge.GetError();
|
||||
return Value(std::move(*maybe_edge));
|
||||
}
|
||||
case query::TypedValue::Type::Path: {
|
||||
auto maybe_path = ToBoltPath(value.ValuePath(), db, view);
|
||||
check_db();
|
||||
auto maybe_path = ToBoltPath(value.ValuePath(), *db, view);
|
||||
if (maybe_path.HasError()) return maybe_path.GetError();
|
||||
return Value(std::move(*maybe_path));
|
||||
}
|
||||
case query::TypedValue::Type::Date:
|
||||
return Value(value.ValueDate());
|
||||
case query::TypedValue::Type::LocalTime:
|
||||
return Value(value.ValueLocalTime());
|
||||
case query::TypedValue::Type::LocalDateTime:
|
||||
return Value(value.ValueLocalDateTime());
|
||||
case query::TypedValue::Type::Duration:
|
||||
return Value(value.ValueDuration());
|
||||
case query::TypedValue::Type::Function:
|
||||
throw communication::bolt::ValueException("Unsupported conversion from TypedValue::Function to Value");
|
||||
case query::TypedValue::Type::Graph:
|
||||
auto maybe_graph = ToBoltGraph(value.ValueGraph(), db, view);
|
||||
case query::TypedValue::Type::Graph: {
|
||||
check_db();
|
||||
auto maybe_graph = ToBoltGraph(value.ValueGraph(), *db, view);
|
||||
if (maybe_graph.HasError()) return maybe_graph.GetError();
|
||||
return Value(std::move(*maybe_graph));
|
||||
}
|
||||
|
||||
// Unsupported conversions
|
||||
case query::TypedValue::Type::Function: {
|
||||
throw communication::bolt::ValueException("Unsupported conversion from TypedValue::Function to Value");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -65,7 +65,7 @@ storage::Result<std::map<std::string, communication::bolt::Value>> ToBoltGraph(c
|
||||
/// @param storage::View for ToBoltVertex and ToBoltEdge.
|
||||
///
|
||||
/// @throw std::bad_alloc
|
||||
storage::Result<communication::bolt::Value> ToBoltValue(const query::TypedValue &value, const storage::Storage &db,
|
||||
storage::Result<communication::bolt::Value> ToBoltValue(const query::TypedValue &value, const storage::Storage *db,
|
||||
storage::View view);
|
||||
|
||||
query::TypedValue ToTypedValue(const communication::bolt::Value &value);
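Note on the signature change above: ToBoltValue now receives the storage as a nullable pointer. Per the new implementation earlier in this diff, scalar, map and temporal values convert without touching storage, while list, vertex, edge, path and graph values call check_db() and throw a ValueException when no storage is available. A minimal caller-side sketch follows (the surrounding namespaces and the typed_value variable are assumptions, not taken from the diff):

// Sketch only: exercising the pointer-taking ToBoltValue overload declared above.
// `typed_value` is assumed to exist; storage may legitimately be nullptr for
// results that never touch the database.
memgraph::storage::Storage *storage = nullptr;  // no database in scope
auto maybe_value = ToBoltValue(typed_value, storage, memgraph::storage::View::NEW);
if (maybe_value.HasError()) {
  // storage errors (e.g. DELETED_OBJECT) still arrive through storage::Result
}
// Graph-bound values (Vertex, Edge, Path, Graph) and lists require a non-null
// storage pointer; passing nullptr for those makes check_db() throw ValueException.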
|
||||
|
@ -8,4 +8,5 @@ find_package(fmt REQUIRED)
|
||||
find_package(Threads REQUIRED)
|
||||
|
||||
add_library(mg-io STATIC ${io_src_files})
|
||||
add_library(mg::io ALIAS mg-io)
|
||||
target_link_libraries(mg-io stdc++fs Threads::Threads fmt::fmt mg-utils)
|
||||
|
@ -166,7 +166,7 @@ bool Endpoint::IsResolvableAddress(const std::string &address, uint16_t port) {
|
||||
}
|
||||
|
||||
std::optional<std::pair<std::string, uint16_t>> Endpoint::ParseSocketOrAddress(
|
||||
const std::string &address, const std::optional<uint16_t> default_port = {}) {
|
||||
const std::string &address, const std::optional<uint16_t> default_port) {
|
||||
const std::string delimiter = ":";
|
||||
std::vector<std::string> parts = utils::Split(address, delimiter);
|
||||
if (parts.size() == 1) {
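The ParseSocketOrAddress hunk above drops the default argument from the out-of-line definition. In C++ a default argument may appear on the declaration or on one definition, but it must not be repeated; redefining it, even with the same value, is ill-formed. A minimal sketch of the corrected pattern (simplified free-function signature and illustrative file names, not the real header):

// endpoint.hpp (the declaration carries the default)
std::optional<std::pair<std::string, uint16_t>> ParseSocketOrAddress(
    const std::string &address, std::optional<uint16_t> default_port = {});

// endpoint.cpp (the definition must not repeat "= {}")
std::optional<std::pair<std::string, uint16_t>> ParseSocketOrAddress(
    const std::string &address, std::optional<uint16_t> default_port) {
  // ... split on ':' and fall back to default_port when no port is given ...
  return std::nullopt;
}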
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -51,7 +51,7 @@ KVStore &KVStore::operator=(KVStore &&other) {
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool KVStore::Put(const std::string &key, const std::string &value) {
|
||||
bool KVStore::Put(std::string_view key, std::string_view value) {
|
||||
auto s = pimpl_->db->Put(rocksdb::WriteOptions(), key, value);
|
||||
return s.ok();
|
||||
}
|
||||
@ -65,7 +65,7 @@ bool KVStore::PutMultiple(const std::map<std::string, std::string> &items) {
|
||||
return s.ok();
|
||||
}
|
||||
|
||||
std::optional<std::string> KVStore::Get(const std::string &key) const noexcept {
|
||||
std::optional<std::string> KVStore::Get(std::string_view key) const noexcept {
|
||||
std::string value;
|
||||
auto s = pimpl_->db->Get(rocksdb::ReadOptions(), key, &value);
|
||||
if (!s.ok()) return std::nullopt;
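With the std::string_view overloads above, Put and Get no longer force callers to materialise std::string temporaries for literals or sub-strings. A usage sketch (the KVStore instance and its construction are assumed and not shown in this diff):

// Hypothetical usage of the new overloads; `kv` is an already-constructed KVStore.
std::string_view key = "epoch_id";
if (!kv.Put(key, "42")) {
  // false: RocksDB reported an error while writing
}
if (auto value = kv.Get(key)) {
  // *value is the stored string; std::nullopt means missing key or read error
}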
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -61,7 +61,7 @@ class KVStore final {
|
||||
* @return true if the value has been successfully stored.
|
||||
* In case of any error false is going to be returned.
|
||||
*/
|
||||
bool Put(const std::string &key, const std::string &value);
|
||||
bool Put(std::string_view key, std::string_view value);
|
||||
|
||||
/**
|
||||
* Store values under the given keys.
|
||||
@ -81,7 +81,7 @@ class KVStore final {
|
||||
* @return Value for the given key. std::nullopt in case of any error
|
||||
* OR the value doesn't exist.
|
||||
*/
|
||||
std::optional<std::string> Get(const std::string &key) const noexcept;
|
||||
std::optional<std::string> Get(std::string_view key) const noexcept;
|
||||
|
||||
/**
|
||||
* Deletes the key and corresponding value from storage.
|
||||
|
@ -11,13 +11,11 @@
|
||||
|
||||
#include <cstdint>
|
||||
#include "audit/log.hpp"
|
||||
#include "communication/metrics.hpp"
|
||||
#include "communication/websocket/auth.hpp"
|
||||
#include "communication/websocket/server.hpp"
|
||||
#include "dbms/constants.hpp"
|
||||
#include "dbms/inmemory/replication_handlers.hpp"
|
||||
#include "flags/all.hpp"
|
||||
#include "flags/run_time_configurable.hpp"
|
||||
#include "glue/MonitoringServerT.hpp"
|
||||
#include "glue/ServerT.hpp"
|
||||
#include "glue/auth_checker.hpp"
|
||||
@ -33,9 +31,9 @@
|
||||
#include "query/procedure/module.hpp"
|
||||
#include "query/procedure/py_module.hpp"
|
||||
#include "requests/requests.hpp"
|
||||
#include "storage/v2/durability/durability.hpp"
|
||||
#include "telemetry/telemetry.hpp"
|
||||
#include "utils/signals.hpp"
|
||||
#include "utils/skip_list.hpp"
|
||||
#include "utils/sysinfo/memory.hpp"
|
||||
#include "utils/system_info.hpp"
|
||||
#include "utils/terminate_handler.hpp"
|
||||
@ -73,7 +71,7 @@ void InitFromCypherlFile(memgraph::query::InterpreterContext &ctx, memgraph::dbm
|
||||
spdlog::warn("{} The rest of the init-file will be run.", e.what());
|
||||
}
|
||||
if (audit_log) {
|
||||
audit_log->Record("", "", line, {}, memgraph::dbms::kDefaultDB);
|
||||
audit_log->Record("", "", line, {}, std::string{memgraph::dbms::kDefaultDB});
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -300,8 +298,7 @@ int main(int argc, char **argv) {
|
||||
memgraph::storage::Config db_config{
|
||||
.gc = {.type = memgraph::storage::Config::Gc::Type::PERIODIC,
|
||||
.interval = std::chrono::seconds(FLAGS_storage_gc_cycle_sec)},
|
||||
.items = {.properties_on_edges = FLAGS_storage_properties_on_edges,
|
||||
.enable_schema_metadata = FLAGS_storage_enable_schema_metadata},
|
||||
|
||||
.durability = {.storage_directory = FLAGS_data_directory,
|
||||
.recover_on_startup = FLAGS_storage_recover_on_startup || FLAGS_data_recovery_on_startup,
|
||||
.snapshot_retention_count = FLAGS_storage_snapshot_retention_count,
|
||||
@ -323,7 +320,9 @@ int main(int argc, char **argv) {
|
||||
.id_name_mapper_directory = FLAGS_data_directory + "/rocksdb_id_name_mapper",
|
||||
.durability_directory = FLAGS_data_directory + "/rocksdb_durability",
|
||||
.wal_directory = FLAGS_data_directory + "/rocksdb_wal"},
|
||||
.storage_mode = memgraph::flags::ParseStorageMode()};
|
||||
.salient.items = {.properties_on_edges = FLAGS_storage_properties_on_edges,
|
||||
.enable_schema_metadata = FLAGS_storage_enable_schema_metadata},
|
||||
.salient.storage_mode = memgraph::flags::ParseStorageMode()};
|
||||
|
||||
memgraph::utils::Scheduler jemalloc_purge_scheduler;
|
||||
jemalloc_purge_scheduler.Run("Jemalloc purge", std::chrono::seconds(FLAGS_storage_gc_cycle_sec),
|
||||
@ -358,11 +357,10 @@ int main(int argc, char **argv) {
|
||||
.stream_transaction_retry_interval = std::chrono::milliseconds(FLAGS_stream_transaction_retry_interval)};
|
||||
|
||||
auto auth_glue =
|
||||
[flag = FLAGS_auth_user_or_role_name_regex](
|
||||
memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> *auth,
|
||||
std::unique_ptr<memgraph::query::AuthQueryHandler> &ah, std::unique_ptr<memgraph::query::AuthChecker> &ac) {
|
||||
[](memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> *auth,
|
||||
std::unique_ptr<memgraph::query::AuthQueryHandler> &ah, std::unique_ptr<memgraph::query::AuthChecker> &ac) {
|
||||
// Glue high level auth implementations to the query side
|
||||
ah = std::make_unique<memgraph::glue::AuthQueryHandler>(auth, flag);
|
||||
ah = std::make_unique<memgraph::glue::AuthQueryHandler>(auth);
|
||||
ac = std::make_unique<memgraph::glue::AuthChecker>(auth);
|
||||
// Handle users passed via arguments
|
||||
auto *maybe_username = std::getenv(kMgUser);
|
||||
@ -378,9 +376,10 @@ int main(int argc, char **argv) {
|
||||
}
|
||||
};
|
||||
|
||||
// WIP
|
||||
memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> auth_{data_directory /
|
||||
"auth"};
|
||||
memgraph::auth::Auth::Config auth_config{FLAGS_auth_user_or_role_name_regex, FLAGS_auth_password_strength_regex,
|
||||
FLAGS_auth_password_permit_null};
|
||||
memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> auth_{
|
||||
data_directory / "auth", auth_config};
|
||||
std::unique_ptr<memgraph::query::AuthQueryHandler> auth_handler;
|
||||
std::unique_ptr<memgraph::query::AuthChecker> auth_checker;
|
||||
auth_glue(&auth_, auth_handler, auth_checker);
|
||||
@ -388,7 +387,7 @@ int main(int argc, char **argv) {
|
||||
memgraph::dbms::DbmsHandler dbms_handler(db_config
|
||||
#ifdef MG_ENTERPRISE
|
||||
,
|
||||
&auth_, FLAGS_data_recovery_on_startup, FLAGS_storage_delete_on_drop
|
||||
&auth_, FLAGS_data_recovery_on_startup
|
||||
#endif
|
||||
);
|
||||
auto db_acc = dbms_handler.Get();
|
||||
|
@ -32,7 +32,7 @@
|
||||
#include "utils/timer.hpp"
|
||||
#include "version.hpp"
|
||||
|
||||
using memgraph::replication::ReplicationRole;
|
||||
using memgraph::replication_coordination_glue::ReplicationRole;
|
||||
|
||||
bool ValidateControlCharacter(const char *flagname, const std::string &value) {
|
||||
if (value.empty()) {
|
||||
@ -707,12 +707,11 @@ int main(int argc, char *argv[]) {
|
||||
|
||||
std::unordered_map<NodeId, memgraph::storage::Gid> node_id_map;
|
||||
memgraph::storage::Config config{
|
||||
|
||||
.items = {.properties_on_edges = FLAGS_storage_properties_on_edges},
|
||||
.durability = {.storage_directory = FLAGS_data_directory,
|
||||
.recover_on_startup = false,
|
||||
.snapshot_wal_mode = memgraph::storage::Config::Durability::SnapshotWalMode::DISABLED,
|
||||
.snapshot_on_exit = true},
|
||||
.salient = {.items = {.properties_on_edges = FLAGS_storage_properties_on_edges}},
|
||||
};
|
||||
memgraph::replication::ReplicationState repl_state{memgraph::storage::ReplicationStateRootPath(config)};
|
||||
auto store = memgraph::dbms::CreateInMemoryStorage(config, repl_state);
|
||||
|
@ -57,7 +57,7 @@ class AuthQueryHandler {
|
||||
|
||||
/// Return true if main database set successfully
/// @throw QueryRuntimeException if an error occurred.
virtual bool SetMainDatabase(const std::string &db, const std::string &username) = 0;
virtual bool SetMainDatabase(std::string_view db, const std::string &username) = 0;

/// Delete database from all users
/// @throw QueryRuntimeException if an error occurred.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -72,8 +72,9 @@ class TypedValueVectorCompare final {
|
||||
|
||||
/// Raise QueryRuntimeException if the value for symbol isn't of expected type.
inline void ExpectType(const Symbol &symbol, const TypedValue &value, TypedValue::Type expected) {
if (value.type() != expected)
if (value.type() != expected) [[unlikely]] {
throw QueryRuntimeException("Expected a {} for '{}', but got {}.", expected, symbol.name(), value.type());
}
}
|
||||
|
||||
inline void ProcessError(const storage::Error error) {
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
|
@ -555,7 +555,10 @@ class DbAccessor final {
|
||||
|
||||
void AdvanceCommand() { accessor_->AdvanceCommand(); }
|
||||
|
||||
utils::BasicResult<storage::StorageManipulationError, void> Commit() { return accessor_->Commit(); }
|
||||
utils::BasicResult<storage::StorageManipulationError, void> Commit(storage::CommitReplArgs reparg = {},
|
||||
storage::DatabaseAccessProtector db_acc = {}) {
|
||||
return accessor_->Commit(std::move(reparg), std::move(db_acc));
|
||||
}
|
||||
|
||||
void Abort() { accessor_->Abort(); }
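Because both new Commit parameters default to {}, existing call sites keep compiling unchanged while replication-aware code can pass its arguments explicitly. A sketch (the dba accessor and the argument objects are assumed to be in scope):

// Unchanged call sites: both parameters fall back to their defaults.
auto result = dba.Commit();
// Replication-aware path: forward the commit arguments and the database guard.
auto repl_result = dba.Commit(std::move(repl_args), std::move(db_protector));
if (repl_result.HasError()) {
  // handle storage::StorageManipulationError
}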
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -195,6 +195,12 @@ class DatabaseContextRequiredException : public QueryRuntimeException {
|
||||
SPECIALIZE_GET_EXCEPTION_NAME(DatabaseContextRequiredException)
|
||||
};
|
||||
|
||||
class ConcurrentSystemQueriesException : public QueryRuntimeException {
|
||||
public:
|
||||
using QueryRuntimeException::QueryRuntimeException;
|
||||
SPECIALIZE_GET_EXCEPTION_NAME(ConcurrentSystemQueriesException)
|
||||
};
|
||||
|
||||
class WriteVertexOperationInEdgeImportModeException : public QueryException {
|
||||
public:
|
||||
WriteVertexOperationInEdgeImportModeException()
|
||||
@ -253,6 +259,13 @@ class ReplicationModificationInMulticommandTxException : public QueryException {
|
||||
SPECIALIZE_GET_EXCEPTION_NAME(ReplicationModificationInMulticommandTxException)
|
||||
};
|
||||
|
||||
class CoordinatorModificationInMulticommandTxException : public QueryException {
|
||||
public:
|
||||
CoordinatorModificationInMulticommandTxException()
|
||||
: QueryException("Coordinator clause not allowed in multicommand transactions.") {}
|
||||
SPECIALIZE_GET_EXCEPTION_NAME(CoordinatorModificationInMulticommandTxException)
|
||||
};
|
||||
|
||||
class ReplicationDisabledOnDiskStorage : public QueryException {
|
||||
public:
|
||||
ReplicationDisabledOnDiskStorage() : QueryException("Replication is not supported while in on-disk storage mode.") {}
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -239,6 +239,9 @@ constexpr utils::TypeInfo query::DumpQuery::kType{utils::TypeId::AST_DUMP_QUERY,
|
||||
constexpr utils::TypeInfo query::ReplicationQuery::kType{utils::TypeId::AST_REPLICATION_QUERY, "ReplicationQuery",
|
||||
&query::Query::kType};
|
||||
|
||||
constexpr utils::TypeInfo query::CoordinatorQuery::kType{utils::TypeId::AST_COORDINATOR_QUERY, "CoordinatorQuery",
|
||||
&query::Query::kType};
|
||||
|
||||
constexpr utils::TypeInfo query::LockPathQuery::kType{utils::TypeId::AST_LOCK_PATH_QUERY, "LockPathQuery",
|
||||
&query::Query::kType};
|
||||
|
||||
|
@ -1209,7 +1209,8 @@ class PropertyLookup : public memgraph::query::Expression {
|
||||
}
|
||||
|
||||
protected:
|
||||
PropertyLookup(Expression *expression, PropertyIx property) : expression_(expression), property_(property) {}
|
||||
PropertyLookup(Expression *expression, PropertyIx property)
|
||||
: expression_(expression), property_(std::move(property)) {}
|
||||
|
||||
private:
|
||||
friend class AstStorage;
|
||||
@ -1805,9 +1806,9 @@ class EdgeAtom : public memgraph::query::PatternAtom {
|
||||
static const utils::TypeInfo kType;
|
||||
const utils::TypeInfo &GetTypeInfo() const override { return kType; }
|
||||
|
||||
enum class Type { SINGLE, DEPTH_FIRST, BREADTH_FIRST, WEIGHTED_SHORTEST_PATH, ALL_SHORTEST_PATHS };
|
||||
enum class Type : uint8_t { SINGLE, DEPTH_FIRST, BREADTH_FIRST, WEIGHTED_SHORTEST_PATH, ALL_SHORTEST_PATHS };
|
||||
|
||||
enum class Direction { IN, OUT, BOTH };
|
||||
enum class Direction : uint8_t { IN, OUT, BOTH };
|
||||
|
||||
/// Lambda for use in filtering or weight calculation during variable expand.
|
||||
struct Lambda {
|
||||
@ -2860,6 +2861,7 @@ class AuthQuery : public memgraph::query::Query {
|
||||
TRANSACTION_MANAGEMENT,
|
||||
MULTI_DATABASE_EDIT,
|
||||
MULTI_DATABASE_USE,
|
||||
COORDINATOR
|
||||
};
|
||||
|
||||
enum class FineGrainedPrivilege { NOTHING, READ, UPDATE, CREATE_DELETE };
|
||||
@ -2938,7 +2940,8 @@ const std::vector<AuthQuery::Privilege> kPrivilegesAll = {AuthQuery::Privilege::
|
||||
AuthQuery::Privilege::TRANSACTION_MANAGEMENT,
|
||||
AuthQuery::Privilege::STORAGE_MODE,
|
||||
AuthQuery::Privilege::MULTI_DATABASE_EDIT,
|
||||
AuthQuery::Privilege::MULTI_DATABASE_USE};
|
||||
AuthQuery::Privilege::MULTI_DATABASE_USE,
|
||||
AuthQuery::Privilege::COORDINATOR};
|
||||
|
||||
class DatabaseInfoQuery : public memgraph::query::Query {
|
||||
public:
|
||||
@ -3050,8 +3053,9 @@ class ReplicationQuery : public memgraph::query::Query {
|
||||
|
||||
memgraph::query::ReplicationQuery::Action action_;
|
||||
memgraph::query::ReplicationQuery::ReplicationRole role_;
|
||||
std::string replica_name_;
|
||||
std::string instance_name_;
|
||||
memgraph::query::Expression *socket_address_{nullptr};
|
||||
memgraph::query::Expression *coordinator_socket_address_{nullptr};
|
||||
memgraph::query::Expression *port_{nullptr};
|
||||
memgraph::query::ReplicationQuery::SyncMode sync_mode_;
|
||||
|
||||
@ -3059,10 +3063,53 @@ class ReplicationQuery : public memgraph::query::Query {
|
||||
ReplicationQuery *object = storage->Create<ReplicationQuery>();
|
||||
object->action_ = action_;
|
||||
object->role_ = role_;
|
||||
object->replica_name_ = replica_name_;
|
||||
object->instance_name_ = instance_name_;
|
||||
object->socket_address_ = socket_address_ ? socket_address_->Clone(storage) : nullptr;
|
||||
object->port_ = port_ ? port_->Clone(storage) : nullptr;
|
||||
object->sync_mode_ = sync_mode_;
|
||||
object->coordinator_socket_address_ =
|
||||
coordinator_socket_address_ ? coordinator_socket_address_->Clone(storage) : nullptr;
|
||||
|
||||
return object;
|
||||
}
|
||||
|
||||
private:
|
||||
friend class AstStorage;
|
||||
};
|
||||
|
||||
class CoordinatorQuery : public memgraph::query::Query {
|
||||
public:
|
||||
static const utils::TypeInfo kType;
|
||||
const utils::TypeInfo &GetTypeInfo() const override { return kType; }
|
||||
|
||||
enum class Action {
|
||||
REGISTER_INSTANCE,
|
||||
SET_INSTANCE_TO_MAIN,
|
||||
SHOW_REPLICATION_CLUSTER,
|
||||
};
|
||||
|
||||
enum class SyncMode { SYNC, ASYNC };
|
||||
|
||||
CoordinatorQuery() = default;
|
||||
|
||||
DEFVISITABLE(QueryVisitor<void>);
|
||||
|
||||
memgraph::query::CoordinatorQuery::Action action_;
|
||||
std::string instance_name_;
|
||||
memgraph::query::Expression *replication_socket_address_{nullptr};
|
||||
memgraph::query::Expression *coordinator_socket_address_{nullptr};
|
||||
memgraph::query::CoordinatorQuery::SyncMode sync_mode_;
|
||||
|
||||
CoordinatorQuery *Clone(AstStorage *storage) const override {
|
||||
auto *object = storage->Create<CoordinatorQuery>();
|
||||
object->action_ = action_;
|
||||
object->instance_name_ = instance_name_;
|
||||
object->replication_socket_address_ =
|
||||
replication_socket_address_ ? replication_socket_address_->Clone(storage) : nullptr;
|
||||
object->sync_mode_ = sync_mode_;
|
||||
object->coordinator_socket_address_ =
|
||||
coordinator_socket_address_ ? coordinator_socket_address_->Clone(storage) : nullptr;
|
||||
|
||||
return object;
|
||||
}
|
||||
|
||||
@ -3623,7 +3670,7 @@ class MultiDatabaseQuery : public memgraph::query::Query {
|
||||
|
||||
DEFVISITABLE(QueryVisitor<void>);
|
||||
|
||||
enum class Action { CREATE, USE, DROP };
|
||||
enum class Action { CREATE, USE, DROP, SHOW };
|
||||
|
||||
memgraph::query::MultiDatabaseQuery::Action action_;
|
||||
std::string db_name_;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -108,6 +108,7 @@ class MultiDatabaseQuery;
|
||||
class ShowDatabasesQuery;
|
||||
class EdgeImportModeQuery;
|
||||
class PatternComprehension;
|
||||
class CoordinatorQuery;
|
||||
|
||||
using TreeCompositeVisitor = utils::CompositeVisitor<
|
||||
SingleQuery, CypherUnion, NamedExpression, OrOperator, XorOperator, AndOperator, NotOperator, AdditionOperator,
|
||||
@ -146,6 +147,7 @@ class QueryVisitor
|
||||
SystemInfoQuery, ConstraintQuery, DumpQuery, ReplicationQuery, LockPathQuery,
|
||||
FreeMemoryQuery, TriggerQuery, IsolationLevelQuery, CreateSnapshotQuery, StreamQuery,
|
||||
SettingQuery, VersionQuery, ShowConfigQuery, TransactionQueueQuery, StorageModeQuery,
|
||||
AnalyzeGraphQuery, MultiDatabaseQuery, ShowDatabasesQuery, EdgeImportModeQuery> {};
|
||||
AnalyzeGraphQuery, MultiDatabaseQuery, ShowDatabasesQuery, EdgeImportModeQuery,
|
||||
CoordinatorQuery> {};
|
||||
|
||||
} // namespace memgraph::query
|
||||
|
@ -321,6 +321,13 @@ antlrcpp::Any CypherMainVisitor::visitReplicationQuery(MemgraphCypher::Replicati
|
||||
return replication_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitCoordinatorQuery(MemgraphCypher::CoordinatorQueryContext *ctx) {
|
||||
MG_ASSERT(ctx->children.size() == 1, "CoordinatorQuery should have exactly one child!");
|
||||
auto *coordinator_query = std::any_cast<CoordinatorQuery *>(ctx->children[0]->accept(this));
|
||||
query_ = coordinator_query;
|
||||
return coordinator_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitEdgeImportModeQuery(MemgraphCypher::EdgeImportModeQueryContext *ctx) {
|
||||
auto *edge_import_mode_query = storage_->Create<EdgeImportModeQuery>();
|
||||
if (ctx->ACTIVE()) {
|
||||
@ -335,24 +342,34 @@ antlrcpp::Any CypherMainVisitor::visitEdgeImportModeQuery(MemgraphCypher::EdgeIm
|
||||
antlrcpp::Any CypherMainVisitor::visitSetReplicationRole(MemgraphCypher::SetReplicationRoleContext *ctx) {
|
||||
auto *replication_query = storage_->Create<ReplicationQuery>();
|
||||
replication_query->action_ = ReplicationQuery::Action::SET_REPLICATION_ROLE;
|
||||
|
||||
auto set_replication_port = [replication_query, ctx, this]() -> void {
|
||||
if (ctx->port->numberLiteral() && ctx->port->numberLiteral()->integerLiteral()) {
|
||||
replication_query->port_ = std::any_cast<Expression *>(ctx->port->accept(this));
|
||||
} else {
|
||||
throw SyntaxException("Port must be an integer literal!");
|
||||
}
|
||||
};
|
||||
|
||||
if (ctx->MAIN()) {
|
||||
replication_query->role_ = ReplicationQuery::ReplicationRole::MAIN;
|
||||
if (ctx->WITH() || ctx->PORT()) {
|
||||
throw SemanticException("Main can't set a port!");
|
||||
}
|
||||
replication_query->role_ = ReplicationQuery::ReplicationRole::MAIN;
|
||||
|
||||
} else if (ctx->REPLICA()) {
|
||||
replication_query->role_ = ReplicationQuery::ReplicationRole::REPLICA;
|
||||
if (ctx->WITH() && ctx->PORT()) {
|
||||
if (ctx->port->numberLiteral() && ctx->port->numberLiteral()->integerLiteral()) {
|
||||
replication_query->port_ = std::any_cast<Expression *>(ctx->port->accept(this));
|
||||
} else {
|
||||
throw SyntaxException("Port must be an integer literal!");
|
||||
}
|
||||
set_replication_port();
|
||||
} else {
|
||||
throw SemanticException("Replica must set a port!");
|
||||
}
|
||||
}
|
||||
|
||||
return replication_query;
|
||||
}
|
||||
antlrcpp::Any CypherMainVisitor::visitShowReplicationRole(MemgraphCypher::ShowReplicationRoleContext *ctx) {
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitShowReplicationRole(MemgraphCypher::ShowReplicationRoleContext * /*ctx*/) {
|
||||
auto *replication_query = storage_->Create<ReplicationQuery>();
|
||||
replication_query->action_ = ReplicationQuery::Action::SHOW_REPLICATION_ROLE;
|
||||
return replication_query;
|
||||
@ -361,7 +378,7 @@ antlrcpp::Any CypherMainVisitor::visitShowReplicationRole(MemgraphCypher::ShowRe
|
||||
antlrcpp::Any CypherMainVisitor::visitRegisterReplica(MemgraphCypher::RegisterReplicaContext *ctx) {
|
||||
auto *replication_query = storage_->Create<ReplicationQuery>();
|
||||
replication_query->action_ = ReplicationQuery::Action::REGISTER_REPLICA;
|
||||
replication_query->replica_name_ = std::any_cast<std::string>(ctx->replicaName()->symbolicName()->accept(this));
|
||||
replication_query->instance_name_ = std::any_cast<std::string>(ctx->instanceName()->symbolicName()->accept(this));
|
||||
if (ctx->SYNC()) {
|
||||
replication_query->sync_mode_ = memgraph::query::ReplicationQuery::SyncMode::SYNC;
|
||||
} else if (ctx->ASYNC()) {
|
||||
@ -370,26 +387,67 @@ antlrcpp::Any CypherMainVisitor::visitRegisterReplica(MemgraphCypher::RegisterRe
|
||||
|
||||
if (!ctx->socketAddress()->literal()->StringLiteral()) {
|
||||
throw SemanticException("Socket address should be a string literal!");
|
||||
} else {
|
||||
replication_query->socket_address_ = std::any_cast<Expression *>(ctx->socketAddress()->accept(this));
|
||||
}
|
||||
replication_query->socket_address_ = std::any_cast<Expression *>(ctx->socketAddress()->accept(this));
|
||||
|
||||
return replication_query;
|
||||
}
|
||||
|
||||
// License check is done in the interpreter.
|
||||
antlrcpp::Any CypherMainVisitor::visitRegisterInstanceOnCoordinator(
|
||||
MemgraphCypher::RegisterInstanceOnCoordinatorContext *ctx) {
|
||||
auto *coordinator_query = storage_->Create<CoordinatorQuery>();
|
||||
if (!ctx->replicationSocketAddress()->literal()->StringLiteral()) {
|
||||
throw SemanticException("Replication socket address should be a string literal!");
|
||||
}
|
||||
|
||||
if (!ctx->coordinatorSocketAddress()->literal()->StringLiteral()) {
|
||||
throw SemanticException("Coordinator socket address should be a string literal!");
|
||||
}
|
||||
coordinator_query->action_ = CoordinatorQuery::Action::REGISTER_INSTANCE;
|
||||
coordinator_query->replication_socket_address_ =
|
||||
std::any_cast<Expression *>(ctx->replicationSocketAddress()->accept(this));
|
||||
coordinator_query->coordinator_socket_address_ =
|
||||
std::any_cast<Expression *>(ctx->coordinatorSocketAddress()->accept(this));
|
||||
coordinator_query->instance_name_ = std::any_cast<std::string>(ctx->instanceName()->symbolicName()->accept(this));
|
||||
if (ctx->ASYNC()) {
|
||||
coordinator_query->sync_mode_ = memgraph::query::CoordinatorQuery::SyncMode::ASYNC;
|
||||
} else {
|
||||
coordinator_query->sync_mode_ = memgraph::query::CoordinatorQuery::SyncMode::SYNC;
|
||||
}
|
||||
|
||||
return coordinator_query;
|
||||
}
|
||||
|
||||
// License check is done in the interpreter
|
||||
antlrcpp::Any CypherMainVisitor::visitShowReplicationCluster(MemgraphCypher::ShowReplicationClusterContext * /*ctx*/) {
|
||||
auto *coordinator_query = storage_->Create<CoordinatorQuery>();
|
||||
coordinator_query->action_ = CoordinatorQuery::Action::SHOW_REPLICATION_CLUSTER;
|
||||
return coordinator_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitDropReplica(MemgraphCypher::DropReplicaContext *ctx) {
|
||||
auto *replication_query = storage_->Create<ReplicationQuery>();
|
||||
replication_query->action_ = ReplicationQuery::Action::DROP_REPLICA;
|
||||
replication_query->replica_name_ = std::any_cast<std::string>(ctx->replicaName()->symbolicName()->accept(this));
|
||||
replication_query->instance_name_ = std::any_cast<std::string>(ctx->instanceName()->symbolicName()->accept(this));
|
||||
return replication_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitShowReplicas(MemgraphCypher::ShowReplicasContext *ctx) {
|
||||
antlrcpp::Any CypherMainVisitor::visitShowReplicas(MemgraphCypher::ShowReplicasContext * /*ctx*/) {
|
||||
auto *replication_query = storage_->Create<ReplicationQuery>();
|
||||
replication_query->action_ = ReplicationQuery::Action::SHOW_REPLICAS;
|
||||
return replication_query;
|
||||
}
|
||||
|
||||
// License check is done in the interpreter
|
||||
antlrcpp::Any CypherMainVisitor::visitSetInstanceToMain(MemgraphCypher::SetInstanceToMainContext *ctx) {
|
||||
auto *coordinator_query = storage_->Create<CoordinatorQuery>();
|
||||
coordinator_query->action_ = CoordinatorQuery::Action::SET_INSTANCE_TO_MAIN;
|
||||
coordinator_query->instance_name_ = std::any_cast<std::string>(ctx->instanceName()->symbolicName()->accept(this));
|
||||
query_ = coordinator_query;
|
||||
return coordinator_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitLockPathQuery(MemgraphCypher::LockPathQueryContext *ctx) {
|
||||
auto *lock_query = storage_->Create<LockPathQuery>();
|
||||
if (ctx->STATUS()) {
|
||||
@ -1657,6 +1715,7 @@ antlrcpp::Any CypherMainVisitor::visitPrivilege(MemgraphCypher::PrivilegeContext
|
||||
if (ctx->STORAGE_MODE()) return AuthQuery::Privilege::STORAGE_MODE;
|
||||
if (ctx->MULTI_DATABASE_EDIT()) return AuthQuery::Privilege::MULTI_DATABASE_EDIT;
|
||||
if (ctx->MULTI_DATABASE_USE()) return AuthQuery::Privilege::MULTI_DATABASE_USE;
|
||||
if (ctx->COORDINATOR()) return AuthQuery::Privilege::COORDINATOR;
|
||||
LOG_FATAL("Should not get here - unknown privilege!");
|
||||
}
|
||||
|
||||
@ -1771,7 +1830,11 @@ antlrcpp::Any CypherMainVisitor::visitReturnBody(MemgraphCypher::ReturnBodyConte
|
||||
body.skip = static_cast<Expression *>(std::any_cast<Expression *>(ctx->skip()->accept(this)));
|
||||
}
|
||||
if (ctx->limit()) {
|
||||
body.limit = static_cast<Expression *>(std::any_cast<Expression *>(ctx->limit()->accept(this)));
|
||||
if (ctx->limit()->expression()) {
|
||||
body.limit = std::any_cast<Expression *>(ctx->limit()->accept(this));
|
||||
} else {
|
||||
body.limit = std::any_cast<ParameterLookup *>(ctx->limit()->accept(this));
|
||||
}
|
||||
}
|
||||
std::tie(body.all_identifiers, body.named_expressions) =
|
||||
std::any_cast<std::pair<bool, std::vector<NamedExpression *>>>(ctx->returnItems()->accept(this));
|
||||
@ -2907,6 +2970,14 @@ antlrcpp::Any CypherMainVisitor::visitDropDatabase(MemgraphCypher::DropDatabaseC
|
||||
return mdb_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitShowDatabase(MemgraphCypher::ShowDatabaseContext * /*ctx*/) {
|
||||
auto *mdb_query = storage_->Create<MultiDatabaseQuery>();
|
||||
mdb_query->db_name_ = "";
|
||||
mdb_query->action_ = MultiDatabaseQuery::Action::SHOW;
|
||||
query_ = mdb_query;
|
||||
return mdb_query;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitShowDatabases(MemgraphCypher::ShowDatabasesContext * /*ctx*/) {
|
||||
query_ = storage_->Create<ShowDatabasesQuery>();
|
||||
return query_;
|
||||
|
@ -233,6 +233,26 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
|
||||
*/
|
||||
antlrcpp::Any visitShowReplicas(MemgraphCypher::ShowReplicasContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return CoordinatorQuery*
|
||||
*/
|
||||
antlrcpp::Any visitCoordinatorQuery(MemgraphCypher::CoordinatorQueryContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return CoordinatorQuery*
|
||||
*/
|
||||
antlrcpp::Any visitRegisterInstanceOnCoordinator(MemgraphCypher::RegisterInstanceOnCoordinatorContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return CoordinatorQuery*
|
||||
*/
|
||||
antlrcpp::Any visitSetInstanceToMain(MemgraphCypher::SetInstanceToMainContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return CoordinatorQuery*
|
||||
*/
|
||||
antlrcpp::Any visitShowReplicationCluster(MemgraphCypher::ShowReplicationClusterContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return LockPathQuery*
|
||||
*/
|
||||
@ -1007,6 +1027,11 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
|
||||
*/
|
||||
antlrcpp::Any visitDropDatabase(MemgraphCypher::DropDatabaseContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return MultiDatabaseQuery*
|
||||
*/
|
||||
antlrcpp::Any visitShowDatabase(MemgraphCypher::ShowDatabaseContext *ctx) override;
|
||||
|
||||
/**
|
||||
* @return ShowDatabasesQuery*
|
||||
*/
|
||||
|
@ -143,7 +143,7 @@ order : ORDER BY sortItem ( ',' sortItem )* ;
|
||||
|
||||
skip : L_SKIP expression ;

limit : LIMIT expression ;
limit : LIMIT ( expression | parameter ) ;

sortItem : expression ( ASCENDING | ASC | DESCENDING | DESC )? ;
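The widened limit rule above accepts a parameter as well as an expression, so a LIMIT value can now be bound at run time. Illustrative statements (made up, derived directly from the rule):

// LIMIT now also accepts a parameter lookup, not only an expression.
const char *kLimitLiteral = "MATCH (n) RETURN n LIMIT 10;";
const char *kLimitParameter = "MATCH (n) RETURN n LIMIT $limit;";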
|
||||
|
||||
|
@ -102,6 +102,7 @@ FILTER : F I L T E R ;
|
||||
IN : I N ;
|
||||
INDEX : I N D E X ;
|
||||
INFO : I N F O ;
|
||||
INSTANCE : I N S T A N C E ;
|
||||
IS : I S ;
|
||||
KB : K B ;
|
||||
KEY : K E Y ;
|
||||
@ -122,6 +123,7 @@ PROCEDURE : P R O C E D U R E ;
|
||||
PROFILE : P R O F I L E ;
|
||||
QUERY : Q U E R Y ;
|
||||
REDUCE : R E D U C E ;
|
||||
REGISTER : R E G I S T E R;
|
||||
REMOVE : R E M O V E ;
|
||||
RETURN : R E T U R N ;
|
||||
SET : S E T ;
|
||||
|
@ -48,10 +48,12 @@ memgraphCypherKeyword : cypherKeyword
|
||||
| DATABASE
|
||||
| DENY
|
||||
| DROP
|
||||
| DO
|
||||
| DUMP
|
||||
| EDGE
|
||||
| EDGE_TYPES
|
||||
| EXECUTE
|
||||
| FAILOVER
|
||||
| FOR
|
||||
| FOREACH
|
||||
| FREE
|
||||
@ -61,6 +63,7 @@ memgraphCypherKeyword : cypherKeyword
|
||||
| GRANT
|
||||
| HEADER
|
||||
| IDENTIFIED
|
||||
| INSTANCE
|
||||
| NODE_LABELS
|
||||
| NULLIF
|
||||
| IMPORT
|
||||
@ -151,6 +154,7 @@ query : cypherQuery
|
||||
| multiDatabaseQuery
|
||||
| showDatabases
|
||||
| edgeImportModeQuery
|
||||
| coordinatorQuery
|
||||
;
|
||||
|
||||
cypherQuery : ( indexHints )? singleQuery ( cypherUnion )* ( queryMemoryLimit )? ;
|
||||
@ -183,6 +187,11 @@ replicationQuery : setReplicationRole
|
||||
| showReplicas
|
||||
;
|
||||
|
||||
coordinatorQuery : registerInstanceOnCoordinator
|
||||
| setInstanceToMain
|
||||
| showReplicationCluster
|
||||
;
|
||||
|
||||
triggerQuery : createTrigger
|
||||
| dropTrigger
|
||||
| showTriggers
|
||||
@ -323,6 +332,7 @@ privilege : CREATE
|
||||
| STORAGE_MODE
|
||||
| MULTI_DATABASE_EDIT
|
||||
| MULTI_DATABASE_USE
|
||||
| COORDINATOR
|
||||
;
|
||||
|
||||
granularPrivilege : NOTHING | READ | UPDATE | CREATE_DELETE ;
|
||||
@ -364,14 +374,23 @@ setReplicationRole : SET REPLICATION ROLE TO ( MAIN | REPLICA )
|
||||
|
||||
showReplicationRole : SHOW REPLICATION ROLE ;
|
||||
|
||||
replicaName : symbolicName ;
|
||||
showReplicationCluster : SHOW REPLICATION CLUSTER ;
|
||||
|
||||
instanceName : symbolicName ;
|
||||
|
||||
socketAddress : literal ;
|
||||
|
||||
registerReplica : REGISTER REPLICA replicaName ( SYNC | ASYNC )
|
||||
coordinatorSocketAddress : literal ;
|
||||
replicationSocketAddress : literal ;
|
||||
|
||||
registerReplica : REGISTER REPLICA instanceName ( SYNC | ASYNC )
|
||||
TO socketAddress ;
|
||||
|
||||
dropReplica : DROP REPLICA replicaName ;
|
||||
registerInstanceOnCoordinator : REGISTER INSTANCE instanceName ON coordinatorSocketAddress ( AS ASYNC ) ? WITH replicationSocketAddress ;
|
||||
|
||||
setInstanceToMain : SET INSTANCE instanceName TO MAIN ;
|
||||
|
||||
dropReplica : DROP REPLICA instanceName ;
|
||||
|
||||
showReplicas : SHOW REPLICAS ;
|
||||
|
||||
@ -480,6 +499,7 @@ transactionId : literal ;
|
||||
multiDatabaseQuery : createDatabase
|
||||
| useDatabase
|
||||
| dropDatabase
|
||||
| showDatabase
|
||||
;
|
||||
|
||||
createDatabase : CREATE DATABASE databaseName ;
|
||||
@ -488,6 +508,8 @@ useDatabase : USE DATABASE databaseName ;
|
||||
|
||||
dropDatabase : DROP DATABASE databaseName ;
|
||||
|
||||
showDatabase : SHOW DATABASE ;
|
||||
|
||||
showDatabases : SHOW DATABASES ;
|
||||
|
||||
edgeImportModeQuery : EDGE IMPORT MODE ( ACTIVE | INACTIVE ) ;
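For reference, example statements admitted by the new and changed rules above, spelled out from the grammar itself rather than copied from documentation; instance names and socket addresses are made up:

// Illustrative query strings only; socket addresses must be string literals,
// as enforced in CypherMainVisitor.
const char *kRegisterInstance =
    R"(REGISTER INSTANCE instance_1 ON "127.0.0.1:10111" WITH "127.0.0.1:10001";)";
const char *kRegisterInstanceAsync =
    R"(REGISTER INSTANCE instance_2 ON "127.0.0.1:10112" AS ASYNC WITH "127.0.0.1:10002";)";
const char *kSetInstanceToMain = "SET INSTANCE instance_1 TO MAIN;";
const char *kShowReplicationCluster = "SHOW REPLICATION CLUSTER;";
const char *kShowDatabase = "SHOW DATABASE;";
const char *kDropReplica = "DROP REPLICA instance_1;";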
|
||||
|
@ -39,15 +39,18 @@ BOOTSTRAP_SERVERS : B O O T S T R A P UNDERSCORE S E R V E R S ;
|
||||
CALL : C A L L ;
|
||||
CHECK : C H E C K ;
|
||||
CLEAR : C L E A R ;
|
||||
CLUSTER : C L U S T E R ;
|
||||
COMMIT : C O M M I T ;
|
||||
COMMITTED : C O M M I T T E D ;
|
||||
CONFIG : C O N F I G ;
|
||||
CONFIGS : C O N F I G S;
|
||||
CONSUMER_GROUP : C O N S U M E R UNDERSCORE G R O U P ;
|
||||
COORDINATOR : C O O R D I N A T O R ;
|
||||
CREATE_DELETE : C R E A T E UNDERSCORE D E L E T E ;
|
||||
CREDENTIALS : C R E D E N T I A L S ;
|
||||
CSV : C S V ;
|
||||
DATA : D A T A ;
|
||||
DO : D O ;
|
||||
DELIMITER : D E L I M I T E R ;
|
||||
DATABASE : D A T A B A S E ;
|
||||
DATABASES : D A T A B A S E S ;
|
||||
@ -59,6 +62,7 @@ DURABILITY : D U R A B I L I T Y ;
|
||||
EDGE : E D G E ;
|
||||
EDGE_TYPES : E D G E UNDERSCORE T Y P E S ;
|
||||
EXECUTE : E X E C U T E ;
|
||||
FAILOVER : F A I L O V E R ;
|
||||
FOR : F O R ;
|
||||
FOREACH : F O R E A C H;
|
||||
FREE : F R E E ;
|
||||
@ -75,6 +79,7 @@ IMPORT : I M P O R T ;
|
||||
INACTIVE : I N A C T I V E ;
|
||||
IN_MEMORY_ANALYTICAL : I N UNDERSCORE M E M O R Y UNDERSCORE A N A L Y T I C A L ;
|
||||
IN_MEMORY_TRANSACTIONAL : I N UNDERSCORE M E M O R Y UNDERSCORE T R A N S A C T I O N A L ;
|
||||
INSTANCE : I N S T A N C E ;
|
||||
ISOLATION : I S O L A T I O N ;
|
||||
KAFKA : K A F K A ;
|
||||
LABELS : L A B E L S ;
|
||||
@ -107,6 +112,7 @@ REVOKE : R E V O K E ;
|
||||
ROLE : R O L E ;
|
||||
ROLES : R O L E S ;
|
||||
QUOTE : Q U O T E ;
|
||||
SERVER : S E R V E R ;
|
||||
SERVICE_URL : S E R V I C E UNDERSCORE U R L ;
|
||||
SESSION : S E S S I O N ;
|
||||
SETTING : S E T T I N G ;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -106,6 +106,7 @@ class PrivilegeExtractor : public QueryVisitor<void>, public HierarchicalTreeVis
|
||||
AddPrivilege(AuthQuery::Privilege::MULTI_DATABASE_EDIT);
|
||||
break;
|
||||
case MultiDatabaseQuery::Action::USE:
|
||||
case MultiDatabaseQuery::Action::SHOW:
|
||||
AddPrivilege(AuthQuery::Privilege::MULTI_DATABASE_USE);
|
||||
break;
|
||||
}
|
||||
@ -115,6 +116,8 @@ class PrivilegeExtractor : public QueryVisitor<void>, public HierarchicalTreeVis
|
||||
AddPrivilege(AuthQuery::Privilege::MULTI_DATABASE_USE); /* OR EDIT */
|
||||
}
|
||||
|
||||
void Visit(CoordinatorQuery & /*coordinator_query*/) override { AddPrivilege(AuthQuery::Privilege::COORDINATOR); }
|
||||
|
||||
bool PreVisit(Create & /*unused*/) override {
|
||||
AddPrivilege(AuthQuery::Privilege::CREATE);
|
||||
return false;
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -218,7 +218,8 @@ const trie::Trie kKeywords = {"union",
|
||||
"directory",
|
||||
"lock",
|
||||
"unlock",
|
||||
"build"};
|
||||
"build",
|
||||
"instance"};
|
||||
|
||||
// Unicode codepoints that are allowed at the start of the unescaped name.
|
||||
const std::bitset<kBitsetSize> kUnescapedNameAllowedStarts(
|
||||
|
@ -1121,11 +1121,11 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
|
||||
throw QueryRuntimeException("Unexpected error when getting properties.");
|
||||
}
|
||||
}
|
||||
return *maybe_props;
|
||||
return *std::move(maybe_props);
|
||||
}
|
||||
|
||||
template <class TRecordAccessor>
|
||||
storage::PropertyValue GetProperty(const TRecordAccessor &record_accessor, PropertyIx prop) {
|
||||
storage::PropertyValue GetProperty(const TRecordAccessor &record_accessor, const PropertyIx &prop) {
|
||||
auto maybe_prop = record_accessor.GetProperty(view_, ctx_->properties[prop.ix]);
|
||||
if (maybe_prop.HasError() && maybe_prop.GetError() == storage::Error::NONEXISTENT_OBJECT) {
|
||||
// This is a very nasty and temporary hack in order to make MERGE work.
|
||||
@ -1148,7 +1148,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
|
||||
throw QueryRuntimeException("Unexpected error when getting a property.");
|
||||
}
|
||||
}
|
||||
return *maybe_prop;
|
||||
return *std::move(maybe_prop);
|
||||
}
|
||||
|
||||
template <class TRecordAccessor>
|
||||
@ -1178,7 +1178,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
|
||||
return *maybe_prop;
|
||||
}
|
||||
|
||||
storage::LabelId GetLabel(LabelIx label) { return ctx_->labels[label.ix]; }
|
||||
storage::LabelId GetLabel(const LabelIx &label) { return ctx_->labels[label.ix]; }
|
||||
|
||||
Frame *frame_;
|
||||
const SymbolTable *symbol_table_;
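The expression-evaluator hunks above return *std::move(maybe_prop) instead of *maybe_prop: dereferencing the result wrapper as an rvalue lets the contained value be moved out rather than copied. The same idiom shown with std::optional, which provides the analogous rvalue-qualified operator* (illustration only, not Memgraph code):

#include <optional>
#include <string>
#include <utility>

std::string TakeValue(std::optional<std::string> maybe) {
  // *std::move(maybe) binds to the && overload of operator*, so the string is
  // moved into the return value instead of being copied.
  return *std::move(maybe);
}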