Merge branch 'master' into query-execution-in-mage
commit 53458b4962
.clang-tidy

@@ -64,8 +64,8 @@ Checks: '*,
 -readability-identifier-length,
 -misc-no-recursion,
 -concurrency-mt-unsafe,
--bugprone-easily-swappable-parameters'
+-bugprone-easily-swappable-parameters,
+-bugprone-unchecked-optional-access'
 WarningsAsErrors: ''
 HeaderFilterRegex: 'src/.*'
 AnalyzeTemporaryDtors: false
.github/pull_request_template.md | 22 (vendored)
@@ -1,14 +1,28 @@
 ### Description

 Please briefly explain the changes you made here.

-### Documentation checklist
-- [ ] Add the documentation label tag
-- [ ] Add the bug / feature label tag
-- [ ] Add the milestone for which this feature is intended
-  - If not known, set for a later milestone
-- [ ] Write a release note, including added/changed clauses
-  - **[Release note text]**
-- [ ] Link the documentation PR here
-  - **[Documentation PR link]**
-- [ ] Tag someone from docs team in the comments
+Please delete either the [master < EPIC] or [master < Task] part, depending on your needs.
+
+[master < Epic] PR
+- [ ] Check, and update documentation if necessary
+- [ ] Write E2E tests
+- [ ] Compare the [benchmarking results](https://bench-graph.memgraph.com/) between the master branch and the Epic branch
+- [ ] Provide the full content or a guide for the final git message
+  - [FINAL GIT MESSAGE]
+
+[master < Task] PR
+- [ ] Check, and update documentation if necessary
+- [ ] Provide the full content or a guide for the final git message
+  - **[FINAL GIT MESSAGE]**
+
+
+To keep docs changelog up to date, one more thing to do:
+- [ ] Write a release note here, including added/changed clauses
.github/workflows/diff.yaml | 553 (vendored)
@@ -4,10 +4,6 @@ concurrency:
   cancel-in-progress: true
-
 on:
-  push:
-    branches:
-      - master
   workflow_dispatch:
   pull_request:
     paths-ignore:
       - "docs/**"
@@ -19,11 +15,16 @@ on:
 jobs:
   community_build:
     name: "Community build"
-    runs-on: [self-hosted, Linux, X64, Diff]
+    runs-on: [self-hosted, Linux, X64, DockerMgBuild]
     timeout-minutes: 60
     env:
       THREADS: 24
+      MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
+      MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
+      OS: debian-11
+      TOOLCHAIN: v5
+      ARCH: amd
+      BUILD_TYPE: RelWithDebInfo

     steps:
       - name: Set up repository
@@ -33,35 +34,56 @@ jobs:
           # branches and tags. (default: 1)
           fetch-depth: 0

-      - name: Build community binaries
+      - name: Spin up mgbuild container
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Initialize dependencies.
-          ./init
-
-          # Build community binaries.
-          cd build
-          cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DMG_ENTERPRISE=OFF ..
-          make -j$THREADS
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            run
+
+      - name: Build release binaries
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --build-type $BUILD_TYPE \
+            --threads $THREADS \
+            build-memgraph --community

       - name: Run unit tests
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Run unit tests.
-          cd build
-          ctest -R memgraph__unit --output-on-failure -j$THREADS
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph unit
+
+      - name: Stop mgbuild container
+        if: always()
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            stop --remove

   code_analysis:
     name: "Code analysis"
-    runs-on: [self-hosted, Linux, X64, Diff]
+    runs-on: [self-hosted, Linux, X64, DockerMgBuild]
     timeout-minutes: 60
     env:
       THREADS: 24
+      MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
+      MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
+      OS: debian-11
+      TOOLCHAIN: v5
+      ARCH: amd
+      BUILD_TYPE: Debug

     steps:
       - name: Set up repository
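Every job in this workflow repeats the same container lifecycle, so it is worth seeing it once in isolation. A condensed sketch, assuming the per-job env values shown above (debian-11, toolchain v5, amd); $LICENSE and $ORG stand in for the repository secrets:

    # Start the build container for the given os/toolchain/arch.
    ./release/package/mgbuild.sh --toolchain v5 --os debian-11 --arch amd run
    # Build memgraph inside the running container.
    ./release/package/mgbuild.sh --toolchain v5 --os debian-11 --arch amd \
      --build-type RelWithDebInfo --threads 24 build-memgraph
    # Run one of the test targets inside the same container.
    ./release/package/mgbuild.sh --toolchain v5 --os debian-11 --arch amd \
      --enterprise-license $LICENSE --organization-name $ORG test-memgraph unit
    # Tear the container down, even if a previous step failed.
    ./release/package/mgbuild.sh --toolchain v5 --os debian-11 --arch amd stop --remove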
@@ -71,6 +93,14 @@ jobs:
           # branches and tags. (default: 1)
           fetch-depth: 0

+      - name: Spin up mgbuild container
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            run
+
       # This is also needed if we want to do a comparison against other branches
       # See https://github.community/t/checkout-code-fails-when-it-runs-lerna-run-test-since-master/17920
       - name: Fetch all history for all tags and branches
@@ -78,11 +108,13 @@

       - name: Initialize deps
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Initialize dependencies.
-          ./init
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --build-type $BUILD_TYPE \
+            --threads $THREADS \
+            build-memgraph --init-only

       - name: Set base branch
         if: ${{ github.event_name == 'pull_request' }}
@@ -96,45 +128,43 @@ jobs:

       - name: Python code analysis
         run: |
-          CHANGED_FILES=$(git diff -U0 ${{ env.BASE_BRANCH }}... --name-only --diff-filter=d)
-          for file in ${CHANGED_FILES}; do
-            echo ${file}
-            if [[ ${file} == *.py ]]; then
-              python3 -m black --check --diff ${file}
-              python3 -m isort --profile black --check-only --diff ${file}
-            fi
-          done
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph code-analysis --base-branch "${{ env.BASE_BRANCH }}"

       - name: Build combined ASAN, UBSAN and coverage binaries
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          cd build
-          cmake -DTEST_COVERAGE=ON -DASAN=ON -DUBSAN=ON ..
-          make -j$THREADS memgraph__unit
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --build-type $BUILD_TYPE \
+            --threads $THREADS \
+            build-memgraph --coverage --asan --ubsan

       - name: Run unit tests
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Run unit tests. It is restricted to 2 threads intentionally, because higher concurrency makes the timing related tests unstable.
-          cd build
-          LSAN_OPTIONS=suppressions=$PWD/../tools/lsan.supp UBSAN_OPTIONS=halt_on_error=1 ctest -R memgraph__unit --output-on-failure -j2
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph unit-coverage

       - name: Compute code coverage
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Compute code coverage.
-          cd tools/github
-          ./coverage_convert
-
-          # Package code coverage.
-          cd generated
-          tar -czf code_coverage.tar.gz coverage.json html report.json summary.rmu
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph code-coverage

       - name: Save code coverage
         uses: actions/upload-artifact@v4
@@ -144,21 +174,36 @@ jobs:

       - name: Run clang-tidy
         run: |
-          source /opt/toolchain-v4/activate
-
-          # Restrict clang-tidy results only to the modified parts
-          git diff -U0 ${{ env.BASE_BRANCH }}... -- src | ./tools/github/clang-tidy/clang-tidy-diff.py -p 1 -j $THREADS -path build -regex ".+\.cpp" | tee ./build/clang_tidy_output.txt
-
-          # Fail if any warning is reported
-          ! cat ./build/clang_tidy_output.txt | ./tools/github/clang-tidy/grep_error_lines.sh > /dev/null
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph clang-tidy --base-branch "${{ env.BASE_BRANCH }}"
+
+      - name: Stop mgbuild container
+        if: always()
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            stop --remove

   debug_build:
     name: "Debug build"
-    runs-on: [self-hosted, Linux, X64, Diff]
+    runs-on: [self-hosted, Linux, X64, DockerMgBuild]
     timeout-minutes: 100
     env:
       THREADS: 24
+      MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
+      MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
+      OS: debian-11
+      TOOLCHAIN: v5
+      ARCH: amd
+      BUILD_TYPE: Debug

     steps:
       - name: Set up repository
@@ -168,58 +213,95 @@ jobs:
           # branches and tags. (default: 1)
           fetch-depth: 0

-      - name: Build debug binaries
+      - name: Spin up mgbuild container
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Initialize dependencies.
-          ./init
-
-          # Build debug binaries.
-          cd build
-          cmake ..
-          make -j$THREADS
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            run
+
+      - name: Build release binaries
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --build-type $BUILD_TYPE \
+            --threads $THREADS \
+            build-memgraph

       - name: Run leftover CTest tests
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Run leftover CTest tests (all except unit and benchmark tests).
-          cd build
-          ctest -E "(memgraph__unit|memgraph__benchmark)" --output-on-failure
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph leftover-CTest

       - name: Run drivers tests
         run: |
-          ./tests/drivers/run.sh
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph drivers

       - name: Run integration tests
         run: |
-          tests/integration/run.sh
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph integration

       - name: Run cppcheck and clang-format
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Run cppcheck and clang-format.
-          cd tools/github
-          ./cppcheck_and_clang_format diff
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph cppcheck-and-clang-format

       - name: Save cppcheck and clang-format errors
         uses: actions/upload-artifact@v4
         with:
           name: "Code coverage(Debug build)"
           path: tools/github/cppcheck_and_clang_format.txt

+      - name: Stop mgbuild container
+        if: always()
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            stop --remove

   release_build:
     name: "Release build"
-    runs-on: [self-hosted, Linux, X64, Diff]
+    runs-on: [self-hosted, Linux, X64, DockerMgBuild]
     timeout-minutes: 100
     env:
       THREADS: 24
+      MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
+      MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
+      OS: debian-11
+      TOOLCHAIN: v5
+      ARCH: amd
+      BUILD_TYPE: Release

     steps:
       - name: Set up repository
@@ -229,26 +311,33 @@ jobs:
           # branches and tags. (default: 1)
           fetch-depth: 0

+      - name: Spin up mgbuild container
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            run
+
       - name: Build release binaries
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Initialize dependencies.
-          ./init
-
-          # Build release binaries.
-          cd build
-          cmake -DCMAKE_BUILD_TYPE=Release ..
-          make -j$THREADS
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --build-type $BUILD_TYPE \
+            --threads $THREADS \
+            build-memgraph

       - name: Run GQL Behave tests
         run: |
-          cd tests
-          ./setup.sh /opt/toolchain-v4/activate
-          cd gql_behave
-          ./continuous_integration
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph gql-behave

       - name: Save quality assurance status
         uses: actions/upload-artifact@v4
@@ -260,14 +349,19 @@ jobs:

       - name: Run unit tests
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Run unit tests.
-          cd build
-          ctest -R memgraph__unit --output-on-failure -j$THREADS
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph unit

       # This step will be skipped because the e2e stream tests have been disabled
       # We need to fix this as soon as possible
       - name: Ensure Kafka and Pulsar are up
         if: false
         run: |
           cd tests/e2e/streams/kafka
           docker-compose up -d
@@ -276,13 +370,17 @@ jobs:

       - name: Run e2e tests
         run: |
-          cd tests
-          ./setup.sh /opt/toolchain-v4/activate
-          source ve3/bin/activate_e2e
-          cd e2e
-          ./run.sh
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph e2e

       # Same as two steps prior
       - name: Ensure Kafka and Pulsar are down
         if: false
         run: |
           cd tests/e2e/streams/kafka
           docker-compose down
@@ -291,59 +389,92 @@ jobs:

       - name: Run stress test (plain)
         run: |
-          cd tests/stress
-          source ve3/bin/activate
-          ./continuous_integration
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph stress-plain

       - name: Run stress test (SSL)
         run: |
-          cd tests/stress
-          source ve3/bin/activate
-          ./continuous_integration --use-ssl
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph stress-ssl

       - name: Run durability test
         run: |
-          cd tests/stress
-          source ve3/bin/activate
-          python3 durability --num-steps 5
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph durability

       - name: Create enterprise DEB package
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-          cd build
-
-          # create mgconsole
-          # we use the -B to force the build
-          make -j$THREADS -B mgconsole
-
-          # Create enterprise DEB package.
-          mkdir output && cd output
-          cpack -G DEB --config ../CPackConfig.cmake
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            package-memgraph
+
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            copy --package

       - name: Save enterprise DEB package
         uses: actions/upload-artifact@v4
         with:
           name: "Enterprise DEB package"
-          path: build/output/memgraph*.deb
+          path: build/output/${{ env.OS }}/memgraph*.deb
+
+      - name: Copy build logs
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            copy --build-logs

       - name: Save test data
         uses: actions/upload-artifact@v4
         if: always()
         with:
           name: "Test data(Release build)"
-          path: |
-            # multiple paths could be defined
-            build/logs
+          path: build/logs
+
+      - name: Stop mgbuild container
+        if: always()
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            stop --remove

   release_jepsen_test:
     name: "Release Jepsen Test"
-    runs-on: [self-hosted, Linux, X64, Debian10, JepsenControl]
-    #continue-on-error: true
+    runs-on: [self-hosted, Linux, X64, DockerMgBuild]
     timeout-minutes: 80
     env:
       THREADS: 24
+      MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
+      MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
+      OS: debian-10
+      TOOLCHAIN: v4
+      ARCH: amd
+      BUILD_TYPE: RelWithDebInfo

     steps:
       - name: Set up repository
@@ -381,13 +527,27 @@ jobs:
           name: "Jepsen Report"
           path: tests/jepsen/Jepsen.tar.gz

+      - name: Stop mgbuild container
+        if: always()
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            stop --remove

   release_benchmarks:
     name: "Release benchmarks"
-    runs-on: [self-hosted, Linux, X64, Diff, Gen7]
+    runs-on: [self-hosted, Linux, X64, DockerMgBuild, Gen7]
     timeout-minutes: 60
     env:
       THREADS: 24
+      MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
+      MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
+      OS: debian-11
+      TOOLCHAIN: v5
+      ARCH: amd
+      BUILD_TYPE: Release

     steps:
       - name: Set up repository
@@ -397,25 +557,33 @@ jobs:
           # branches and tags. (default: 1)
           fetch-depth: 0

+      - name: Spin up mgbuild container
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            run
+
       - name: Build release binaries
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Initialize dependencies.
-          ./init
-
-          # Build only memgraph release binaries.
-          cd build
-          cmake -DCMAKE_BUILD_TYPE=release ..
-          make -j$THREADS
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --build-type $BUILD_TYPE \
+            --threads $THREADS \
+            build-memgraph

       - name: Run macro benchmarks
         run: |
-          cd tests/macro_benchmark
-          ./harness QuerySuite MemgraphRunner \
-            --groups aggregation 1000_create unwind_create dense_expand match \
-            --no-strict
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph macro-benchmark

       - name: Get branch name (merge)
         if: github.event_name != 'pull_request'
@@ -429,30 +597,49 @@ jobs:

       - name: Upload macro benchmark results
         run: |
-          cd tools/bench-graph-client
-          virtualenv -p python3 ve3
-          source ve3/bin/activate
-          pip install -r requirements.txt
-          ./main.py --benchmark-name "macro_benchmark" \
-            --benchmark-results "../../tests/macro_benchmark/.harness_summary" \
-            --github-run-id "${{ github.run_id }}" \
-            --github-run-number "${{ github.run_number }}" \
-            --head-branch-name "${{ env.BRANCH_NAME }}"
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph upload-to-bench-graph \
+              --benchmark-name "macro_benchmark" \
+              --benchmark-results "../../tests/macro_benchmark/.harness_summary" \
+              --github-run-id ${{ github.run_id }} \
+              --github-run-number ${{ github.run_number }} \
+              --head-branch-name ${{ env.BRANCH_NAME }}

       # TODO (andi) No need for path flags and for --disk-storage and --in-memory-analytical
       - name: Run mgbench
         run: |
-          cd tests/mgbench
-          ./benchmark.py vendor-native --num-workers-for-benchmark 12 --export-results benchmark_result.json pokec/medium/*/*
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph mgbench

       - name: Upload mgbench results
         run: |
-          cd tools/bench-graph-client
-          virtualenv -p python3 ve3
-          source ve3/bin/activate
-          pip install -r requirements.txt
-          ./main.py --benchmark-name "mgbench" \
-            --benchmark-results "../../tests/mgbench/benchmark_result.json" \
-            --github-run-id "${{ github.run_id }}" \
-            --github-run-number "${{ github.run_number }}" \
-            --head-branch-name "${{ env.BRANCH_NAME }}"
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph upload-to-bench-graph \
+              --benchmark-name "mgbench" \
+              --benchmark-results "../../tests/mgbench/benchmark_result.json" \
+              --github-run-id "${{ github.run_id }}" \
+              --github-run-number "${{ github.run_number }}" \
+              --head-branch-name "${{ env.BRANCH_NAME }}"
+
+      - name: Stop mgbuild container
+        if: always()
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            stop --remove
CMakeLists.txt

@@ -300,6 +300,19 @@ endif()

 option(ENABLE_JEMALLOC "Use jemalloc" ON)

+option(MG_MEMORY_PROFILE "If build should be setup for memory profiling" OFF)
+if (MG_MEMORY_PROFILE AND ENABLE_JEMALLOC)
+  message(STATUS "Jemalloc has been disabled because MG_MEMORY_PROFILE is enabled")
+  set(ENABLE_JEMALLOC OFF)
+endif ()
+if (MG_MEMORY_PROFILE AND ASAN)
+  message(STATUS "ASAN has been disabled because MG_MEMORY_PROFILE is enabled")
+  set(ASAN OFF)
+endif ()
+if (MG_MEMORY_PROFILE)
+  add_compile_definitions(MG_MEMORY_PROFILE)
+endif ()
+
 if (ASAN)
   message(WARNING "Disabling jemalloc as it doesn't work well with ASAN")
   set(ENABLE_JEMALLOC OFF)
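A minimal sketch of how the new option is driven from the configure line (the cache variable name comes from the hunk above; the rest is an ordinary CMake invocation):

    # Hypothetical memory-profiling configure call; the checks added above then
    # force ENABLE_JEMALLOC and ASAN off and define MG_MEMORY_PROFILE.
    cmake -DMG_MEMORY_PROFILE=ON -DCMAKE_BUILD_TYPE=RelWithDebInfo ..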
@@ -45,6 +45,7 @@ MEMGRAPH_BUILD_DEPS=(
     readline-devel # for memgraph console
     python3-devel # for query modules
     openssl-devel
+    openssl
     libseccomp-devel
     python3 python3-pip nmap-ncat # for tests
     #
@@ -43,6 +43,7 @@ MEMGRAPH_BUILD_DEPS=(
     readline-devel # for memgraph console
     python3-devel # for query modules
    openssl-devel
+    openssl
     libseccomp-devel
     python3 python-virtualenv python3-pip nmap-ncat # for qa, macro_benchmark and stress tests
     #
@@ -59,7 +59,7 @@ MEMGRAPH_BUILD_DEPS=(
     doxygen graphviz # source documentation generators
     which nodejs golang custom-golang1.18.9 # for driver tests
     zip unzip java-11-openjdk-devel java-17-openjdk java-17-openjdk-devel custom-maven3.9.3 # for driver tests
-    sbcl # for custom Lisp C++ preprocessing
+    cl-asdf common-lisp-controller sbcl # for custom Lisp C++ preprocessing
     autoconf # for jemalloc code generation
     libtool # for protobuf code generation
     cyrus-sasl-devel
@@ -162,6 +162,30 @@ install() {
             fi
             continue
         fi
+        if [ "$pkg" == doxygen ]; then
+            if ! dnf list installed doxygen >/dev/null 2>/dev/null; then
+                dnf install -y https://dl.rockylinux.org/pub/rocky/9/CRB/x86_64/os/Packages/d/doxygen-1.9.1-11.el9.x86_64.rpm
+            fi
+            continue
+        fi
+        if [ "$pkg" == cl-asdf ]; then
+            if ! dnf list installed cl-asdf >/dev/null 2>/dev/null; then
+                dnf install -y https://pkgs.sysadmins.ws/el8/base/x86_64/cl-asdf-20101028-18.el8.noarch.rpm
+            fi
+            continue
+        fi
+        if [ "$pkg" == common-lisp-controller ]; then
+            if ! dnf list installed common-lisp-controller >/dev/null 2>/dev/null; then
+                dnf install -y https://pkgs.sysadmins.ws/el8/base/x86_64/common-lisp-controller-7.4-20.el8.noarch.rpm
+            fi
+            continue
+        fi
+        if [ "$pkg" == sbcl ]; then
+            if ! dnf list installed sbcl >/dev/null 2>/dev/null; then
+                dnf install -y https://pkgs.sysadmins.ws/el8/base/x86_64/sbcl-2.0.1-4.el8.x86_64.rpm
+            fi
+            continue
+        fi
         if [ "$pkg" == PyYAML ]; then
             if [ -z ${SUDO_USER+x} ]; then # Running as root (e.g. Docker).
                 pip3 install --user PyYAML
@@ -20,14 +20,18 @@ if [ ! -f "$INPUT" ]; then
 fi

 echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} BEGIN and COMMIT are required because variables share the same name (e.g. row)"
-echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} CONSTRAINTS are just skipped -> ${COLOR_RED}please create consraints manually if needed${COLOR_NULL}"
+echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} CONSTRAINTS are just skipped -> ${COLOR_RED}please create constraints manually if needed${COLOR_NULL}"
+
+echo 'CREATE INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' > "$OUTPUT"

 sed -e 's/^:begin/BEGIN/g; s/^BEGIN$/BEGIN;/g;' \
     -e 's/^:commit/COMMIT/g; s/^COMMIT$/COMMIT;/g;' \
     -e '/^CALL/d; /^SCHEMA AWAIT/d;' \
-    -e '/^CREATE CONSTRAINT/d; /^DROP CONSTRAINT/d;' "$INPUT" > "$OUTPUT"
+    -e 's/CREATE RANGE INDEX FOR (n:/CREATE INDEX ON :/g;' \
+    -e 's/) ON (n./(/g;' \
+    -e '/^CREATE CONSTRAINT/d; /^DROP CONSTRAINT/d;' "$INPUT" >> "$OUTPUT"
+
+echo 'DROP INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' >> "$OUTPUT"

 echo ""
 echo -e "${COLOR_GREEN}DONE!${COLOR_NULL} Please find Memgraph compatible cypherl|.cypher file under $OUTPUT"
import/n2mg_separate_files_cypherl.sh | 61 (new executable file)
@@ -0,0 +1,61 @@
#!/bin/bash -e
COLOR_ORANGE="\e[38;5;208m"
COLOR_GREEN="\e[38;5;35m"
COLOR_RED="\e[0;31m"
COLOR_NULL="\e[0m"

print_help() {
  echo -e "${COLOR_ORANGE}HOW TO RUN:${COLOR_NULL} $0 input_file_schema_path input_file_nodes_path input_file_relationships_path input_file_cleanup_path output_file_path"
  exit 1
}

if [ "$#" -ne 5 ]; then
  print_help
fi
INPUT_SCHEMA="$1"
INPUT_NODES="$2"
INPUT_RELATIONSHIPS="$3"
INPUT_CLEANUP="$4"
OUTPUT="$5"

if [ ! -f "$INPUT_SCHEMA" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

if [ ! -f "$INPUT_NODES" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

if [ ! -f "$INPUT_RELATIONSHIPS" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

if [ ! -f "$INPUT_CLEANUP" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} BEGIN and COMMIT are required because variables share the same name (e.g. row)"
echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} CONSTRAINTS are just skipped -> ${COLOR_RED}please create constraints manually if needed${COLOR_NULL}"


echo 'CREATE INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' > "$OUTPUT"

sed -e 's/CREATE RANGE INDEX FOR (n:/CREATE INDEX ON :/g;' \
    -e 's/) ON (n./(/g;' \
    -e '/^CREATE CONSTRAINT/d' $INPUT_SCHEMA >> "$OUTPUT"

cat "$INPUT_NODES" >> "$OUTPUT"
cat "$INPUT_RELATIONSHIPS" >> "$OUTPUT"

sed -e '/^DROP CONSTRAINT/d' "$INPUT_CLEANUP" >> "$OUTPUT"

echo 'DROP INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' >> "$OUTPUT"

echo ""
echo -e "${COLOR_GREEN}DONE!${COLOR_NULL} Please find Memgraph compatible cypherl|.cypher file under $OUTPUT"
echo ""
echo "Please import data by executing => \`cat $OUTPUT | mgconsole\`"
import/n2mg_separate_files_cypherls.sh | 64 (new executable file)
@@ -0,0 +1,64 @@
#!/bin/bash -e
COLOR_ORANGE="\e[38;5;208m"
COLOR_GREEN="\e[38;5;35m"
COLOR_RED="\e[0;31m"
COLOR_NULL="\e[0m"

print_help() {
  echo -e "${COLOR_ORANGE}HOW TO RUN:${COLOR_NULL} $0 input_file_schema_path input_file_nodes_path input_file_relationships_path input_file_cleanup_path output_file_schema_path output_file_nodes_path output_file_relationships_path output_file_cleanup_path"
  exit 1
}

if [ "$#" -ne 8 ]; then
  print_help
fi
INPUT_SCHEMA="$1"
INPUT_NODES="$2"
INPUT_RELATIONSHIPS="$3"
INPUT_CLEANUP="$4"
OUTPUT_SCHEMA="$5"
OUTPUT_NODES="$6"
OUTPUT_RELATIONSHIPS="$7"
OUTPUT_CLEANUP="$8"

if [ ! -f "$INPUT_SCHEMA" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

if [ ! -f "$INPUT_NODES" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

if [ ! -f "$INPUT_RELATIONSHIPS" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

if [ ! -f "$INPUT_CLEANUP" ]; then
  echo -e "${COLOR_RED}ERROR:${COLOR_NULL} input_file_path is not a file!"
  print_help
fi

echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} BEGIN and COMMIT are required because variables share the same name (e.g. row)"
echo -e "${COLOR_ORANGE}NOTE:${COLOR_NULL} CONSTRAINTS are just skipped -> ${COLOR_RED}please create constraints manually if needed${COLOR_NULL}"


echo 'CREATE INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' > "$OUTPUT_SCHEMA"

sed -e 's/CREATE RANGE INDEX FOR (n:/CREATE INDEX ON :/g;' \
    -e 's/) ON (n./(/g;' \
    -e '/^CREATE CONSTRAINT/d' $INPUT_SCHEMA >> "$OUTPUT_SCHEMA"

cat "$INPUT_NODES" > "$OUTPUT_NODES"
cat "$INPUT_RELATIONSHIPS" > "$OUTPUT_RELATIONSHIPS"

sed -e '/^DROP CONSTRAINT/d' "$INPUT_CLEANUP" >> "$OUTPUT_CLEANUP"

echo 'DROP INDEX ON :`UNIQUE IMPORT LABEL`(`UNIQUE IMPORT ID`);' >> "$OUTPUT_CLEANUP"

echo ""
echo -e "${COLOR_GREEN}DONE!${COLOR_NULL} Please find Memgraph compatible cypherl|.cypher files under $OUTPUT_SCHEMA, $OUTPUT_NODES, $OUTPUT_RELATIONSHIPS and $OUTPUT_CLEANUP"
echo ""
echo "Please import data by executing => \`cat $OUTPUT_SCHEMA | mgconsole\`, \`cat $OUTPUT_NODES | mgconsole\`, \`cat $OUTPUT_RELATIONSHIPS | mgconsole\` and \`cat $OUTPUT_CLEANUP | mgconsole\`"
init | 34
@@ -14,6 +14,7 @@ function print_help () {
     echo "Optional arguments:"
     echo -e "  -h\tdisplay this help and exit"
     echo -e "  --without-libs-setup\tskip the step for setting up libs"
+    echo -e "  --ci\tscript is being run inside ci"
 }

 function setup_virtualenv () {
@@ -35,6 +36,7 @@ function setup_virtualenv () {
 }

 setup_libs=true
+ci=false
 if [[ $# -eq 1 && "$1" == "-h" ]]; then
     print_help
     exit 0
@@ -45,6 +47,10 @@ else
                 shift
                 setup_libs=false
             ;;
+            --ci)
+                shift
+                ci=true
+            ;;
             *)
                 # unknown option
                 echo "Invalid argument provided: $1"
@@ -76,11 +82,13 @@ if [[ "$setup_libs" == "true" ]]; then
 fi

 # Fix for centos 7 during release
-if [ "${DISTRO}" = "centos-7" ] || [ "${DISTRO}" = "debian-11" ] || [ "${DISTRO}" = "amzn-2" ]; then
-    if python3 -m pip show virtualenv >/dev/null 2>/dev/null; then
-        python3 -m pip uninstall -y virtualenv
+if [[ "$ci" == "false" ]]; then
+    if [ "${DISTRO}" = "centos-7" ] || [ "${DISTRO}" = "debian-11" ] || [ "${DISTRO}" = "amzn-2" ]; then
+        if python3 -m pip show virtualenv >/dev/null 2>/dev/null; then
+            python3 -m pip uninstall -y virtualenv
+        fi
+        python3 -m pip install virtualenv
     fi
-    python3 -m pip install virtualenv
 fi

 # setup gql_behave dependencies
@@ -119,14 +127,16 @@ fi
 # Install precommit hook except on old operating systems because we don't
 # develop on them -> pre-commit hook not required -> we can use latest
 # packages.
-if [ "${DISTRO}" != "centos-7" ] && [ "$DISTRO" != "debian-10" ] && [ "${DISTRO}" != "ubuntu-18.04" ] && [ "${DISTRO}" != "amzn-2" ]; then
-    python3 -m pip install pre-commit
-    python3 -m pre_commit install
-    # Install py format tools for usage during the development.
-    echo "Install black formatter"
-    python3 -m pip install black==23.1.*
-    echo "Install isort"
-    python3 -m pip install isort==5.12.*
+if [[ "$ci" == "false" ]]; then
+    if [ "${DISTRO}" != "centos-7" ] && [ "$DISTRO" != "debian-10" ] && [ "${DISTRO}" != "ubuntu-18.04" ] && [ "${DISTRO}" != "amzn-2" ]; then
+        python3 -m pip install pre-commit
+        python3 -m pre_commit install
+        # Install py format tools for usage during the development.
+        echo "Install black formatter"
+        python3 -m pip install black==23.1.*
+        echo "Install isort"
+        python3 -m pip install isort==5.12.*
+    fi
 fi

 # Link `include/mgp.py` with `release/mgp/mgp.py`
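A sketch of the two resulting invocation modes (the flag itself is defined in the hunks above; in CI mode the virtualenv reinstall and the pre-commit/formatter setup are skipped):

    ./init --ci   # CI: dependencies only, no developer tooling
    ./init        # developer machine: also installs pre-commit, black, isort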
@@ -127,6 +127,7 @@ declare -A primary_urls=(
   ["jemalloc"]="http://$local_cache_host/git/jemalloc.git"
   ["range-v3"]="http://$local_cache_host/git/range-v3.git"
   ["nuraft"]="http://$local_cache_host/git/NuRaft.git"
+  ["asio"]="http://$local_cache_host/git/asio.git"
 )

 # The goal of secondary urls is to have links to the "source of truth" of
@@ -157,6 +158,7 @@ declare -A secondary_urls=(
   ["jemalloc"]="https://github.com/jemalloc/jemalloc.git"
   ["range-v3"]="https://github.com/ericniebler/range-v3.git"
   ["nuraft"]="https://github.com/eBay/NuRaft.git"
+  ["asio"]="https://github.com/chriskohlhoff/asio.git"
 )

 # antlr
@@ -266,13 +268,13 @@ repo_clone_try_double "${primary_urls[jemalloc]}" "${secondary_urls[jemalloc]}"
 pushd jemalloc

 ./autogen.sh
-MALLOC_CONF="retain:false,percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000" \
+MALLOC_CONF="background_thread:true,retain:false,percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000" \
 ./configure \
   --disable-cxx \
   --with-lg-page=12 \
   --with-lg-hugepage=21 \
   --enable-shared=no --prefix=$working_dir \
-  --with-malloc-conf="retain:false,percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000"
+  --with-malloc-conf="background_thread:true,retain:false,percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000"

 make -j$CPUS install
 popd
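The baked-in defaults can still be overridden per process through jemalloc's standard MALLOC_CONF environment variable; a sketch assuming a binary linked against this build (the binary name is illustrative):

    # Disable the new background threads for one run and print allocator stats on exit.
    MALLOC_CONF="background_thread:false,stats_print:true" ./memgraph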
@@ -286,5 +288,7 @@ nuraft_tag="v2.1.0"
 repo_clone_try_double "${primary_urls[nuraft]}" "${secondary_urls[nuraft]}" "nuraft" "$nuraft_tag" true
 pushd nuraft
 git apply ../nuraft2.1.0.patch
+asio_tag="asio-1-29-0"
+repo_clone_try_double "${primary_urls[asio]}" "${secondary_urls[asio]}" "asio" "$asio_tag" true
 ./prepare.sh
 popd
release/package/amd-builders-v4.yml | 73 (new file)
@@ -0,0 +1,73 @@
version: "3"
services:
  mgbuild_v4_amzn-2:
    image: "memgraph/mgbuild:v4_amzn-2"
    build:
      context: amzn-2
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_amzn-2"

  mgbuild_v4_centos-7:
    image: "memgraph/mgbuild:v4_centos-7"
    build:
      context: centos-7
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_centos-7"

  mgbuild_v4_centos-9:
    image: "memgraph/mgbuild:v4_centos-9"
    build:
      context: centos-9
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_centos-9"

  mgbuild_v4_debian-10:
    image: "memgraph/mgbuild:v4_debian-10"
    build:
      context: debian-10
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_debian-10"

  mgbuild_v4_debian-11:
    image: "memgraph/mgbuild:v4_debian-11"
    build:
      context: debian-11
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_debian-11"

  mgbuild_v4_fedora-36:
    image: "memgraph/mgbuild:v4_fedora-36"
    build:
      context: fedora-36
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_fedora-36"

  mgbuild_v4_ubuntu-18.04:
    image: "memgraph/mgbuild:v4_ubuntu-18.04"
    build:
      context: ubuntu-18.04
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_ubuntu-18.04"

  mgbuild_v4_ubuntu-20.04:
    image: "memgraph/mgbuild:v4_ubuntu-20.04"
    build:
      context: ubuntu-20.04
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_ubuntu-20.04"

  mgbuild_v4_ubuntu-22.04:
    image: "memgraph/mgbuild:v4_ubuntu-22.04"
    build:
      context: ubuntu-22.04
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_ubuntu-22.04"
release/package/amd-builders-v5.yml | 81 (new file)
@@ -0,0 +1,81 @@
version: "3"
services:
  mgbuild_v5_amzn-2:
    image: "memgraph/mgbuild:v5_amzn-2"
    build:
      context: amzn-2
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_amzn-2"

  mgbuild_v5_centos-7:
    image: "memgraph/mgbuild:v5_centos-7"
    build:
      context: centos-7
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_centos-7"

  mgbuild_v5_centos-9:
    image: "memgraph/mgbuild:v5_centos-9"
    build:
      context: centos-9
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_centos-9"

  mgbuild_v5_debian-11:
    image: "memgraph/mgbuild:v5_debian-11"
    build:
      context: debian-11
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_debian-11"

  mgbuild_v5_debian-12:
    image: "memgraph/mgbuild:v5_debian-12"
    build:
      context: debian-12
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_debian-12"

  mgbuild_v5_fedora-38:
    image: "memgraph/mgbuild:v5_fedora-38"
    build:
      context: fedora-38
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_fedora-38"

  mgbuild_v5_fedora-39:
    image: "memgraph/mgbuild:v5_fedora-39"
    build:
      context: fedora-39
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_fedora-39"

  mgbuild_v5_rocky-9.3:
    image: "memgraph/mgbuild:v5_rocky-9.3"
    build:
      context: rocky-9.3
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_rocky-9.3"

  mgbuild_v5_ubuntu-20.04:
    image: "memgraph/mgbuild:v5_ubuntu-20.04"
    build:
      context: ubuntu-20.04
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_ubuntu-20.04"

  mgbuild_v5_ubuntu-22.04:
    image: "memgraph/mgbuild:v5_ubuntu-22.04"
    build:
      context: ubuntu-22.04
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_ubuntu-22.04"
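These compose files are presumably driven by mgbuild.sh, but a single builder image can also be built and started by hand; a hypothetical invocation using the service names from the file above:

    docker compose -f release/package/amd-builders-v5.yml build mgbuild_v5_debian-11
    docker compose -f release/package/amd-builders-v5.yml up -d mgbuild_v5_debian-11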
release/package/amzn-2/Dockerfile

@@ -7,9 +7,34 @@ RUN yum -y update \
 # Do NOT be smart here and clean the cache because the container is used in the
 # stateful context.

-RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz \
-    -O ${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz \
-    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz -C /opt \
-    && rm ${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz
+# Download and install toolchain
+RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz \
+    -O toolchain-${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz \
+    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz -C /opt \
+    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz
+
+# Install toolchain run deps and memgraph build deps
+SHELL ["/bin/bash", "-c"]
+RUN git clone https://github.com/memgraph/memgraph.git \
+    && cd memgraph \
+    && ./environment/os/amzn-2.sh install TOOLCHAIN_RUN_DEPS \
+    && ./environment/os/amzn-2.sh install MEMGRAPH_BUILD_DEPS \
+    && cd .. && rm -rf memgraph
+
+# Add mgdeps-cache and bench-graph-api hostnames
+RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts
+
+# Create mg user and set as default
+RUN useradd -m -s /bin/bash mg
+USER mg
+
+# Install rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Fix node
+RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
+
+# Install PyYAML (only for amzn-2, centos-7, centos-9 and rocky-9)
+RUN pip3 install --user PyYAML
+
+ENTRYPOINT ["sleep", "infinity"]
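Equivalently, the same image can be produced without compose; a sketch matching the build arg and context declared above, with the tag mirroring the compose files' naming convention:

    docker build --build-arg TOOLCHAIN_VERSION=v5 \
      -t memgraph/mgbuild:v5_amzn-2 release/package/amzn-2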
release/package/arm-builders-v4.yml | 18 (new file)
@@ -0,0 +1,18 @@
version: "3"

services:
  mgbuild_v4_debian-11-arm:
    image: "memgraph/mgbuild:v4_debian-11-arm"
    build:
      context: debian-11-arm
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_debian-11-arm"

  mgbuild_v4_ubuntu_v4_22.04-arm:
    image: "memgraph/mgbuild:v4_ubuntu-22.04-arm"
    build:
      context: ubuntu-22.04-arm
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_ubuntu-22.04-arm"
release/package/arm-builders-v5.yml | 18 (new file)
@@ -0,0 +1,18 @@
version: "3"

services:
  debian-12-arm:
    image: "memgraph/mgbuild:v5_debian-12-arm"
    build:
      context: debian-12-arm
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_debian-12-arm"

  ubuntu-22.04-arm:
    image: "memgraph/mgbuild:v5_ubuntu-22.04-arm"
    build:
      context: ubuntu-22.04-arm
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_ubuntu-22.04-arm"
@@ -1,11 +0,0 @@
-version: "3"
-
-services:
-  debian-11-arm:
-    build:
-      context: debian-11-arm
-    container_name: "mgbuild_debian-11-arm"
-  ubuntu-2204-arm:
-    build:
-      context: ubuntu-22.04-arm
-    container_name: "mgbuild_ubuntu-22.04-arm"
release/package/centos-7/Dockerfile

@@ -7,9 +7,33 @@ RUN yum -y update \
 # Do NOT be smart here and clean the cache because the container is used in the
 # stateful context.

-RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz \
-    -O ${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz \
-    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz -C /opt \
-    && rm ${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz
+RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz \
+    -O toolchain-${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz \
+    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz -C /opt \
+    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz
+
+# Install toolchain run deps and memgraph build deps
+SHELL ["/bin/bash", "-c"]
+RUN git clone https://github.com/memgraph/memgraph.git \
+    && cd memgraph \
+    && ./environment/os/centos-7.sh install TOOLCHAIN_RUN_DEPS \
+    && ./environment/os/centos-7.sh install MEMGRAPH_BUILD_DEPS \
+    && cd .. && rm -rf memgraph
+
+# Add mgdeps-cache and bench-graph-api hostnames
+RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts
+
+# Create mg user and set as default
+RUN useradd -m -s /bin/bash mg
+USER mg
+
+# Install rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Fix node
+RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
+
+# Install PyYAML (only for amzn-2, centos-7, centos-9 and rocky-9)
+RUN pip3 install --user PyYAML
+
+ENTRYPOINT ["sleep", "infinity"]
release/package/centos-9/Dockerfile

@@ -7,9 +7,33 @@ RUN yum -y update \
 # Do NOT be smart here and clean the cache because the container is used in the
 # stateful context.

-RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz \
-    -O ${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz \
-    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz -C /opt \
-    && rm ${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz
+RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz \
+    -O toolchain-${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz \
+    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz -C /opt \
+    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz
+
+# Install toolchain run deps and memgraph build deps
+SHELL ["/bin/bash", "-c"]
+RUN git clone https://github.com/memgraph/memgraph.git \
+    && cd memgraph \
+    && ./environment/os/centos-9.sh install TOOLCHAIN_RUN_DEPS \
+    && ./environment/os/centos-9.sh install MEMGRAPH_BUILD_DEPS \
+    && cd .. && rm -rf memgraph
+
+# Add mgdeps-cache and bench-graph-api hostnames
+RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts
+
+# Create mg user and set as default
+RUN useradd -m -s /bin/bash mg
+USER mg
+
+# Install rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Fix node
+RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
+
+# Install PyYAML (only for amzn-2, centos-7, centos-9 and rocky-9)
+RUN pip3 install --user PyYAML
+
+ENTRYPOINT ["sleep", "infinity"]
release/package/debian-10/Dockerfile

@@ -10,9 +10,30 @@ RUN apt update && apt install -y \
 # Do NOT be smart here and clean the cache because the container is used in the
 # stateful context.

-RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz \
-    -O ${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz \
-    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz -C /opt \
-    && rm ${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz
+RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz \
+    -O toolchain-${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz \
+    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz -C /opt \
+    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz
+
+# Install toolchain run deps and memgraph build deps
+SHELL ["/bin/bash", "-c"]
+RUN git clone https://github.com/memgraph/memgraph.git \
+    && cd memgraph \
+    && ./environment/os/debian-10.sh install TOOLCHAIN_RUN_DEPS \
+    && ./environment/os/debian-10.sh install MEMGRAPH_BUILD_DEPS \
+    && cd .. && rm -rf memgraph
+
+# Add mgdeps-cache and bench-graph-api hostnames
+RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts
+
+# Create mg user and set as default
+RUN useradd -m -s /bin/bash mg
+USER mg
+
+# Install rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Fix node
+RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
+
+ENTRYPOINT ["sleep", "infinity"]
release/package/debian-11-arm/Dockerfile

@@ -10,9 +10,30 @@ RUN apt update && apt install -y \
 # Do NOT be smart here and clean the cache because the container is used in the
 # stateful context.

-RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz \
-    -O ${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz \
-    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz -C /opt \
-    && rm ${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz
+RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz \
+    -O toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz \
+    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz -C /opt \
+    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz
+
+# Install toolchain run deps and memgraph build deps
+SHELL ["/bin/bash", "-c"]
+RUN git clone https://github.com/memgraph/memgraph.git \
+    && cd memgraph \
+    && ./environment/os/debian-11-arm.sh install TOOLCHAIN_RUN_DEPS \
+    && ./environment/os/debian-11-arm.sh install MEMGRAPH_BUILD_DEPS \
+    && cd .. && rm -rf memgraph
+
+# Add mgdeps-cache and bench-graph-api hostnames
+RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts
+
+# Create mg user and set as default
+RUN useradd -m -s /bin/bash mg
+USER mg
+
+# Install rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Fix node
+RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
+
+ENTRYPOINT ["sleep", "infinity"]
release/package/debian-11/Dockerfile

@@ -10,9 +10,30 @@ RUN apt update && apt install -y \
 # Do NOT be smart here and clean the cache because the container is used in the
 # stateful context.

-RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz \
-    -O ${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz \
-    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz -C /opt \
-    && rm ${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz
+RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz \
+    -O toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz \
+    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz -C /opt \
+    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz
+
+# Install toolchain run deps and memgraph build deps
+SHELL ["/bin/bash", "-c"]
+RUN git clone https://github.com/memgraph/memgraph.git \
+    && cd memgraph \
+    && ./environment/os/debian-11.sh install TOOLCHAIN_RUN_DEPS \
+    && ./environment/os/debian-11.sh install MEMGRAPH_BUILD_DEPS \
+    && cd .. && rm -rf memgraph
+
+# Add mgdeps-cache and bench-graph-api hostnames
+RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts
+
+# Create mg user and set as default
+RUN useradd -m -s /bin/bash mg
+USER mg
+
+# Install rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Fix node
+RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
+
+ENTRYPOINT ["sleep", "infinity"]
39
release/package/debian-12-arm/Dockerfile
Normal file
@ -0,0 +1,39 @@
FROM debian:12

ARG TOOLCHAIN_VERSION

# Stops tzdata interactive configuration.
ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && apt install -y \
    ca-certificates wget git
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-arm64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-arm64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-arm64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-arm64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/debian-12-arm.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/debian-12-arm.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]
39
release/package/debian-12/Dockerfile
Normal file
@ -0,0 +1,39 @@
FROM debian:12

ARG TOOLCHAIN_VERSION

# Stops tzdata interactive configuration.
ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && apt install -y \
    ca-certificates wget git
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-amd64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-amd64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-amd64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-amd64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/debian-12.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/debian-12.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]
@ -1,38 +0,0 @@
version: "3"
services:
  mgbuild_centos-7:
    build:
      context: centos-7
    container_name: "mgbuild_centos-7"
  mgbuild_centos-9:
    build:
      context: centos-9
    container_name: "mgbuild_centos-9"
  mgbuild_debian-10:
    build:
      context: debian-10
    container_name: "mgbuild_debian-10"
  mgbuild_debian-11:
    build:
      context: debian-11
    container_name: "mgbuild_debian-11"
  mgbuild_ubuntu-18.04:
    build:
      context: ubuntu-18.04
    container_name: "mgbuild_ubuntu-18.04"
  mgbuild_ubuntu-20.04:
    build:
      context: ubuntu-20.04
    container_name: "mgbuild_ubuntu-20.04"
  mgbuild_ubuntu-22.04:
    build:
      context: ubuntu-22.04
    container_name: "mgbuild_ubuntu-22.04"
  mgbuild_fedora-36:
    build:
      context: fedora-36
    container_name: "mgbuild_fedora-36"
  mgbuild_amzn-2:
    build:
      context: amzn-2
    container_name: "mgbuild_amzn-2"
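This deleted monolithic docker-compose.yml is superseded by per-arch, per-toolchain compose files that mgbuild.sh (added below) selects by file name. The replacement files themselves are not shown in this diff; a minimal sketch of the selection logic only, assuming the amd/v5 pair exists:

  # mgbuild.sh derives the compose file name from --arch and --toolchain:
  arch=amd
  toolchain_version=v5
  docker compose -f "${arch}-builders-${toolchain_version}.yml" up -d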
@ -8,9 +8,30 @@ RUN yum -y update \
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz \
    -O ${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz \
    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz -C /opt \
    && rm ${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz
RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/fedora-36.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/fedora-36.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]
37
release/package/fedora-38/Dockerfile
Normal file
@ -0,0 +1,37 @@
FROM fedora:38

ARG TOOLCHAIN_VERSION

# Stops tzdata interactive configuration.
RUN yum -y update \
    && yum install -y wget git
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-38-amd64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-38-amd64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-38-amd64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-38-amd64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/fedora-38.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/fedora-38.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]
37
release/package/fedora-39/Dockerfile
Normal file
@ -0,0 +1,37 @@
FROM fedora:39

ARG TOOLCHAIN_VERSION

# Stops tzdata interactive configuration.
RUN yum -y update \
    && yum install -y wget git
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-39-amd64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-39-amd64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-39-amd64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-39-amd64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/fedora-39.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/fedora-39.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]
665
release/package/mgbuild.sh
Executable file
@ -0,0 +1,665 @@
#!/bin/bash
set -Eeuo pipefail
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
SCRIPT_NAME=${0##*/}
PROJECT_ROOT="$SCRIPT_DIR/../.."
MGBUILD_HOME_DIR="/home/mg"
MGBUILD_ROOT_DIR="$MGBUILD_HOME_DIR/memgraph"

DEFAULT_TOOLCHAIN="v5"
SUPPORTED_TOOLCHAINS=(
  v4 v5
)
DEFAULT_OS="all"
SUPPORTED_OS=(
  all
  amzn-2
  centos-7 centos-9
  debian-10 debian-11 debian-11-arm debian-12 debian-12-arm
  fedora-36 fedora-38 fedora-39
  rocky-9.3
  ubuntu-18.04 ubuntu-20.04 ubuntu-22.04 ubuntu-22.04-arm
)
SUPPORTED_OS_V4=(
  amzn-2
  centos-7 centos-9
  debian-10 debian-11 debian-11-arm
  fedora-36
  ubuntu-18.04 ubuntu-20.04 ubuntu-22.04 ubuntu-22.04-arm
)
SUPPORTED_OS_V5=(
  amzn-2
  centos-7 centos-9
  debian-11 debian-11-arm debian-12 debian-12-arm
  fedora-38 fedora-39
  rocky-9.3
  ubuntu-20.04 ubuntu-22.04 ubuntu-22.04-arm
)
DEFAULT_BUILD_TYPE="Release"
SUPPORTED_BUILD_TYPES=(
  Debug
  Release
  RelWithDebInfo
)
DEFAULT_ARCH="amd"
SUPPORTED_ARCHS=(
  amd
  arm
)
SUPPORTED_TESTS=(
  clang-tidy cppcheck-and-clang-format code-analysis
  code-coverage drivers durability e2e gql-behave
  integration leftover-CTest macro-benchmark
  mgbench stress-plain stress-ssl
  unit unit-coverage upload-to-bench-graph
)
DEFAULT_THREADS=0
DEFAULT_ENTERPRISE_LICENSE=""
DEFAULT_ORGANIZATION_NAME="memgraph"

print_help () {
  echo -e "\nUsage: $SCRIPT_NAME [GLOBAL OPTIONS] COMMAND [COMMAND OPTIONS]"
  echo -e "\nInteract with mgbuild containers"

  echo -e "\nCommands:"
  echo -e "  build                         Build mgbuild image"
  echo -e "  build-memgraph [OPTIONS]      Build memgraph binary inside mgbuild container"
  echo -e "  copy OPTIONS                  Copy an artifact from mgbuild container to host"
  echo -e "  package-memgraph              Create memgraph package from built binary inside mgbuild container"
  echo -e "  pull                          Pull mgbuild image from dockerhub"
  echo -e "  push [OPTIONS]                Push mgbuild image to dockerhub"
  echo -e "  run [OPTIONS]                 Run mgbuild container"
  echo -e "  stop [OPTIONS]                Stop mgbuild container"
  echo -e "  test-memgraph TEST            Run a selected test TEST (see supported tests below) inside mgbuild container"

  echo -e "\nSupported tests:"
  echo -e "  \"${SUPPORTED_TESTS[*]}\""

  echo -e "\nGlobal options:"
  echo -e "  --arch string                 Specify target architecture (\"${SUPPORTED_ARCHS[*]}\") (default \"$DEFAULT_ARCH\")"
  echo -e "  --build-type string           Specify build type (\"${SUPPORTED_BUILD_TYPES[*]}\") (default \"$DEFAULT_BUILD_TYPE\")"
  echo -e "  --enterprise-license string   Specify the enterprise license (default \"\")"
  echo -e "  --organization-name string    Specify the organization name (default \"memgraph\")"
  echo -e "  --os string                   Specify operating system (\"${SUPPORTED_OS[*]}\") (default \"$DEFAULT_OS\")"
  echo -e "  --threads int                 Specify the number of threads a command will use (default \"\$(nproc)\" for container)"
  echo -e "  --toolchain string            Specify toolchain version (\"${SUPPORTED_TOOLCHAINS[*]}\") (default \"$DEFAULT_TOOLCHAIN\")"

  echo -e "\nbuild-memgraph options:"
  echo -e "  --asan                        Build with ASAN"
  echo -e "  --community                   Build community version"
  echo -e "  --coverage                    Build with code coverage"
  echo -e "  --for-docker                  Add flag -DMG_TELEMETRY_ID_OVERRIDE=DOCKER to cmake"
  echo -e "  --for-platform                Add flag -DMG_TELEMETRY_ID_OVERRIDE=DOCKER-PLATFORM to cmake"
  echo -e "  --init-only                   Only run init script"
  echo -e "  --no-copy                     Don't copy the memgraph repo from host."
  echo -e "                                Use this option with caution, be sure that memgraph source code is in correct location inside mgbuild container"
  echo -e "  --ubsan                       Build with UBSAN"

  echo -e "\ncopy options:"
  echo -e "  --binary                      Copy memgraph binary from mgbuild container to host"
  echo -e "  --build-logs                  Copy build logs from mgbuild container to host"
  echo -e "  --package                     Copy memgraph package from mgbuild container to host"

  echo -e "\npush options:"
  echo -e "  -p, --password string         Specify password for docker login"
  echo -e "  -u, --username string         Specify username for docker login"

  echo -e "\nrun options:"
  echo -e "  --pull                        Pull the mgbuild image before running"

  echo -e "\nstop options:"
  echo -e "  --remove                      Remove the stopped mgbuild container"

  echo -e "\nToolchain v4 supported OSs:"
  echo -e "  \"${SUPPORTED_OS_V4[*]}\""

  echo -e "\nToolchain v5 supported OSs:"
  echo -e "  \"${SUPPORTED_OS_V5[*]}\""

  echo -e "\nExample usage:"
  echo -e "  $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd run"
  echo -e "  $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd --build-type RelWithDebInfo build-memgraph --community"
  echo -e "  $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd --build-type RelWithDebInfo test-memgraph unit"
  echo -e "  $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd package-memgraph"
  echo -e "  $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd copy --package"
  echo -e "  $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd stop --remove"
}

check_support() {
  local is_supported=false
  case "$1" in
    arch)
      for e in "${SUPPORTED_ARCHS[@]}"; do
        if [[ "$e" == "$2" ]]; then
          is_supported=true
          break
        fi
      done
      if [[ "$is_supported" == false ]]; then
        echo -e "Error: Architecture $2 isn't supported!\nChoose from ${SUPPORTED_ARCHS[*]}"
        exit 1
      fi
    ;;
    build_type)
      for e in "${SUPPORTED_BUILD_TYPES[@]}"; do
        if [[ "$e" == "$2" ]]; then
          is_supported=true
          break
        fi
      done
      if [[ "$is_supported" == false ]]; then
        echo -e "Error: Build type $2 isn't supported!\nChoose from ${SUPPORTED_BUILD_TYPES[*]}"
        exit 1
      fi
    ;;
    os)
      for e in "${SUPPORTED_OS[@]}"; do
        if [[ "$e" == "$2" ]]; then
          is_supported=true
          break
        fi
      done
      if [[ "$is_supported" == false ]]; then
        echo -e "Error: OS $2 isn't supported!\nChoose from ${SUPPORTED_OS[*]}"
        exit 1
      fi
    ;;
    toolchain)
      for e in "${SUPPORTED_TOOLCHAINS[@]}"; do
        if [[ "$e" == "$2" ]]; then
          is_supported=true
          break
        fi
      done
      if [[ "$is_supported" == false ]]; then
        echo -e "Error: Toolchain version $2 isn't supported!\nChoose from ${SUPPORTED_TOOLCHAINS[*]}"
        exit 1
      fi
    ;;
    os_toolchain_combo)
      if [[ "$3" == "v4" ]]; then
        local SUPPORTED_OS_TOOLCHAIN=("${SUPPORTED_OS_V4[@]}")
      elif [[ "$3" == "v5" ]]; then
        local SUPPORTED_OS_TOOLCHAIN=("${SUPPORTED_OS_V5[@]}")
      else
        echo -e "Error: $3 isn't a supported toolchain_version!\nChoose from ${SUPPORTED_TOOLCHAINS[*]}"
        exit 1
      fi
      for e in "${SUPPORTED_OS_TOOLCHAIN[@]}"; do
        if [[ "$e" == "$2" ]]; then
          is_supported=true
          break
        fi
      done
      if [[ "$is_supported" == false ]]; then
        echo -e "Error: Toolchain version $3 doesn't support OS $2!\nChoose from ${SUPPORTED_OS_TOOLCHAIN[*]}"
        exit 1
      fi
    ;;
    *)
      echo -e "Error: This function can only check arch, build_type, os, toolchain version and os toolchain combination"
      exit 1
    ;;
  esac
}

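check_support validates one dimension per call and exits on failure; a short sketch of typical invocations (these mirror the calls made later in this script):

  check_support arch amd
  check_support build_type RelWithDebInfo
  check_support os_toolchain_combo debian-12 v5   # exits 1 on an unsupported combo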
##################################################
######## BUILD, COPY AND PACKAGE MEMGRAPH ########
##################################################
build_memgraph () {
  local build_container="mgbuild_${toolchain_version}_${os}"
  local ACTIVATE_TOOLCHAIN="source /opt/toolchain-${toolchain_version}/activate"
  local container_build_dir="$MGBUILD_ROOT_DIR/build"
  local container_output_dir="$container_build_dir/output"
  local arm_flag=""
  if [[ "$arch" == "arm" ]] || [[ "$os" =~ "-arm" ]]; then
    arm_flag="-DMG_ARCH=ARM64"
  fi
  local build_type_flag="-DCMAKE_BUILD_TYPE=$build_type"
  local telemetry_id_override_flag=""
  local community_flag=""
  local coverage_flag=""
  local asan_flag=""
  local ubsan_flag=""
  local init_only=false
  local for_docker=false
  local for_platform=false
  local copy_from_host=true
  while [[ "$#" -gt 0 ]]; do
    case "$1" in
      --community)
        community_flag="-DMG_ENTERPRISE=OFF"
        shift 1
      ;;
      --init-only)
        init_only=true
        shift 1
      ;;
      --for-docker)
        for_docker=true
        if [[ "$for_platform" == "true" ]]; then
          echo "Error: Cannot combine --for-docker and --for-platform flags"
          exit 1
        fi
        telemetry_id_override_flag=" -DMG_TELEMETRY_ID_OVERRIDE=DOCKER "
        shift 1
      ;;
      --for-platform)
        for_platform=true
        if [[ "$for_docker" == "true" ]]; then
          echo "Error: Cannot combine --for-docker and --for-platform flags"
          exit 1
        fi
        telemetry_id_override_flag=" -DMG_TELEMETRY_ID_OVERRIDE=DOCKER-PLATFORM "
        shift 1
      ;;
      --coverage)
        coverage_flag="-DTEST_COVERAGE=ON"
        shift 1
      ;;
      --asan)
        asan_flag="-DASAN=ON"
        shift 1
      ;;
      --ubsan)
        ubsan_flag="-DUBSAN=ON"
        shift 1
      ;;
      --no-copy)
        copy_from_host=false
        shift 1
      ;;
      *)
        echo "Error: Unknown flag '$1'"
        exit 1
      ;;
    esac
  done

  echo "Initializing deps ..."
  # If master is not the current branch, fetch it, because the get_version
  # script depends on it. If we are on master, the fetch command is going to
  # fail so that's why there is the explicit check.
  # Required here because Docker build container can't access remote.
  cd "$PROJECT_ROOT"
  if [[ "$(git rev-parse --abbrev-ref HEAD)" != "master" ]]; then
    git fetch origin master:master
  fi

  if [[ "$copy_from_host" == "true" ]]; then
    # Ensure we have a clean build directory
    docker exec -u mg "$build_container" bash -c "rm -rf $MGBUILD_ROOT_DIR && mkdir -p $MGBUILD_ROOT_DIR"
    echo "Copying project files..."
    docker cp "$PROJECT_ROOT/." "$build_container:$MGBUILD_ROOT_DIR/"
  fi
  # Change ownership of copied files so the mg user inside container can access them
  docker exec -u root $build_container bash -c "chown -R mg:mg $MGBUILD_ROOT_DIR"

  echo "Installing dependencies using '$MGBUILD_ROOT_DIR/environment/os/$os.sh' script..."
  docker exec -u root "$build_container" bash -c "$MGBUILD_ROOT_DIR/environment/os/$os.sh check TOOLCHAIN_RUN_DEPS || /environment/os/$os.sh install TOOLCHAIN_RUN_DEPS"
  docker exec -u root "$build_container" bash -c "$MGBUILD_ROOT_DIR/environment/os/$os.sh check MEMGRAPH_BUILD_DEPS || /environment/os/$os.sh install MEMGRAPH_BUILD_DEPS"

  echo "Building targeted package..."
  # Fix issue with git marking directory as not safe
  docker exec -u mg "$build_container" bash -c "cd $MGBUILD_ROOT_DIR && git config --global --add safe.directory '*'"
  docker exec -u mg "$build_container" bash -c "cd $MGBUILD_ROOT_DIR && $ACTIVATE_TOOLCHAIN && ./init --ci"
  if [[ "$init_only" == "true" ]]; then
    return
  fi

  echo "Building Memgraph for $os on $build_container..."
  docker exec -u mg "$build_container" bash -c "cd $container_build_dir && rm -rf ./*"
  # Fix cmake failing locally if remote is cloned via ssh
  docker exec -u mg "$build_container" bash -c "cd $MGBUILD_ROOT_DIR && git remote set-url origin https://github.com/memgraph/memgraph.git"

  # Define cmake command
  local cmake_cmd="cmake $build_type_flag $arm_flag $community_flag $telemetry_id_override_flag $coverage_flag $asan_flag $ubsan_flag .."
  docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && $cmake_cmd"

  # ' is used instead of " because we need to run make within the allowed
  # container resources.
  # Default value for $threads is 0 instead of $(nproc) because macos
  # doesn't support the nproc command.
  # 0 is set for default value and checked here because mgbuild containers
  # support nproc
  # shellcheck disable=SC2016
  if [[ "$threads" == 0 ]]; then
    docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN "'&& make -j$(nproc)'
    docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN "'&& make -j$(nproc) -B mgconsole'
  else
    # $threads must expand on the host (it isn't set inside the container),
    # so double quotes are used here, unlike the $(nproc) branch above.
    docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && make -j$threads"
    docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && make -j$threads -B mgconsole"
  fi
}

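The quoting convention in the make invocations above is deliberate: a double-quoted segment expands on the host before docker exec runs, while a single-quoted segment is expanded by the shell inside the container. A minimal illustration (the container name c is a placeholder):

  docker exec c bash -c "echo $HOME"   # host's $HOME, expanded before docker runs
  docker exec c bash -c 'echo $HOME'   # container's $HOME, expanded inside the container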
package_memgraph() {
  local ACTIVATE_TOOLCHAIN="source /opt/toolchain-${toolchain_version}/activate"
  local build_container="mgbuild_${toolchain_version}_${os}"
  local container_output_dir="$MGBUILD_ROOT_DIR/build/output"
  local package_command=""
  if [[ "$os" =~ ^"centos".* ]] || [[ "$os" =~ ^"fedora".* ]] || [[ "$os" =~ ^"amzn".* ]] || [[ "$os" =~ ^"rocky".* ]]; then
    docker exec -u root "$build_container" bash -c "yum -y update"
    package_command=" cpack -G RPM --config ../CPackConfig.cmake && rpmlint --file='../../release/rpm/rpmlintrc' memgraph*.rpm "
  fi
  if [[ "$os" =~ ^"debian".* ]]; then
    docker exec -u root "$build_container" bash -c "apt --allow-releaseinfo-change -y update"
    package_command=" cpack -G DEB --config ../CPackConfig.cmake "
  fi
  if [[ "$os" =~ ^"ubuntu".* ]]; then
    docker exec -u root "$build_container" bash -c "apt update"
    package_command=" cpack -G DEB --config ../CPackConfig.cmake "
  fi
  docker exec -u mg "$build_container" bash -c "mkdir -p $container_output_dir && cd $container_output_dir && $ACTIVATE_TOOLCHAIN && $package_command"
}

copy_memgraph() {
  local build_container="mgbuild_${toolchain_version}_${os}"
  case "$1" in
    --binary)
      echo "Copying memgraph binary to host..."
      local container_output_path="$MGBUILD_ROOT_DIR/build/memgraph"
      local host_output_path="$PROJECT_ROOT/build/memgraph"
      mkdir -p "$PROJECT_ROOT/build"
      docker cp -L $build_container:$container_output_path $host_output_path
      echo "Binary saved to $host_output_path"
    ;;
    --build-logs)
      echo "Copying memgraph build logs to host..."
      local container_output_path="$MGBUILD_ROOT_DIR/build/logs"
      local host_output_path="$PROJECT_ROOT/build/logs"
      mkdir -p "$PROJECT_ROOT/build"
      docker cp -L $build_container:$container_output_path $host_output_path
      echo "Build logs saved to $host_output_path"
    ;;
    --package)
      echo "Copying memgraph package to host..."
      local container_output_dir="$MGBUILD_ROOT_DIR/build/output"
      local host_output_dir="$PROJECT_ROOT/build/output/$os"
      local last_package_name=$(docker exec -u mg "$build_container" bash -c "cd $container_output_dir && ls -t memgraph* | head -1")
      mkdir -p "$host_output_dir"
      docker cp "$build_container:$container_output_dir/$last_package_name" "$host_output_dir/$last_package_name"
      echo "Package saved to $host_output_dir/$last_package_name"
    ;;
    *)
      echo "Error: Unknown flag '$1'"
      exit 1
    ;;
  esac
}

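Note the -L flag on docker cp for the binary and build logs: it follows a symlink inside the container so the real file is copied instead of a dangling link. A sketch of the equivalent manual copy, with an illustrative container name:

  docker cp -L mgbuild_v5_debian-12:/home/mg/memgraph/build/memgraph ./build/memgraph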
##################################################
##################### TESTS ######################
##################################################
test_memgraph() {
  local ACTIVATE_TOOLCHAIN="source /opt/toolchain-${toolchain_version}/activate"
  local ACTIVATE_VENV="./setup.sh /opt/toolchain-${toolchain_version}/activate"
  local EXPORT_LICENSE="export MEMGRAPH_ENTERPRISE_LICENSE=$enterprise_license"
  local EXPORT_ORG_NAME="export MEMGRAPH_ORGANIZATION_NAME=$organization_name"
  local BUILD_DIR="$MGBUILD_ROOT_DIR/build"
  local build_container="mgbuild_${toolchain_version}_${os}"
  echo "Running $1 test on $build_container..."

  case "$1" in
    unit)
      # $threads expands on the host here; it is not set inside the container.
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $BUILD_DIR && $ACTIVATE_TOOLCHAIN && ctest -R memgraph__unit --output-on-failure -j$threads"
    ;;
    unit-coverage)
      local setup_lsan_ubsan="export LSAN_OPTIONS=suppressions=$BUILD_DIR/../tools/lsan.supp && export UBSAN_OPTIONS=halt_on_error=1"
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $BUILD_DIR && $ACTIVATE_TOOLCHAIN && $setup_lsan_ubsan "'&& ctest -R memgraph__unit --output-on-failure -j2'
    ;;
    leftover-CTest)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $BUILD_DIR && $ACTIVATE_TOOLCHAIN "'&& ctest -E "(memgraph__unit|memgraph__benchmark)" --output-on-failure'
    ;;
    drivers)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR "'&& ./tests/drivers/run.sh'
    ;;
    integration)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR "'&& tests/integration/run.sh'
    ;;
    cppcheck-and-clang-format)
      local test_output_path="$MGBUILD_ROOT_DIR/tools/github/cppcheck_and_clang_format.txt"
      local test_output_host_dest="$PROJECT_ROOT/tools/github/cppcheck_and_clang_format.txt"
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tools/github && $ACTIVATE_TOOLCHAIN "'&& ./cppcheck_and_clang_format diff'
      docker cp $build_container:$test_output_path $test_output_host_dest
    ;;
    stress-plain)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests/stress && source ve3/bin/activate "'&& ./continuous_integration'
    ;;
    stress-ssl)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests/stress && source ve3/bin/activate "'&& ./continuous_integration --use-ssl'
    ;;
    durability)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests/stress && source ve3/bin/activate "'&& python3 durability --num-steps 5'
    ;;
    gql-behave)
      local test_output_dir="$MGBUILD_ROOT_DIR/tests/gql_behave"
      local test_output_host_dest="$PROJECT_ROOT/tests/gql_behave"
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests && $ACTIVATE_VENV && cd $MGBUILD_ROOT_DIR/tests/gql_behave "'&& ./continuous_integration'
      docker cp $build_container:$test_output_dir/gql_behave_status.csv $test_output_host_dest/gql_behave_status.csv
      docker cp $build_container:$test_output_dir/gql_behave_status.html $test_output_host_dest/gql_behave_status.html
    ;;
    macro-benchmark)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && export USER=mg && export LANG=$(echo $LANG) && cd $MGBUILD_ROOT_DIR/tests/macro_benchmark "'&& ./harness QuerySuite MemgraphRunner --groups aggregation 1000_create unwind_create dense_expand match --no-strict'
    ;;
    mgbench)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests/mgbench "'&& ./benchmark.py vendor-native --num-workers-for-benchmark 12 --export-results benchmark_result.json pokec/medium/*/*'
    ;;
    upload-to-bench-graph)
      shift 1
      local SETUP_PASSED_ARGS="export PASSED_ARGS=\"$@\""
      local SETUP_VE3_ENV="virtualenv -p python3 ve3 && source ve3/bin/activate && pip install -r requirements.txt"
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tools/bench-graph-client && $SETUP_VE3_ENV && $SETUP_PASSED_ARGS "'&& ./main.py $PASSED_ARGS'
    ;;
    code-analysis)
      shift 1
      local SETUP_PASSED_ARGS="export PASSED_ARGS=\"$@\""
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests/code_analysis && $SETUP_PASSED_ARGS "'&& ./python_code_analysis.sh $PASSED_ARGS'
    ;;
    code-coverage)
      local test_output_path="$MGBUILD_ROOT_DIR/tools/github/generated/code_coverage.tar.gz"
      local test_output_host_dest="$PROJECT_ROOT/tools/github/generated/code_coverage.tar.gz"
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && $ACTIVATE_TOOLCHAIN && cd $MGBUILD_ROOT_DIR/tools/github "'&& ./coverage_convert'
      docker exec -u mg $build_container bash -c "cd $MGBUILD_ROOT_DIR/tools/github/generated && tar -czf code_coverage.tar.gz coverage.json html report.json summary.rmu"
      mkdir -p $PROJECT_ROOT/tools/github/generated
      docker cp $build_container:$test_output_path $test_output_host_dest
    ;;
    clang-tidy)
      shift 1
      local SETUP_PASSED_ARGS="export PASSED_ARGS=\"$@\""
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && export THREADS=$threads && $ACTIVATE_TOOLCHAIN && cd $MGBUILD_ROOT_DIR/tests/code_analysis && $SETUP_PASSED_ARGS "'&& ./clang_tidy.sh $PASSED_ARGS'
    ;;
    e2e)
      # local kafka_container="kafka_kafka_1"
      # local kafka_hostname="kafka"
      # local pulsar_container="pulsar_pulsar_1"
      # local pulsar_hostname="pulsar"
      # local setup_hostnames="export KAFKA_HOSTNAME=$kafka_hostname && PULSAR_HOSTNAME=$pulsar_hostname"
      # local build_container_network=$(docker inspect $build_container --format='{{ .HostConfig.NetworkMode }}')
      # docker network connect --alias $kafka_hostname $build_container_network $kafka_container > /dev/null 2>&1 || echo "Kafka container already inside correct network or something went wrong ..."
      # docker network connect --alias $pulsar_hostname $build_container_network $pulsar_container > /dev/null 2>&1 || echo "Pulsar container already inside correct network or something went wrong ..."
      docker exec -u mg $build_container bash -c "pip install --user networkx && pip3 install --user networkx"
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests && $ACTIVATE_VENV && source ve3/bin/activate_e2e && cd $MGBUILD_ROOT_DIR/tests/e2e "'&& ./run.sh'
    ;;
    *)
      echo "Error: Unknown test '$1'"
      exit 1
    ;;
  esac
}

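Each test runs inside the already-built container with the enterprise license exported first. A sketch of invoking the unit tests through the CLI, with placeholder license values:

  ./mgbuild.sh --os debian-12 --toolchain v5 --arch amd \
    --enterprise-license "$MEMGRAPH_ENTERPRISE_LICENSE" \
    --organization-name memgraph \
    test-memgraph unit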
##################################################
################### PARSE ARGS ###################
##################################################
if [ "$#" -eq 0 ] || [ "$1" == "-h" ] || [ "$1" == "--help" ]; then
  print_help
  exit 0
fi
arch=$DEFAULT_ARCH
build_type=$DEFAULT_BUILD_TYPE
enterprise_license=$DEFAULT_ENTERPRISE_LICENSE
organization_name=$DEFAULT_ORGANIZATION_NAME
os=$DEFAULT_OS
threads=$DEFAULT_THREADS
toolchain_version=$DEFAULT_TOOLCHAIN
command=""
while [[ $# -gt 0 ]]; do
  case "$1" in
    --arch)
      arch=$2
      check_support arch $arch
      shift 2
    ;;
    --build-type)
      build_type=$2
      check_support build_type $build_type
      shift 2
    ;;
    --enterprise-license)
      enterprise_license=$2
      shift 2
    ;;
    --organization-name)
      organization_name=$2
      shift 2
    ;;
    --os)
      os=$2
      check_support os $os
      shift 2
    ;;
    --threads)
      threads=$2
      shift 2
    ;;
    --toolchain)
      toolchain_version=$2
      check_support toolchain $toolchain_version
      shift 2
    ;;
    *)
      if [[ "$1" =~ ^--.* ]]; then
        echo -e "Error: Unknown option '$1'"
        exit 1
      else
        command=$1
        shift 1
        break
      fi
    ;;
  esac
done
check_support os_toolchain_combo $os $toolchain_version

if [[ "$command" == "" ]]; then
  echo -e "Error: No command provided, please provide a command"
  print_help
  exit 1
fi

if docker compose version > /dev/null 2>&1; then
  docker_compose_cmd="docker compose"
elif which docker-compose > /dev/null 2>&1; then
  docker_compose_cmd="docker-compose"
else
  echo -e "Missing command: either 'docker compose' or 'docker-compose' must be installed"
  exit 1
fi
echo "Using $docker_compose_cmd"

##################################################
################# PARSE COMMAND ##################
##################################################
case $command in
  build)
    cd $SCRIPT_DIR
    if [[ "$os" == "all" ]]; then
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml build
    else
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml build mgbuild_${toolchain_version}_${os}
    fi
  ;;
  run)
    cd $SCRIPT_DIR
    pull=false
    if [[ "$#" -gt 0 ]]; then
      if [[ "$1" == "--pull" ]]; then
        pull=true
      else
        echo "Error: Unknown flag '$1'"
        exit 1
      fi
    fi
    if [[ "$os" == "all" ]]; then
      if [[ "$pull" == "true" ]]; then
        $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml pull --ignore-pull-failures
      elif [[ "$docker_compose_cmd" == "docker compose" ]]; then
        $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml pull --ignore-pull-failures --policy missing
      fi
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml up -d
    else
      if [[ "$pull" == "true" ]]; then
        $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml pull mgbuild_${toolchain_version}_${os}
      elif ! docker image inspect memgraph/mgbuild:${toolchain_version}_${os} > /dev/null 2>&1; then
        $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml pull --ignore-pull-failures mgbuild_${toolchain_version}_${os}
      fi
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml up -d mgbuild_${toolchain_version}_${os}
    fi
  ;;
  stop)
    cd $SCRIPT_DIR
    remove=false
    if [[ "$#" -gt 0 ]]; then
      if [[ "$1" == "--remove" ]]; then
        remove=true
      else
        echo "Error: Unknown flag '$1'"
        exit 1
      fi
    fi
    if [[ "$os" == "all" ]]; then
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml down
    else
      docker stop mgbuild_${toolchain_version}_${os}
      if [[ "$remove" == "true" ]]; then
        docker rm mgbuild_${toolchain_version}_${os}
      fi
    fi
  ;;
  pull)
    cd $SCRIPT_DIR
    if [[ "$os" == "all" ]]; then
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml pull --ignore-pull-failures
    else
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml pull mgbuild_${toolchain_version}_${os}
    fi
  ;;
  push)
    docker login $@
    cd $SCRIPT_DIR
    if [[ "$os" == "all" ]]; then
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml push --ignore-push-failures
    else
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml push mgbuild_${toolchain_version}_${os}
    fi
  ;;
  build-memgraph)
    build_memgraph $@
  ;;
  package-memgraph)
    package_memgraph
  ;;
  test-memgraph)
    test_memgraph $@
  ;;
  copy)
    copy_memgraph $@
  ;;
  *)
    echo "Error: Unknown command '$command'"
    exit 1
  ;;
esac
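Putting the commands together, a typical end-to-end flow (OS and toolchain chosen for illustration):

  ./mgbuild.sh --os debian-12 --toolchain v5 --arch amd run
  ./mgbuild.sh --os debian-12 --toolchain v5 --arch amd --build-type RelWithDebInfo build-memgraph
  ./mgbuild.sh --os debian-12 --toolchain v5 --arch amd package-memgraph
  ./mgbuild.sh --os debian-12 --toolchain v5 --arch amd copy --package
  ./mgbuild.sh --os debian-12 --toolchain v5 --arch amd stop --remove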
40
release/package/rocky-9.3/Dockerfile
Normal file
@ -0,0 +1,40 @@
FROM rockylinux:9.3

ARG TOOLCHAIN_VERSION

# Stops tzdata interactive configuration.
RUN yum -y update \
    && yum install -y wget git
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-rocky-9.3-amd64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-rocky-9.3-amd64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-rocky-9.3-amd64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-rocky-9.3-amd64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/rocky-9.3.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/rocky-9.3.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

# Install PyYAML (only for amzn-2, centos-7, centos-9 and rocky-9.3)
RUN pip3 install --user PyYAML

ENTRYPOINT ["sleep", "infinity"]
@ -1,208 +0,0 @@
#!/bin/bash

set -Eeuo pipefail

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
SUPPORTED_OS=(
  centos-7 centos-9
  debian-10 debian-11 debian-11-arm
  ubuntu-18.04 ubuntu-20.04 ubuntu-22.04 ubuntu-22.04-arm
  fedora-36
  amzn-2
)

SUPPORTED_BUILD_TYPES=(
  Debug
  Release
  RelWithDebInfo
)

PROJECT_ROOT="$SCRIPT_DIR/../.."
TOOLCHAIN_VERSION="toolchain-v4"
ACTIVATE_TOOLCHAIN="source /opt/${TOOLCHAIN_VERSION}/activate"
HOST_OUTPUT_DIR="$PROJECT_ROOT/build/output"

print_help () {
  # TODO(gitbuda): Update the release/package/run.sh help
  echo "$0 init|package|docker|test {os} {build_type} [--for-docker|--for-platform]"
  echo ""
  echo "    OSs: ${SUPPORTED_OS[*]}"
  echo "    Build types: ${SUPPORTED_BUILD_TYPES[*]}"
  exit 1
}

make_package () {
  os="$1"
  build_type="$2"

  build_container="mgbuild_$os"
  echo "Building Memgraph for $os on $build_container..."

  package_command=""
  if [[ "$os" =~ ^"centos".* ]] || [[ "$os" =~ ^"fedora".* ]] || [[ "$os" =~ ^"amzn".* ]]; then
    docker exec "$build_container" bash -c "yum -y update"
    package_command=" cpack -G RPM --config ../CPackConfig.cmake && rpmlint --file='../../release/rpm/rpmlintrc' memgraph*.rpm "
  fi
  if [[ "$os" =~ ^"debian".* ]]; then
    docker exec "$build_container" bash -c "apt --allow-releaseinfo-change -y update"
    package_command=" cpack -G DEB --config ../CPackConfig.cmake "
  fi
  if [[ "$os" =~ ^"ubuntu".* ]]; then
    docker exec "$build_container" bash -c "apt update"
    package_command=" cpack -G DEB --config ../CPackConfig.cmake "
  fi
  telemetry_id_override_flag=""
  if [[ "$#" -gt 2 ]]; then
    if [[ "$3" == "--for-docker" ]]; then
      telemetry_id_override_flag=" -DMG_TELEMETRY_ID_OVERRIDE=DOCKER "
    elif [[ "$3" == "--for-platform" ]]; then
      telemetry_id_override_flag=" -DMG_TELEMETRY_ID_OVERRIDE=DOCKER-PLATFORM"
    else
      print_help
      exit
    fi
  fi

  echo "Copying project files..."
  # If master is not the current branch, fetch it, because the get_version
  # script depends on it. If we are on master, the fetch command is going to
  # fail so that's why there is the explicit check.
  # Required here because Docker build container can't access remote.
  cd "$PROJECT_ROOT"
  if [[ "$(git rev-parse --abbrev-ref HEAD)" != "master" ]]; then
    git fetch origin master:master
  fi

  # Ensure we have a clean build directory
  docker exec "$build_container" rm -rf /memgraph

  docker exec "$build_container" mkdir -p /memgraph
  # TODO(gitbuda): Revisit copying the whole repo -> makes sense under CI.
  docker cp "$PROJECT_ROOT/." "$build_container:/memgraph/"

  container_build_dir="/memgraph/build"
  container_output_dir="$container_build_dir/output"

  # TODO(gitbuda): TOOLCHAIN_RUN_DEPS should be installed during the Docker
  # image build phase, but that is not easy at this point because the
  # environment/os/{os}.sh does not come within the toolchain package. When
  # migrating to the next version of toolchain do that, and remove the
  # TOOLCHAIN_RUN_DEPS installation from here.
  # TODO(gitbuda): On the other side, having this here allows updating deps
  # without rerunning the build containers.
  echo "Installing dependencies using '/memgraph/environment/os/$os.sh' script..."
  docker exec "$build_container" bash -c "/memgraph/environment/os/$os.sh install TOOLCHAIN_RUN_DEPS"
  docker exec "$build_container" bash -c "/memgraph/environment/os/$os.sh install MEMGRAPH_BUILD_DEPS"

  echo "Building targeted package..."
  # Fix issue with git marking directory as not safe
  docker exec "$build_container" bash -c "cd /memgraph && git config --global --add safe.directory '*'"
  docker exec "$build_container" bash -c "cd /memgraph && $ACTIVATE_TOOLCHAIN && ./init"
  docker exec "$build_container" bash -c "cd $container_build_dir && rm -rf ./*"
  # TODO(gitbuda): cmake fails locally if remote is cloned via ssh because of the key -> FIX
  if [[ "$os" =~ "-arm" ]]; then
    docker exec "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && cmake -DCMAKE_BUILD_TYPE=$build_type -DMG_ARCH="ARM64" $telemetry_id_override_flag .."
  else
    docker exec "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && cmake -DCMAKE_BUILD_TYPE=$build_type $telemetry_id_override_flag .."
  fi
  # ' is used instead of " because we need to run make within the allowed
  # container resources.
  # shellcheck disable=SC2016
  docker exec "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN "'&& make -j$(nproc)'
  docker exec "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN "'&& make -j$(nproc) -B mgconsole'
  docker exec "$build_container" bash -c "mkdir -p $container_output_dir && cd $container_output_dir && $ACTIVATE_TOOLCHAIN && $package_command"

  echo "Copying targeted package to host..."
  last_package_name=$(docker exec "$build_container" bash -c "cd $container_output_dir && ls -t memgraph* | head -1")
  # The operating system folder is introduced because multiple different
  # packages could be preserved during the same build "session".
  mkdir -p "$HOST_OUTPUT_DIR/$os"
  package_host_destination="$HOST_OUTPUT_DIR/$os/$last_package_name"
  docker cp "$build_container:$container_output_dir/$last_package_name" "$package_host_destination"
  echo "Package saved to $package_host_destination."
}

case "$1" in
|
||||
init)
|
||||
cd "$SCRIPT_DIR"
|
||||
if ! which "docker-compose" >/dev/null; then
|
||||
docker_compose_cmd="docker compose"
|
||||
else
|
||||
docker_compose_cmd="docker-compose"
|
||||
fi
|
||||
$docker_compose_cmd build --build-arg TOOLCHAIN_VERSION="${TOOLCHAIN_VERSION}"
|
||||
$docker_compose_cmd up -d
|
||||
;;
|
||||
|
||||
docker)
|
||||
# NOTE: Docker is build on top of Debian 11 package.
|
||||
based_on_os="debian-11"
|
||||
# shellcheck disable=SC2012
|
||||
last_package_name=$(cd "$HOST_OUTPUT_DIR/$based_on_os" && ls -t memgraph* | head -1)
|
||||
docker_build_folder="$PROJECT_ROOT/release/docker"
|
||||
cd "$docker_build_folder"
|
||||
./package_docker --latest "$HOST_OUTPUT_DIR/$based_on_os/$last_package_name"
|
||||
# shellcheck disable=SC2012
|
||||
docker_image_name=$(cd "$docker_build_folder" && ls -t memgraph* | head -1)
|
||||
docker_host_folder="$HOST_OUTPUT_DIR/docker"
|
||||
docker_host_image_path="$docker_host_folder/$docker_image_name"
|
||||
mkdir -p "$docker_host_folder"
|
||||
cp "$docker_build_folder/$docker_image_name" "$docker_host_image_path"
|
||||
echo "Docker images saved to $docker_host_image_path."
|
||||
;;
|
||||
|
||||
package)
|
||||
shift 1
|
||||
if [[ "$#" -lt 2 ]]; then
|
||||
print_help
|
||||
fi
|
||||
os="$1"
|
||||
build_type="$2"
|
||||
shift 2
|
||||
is_os_ok=false
|
||||
for supported_os in "${SUPPORTED_OS[@]}"; do
|
||||
if [[ "$supported_os" == "${os}" ]]; then
|
||||
is_os_ok=true
|
||||
break
|
||||
fi
|
||||
done
|
||||
is_build_type_ok=false
|
||||
for supported_build_type in "${SUPPORTED_BUILD_TYPES[@]}"; do
|
||||
if [[ "$supported_build_type" == "${build_type}" ]]; then
|
||||
is_build_type_ok=true
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [[ "$is_os_ok" == true && "$is_build_type_ok" == true ]]; then
|
||||
make_package "$os" "$build_type" "$@"
|
||||
else
|
||||
if [[ "$is_os_ok" == false ]]; then
|
||||
echo "Unsupported OS: $os"
|
||||
elif [[ "$is_build_type_ok" == false ]]; then
|
||||
echo "Unsupported build type: $build_type"
|
||||
fi
|
||||
print_help
|
||||
fi
|
||||
;;
|
||||
|
||||
build)
|
||||
shift 1
|
||||
if [[ "$#" -ne 2 ]]; then
|
||||
print_help
|
||||
fi
|
||||
# in the vX format, e.g. v5
|
||||
toolchain_version="$1"
|
||||
# a name of the os folder, e.g. ubuntu-22.04-arm
|
||||
os="$2"
|
||||
cd "$SCRIPT_DIR/$os"
|
||||
docker build -f Dockerfile --build-arg TOOLCHAIN_VERSION="toolchain-$toolchain_version" -t "memgraph/memgraph-builder:${toolchain_version}_$os" .
|
||||
;;
|
||||
|
||||
test)
|
||||
echo "TODO(gitbuda): Test all packages on mgtest containers."
|
||||
;;
|
||||
|
||||
*)
|
||||
print_help
|
||||
;;
|
||||
esac
|
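A rough mapping from the removed run.sh interface to the new mgbuild.sh, inferred from the two scripts' interfaces rather than an official migration guide:

  # old: ./run.sh package debian-11 Release
  # new (an approximation):
  ./mgbuild.sh --os debian-11 --toolchain v5 --arch amd --build-type Release build-memgraph
  ./mgbuild.sh --os debian-11 --toolchain v5 --arch amd package-memgraph
  ./mgbuild.sh --os debian-11 --toolchain v5 --arch amd copy --package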
@ -10,9 +10,30 @@ RUN apt update && apt install -y \
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz \
    -O ${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz \
    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz -C /opt \
    && rm ${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz
RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/ubuntu-18.04.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/ubuntu-18.04.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]
@ -10,9 +10,30 @@ RUN apt update && apt install -y \
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz \
    -O ${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz \
    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz -C /opt \
    && rm ${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz
RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/ubuntu-20.04.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/ubuntu-20.04.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]
@ -10,9 +10,30 @@ RUN apt update && apt install -y \
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz \
    -O ${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz \
    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz -C /opt \
    && rm ${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz
RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/ubuntu-22.04-arm.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/ubuntu-22.04-arm.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]
@ -10,9 +10,30 @@ RUN apt update && apt install -y \
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz \
    -O ${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz \
    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz -C /opt \
    && rm ${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz
RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/ubuntu-22.04.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/ubuntu-22.04.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]
@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -16,11 +16,14 @@ target_sources(mg-coordination
        include/coordination/raft_state.hpp
        include/coordination/rpc_errors.hpp

        include/nuraft/raft_log_action.hpp
        include/nuraft/coordinator_cluster_state.hpp
        include/nuraft/coordinator_log_store.hpp
        include/nuraft/coordinator_state_machine.hpp
        include/nuraft/coordinator_state_manager.hpp

  PRIVATE
        coordinator_config.cpp
        coordinator_client.cpp
        coordinator_state.cpp
        coordinator_rpc.cpp
@ -33,6 +36,7 @@ target_sources(mg-coordination
        coordinator_log_store.cpp
        coordinator_state_machine.cpp
        coordinator_state_manager.cpp
        coordinator_cluster_state.cpp
)
target_include_directories(mg-coordination PUBLIC include)
@ -16,6 +16,7 @@

#include "coordination/coordinator_config.hpp"
#include "coordination/coordinator_rpc.hpp"
#include "replication_coordination_glue/common.hpp"
#include "replication_coordination_glue/messages.hpp"
#include "utils/result.hpp"

@ -30,7 +31,7 @@ auto CreateClientContext(memgraph::coordination::CoordinatorClientConfig const &
}  // namespace

CoordinatorClient::CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorClientConfig config,
                                     HealthCheckCallback succ_cb, HealthCheckCallback fail_cb)
                                     HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb)
    : rpc_context_{CreateClientContext(config)},
      rpc_client_{io::network::Endpoint(io::network::Endpoint::needs_resolving, config.ip_address, config.port),
                  &rpc_context_},
@ -40,7 +41,9 @@ CoordinatorClient::CoordinatorClient(CoordinatorInstance *coord_instance, Coordi
      fail_cb_{std::move(fail_cb)} {}

auto CoordinatorClient::InstanceName() const -> std::string { return config_.instance_name; }
auto CoordinatorClient::SocketAddress() const -> std::string { return rpc_client_.Endpoint().SocketAddress(); }

auto CoordinatorClient::CoordinatorSocketAddress() const -> std::string { return config_.CoordinatorSocketAddress(); }
auto CoordinatorClient::ReplicationSocketAddress() const -> std::string { return config_.ReplicationSocketAddress(); }

auto CoordinatorClient::InstanceDownTimeoutSec() const -> std::chrono::seconds {
  return config_.instance_down_timeout_sec;
@ -63,11 +66,15 @@ void CoordinatorClient::StartFrequentCheck() {
      [this, instance_name = config_.instance_name] {
        try {
          spdlog::trace("Sending frequent heartbeat to machine {} on {}", instance_name,
                        rpc_client_.Endpoint().SocketAddress());
                        config_.CoordinatorSocketAddress());
          {  // NOTE: This is intentionally scoped so that stream lock could get released.
            auto stream{rpc_client_.Stream<memgraph::replication_coordination_glue::FrequentHeartbeatRpc>()};
            stream.AwaitResponse();
          }
          // Subtle race condition:
          // acquiring of lock needs to happen before function call, as function callback can be changed
          // for instance after lock is already acquired
          // (failover case when instance is promoted to MAIN)
          succ_cb_(coord_instance_, instance_name);
        } catch (rpc::RpcFailedException const &) {
          fail_cb_(coord_instance_, instance_name);
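The scoped-stream note and the race comment above are the heart of this hunk: the RPC stream must drop its lock before the callback runs, and the callback is read under the coordinator lock because it can be swapped concurrently (e.g. when an instance is promoted to MAIN). A minimal standalone sketch of that swap-under-lock pattern, using only the standard library (all names here are illustrative, not Memgraph's):

#include <functional>
#include <iostream>
#include <mutex>
#include <string>

// Illustrative stand-in for a health-check callback that may be swapped at runtime.
using Callback = std::function<void(const std::string &)>;

struct Checker {
  std::mutex mtx;  // guards cb
  Callback cb;     // swapped e.g. when an instance is promoted to MAIN

  void RunOnce(const std::string &name) {
    // Acquire the lock *before* reading the callback, so a concurrent
    // SetCallback cannot race with the invocation decision.
    std::lock_guard lock{mtx};
    cb(name);
  }

  void SetCallback(Callback new_cb) {
    std::lock_guard lock{mtx};
    cb = std::move(new_cb);
  }
};

int main() {
  Checker c;
  c.SetCallback([](const std::string &n) { std::cout << "replica cb for " << n << '\n'; });
  c.RunOnce("instance_1");
  c.SetCallback([](const std::string &n) { std::cout << "main cb for " << n << '\n'; });
  c.RunOnce("instance_1");
}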
@ -79,11 +86,6 @@ void CoordinatorClient::StopFrequentCheck() { instance_checker_.Stop(); }
void CoordinatorClient::PauseFrequentCheck() { instance_checker_.Pause(); }
void CoordinatorClient::ResumeFrequentCheck() { instance_checker_.Resume(); }

auto CoordinatorClient::SetCallbacks(HealthCheckCallback succ_cb, HealthCheckCallback fail_cb) -> void {
  succ_cb_ = std::move(succ_cb);
  fail_cb_ = std::move(fail_cb);
}

auto CoordinatorClient::ReplicationClientInfo() const -> ReplClientInfo { return config_.replication_client_info; }

auto CoordinatorClient::SendPromoteReplicaToMainRpc(const utils::UUID &uuid,
@ -117,7 +119,7 @@ auto CoordinatorClient::DemoteToReplica() const -> bool {
  return false;
}

auto CoordinatorClient::SendSwapMainUUIDRpc(const utils::UUID &uuid) const -> bool {
auto CoordinatorClient::SendSwapMainUUIDRpc(utils::UUID const &uuid) const -> bool {
  try {
    auto stream{rpc_client_.Stream<replication_coordination_glue::SwapMainUUIDRpc>(uuid)};
    if (!stream.AwaitResponse().success) {
@ -131,7 +133,7 @@ auto CoordinatorClient::SendSwapMainUUIDRpc(const utils::UUID &uuid) const -> bo
  return false;
}

auto CoordinatorClient::SendUnregisterReplicaRpc(std::string const &instance_name) const -> bool {
auto CoordinatorClient::SendUnregisterReplicaRpc(std::string_view instance_name) const -> bool {
  try {
    auto stream{rpc_client_.Stream<UnregisterReplicaRpc>(instance_name)};
    if (!stream.AwaitResponse().success) {
@ -171,5 +173,17 @@ auto CoordinatorClient::SendEnableWritingOnMainRpc() const -> bool {
  return false;
}

auto CoordinatorClient::SendGetInstanceTimestampsRpc() const
    -> utils::BasicResult<GetInstanceUUIDError, replication_coordination_glue::DatabaseHistories> {
  try {
    auto stream{rpc_client_.Stream<coordination::GetDatabaseHistoriesRpc>()};
    return stream.AwaitResponse().database_histories;

  } catch (const rpc::RpcFailedException &) {
    spdlog::error("RPC error occurred while sending GetDatabaseHistories RPC");
    return GetInstanceUUIDError::RPC_EXCEPTION;
  }
}

}  // namespace memgraph::coordination
#endif
147
src/coordination/coordinator_cluster_state.cpp
Normal file
@ -0,0 +1,147 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#ifdef MG_ENTERPRISE

#include "nuraft/coordinator_cluster_state.hpp"
#include "utils/logging.hpp"

#include <shared_mutex>

namespace memgraph::coordination {

void to_json(nlohmann::json &j, InstanceState const &instance_state) {
  j = nlohmann::json{{"config", instance_state.config}, {"status", instance_state.status}};
}

void from_json(nlohmann::json const &j, InstanceState &instance_state) {
  j.at("config").get_to(instance_state.config);
  j.at("status").get_to(instance_state.status);
}

CoordinatorClusterState::CoordinatorClusterState(std::map<std::string, InstanceState, std::less<>> instances)
    : instances_{std::move(instances)} {}

CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState const &other) : instances_{other.instances_} {}

CoordinatorClusterState &CoordinatorClusterState::operator=(CoordinatorClusterState const &other) {
  if (this == &other) {
    return *this;
  }
  instances_ = other.instances_;
  return *this;
}

CoordinatorClusterState::CoordinatorClusterState(CoordinatorClusterState &&other) noexcept
    : instances_{std::move(other.instances_)} {}

CoordinatorClusterState &CoordinatorClusterState::operator=(CoordinatorClusterState &&other) noexcept {
  if (this == &other) {
    return *this;
  }
  instances_ = std::move(other.instances_);
  return *this;
}

auto CoordinatorClusterState::MainExists() const -> bool {
  auto lock = std::shared_lock{log_lock_};
  return std::ranges::any_of(instances_,
                             [](auto const &entry) { return entry.second.status == ReplicationRole::MAIN; });
}

auto CoordinatorClusterState::IsMain(std::string_view instance_name) const -> bool {
  auto lock = std::shared_lock{log_lock_};
  auto const it = instances_.find(instance_name);
  return it != instances_.end() && it->second.status == ReplicationRole::MAIN;
}

auto CoordinatorClusterState::IsReplica(std::string_view instance_name) const -> bool {
  auto lock = std::shared_lock{log_lock_};
  auto const it = instances_.find(instance_name);
  return it != instances_.end() && it->second.status == ReplicationRole::REPLICA;
}

auto CoordinatorClusterState::InsertInstance(std::string instance_name, InstanceState instance_state) -> void {
  auto lock = std::lock_guard{log_lock_};
  instances_.insert_or_assign(std::move(instance_name), std::move(instance_state));
}

auto CoordinatorClusterState::DoAction(TRaftLog log_entry, RaftLogAction log_action) -> void {
  auto lock = std::lock_guard{log_lock_};
  switch (log_action) {
    case RaftLogAction::REGISTER_REPLICATION_INSTANCE: {
      auto const &config = std::get<CoordinatorClientConfig>(log_entry);
      instances_[config.instance_name] = InstanceState{config, ReplicationRole::REPLICA};
      break;
    }
    case RaftLogAction::UNREGISTER_REPLICATION_INSTANCE: {
      auto const instance_name = std::get<std::string>(log_entry);
      instances_.erase(instance_name);
      break;
    }
    case RaftLogAction::SET_INSTANCE_AS_MAIN: {
      auto const instance_name = std::get<std::string>(log_entry);
      auto it = instances_.find(instance_name);
      MG_ASSERT(it != instances_.end(), "Instance does not exist as part of raft state!");
      it->second.status = ReplicationRole::MAIN;
      break;
    }
    case RaftLogAction::SET_INSTANCE_AS_REPLICA: {
      auto const instance_name = std::get<std::string>(log_entry);
      auto it = instances_.find(instance_name);
      MG_ASSERT(it != instances_.end(), "Instance does not exist as part of raft state!");
      it->second.status = ReplicationRole::REPLICA;
      break;
    }
    case RaftLogAction::UPDATE_UUID: {
      uuid_ = std::get<utils::UUID>(log_entry);
      break;
    }
  }
}
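DoAction is the apply step of the replicated log: each accepted Raft entry mutates the materialized instance map under an exclusive lock. A rough, self-contained sketch of the same apply shape with simplified stand-in types (not the real TRaftLog/InstanceState):

#include <cassert>
#include <map>
#include <string>
#include <variant>

enum class Role { MAIN, REPLICA };
enum class Action { REGISTER, SET_MAIN, UNREGISTER };
using LogPayload = std::variant<std::string>;  // simplified: instance name only

// Apply one log entry to the materialized state, mirroring the switch above.
void Apply(std::map<std::string, Role> &state, Action action, const LogPayload &payload) {
  const auto &name = std::get<std::string>(payload);
  switch (action) {
    case Action::REGISTER:
      state[name] = Role::REPLICA;  // new instances always start as replicas
      break;
    case Action::SET_MAIN: {
      auto it = state.find(name);
      assert(it != state.end() && "instance must be registered first");
      it->second = Role::MAIN;
      break;
    }
    case Action::UNREGISTER:
      state.erase(name);
      break;
  }
}

int main() {
  std::map<std::string, Role> state;
  Apply(state, Action::REGISTER, LogPayload{"instance_1"});
  Apply(state, Action::SET_MAIN, LogPayload{"instance_1"});
  assert(state.at("instance_1") == Role::MAIN);
}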
auto CoordinatorClusterState::Serialize(ptr<buffer> &data) -> void {
  auto lock = std::shared_lock{log_lock_};

  auto const log = nlohmann::json(instances_).dump();

  data = buffer::alloc(sizeof(uint32_t) + log.size());
  buffer_serializer bs(data);
  bs.put_str(log);
}

auto CoordinatorClusterState::Deserialize(buffer &data) -> CoordinatorClusterState {
  buffer_serializer bs(data);
  auto const j = nlohmann::json::parse(bs.get_str());
  auto instances = j.get<std::map<std::string, InstanceState, std::less<>>>();

  return CoordinatorClusterState{std::move(instances)};
}
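Serialize/Deserialize round-trip the whole instance map through a JSON string that NuRaft then carries as an opaque buffer. A standalone sketch of the same round-trip, with a plain std::string standing in for the NuRaft buffer (nlohmann::json is the library actually used above; the rest is illustrative):

#include <cassert>
#include <map>
#include <string>
#include <nlohmann/json.hpp>

int main() {
  // Snapshot state -> JSON string (what Serialize puts into the NuRaft buffer).
  std::map<std::string, std::string> instances{{"instance_1", "replica"}, {"instance_2", "main"}};
  const std::string payload = nlohmann::json(instances).dump();

  // JSON string -> state (what Deserialize reads back out of the buffer).
  auto restored = nlohmann::json::parse(payload).get<std::map<std::string, std::string>>();
  assert(restored == instances);
}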
auto CoordinatorClusterState::GetInstances() const -> std::vector<InstanceState> {
  auto lock = std::shared_lock{log_lock_};
  return instances_ | ranges::views::values | ranges::to<std::vector<InstanceState>>;
}

auto CoordinatorClusterState::GetUUID() const -> utils::UUID { return uuid_; }

auto CoordinatorClusterState::FindCurrentMainInstanceName() const -> std::optional<std::string> {
  auto lock = std::shared_lock{log_lock_};
  auto const it =
      std::ranges::find_if(instances_, [](auto const &entry) { return entry.second.status == ReplicationRole::MAIN; });
  if (it == instances_.end()) {
    return {};
  }
  return it->first;
}

}  // namespace memgraph::coordination
#endif
54
src/coordination/coordinator_config.cpp
Normal file
@ -0,0 +1,54 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"

namespace memgraph::coordination {

void to_json(nlohmann::json &j, ReplClientInfo const &config) {
  j = nlohmann::json{{"instance_name", config.instance_name},
                     {"replication_mode", config.replication_mode},
                     {"replication_ip_address", config.replication_ip_address},
                     {"replication_port", config.replication_port}};
}

void from_json(nlohmann::json const &j, ReplClientInfo &config) {
  config.instance_name = j.at("instance_name").get<std::string>();
  config.replication_mode = j.at("replication_mode").get<replication_coordination_glue::ReplicationMode>();
  config.replication_ip_address = j.at("replication_ip_address").get<std::string>();
  config.replication_port = j.at("replication_port").get<uint16_t>();
}

void to_json(nlohmann::json &j, CoordinatorClientConfig const &config) {
  j = nlohmann::json{{"instance_name", config.instance_name},
                     {"ip_address", config.ip_address},
                     {"port", config.port},
                     {"instance_health_check_frequency_sec", config.instance_health_check_frequency_sec.count()},
                     {"instance_down_timeout_sec", config.instance_down_timeout_sec.count()},
                     {"instance_get_uuid_frequency_sec", config.instance_get_uuid_frequency_sec.count()},
                     {"replication_client_info", config.replication_client_info}};
}

void from_json(nlohmann::json const &j, CoordinatorClientConfig &config) {
  config.instance_name = j.at("instance_name").get<std::string>();
  config.ip_address = j.at("ip_address").get<std::string>();
  config.port = j.at("port").get<uint16_t>();
  config.instance_health_check_frequency_sec =
      std::chrono::seconds{j.at("instance_health_check_frequency_sec").get<int>()};
  config.instance_down_timeout_sec = std::chrono::seconds{j.at("instance_down_timeout_sec").get<int>()};
  config.instance_get_uuid_frequency_sec = std::chrono::seconds{j.at("instance_get_uuid_frequency_sec").get<int>()};
  config.replication_client_info = j.at("replication_client_info").get<ReplClientInfo>();
}

}  // namespace memgraph::coordination
#endif
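Note the convention above: chrono durations go out as raw second counts (.count()) and come back in as std::chrono::seconds. A minimal round-trip sketch of that convention with a trimmed-down, hypothetical config struct (not the real CoordinatorClientConfig):

#include <cassert>
#include <chrono>
#include <string>
#include <nlohmann/json.hpp>

struct Config {
  std::string instance_name;
  std::chrono::seconds down_timeout{0};
};

void to_json(nlohmann::json &j, Config const &c) {
  // Durations are serialized as raw second counts.
  j = nlohmann::json{{"instance_name", c.instance_name}, {"down_timeout_sec", c.down_timeout.count()}};
}

void from_json(nlohmann::json const &j, Config &c) {
  c.instance_name = j.at("instance_name").get<std::string>();
  c.down_timeout = std::chrono::seconds{j.at("down_timeout_sec").get<int>()};
}

int main() {
  Config const original{"instance_1", std::chrono::seconds{5}};
  Config restored = nlohmann::json(original).get<Config>();
  assert(restored.instance_name == original.instance_name && restored.down_timeout == original.down_timeout);
}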
@ -57,6 +57,17 @@ void CoordinatorHandlers::Register(memgraph::coordination::CoordinatorServer &se
        spdlog::info("Received GetInstanceUUIDRpc on coordinator server");
        CoordinatorHandlers::GetInstanceUUIDHandler(replication_handler, req_reader, res_builder);
      });

  server.Register<coordination::GetDatabaseHistoriesRpc>(
      [&replication_handler](slk::Reader *req_reader, slk::Builder *res_builder) -> void {
        spdlog::info("Received GetDatabaseHistoriesRpc on coordinator server");
        CoordinatorHandlers::GetDatabaseHistoriesHandler(replication_handler, req_reader, res_builder);
      });
}

void CoordinatorHandlers::GetDatabaseHistoriesHandler(replication::ReplicationHandler &replication_handler,
                                                      slk::Reader * /*req_reader*/, slk::Builder *res_builder) {
  slk::Save(coordination::GetDatabaseHistoriesRes{replication_handler.GetDatabasesHistories()}, res_builder);
}

void CoordinatorHandlers::SwapMainUUIDHandler(replication::ReplicationHandler &replication_handler,
@ -15,10 +15,12 @@

#include "coordination/coordinator_exceptions.hpp"
#include "coordination/fmt.hpp"
#include "dbms/constants.hpp"
#include "nuraft/coordinator_state_machine.hpp"
#include "nuraft/coordinator_state_manager.hpp"
#include "utils/counter.hpp"
#include "utils/functional.hpp"
#include "utils/resource_lock.hpp"

#include <range/v3/view.hpp>
#include <shared_mutex>
@ -30,144 +32,156 @@ using nuraft::srv_config;

CoordinatorInstance::CoordinatorInstance()
    : raft_state_(RaftState::MakeRaftState(
          [this] { std::ranges::for_each(repl_instances_, &ReplicationInstance::StartFrequentCheck); },
          [this] { std::ranges::for_each(repl_instances_, &ReplicationInstance::StopFrequentCheck); })) {
  auto find_repl_instance = [](CoordinatorInstance *self,
                               std::string_view repl_instance_name) -> ReplicationInstance & {
    auto repl_instance =
        std::ranges::find_if(self->repl_instances_, [repl_instance_name](ReplicationInstance const &instance) {
          return instance.InstanceName() == repl_instance_name;
        });
          [this]() {
            spdlog::info("Leader changed, starting all replication instances!");
            auto const instances = raft_state_.GetInstances();
            auto replicas = instances | ranges::views::filter([](auto const &instance) {
                              return instance.status == ReplicationRole::REPLICA;
                            });

    MG_ASSERT(repl_instance != self->repl_instances_.end(), "Instance {} not found during callback!",
              repl_instance_name);
    return *repl_instance;
            std::ranges::for_each(replicas, [this](auto &replica) {
              spdlog::info("Started pinging replication instance {}", replica.config.instance_name);
              repl_instances_.emplace_back(this, replica.config, client_succ_cb_, client_fail_cb_,
                                           &CoordinatorInstance::ReplicaSuccessCallback,
                                           &CoordinatorInstance::ReplicaFailCallback);
            });

            auto main = instances | ranges::views::filter(
                            [](auto const &instance) { return instance.status == ReplicationRole::MAIN; });

            std::ranges::for_each(main, [this](auto &main_instance) {
              spdlog::info("Started pinging main instance {}", main_instance.config.instance_name);
              repl_instances_.emplace_back(this, main_instance.config, client_succ_cb_, client_fail_cb_,
                                           &CoordinatorInstance::MainSuccessCallback,
                                           &CoordinatorInstance::MainFailCallback);
            });

            std::ranges::for_each(repl_instances_, [this](auto &instance) {
              instance.SetNewMainUUID(raft_state_.GetUUID());
              instance.StartFrequentCheck();
            });
          },
          [this]() {
            spdlog::info("Leader changed, stopping all replication instances!");
            repl_instances_.clear();
          })) {
  client_succ_cb_ = [](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
    auto lock = std::lock_guard{self->coord_instance_lock_};
    auto &repl_instance = self->FindReplicationInstance(repl_instance_name);
    std::invoke(repl_instance.GetSuccessCallback(), self, repl_instance_name);
  };

  replica_succ_cb_ = [find_repl_instance](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
  client_fail_cb_ = [](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
    auto lock = std::lock_guard{self->coord_instance_lock_};
    spdlog::trace("Instance {} performing replica successful callback", repl_instance_name);
    auto &repl_instance = find_repl_instance(self, repl_instance_name);

    // We need to get replicas UUID from time to time to ensure replica is listening to correct main
    // and that it didn't go down for less time than we could notice
    // We need to get id of main replica is listening to
    // and swap if necessary
    if (!repl_instance.EnsureReplicaHasCorrectMainUUID(self->GetMainUUID())) {
      spdlog::error("Failed to swap uuid for replica instance {} which is alive", repl_instance.InstanceName());
      return;
    }

    repl_instance.OnSuccessPing();
  };

  replica_fail_cb_ = [find_repl_instance](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
    auto lock = std::lock_guard{self->coord_instance_lock_};
    spdlog::trace("Instance {} performing replica failure callback", repl_instance_name);
    auto &repl_instance = find_repl_instance(self, repl_instance_name);
    repl_instance.OnFailPing();
  };

  main_succ_cb_ = [find_repl_instance](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
    auto lock = std::lock_guard{self->coord_instance_lock_};
    spdlog::trace("Instance {} performing main successful callback", repl_instance_name);

    auto &repl_instance = find_repl_instance(self, repl_instance_name);

    if (repl_instance.IsAlive()) {
      repl_instance.OnSuccessPing();
      return;
    }

    const auto &repl_instance_uuid = repl_instance.GetMainUUID();
    MG_ASSERT(repl_instance_uuid.has_value(), "Instance must have uuid set.");

    auto const curr_main_uuid = self->GetMainUUID();
    if (curr_main_uuid == repl_instance_uuid.value()) {
      if (!repl_instance.EnableWritingOnMain()) {
        spdlog::error("Failed to enable writing on main instance {}", repl_instance_name);
        return;
      }

      repl_instance.OnSuccessPing();
      return;
    }

    // TODO(antoniof) make demoteToReplica idempotent since main can be demoted to replica but
    // swapUUID can fail
    if (repl_instance.DemoteToReplica(self->replica_succ_cb_, self->replica_fail_cb_)) {
      repl_instance.OnSuccessPing();
      spdlog::info("Instance {} demoted to replica", repl_instance_name);
    } else {
      spdlog::error("Instance {} failed to become replica", repl_instance_name);
      return;
    }

    if (!repl_instance.SendSwapAndUpdateUUID(curr_main_uuid)) {
      spdlog::error(fmt::format("Failed to swap uuid for demoted main instance {}", repl_instance.InstanceName()));
      return;
    }
  };

  main_fail_cb_ = [find_repl_instance](CoordinatorInstance *self, std::string_view repl_instance_name) -> void {
    auto lock = std::lock_guard{self->coord_instance_lock_};
    spdlog::trace("Instance {} performing main failure callback", repl_instance_name);
    auto &repl_instance = find_repl_instance(self, repl_instance_name);
    repl_instance.OnFailPing();
    const auto &repl_instance_uuid = repl_instance.GetMainUUID();
    MG_ASSERT(repl_instance_uuid.has_value(), "Instance must have uuid set");

    if (!repl_instance.IsAlive() && self->GetMainUUID() == repl_instance_uuid.value()) {
      spdlog::info("Cluster without main instance, trying automatic failover");
      self->TryFailover();  // TODO: (andi) Initiate failover
    }
    auto &repl_instance = self->FindReplicationInstance(repl_instance_name);
    std::invoke(repl_instance.GetFailCallback(), self, repl_instance_name);
  };
}
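The generic client_succ_cb_/client_fail_cb_ pair now only looks up the instance and dispatches through a per-instance member-function pointer via std::invoke, so the behavior can be swapped on promotion or demotion. A standalone sketch of that dispatch shape (all names illustrative, not Memgraph's):

#include <functional>
#include <iostream>
#include <string_view>

struct Coordinator;  // forward declaration, as in the real code

// Per-instance callback stored as a pointer to a Coordinator member function.
using MemberCb = void (Coordinator::*)(std::string_view);

struct Coordinator {
  void MainSuccess(std::string_view name) { std::cout << "main ok: " << name << '\n'; }
  void ReplicaSuccess(std::string_view name) { std::cout << "replica ok: " << name << '\n'; }
};

struct Instance {
  MemberCb success_cb;
  MemberCb GetSuccessCallback() const { return success_cb; }
};

int main() {
  Coordinator coord;
  Instance main_like{&Coordinator::MainSuccess};
  Instance replica_like{&Coordinator::ReplicaSuccess};
  // The generic client callback only knows the instance; the member pointer
  // selects the right behavior, and can be swapped on promotion/demotion.
  std::invoke(main_like.GetSuccessCallback(), coord, "instance_1");
  std::invoke(replica_like.GetSuccessCallback(), coord, "instance_2");
}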
auto CoordinatorInstance::FindReplicationInstance(std::string_view replication_instance_name) -> ReplicationInstance & {
  auto repl_instance =
      std::ranges::find_if(repl_instances_, [replication_instance_name](ReplicationInstance const &instance) {
        return instance.InstanceName() == replication_instance_name;
      });

  MG_ASSERT(repl_instance != repl_instances_.end(), "Instance {} not found during callback!",
            replication_instance_name);
  return *repl_instance;
}

auto CoordinatorInstance::ShowInstances() const -> std::vector<InstanceStatus> {
  auto const coord_instances = raft_state_.GetAllCoordinators();

  auto const stringify_repl_role = [](ReplicationInstance const &instance) -> std::string {
    if (!instance.IsAlive()) return "unknown";
    if (instance.IsMain()) return "main";
    return "replica";
  };

  auto const repl_instance_to_status = [&stringify_repl_role](ReplicationInstance const &instance) -> InstanceStatus {
    return {.instance_name = instance.InstanceName(),
            .coord_socket_address = instance.SocketAddress(),
            .cluster_role = stringify_repl_role(instance),
            .is_alive = instance.IsAlive()};
  };

  auto const coord_instance_to_status = [](ptr<srv_config> const &instance) -> InstanceStatus {
    return {.instance_name = "coordinator_" + std::to_string(instance->get_id()),
            .raft_socket_address = instance->get_endpoint(),
            .cluster_role = "coordinator",
            .is_alive = true};  // TODO: (andi) Get this info from RAFT and test it or when we will move
                                // CoordinatorState to every instance, we can be smarter about this using our RPC.
            .health = "unknown"};  // TODO: (andi) Get this info from RAFT and test it or when we will move
  };
  auto instances_status = utils::fmap(raft_state_.GetAllCoordinators(), coord_instance_to_status);

  auto instances_status = utils::fmap(coord_instance_to_status, coord_instances);
  {
    auto lock = std::shared_lock{coord_instance_lock_};
    std::ranges::transform(repl_instances_, std::back_inserter(instances_status), repl_instance_to_status);
  if (raft_state_.IsLeader()) {
    auto const stringify_repl_role = [this](ReplicationInstance const &instance) -> std::string {
      if (!instance.IsAlive()) return "unknown";
      if (raft_state_.IsMain(instance.InstanceName())) return "main";
      return "replica";
    };

    auto const stringify_repl_health = [](ReplicationInstance const &instance) -> std::string {
      return instance.IsAlive() ? "up" : "down";
    };

    auto process_repl_instance_as_leader =
        [&stringify_repl_role, &stringify_repl_health](ReplicationInstance const &instance) -> InstanceStatus {
      return {.instance_name = instance.InstanceName(),
              .coord_socket_address = instance.CoordinatorSocketAddress(),
              .cluster_role = stringify_repl_role(instance),
              .health = stringify_repl_health(instance)};
    };

    {
      auto lock = std::shared_lock{coord_instance_lock_};
      std::ranges::transform(repl_instances_, std::back_inserter(instances_status), process_repl_instance_as_leader);
    }
  } else {
    auto const stringify_inst_status = [](ReplicationRole status) -> std::string {
      return status == ReplicationRole::MAIN ? "main" : "replica";
    };

    // TODO: (andi) Add capability that followers can also return socket addresses
    auto process_repl_instance_as_follower = [&stringify_inst_status](auto const &instance) -> InstanceStatus {
      return {.instance_name = instance.config.instance_name,
              .cluster_role = stringify_inst_status(instance.status),
              .health = "unknown"};
    };

    std::ranges::transform(raft_state_.GetInstances(), std::back_inserter(instances_status),
                           process_repl_instance_as_follower);
  }

  return instances_status;
}
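ShowInstances now branches on leadership: a leader derives role and health from its own live pings, while a follower can only echo the replicated Raft state. The row-building itself is a plain transform into the status vector; a compact standalone sketch of that leader-path shape (simplified, illustrative types):

#include <algorithm>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>

struct Status { std::string name; std::string role; std::string health; };
struct Inst { std::string name; bool alive; bool is_main; };

int main() {
  std::vector<Inst> instances{{"instance_1", true, true}, {"instance_2", false, false}};

  std::vector<Status> out;
  // Leader path in miniature: derive role and health from live instance state.
  std::ranges::transform(instances, std::back_inserter(out), [](const Inst &i) {
    return Status{i.name, i.is_main ? "main" : "replica", i.alive ? "up" : "down"};
  });

  for (const auto &s : out) std::cout << s.name << ' ' << s.role << ' ' << s.health << '\n';
}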
auto CoordinatorInstance::TryFailover() -> void {
  auto alive_replicas = repl_instances_ | ranges::views::filter(&ReplicationInstance::IsReplica) |
                        ranges::views::filter(&ReplicationInstance::IsAlive);
  auto const is_replica = [this](ReplicationInstance const &instance) { return IsReplica(instance.InstanceName()); };

  auto alive_replicas =
      repl_instances_ | ranges::views::filter(is_replica) | ranges::views::filter(&ReplicationInstance::IsAlive);

  if (ranges::empty(alive_replicas)) {
    spdlog::warn("Failover failed since all replicas are down!");
    return;
  }

  // TODO: Smarter choice
  auto new_main = ranges::begin(alive_replicas);
  if (!raft_state_.RequestLeadership()) {
    spdlog::error("Failover failed since the instance is not the leader!");
    return;
  }

  auto const get_ts = [](ReplicationInstance &replica) { return replica.GetClient().SendGetInstanceTimestampsRpc(); };

  auto maybe_instance_db_histories = alive_replicas | ranges::views::transform(get_ts) | ranges::to<std::vector>();

  auto const ts_has_error = [](auto const &res) -> bool { return res.HasError(); };

  if (std::ranges::any_of(maybe_instance_db_histories, ts_has_error)) {
    spdlog::error("Aborting failover as at least one instance didn't provide per database history.");
    return;
  }

  auto transform_to_pairs = ranges::views::transform([](auto const &zipped) {
    auto &[replica, res] = zipped;
    return std::make_pair(replica.InstanceName(), res.GetValue());
  });

  auto instance_db_histories =
      ranges::views::zip(alive_replicas, maybe_instance_db_histories) | transform_to_pairs | ranges::to<std::vector>();

  auto [most_up_to_date_instance, latest_epoch, latest_commit_timestamp] =
      ChooseMostUpToDateInstance(instance_db_histories);

  spdlog::trace("The most up to date instance is {} with epoch {} and {} latest commit timestamp",
                most_up_to_date_instance, latest_epoch, latest_commit_timestamp);  // NOLINT

  auto *new_main = &FindReplicationInstance(most_up_to_date_instance);

  new_main->PauseFrequentCheck();
  utils::OnScopeExit scope_exit{[&new_main] { new_main->ResumeFrequentCheck(); }};
@ -177,41 +191,56 @@ auto CoordinatorInstance::TryFailover() -> void {
  };

  auto const new_main_uuid = utils::UUID{};

  auto const failed_to_swap = [&new_main_uuid](ReplicationInstance &instance) {
    return !instance.SendSwapAndUpdateUUID(new_main_uuid);
  };

  // If for some replicas swap fails, for others on successful ping we will revert back on next change
  // or we will do failover first again and then it will be consistent again
  for (auto &other_replica_instance : alive_replicas | ranges::views::filter(is_not_new_main)) {
    if (!other_replica_instance.SendSwapAndUpdateUUID(new_main_uuid)) {
      spdlog::error(fmt::format("Failed to swap uuid for instance {} which is alive, aborting failover",
                                other_replica_instance.InstanceName()));
      return;
    }
  if (std::ranges::any_of(alive_replicas | ranges::views::filter(is_not_new_main), failed_to_swap)) {
    spdlog::error("Failed to swap uuid for all instances");
    return;
  }

  auto repl_clients_info = repl_instances_ | ranges::views::filter(is_not_new_main) |
                           ranges::views::transform(&ReplicationInstance::ReplicationClientInfo) |
                           ranges::to<ReplicationClientsInfo>();

  if (!new_main->PromoteToMain(new_main_uuid, std::move(repl_clients_info), main_succ_cb_, main_fail_cb_)) {
  if (!new_main->PromoteToMain(new_main_uuid, std::move(repl_clients_info), &CoordinatorInstance::MainSuccessCallback,
                               &CoordinatorInstance::MainFailCallback)) {
    spdlog::warn("Failover failed since promoting replica to main failed!");
    return;
  }
  // TODO: (andi) This should be replicated across all coordinator instances with Raft log
  SetMainUUID(new_main_uuid);

  if (!raft_state_.AppendUpdateUUIDLog(new_main_uuid)) {
    return;
  }

  auto const new_main_instance_name = new_main->InstanceName();

  if (!raft_state_.AppendSetInstanceAsMainLog(new_main_instance_name)) {
    return;
  }

  spdlog::info("Failover successful! Instance {} promoted to main.", new_main->InstanceName());
}
// TODO: (andi) Make sure you cannot put coordinator instance to the main
auto CoordinatorInstance::SetReplicationInstanceToMain(std::string instance_name)
auto CoordinatorInstance::SetReplicationInstanceToMain(std::string_view instance_name)
    -> SetInstanceToMainCoordinatorStatus {
  auto lock = std::lock_guard{coord_instance_lock_};

  if (std::ranges::any_of(repl_instances_, &ReplicationInstance::IsMain)) {
  if (raft_state_.MainExists()) {
    return SetInstanceToMainCoordinatorStatus::MAIN_ALREADY_EXISTS;
  }

  if (!raft_state_.RequestLeadership()) {
    return SetInstanceToMainCoordinatorStatus::NOT_LEADER;
  }

  auto const is_new_main = [&instance_name](ReplicationInstance const &instance) {
    return instance.InstanceName() == instance_name;
  };

  auto new_main = std::ranges::find_if(repl_instances_, is_new_main);

  if (new_main == repl_instances_.end()) {
@ -229,85 +258,93 @@ auto CoordinatorInstance::SetReplicationInstanceToMain(std::string instance_name

  auto const new_main_uuid = utils::UUID{};

  for (auto &other_instance : repl_instances_ | ranges::views::filter(is_not_new_main)) {
    if (!other_instance.SendSwapAndUpdateUUID(new_main_uuid)) {
      spdlog::error(
          fmt::format("Failed to swap uuid for instance {}, aborting failover", other_instance.InstanceName()));
      return SetInstanceToMainCoordinatorStatus::SWAP_UUID_FAILED;
    }
  auto const failed_to_swap = [&new_main_uuid](ReplicationInstance &instance) {
    return !instance.SendSwapAndUpdateUUID(new_main_uuid);
  };

  if (std::ranges::any_of(repl_instances_ | ranges::views::filter(is_not_new_main), failed_to_swap)) {
    spdlog::error("Failed to swap uuid for all instances");
    return SetInstanceToMainCoordinatorStatus::SWAP_UUID_FAILED;
  }

  ReplicationClientsInfo repl_clients_info;
  repl_clients_info.reserve(repl_instances_.size() - 1);
  std::ranges::transform(repl_instances_ | ranges::views::filter(is_not_new_main),
                         std::back_inserter(repl_clients_info), &ReplicationInstance::ReplicationClientInfo);
  auto repl_clients_info = repl_instances_ | ranges::views::filter(is_not_new_main) |
                           ranges::views::transform(&ReplicationInstance::ReplicationClientInfo) |
                           ranges::to<ReplicationClientsInfo>();

  if (!new_main->PromoteToMain(new_main_uuid, std::move(repl_clients_info), main_succ_cb_, main_fail_cb_)) {
  if (!new_main->PromoteToMain(new_main_uuid, std::move(repl_clients_info), &CoordinatorInstance::MainSuccessCallback,
                               &CoordinatorInstance::MainFailCallback)) {
    return SetInstanceToMainCoordinatorStatus::COULD_NOT_PROMOTE_TO_MAIN;
  }

  // TODO: (andi) This should be replicated across all coordinator instances with Raft log
  SetMainUUID(new_main_uuid);
  spdlog::info("Instance {} promoted to main", instance_name);
  if (!raft_state_.AppendUpdateUUIDLog(new_main_uuid)) {
    return SetInstanceToMainCoordinatorStatus::RAFT_LOG_ERROR;
  }

  if (!raft_state_.AppendSetInstanceAsMainLog(instance_name)) {
    return SetInstanceToMainCoordinatorStatus::RAFT_LOG_ERROR;
  }

  spdlog::info("Instance {} promoted to main on leader", instance_name);
  return SetInstanceToMainCoordinatorStatus::SUCCESS;
}
auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorClientConfig config)
auto CoordinatorInstance::RegisterReplicationInstance(CoordinatorClientConfig const &config)
    -> RegisterInstanceCoordinatorStatus {
  auto lock = std::lock_guard{coord_instance_lock_};

  auto instance_name = config.instance_name;

  auto const name_matches = [&instance_name](ReplicationInstance const &instance) {
    return instance.InstanceName() == instance_name;
  };

  if (std::ranges::any_of(repl_instances_, name_matches)) {
  if (std::ranges::any_of(repl_instances_, [instance_name = config.instance_name](ReplicationInstance const &instance) {
        return instance.InstanceName() == instance_name;
      })) {
    return RegisterInstanceCoordinatorStatus::NAME_EXISTS;
  }

  auto const socket_address_matches = [&config](ReplicationInstance const &instance) {
    return instance.SocketAddress() == config.SocketAddress();
  };
  if (std::ranges::any_of(repl_instances_, [&config](ReplicationInstance const &instance) {
        return instance.CoordinatorSocketAddress() == config.CoordinatorSocketAddress();
      })) {
    return RegisterInstanceCoordinatorStatus::COORD_ENDPOINT_EXISTS;
  }

  if (std::ranges::any_of(repl_instances_, socket_address_matches)) {
    return RegisterInstanceCoordinatorStatus::ENDPOINT_EXISTS;
  if (std::ranges::any_of(repl_instances_, [&config](ReplicationInstance const &instance) {
        return instance.ReplicationSocketAddress() == config.ReplicationSocketAddress();
      })) {
    return RegisterInstanceCoordinatorStatus::REPL_ENDPOINT_EXISTS;
  }

  if (!raft_state_.RequestLeadership()) {
    return RegisterInstanceCoordinatorStatus::NOT_LEADER;
  }

  auto const res = raft_state_.AppendRegisterReplicationInstance(instance_name);
  if (!res->get_accepted()) {
    spdlog::error(
        "Failed to accept request for registering instance {}. Most likely the reason is that the instance is not "
        "the "
        "leader.",
        config.instance_name);
    return RegisterInstanceCoordinatorStatus::RAFT_COULD_NOT_ACCEPT;
  }
  auto const undo_action_ = [this]() { repl_instances_.pop_back(); };

  spdlog::info("Request for registering instance {} accepted", instance_name);
  try {
    repl_instances_.emplace_back(this, std::move(config), replica_succ_cb_, replica_fail_cb_);
  } catch (CoordinatorRegisterInstanceException const &) {
  auto *new_instance = &repl_instances_.emplace_back(this, config, client_succ_cb_, client_fail_cb_,
                                                     &CoordinatorInstance::ReplicaSuccessCallback,
                                                     &CoordinatorInstance::ReplicaFailCallback);

  if (!new_instance->SendDemoteToReplicaRpc()) {
    spdlog::error("Failed to send demote to replica rpc for instance {}", config.instance_name);
    undo_action_();
    return RegisterInstanceCoordinatorStatus::RPC_FAILED;
  }

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to register instance {} with error code {}", instance_name, res->get_result_code());
    return RegisterInstanceCoordinatorStatus::RAFT_COULD_NOT_APPEND;
  if (!raft_state_.AppendRegisterReplicationInstanceLog(config)) {
    undo_action_();
    return RegisterInstanceCoordinatorStatus::RAFT_LOG_ERROR;
  }

  spdlog::info("Instance {} registered", instance_name);
  new_instance->StartFrequentCheck();

  spdlog::info("Instance {} registered", config.instance_name);
  return RegisterInstanceCoordinatorStatus::SUCCESS;
}
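Registration is now append-first with an explicit undo: the instance is emplaced into repl_instances_, and each later failure (demote RPC, Raft log append) pops it back off before returning an error. A standalone sketch of that append-then-undo shape, with success and failure simulated by flags (all names hypothetical):

#include <cassert>
#include <string>
#include <vector>

enum class Status { SUCCESS, RPC_FAILED, RAFT_LOG_ERROR };

Status Register(std::vector<std::string> &instances, const std::string &name, bool rpc_ok, bool raft_ok) {
  instances.push_back(name);
  // Undo mirrors the push_back above; run it on every failure path.
  auto undo = [&instances] { instances.pop_back(); };

  if (!rpc_ok) {  // e.g. demote-to-replica RPC failed
    undo();
    return Status::RPC_FAILED;
  }
  if (!raft_ok) {  // e.g. Raft log append failed
    undo();
    return Status::RAFT_LOG_ERROR;
  }
  return Status::SUCCESS;
}

int main() {
  std::vector<std::string> instances;
  assert(Register(instances, "instance_1", true, true) == Status::SUCCESS);
  assert(Register(instances, "instance_2", false, true) == Status::RPC_FAILED);
  assert(instances.size() == 1);  // a failed registration leaves no residue
}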
auto CoordinatorInstance::UnregisterReplicationInstance(std::string instance_name)
auto CoordinatorInstance::UnregisterReplicationInstance(std::string_view instance_name)
    -> UnregisterInstanceCoordinatorStatus {
  auto lock = std::lock_guard{coord_instance_lock_};

  if (!raft_state_.RequestLeadership()) {
    return UnregisterInstanceCoordinatorStatus::NOT_LEADER;
  }

  auto const name_matches = [&instance_name](ReplicationInstance const &instance) {
    return instance.InstanceName() == instance_name;
  };
@ -317,31 +354,208 @@ auto CoordinatorInstance::UnregisterReplicationInstance(std::string instance_nam
    return UnregisterInstanceCoordinatorStatus::NO_INSTANCE_WITH_NAME;
  }

  if (inst_to_remove->IsMain() && inst_to_remove->IsAlive()) {
  auto const is_main = [this](ReplicationInstance const &instance) {
    return IsMain(instance.InstanceName()) && instance.GetMainUUID() == raft_state_.GetUUID() && instance.IsAlive();
  };

  if (is_main(*inst_to_remove)) {
    return UnregisterInstanceCoordinatorStatus::IS_MAIN;
  }

  inst_to_remove->StopFrequentCheck();
  auto curr_main = std::ranges::find_if(repl_instances_, &ReplicationInstance::IsMain);
  MG_ASSERT(curr_main != repl_instances_.end(), "There must be a main instance when unregistering a replica");
  if (!curr_main->SendUnregisterReplicaRpc(instance_name)) {
    inst_to_remove->StartFrequentCheck();
    return UnregisterInstanceCoordinatorStatus::RPC_FAILED;

  auto curr_main = std::ranges::find_if(repl_instances_, is_main);

  if (curr_main != repl_instances_.end() && curr_main->IsAlive()) {
    if (!curr_main->SendUnregisterReplicaRpc(instance_name)) {
      inst_to_remove->StartFrequentCheck();
      return UnregisterInstanceCoordinatorStatus::RPC_FAILED;
    }
  }

  std::erase_if(repl_instances_, name_matches);

  if (!raft_state_.AppendUnregisterReplicationInstanceLog(instance_name)) {
    return UnregisterInstanceCoordinatorStatus::RAFT_LOG_ERROR;
  }

  return UnregisterInstanceCoordinatorStatus::SUCCESS;
}
auto CoordinatorInstance::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address)
    -> void {
  raft_state_.AddCoordinatorInstance(raft_server_id, raft_port, std::move(raft_address));
auto CoordinatorInstance::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port,
                                                 std::string_view raft_address) -> void {
  raft_state_.AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
}

auto CoordinatorInstance::GetMainUUID() const -> utils::UUID { return main_uuid_; }
void CoordinatorInstance::MainFailCallback(std::string_view repl_instance_name) {
  spdlog::trace("Instance {} performing main fail callback", repl_instance_name);
  auto &repl_instance = FindReplicationInstance(repl_instance_name);
  repl_instance.OnFailPing();
  const auto &repl_instance_uuid = repl_instance.GetMainUUID();
  MG_ASSERT(repl_instance_uuid.has_value(), "Replication instance must have uuid set");

// TODO: (andi) Add to the RAFT log.
auto CoordinatorInstance::SetMainUUID(utils::UUID new_uuid) -> void { main_uuid_ = new_uuid; }
  // NOLINTNEXTLINE
  if (!repl_instance.IsAlive() && raft_state_.GetUUID() == repl_instance_uuid.value()) {
    spdlog::info("Cluster without main instance, trying automatic failover");
    TryFailover();
  }
}

void CoordinatorInstance::MainSuccessCallback(std::string_view repl_instance_name) {
  spdlog::trace("Instance {} performing main successful callback", repl_instance_name);
  auto &repl_instance = FindReplicationInstance(repl_instance_name);

  if (repl_instance.IsAlive()) {
    repl_instance.OnSuccessPing();
    return;
  }

  const auto &repl_instance_uuid = repl_instance.GetMainUUID();
  MG_ASSERT(repl_instance_uuid.has_value(), "Instance must have uuid set.");

  // NOLINTNEXTLINE
  if (raft_state_.GetUUID() == repl_instance_uuid.value()) {
    if (!repl_instance.EnableWritingOnMain()) {
      spdlog::error("Failed to enable writing on main instance {}", repl_instance_name);
      return;
    }

    repl_instance.OnSuccessPing();
    return;
  }

  if (!raft_state_.RequestLeadership()) {
    spdlog::error("Demoting main instance {} to replica failed since the instance is not the leader!",
                  repl_instance_name);
    return;
  }

  if (repl_instance.DemoteToReplica(&CoordinatorInstance::ReplicaSuccessCallback,
                                    &CoordinatorInstance::ReplicaFailCallback)) {
    repl_instance.OnSuccessPing();
    spdlog::info("Instance {} demoted to replica", repl_instance_name);
  } else {
    spdlog::error("Instance {} failed to become replica", repl_instance_name);
    return;
  }

  if (!repl_instance.SendSwapAndUpdateUUID(raft_state_.GetUUID())) {
    spdlog::error("Failed to swap uuid for demoted main instance {}", repl_instance_name);
    return;
  }

  if (!raft_state_.AppendSetInstanceAsReplicaLog(repl_instance_name)) {
    return;
  }
}

void CoordinatorInstance::ReplicaSuccessCallback(std::string_view repl_instance_name) {
  spdlog::trace("Instance {} performing replica successful callback", repl_instance_name);
  auto &repl_instance = FindReplicationInstance(repl_instance_name);

  if (!IsReplica(repl_instance_name)) {
    spdlog::error("Aborting replica callback since instance {} is not replica anymore", repl_instance_name);
    return;
  }
  // We need to get replicas UUID from time to time to ensure replica is listening to correct main
  // and that it didn't go down for less time than we could notice
  // We need to get id of main replica is listening to
  // and swap if necessary
  if (!repl_instance.EnsureReplicaHasCorrectMainUUID(raft_state_.GetUUID())) {
    spdlog::error("Failed to swap uuid for replica instance {} which is alive", repl_instance.InstanceName());
    return;
  }

  repl_instance.OnSuccessPing();
}

void CoordinatorInstance::ReplicaFailCallback(std::string_view repl_instance_name) {
  spdlog::trace("Instance {} performing replica failure callback", repl_instance_name);
  auto &repl_instance = FindReplicationInstance(repl_instance_name);

  if (!IsReplica(repl_instance_name)) {
    spdlog::error("Aborting replica fail callback since instance {} is not replica anymore", repl_instance_name);
    return;
  }

  repl_instance.OnFailPing();
}
auto CoordinatorInstance::ChooseMostUpToDateInstance(std::span<InstanceNameDbHistories> instance_database_histories)
    -> NewMainRes {
  std::optional<NewMainRes> new_main_res;
  std::for_each(
      instance_database_histories.begin(), instance_database_histories.end(),
      [&new_main_res](const InstanceNameDbHistories &instance_res_pair) {
        const auto &[instance_name, instance_db_histories] = instance_res_pair;

        // Find default db for instance and its history
        auto default_db_history_data = std::ranges::find_if(
            instance_db_histories, [default_db = memgraph::dbms::kDefaultDB](
                                       const replication_coordination_glue::DatabaseHistory &db_timestamps) {
              return db_timestamps.name == default_db;
            });

        std::ranges::for_each(
            instance_db_histories,
            [&instance_name = instance_name](const replication_coordination_glue::DatabaseHistory &db_history) {
              spdlog::debug("Instance {}: name {}, default db {}", instance_name, db_history.name,
                            memgraph::dbms::kDefaultDB);
            });

        MG_ASSERT(default_db_history_data != instance_db_histories.end(), "No history for instance");

        const auto &instance_default_db_history = default_db_history_data->history;

        std::ranges::for_each(instance_default_db_history | ranges::views::reverse,
                              [&instance_name = instance_name](const auto &epoch_history_it) {
                                spdlog::debug("Instance {}: epoch {}, last_commit_timestamp: {}", instance_name,
                                              std::get<0>(epoch_history_it), std::get<1>(epoch_history_it));
                              });

        // get latest epoch
        // get latest timestamp

        if (!new_main_res) {
          const auto &[epoch, timestamp] = *instance_default_db_history.crbegin();
          new_main_res = std::make_optional<NewMainRes>({instance_name, epoch, timestamp});
          spdlog::debug("Currently the most up to date instance is {} with epoch {} and {} latest commit timestamp",
                        instance_name, epoch, timestamp);
          return;
        }

        bool found_same_point{false};
        std::string last_most_up_to_date_epoch{new_main_res->latest_epoch};
        for (auto [epoch, timestamp] : ranges::reverse_view(instance_default_db_history)) {
          if (new_main_res->latest_commit_timestamp < timestamp) {
            new_main_res = std::make_optional<NewMainRes>({instance_name, epoch, timestamp});
            spdlog::trace("Found the new most up to date instance {} with epoch {} and {} latest commit timestamp",
                          instance_name, epoch, timestamp);
          }

          // we found the point at which the histories were the same
          if (epoch == last_most_up_to_date_epoch) {
            found_same_point = true;
            break;
          }
        }

        if (!found_same_point) {
          spdlog::error("Didn't find same history epoch {} for instance {} and instance {}", last_most_up_to_date_epoch,
                        new_main_res->most_up_to_date_instance, instance_name);
        }
      });

  return std::move(*new_main_res);
}

auto CoordinatorInstance::IsMain(std::string_view instance_name) const -> bool {
  return raft_state_.IsMain(instance_name);
}

auto CoordinatorInstance::IsReplica(std::string_view instance_name) const -> bool {
  return raft_state_.IsReplica(instance_name);
}

}  // namespace memgraph::coordination
#endif
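ChooseMostUpToDateInstance walks each instance's default-database history newest-first, keeps the candidate with the largest commit timestamp, and expects the histories to meet at a shared epoch. A reduced standalone sketch of just that comparison, with histories as (epoch, timestamp) pairs ordered oldest-first (all names illustrative, and the early-exit check is simplified relative to the real code):

#include <cassert>
#include <string>
#include <utility>
#include <vector>

using History = std::vector<std::pair<std::string, int>>;  // (epoch, last_commit_timestamp), oldest first

struct Best { std::string instance; std::string epoch; int timestamp; };

Best Choose(const std::vector<std::pair<std::string, History>> &histories) {
  Best best{histories.front().first, histories.front().second.back().first, histories.front().second.back().second};
  for (const auto &[instance, history] : histories) {
    // Walk newest -> oldest, as the real implementation does.
    for (auto it = history.rbegin(); it != history.rend(); ++it) {
      if (it->second > best.timestamp) best = {instance, it->first, it->second};
      if (it->first == best.epoch) break;  // reached the shared epoch; stop early
    }
  }
  return best;
}

int main() {
  std::vector<std::pair<std::string, History>> histories{
      {"instance_1", {{"epoch_1", 10}, {"epoch_2", 25}}},
      {"instance_2", {{"epoch_1", 10}, {"epoch_2", 30}}},  // saw more commits in epoch_2
  };
  assert(Choose(histories).instance == "instance_2");
}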
@ -62,34 +62,33 @@ ptr<log_entry> CoordinatorLogStore::last_entry() const {

uint64_t CoordinatorLogStore::append(ptr<log_entry> &entry) {
  ptr<log_entry> clone = MakeClone(entry);
  uint64_t next_slot{0};
  {
    auto lock = std::lock_guard{logs_lock_};
    next_slot = start_idx_ + logs_.size() - 1;
    logs_[next_slot] = clone;
  }

  auto lock = std::lock_guard{logs_lock_};
  uint64_t next_slot = start_idx_ + logs_.size() - 1;
  logs_[next_slot] = clone;

  return next_slot;
}

// TODO: (andi) I think this is used for resolving conflicts inside NuRaft, check...
// different compared to in_memory_log_store.cxx
void CoordinatorLogStore::write_at(uint64_t index, ptr<log_entry> &entry) {
  ptr<log_entry> clone = MakeClone(entry);

  // Discard all logs equal to or greater than `index`.
  {
    auto lock = std::lock_guard{logs_lock_};
    auto itr = logs_.lower_bound(index);
    while (itr != logs_.end()) {
      itr = logs_.erase(itr);
    }
    logs_[index] = clone;
  auto lock = std::lock_guard{logs_lock_};
  auto itr = logs_.lower_bound(index);
  while (itr != logs_.end()) {
    itr = logs_.erase(itr);
  }
  logs_[index] = clone;
}

ptr<std::vector<ptr<log_entry>>> CoordinatorLogStore::log_entries(uint64_t start, uint64_t end) {
  auto ret = cs_new<std::vector<ptr<log_entry>>>();
  ret->resize(end - start);

  for (uint64_t i = start, curr_index = 0; i < end; ++i, ++curr_index) {
  for (uint64_t i = start, curr_index = 0; i < end; i++, curr_index++) {
    ptr<log_entry> src = nullptr;
    {
      auto lock = std::lock_guard{logs_lock_};
@ -105,21 +104,14 @@ ptr<std::vector<ptr<log_entry>>> CoordinatorLogStore::log_entries(uint64_t start
}

ptr<log_entry> CoordinatorLogStore::entry_at(uint64_t index) {
  ptr<log_entry> src = nullptr;
  {
    auto lock = std::lock_guard{logs_lock_};
    src = FindOrDefault_(index);
  }
  auto lock = std::lock_guard{logs_lock_};
  ptr<log_entry> src = FindOrDefault_(index);
  return MakeClone(src);
}

uint64_t CoordinatorLogStore::term_at(uint64_t index) {
  uint64_t term = 0;
  {
    auto lock = std::lock_guard{logs_lock_};
    term = FindOrDefault_(index)->get_term();
  }
  return term;
  auto lock = std::lock_guard{logs_lock_};
  return FindOrDefault_(index)->get_term();
}

ptr<buffer> CoordinatorLogStore::pack(uint64_t index, int32 cnt) {
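The log-store changes above collapse block-scoped locks into a single lock held for the whole (short) function body, which is behavior-preserving because each function only touches logs_ and returns a value computed under the lock. A minimal before/after sketch of that refactor on an illustrative store type:

#include <cstdint>
#include <map>
#include <mutex>

struct Store {
  std::mutex mtx;
  std::map<uint64_t, int> logs;

  // Before: copy the value out of a scoped lock, then return.
  int TermAtScoped(uint64_t index) {
    int term = 0;
    {
      std::lock_guard lock{mtx};
      term = logs[index];
    }
    return term;
  }

  // After: hold the lock for the whole short function; same behavior, less ceremony.
  int TermAt(uint64_t index) {
    std::lock_guard lock{mtx};
    return logs[index];
  }
};

int main() {
  Store s;
  s.logs[3] = 7;
  return s.TermAt(3) == s.TermAtScoped(3) ? 0 : 1;
}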
@ -76,9 +76,9 @@ void EnableWritingOnMainRes::Load(EnableWritingOnMainRes *self, memgraph::slk::R
  memgraph::slk::Load(self, reader);
}

void EnableWritingOnMainReq::Save(EnableWritingOnMainReq const &self, memgraph::slk::Builder *builder) {}
void EnableWritingOnMainReq::Save(EnableWritingOnMainReq const & /*self*/, memgraph::slk::Builder * /*builder*/) {}

void EnableWritingOnMainReq::Load(EnableWritingOnMainReq *self, memgraph::slk::Reader *reader) {}
void EnableWritingOnMainReq::Load(EnableWritingOnMainReq * /*self*/, memgraph::slk::Reader * /*reader*/) {}

// GetInstanceUUID
void GetInstanceUUIDReq::Save(const GetInstanceUUIDReq &self, memgraph::slk::Builder *builder) {
@ -97,6 +97,24 @@ void GetInstanceUUIDRes::Load(GetInstanceUUIDRes *self, memgraph::slk::Reader *r
  memgraph::slk::Load(self, reader);
}

// GetDatabaseHistoriesRpc

void GetDatabaseHistoriesReq::Save(const GetDatabaseHistoriesReq & /*self*/, memgraph::slk::Builder * /*builder*/) {
  /* nothing to serialize */
}

void GetDatabaseHistoriesReq::Load(GetDatabaseHistoriesReq * /*self*/, memgraph::slk::Reader * /*reader*/) {
  /* nothing to serialize */
}

void GetDatabaseHistoriesRes::Save(const GetDatabaseHistoriesRes &self, memgraph::slk::Builder *builder) {
  memgraph::slk::Save(self, builder);
}

void GetDatabaseHistoriesRes::Load(GetDatabaseHistoriesRes *self, memgraph::slk::Reader *reader) {
  memgraph::slk::Load(self, reader);
}

}  // namespace coordination

constexpr utils::TypeInfo coordination::PromoteReplicaToMainReq::kType{utils::TypeId::COORD_FAILOVER_REQ,
@ -130,6 +148,12 @@ constexpr utils::TypeInfo coordination::GetInstanceUUIDReq::kType{utils::TypeId:
constexpr utils::TypeInfo coordination::GetInstanceUUIDRes::kType{utils::TypeId::COORD_GET_UUID_RES, "CoordGetUUIDRes",
                                                                  nullptr};

constexpr utils::TypeInfo coordination::GetDatabaseHistoriesReq::kType{utils::TypeId::COORD_GET_INSTANCE_DATABASES_REQ,
                                                                       "GetInstanceDatabasesReq", nullptr};

constexpr utils::TypeInfo coordination::GetDatabaseHistoriesRes::kType{utils::TypeId::COORD_GET_INSTANCE_DATABASES_RES,
                                                                       "GetInstanceDatabasesRes", nullptr};

namespace slk {

// PromoteReplicaToMainRpc
@ -213,6 +237,16 @@ void Load(memgraph::coordination::GetInstanceUUIDRes *self, memgraph::slk::Reade
  memgraph::slk::Load(&self->uuid, reader);
}

// GetInstanceTimestampsReq

void Save(const memgraph::coordination::GetDatabaseHistoriesRes &self, memgraph::slk::Builder *builder) {
  memgraph::slk::Save(self.database_histories, builder);
}

void Load(memgraph::coordination::GetDatabaseHistoriesRes *self, memgraph::slk::Reader *reader) {
  memgraph::slk::Load(&self->database_histories, reader);
}

}  // namespace slk

}  // namespace memgraph
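The SLK plumbing stays deliberately symmetric: every Req/Res gets a Save that writes its fields in order and a Load that reads them back in the same order. A standalone sketch of that save/load symmetry over a plain byte vector (the Builder/Reader here are hypothetical stand-ins, not Memgraph's slk API):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

struct Builder { std::vector<char> bytes; };
struct Reader { const std::vector<char> &bytes; std::size_t pos{0}; };

void Save(const std::string &s, Builder *b) {
  // Length prefix first, then the payload bytes.
  uint32_t len = static_cast<uint32_t>(s.size());
  b->bytes.insert(b->bytes.end(), reinterpret_cast<char *>(&len), reinterpret_cast<char *>(&len) + sizeof(len));
  b->bytes.insert(b->bytes.end(), s.begin(), s.end());
}

void Load(std::string *s, Reader *r) {
  // Read back in exactly the order Save wrote: length, then payload.
  uint32_t len = 0;
  std::memcpy(&len, r->bytes.data() + r->pos, sizeof(len));
  r->pos += sizeof(len);
  s->assign(r->bytes.data() + r->pos, len);
  r->pos += len;
}

int main() {
  Builder b;
  Save(std::string{"instance_1"}, &b);
  Reader r{b.bytes};
  std::string out;
  Load(&out, &r);
  assert(out == "instance_1");
}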
@ -41,7 +41,7 @@ CoordinatorState::CoordinatorState() {
  }
}

auto CoordinatorState::RegisterReplicationInstance(CoordinatorClientConfig config)
auto CoordinatorState::RegisterReplicationInstance(CoordinatorClientConfig const &config)
    -> RegisterInstanceCoordinatorStatus {
  MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
            "Coordinator cannot register replica since variant holds wrong alternative");
@ -56,7 +56,8 @@ auto CoordinatorState::RegisterReplicationInstance(CoordinatorClientConfig confi
      data_);
}

auto CoordinatorState::UnregisterReplicationInstance(std::string instance_name) -> UnregisterInstanceCoordinatorStatus {
auto CoordinatorState::UnregisterReplicationInstance(std::string_view instance_name)
    -> UnregisterInstanceCoordinatorStatus {
  MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
            "Coordinator cannot unregister instance since variant holds wrong alternative");

@ -70,7 +71,8 @@ auto CoordinatorState::UnregisterReplicationInstance(std::string instance_name)
      data_);
}

auto CoordinatorState::SetReplicationInstanceToMain(std::string instance_name) -> SetInstanceToMainCoordinatorStatus {
auto CoordinatorState::SetReplicationInstanceToMain(std::string_view instance_name)
    -> SetInstanceToMainCoordinatorStatus {
  MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
            "Coordinator cannot register replica since variant holds wrong alternative");

@ -96,8 +98,8 @@ auto CoordinatorState::GetCoordinatorServer() const -> CoordinatorServer & {
  return *std::get<CoordinatorMainReplicaData>(data_).coordinator_server_;
}

auto CoordinatorState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address)
    -> void {
auto CoordinatorState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port,
                                              std::string_view raft_address) -> void {
  MG_ASSERT(std::holds_alternative<CoordinatorInstance>(data_),
            "Coordinator cannot register replica since variant holds wrong alternative");
  return std::get<CoordinatorInstance>(data_).AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
@ -12,100 +12,204 @@
|
||||
#ifdef MG_ENTERPRISE
|
||||
|
||||
#include "nuraft/coordinator_state_machine.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
|
||||
namespace {
|
||||
constexpr int MAX_SNAPSHOTS = 3;
|
||||
} // namespace
|
||||
|
||||
namespace memgraph::coordination {
|
||||
|
||||
auto CoordinatorStateMachine::EncodeRegisterReplicationInstance(const std::string &name) -> ptr<buffer> {
|
||||
std::string str_log = name + "_replica";
|
||||
ptr<buffer> log = buffer::alloc(sizeof(uint32_t) + str_log.size());
|
||||
buffer_serializer bs(log);
|
||||
bs.put_str(str_log);
|
||||
return log;
|
||||
auto CoordinatorStateMachine::FindCurrentMainInstanceName() const -> std::optional<std::string> {
|
||||
return cluster_state_.FindCurrentMainInstanceName();
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::DecodeRegisterReplicationInstance(buffer &data) -> std::string {
|
||||
auto CoordinatorStateMachine::MainExists() const -> bool { return cluster_state_.MainExists(); }
|
||||
|
||||
auto CoordinatorStateMachine::IsMain(std::string_view instance_name) const -> bool {
|
||||
return cluster_state_.IsMain(instance_name);
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::IsReplica(std::string_view instance_name) const -> bool {
|
||||
return cluster_state_.IsReplica(instance_name);
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::CreateLog(nlohmann::json &&log) -> ptr<buffer> {
|
||||
auto const log_dump = log.dump();
|
||||
ptr<buffer> log_buf = buffer::alloc(sizeof(uint32_t) + log_dump.size());
|
||||
buffer_serializer bs(log_buf);
|
||||
bs.put_str(log_dump);
|
||||
return log_buf;
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeRegisterInstance(CoordinatorClientConfig const &config) -> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::REGISTER_REPLICATION_INSTANCE}, {"info", config}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeUnregisterInstance(std::string_view instance_name) -> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::UNREGISTER_REPLICATION_INSTANCE}, {"info", instance_name}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::SET_INSTANCE_AS_MAIN}, {"info", instance_name}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::SET_INSTANCE_AS_REPLICA}, {"info", instance_name}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::SerializeUpdateUUID(utils::UUID const &uuid) -> ptr<buffer> {
|
||||
return CreateLog({{"action", RaftLogAction::UPDATE_UUID}, {"info", uuid}});
|
||||
}
|
||||
|
||||
auto CoordinatorStateMachine::DecodeLog(buffer &data) -> std::pair<TRaftLog, RaftLogAction> {
|
||||
buffer_serializer bs(data);
|
||||
return bs.get_str();
|
||||
auto const json = nlohmann::json::parse(bs.get_str());
|
||||
|
||||
auto const action = json["action"].get<RaftLogAction>();
|
||||
auto const &info = json["info"];
|
||||
|
||||
switch (action) {
|
||||
case RaftLogAction::REGISTER_REPLICATION_INSTANCE:
|
||||
return {info.get<CoordinatorClientConfig>(), action};
|
||||
case RaftLogAction::UPDATE_UUID:
|
||||
return {info.get<utils::UUID>(), action};
|
||||
case RaftLogAction::UNREGISTER_REPLICATION_INSTANCE:
|
||||
case RaftLogAction::SET_INSTANCE_AS_MAIN:
|
||||
[[fallthrough]];
|
||||
case RaftLogAction::SET_INSTANCE_AS_REPLICA:
|
||||
return {info.get<std::string>(), action};
|
||||
}
|
||||
throw std::runtime_error("Unknown action");
|
||||
}
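Note: the log format above replaces the old ad-hoc string encoding with one JSON document of the shape {"action": ..., "info": ...}. A minimal sketch of the same round trip using only nlohmann::json (a plain std::string stands in for the NuRaft buffer, and the enum below is a hypothetical stand-in for RaftLogAction):

    #include <cassert>
    #include <string>
    #include <nlohmann/json.hpp>

    // Stand-in for RaftLogAction; serialized with stable string names.
    enum class Action { Register, Unregister };
    NLOHMANN_JSON_SERIALIZE_ENUM(Action, {{Action::Register, "register"}, {Action::Unregister, "unregister"}})

    int main() {
      // Encode: action tag + payload in one JSON document, dumped to a string.
      nlohmann::json log = {{"action", Action::Unregister}, {"info", "instance_3"}};
      std::string const wire = log.dump();

      // Decode: parse, read the action tag, then interpret the payload.
      auto const parsed = nlohmann::json::parse(wire);
      auto const action = parsed["action"].get<Action>();
      auto const info = parsed["info"].get<std::string>();
      assert(action == Action::Unregister && info == "instance_3");
    }

Keeping the payload under a single "info" key is what lets DecodeLog above switch on the action first and only then pick the matching alternative of the TRaftLog variant.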

auto CoordinatorStateMachine::pre_commit(ulong const log_idx, buffer &data) -> ptr<buffer> {
  buffer_serializer bs(data);
  std::string str = bs.get_str();

  spdlog::info("pre_commit {} : {}", log_idx, str);
  return nullptr;
}
auto CoordinatorStateMachine::pre_commit(ulong const /*log_idx*/, buffer & /*data*/) -> ptr<buffer> { return nullptr; }

auto CoordinatorStateMachine::commit(ulong const log_idx, buffer &data) -> ptr<buffer> {
  buffer_serializer bs(data);
  std::string str = bs.get_str();

  spdlog::info("commit {} : {}", log_idx, str);

  spdlog::debug("Commit: log_idx={}, data.size()={}", log_idx, data.size());
  auto const [parsed_data, log_action] = DecodeLog(data);
  cluster_state_.DoAction(parsed_data, log_action);
  last_committed_idx_ = log_idx;
  return nullptr;

  // Return raft log number
  ptr<buffer> ret = buffer::alloc(sizeof(log_idx));
  buffer_serializer bs_ret(ret);
  bs_ret.put_u64(log_idx);
  return ret;
}

auto CoordinatorStateMachine::commit_config(ulong const log_idx, ptr<cluster_config> & /*new_conf*/) -> void {
  last_committed_idx_ = log_idx;
  spdlog::debug("Commit config: log_idx={}", log_idx);
}

auto CoordinatorStateMachine::rollback(ulong const log_idx, buffer &data) -> void {
  buffer_serializer bs(data);
  std::string str = bs.get_str();

  spdlog::info("rollback {} : {}", log_idx, str);
  // NOTE: Nothing since we don't do anything in pre_commit
  spdlog::debug("Rollback: log_idx={}, data.size()={}", log_idx, data.size());
}

auto CoordinatorStateMachine::read_logical_snp_obj(snapshot & /*snapshot*/, void *& /*user_snp_ctx*/, ulong /*obj_id*/,
auto CoordinatorStateMachine::read_logical_snp_obj(snapshot &snapshot, void *& /*user_snp_ctx*/, ulong obj_id,
                                                   ptr<buffer> &data_out, bool &is_last_obj) -> int {
  // Put dummy data.
  data_out = buffer::alloc(sizeof(int32));
  buffer_serializer bs(data_out);
  bs.put_i32(0);
  spdlog::debug("read logical snapshot object, obj_id: {}", obj_id);

  ptr<SnapshotCtx> ctx = nullptr;
  {
    auto ll = std::lock_guard{snapshots_lock_};
    auto entry = snapshots_.find(snapshot.get_last_log_idx());
    if (entry == snapshots_.end()) {
      data_out = nullptr;
      is_last_obj = true;
      return 0;
    }
    ctx = entry->second;
  }

  if (obj_id == 0) {
    // Object ID == 0: first object, put dummy data.
    data_out = buffer::alloc(sizeof(int32));
    buffer_serializer bs(data_out);
    bs.put_i32(0);
    is_last_obj = false;
  } else {
    // Object ID > 0: second object, put actual value.
    ctx->cluster_state_.Serialize(data_out);
  }

  is_last_obj = true;
  return 0;
}

auto CoordinatorStateMachine::save_logical_snp_obj(snapshot &s, ulong &obj_id, buffer & /*data*/, bool /*is_first_obj*/,
                                                   bool /*is_last_obj*/) -> void {
  spdlog::info("save snapshot {} term {} object ID", s.get_last_log_idx(), s.get_last_log_term(), obj_id);
  // Request next object.
  obj_id++;
auto CoordinatorStateMachine::save_logical_snp_obj(snapshot &snapshot, ulong &obj_id, buffer &data, bool is_first_obj,
                                                   bool is_last_obj) -> void {
  spdlog::debug("save logical snapshot object, obj_id: {}, is_first_obj: {}, is_last_obj: {}", obj_id, is_first_obj,
                is_last_obj);

  if (obj_id == 0) {
    ptr<buffer> snp_buf = snapshot.serialize();
    auto ss = snapshot::deserialize(*snp_buf);
    create_snapshot_internal(ss);
  } else {
    auto cluster_state = CoordinatorClusterState::Deserialize(data);

    auto ll = std::lock_guard{snapshots_lock_};
    auto entry = snapshots_.find(snapshot.get_last_log_idx());
    DMG_ASSERT(entry != snapshots_.end());
    entry->second->cluster_state_ = cluster_state;
  }
}
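Note: the read/save pair above streams a snapshot as a sequence of numbered objects: object 0 carries only a dummy marker and object 1 carries the serialized cluster state. A schematic of that obj_id protocol, with plain std::string standing in for NuRaft buffers (all names below are illustrative, not the real API):

    #include <iostream>
    #include <string>

    // Sender side: hand out one object per call, flag the last one.
    void read_obj(unsigned long obj_id, std::string &data_out, bool &is_last_obj,
                  const std::string &cluster_state) {
      if (obj_id == 0) {
        data_out = "dummy";        // first object: placeholder only
        is_last_obj = false;       // tell the peer to request the next object
      } else {
        data_out = cluster_state;  // second object: the actual payload
        is_last_obj = true;
      }
    }

    int main() {
      std::string const state = "serialized-cluster-state";
      std::string out;
      bool last = false;
      for (unsigned long id = 0; !last; ++id) {
        read_obj(id, out, last, state);
        std::cout << "obj " << id << ": " << out << (last ? " (last)\n" : "\n");
      }
    }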

auto CoordinatorStateMachine::apply_snapshot(snapshot &s) -> bool {
  spdlog::info("apply snapshot {} term {}", s.get_last_log_idx(), s.get_last_log_term());
  {
    auto lock = std::lock_guard{last_snapshot_lock_};
    ptr<buffer> snp_buf = s.serialize();
    last_snapshot_ = snapshot::deserialize(*snp_buf);
  }
  auto ll = std::lock_guard{snapshots_lock_};
  spdlog::debug("apply snapshot, last_log_idx: {}", s.get_last_log_idx());

  auto entry = snapshots_.find(s.get_last_log_idx());
  if (entry == snapshots_.end()) return false;

  cluster_state_ = entry->second->cluster_state_;
  return true;
}

auto CoordinatorStateMachine::free_user_snp_ctx(void *& /*user_snp_ctx*/) -> void {}

auto CoordinatorStateMachine::last_snapshot() -> ptr<snapshot> {
  auto lock = std::lock_guard{last_snapshot_lock_};
  return last_snapshot_;
  auto ll = std::lock_guard{snapshots_lock_};
  spdlog::debug("last_snapshot");
  auto entry = snapshots_.rbegin();
  if (entry == snapshots_.rend()) return nullptr;

  ptr<SnapshotCtx> ctx = entry->second;
  return ctx->snapshot_;
}

auto CoordinatorStateMachine::last_commit_index() -> ulong { return last_committed_idx_; }

auto CoordinatorStateMachine::create_snapshot(snapshot &s, async_result<bool>::handler_type &when_done) -> void {
  spdlog::info("create snapshot {} term {}", s.get_last_log_idx(), s.get_last_log_term());
  // Clone snapshot from `s`.
  {
    auto lock = std::lock_guard{last_snapshot_lock_};
    ptr<buffer> snp_buf = s.serialize();
    last_snapshot_ = snapshot::deserialize(*snp_buf);
  }
  spdlog::debug("create_snapshot, last_log_idx: {}", s.get_last_log_idx());
  ptr<buffer> snp_buf = s.serialize();
  ptr<snapshot> ss = snapshot::deserialize(*snp_buf);
  create_snapshot_internal(ss);

  ptr<std::exception> except(nullptr);
  bool ret = true;
  when_done(ret, except);
}

auto CoordinatorStateMachine::create_snapshot_internal(ptr<snapshot> snapshot) -> void {
  auto ll = std::lock_guard{snapshots_lock_};
  spdlog::debug("create_snapshot_internal, last_log_idx: {}", snapshot->get_last_log_idx());

  auto ctx = cs_new<SnapshotCtx>(snapshot, cluster_state_);
  snapshots_[snapshot->get_last_log_idx()] = ctx;

  while (snapshots_.size() > MAX_SNAPSHOTS) {
    snapshots_.erase(snapshots_.begin());
  }
}
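Note: create_snapshot_internal keeps at most MAX_SNAPSHOTS entries by erasing from the front of the ordered map, i.e. dropping the snapshots with the smallest last_log_idx first. The trimming idiom in isolation:

    #include <cassert>
    #include <map>
    #include <string>

    int main() {
      constexpr std::size_t kMaxSnapshots = 3;
      std::map<unsigned long, std::string> snapshots;  // keyed by last_log_idx, ascending

      for (unsigned long idx = 1; idx <= 5; ++idx) {
        snapshots[idx] = "snapshot@" + std::to_string(idx);
        // std::map iterates in key order, so begin() is always the oldest snapshot.
        while (snapshots.size() > kMaxSnapshots) {
          snapshots.erase(snapshots.begin());
        }
      }
      // Only the three newest snapshots (3, 4, 5) survive.
      assert(snapshots.size() == 3 && snapshots.begin()->first == 3);
    }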

auto CoordinatorStateMachine::GetInstances() const -> std::vector<InstanceState> {
  return cluster_state_.GetInstances();
}

auto CoordinatorStateMachine::GetUUID() const -> utils::UUID { return cluster_state_.GetUUID(); }

}  // namespace memgraph::coordination
#endif

@ -14,6 +14,7 @@
#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"
#include "replication_coordination_glue/common.hpp"
#include "rpc/client.hpp"
#include "rpc_errors.hpp"
#include "utils/result.hpp"

@ -23,13 +24,13 @@
namespace memgraph::coordination {

class CoordinatorInstance;
using HealthCheckCallback = std::function<void(CoordinatorInstance *, std::string_view)>;
using HealthCheckClientCallback = std::function<void(CoordinatorInstance *, std::string_view)>;
using ReplicationClientsInfo = std::vector<ReplClientInfo>;

class CoordinatorClient {
 public:
  explicit CoordinatorClient(CoordinatorInstance *coord_instance, CoordinatorClientConfig config,
                             HealthCheckCallback succ_cb, HealthCheckCallback fail_cb);
                             HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb);

  ~CoordinatorClient() = default;

@ -45,16 +46,17 @@ class CoordinatorClient {
  void ResumeFrequentCheck();

  auto InstanceName() const -> std::string;
  auto SocketAddress() const -> std::string;
  auto CoordinatorSocketAddress() const -> std::string;
  auto ReplicationSocketAddress() const -> std::string;

  [[nodiscard]] auto DemoteToReplica() const -> bool;

  auto SendPromoteReplicaToMainRpc(const utils::UUID &uuid, ReplicationClientsInfo replication_clients_info) const
  auto SendPromoteReplicaToMainRpc(utils::UUID const &uuid, ReplicationClientsInfo replication_clients_info) const
      -> bool;

  auto SendSwapMainUUIDRpc(const utils::UUID &uuid) const -> bool;
  auto SendSwapMainUUIDRpc(utils::UUID const &uuid) const -> bool;

  auto SendUnregisterReplicaRpc(std::string const &instance_name) const -> bool;
  auto SendUnregisterReplicaRpc(std::string_view instance_name) const -> bool;

  auto SendEnableWritingOnMainRpc() const -> bool;

@ -62,7 +64,8 @@ class CoordinatorClient {

  auto ReplicationClientInfo() const -> ReplClientInfo;

  auto SetCallbacks(HealthCheckCallback succ_cb, HealthCheckCallback fail_cb) -> void;
  auto SendGetInstanceTimestampsRpc() const
      -> utils::BasicResult<GetInstanceUUIDError, replication_coordination_glue::DatabaseHistories>;

  auto RpcClient() -> rpc::Client & { return rpc_client_; }

@ -82,8 +85,8 @@ class CoordinatorClient {

  CoordinatorClientConfig config_;
  CoordinatorInstance *coord_instance_;
  HealthCheckCallback succ_cb_;
  HealthCheckCallback fail_cb_;
  HealthCheckClientCallback succ_cb_;
  HealthCheckClientCallback fail_cb_;
};

}  // namespace memgraph::coordination

@ -14,12 +14,16 @@
#ifdef MG_ENTERPRISE

#include "replication_coordination_glue/mode.hpp"
#include "utils/string.hpp"

#include <chrono>
#include <cstdint>
#include <optional>
#include <string>

#include <fmt/format.h>
#include "json/json.hpp"

namespace memgraph::coordination {

inline constexpr auto *kDefaultReplicationServerIp = "0.0.0.0";

@ -32,7 +36,11 @@ struct CoordinatorClientConfig {
  std::chrono::seconds instance_down_timeout_sec{5};
  std::chrono::seconds instance_get_uuid_frequency_sec{10};

  auto SocketAddress() const -> std::string { return ip_address + ":" + std::to_string(port); }
  auto CoordinatorSocketAddress() const -> std::string { return fmt::format("{}:{}", ip_address, port); }
  auto ReplicationSocketAddress() const -> std::string {
    return fmt::format("{}:{}", replication_client_info.replication_ip_address,
                       replication_client_info.replication_port);
  }

  struct ReplicationClientInfo {
    std::string instance_name;

@ -75,5 +83,11 @@ struct CoordinatorServerConfig {
  friend bool operator==(CoordinatorServerConfig const &, CoordinatorServerConfig const &) = default;
};

void to_json(nlohmann::json &j, CoordinatorClientConfig const &config);
void from_json(nlohmann::json const &j, CoordinatorClientConfig &config);

void to_json(nlohmann::json &j, ReplClientInfo const &config);
void from_json(nlohmann::json const &j, ReplClientInfo &config);

}  // namespace memgraph::coordination
#endif
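Note: the header now declares to_json/from_json pairs so the config structs can travel inside the JSON-encoded Raft log. A minimal sketch of how such a pair is typically wired up with nlohmann::json; the struct below is a simplified stand-in, not the real CoordinatorClientConfig:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <nlohmann/json.hpp>

    struct Config {
      std::string instance_name;
      std::string ip_address;
      uint16_t port;
    };

    // ADL-visible pair: nlohmann::json finds these automatically for Config values.
    void to_json(nlohmann::json &j, Config const &c) {
      j = nlohmann::json{{"instance_name", c.instance_name}, {"ip_address", c.ip_address}, {"port", c.port}};
    }
    void from_json(nlohmann::json const &j, Config &c) {
      j.at("instance_name").get_to(c.instance_name);
      j.at("ip_address").get_to(c.ip_address);
      j.at("port").get_to(c.port);
    }

    int main() {
      Config const in{"instance_1", "127.0.0.1", 10011};
      nlohmann::json const j = in;       // uses to_json
      auto const out = j.get<Config>();  // uses from_json
      assert(out.instance_name == in.instance_name && out.port == in.port);
    }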

@ -83,5 +83,16 @@ class RaftCouldNotParseFlagsException final : public utils::BasicException {
  SPECIALIZE_GET_EXCEPTION_NAME(RaftCouldNotParseFlagsException)
};

class InvalidRaftLogActionException final : public utils::BasicException {
 public:
  explicit InvalidRaftLogActionException(std::string_view what) noexcept : BasicException(what) {}

  template <class... Args>
  explicit InvalidRaftLogActionException(fmt::format_string<Args...> fmt, Args &&...args) noexcept
      : InvalidRaftLogActionException(fmt::format(fmt, std::forward<Args>(args)...)) {}

  SPECIALIZE_GET_EXCEPTION_NAME(InvalidRaftLogActionException)
};

}  // namespace memgraph::coordination
#endif

@ -41,6 +41,9 @@ class CoordinatorHandlers {

  static void GetInstanceUUIDHandler(replication::ReplicationHandler &replication_handler, slk::Reader *req_reader,
                                     slk::Builder *res_builder);

  static void GetDatabaseHistoriesHandler(replication::ReplicationHandler &replication_handler, slk::Reader *req_reader,
                                          slk::Builder *res_builder);
};

}  // namespace memgraph::dbms

@ -18,6 +18,7 @@
#include "coordination/raft_state.hpp"
#include "coordination/register_main_replica_coordinator_status.hpp"
#include "coordination/replication_instance.hpp"
#include "utils/resource_lock.hpp"
#include "utils/rw_lock.hpp"
#include "utils/thread_pool.hpp"

@ -25,33 +26,54 @@

namespace memgraph::coordination {

struct NewMainRes {
  std::string most_up_to_date_instance;
  std::string latest_epoch;
  uint64_t latest_commit_timestamp;
};
using InstanceNameDbHistories = std::pair<std::string, replication_coordination_glue::DatabaseHistories>;

class CoordinatorInstance {
 public:
  CoordinatorInstance();

  [[nodiscard]] auto RegisterReplicationInstance(CoordinatorClientConfig config) -> RegisterInstanceCoordinatorStatus;
  [[nodiscard]] auto UnregisterReplicationInstance(std::string instance_name) -> UnregisterInstanceCoordinatorStatus;
  [[nodiscard]] auto RegisterReplicationInstance(CoordinatorClientConfig const &config)
      -> RegisterInstanceCoordinatorStatus;
  [[nodiscard]] auto UnregisterReplicationInstance(std::string_view instance_name)
      -> UnregisterInstanceCoordinatorStatus;

  [[nodiscard]] auto SetReplicationInstanceToMain(std::string instance_name) -> SetInstanceToMainCoordinatorStatus;
  [[nodiscard]] auto SetReplicationInstanceToMain(std::string_view instance_name) -> SetInstanceToMainCoordinatorStatus;

  auto ShowInstances() const -> std::vector<InstanceStatus>;

  auto TryFailover() -> void;

  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address) -> void;
  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;

  auto GetMainUUID() const -> utils::UUID;

  auto SetMainUUID(utils::UUID new_uuid) -> void;
  static auto ChooseMostUpToDateInstance(std::span<InstanceNameDbHistories> histories) -> NewMainRes;

 private:
  HealthCheckCallback main_succ_cb_, main_fail_cb_, replica_succ_cb_, replica_fail_cb_;
  HealthCheckClientCallback client_succ_cb_, client_fail_cb_;

  // NOTE: Must be std::list because we rely on pointer stability
  auto OnRaftCommitCallback(TRaftLog const &log_entry, RaftLogAction log_action) -> void;

  auto FindReplicationInstance(std::string_view replication_instance_name) -> ReplicationInstance &;

  void MainFailCallback(std::string_view);

  void MainSuccessCallback(std::string_view);

  void ReplicaSuccessCallback(std::string_view);

  void ReplicaFailCallback(std::string_view);

  auto IsMain(std::string_view instance_name) const -> bool;
  auto IsReplica(std::string_view instance_name) const -> bool;

  // NOTE: Must be std::list because we rely on pointer stability.
  // Leader and followers should both have the same view on repl_instances_
  std::list<ReplicationInstance> repl_instances_;
  mutable utils::RWLock coord_instance_lock_{utils::RWLock::Priority::READ};

  utils::UUID main_uuid_;
  mutable utils::ResourceLock coord_instance_lock_{};

  RaftState raft_state_;
};
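Note: ChooseMostUpToDateInstance has to pick the failover target from the per-instance database histories collected over RPC. A simplified selection rule, assuming the candidate with the highest commit timestamp wins (the real implementation also reasons about epochs; all names here are illustrative):

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Candidate {
      std::string instance_name;
      std::string latest_epoch;
      uint64_t latest_commit_timestamp;
    };

    // Pick the instance that has committed the furthest; ties keep the first seen.
    Candidate ChooseMostUpToDate(std::vector<Candidate> const &candidates) {
      Candidate best = candidates.front();
      for (auto const &c : candidates) {
        if (c.latest_commit_timestamp > best.latest_commit_timestamp) best = c;
      }
      return best;
    }

    int main() {
      std::vector<Candidate> const candidates{
          {"instance_1", "epoch_a", 40}, {"instance_2", "epoch_a", 55}, {"instance_3", "epoch_a", 55}};
      std::cout << "new main: " << ChooseMostUpToDate(candidates).instance_name << '\n';  // instance_2
    }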

@ -15,6 +15,7 @@
#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"
#include "replication_coordination_glue/common.hpp"
#include "rpc/messages.hpp"
#include "slk/serialization.hpp"

@ -89,7 +90,7 @@ struct UnregisterReplicaReq {
  static void Load(UnregisterReplicaReq *self, memgraph::slk::Reader *reader);
  static void Save(UnregisterReplicaReq const &self, memgraph::slk::Builder *builder);

  explicit UnregisterReplicaReq(std::string instance_name) : instance_name(std::move(instance_name)) {}
  explicit UnregisterReplicaReq(std::string_view inst_name) : instance_name(inst_name) {}

  UnregisterReplicaReq() = default;

@ -161,6 +162,32 @@ struct GetInstanceUUIDRes {

using GetInstanceUUIDRpc = rpc::RequestResponse<GetInstanceUUIDReq, GetInstanceUUIDRes>;

struct GetDatabaseHistoriesReq {
  static const utils::TypeInfo kType;
  static const utils::TypeInfo &GetTypeInfo() { return kType; }

  static void Load(GetDatabaseHistoriesReq *self, memgraph::slk::Reader *reader);
  static void Save(const GetDatabaseHistoriesReq &self, memgraph::slk::Builder *builder);

  GetDatabaseHistoriesReq() = default;
};

struct GetDatabaseHistoriesRes {
  static const utils::TypeInfo kType;
  static const utils::TypeInfo &GetTypeInfo() { return kType; }

  static void Load(GetDatabaseHistoriesRes *self, memgraph::slk::Reader *reader);
  static void Save(const GetDatabaseHistoriesRes &self, memgraph::slk::Builder *builder);

  explicit GetDatabaseHistoriesRes(const replication_coordination_glue::DatabaseHistories &database_histories)
      : database_histories(database_histories) {}
  GetDatabaseHistoriesRes() = default;

  replication_coordination_glue::DatabaseHistories database_histories;
};

using GetDatabaseHistoriesRpc = rpc::RequestResponse<GetDatabaseHistoriesReq, GetDatabaseHistoriesRes>;

}  // namespace memgraph::coordination

// SLK serialization declarations

@ -183,15 +210,21 @@ void Save(const memgraph::coordination::GetInstanceUUIDReq &self, memgraph::slk:
void Load(memgraph::coordination::GetInstanceUUIDReq *self, memgraph::slk::Reader *reader);
void Save(const memgraph::coordination::GetInstanceUUIDRes &self, memgraph::slk::Builder *builder);
void Load(memgraph::coordination::GetInstanceUUIDRes *self, memgraph::slk::Reader *reader);

// UnregisterReplicaRpc
void Save(memgraph::coordination::UnregisterReplicaRes const &self, memgraph::slk::Builder *builder);
void Load(memgraph::coordination::UnregisterReplicaRes *self, memgraph::slk::Reader *reader);
void Save(memgraph::coordination::UnregisterReplicaReq const &self, memgraph::slk::Builder *builder);
void Load(memgraph::coordination::UnregisterReplicaReq *self, memgraph::slk::Reader *reader);

// EnableWritingOnMainRpc
void Save(memgraph::coordination::EnableWritingOnMainRes const &self, memgraph::slk::Builder *builder);
void Load(memgraph::coordination::EnableWritingOnMainRes *self, memgraph::slk::Reader *reader);

// GetDatabaseHistoriesRpc
void Save(const memgraph::coordination::GetDatabaseHistoriesRes &self, memgraph::slk::Builder *builder);
void Load(memgraph::coordination::GetDatabaseHistoriesRes *self, memgraph::slk::Reader *reader);

}  // namespace memgraph::slk

#endif

@ -14,6 +14,7 @@
#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"
#include "replication_coordination_glue/common.hpp"
#include "slk/serialization.hpp"
#include "slk/streams.hpp"

@ -34,5 +35,18 @@ inline void Load(ReplicationClientInfo *obj, Reader *reader) {
  Load(&obj->replication_ip_address, reader);
  Load(&obj->replication_port, reader);
}

inline void Save(const replication_coordination_glue::DatabaseHistory &obj, Builder *builder) {
  Save(obj.db_uuid, builder);
  Save(obj.history, builder);
  Save(obj.name, builder);
}

inline void Load(replication_coordination_glue::DatabaseHistory *obj, Reader *reader) {
  Load(&obj->db_uuid, reader);
  Load(&obj->history, reader);
  Load(&obj->name, reader);
}

}  // namespace memgraph::slk
#endif
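Note: the DatabaseHistory Save/Load pair above must write and read fields in exactly the same order, because SLK carries no field names on the wire. A toy byte-stream version of that contract; the Builder/Reader below are hypothetical stand-ins for the SLK types:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    struct Builder {  // appends length-prefixed strings to a byte vector
      std::vector<uint8_t> bytes;
      void Save(std::string const &s) {
        uint32_t const len = static_cast<uint32_t>(s.size());
        auto const *p = reinterpret_cast<uint8_t const *>(&len);
        bytes.insert(bytes.end(), p, p + sizeof(len));
        bytes.insert(bytes.end(), s.begin(), s.end());
      }
    };

    struct Reader {  // consumes the same encoding, in the same order
      std::vector<uint8_t> const &bytes;
      size_t pos = 0;
      std::string Load() {
        uint32_t len = 0;
        std::memcpy(&len, bytes.data() + pos, sizeof(len));
        pos += sizeof(len);
        std::string s(bytes.begin() + pos, bytes.begin() + pos + len);
        pos += len;
        return s;
      }
    };

    int main() {
      Builder b;
      b.Save("db_uuid");  // field 1
      b.Save("name");     // field 2
      Reader r{b.bytes};
      assert(r.Load() == "db_uuid");  // Load order must mirror Save order
      assert(r.Load() == "name");
    }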

@ -33,14 +33,16 @@ class CoordinatorState {
  CoordinatorState(CoordinatorState &&) noexcept = delete;
  CoordinatorState &operator=(CoordinatorState &&) noexcept = delete;

  [[nodiscard]] auto RegisterReplicationInstance(CoordinatorClientConfig config) -> RegisterInstanceCoordinatorStatus;
  [[nodiscard]] auto UnregisterReplicationInstance(std::string instance_name) -> UnregisterInstanceCoordinatorStatus;
  [[nodiscard]] auto RegisterReplicationInstance(CoordinatorClientConfig const &config)
      -> RegisterInstanceCoordinatorStatus;
  [[nodiscard]] auto UnregisterReplicationInstance(std::string_view instance_name)
      -> UnregisterInstanceCoordinatorStatus;

  [[nodiscard]] auto SetReplicationInstanceToMain(std::string instance_name) -> SetInstanceToMainCoordinatorStatus;
  [[nodiscard]] auto SetReplicationInstanceToMain(std::string_view instance_name) -> SetInstanceToMainCoordinatorStatus;

  auto ShowInstances() const -> std::vector<InstanceStatus>;

  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address) -> void;
  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;

  // NOTE: The client code must check that the server exists before calling this method.
  auto GetCoordinatorServer() const -> CoordinatorServer &;

@ -26,7 +26,7 @@ struct InstanceStatus {
  std::string raft_socket_address;
  std::string coord_socket_address;
  std::string cluster_role;
  bool is_alive;
  std::string health;
};

}  // namespace memgraph::coordination

@ -14,11 +14,17 @@
#ifdef MG_ENTERPRISE

#include <flags/replication.hpp>
#include "io/network/endpoint.hpp"
#include "nuraft/coordinator_state_machine.hpp"
#include "nuraft/coordinator_state_manager.hpp"

#include <libnuraft/nuraft.hxx>

namespace memgraph::coordination {

class CoordinatorInstance;
struct CoordinatorClientConfig;

using BecomeLeaderCb = std::function<void()>;
using BecomeFollowerCb = std::function<void()>;

@ -47,26 +53,38 @@ class RaftState {
  RaftState &operator=(RaftState &&other) noexcept = default;
  ~RaftState();

  static auto MakeRaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb) -> RaftState;
  static auto MakeRaftState(BecomeLeaderCb &&become_leader_cb, BecomeFollowerCb &&become_follower_cb) -> RaftState;

  auto InstanceName() const -> std::string;
  auto RaftSocketAddress() const -> std::string;

  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address) -> void;
  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;
  auto GetAllCoordinators() const -> std::vector<ptr<srv_config>>;

  auto RequestLeadership() -> bool;
  auto IsLeader() const -> bool;

  auto AppendRegisterReplicationInstance(std::string const &instance) -> ptr<raft_result>;
  auto FindCurrentMainInstanceName() const -> std::optional<std::string>;
  auto MainExists() const -> bool;
  auto IsMain(std::string_view instance_name) const -> bool;
  auto IsReplica(std::string_view instance_name) const -> bool;

  // TODO: (andi) I think variables below can be abstracted
  auto AppendRegisterReplicationInstanceLog(CoordinatorClientConfig const &config) -> bool;
  auto AppendUnregisterReplicationInstanceLog(std::string_view instance_name) -> bool;
  auto AppendSetInstanceAsMainLog(std::string_view instance_name) -> bool;
  auto AppendSetInstanceAsReplicaLog(std::string_view instance_name) -> bool;
  auto AppendUpdateUUIDLog(utils::UUID const &uuid) -> bool;

  auto GetInstances() const -> std::vector<InstanceState>;
  auto GetUUID() const -> utils::UUID;

 private:
  // TODO: (andi) I think variables below can be abstracted/clean them.
  io::network::Endpoint raft_endpoint_;
  uint32_t raft_server_id_;
  uint32_t raft_port_;
  std::string raft_address_;

  ptr<state_machine> state_machine_;
  ptr<state_mgr> state_manager_;
  ptr<CoordinatorStateMachine> state_machine_;
  ptr<CoordinatorStateManager> state_manager_;
  ptr<raft_server> raft_server_;
  ptr<logger> logger_;
  raft_launcher launcher_;

@ -19,12 +19,12 @@ namespace memgraph::coordination {

enum class RegisterInstanceCoordinatorStatus : uint8_t {
  NAME_EXISTS,
  ENDPOINT_EXISTS,
  COORD_ENDPOINT_EXISTS,
  REPL_ENDPOINT_EXISTS,
  NOT_COORDINATOR,
  RPC_FAILED,
  NOT_LEADER,
  RAFT_COULD_NOT_ACCEPT,
  RAFT_COULD_NOT_APPEND,
  RPC_FAILED,
  RAFT_LOG_ERROR,
  SUCCESS
};

@ -32,8 +32,9 @@ enum class UnregisterInstanceCoordinatorStatus : uint8_t {
  NO_INSTANCE_WITH_NAME,
  IS_MAIN,
  NOT_COORDINATOR,
  NOT_LEADER,
  RPC_FAILED,
  NOT_LEADER,
  RAFT_LOG_ERROR,
  SUCCESS,
};

@ -41,9 +42,11 @@ enum class SetInstanceToMainCoordinatorStatus : uint8_t {
  NO_INSTANCE_WITH_NAME,
  MAIN_ALREADY_EXISTS,
  NOT_COORDINATOR,
  SUCCESS,
  NOT_LEADER,
  RAFT_LOG_ERROR,
  COULD_NOT_PROMOTE_TO_MAIN,
  SWAP_UUID_FAILED
  SWAP_UUID_FAILED,
  SUCCESS,
};

}  // namespace memgraph::coordination

@ -17,18 +17,24 @@
#include "coordination/coordinator_exceptions.hpp"
#include "replication_coordination_glue/role.hpp"

#include <libnuraft/nuraft.hxx>
#include "utils/resource_lock.hpp"
#include "utils/result.hpp"
#include "utils/uuid.hpp"

#include <libnuraft/nuraft.hxx>

namespace memgraph::coordination {

class CoordinatorInstance;
class ReplicationInstance;

using HealthCheckInstanceCallback = void (CoordinatorInstance::*)(std::string_view);

class ReplicationInstance {
 public:
  ReplicationInstance(CoordinatorInstance *peer, CoordinatorClientConfig config, HealthCheckCallback succ_cb,
                      HealthCheckCallback fail_cb);
  ReplicationInstance(CoordinatorInstance *peer, CoordinatorClientConfig config, HealthCheckClientCallback succ_cb,
                      HealthCheckClientCallback fail_cb, HealthCheckInstanceCallback succ_instance_cb,
                      HealthCheckInstanceCallback fail_instance_cb);

  ReplicationInstance(ReplicationInstance const &other) = delete;
  ReplicationInstance &operator=(ReplicationInstance const &other) = delete;

@ -45,14 +51,16 @@ class ReplicationInstance {
  auto IsAlive() const -> bool;

  auto InstanceName() const -> std::string;
  auto SocketAddress() const -> std::string;
  auto CoordinatorSocketAddress() const -> std::string;
  auto ReplicationSocketAddress() const -> std::string;

  auto IsReplica() const -> bool;
  auto IsMain() const -> bool;
  auto PromoteToMain(utils::UUID const &uuid, ReplicationClientsInfo repl_clients_info,
                     HealthCheckInstanceCallback main_succ_cb, HealthCheckInstanceCallback main_fail_cb) -> bool;

  auto PromoteToMain(utils::UUID uuid, ReplicationClientsInfo repl_clients_info, HealthCheckCallback main_succ_cb,
                     HealthCheckCallback main_fail_cb) -> bool;
  auto DemoteToReplica(HealthCheckCallback replica_succ_cb, HealthCheckCallback replica_fail_cb) -> bool;
  auto SendDemoteToReplicaRpc() -> bool;

  auto DemoteToReplica(HealthCheckInstanceCallback replica_succ_cb, HealthCheckInstanceCallback replica_fail_cb)
      -> bool;

  auto StartFrequentCheck() -> void;
  auto StopFrequentCheck() -> void;

@ -63,9 +71,8 @@ class ReplicationInstance {

  auto EnsureReplicaHasCorrectMainUUID(utils::UUID const &curr_main_uuid) -> bool;

  auto SendSwapAndUpdateUUID(const utils::UUID &new_main_uuid) -> bool;
  auto SendUnregisterReplicaRpc(std::string const &instance_name) -> bool;

  auto SendSwapAndUpdateUUID(utils::UUID const &new_main_uuid) -> bool;
  auto SendUnregisterReplicaRpc(std::string_view instance_name) -> bool;

  auto SendGetInstanceUUID() -> utils::BasicResult<coordination::GetInstanceUUIDError, std::optional<utils::UUID>>;
  auto GetClient() -> CoordinatorClient &;

@ -74,11 +81,13 @@ class ReplicationInstance {

  auto SetNewMainUUID(utils::UUID const &main_uuid) -> void;
  auto ResetMainUUID() -> void;
  auto GetMainUUID() const -> const std::optional<utils::UUID> &;
  auto GetMainUUID() const -> std::optional<utils::UUID> const &;

  auto GetSuccessCallback() -> HealthCheckInstanceCallback &;
  auto GetFailCallback() -> HealthCheckInstanceCallback &;

 private:
  CoordinatorClient client_;
  replication_coordination_glue::ReplicationRole replication_role_;
  std::chrono::system_clock::time_point last_response_time_{};
  bool is_alive_{false};
  std::chrono::system_clock::time_point last_check_of_uuid_{};

@ -90,8 +99,12 @@ class ReplicationInstance {
  // so we need to send swap uuid again
  std::optional<utils::UUID> main_uuid_;

  HealthCheckInstanceCallback succ_cb_;
  HealthCheckInstanceCallback fail_cb_;

  friend bool operator==(ReplicationInstance const &first, ReplicationInstance const &second) {
    return first.client_ == second.client_ && first.replication_role_ == second.replication_role_;
    return first.client_ == second.client_ && first.last_response_time_ == second.last_response_time_ &&
           first.is_alive_ == second.is_alive_ && first.main_uuid_ == second.main_uuid_;
  }
};
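Note: HealthCheckInstanceCallback is a pointer to a CoordinatorInstance member function, unlike the std::function-based client callbacks. Invoking one requires an object plus the .* (or ->*) operator; a minimal sketch with a stand-in class:

    #include <iostream>
    #include <string_view>

    class Coordinator {
     public:
      void OnSuccess(std::string_view name) { std::cout << name << " is healthy\n"; }
      void OnFail(std::string_view name) { std::cout << name << " is down\n"; }
    };

    // Same shape as HealthCheckInstanceCallback: a member function taking a name.
    using HealthCb = void (Coordinator::*)(std::string_view);

    int main() {
      Coordinator coord;
      HealthCb succ_cb = &Coordinator::OnSuccess;
      HealthCb fail_cb = &Coordinator::OnFail;
      (coord.*succ_cb)("instance_1");  // dispatch through the member pointer
      (coord.*fail_cb)("instance_2");
    }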

@ -11,4 +11,5 @@

namespace memgraph::coordination {
enum class GetInstanceUUIDError { NO_RESPONSE, RPC_EXCEPTION };
enum class GetInstanceTimestampsError { NO_RESPONSE, RPC_EXCEPTION };
}  // namespace memgraph::coordination

@ -0,0 +1,92 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"
#include "nuraft/raft_log_action.hpp"
#include "replication_coordination_glue/role.hpp"
#include "utils/resource_lock.hpp"
#include "utils/uuid.hpp"

#include <libnuraft/nuraft.hxx>
#include <range/v3/view.hpp>
#include "json/json.hpp"

#include <map>
#include <numeric>
#include <string>
#include <variant>

namespace memgraph::coordination {

using replication_coordination_glue::ReplicationRole;

struct InstanceState {
  CoordinatorClientConfig config;
  ReplicationRole status;

  friend auto operator==(InstanceState const &lhs, InstanceState const &rhs) -> bool {
    return lhs.config == rhs.config && lhs.status == rhs.status;
  }
};

void to_json(nlohmann::json &j, InstanceState const &instance_state);
void from_json(nlohmann::json const &j, InstanceState &instance_state);

using TRaftLog = std::variant<CoordinatorClientConfig, std::string, utils::UUID>;

using nuraft::buffer;
using nuraft::buffer_serializer;
using nuraft::ptr;

class CoordinatorClusterState {
 public:
  CoordinatorClusterState() = default;
  explicit CoordinatorClusterState(std::map<std::string, InstanceState, std::less<>> instances);

  CoordinatorClusterState(CoordinatorClusterState const &);
  CoordinatorClusterState &operator=(CoordinatorClusterState const &);

  CoordinatorClusterState(CoordinatorClusterState &&other) noexcept;
  CoordinatorClusterState &operator=(CoordinatorClusterState &&other) noexcept;
  ~CoordinatorClusterState() = default;

  auto FindCurrentMainInstanceName() const -> std::optional<std::string>;

  auto MainExists() const -> bool;

  auto IsMain(std::string_view instance_name) const -> bool;

  auto IsReplica(std::string_view instance_name) const -> bool;

  auto InsertInstance(std::string instance_name, InstanceState instance_state) -> void;

  auto DoAction(TRaftLog log_entry, RaftLogAction log_action) -> void;

  auto Serialize(ptr<buffer> &data) -> void;

  static auto Deserialize(buffer &data) -> CoordinatorClusterState;

  auto GetInstances() const -> std::vector<InstanceState>;

  auto GetUUID() const -> utils::UUID;

 private:
  std::map<std::string, InstanceState, std::less<>> instances_{};
  utils::UUID uuid_{};
  mutable utils::ResourceLock log_lock_{};
};

}  // namespace memgraph::coordination
#endif
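Note: instances_ is keyed with std::less<> rather than the default std::less<std::string>; the transparent comparator lets lookups such as IsMain/IsReplica pass a std::string_view without constructing a temporary std::string. The idiom in isolation:

    #include <cassert>
    #include <map>
    #include <string>
    #include <string_view>

    int main() {
      // std::less<> is transparent: find() accepts any type comparable with the key.
      std::map<std::string, int, std::less<>> instances{{"instance_1", 1}, {"instance_2", 2}};

      std::string_view const name = "instance_2";
      auto it = instances.find(name);  // no std::string temporary is created
      assert(it != instances.end() && it->second == 2);
    }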

@ -13,9 +13,15 @@

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_config.hpp"
#include "nuraft/coordinator_cluster_state.hpp"
#include "nuraft/raft_log_action.hpp"

#include <spdlog/spdlog.h>
#include <libnuraft/nuraft.hxx>

#include <variant>

namespace memgraph::coordination {

using nuraft::async_result;

@ -36,9 +42,19 @@ class CoordinatorStateMachine : public state_machine {
  CoordinatorStateMachine &operator=(CoordinatorStateMachine &&) = delete;
  ~CoordinatorStateMachine() override {}

  static auto EncodeRegisterReplicationInstance(const std::string &name) -> ptr<buffer>;
  auto FindCurrentMainInstanceName() const -> std::optional<std::string>;
  auto MainExists() const -> bool;
  auto IsMain(std::string_view instance_name) const -> bool;
  auto IsReplica(std::string_view instance_name) const -> bool;

  static auto DecodeRegisterReplicationInstance(buffer &data) -> std::string;
  static auto CreateLog(nlohmann::json &&log) -> ptr<buffer>;
  static auto SerializeRegisterInstance(CoordinatorClientConfig const &config) -> ptr<buffer>;
  static auto SerializeUnregisterInstance(std::string_view instance_name) -> ptr<buffer>;
  static auto SerializeSetInstanceAsMain(std::string_view instance_name) -> ptr<buffer>;
  static auto SerializeSetInstanceAsReplica(std::string_view instance_name) -> ptr<buffer>;
  static auto SerializeUpdateUUID(utils::UUID const &uuid) -> ptr<buffer>;

  static auto DecodeLog(buffer &data) -> std::pair<TRaftLog, RaftLogAction>;

  auto pre_commit(ulong log_idx, buffer &data) -> ptr<buffer> override;

@ -64,11 +80,27 @@ class CoordinatorStateMachine : public state_machine {

  auto create_snapshot(snapshot &s, async_result<bool>::handler_type &when_done) -> void override;

  auto GetInstances() const -> std::vector<InstanceState>;
  auto GetUUID() const -> utils::UUID;

 private:
  struct SnapshotCtx {
    SnapshotCtx(ptr<snapshot> &snapshot, CoordinatorClusterState const &cluster_state)
        : snapshot_(snapshot), cluster_state_(cluster_state) {}

    ptr<snapshot> snapshot_;
    CoordinatorClusterState cluster_state_;
  };

  auto create_snapshot_internal(ptr<snapshot> snapshot) -> void;

  CoordinatorClusterState cluster_state_;
  std::atomic<uint64_t> last_committed_idx_{0};

  ptr<snapshot> last_snapshot_;
  std::map<uint64_t, ptr<SnapshotCtx>> snapshots_;
  std::mutex snapshots_lock_;

  ptr<snapshot> last_snapshot_;
  std::mutex last_snapshot_lock_;
};

42
src/coordination/include/nuraft/raft_log_action.hpp
Normal file

@ -0,0 +1,42 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#ifdef MG_ENTERPRISE

#include "coordination/coordinator_exceptions.hpp"

#include <cstdint>
#include <string>

#include "json/json.hpp"

namespace memgraph::coordination {

enum class RaftLogAction : uint8_t {
  REGISTER_REPLICATION_INSTANCE,
  UNREGISTER_REPLICATION_INSTANCE,
  SET_INSTANCE_AS_MAIN,
  SET_INSTANCE_AS_REPLICA,
  UPDATE_UUID
};

NLOHMANN_JSON_SERIALIZE_ENUM(RaftLogAction, {
                                 {RaftLogAction::REGISTER_REPLICATION_INSTANCE, "register"},
                                 {RaftLogAction::UNREGISTER_REPLICATION_INSTANCE, "unregister"},
                                 {RaftLogAction::SET_INSTANCE_AS_MAIN, "promote"},
                                 {RaftLogAction::SET_INSTANCE_AS_REPLICA, "demote"},
                                 {RaftLogAction::UPDATE_UUID, "update_uuid"},
                             })

}  // namespace memgraph::coordination
#endif
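Note: NLOHMANN_JSON_SERIALIZE_ENUM maps each enumerator to a stable wire name, so the log format does not depend on the enum's numeric values. One behavior worth knowing: deserializing an unknown string silently yields the first mapping in the list, so REGISTER_REPLICATION_INSTANCE acts as the implicit default here. A small demonstration with a stand-in enum:

    #include <cassert>
    #include <string>
    #include <nlohmann/json.hpp>

    enum class Role { Main, Replica };
    NLOHMANN_JSON_SERIALIZE_ENUM(Role, {{Role::Main, "promote"}, {Role::Replica, "demote"}})

    int main() {
      // Round trip: enum -> string -> enum.
      nlohmann::json j = Role::Replica;
      assert(j.get<std::string>() == "demote");
      assert(j.get<Role>() == Role::Replica);

      // Unknown wire values fall back to the FIRST pair in the mapping.
      nlohmann::json const unknown = "no_such_action";
      assert(unknown.get<Role>() == Role::Main);
    }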

@ -10,12 +10,12 @@
// licenses/APL.txt.

#ifdef MG_ENTERPRISE
#include <chrono>

#include "coordination/raft_state.hpp"

#include <spdlog/spdlog.h>
#include "coordination/coordinator_config.hpp"
#include "coordination/coordinator_exceptions.hpp"
#include "nuraft/coordinator_state_machine.hpp"
#include "nuraft/coordinator_state_manager.hpp"
#include "coordination/raft_state.hpp"
#include "utils/counter.hpp"

namespace memgraph::coordination {

@ -33,31 +33,35 @@ using raft_result = cmd_result<ptr<buffer>>;

RaftState::RaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb, uint32_t raft_server_id,
                     uint32_t raft_port, std::string raft_address)
    : raft_server_id_(raft_server_id),
      raft_port_(raft_port),
      raft_address_(std::move(raft_address)),
    : raft_endpoint_(raft_address, raft_port),
      raft_server_id_(raft_server_id),
      state_machine_(cs_new<CoordinatorStateMachine>()),
      state_manager_(
          cs_new<CoordinatorStateManager>(raft_server_id_, raft_address_ + ":" + std::to_string(raft_port_))),
      state_manager_(cs_new<CoordinatorStateManager>(raft_server_id_, raft_endpoint_.SocketAddress())),
      logger_(nullptr),
      become_leader_cb_(std::move(become_leader_cb)),
      become_follower_cb_(std::move(become_follower_cb)) {}

auto RaftState::InitRaftServer() -> void {
  asio_service::options asio_opts;
  asio_opts.thread_pool_size_ = 1;  // TODO: (andi) Improve this
  asio_opts.thread_pool_size_ = 1;

  raft_params params;
  params.heart_beat_interval_ = 100;
  params.election_timeout_lower_bound_ = 200;
  params.election_timeout_upper_bound_ = 400;
  // 5 logs are preserved before the last snapshot
  params.reserved_log_items_ = 5;
  // Create snapshot for every 5 log appends
  params.snapshot_distance_ = 5;
  params.client_req_timeout_ = 3000;
  params.return_method_ = raft_params::blocking;

  // If the leader doesn't receive any response from quorum nodes
  // in 200ms, it will step down.
  // This allows us to achieve strong consistency even if network partition
  // happens between the current leader and followers.
  // The value must be <= election_timeout_lower_bound_ so that cluster can never
  // have multiple leaders.
  params.leadership_expiry_ = 200;

  raft_server::init_options init_opts;
  init_opts.raft_callback_ = [this](cb_func::Type event_type, cb_func::Param *param) -> nuraft::CbReturnCode {
    if (event_type == cb_func::BecomeLeader) {

@ -72,11 +76,11 @@ auto RaftState::InitRaftServer() -> void {

  raft_launcher launcher;

  raft_server_ = launcher.init(state_machine_, state_manager_, logger_, static_cast<int>(raft_port_), asio_opts, params,
                               init_opts);
  raft_server_ =
      launcher.init(state_machine_, state_manager_, logger_, raft_endpoint_.port, asio_opts, params, init_opts);

  if (!raft_server_) {
    throw RaftServerStartException("Failed to launch raft server on {}:{}", raft_address_, raft_port_);
    throw RaftServerStartException("Failed to launch raft server on {}", raft_endpoint_.SocketAddress());
  }

  auto maybe_stop = utils::ResettableCounter<20>();

@ -87,38 +91,61 @@ auto RaftState::InitRaftServer() -> void {
    std::this_thread::sleep_for(std::chrono::milliseconds(250));
  } while (!maybe_stop());

  throw RaftServerStartException("Failed to initialize raft server on {}:{}", raft_address_, raft_port_);
  throw RaftServerStartException("Failed to initialize raft server on {}", raft_endpoint_.SocketAddress());
}

auto RaftState::MakeRaftState(BecomeLeaderCb become_leader_cb, BecomeFollowerCb become_follower_cb) -> RaftState {
  uint32_t raft_server_id{0};
  uint32_t raft_port{0};
  try {
    raft_server_id = FLAGS_raft_server_id;
    raft_port = FLAGS_raft_server_port;
  } catch (std::exception const &e) {
    throw RaftCouldNotParseFlagsException("Failed to parse flags: {}", e.what());
  }
auto RaftState::MakeRaftState(BecomeLeaderCb &&become_leader_cb, BecomeFollowerCb &&become_follower_cb) -> RaftState {
  uint32_t raft_server_id = FLAGS_raft_server_id;
  uint32_t raft_port = FLAGS_raft_server_port;

  auto raft_state =
      RaftState(std::move(become_leader_cb), std::move(become_follower_cb), raft_server_id, raft_port, "127.0.0.1");

  raft_state.InitRaftServer();
  return raft_state;
}

RaftState::~RaftState() { launcher_.shutdown(); }

auto RaftState::InstanceName() const -> std::string { return "coordinator_" + std::to_string(raft_server_id_); }
auto RaftState::InstanceName() const -> std::string {
return fmt::format("coordinator_{}", std::to_string(raft_server_id_));
}

auto RaftState::RaftSocketAddress() const -> std::string { return raft_address_ + ":" + std::to_string(raft_port_); }
auto RaftState::RaftSocketAddress() const -> std::string { return raft_endpoint_.SocketAddress(); }

auto RaftState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address) -> void {
  auto const endpoint = raft_address + ":" + std::to_string(raft_port);
auto RaftState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address)
    -> void {
  auto const endpoint = fmt::format("{}:{}", raft_address, raft_port);
  srv_config const srv_config_to_add(static_cast<int>(raft_server_id), endpoint);
  if (!raft_server_->add_srv(srv_config_to_add)->get_accepted()) {
    throw RaftAddServerException("Failed to add server {} to the cluster", endpoint);

  auto cmd_result = raft_server_->add_srv(srv_config_to_add);

  if (cmd_result->get_result_code() == nuraft::cmd_result_code::OK) {
    spdlog::info("Request to add server {} to the cluster accepted", endpoint);
  } else {
    throw RaftAddServerException("Failed to accept request to add server {} to the cluster with error code {}",
                                 endpoint, int(cmd_result->get_result_code()));
  }

  // Waiting for server to join
  constexpr int max_tries{10};
  auto maybe_stop = utils::ResettableCounter<max_tries>();
  constexpr int waiting_period{200};
  bool added{false};
  while (!maybe_stop()) {
    std::this_thread::sleep_for(std::chrono::milliseconds(waiting_period));
    const auto server_config = raft_server_->get_srv_config(static_cast<nuraft::int32>(raft_server_id));
    if (server_config) {
      spdlog::trace("Server with id {} added to cluster", raft_server_id);
      added = true;
      break;
    }
  }

  if (!added) {
    throw RaftAddServerException("Failed to add server {} to the cluster in {}ms", endpoint,
                                 max_tries * waiting_period);
  }
  spdlog::info("Request to add server {} to the cluster accepted", endpoint);
}
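Note: the join sequence above is accept-then-poll: add_srv only queues the membership change, so the coordinator polls get_srv_config up to max_tries times before declaring failure. The bounded-wait pattern on its own, using only the standard library (utils::ResettableCounter is replaced by a plain loop counter, and the readiness check is a hypothetical stand-in):

    #include <chrono>
    #include <functional>
    #include <stdexcept>
    #include <thread>

    // Poll `is_ready` every `period` up to `max_tries` times; false on timeout.
    bool WaitUntil(std::function<bool()> const &is_ready, int max_tries, std::chrono::milliseconds period) {
      for (int attempt = 0; attempt < max_tries; ++attempt) {
        std::this_thread::sleep_for(period);
        if (is_ready()) return true;
      }
      return false;
    }

    int main() {
      int checks = 0;
      // Stand-in for raft_server_->get_srv_config(id): ready on the third check.
      auto ready = [&checks] { return ++checks >= 3; };
      if (!WaitUntil(ready, /*max_tries=*/10, std::chrono::milliseconds(200))) {
        throw std::runtime_error("server did not join within 2000ms");
      }
    }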

auto RaftState::GetAllCoordinators() const -> std::vector<ptr<srv_config>> {

@ -131,10 +158,124 @@ auto RaftState::IsLeader() const -> bool { return raft_server_->is_leader(); }

auto RaftState::RequestLeadership() -> bool { return raft_server_->is_leader() || raft_server_->request_leadership(); }

auto RaftState::AppendRegisterReplicationInstance(std::string const &instance) -> ptr<raft_result> {
  auto new_log = CoordinatorStateMachine::EncodeRegisterReplicationInstance(instance);
  return raft_server_->append_entries({new_log});
auto RaftState::AppendRegisterReplicationInstanceLog(CoordinatorClientConfig const &config) -> bool {
  auto new_log = CoordinatorStateMachine::SerializeRegisterInstance(config);
  auto const res = raft_server_->append_entries({new_log});

  if (!res->get_accepted()) {
    spdlog::error(
        "Failed to accept request for registering instance {}. Most likely the reason is that the instance is not "
        "the leader.",
        config.instance_name);
    return false;
  }

  spdlog::info("Request for registering instance {} accepted", config.instance_name);

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to register instance {} with error code {}", config.instance_name,
                  int(res->get_result_code()));
    return false;
  }

  return true;
}

auto RaftState::AppendUnregisterReplicationInstanceLog(std::string_view instance_name) -> bool {
  auto new_log = CoordinatorStateMachine::SerializeUnregisterInstance(instance_name);
  auto const res = raft_server_->append_entries({new_log});
  if (!res->get_accepted()) {
    spdlog::error(
        "Failed to accept request for unregistering instance {}. Most likely the reason is that the instance is not "
        "the leader.",
        instance_name);
    return false;
  }

  spdlog::info("Request for unregistering instance {} accepted", instance_name);

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to unregister instance {} with error code {}", instance_name, int(res->get_result_code()));
    return false;
  }
  return true;
}

auto RaftState::AppendSetInstanceAsMainLog(std::string_view instance_name) -> bool {
  auto new_log = CoordinatorStateMachine::SerializeSetInstanceAsMain(instance_name);
  auto const res = raft_server_->append_entries({new_log});
  if (!res->get_accepted()) {
    spdlog::error(
        "Failed to accept request for promoting instance {}. Most likely the reason is that the instance is not "
        "the leader.",
        instance_name);
    return false;
  }

  spdlog::info("Request for promoting instance {} accepted", instance_name);

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to promote instance {} with error code {}", instance_name, int(res->get_result_code()));
    return false;
  }
  return true;
}

auto RaftState::AppendSetInstanceAsReplicaLog(std::string_view instance_name) -> bool {
  auto new_log = CoordinatorStateMachine::SerializeSetInstanceAsReplica(instance_name);
  auto const res = raft_server_->append_entries({new_log});
  if (!res->get_accepted()) {
    spdlog::error(
        "Failed to accept request for demoting instance {}. Most likely the reason is that the instance is not "
        "the leader.",
        instance_name);
    return false;
  }
  spdlog::info("Request for demoting instance {} accepted", instance_name);

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
spdlog::error("Failed to promote instance {} with error code {}", instance_name, int(res->get_result_code()));
    return false;
  }

  return true;
}

auto RaftState::AppendUpdateUUIDLog(utils::UUID const &uuid) -> bool {
  auto new_log = CoordinatorStateMachine::SerializeUpdateUUID(uuid);
  auto const res = raft_server_->append_entries({new_log});
  if (!res->get_accepted()) {
    spdlog::error(
        "Failed to accept request for updating UUID. Most likely the reason is that the instance is not "
        "the leader.");
    return false;
  }
  spdlog::info("Request for updating UUID accepted");

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to update UUID with error code {}", int(res->get_result_code()));
    return false;
  }

  return true;
}
|
||||
|
||||
auto RaftState::FindCurrentMainInstanceName() const -> std::optional<std::string> {
|
||||
return state_machine_->FindCurrentMainInstanceName();
|
||||
}
|
||||
|
||||
auto RaftState::MainExists() const -> bool { return state_machine_->MainExists(); }
|
||||
|
||||
auto RaftState::IsMain(std::string_view instance_name) const -> bool { return state_machine_->IsMain(instance_name); }
|
||||
|
||||
auto RaftState::IsReplica(std::string_view instance_name) const -> bool {
|
||||
return state_machine_->IsReplica(instance_name);
|
||||
}
|
||||
|
||||
auto RaftState::GetInstances() const -> std::vector<InstanceState> { return state_machine_->GetInstances(); }
|
||||
|
||||
auto RaftState::GetUUID() const -> utils::UUID { return state_machine_->GetUUID(); }
|
||||
|
||||
} // namespace memgraph::coordination
|
||||
#endif
|
||||
|
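All of the Append*Log functions above share one failure-handling pattern: `append_entries` can reject the request outright (typically because this coordinator is not the Raft leader), or accept it and still fail to commit. A minimal, self-contained sketch of that two-step check, with stand-in types rather than the real NuRaft API:

#include <cstdio>
#include <string>

// Illustrative stand-ins for nuraft::cmd_result; names here are not the real API.
enum class CmdResultCode { OK, NOT_LEADER, TIMEOUT };

struct AppendResult {
  bool accepted;
  CmdResultCode code;
  bool get_accepted() const { return accepted; }
  CmdResultCode get_result_code() const { return code; }
};

// Mirrors the accept-then-commit check used by every Append*Log function.
bool CheckAppend(AppendResult const &res, std::string const &action) {
  if (!res.get_accepted()) {
    // Rejected up front: this node is most likely not the leader.
    std::printf("Failed to accept request for %s.\n", action.c_str());
    return false;
  }
  if (res.get_result_code() != CmdResultCode::OK) {
    // Accepted, but the log entry did not commit.
    std::printf("Failed to commit %s.\n", action.c_str());
    return false;
  }
  return true;
}

int main() {
  CheckAppend({true, CmdResultCode::OK}, "registering instance_1");         // true
  CheckAppend({false, CmdResultCode::NOT_LEADER}, "promoting instance_2");  // false
}
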
@ -13,21 +13,20 @@

#include "coordination/replication_instance.hpp"

#include <utility>

#include "replication_coordination_glue/handler.hpp"
#include "utils/result.hpp"

namespace memgraph::coordination {

ReplicationInstance::ReplicationInstance(CoordinatorInstance *peer, CoordinatorClientConfig config,
                                         HealthCheckCallback succ_cb, HealthCheckCallback fail_cb)
                                         HealthCheckClientCallback succ_cb, HealthCheckClientCallback fail_cb,
                                         HealthCheckInstanceCallback succ_instance_cb,
                                         HealthCheckInstanceCallback fail_instance_cb)
    : client_(peer, std::move(config), std::move(succ_cb), std::move(fail_cb)),
      replication_role_(replication_coordination_glue::ReplicationRole::REPLICA) {
  if (!client_.DemoteToReplica()) {
    throw CoordinatorRegisterInstanceException("Failed to demote instance {} to replica", client_.InstanceName());
  }

  client_.StartFrequentCheck();
}
      succ_cb_(succ_instance_cb),
      fail_cb_(fail_instance_cb) {}

auto ReplicationInstance::OnSuccessPing() -> void {
  last_response_time_ = std::chrono::system_clock::now();
@ -46,37 +45,34 @@ auto ReplicationInstance::IsReadyForUUIDPing() -> bool {
}

auto ReplicationInstance::InstanceName() const -> std::string { return client_.InstanceName(); }
auto ReplicationInstance::SocketAddress() const -> std::string { return client_.SocketAddress(); }
auto ReplicationInstance::CoordinatorSocketAddress() const -> std::string { return client_.CoordinatorSocketAddress(); }
auto ReplicationInstance::ReplicationSocketAddress() const -> std::string { return client_.ReplicationSocketAddress(); }
auto ReplicationInstance::IsAlive() const -> bool { return is_alive_; }

auto ReplicationInstance::IsReplica() const -> bool {
  return replication_role_ == replication_coordination_glue::ReplicationRole::REPLICA;
}
auto ReplicationInstance::IsMain() const -> bool {
  return replication_role_ == replication_coordination_glue::ReplicationRole::MAIN;
}

auto ReplicationInstance::PromoteToMain(utils::UUID new_uuid, ReplicationClientsInfo repl_clients_info,
                                        HealthCheckCallback main_succ_cb, HealthCheckCallback main_fail_cb) -> bool {
auto ReplicationInstance::PromoteToMain(utils::UUID const &new_uuid, ReplicationClientsInfo repl_clients_info,
                                        HealthCheckInstanceCallback main_succ_cb,
                                        HealthCheckInstanceCallback main_fail_cb) -> bool {
  if (!client_.SendPromoteReplicaToMainRpc(new_uuid, std::move(repl_clients_info))) {
    return false;
  }

  replication_role_ = replication_coordination_glue::ReplicationRole::MAIN;
  main_uuid_ = new_uuid;
  client_.SetCallbacks(std::move(main_succ_cb), std::move(main_fail_cb));
  succ_cb_ = main_succ_cb;
  fail_cb_ = main_fail_cb;

  return true;
}

auto ReplicationInstance::DemoteToReplica(HealthCheckCallback replica_succ_cb, HealthCheckCallback replica_fail_cb)
    -> bool {
auto ReplicationInstance::SendDemoteToReplicaRpc() -> bool { return client_.DemoteToReplica(); }

auto ReplicationInstance::DemoteToReplica(HealthCheckInstanceCallback replica_succ_cb,
                                          HealthCheckInstanceCallback replica_fail_cb) -> bool {
  if (!client_.DemoteToReplica()) {
    return false;
  }

  replication_role_ = replication_coordination_glue::ReplicationRole::REPLICA;
  client_.SetCallbacks(std::move(replica_succ_cb), std::move(replica_fail_cb));
  succ_cb_ = replica_succ_cb;
  fail_cb_ = replica_fail_cb;

  return true;
}
@ -90,10 +86,12 @@ auto ReplicationInstance::ReplicationClientInfo() const -> CoordinatorClientConf
  return client_.ReplicationClientInfo();
}

auto ReplicationInstance::GetSuccessCallback() -> HealthCheckInstanceCallback & { return succ_cb_; }
auto ReplicationInstance::GetFailCallback() -> HealthCheckInstanceCallback & { return fail_cb_; }

auto ReplicationInstance::GetClient() -> CoordinatorClient & { return client_; }

auto ReplicationInstance::SetNewMainUUID(utils::UUID const &main_uuid) -> void { main_uuid_ = main_uuid; }
auto ReplicationInstance::ResetMainUUID() -> void { main_uuid_ = std::nullopt; }
auto ReplicationInstance::GetMainUUID() const -> std::optional<utils::UUID> const & { return main_uuid_; }

auto ReplicationInstance::EnsureReplicaHasCorrectMainUUID(utils::UUID const &curr_main_uuid) -> bool {
@ -106,6 +104,7 @@ auto ReplicationInstance::EnsureReplicaHasCorrectMainUUID(utils::UUID const &cur
  }
  UpdateReplicaLastResponseUUID();

  // NOLINTNEXTLINE
  if (res.GetValue().has_value() && res.GetValue().value() == curr_main_uuid) {
    return true;
  }
@ -113,7 +112,7 @@ auto ReplicationInstance::EnsureReplicaHasCorrectMainUUID(utils::UUID const &cur
  return SendSwapAndUpdateUUID(curr_main_uuid);
}

auto ReplicationInstance::SendSwapAndUpdateUUID(const utils::UUID &new_main_uuid) -> bool {
auto ReplicationInstance::SendSwapAndUpdateUUID(utils::UUID const &new_main_uuid) -> bool {
  if (!replication_coordination_glue::SendSwapMainUUIDRpc(client_.RpcClient(), new_main_uuid)) {
    return false;
  }
@ -121,7 +120,7 @@ auto ReplicationInstance::SendSwapAndUpdateUUID(const utils::UUID &new_main_uuid
  return true;
}

auto ReplicationInstance::SendUnregisterReplicaRpc(std::string const &instance_name) -> bool {
auto ReplicationInstance::SendUnregisterReplicaRpc(std::string_view instance_name) -> bool {
  return client_.SendUnregisterReplicaRpc(instance_name);
}

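The refactor above moves the health-check callbacks out of the client and into the `ReplicationInstance` itself (`succ_cb_`, `fail_cb_`), which are then swapped whenever the instance is promoted or demoted. A condensed sketch of that role-dependent callback swap, using simplified types rather than the actual coordination classes:

#include <functional>
#include <iostream>
#include <string_view>

// Simplified stand-in for HealthCheckInstanceCallback.
using HealthCheckCb = std::function<void(std::string_view)>;

class Instance {
 public:
  Instance(HealthCheckCb succ, HealthCheckCb fail)
      : succ_cb_(std::move(succ)), fail_cb_(std::move(fail)) {}

  // Role changes swap the stored callbacks, as PromoteToMain/DemoteToReplica do.
  void SetCallbacks(HealthCheckCb succ, HealthCheckCb fail) {
    succ_cb_ = std::move(succ);
    fail_cb_ = std::move(fail);
  }

  void OnPing(bool ok) { (ok ? succ_cb_ : fail_cb_)("instance_1"); }

 private:
  HealthCheckCb succ_cb_;
  HealthCheckCb fail_cb_;
};

int main() {
  Instance i([](auto n) { std::cout << n << ": replica ok\n"; },
             [](auto n) { std::cout << n << ": replica down\n"; });
  i.OnPing(true);
  // Promotion installs MAIN-specific callbacks.
  i.SetCallbacks([](auto n) { std::cout << n << ": main ok\n"; },
                 [](auto n) { std::cout << n << ": main down\n"; });
  i.OnPing(true);
}
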
@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -119,6 +119,8 @@ class Reader {
  auto GetHeader() const -> Header const &;
  auto GetNextRow(utils::MemoryResource *mem) -> std::optional<Row>;

  void Reset();

 private:
  // Some implementation issues that need clearing up, but this is mainly because
  // I don't want `boost/iostreams/filtering_stream.hpp` included in this header file

@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -34,6 +34,10 @@ struct Reader::impl {

  [[nodiscard]] bool HasHeader() const { return read_config_.with_header; }
  [[nodiscard]] auto Header() const -> Header const & { return header_; }
  void Reset() {
    line_buffer_.clear();
    line_buffer_.shrink_to_fit();
  }

  auto GetNextRow(utils::MemoryResource *mem) -> std::optional<Reader::Row>;

@ -42,7 +46,7 @@ struct Reader::impl {

  void TryInitializeHeader();

  std::optional<utils::pmr::string> GetNextLine(utils::MemoryResource *mem);
  bool GetNextLine();

  ParsingResult ParseHeader();

@ -55,6 +59,8 @@ struct Reader::impl {
  Config read_config_;
  uint64_t line_count_{1};
  uint16_t number_of_columns_{0};
  uint64_t estimated_number_of_columns_{0};
  utils::pmr::string line_buffer_{memory_};
  Reader::Header header_{memory_};
};

@ -129,17 +135,16 @@ void Reader::impl::InitializeStream() {
  MG_ASSERT(csv_stream_.is_complete(), "Should be 'complete' for correct operation");
}

std::optional<utils::pmr::string> Reader::impl::GetNextLine(utils::MemoryResource *mem) {
  utils::pmr::string line(mem);
  if (!std::getline(csv_stream_, line)) {
bool Reader::impl::GetNextLine() {
  if (!std::getline(csv_stream_, line_buffer_)) {
    // reached end of file or an I/O error occurred
    if (!csv_stream_.good()) {
      csv_stream_.reset();  // this will close the file_stream_ and clear the chain
    }
    return std::nullopt;
    return false;
  }
  ++line_count_;
  return std::move(line);
  return true;
}

Reader::ParsingResult Reader::impl::ParseHeader() {
@ -170,6 +175,8 @@ void Reader::impl::TryInitializeHeader() {

const Reader::Header &Reader::GetHeader() const { return pimpl->Header(); }

void Reader::Reset() { pimpl->Reset(); }

namespace {
enum class CsvParserState : uint8_t { INITIAL_FIELD, NEXT_FIELD, QUOTING, EXPECT_DELIMITER, DONE };

@ -179,6 +186,8 @@ Reader::ParsingResult Reader::impl::ParseRow(utils::MemoryResource *mem) {
  utils::pmr::vector<utils::pmr::string> row(mem);
  if (number_of_columns_ != 0) {
    row.reserve(number_of_columns_);
  } else if (estimated_number_of_columns_ != 0) {
    row.reserve(estimated_number_of_columns_);
  }

  utils::pmr::string column(memory_);
@ -186,13 +195,12 @@ Reader::ParsingResult Reader::impl::ParseRow(utils::MemoryResource *mem) {
  auto state = CsvParserState::INITIAL_FIELD;

  do {
    const auto maybe_line = GetNextLine(mem);
    if (!maybe_line) {
    if (!GetNextLine()) {
      // The whole file was processed.
      break;
    }

    std::string_view line_string_view = *maybe_line;
    std::string_view line_string_view = line_buffer_;

    // remove '\r' from the end in case we have dos file format
    if (line_string_view.back() == '\r') {
@ -312,6 +320,11 @@ Reader::ParsingResult Reader::impl::ParseRow(utils::MemoryResource *mem) {
                                    fmt::format("Expected {:d} columns in row {:d}, but got {:d}", number_of_columns_,
                                                line_count_ - 1, row.size()));
  }
  // To avoid unnecessary dynamic growth of the row, remember the number of
  // columns for future calls
  if (number_of_columns_ == 0 && estimated_number_of_columns_ == 0) {
    estimated_number_of_columns_ = row.size();
  }

  return std::move(row);
}
@ -319,7 +332,7 @@ Reader::ParsingResult Reader::impl::ParseRow(utils::MemoryResource *mem) {
std::optional<Reader::Row> Reader::impl::GetNextRow(utils::MemoryResource *mem) {
  auto row = ParseRow(mem);

  if (row.HasError()) {
  if (row.HasError()) [[unlikely]] {
    if (!read_config_.ignore_bad) {
      throw CsvReadException("CSV Reader: Bad row at line {:d}: {}", line_count_ - 1, row.GetError().message);
    }
@ -333,7 +346,7 @@ std::optional<Reader::Row> Reader::impl::GetNextRow(utils::MemoryResource *mem)
    } while (row.HasError());
  }

  if (row->empty()) {
  if (row->empty()) [[unlikely]] {
    // reached end of file
    return std::nullopt;
  }

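The `GetNextLine` change replaces a freshly allocated string per line with a single member `line_buffer_` that is refilled on each call, so the buffer's capacity is reused across rows. A small standalone sketch of the same idea, with plain `std::string` and `std::istringstream` standing in for the PMR string and the boost filtering stream:

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <string_view>

// One member buffer is refilled per line, so its capacity is reused instead
// of allocating a fresh string for every row (like Reader::impl::line_buffer_).
class LineSource {
 public:
  explicit LineSource(std::string data) : stream_(std::move(data)) {}

  // Returns false on EOF; on success the line is readable through View().
  bool GetNextLine() {
    if (!std::getline(stream_, line_buffer_)) return false;
    ++line_count_;
    return true;
  }

  std::string_view View() const { return line_buffer_; }
  std::uint64_t LineCount() const { return line_count_; }

 private:
  std::istringstream stream_;
  std::string line_buffer_;  // reused across calls
  std::uint64_t line_count_{0};
};

int main() {
  LineSource src("a,b\r\n1,2\n");
  while (src.GetNextLine()) {
    auto line = src.View();
    if (!line.empty() && line.back() == '\r') line.remove_suffix(1);  // DOS line endings
    std::cout << src.LineCount() << ": " << line << '\n';
  }
}
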
@ -20,28 +20,28 @@ namespace memgraph::dbms {
CoordinatorHandler::CoordinatorHandler(coordination::CoordinatorState &coordinator_state)
    : coordinator_state_(coordinator_state) {}

auto CoordinatorHandler::RegisterReplicationInstance(memgraph::coordination::CoordinatorClientConfig config)
auto CoordinatorHandler::RegisterReplicationInstance(coordination::CoordinatorClientConfig const &config)
    -> coordination::RegisterInstanceCoordinatorStatus {
  return coordinator_state_.RegisterReplicationInstance(config);
}

auto CoordinatorHandler::UnregisterReplicationInstance(std::string instance_name)
auto CoordinatorHandler::UnregisterReplicationInstance(std::string_view instance_name)
    -> coordination::UnregisterInstanceCoordinatorStatus {
  return coordinator_state_.UnregisterReplicationInstance(std::move(instance_name));
  return coordinator_state_.UnregisterReplicationInstance(instance_name);
}

auto CoordinatorHandler::SetReplicationInstanceToMain(std::string instance_name)
auto CoordinatorHandler::SetReplicationInstanceToMain(std::string_view instance_name)
    -> coordination::SetInstanceToMainCoordinatorStatus {
  return coordinator_state_.SetReplicationInstanceToMain(std::move(instance_name));
  return coordinator_state_.SetReplicationInstanceToMain(instance_name);
}

auto CoordinatorHandler::ShowInstances() const -> std::vector<coordination::InstanceStatus> {
  return coordinator_state_.ShowInstances();
}

auto CoordinatorHandler::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address)
    -> void {
  coordinator_state_.AddCoordinatorInstance(raft_server_id, raft_port, std::move(raft_address));
auto CoordinatorHandler::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port,
                                                std::string_view raft_address) -> void {
  coordinator_state_.AddCoordinatorInstance(raft_server_id, raft_port, raft_address);
}

}  // namespace memgraph::dbms

@ -30,16 +30,17 @@ class CoordinatorHandler {

  // TODO: (andi) When moving coordinator state on same instances, rename from RegisterReplicationInstance to
  // RegisterInstance
  auto RegisterReplicationInstance(coordination::CoordinatorClientConfig config)
  auto RegisterReplicationInstance(coordination::CoordinatorClientConfig const &config)
      -> coordination::RegisterInstanceCoordinatorStatus;

  auto UnregisterReplicationInstance(std::string instance_name) -> coordination::UnregisterInstanceCoordinatorStatus;
  auto UnregisterReplicationInstance(std::string_view instance_name)
      -> coordination::UnregisterInstanceCoordinatorStatus;

  auto SetReplicationInstanceToMain(std::string instance_name) -> coordination::SetInstanceToMainCoordinatorStatus;
  auto SetReplicationInstanceToMain(std::string_view instance_name) -> coordination::SetInstanceToMainCoordinatorStatus;

  auto ShowInstances() const -> std::vector<coordination::InstanceStatus>;

  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string raft_address) -> void;
  auto AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_port, std::string_view raft_address) -> void;

 private:
  coordination::CoordinatorState &coordinator_state_;

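These signatures switch from `std::string` (and `std::move`) to `std::string_view` because the handler only forwards the name onward and never needs ownership. A toy example of the trade-off; `UnregisterByName` is a hypothetical stand-in for the forwarding handlers:

#include <iostream>
#include <string>
#include <string_view>

// The name is only read, never stored, so a non-owning view avoids one
// std::string copy per call.
void UnregisterByName(std::string_view instance_name) {
  std::cout << "unregistering " << instance_name << '\n';
}

int main() {
  std::string name = "instance_3";
  UnregisterByName(name);          // binds to the existing buffer, no copy
  UnregisterByName("instance_4");  // a literal binds directly as well
}
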
@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@ -185,6 +185,16 @@ DbmsHandler::DbmsHandler(storage::Config config, replication::ReplicationState &
  auto directories = std::set{std::string{kDefaultDB}};

  // Recover previous databases
  if (flags::AreExperimentsEnabled(flags::Experiments::SYSTEM_REPLICATION) && !recovery_on_startup) {
    // This will result in dropping databases on SystemRecoveryHandler
    // for MT case, and for single DB case we might not even set replication as commit timestamp is checked
    spdlog::warn(
        "Data recovery on startup not set; this will result in dropping databases in case multi-tenancy is enabled.");
  }

  // TODO: Problem is if user doesn't set this up "database" name won't be recovered
  // but if storage-recover-on-startup is true storage will be recovered which is an issue
  spdlog::info("Data recovery on startup set to {}", recovery_on_startup);
  if (recovery_on_startup) {
    auto it = durability_->begin(std::string(kDBPrefix));
    auto end = durability_->end(std::string(kDBPrefix));
@ -410,9 +420,10 @@ void DbmsHandler::UpdateDurability(const storage::Config &config, std::optional<
  if (!durability_) return;
  // Save database in a list of active databases
  const auto &key = Durability::GenKey(config.salient.name);
  if (rel_dir == std::nullopt)
  if (rel_dir == std::nullopt) {
    rel_dir =
        std::filesystem::relative(config.durability.storage_directory, default_config_.durability.storage_directory);
  }
  const auto &val = Durability::GenVal(config.salient.uuid, *rel_dir);
  durability_->Put(key, val);
}

@ -155,6 +155,8 @@ class DbmsHandler {
      spdlog::debug("Trying to create db '{}' on replica which already exists.", config.name);

      auto db = Get_(config.name);
      spdlog::debug("Aligning database with name {} which has UUID {}, where config UUID is {}", config.name,
                    std::string(db->uuid()), std::string(config.uuid));
      if (db->uuid() == config.uuid) {  // Same db
        return db;
      }
@ -163,18 +165,22 @@ class DbmsHandler {

      // TODO: Fix this hack
      if (config.name == kDefaultDB) {
        spdlog::debug("Last commit timestamp for DB {} is {}", kDefaultDB,
                      db->storage()->repl_storage_state_.last_commit_timestamp_);
        // This seems correct, if database made progress
        if (db->storage()->repl_storage_state_.last_commit_timestamp_ != storage::kTimestampInitialId) {
          spdlog::debug("Default storage is not clean, cannot update UUID...");
          return NewError::GENERIC;  // Update error
        }
        spdlog::debug("Update default db's UUID");
        spdlog::debug("Updated default db's UUID");
        // Default db cannot be deleted and remade, have to just update the UUID
        db->storage()->config_.salient.uuid = config.uuid;
        UpdateDurability(db->storage()->config_, ".");
        return db;
      }

      spdlog::debug("Drop database and recreate with the correct UUID");
      spdlog::debug("Dropping database {} with UUID: {} and recreating with the correct UUID: {}", config.name,
                    std::string(db->uuid()), std::string(config.uuid));
      // Defer drop
      (void)Delete_(db->name());
      // Second attempt
@ -266,10 +272,6 @@ class DbmsHandler {
  bool IsMain() const { return repl_state_.IsMain(); }
  bool IsReplica() const { return repl_state_.IsReplica(); }

#ifdef MG_ENTERPRISE
  // coordination::CoordinatorState &CoordinatorState() { return coordinator_state_; }
#endif

  /**
   * @brief Return all active databases.
   *

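The replica-side branch above boils down to three outcomes: reuse the database when the UUIDs already match, rewrite the UUID in place for the undeletable default database (only while its storage has made no progress), and drop-and-recreate everything else. A condensed, hypothetical model of that decision:

#include <iostream>
#include <string>

// Hypothetical condensed model of the replica-side alignment decision above.
enum class Action { Reuse, UpdateUuidInPlace, DropAndRecreate, Error };

Action Align(const std::string &existing_uuid, const std::string &wanted_uuid,
             bool is_default_db, bool storage_is_clean) {
  if (existing_uuid == wanted_uuid) return Action::Reuse;  // same db
  if (is_default_db) {
    // The default database cannot be dropped; its UUID may only be rewritten
    // while the storage has made no progress.
    return storage_is_clean ? Action::UpdateUuidInPlace : Action::Error;
  }
  return Action::DropAndRecreate;  // defer drop, then recreate with new UUID
}

int main() {
  std::cout << static_cast<int>(Align("a", "a", false, true)) << '\n';  // 0: Reuse
  std::cout << static_cast<int>(Align("a", "b", true, false)) << '\n';  // 3: Error
  std::cout << static_cast<int>(Align("a", "b", false, true)) << '\n';  // 2: DropAndRecreate
}
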
@ -19,7 +19,6 @@
#include "storage/v2/durability/durability.hpp"
#include "storage/v2/durability/snapshot.hpp"
#include "storage/v2/durability/version.hpp"
#include "storage/v2/fmt.hpp"
#include "storage/v2/indices/label_index_stats.hpp"
#include "storage/v2/inmemory/storage.hpp"
#include "storage/v2/inmemory/unique_constraints.hpp"
@ -119,9 +118,14 @@ void InMemoryReplicationHandlers::Register(dbms::DbmsHandler *dbms_handler, repl
      });
  server.rpc_server_.Register<replication_coordination_glue::SwapMainUUIDRpc>(
      [&data, dbms_handler](auto *req_reader, auto *res_builder) {
        spdlog::debug("Received SwapMainUUIDHandler");
        spdlog::debug("Received SwapMainUUIDRpc");
        InMemoryReplicationHandlers::SwapMainUUIDHandler(dbms_handler, data, req_reader, res_builder);
      });
  server.rpc_server_.Register<storage::replication::ForceResetStorageRpc>(
      [&data, dbms_handler](auto *req_reader, auto *res_builder) {
        spdlog::debug("Received ForceResetStorageRpc");
        InMemoryReplicationHandlers::ForceResetStorageHandler(dbms_handler, data.uuid_, req_reader, res_builder);
      });
}

void InMemoryReplicationHandlers::SwapMainUUIDHandler(dbms::DbmsHandler *dbms_handler,
@ -135,7 +139,7 @@ void InMemoryReplicationHandlers::SwapMainUUIDHandler(dbms::DbmsHa

  replication_coordination_glue::SwapMainUUIDReq req;
  slk::Load(&req, req_reader);
  spdlog::info(fmt::format("Set replica data UUID to main uuid {}", std::string(req.uuid)));
  spdlog::info("Set replica data UUID to main uuid {}", std::string(req.uuid));
  dbms_handler->ReplicationState().TryPersistRoleReplica(role_replica_data.config, req.uuid);
  role_replica_data.uuid_ = req.uuid;

@ -330,6 +334,78 @@ void InMemoryReplicationHandlers::SnapshotHandler(dbms::DbmsHandler *dbms_handle
  spdlog::debug("Replication recovery from snapshot finished!");
}

void InMemoryReplicationHandlers::ForceResetStorageHandler(dbms::DbmsHandler *dbms_handler,
                                                           const std::optional<utils::UUID> &current_main_uuid,
                                                           slk::Reader *req_reader, slk::Builder *res_builder) {
  storage::replication::ForceResetStorageReq req;
  slk::Load(&req, req_reader);
  auto db_acc = GetDatabaseAccessor(dbms_handler, req.db_uuid);
  if (!db_acc) {
    storage::replication::ForceResetStorageRes res{false, 0};
    slk::Save(res, res_builder);
    return;
  }
  if (!current_main_uuid.has_value() || req.main_uuid != current_main_uuid) [[unlikely]] {
    LogWrongMain(current_main_uuid, req.main_uuid, storage::replication::SnapshotReq::kType.name);
    storage::replication::ForceResetStorageRes res{false, 0};
    slk::Save(res, res_builder);
    return;
  }

  storage::replication::Decoder decoder(req_reader);

  auto *storage = static_cast<storage::InMemoryStorage *>(db_acc->get()->storage());

  auto storage_guard = std::unique_lock{storage->main_lock_};

  // Clear the database
  storage->vertices_.clear();
  storage->edges_.clear();
  storage->commit_log_.reset();
  storage->commit_log_.emplace();

  storage->constraints_.existence_constraints_ = std::make_unique<storage::ExistenceConstraints>();
  storage->constraints_.unique_constraints_ = std::make_unique<storage::InMemoryUniqueConstraints>();
  storage->indices_.label_index_ = std::make_unique<storage::InMemoryLabelIndex>();
  storage->indices_.label_property_index_ = std::make_unique<storage::InMemoryLabelPropertyIndex>();

  // Fine since we will force push when reading from WAL just random epoch with 0 timestamp, as it should be if it
  // acted as MAIN before
  storage->repl_storage_state_.epoch_.SetEpoch(std::string(utils::UUID{}));
  storage->repl_storage_state_.last_commit_timestamp_ = 0;

  storage->repl_storage_state_.history.clear();
  storage->vertex_id_ = 0;
  storage->edge_id_ = 0;
  storage->timestamp_ = storage::kTimestampInitialId;

  storage->CollectGarbage<true>(std::move(storage_guard), false);
  storage->vertices_.run_gc();
  storage->edges_.run_gc();

  storage::replication::ForceResetStorageRes res{true, storage->repl_storage_state_.last_commit_timestamp_.load()};
  slk::Save(res, res_builder);

  spdlog::trace("Deleting old snapshot files.");
  // Delete other durability files
  auto snapshot_files = storage::durability::GetSnapshotFiles(storage->recovery_.snapshot_directory_, storage->uuid_);
  for (const auto &[path, uuid, _] : snapshot_files) {
    spdlog::trace("Deleting snapshot file {}", path);
    storage->file_retainer_.DeleteFile(path);
  }

  spdlog::trace("Deleting old WAL files.");
  auto wal_files = storage::durability::GetWalFiles(storage->recovery_.wal_directory_, storage->uuid_);
  if (wal_files) {
    for (const auto &wal_file : *wal_files) {
      spdlog::trace("Deleting WAL file {}", wal_file.path);
      storage->file_retainer_.DeleteFile(wal_file.path);
    }

    storage->wal_file_.reset();
  }
}

void InMemoryReplicationHandlers::WalFilesHandler(dbms::DbmsHandler *dbms_handler,
                                                  const std::optional<utils::UUID> &current_main_uuid,
                                                  slk::Reader *req_reader, slk::Builder *res_builder) {
@ -513,7 +589,6 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
      if (timestamp < storage->timestamp_) {
        continue;
      }

      SPDLOG_INFO("  Delta {}", applied_deltas);
      switch (delta.type) {
        case WalDeltaData::Type::VERTEX_CREATE: {
@ -558,9 +633,10 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
          break;
        }
        case WalDeltaData::Type::VERTEX_SET_PROPERTY: {
          spdlog::trace("       Vertex {} set property {} to {}", delta.vertex_edge_set_property.gid.AsUint(),
                        delta.vertex_edge_set_property.property, delta.vertex_edge_set_property.value);
          spdlog::trace("       Vertex {} set property", delta.vertex_edge_set_property.gid.AsUint());
          // NOLINTNEXTLINE
          auto *transaction = get_transaction(timestamp);
          // NOLINTNEXTLINE
          auto vertex = transaction->FindVertex(delta.vertex_edge_set_property.gid, View::NEW);
          if (!vertex)
            throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
@ -608,8 +684,7 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
          break;
        }
        case WalDeltaData::Type::EDGE_SET_PROPERTY: {
          spdlog::trace("       Edge {} set property {} to {}", delta.vertex_edge_set_property.gid.AsUint(),
                        delta.vertex_edge_set_property.property, delta.vertex_edge_set_property.value);
          spdlog::trace("       Edge {} set property", delta.vertex_edge_set_property.gid.AsUint());
          if (!storage->config_.salient.items.properties_on_edges)
            throw utils::BasicException(
                "Can't set properties on edges because properties on edges "
@ -764,6 +839,20 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
          transaction->DeleteLabelPropertyIndexStats(storage->NameToLabel(info.label));
          break;
        }
        case WalDeltaData::Type::EDGE_INDEX_CREATE: {
          spdlog::trace("       Create edge index on :{}", delta.operation_edge_type.edge_type);
          auto *transaction = get_transaction(timestamp, kUniqueAccess);
          if (transaction->CreateIndex(storage->NameToEdgeType(delta.operation_label.label)).HasError())
            throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
          break;
        }
        case WalDeltaData::Type::EDGE_INDEX_DROP: {
          spdlog::trace("       Drop edge index on :{}", delta.operation_edge_type.edge_type);
          auto *transaction = get_transaction(timestamp, kUniqueAccess);
          if (transaction->DropIndex(storage->NameToEdgeType(delta.operation_label.label)).HasError())
            throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
          break;
        }
        case WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE: {
          spdlog::trace("       Create existence constraint on :{} ({})", delta.operation_label_property.label,
                        delta.operation_label_property.property);
@ -827,5 +916,4 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
  spdlog::debug("Applied {} deltas", applied_deltas);
  return applied_deltas;
}

}  // namespace memgraph::dbms

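`ForceResetStorageHandler` answers with `success == false` in two early-exit cases before touching any data: the database cannot be found, or the request's MAIN UUID does not match the UUID this replica currently listens to. A reduced sketch of those guards, with simplified request/response shapes instead of the slk-serialized ones:

#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

struct Response {
  bool success;
  std::uint64_t timestamp;
};

Response HandleForceReset(std::optional<std::string> db_found,
                          std::optional<std::string> current_main_uuid,
                          const std::string &req_main_uuid) {
  if (!db_found) return {false, 0};  // unknown database: reply and bail out
  if (!current_main_uuid || *current_main_uuid != req_main_uuid) {
    return {false, 0};  // request came from a stale or unknown MAIN
  }
  // ...only now clear vertices/edges and rebuild indices and constraints...
  return {true, 0};
}

int main() {
  auto res = HandleForceReset("db1", "uuid-1", "uuid-2");
  std::cout << std::boolalpha << res.success << '\n';  // false: wrong MAIN
}
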
@ -48,6 +48,9 @@ class InMemoryReplicationHandlers {

  static void SwapMainUUIDHandler(dbms::DbmsHandler *dbms_handler, replication::RoleReplicaData &role_replica_data,
                                  slk::Reader *req_reader, slk::Builder *res_builder);
  static void ForceResetStorageHandler(dbms::DbmsHandler *dbms_handler,
                                       const std::optional<utils::UUID> &current_main_uuid, slk::Reader *req_reader,
                                       slk::Builder *res_builder);

  static void LoadWal(storage::InMemoryStorage *storage, storage::replication::Decoder *decoder);

@ -59,12 +59,14 @@ class TypedValueResultStreamBase {
 public:
  explicit TypedValueResultStreamBase(memgraph::storage::Storage *storage);

  std::vector<memgraph::communication::bolt::Value> DecodeValues(
      const std::vector<memgraph::query::TypedValue> &values) const;
  void DecodeValues(const std::vector<memgraph::query::TypedValue> &values);

  auto AccessValues() const -> std::vector<memgraph::communication::bolt::Value> const & { return decoded_values_; }

 protected:
  // NOTE: Needed only for ToBoltValue conversions
  memgraph::storage::Storage *storage_;
  std::vector<memgraph::communication::bolt::Value> decoded_values_;
};

/// Wrapper around TEncoder which converts TypedValue to Value
@ -75,16 +77,18 @@ class TypedValueResultStream : public TypedValueResultStreamBase {
  TypedValueResultStream(TEncoder *encoder, memgraph::storage::Storage *storage)
      : TypedValueResultStreamBase{storage}, encoder_(encoder) {}

  void Result(const std::vector<memgraph::query::TypedValue> &values) { encoder_->MessageRecord(DecodeValues(values)); }
  void Result(const std::vector<memgraph::query::TypedValue> &values) {
    DecodeValues(values);
    encoder_->MessageRecord(AccessValues());
  }

 private:
  TEncoder *encoder_;
};

std::vector<memgraph::communication::bolt::Value> TypedValueResultStreamBase::DecodeValues(
    const std::vector<memgraph::query::TypedValue> &values) const {
  std::vector<memgraph::communication::bolt::Value> decoded_values;
  decoded_values.reserve(values.size());
void TypedValueResultStreamBase::DecodeValues(const std::vector<memgraph::query::TypedValue> &values) {
  decoded_values_.reserve(values.size());
  decoded_values_.clear();
  for (const auto &v : values) {
    auto maybe_value = memgraph::glue::ToBoltValue(v, storage_, memgraph::storage::View::NEW);
    if (maybe_value.HasError()) {
@ -99,9 +103,8 @@ std::vector<memgraph::communication::bolt::Value> TypedValueResultStreamBase::De
        throw memgraph::communication::bolt::ClientError("Unexpected storage error when streaming results.");
    }
    decoded_values.emplace_back(std::move(*maybe_value));
    decoded_values_.emplace_back(std::move(*maybe_value));
  }
  return decoded_values;
}

TypedValueResultStreamBase::TypedValueResultStreamBase(memgraph::storage::Storage *storage) : storage_(storage) {}

@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@ -22,113 +22,15 @@
#include "utils/message.hpp"
#include "utils/string.hpp"

namespace {
constexpr std::string_view delimiter = ":";
}  // namespace

namespace memgraph::io::network {

Endpoint::IpFamily Endpoint::GetIpFamily(const std::string &address) {
  in_addr addr4;
  in6_addr addr6;
  int ipv4_result = inet_pton(AF_INET, address.c_str(), &addr4);
  int ipv6_result = inet_pton(AF_INET6, address.c_str(), &addr6);
  if (ipv4_result == 1) {
    return IpFamily::IP4;
  } else if (ipv6_result == 1) {
    return IpFamily::IP6;
  } else {
    return IpFamily::NONE;
  }
}

std::optional<std::pair<std::string, uint16_t>> Endpoint::ParseSocketOrIpAddress(
    const std::string &address, const std::optional<uint16_t> default_port) {
  /// expected address format:
  ///   - "ip_address:port_number"
  ///   - "ip_address"
  /// We parse the address first. If it's an IP address, a default port must
  // be given, or we return nullopt. If it's a socket address, we try to parse
  // it into an ip address and a port number; even if a default port is given,
  // it won't be used, as we expect that it is given in the address string.
  const std::string delimiter = ":";
  std::string ip_address;

  std::vector<std::string> parts = utils::Split(address, delimiter);
  if (parts.size() == 1) {
    if (default_port) {
      if (GetIpFamily(address) == IpFamily::NONE) {
        return std::nullopt;
      }
      return std::pair{address, *default_port};
    }
  } else if (parts.size() == 2) {
    ip_address = std::move(parts[0]);
    if (GetIpFamily(ip_address) == IpFamily::NONE) {
      return std::nullopt;
    }
    int64_t int_port{0};
    try {
      int_port = utils::ParseInt(parts[1]);
    } catch (utils::BasicException &e) {
      spdlog::error(utils::MessageWithLink("Invalid port number {}.", parts[1], "https://memgr.ph/ports"));
      return std::nullopt;
    }
    if (int_port < 0) {
      spdlog::error(utils::MessageWithLink("Invalid port number {}. The port number must be a positive integer.",
                                           int_port, "https://memgr.ph/ports"));
      return std::nullopt;
    }
    if (int_port > std::numeric_limits<uint16_t>::max()) {
      spdlog::error(utils::MessageWithLink("Invalid port number. The port number exceeds the maximum possible size.",
                                           "https://memgr.ph/ports"));
      return std::nullopt;
    }

    return std::pair{ip_address, static_cast<uint16_t>(int_port)};
  }

  return std::nullopt;
}

std::optional<std::pair<std::string, uint16_t>> Endpoint::ParseHostname(
    const std::string &address, const std::optional<uint16_t> default_port = {}) {
  const std::string delimiter = ":";
  std::string ip_address;
  std::vector<std::string> parts = utils::Split(address, delimiter);
  if (parts.size() == 1) {
    if (default_port) {
      if (!IsResolvableAddress(address, *default_port)) {
        return std::nullopt;
      }
      return std::pair{address, *default_port};
    }
  } else if (parts.size() == 2) {
    int64_t int_port{0};
    auto hostname = std::move(parts[0]);
    try {
      int_port = utils::ParseInt(parts[1]);
    } catch (utils::BasicException &e) {
      spdlog::error(utils::MessageWithLink("Invalid port number {}.", parts[1], "https://memgr.ph/ports"));
      return std::nullopt;
    }
    if (int_port < 0) {
      spdlog::error(utils::MessageWithLink("Invalid port number {}. The port number must be a positive integer.",
                                           int_port, "https://memgr.ph/ports"));
      return std::nullopt;
    }
    if (int_port > std::numeric_limits<uint16_t>::max()) {
      spdlog::error(utils::MessageWithLink("Invalid port number. The port number exceeds the maximum possible size.",
                                           "https://memgr.ph/ports"));
      return std::nullopt;
    }
    if (IsResolvableAddress(hostname, static_cast<uint16_t>(int_port))) {
      return std::pair{hostname, static_cast<uint16_t>(int_port)};
    }
  }
  return std::nullopt;
}

std::string Endpoint::SocketAddress() const {
  auto ip_address = address.empty() ? "EMPTY" : address;
  return ip_address + ":" + std::to_string(port);
}
// NOLINTNEXTLINE
Endpoint::Endpoint(needs_resolving_t, std::string hostname, uint16_t port)
    : address(std::move(hostname)), port(port), family{GetIpFamily(address)} {}

Endpoint::Endpoint(std::string ip_address, uint16_t port) : address(std::move(ip_address)), port(port) {
  IpFamily ip_family = GetIpFamily(address);
@ -138,9 +40,23 @@ Endpoint::Endpoint(std::string ip_address, uint16_t port) : address(std::move(ip
  family = ip_family;
}

// NOLINTNEXTLINE
Endpoint::Endpoint(needs_resolving_t, std::string hostname, uint16_t port)
    : address(std::move(hostname)), port(port), family{GetIpFamily(address)} {}
std::string Endpoint::SocketAddress() const { return fmt::format("{}:{}", address, port); }

Endpoint::IpFamily Endpoint::GetIpFamily(std::string_view address) {
  // Ensure null-terminated
  auto const tmp = std::string(address);
  in_addr addr4;
  in6_addr addr6;
  int ipv4_result = inet_pton(AF_INET, tmp.c_str(), &addr4);
  int ipv6_result = inet_pton(AF_INET6, tmp.c_str(), &addr6);
  if (ipv4_result == 1) {
    return IpFamily::IP4;
  }
  if (ipv6_result == 1) {
    return IpFamily::IP6;
  }
  return IpFamily::NONE;
}

std::ostream &operator<<(std::ostream &os, const Endpoint &endpoint) {
  // no need to cover the IpFamily::NONE case, as you can't even construct an
@ -153,35 +69,73 @@ std::ostream &operator<<(std::ostream &os, const Endpoint &endpoint) {
  return os << endpoint.address << ":" << endpoint.port;
}

bool Endpoint::IsResolvableAddress(const std::string &address, uint16_t port) {
// NOTE: Intentional copy to ensure null-terminated string
bool Endpoint::IsResolvableAddress(std::string_view address, uint16_t port) {
  addrinfo hints{
      .ai_flags = AI_PASSIVE,
      .ai_family = AF_UNSPEC,     // IPv4 and IPv6
      .ai_socktype = SOCK_STREAM  // TCP socket
  };
  addrinfo *info = nullptr;
  auto status = getaddrinfo(address.c_str(), std::to_string(port).c_str(), &hints, &info);
  auto status = getaddrinfo(std::string(address).c_str(), std::to_string(port).c_str(), &hints, &info);
  if (info) freeaddrinfo(info);
  return status == 0;
}

std::optional<std::pair<std::string, uint16_t>> Endpoint::ParseSocketOrAddress(
    const std::string &address, const std::optional<uint16_t> default_port) {
  const std::string delimiter = ":";
  std::vector<std::string> parts = utils::Split(address, delimiter);
  if (parts.size() == 1) {
    if (GetIpFamily(address) == IpFamily::NONE) {
      return ParseHostname(address, default_port);
    }
    return ParseSocketOrIpAddress(address, default_port);
std::optional<ParsedAddress> Endpoint::ParseSocketOrAddress(std::string_view address,
                                                            std::optional<uint16_t> default_port) {
  auto const parts = utils::SplitView(address, delimiter);

  if (parts.size() > 2) {
    return std::nullopt;
  }
  if (parts.size() == 2) {
    if (GetIpFamily(parts[0]) == IpFamily::NONE) {
      return ParseHostname(address, default_port);

  auto const port = [default_port, &parts]() -> std::optional<uint16_t> {
    if (parts.size() == 2) {
      return static_cast<uint16_t>(utils::ParseInt(parts[1]));
    }
    return ParseSocketOrIpAddress(address, default_port);
    return default_port;
  }();

  if (!ValidatePort(port)) {
    return std::nullopt;
  }
  return std::nullopt;

  auto const addr = [address, &parts]() {
    if (parts.size() == 2) {
      return parts[0];
    }
    return address;
  }();

  if (GetIpFamily(addr) == IpFamily::NONE) {
    if (IsResolvableAddress(addr, *port)) {  // NOLINT
      return std::pair{addr, *port};         // NOLINT
    }
    return std::nullopt;
  }

  return std::pair{addr, *port};  // NOLINT
}

auto Endpoint::ValidatePort(std::optional<uint16_t> port) -> bool {
  if (!port) {
    return false;
  }

  if (port < 0) {
    spdlog::error(utils::MessageWithLink("Invalid port number {}. The port number must be a positive integer.", *port,
                                         "https://memgr.ph/ports"));
    return false;
  }

  if (port > std::numeric_limits<uint16_t>::max()) {
    spdlog::error(utils::MessageWithLink("Invalid port number. The port number exceeds the maximum possible size.",
                                         "https://memgr.ph/ports"));
    return false;
  }

  return true;
}

}  // namespace memgraph::io::network

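The consolidated `ParseSocketOrAddress` accepts `ip`, `ip:port`, `hostname`, or `hostname:port`, applies the default port only when the string carries none, and validates the port once. A rough standalone approximation of that flow using only the standard library (no DNS resolution step, and, like the original `:`-split, it does not handle bare IPv6 literals):

#include <charconv>
#include <cstdint>
#include <iostream>
#include <optional>
#include <string_view>
#include <utility>

using Parsed = std::pair<std::string_view, std::uint16_t>;

std::optional<Parsed> Parse(std::string_view address, std::optional<std::uint16_t> default_port) {
  auto const pos = address.find(':');
  if (pos != std::string_view::npos && address.find(':', pos + 1) != std::string_view::npos) {
    return std::nullopt;  // more than two parts
  }
  if (pos == std::string_view::npos) {
    if (!default_port) return std::nullopt;  // a bare address needs a default port
    return Parsed{address, *default_port};
  }
  auto const port_str = address.substr(pos + 1);
  std::uint16_t port{};
  auto [ptr, ec] = std::from_chars(port_str.data(), port_str.data() + port_str.size(), port);
  if (ec != std::errc{} || ptr != port_str.data() + port_str.size()) return std::nullopt;
  return Parsed{address.substr(0, pos), port};  // range checked by from_chars
}

int main() {
  if (auto p = Parse("127.0.0.1:7687", std::nullopt)) std::cout << p->first << ' ' << p->second << '\n';
  if (auto p = Parse("localhost", std::uint16_t{10000})) std::cout << p->first << ' ' << p->second << '\n';
  std::cout << std::boolalpha << Parse("1:2:3", std::nullopt).has_value() << '\n';  // false
}
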
@ -19,11 +19,8 @@

namespace memgraph::io::network {

/**
 * This class represents a network endpoint that is used in Socket.
 * It is used when connecting to an address and to get the current
 * connection address.
 */
using ParsedAddress = std::pair<std::string_view, uint16_t>;

struct Endpoint {
  static const struct needs_resolving_t {
  } needs_resolving;
@ -31,59 +28,35 @@ struct Endpoint {
  Endpoint() = default;
  Endpoint(std::string ip_address, uint16_t port);
  Endpoint(needs_resolving_t, std::string hostname, uint16_t port);

  Endpoint(Endpoint const &) = default;
  Endpoint(Endpoint &&) noexcept = default;

  Endpoint &operator=(Endpoint const &) = default;
  Endpoint &operator=(Endpoint &&) noexcept = default;

  ~Endpoint() = default;

  enum class IpFamily : std::uint8_t { NONE, IP4, IP6 };

  std::string SocketAddress() const;
  static std::optional<ParsedAddress> ParseSocketOrAddress(std::string_view address,
                                                           std::optional<uint16_t> default_port = {});

  bool operator==(const Endpoint &other) const = default;
  friend std::ostream &operator<<(std::ostream &os, const Endpoint &endpoint);
  std::string SocketAddress() const;

  std::string address;
  uint16_t port{0};
  IpFamily family{IpFamily::NONE};

  static std::optional<std::pair<std::string, uint16_t>> ParseSocketOrAddress(const std::string &address,
                                                                              std::optional<uint16_t> default_port);
  bool operator==(const Endpoint &other) const = default;
  friend std::ostream &operator<<(std::ostream &os, const Endpoint &endpoint);

  /**
   * Tries to parse the given string as either a socket address or ip address.
   * Expected address format:
   *   - "ip_address:port_number"
   *   - "ip_address"
   * We parse the address first. If it's an IP address, a default port must
   * be given, or we return nullopt. If it's a socket address, we try to parse
   * it into an ip address and a port number; even if a default port is given,
   * it won't be used, as we expect that it is given in the address string.
   */
  static std::optional<std::pair<std::string, uint16_t>> ParseSocketOrIpAddress(
      const std::string &address, std::optional<uint16_t> default_port = {});
 private:
  static IpFamily GetIpFamily(std::string_view address);

  /**
   * Tries to parse given string as either socket address or hostname.
   * Expected address format:
   *   - "hostname:port_number"
   *   - "hostname"
   * After we parse hostname and port we try to resolve the hostname into an ip_address.
   */
  static std::optional<std::pair<std::string, uint16_t>> ParseHostname(const std::string &address,
                                                                       std::optional<uint16_t> default_port);
  static bool IsResolvableAddress(std::string_view address, uint16_t port);

  static IpFamily GetIpFamily(const std::string &address);

  static bool IsResolvableAddress(const std::string &address, uint16_t port);

  /**
   * Tries to resolve hostname to its corresponding IP address.
   * Given a DNS hostname, this function performs resolution and returns
   * the IP address associated with the hostname.
   */
  static std::string ResolveHostnameIntoIpAddress(const std::string &address, uint16_t port);
  static auto ValidatePort(std::optional<uint16_t> port) -> bool;
};

}  // namespace memgraph::io::network

@ -334,7 +334,8 @@ int main(int argc, char **argv) {
      .salient.items = {.properties_on_edges = FLAGS_storage_properties_on_edges,
                        .enable_schema_metadata = FLAGS_storage_enable_schema_metadata},
      .salient.storage_mode = memgraph::flags::ParseStorageMode()};

  spdlog::info("config recover on startup {}, flags {} {}", db_config.durability.recover_on_startup,
               FLAGS_storage_recover_on_startup, FLAGS_data_recovery_on_startup);
  memgraph::utils::Scheduler jemalloc_purge_scheduler;
  jemalloc_purge_scheduler.Run("Jemalloc purge", std::chrono::seconds(FLAGS_storage_gc_cycle_sec),
                               [] { memgraph::memory::PurgeUnusedMemory(); });

@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -13,64 +13,6 @@

namespace memgraph::query {

namespace impl {

bool TypedValueCompare(const TypedValue &a, const TypedValue &b) {
  // in ordering null comes after everything else
  // at the same time Null is not less than null
  // first deal with Null < Whatever case
  if (a.IsNull()) return false;
  // now deal with NotNull < Null case
  if (b.IsNull()) return true;

  // comparisons are from this point legal only between values of
  // the same type, or int+float combinations
  if ((a.type() != b.type() && !(a.IsNumeric() && b.IsNumeric())))
    throw QueryRuntimeException("Can't compare value of type {} to value of type {}.", a.type(), b.type());

  switch (a.type()) {
    case TypedValue::Type::Bool:
      return !a.ValueBool() && b.ValueBool();
    case TypedValue::Type::Int:
      if (b.type() == TypedValue::Type::Double)
        return a.ValueInt() < b.ValueDouble();
      else
        return a.ValueInt() < b.ValueInt();
    case TypedValue::Type::Double:
      if (b.type() == TypedValue::Type::Int)
        return a.ValueDouble() < b.ValueInt();
      else
        return a.ValueDouble() < b.ValueDouble();
    case TypedValue::Type::String:
      // NOLINTNEXTLINE(modernize-use-nullptr)
      return a.ValueString() < b.ValueString();
    case TypedValue::Type::Date:
      // NOLINTNEXTLINE(modernize-use-nullptr)
      return a.ValueDate() < b.ValueDate();
    case TypedValue::Type::LocalTime:
      // NOLINTNEXTLINE(modernize-use-nullptr)
      return a.ValueLocalTime() < b.ValueLocalTime();
    case TypedValue::Type::LocalDateTime:
      // NOLINTNEXTLINE(modernize-use-nullptr)
      return a.ValueLocalDateTime() < b.ValueLocalDateTime();
    case TypedValue::Type::Duration:
      // NOLINTNEXTLINE(modernize-use-nullptr)
      return a.ValueDuration() < b.ValueDuration();
    case TypedValue::Type::List:
    case TypedValue::Type::Map:
    case TypedValue::Type::Vertex:
    case TypedValue::Type::Edge:
    case TypedValue::Type::Path:
    case TypedValue::Type::Graph:
    case TypedValue::Type::Function:
      throw QueryRuntimeException("Comparison is not defined for values of type {}.", a.type());
    case TypedValue::Type::Null:
      LOG_FATAL("Invalid type");
  }
}

}  // namespace impl

int64_t QueryTimestamp() {
  return std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().time_since_epoch())
      .count();

@ -23,6 +23,7 @@
#include "query/frontend/ast/ast.hpp"
#include "query/frontend/semantic/symbol.hpp"
#include "query/typed_value.hpp"
#include "range/v3/all.hpp"
#include "storage/v2/id_types.hpp"
#include "storage/v2/property_value.hpp"
#include "storage/v2/result.hpp"
@ -31,9 +32,91 @@

namespace memgraph::query {

namespace impl {
bool TypedValueCompare(const TypedValue &a, const TypedValue &b);
}  // namespace impl
namespace {
std::partial_ordering TypedValueCompare(TypedValue const &a, TypedValue const &b) {
  // First assume typical same type comparisons
  if (a.type() == b.type()) {
    switch (a.type()) {
      case TypedValue::Type::Bool:
        return a.UnsafeValueBool() <=> b.UnsafeValueBool();
      case TypedValue::Type::Int:
        return a.UnsafeValueInt() <=> b.UnsafeValueInt();
      case TypedValue::Type::Double:
        return a.UnsafeValueDouble() <=> b.UnsafeValueDouble();
      case TypedValue::Type::String:
        return a.UnsafeValueString() <=> b.UnsafeValueString();
      case TypedValue::Type::Date:
        return a.UnsafeValueDate() <=> b.UnsafeValueDate();
      case TypedValue::Type::LocalTime:
        return a.UnsafeValueLocalTime() <=> b.UnsafeValueLocalTime();
      case TypedValue::Type::LocalDateTime:
        return a.UnsafeValueLocalDateTime() <=> b.UnsafeValueLocalDateTime();
      case TypedValue::Type::Duration:
        return a.UnsafeValueDuration() <=> b.UnsafeValueDuration();
      case TypedValue::Type::Null:
        return std::partial_ordering::equivalent;
      case TypedValue::Type::List:
      case TypedValue::Type::Map:
      case TypedValue::Type::Vertex:
      case TypedValue::Type::Edge:
      case TypedValue::Type::Path:
      case TypedValue::Type::Graph:
      case TypedValue::Type::Function:
        throw QueryRuntimeException("Comparison is not defined for values of type {}.", a.type());
    }
  } else {
    // from this point comparisons are legal only between
    // int+float combinations or against null

    // in ordering null comes after everything else
    // at the same time Null is not less than null
    // first deal with Null < Whatever case
    if (a.IsNull()) return std::partial_ordering::greater;
    // now deal with NotNull < Null case
    if (b.IsNull()) return std::partial_ordering::less;

    if (!(a.IsNumeric() && b.IsNumeric())) [[unlikely]]
      throw QueryRuntimeException("Can't compare value of type {} to value of type {}.", a.type(), b.type());

    switch (a.type()) {
      case TypedValue::Type::Int:
        return a.UnsafeValueInt() <=> b.ValueDouble();
      case TypedValue::Type::Double:
        return a.UnsafeValueDouble() <=> b.ValueInt();
      case TypedValue::Type::Bool:
      case TypedValue::Type::Null:
      case TypedValue::Type::String:
      case TypedValue::Type::List:
      case TypedValue::Type::Map:
      case TypedValue::Type::Vertex:
      case TypedValue::Type::Edge:
      case TypedValue::Type::Path:
      case TypedValue::Type::Date:
      case TypedValue::Type::LocalTime:
      case TypedValue::Type::LocalDateTime:
      case TypedValue::Type::Duration:
      case TypedValue::Type::Graph:
      case TypedValue::Type::Function:
        LOG_FATAL("Invalid type");
    }
  }
}

}  // namespace

struct OrderedTypedValueCompare {
  OrderedTypedValueCompare(Ordering ordering) : ordering_{ordering}, ascending{ordering == Ordering::ASC} {}

  auto operator()(const TypedValue &lhs, const TypedValue &rhs) const -> std::partial_ordering {
    return ascending ? TypedValueCompare(lhs, rhs) : TypedValueCompare(rhs, lhs);
  }

  auto ordering() const { return ordering_; }

 private:
  Ordering ordering_;
  bool ascending = true;
};

/// Custom Comparator type for comparing vectors of TypedValues.
///
@ -43,32 +126,27 @@ bool TypedValueCompare(const TypedValue &a, const TypedValue &b);
class TypedValueVectorCompare final {
 public:
  TypedValueVectorCompare() = default;
  explicit TypedValueVectorCompare(const std::vector<Ordering> &ordering) : ordering_(ordering) {}
  explicit TypedValueVectorCompare(std::vector<OrderedTypedValueCompare> orderings)
      : orderings_{std::move(orderings)} {}

  template <class TAllocator>
  bool operator()(const std::vector<TypedValue, TAllocator> &c1, const std::vector<TypedValue, TAllocator> &c2) const {
    // ordering is invalid if there are more elements in the collections
    // than there are in the ordering_ vector
    MG_ASSERT(c1.size() <= ordering_.size() && c2.size() <= ordering_.size(),
              "Collections contain more elements than there are orderings");
  const auto &orderings() const { return orderings_; }

    auto c1_it = c1.begin();
    auto c2_it = c2.begin();
    auto ordering_it = ordering_.begin();
    for (; c1_it != c1.end() && c2_it != c2.end(); c1_it++, c2_it++, ordering_it++) {
      if (impl::TypedValueCompare(*c1_it, *c2_it)) return *ordering_it == Ordering::ASC;
      if (impl::TypedValueCompare(*c2_it, *c1_it)) return *ordering_it == Ordering::DESC;
    }

    // at least one collection is exhausted
    // c1 is less than c2 iff c1 reached the end but c2 didn't
    return (c1_it == c1.end()) && (c2_it != c2.end());
  auto lex_cmp() const {
    return [orderings = &orderings_]<typename TAllocator>(const std::vector<TypedValue, TAllocator> &lhs,
                                                          const std::vector<TypedValue, TAllocator> &rhs) {
      auto rng = ranges::views::zip(*orderings, lhs, rhs);
      for (auto const &[cmp, l, r] : rng) {
        auto res = cmp(l, r);
        if (res == std::partial_ordering::less) return true;
        if (res == std::partial_ordering::greater) return false;
      }
      DMG_ASSERT(orderings->size() == lhs.size() && lhs.size() == rhs.size());
      return false;
    };
  }

  // TODO: Remove this, member is public
  const auto &ordering() const { return ordering_; }

  std::vector<Ordering> ordering_;
 private:
  std::vector<OrderedTypedValueCompare> orderings_;
};

/// Raise QueryRuntimeException if the value for symbol isn't of expected type.

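The comparator now returns `std::partial_ordering`, and `lex_cmp()` folds the per-column results into the strict-weak `bool` predicate that sorting requires. A standalone illustration of that folding with plain `double` columns (where `<=>` also yields a partial ordering):

#include <algorithm>
#include <compare>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  using Row = std::vector<double>;
  std::vector<Row> rows{{1.0, 9.0}, {1.0, 2.0}, {0.5, 7.0}};

  // Fold per-column partial orderings into the strict-weak "less" that
  // std::sort needs, the same shape as lex_cmp() above.
  auto lex_less = [](const Row &lhs, const Row &rhs) {
    for (std::size_t i = 0; i < lhs.size() && i < rhs.size(); ++i) {
      auto const res = lhs[i] <=> rhs[i];  // doubles compare as partial_ordering
      if (res == std::partial_ordering::less) return true;
      if (res == std::partial_ordering::greater) return false;
      // equivalent (or unordered, e.g. NaN): fall through to the next column
    }
    return false;  // equal prefixes are not "less"
  };

  std::sort(rows.begin(), rows.end(), lex_less);
  for (auto const &r : rows) std::cout << r[0] << ',' << r[1] << '\n';
  // prints 0.5,7 then 1,2 then 1,9
}
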
@ -371,6 +371,62 @@ class VerticesIterable final {
  }
};

class EdgesIterable final {
  std::variant<storage::EdgesIterable, std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
                                                          utils::Allocator<EdgeAccessor>> *>
      iterable_;

 public:
  class Iterator final {
    std::variant<storage::EdgesIterable::Iterator,
                 std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
                                    utils::Allocator<EdgeAccessor>>::iterator>
        it_;

   public:
    explicit Iterator(storage::EdgesIterable::Iterator it) : it_(std::move(it)) {}
    explicit Iterator(std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
                                         utils::Allocator<EdgeAccessor>>::iterator it)
        : it_(it) {}

    EdgeAccessor operator*() const {
      return std::visit([](auto &it_) { return EdgeAccessor(*it_); }, it_);
    }

    Iterator &operator++() {
      std::visit([](auto &it_) { ++it_; }, it_);
      return *this;
    }

    bool operator==(const Iterator &other) const { return it_ == other.it_; }

    bool operator!=(const Iterator &other) const { return !(other == *this); }
  };

  explicit EdgesIterable(storage::EdgesIterable iterable) : iterable_(std::move(iterable)) {}
  explicit EdgesIterable(std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
                                            utils::Allocator<EdgeAccessor>> *edges)
      : iterable_(edges) {}

  Iterator begin() {
    return std::visit(
        memgraph::utils::Overloaded{
            [](storage::EdgesIterable &iterable_) { return Iterator(iterable_.begin()); },
            [](std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
                                  utils::Allocator<EdgeAccessor>> *iterable_) { return Iterator(iterable_->begin()); }},
        iterable_);
  }

  Iterator end() {
    return std::visit(
        memgraph::utils::Overloaded{
            [](storage::EdgesIterable &iterable_) { return Iterator(iterable_.end()); },
            [](std::unordered_set<EdgeAccessor, std::hash<EdgeAccessor>, std::equal_to<void>,
                                  utils::Allocator<EdgeAccessor>> *iterable_) { return Iterator(iterable_->end()); }},
        iterable_);
  }
};
class DbAccessor final {
  storage::Storage::Accessor *accessor_;

@ -416,6 +472,10 @@ class DbAccessor final {
    return VerticesIterable(accessor_->Vertices(label, property, lower, upper, view));
  }

  EdgesIterable Edges(storage::View view, storage::EdgeTypeId edge_type) {
    return EdgesIterable(accessor_->Edges(edge_type, view));
  }
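A hedged sketch of consuming the new accessor; it assumes a DbAccessor and an EdgeTypeId from surrounding query code that this diff does not show. The variant inside EdgesIterable hides whether values come from the storage edge-type index or from a materialized unordered_set.

// Sketch only: count edges of one type through the new EdgesIterable.
int64_t CountEdges(memgraph::query::DbAccessor &dba, memgraph::storage::EdgeTypeId edge_type) {
  int64_t count = 0;
  for (const auto &edge : dba.Edges(memgraph::storage::View::OLD, edge_type)) {
    (void)edge;  // only counting here
    ++count;
  }
  return count;
}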
  VertexAccessor InsertVertex() { return VertexAccessor(accessor_->CreateVertex()); }

  storage::Result<EdgeAccessor> InsertEdge(VertexAccessor *from, VertexAccessor *to,

@ -572,6 +632,8 @@ class DbAccessor final {
    return accessor_->LabelPropertyIndexExists(label, prop);
  }

  bool EdgeTypeIndexExists(storage::EdgeTypeId edge_type) const { return accessor_->EdgeTypeIndexExists(edge_type); }

  std::optional<storage::LabelIndexStats> GetIndexStats(const storage::LabelId &label) const {
    return accessor_->GetIndexStats(label);
  }

@ -638,6 +700,10 @@ class DbAccessor final {
    return accessor_->CreateIndex(label, property);
  }

  utils::BasicResult<storage::StorageIndexDefinitionError, void> CreateIndex(storage::EdgeTypeId edge_type) {
    return accessor_->CreateIndex(edge_type);
  }

  utils::BasicResult<storage::StorageIndexDefinitionError, void> DropIndex(storage::LabelId label) {
    return accessor_->DropIndex(label);
  }

@ -647,6 +713,10 @@ class DbAccessor final {
    return accessor_->DropIndex(label, property);
  }

  utils::BasicResult<storage::StorageIndexDefinitionError, void> DropIndex(storage::EdgeTypeId edge_type) {
    return accessor_->DropIndex(edge_type);
  }

  utils::BasicResult<storage::StorageExistenceConstraintDefinitionError, void> CreateExistenceConstraint(
      storage::LabelId label, storage::PropertyId property) {
    return accessor_->CreateExistenceConstraint(label, property);
@ -242,6 +242,10 @@ void DumpLabelIndex(std::ostream *os, query::DbAccessor *dba, const storage::Lab
  *os << "CREATE INDEX ON :" << EscapeName(dba->LabelToName(label)) << ";";
}

void DumpEdgeTypeIndex(std::ostream *os, query::DbAccessor *dba, const storage::EdgeTypeId edge_type) {
  *os << "CREATE EDGE INDEX ON :" << EscapeName(dba->EdgeTypeToName(edge_type)) << ";";
}

void DumpLabelPropertyIndex(std::ostream *os, query::DbAccessor *dba, storage::LabelId label,
                            storage::PropertyId property) {
  *os << "CREATE INDEX ON :" << EscapeName(dba->LabelToName(label)) << "(" << EscapeName(dba->PropertyToName(property))

@ -297,7 +301,9 @@ PullPlanDump::PullPlanDump(DbAccessor *dba, dbms::DatabaseAccess db_acc)
          // Internal index cleanup
          CreateInternalIndexCleanupPullChunk(),
          // Dump all triggers
          CreateTriggersPullChunk(),
          // Dump all edge-type indices
          CreateEdgeTypeIndicesPullChunk()} {}

bool PullPlanDump::Pull(AnyStream *stream, std::optional<int> n) {
  // Iterate all functions that stream some results.

@ -352,6 +358,33 @@ PullPlanDump::PullChunk PullPlanDump::CreateLabelIndicesPullChunk() {
  };
}

PullPlanDump::PullChunk PullPlanDump::CreateEdgeTypeIndicesPullChunk() {
  // Dump all edge-type indices
  return [this, global_index = 0U](AnyStream *stream, std::optional<int> n) mutable -> std::optional<size_t> {
    // Delay the construction of indices vectors
    if (!indices_info_) {
      indices_info_.emplace(dba_->ListAllIndices());
    }
    const auto &edge_type = indices_info_->edge_type;

    size_t local_counter = 0;
    while (global_index < edge_type.size() && (!n || local_counter < *n)) {
      std::ostringstream os;
      DumpEdgeTypeIndex(&os, dba_, edge_type[global_index]);
      stream->Result({TypedValue(os.str())});

      ++global_index;
      ++local_counter;
    }

    if (global_index == edge_type.size()) {
      return local_counter;
    }

    return std::nullopt;
  };
}

PullPlanDump::PullChunk PullPlanDump::CreateLabelPropertyIndicesPullChunk() {
  return [this, global_index = 0U](AnyStream *stream, std::optional<int> n) mutable -> std::optional<size_t> {
    // Delay the construction of indices vectors

@ -63,5 +63,6 @@ struct PullPlanDump {
  PullChunk CreateDropInternalIndexPullChunk();
  PullChunk CreateInternalIndexCleanupPullChunk();
  PullChunk CreateTriggersPullChunk();
  PullChunk CreateEdgeTypeIndicesPullChunk();
};
}  // namespace memgraph::query
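The pull chunks above share one contract: return the number of streamed rows once the source is exhausted, or std::nullopt when the n-row budget ran out first. A standalone sketch of that contract, with hypothetical names standing in for PullPlanDump::PullChunk:

#include <functional>
#include <optional>
#include <vector>

using Chunk = std::function<std::optional<size_t>(std::optional<int>)>;

Chunk MakeCountingChunk(std::vector<int> items) {
  return [items = std::move(items), global_index = size_t{0}](std::optional<int> n) mutable
             -> std::optional<size_t> {
    size_t local = 0;
    while (global_index < items.size() && (!n || local < static_cast<size_t>(*n))) {
      ++global_index;  // "stream" one result
      ++local;
    }
    // Exhausted: report how many were streamed; otherwise signal "more left".
    return global_index == items.size() ? std::optional<size_t>{local} : std::nullopt;
  };
}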
@ -1,4 +1,4 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@ -126,10 +126,11 @@ class FrameChangeCollector {
  }

  bool ResetTrackingValue(const std::string &key) {
    auto const it = tracked_values_.find(utils::pmr::string(key, utils::NewDeleteResource()));
    if (it == tracked_values_.cend()) {
      return false;
    }
    tracked_values_.erase(it);
    AddTrackingKey(key);
    return true;
  }
@ -186,6 +186,9 @@ constexpr utils::TypeInfo query::ProfileQuery::kType{utils::TypeId::AST_PROFILE_

constexpr utils::TypeInfo query::IndexQuery::kType{utils::TypeId::AST_INDEX_QUERY, "IndexQuery", &query::Query::kType};

constexpr utils::TypeInfo query::EdgeIndexQuery::kType{utils::TypeId::AST_EDGE_INDEX_QUERY, "EdgeIndexQuery",
                                                       &query::Query::kType};

constexpr utils::TypeInfo query::Create::kType{utils::TypeId::AST_CREATE, "Create", &query::Clause::kType};

constexpr utils::TypeInfo query::CallProcedure::kType{utils::TypeId::AST_CALL_PROCEDURE, "CallProcedure",
@ -21,10 +21,16 @@
#include "query/interpret/awesome_memgraph_functions.hpp"
#include "query/typed_value.hpp"
#include "storage/v2/property_value.hpp"
#include "utils/exceptions.hpp"
#include "utils/typeinfo.hpp"

namespace memgraph::query {

constexpr std::string_view kBoltServer = "bolt_server";
constexpr std::string_view kReplicationServer = "replication_server";
constexpr std::string_view kCoordinatorServer = "coordinator_server";
constexpr std::string_view kManagementServer = "management_server";

struct LabelIx {
  static const utils::TypeInfo kType;
  const utils::TypeInfo &GetTypeInfo() const { return kType; }

@ -1248,6 +1254,8 @@ class AllPropertiesLookup : public memgraph::query::Expression {
  friend class AstStorage;
};

using QueryLabelType = std::variant<LabelIx, Expression *>;

class LabelsTest : public memgraph::query::Expression {
 public:
  static const utils::TypeInfo kType;

@ -1280,6 +1288,16 @@ class LabelsTest : public memgraph::query::Expression {

 protected:
  LabelsTest(Expression *expression, const std::vector<LabelIx> &labels) : expression_(expression), labels_(labels) {}
  LabelsTest(Expression *expression, const std::vector<QueryLabelType> &labels) : expression_(expression) {
    labels_.reserve(labels.size());
    for (const auto &label : labels) {
      if (const auto *label_ix = std::get_if<LabelIx>(&label)) {
        labels_.push_back(*label_ix);
      } else {
        throw SemanticException("You can't use labels in filter expressions.");
      }
    }
  }

 private:
  friend class AstStorage;

@ -1770,7 +1788,7 @@ class NodeAtom : public memgraph::query::PatternAtom {
    return visitor.PostVisit(*this);
  }

  std::vector<QueryLabelType> labels_;
  std::variant<std::unordered_map<memgraph::query::PropertyIx, memgraph::query::Expression *>,
               memgraph::query::ParameterLookup *>
      properties_;

@ -1780,7 +1798,11 @@ class NodeAtom : public memgraph::query::PatternAtom {
    object->identifier_ = identifier_ ? identifier_->Clone(storage) : nullptr;
    object->labels_.resize(labels_.size());
    for (auto i = 0; i < object->labels_.size(); ++i) {
      if (const auto *label = std::get_if<LabelIx>(&labels_[i])) {
        object->labels_[i] = storage->GetLabelIx(label->name);
      } else {
        object->labels_[i] = std::get<Expression *>(labels_[i])->Clone(storage);
      }
    }
    if (const auto *properties = std::get_if<std::unordered_map<PropertyIx, Expression *>>(&properties_)) {
      auto &new_obj_properties = std::get<std::unordered_map<PropertyIx, Expression *>>(object->properties_);

@ -2223,6 +2245,34 @@ class IndexQuery : public memgraph::query::Query {
  friend class AstStorage;
};

class EdgeIndexQuery : public memgraph::query::Query {
 public:
  static const utils::TypeInfo kType;
  const utils::TypeInfo &GetTypeInfo() const override { return kType; }

  enum class Action { CREATE, DROP };

  EdgeIndexQuery() = default;

  DEFVISITABLE(QueryVisitor<void>);

  memgraph::query::EdgeIndexQuery::Action action_;
  memgraph::query::EdgeTypeIx edge_type_;

  EdgeIndexQuery *Clone(AstStorage *storage) const override {
    EdgeIndexQuery *object = storage->Create<EdgeIndexQuery>();
    object->action_ = action_;
    object->edge_type_ = storage->GetEdgeTypeIx(edge_type_.name);
    return object;
  }

 protected:
  EdgeIndexQuery(Action action, EdgeTypeIx edge_type) : action_(action), edge_type_(edge_type) {}

 private:
  friend class AstStorage;
};
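For orientation, a minimal sketch (assuming an AstStorage named storage; the edge-type name is a placeholder) of the node that "CREATE EDGE INDEX ON :KNOWS;" should produce, mirroring visitCreateEdgeIndex later in this diff:

// Sketch only, not part of the change itself.
auto *q = storage.Create<EdgeIndexQuery>();
q->action_ = EdgeIndexQuery::Action::CREATE;
q->edge_type_ = storage.GetEdgeTypeIx("KNOWS");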
class Create : public memgraph::query::Clause {
 public:
  static const utils::TypeInfo kType;

@ -2628,20 +2678,25 @@ class SetLabels : public memgraph::query::Clause {
  }

  memgraph::query::Identifier *identifier_{nullptr};
  std::vector<QueryLabelType> labels_;

  SetLabels *Clone(AstStorage *storage) const override {
    SetLabels *object = storage->Create<SetLabels>();
    object->identifier_ = identifier_ ? identifier_->Clone(storage) : nullptr;
    object->labels_.resize(labels_.size());
    for (auto i = 0; i < object->labels_.size(); ++i) {
      if (const auto *label = std::get_if<LabelIx>(&labels_[i])) {
        object->labels_[i] = storage->GetLabelIx(label->name);
      } else {
        object->labels_[i] = std::get<Expression *>(labels_[i])->Clone(storage);
      }
    }
    return object;
  }

 protected:
  SetLabels(Identifier *identifier, std::vector<QueryLabelType> labels)
      : identifier_(identifier), labels_(std::move(labels)) {}

 private:
  friend class AstStorage;

@ -2691,20 +2746,25 @@ class RemoveLabels : public memgraph::query::Clause {
  }

  memgraph::query::Identifier *identifier_{nullptr};
  std::vector<QueryLabelType> labels_;

  RemoveLabels *Clone(AstStorage *storage) const override {
    RemoveLabels *object = storage->Create<RemoveLabels>();
    object->identifier_ = identifier_ ? identifier_->Clone(storage) : nullptr;
    object->labels_.resize(labels_.size());
    for (auto i = 0; i < object->labels_.size(); ++i) {
      if (const auto *label = std::get_if<LabelIx>(&labels_[i])) {
        object->labels_[i] = storage->GetLabelIx(label->name);
      } else {
        object->labels_[i] = std::get<Expression *>(labels_[i])->Clone(storage);
      }
    }
    return object;
  }

 protected:
  RemoveLabels(Identifier *identifier, std::vector<QueryLabelType> labels)
      : identifier_(identifier), labels_(std::move(labels)) {}

 private:
  friend class AstStorage;

@ -3085,24 +3145,21 @@ class CoordinatorQuery : public memgraph::query::Query {
  DEFVISITABLE(QueryVisitor<void>);

  memgraph::query::CoordinatorQuery::Action action_;
  std::string instance_name_{};
  std::unordered_map<memgraph::query::Expression *, memgraph::query::Expression *> configs_;
  memgraph::query::Expression *coordinator_server_id_{nullptr};
  memgraph::query::CoordinatorQuery::SyncMode sync_mode_;

  CoordinatorQuery *Clone(AstStorage *storage) const override {
    auto *object = storage->Create<CoordinatorQuery>();

    object->action_ = action_;
    object->instance_name_ = instance_name_;
    object->coordinator_server_id_ = coordinator_server_id_ ? coordinator_server_id_->Clone(storage) : nullptr;
    object->sync_mode_ = sync_mode_;
    for (const auto &[key, value] : configs_) {
      object->configs_[key->Clone(storage)] = value->Clone(storage);
    }

    return object;
  }

@ -3586,7 +3643,7 @@ class PatternComprehension : public memgraph::query::Expression {
  bool Accept(HierarchicalTreeVisitor &visitor) override {
    if (visitor.PreVisit(*this)) {
      if (variable_) {
        throw utils::NotYetImplemented("Variable in pattern comprehension.");
      }
      pattern_->Accept(visitor);
      if (filter_) {

@ -3615,7 +3672,8 @@ class PatternComprehension : public memgraph::query::Expression {
  int32_t symbol_pos_{-1};

  PatternComprehension *Clone(AstStorage *storage) const override {
    auto *object = storage->Create<PatternComprehension>();
    object->variable_ = variable_ ? variable_->Clone(storage) : nullptr;
    object->pattern_ = pattern_ ? pattern_->Clone(storage) : nullptr;
    object->filter_ = filter_ ? filter_->Clone(storage) : nullptr;
    object->resultExpr_ = resultExpr_ ? resultExpr_->Clone(storage) : nullptr;

@ -3625,7 +3683,8 @@ class PatternComprehension : public memgraph::query::Expression {
  }

 protected:
  PatternComprehension(Identifier *variable, Pattern *pattern, Where *filter, Expression *resultExpr)
      : variable_(variable), pattern_(pattern), filter_(filter), resultExpr_(resultExpr) {}

 private:
  friend class AstStorage;
@ -82,6 +82,7 @@ class AuthQuery;
class ExplainQuery;
class ProfileQuery;
class IndexQuery;
class EdgeIndexQuery;
class DatabaseInfoQuery;
class SystemInfoQuery;
class ConstraintQuery;

@ -143,11 +144,11 @@ class ExpressionVisitor

template <class TResult>
class QueryVisitor
    : public utils::Visitor<TResult, CypherQuery, ExplainQuery, ProfileQuery, IndexQuery, EdgeIndexQuery, AuthQuery,
                            DatabaseInfoQuery, SystemInfoQuery, ConstraintQuery, DumpQuery, ReplicationQuery,
                            LockPathQuery, FreeMemoryQuery, TriggerQuery, IsolationLevelQuery, CreateSnapshotQuery,
                            StreamQuery, SettingQuery, VersionQuery, ShowConfigQuery, TransactionQueueQuery,
                            StorageModeQuery, AnalyzeGraphQuery, MultiDatabaseQuery, ShowDatabasesQuery,
                            EdgeImportModeQuery, CoordinatorQuery> {};

}  // namespace memgraph::query
@ -265,6 +265,27 @@ antlrcpp::Any CypherMainVisitor::visitDropIndex(MemgraphCypher::DropIndexContext
  return index_query;
}

antlrcpp::Any CypherMainVisitor::visitEdgeIndexQuery(MemgraphCypher::EdgeIndexQueryContext *ctx) {
  MG_ASSERT(ctx->children.size() == 1, "EdgeIndexQuery should have exactly one child!");
  auto *index_query = std::any_cast<EdgeIndexQuery *>(ctx->children[0]->accept(this));
  query_ = index_query;
  return index_query;
}

antlrcpp::Any CypherMainVisitor::visitCreateEdgeIndex(MemgraphCypher::CreateEdgeIndexContext *ctx) {
  auto *index_query = storage_->Create<EdgeIndexQuery>();
  index_query->action_ = EdgeIndexQuery::Action::CREATE;
  index_query->edge_type_ = AddEdgeType(std::any_cast<std::string>(ctx->labelName()->accept(this)));
  return index_query;
}

antlrcpp::Any CypherMainVisitor::visitDropEdgeIndex(MemgraphCypher::DropEdgeIndexContext *ctx) {
  auto *index_query = storage_->Create<EdgeIndexQuery>();
  index_query->action_ = EdgeIndexQuery::Action::DROP;
  index_query->edge_type_ = AddEdgeType(std::any_cast<std::string>(ctx->labelName()->accept(this)));
  return index_query;
}

antlrcpp::Any CypherMainVisitor::visitAuthQuery(MemgraphCypher::AuthQueryContext *ctx) {
  MG_ASSERT(ctx->children.size() == 1, "AuthQuery should have exactly one child!");
  auto *auth_query = std::any_cast<AuthQuery *>(ctx->children[0]->accept(this));

@ -377,24 +398,17 @@ antlrcpp::Any CypherMainVisitor::visitRegisterReplica(MemgraphCypher::RegisterRe
antlrcpp::Any CypherMainVisitor::visitRegisterInstanceOnCoordinator(
    MemgraphCypher::RegisterInstanceOnCoordinatorContext *ctx) {
  auto *coordinator_query = storage_->Create<CoordinatorQuery>();
  coordinator_query->action_ = CoordinatorQuery::Action::REGISTER_INSTANCE;
  coordinator_query->instance_name_ = std::any_cast<std::string>(ctx->instanceName()->symbolicName()->accept(this));
  coordinator_query->configs_ =
      std::any_cast<std::unordered_map<Expression *, Expression *>>(ctx->configsMap->accept(this));
  coordinator_query->sync_mode_ = [ctx]() {
    if (ctx->ASYNC()) {
      return CoordinatorQuery::SyncMode::ASYNC;
    }
    return CoordinatorQuery::SyncMode::SYNC;
  }();

  return coordinator_query;
}

@ -410,17 +424,10 @@ antlrcpp::Any CypherMainVisitor::visitUnregisterInstanceOnCoordinator(
antlrcpp::Any CypherMainVisitor::visitAddCoordinatorInstance(MemgraphCypher::AddCoordinatorInstanceContext *ctx) {
  auto *coordinator_query = storage_->Create<CoordinatorQuery>();

  coordinator_query->action_ = CoordinatorQuery::Action::ADD_COORDINATOR_INSTANCE;
  coordinator_query->coordinator_server_id_ = std::any_cast<Expression *>(ctx->coordinatorServerId()->accept(this));
  coordinator_query->configs_ =
      std::any_cast<std::unordered_map<Expression *, Expression *>>(ctx->configsMap->accept(this));

  return coordinator_query;
}

@ -1912,7 +1919,7 @@ antlrcpp::Any CypherMainVisitor::visitNodePattern(MemgraphCypher::NodePatternCon
    anonymous_identifiers.push_back(&node->identifier_);
  }
  if (ctx->nodeLabels()) {
    node->labels_ = std::any_cast<std::vector<QueryLabelType>>(ctx->nodeLabels()->accept(this));
  }
  if (ctx->properties()) {
    // This can return either properties or parameters

@ -1926,16 +1933,27 @@ antlrcpp::Any CypherMainVisitor::visitNodePattern(MemgraphCypher::NodePatternCon
}

antlrcpp::Any CypherMainVisitor::visitNodeLabels(MemgraphCypher::NodeLabelsContext *ctx) {
  std::vector<QueryLabelType> labels;
  for (auto *node_label : ctx->nodeLabel()) {
    auto *label_name = node_label->labelName();
    if (label_name->symbolicName()) {
      labels.emplace_back(AddLabel(std::any_cast<std::string>(node_label->accept(this))));
    } else if (label_name->parameter()) {
      // If we have a parameter, we have to resolve it.
      const auto *param_lookup = std::any_cast<ParameterLookup *>(node_label->accept(this));
      const auto label_name = parameters_->AtTokenPosition(param_lookup->token_position_).ValueString();
      labels.emplace_back(storage_->GetLabelIx(label_name));
      query_info_.is_cacheable = false;  // We can't cache queries with label parameters.
    } else {
      auto variable = std::any_cast<std::string>(label_name->variable()->accept(this));
      users_identifiers.insert(variable);
      auto *expression = static_cast<Expression *>(storage_->Create<Identifier>(variable));
      for (auto *lookup : label_name->propertyLookup()) {
        auto key = std::any_cast<PropertyIx>(lookup->accept(this));
        auto *property_lookup = storage_->Create<PropertyLookup>(expression, key);
        expression = property_lookup;
      }
      labels.emplace_back(expression);
    }
  }
  return labels;
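The three labelName alternatives handled above map to three surface forms. The queries in this sketch are illustrative, not taken from this diff:

// Illustrative label sources resolved by visitNodeLabels():
//   CREATE (n:Person)            -- symbolicName            -> static LabelIx
//   CREATE (n:$label)            -- parameter               -> resolved at parse time,
//                                                              query marked not cacheable
//   MATCH (m) CREATE (n:m.kind)  -- variable + propertyLookup -> stored as Expression *,
//                                                              evaluated per row at runtime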
@ -2483,7 +2501,7 @@ antlrcpp::Any CypherMainVisitor::visitListIndexingOrSlicing(MemgraphCypher::List
antlrcpp::Any CypherMainVisitor::visitExpression2a(MemgraphCypher::Expression2aContext *ctx) {
  auto *expression = std::any_cast<Expression *>(ctx->expression2b()->accept(this));
  if (ctx->nodeLabels()) {
    auto labels = std::any_cast<std::vector<QueryLabelType>>(ctx->nodeLabels()->accept(this));
    expression = storage_->Create<LabelsTest>(expression, labels);
  }
  return expression;

@ -2809,7 +2827,7 @@ antlrcpp::Any CypherMainVisitor::visitSetItem(MemgraphCypher::SetItemContext *ct
  // SetLabels
  auto *set_labels = storage_->Create<SetLabels>();
  set_labels->identifier_ = storage_->Create<Identifier>(std::any_cast<std::string>(ctx->variable()->accept(this)));
  set_labels->labels_ = std::any_cast<std::vector<QueryLabelType>>(ctx->nodeLabels()->accept(this));
  return static_cast<Clause *>(set_labels);
}

@ -2832,7 +2850,7 @@ antlrcpp::Any CypherMainVisitor::visitRemoveItem(MemgraphCypher::RemoveItemConte
  // RemoveLabels
  auto *remove_labels = storage_->Create<RemoveLabels>();
  remove_labels->identifier_ = storage_->Create<Identifier>(std::any_cast<std::string>(ctx->variable()->accept(this)));
  remove_labels->labels_ = std::any_cast<std::vector<QueryLabelType>>(ctx->nodeLabels()->accept(this));
  return static_cast<Clause *>(remove_labels);
}
@ -148,6 +148,11 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
   */
  antlrcpp::Any visitIndexQuery(MemgraphCypher::IndexQueryContext *ctx) override;

  /**
   * @return EdgeIndexQuery*
   */
  antlrcpp::Any visitEdgeIndexQuery(MemgraphCypher::EdgeIndexQueryContext *ctx) override;

  /**
   * @return ExplainQuery*
   */

@ -499,6 +504,16 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
   */
  antlrcpp::Any visitDropIndex(MemgraphCypher::DropIndexContext *ctx) override;

  /**
   * @return EdgeIndexQuery*
   */
  antlrcpp::Any visitCreateEdgeIndex(MemgraphCypher::CreateEdgeIndexContext *ctx) override;

  /**
   * @return EdgeIndexQuery*
   */
  antlrcpp::Any visitDropEdgeIndex(MemgraphCypher::DropEdgeIndexContext *ctx) override;

  /**
   * @return AuthQuery*
   */
@ -193,7 +193,10 @@ nodeLabels : nodeLabel ( nodeLabel )* ;

nodeLabel : ':' labelName ;

labelName : symbolicName
          | parameter
          | variable ( propertyLookup )+
          ;

relTypeName : symbolicName ;

@ -133,6 +133,7 @@ symbolicName : UnescapedSymbolicName

query : cypherQuery
      | indexQuery
      | edgeIndexQuery
      | explainQuery
      | profileQuery
      | databaseInfoQuery

@ -387,22 +388,22 @@ instanceName : symbolicName ;

socketAddress : literal ;

registerReplica : REGISTER REPLICA instanceName ( SYNC | ASYNC )
                  TO socketAddress ;

configKeyValuePair : literal ':' literal ;

configMap : '{' ( configKeyValuePair ( ',' configKeyValuePair )* )? '}' ;

registerInstanceOnCoordinator : REGISTER INSTANCE instanceName ( AS ASYNC ) ? WITH CONFIG configsMap=configMap ;

unregisterInstanceOnCoordinator : UNREGISTER INSTANCE instanceName ;

setInstanceToMain : SET INSTANCE instanceName TO MAIN ;

coordinatorServerId : literal ;

addCoordinatorInstance : ADD COORDINATOR coordinatorServerId WITH CONFIG configsMap=configMap ;

dropReplica : DROP REPLICA instanceName ;

@ -456,10 +457,6 @@ commonCreateStreamConfig : TRANSFORM transformationName=procedureName

createStream : kafkaCreateStream | pulsarCreateStream ;

kafkaCreateStreamConfig : TOPICS topicNames
                        | CONSUMER_GROUP consumerGroup=symbolicNameWithDotsAndMinus
                        | BOOTSTRAP_SERVERS bootstrapServers=literal

@ -527,3 +524,9 @@ showDatabase : SHOW DATABASE ;
showDatabases : SHOW DATABASES ;

edgeImportModeQuery : EDGE IMPORT MODE ( ACTIVE | INACTIVE ) ;

createEdgeIndex : CREATE EDGE INDEX ON ':' labelName ;

dropEdgeIndex : DROP EDGE INDEX ON ':' labelName ;

edgeIndexQuery : createEdgeIndex | dropEdgeIndex ;
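Statements the amended grammar is intended to accept, collected as illustrative string literals; the exact surface syntax should be confirmed against the parser tests rather than this sketch:

// Hypothetical example statements matching the rules above.
constexpr const char *kExamples[] = {
    "CREATE EDGE INDEX ON :KNOWS;",
    "DROP EDGE INDEX ON :KNOWS;",
    "REGISTER INSTANCE instance_1 AS ASYNC WITH CONFIG "
    "{'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', "
    "'replication_server': '127.0.0.1:10001'};",
    "ADD COORDINATOR 1 WITH CONFIG "
    "{'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'};",
};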
@ -27,6 +27,8 @@ class PrivilegeExtractor : public QueryVisitor<void>, public HierarchicalTreeVis

  void Visit(IndexQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }

  void Visit(EdgeIndexQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }

  void Visit(AnalyzeGraphQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::INDEX); }

  void Visit(AuthQuery & /*unused*/) override { AddPrivilege(AuthQuery::Privilege::AUTH); }
@ -53,6 +53,8 @@ class Symbol {
  bool user_declared() const { return user_declared_; }
  int token_position() const { return token_position_; }

  bool IsSymbolAnonym() const { return name_.substr(0U, 4U) == "anon"; }

  std::string name_;
  int64_t position_;
  bool user_declared_{true};
@ -568,6 +568,44 @@ bool SymbolGenerator::PostVisit(SetProperty & /*set_property*/) {
  return true;
}

bool SymbolGenerator::PreVisit(SetLabels &set_labels) {
  auto &scope = scopes_.back();
  scope.in_set_labels = true;
  for (auto &label : set_labels.labels_) {
    if (auto *expression = std::get_if<Expression *>(&label)) {
      (*expression)->Accept(*this);
    }
  }

  return true;
}

bool SymbolGenerator::PostVisit(SetLabels & /*set_labels*/) {
  auto &scope = scopes_.back();
  scope.in_set_labels = false;

  return true;
}

bool SymbolGenerator::PreVisit(RemoveLabels &remove_labels) {
  auto &scope = scopes_.back();
  scope.in_remove_labels = true;
  for (auto &label : remove_labels.labels_) {
    if (auto *expression = std::get_if<Expression *>(&label)) {
      (*expression)->Accept(*this);
    }
  }

  return true;
}

bool SymbolGenerator::PostVisit(RemoveLabels & /*remove_labels*/) {
  auto &scope = scopes_.back();
  scope.in_remove_labels = false;

  return true;
}

// Pattern and its subparts.

bool SymbolGenerator::PreVisit(Pattern &pattern) {

@ -602,6 +640,15 @@ bool SymbolGenerator::PreVisit(NodeAtom &node_atom) {
  };

  scope.in_node_atom = true;

  if (scope.in_create) {  // Label expressions are allowed only in CREATE.
    for (auto &label : node_atom.labels_) {
      if (auto *expression = std::get_if<Expression *>(&label)) {
        (*expression)->Accept(*this);
      }
    }
  }

  if (auto *properties = std::get_if<std::unordered_map<PropertyIx, Expression *>>(&node_atom.properties_)) {
    bool props_or_labels = !properties->empty() || !node_atom.labels_.empty();

@ -721,6 +768,32 @@ bool SymbolGenerator::PostVisit(EdgeAtom &) {
  return true;
}

bool SymbolGenerator::PreVisit(PatternComprehension &pc) {
  auto &scope = scopes_.back();

  if (scope.in_set_property) {
    throw utils::NotYetImplemented("Pattern Comprehension cannot be used within SET clause!");
  }

  if (scope.in_with) {
    throw utils::NotYetImplemented("Pattern Comprehension cannot be used within WITH!");
  }

  if (scope.in_reduce) {
    throw utils::NotYetImplemented("Pattern Comprehension cannot be used within REDUCE!");
  }

  if (scope.num_if_operators) {
    throw utils::NotYetImplemented("IF operator cannot be used with Pattern Comprehension!");
  }

  const auto &symbol = CreateAnonymousSymbol();
  pc.MapTo(symbol);
  return true;
}

bool SymbolGenerator::PostVisit(PatternComprehension & /*pc*/) { return true; }

void SymbolGenerator::VisitWithIdentifiers(Expression *expr, const std::vector<Identifier *> &identifiers) {
  auto &scope = scopes_.back();
  std::vector<std::pair<std::optional<Symbol>, Identifier *>> prev_symbols;
@ -1,4 +1,4 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@ -68,6 +68,10 @@ class SymbolGenerator : public HierarchicalTreeVisitor {
  bool PostVisit(Foreach &) override;
  bool PreVisit(SetProperty & /*set_property*/) override;
  bool PostVisit(SetProperty & /*set_property*/) override;
  bool PreVisit(SetLabels &) override;
  bool PostVisit(SetLabels & /*set_labels*/) override;
  bool PreVisit(RemoveLabels &) override;
  bool PostVisit(RemoveLabels & /*remove_labels*/) override;

  // Expressions
  ReturnType Visit(Identifier &) override;

@ -97,6 +101,8 @@ class SymbolGenerator : public HierarchicalTreeVisitor {
  bool PostVisit(NodeAtom &) override;
  bool PreVisit(EdgeAtom &) override;
  bool PostVisit(EdgeAtom &) override;
  bool PreVisit(PatternComprehension &) override;
  bool PostVisit(PatternComprehension &) override;

 private:
  // Scope stores the state of where we are when visiting the AST and a map of

@ -128,6 +134,8 @@ class SymbolGenerator : public HierarchicalTreeVisitor {
    bool in_set_property{false};
    bool in_call_subquery{false};
    bool has_return{false};
    bool in_set_labels{false};
    bool in_remove_labels{false};
    // True when visiting a pattern atom (node or edge) identifier, which can be
    // reused or created in the pattern itself.
    bool in_pattern_atom_identifier{false};
@ -761,13 +761,19 @@ TypedValue Range(const TypedValue *args, int64_t nargs, const FunctionContext &c
  int64_t step = nargs == 3 ? args[2].ValueInt() : 1;
  TypedValue::TVector list(ctx.memory);
  if (lbound <= rbound && step > 0) {
    int64_t n = ((rbound - lbound + 1) + (step - 1)) / step;
    list.reserve(n);
    for (auto i = lbound; i <= rbound; i += step) {
      list.emplace_back(i);
    }
    MG_ASSERT(list.size() == n);
  } else if (lbound >= rbound && step < 0) {
    int64_t n = ((lbound - rbound + 1) + (-step - 1)) / -step;
    list.reserve(n);
    for (auto i = lbound; i >= rbound; i += step) {
      list.emplace_back(i);
    }
    MG_ASSERT(list.size() == n);
  }
  return TypedValue(std::move(list));
}
@ -1,4 +1,4 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@ -13,12 +13,12 @@

namespace memgraph::query {

int64_t EvaluateInt(ExpressionEvaluator *evaluator, Expression *expr, std::string_view what) {
  TypedValue value = expr->Accept(*evaluator);
  try {
    return value.ValueInt();
  } catch (TypedValueException &e) {
    throw QueryRuntimeException(std::string(what) + " must be an int");
  }
}
@ -226,7 +226,6 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
    }                                                                          \
  }

  BINARY_OPERATOR_VISITOR(XorOperator, ^, XOR);
  BINARY_OPERATOR_VISITOR(AdditionOperator, +, +);
  BINARY_OPERATOR_VISITOR(SubtractionOperator, -, -);

@ -261,6 +260,20 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
    }
  }

  TypedValue Visit(OrOperator &op) override {
    auto value1 = op.expression1_->Accept(*this);
    if (value1.IsBool() && value1.ValueBool()) {
      // If the first expression is true, don't evaluate the second one.
      return value1;
    }
    auto value2 = op.expression2_->Accept(*this);
    try {
      return value1 || value2;
    } catch (const TypedValueException &) {
      throw QueryRuntimeException("Invalid types: {} and {} for OR.", value1.type(), value2.type());
    }
  }

  TypedValue Visit(IfOperator &if_operator) override {
    auto condition = if_operator.condition_->Accept(*this);
    if (condition.IsNull()) {

@ -1196,7 +1209,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
/// @param what - Name of what's getting evaluated. Used for user feedback (via
/// exception) when the evaluated value is not an int.
/// @throw QueryRuntimeException if expression doesn't evaluate to an int.
int64_t EvaluateInt(ExpressionEvaluator *evaluator, Expression *expr, std::string_view what);

std::optional<size_t> EvaluateMemoryLimit(ExpressionVisitor<TypedValue> &eval, Expression *memory_limit,
                                          size_t memory_scale);
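The dedicated Visit(OrOperator) replaces the macro-generated visitor so OR can short-circuit: when the left operand is already true, the right operand is never evaluated and cannot raise a type error. A minimal standalone sketch of that rule:

#include <functional>

// Sketch only: evaluate rhs only when lhs isn't already true,
// mirroring Visit(OrOperator) above.
bool LazyOr(bool lhs, const std::function<bool()> &rhs) {
  if (lhs) return true;  // short-circuit: rhs() never runs
  return rhs();          // may throw, as in the evaluator
}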
@ -246,27 +246,6 @@ std::optional<std::string> GetOptionalStringValue(query::Expression *expression,
  return {};
};

inline auto convertFromCoordinatorToReplicationMode(const CoordinatorQuery::SyncMode &sync_mode)
    -> replication_coordination_glue::ReplicationMode {
  switch (sync_mode) {
@ -355,7 +334,7 @@ class ReplQueryHandler {
      const auto replication_config =
          replication::ReplicationClientConfig{.name = name,
                                               .mode = repl_mode,
                                               .ip_address = std::string(ip),
                                               .port = port,
                                               .replica_check_frequency = replica_check_frequency,
                                               .ssl = std::nullopt};
@ -410,7 +389,7 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {

      : coordinator_handler_(coordinator_state) {}

  void UnregisterInstance(std::string_view instance_name) override {
    auto status = coordinator_handler_.UnregisterReplicationInstance(instance_name);
    switch (status) {
      using enum memgraph::coordination::UnregisterInstanceCoordinatorStatus;

@ -423,6 +402,8 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
        throw QueryRuntimeException("UNREGISTER INSTANCE query can only be run on a coordinator!");
      case NOT_LEADER:
        throw QueryRuntimeException("Couldn't unregister replica instance since coordinator is not a leader!");
      case RAFT_LOG_ERROR:
        throw QueryRuntimeException("Couldn't unregister replica instance since raft server couldn't append the log!");
      case RPC_FAILED:
        throw QueryRuntimeException(
            "Couldn't unregister replica instance because current main instance couldn't unregister replica!");

@ -431,20 +412,18 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
    }
  }

  void RegisterReplicationInstance(std::string_view coordinator_socket_address,
                                   std::string_view replication_socket_address,
                                   std::chrono::seconds const &instance_check_frequency,
                                   std::chrono::seconds const &instance_down_timeout,
                                   std::chrono::seconds const &instance_get_uuid_frequency,
                                   std::string_view instance_name, CoordinatorQuery::SyncMode sync_mode) override {
    const auto maybe_replication_ip_port = io::network::Endpoint::ParseSocketOrAddress(replication_socket_address);
    if (!maybe_replication_ip_port) {
      throw QueryRuntimeException("Invalid replication socket address!");
    }

    const auto maybe_coordinator_ip_port = io::network::Endpoint::ParseSocketOrAddress(coordinator_socket_address);
    if (!maybe_coordinator_ip_port) {
      throw QueryRuntimeException("Invalid coordinator socket address!");
    }

@ -452,14 +431,14 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
    const auto [replication_ip, replication_port] = *maybe_replication_ip_port;
    const auto [coordinator_server_ip, coordinator_server_port] = *maybe_coordinator_ip_port;
    const auto repl_config = coordination::CoordinatorClientConfig::ReplicationClientInfo{
        .instance_name = std::string(instance_name),
        .replication_mode = convertFromCoordinatorToReplicationMode(sync_mode),
        .replication_ip_address = std::string(replication_ip),
        .replication_port = replication_port};

    auto coordinator_client_config =
        coordination::CoordinatorClientConfig{.instance_name = std::string(instance_name),
                                              .ip_address = std::string(coordinator_server_ip),
                                              .port = coordinator_server_port,
                                              .instance_health_check_frequency_sec = instance_check_frequency,
                                              .instance_down_timeout_sec = instance_down_timeout,

@ -472,18 +451,17 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
      using enum memgraph::coordination::RegisterInstanceCoordinatorStatus;
      case NAME_EXISTS:
        throw QueryRuntimeException("Couldn't register replica instance since instance with such name already exists!");
      case COORD_ENDPOINT_EXISTS:
        throw QueryRuntimeException(
            "Couldn't register replica instance since instance with such coordinator endpoint already exists!");
      case REPL_ENDPOINT_EXISTS:
        throw QueryRuntimeException(
            "Couldn't register replica instance since instance with such replication endpoint already exists!");
      case NOT_COORDINATOR:
        throw QueryRuntimeException("REGISTER INSTANCE query can only be run on a coordinator!");
      case NOT_LEADER:
        throw QueryRuntimeException("Couldn't register replica instance since coordinator is not a leader!");
      case RAFT_LOG_ERROR:
        throw QueryRuntimeException("Couldn't register replica instance since raft server couldn't append the log!");
      case RPC_FAILED:
        throw QueryRuntimeException(

@ -494,19 +472,19 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
    }
  }

  auto AddCoordinatorInstance(uint32_t raft_server_id, std::string_view raft_socket_address) -> void override {
    auto const maybe_ip_and_port = io::network::Endpoint::ParseSocketOrAddress(raft_socket_address);
    if (maybe_ip_and_port) {
      auto const [ip, port] = *maybe_ip_and_port;
      spdlog::info("Adding instance {} with raft socket address {}:{}.", raft_server_id, ip, port);
      coordinator_handler_.AddCoordinatorInstance(raft_server_id, port, ip);
    } else {
      spdlog::error("Invalid raft socket address {}.", raft_socket_address);
    }
  }

  void SetReplicationInstanceToMain(std::string_view instance_name) override {
    auto const status = coordinator_handler_.SetReplicationInstanceToMain(instance_name);
    switch (status) {
      using enum memgraph::coordination::SetInstanceToMainCoordinatorStatus;
      case NO_INSTANCE_WITH_NAME:

@ -515,6 +493,10 @@ class CoordQueryHandler final : public query::CoordinatorQueryHandler {
        throw QueryRuntimeException("Couldn't set instance to main since there is already a main instance in cluster!");
      case NOT_COORDINATOR:
        throw QueryRuntimeException("SET INSTANCE TO MAIN query can only be run on a coordinator!");
      case NOT_LEADER:
        throw QueryRuntimeException("Couldn't set instance to main since coordinator is not a leader!");
      case RAFT_LOG_ERROR:
        throw QueryRuntimeException("Couldn't promote instance since raft server couldn't append the log!");
      case COULD_NOT_PROMOTE_TO_MAIN:
        throw QueryRuntimeException(
            "Couldn't set replica instance to main! Check coordinator and replica for more logs");
@ -1143,6 +1125,27 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
}

#ifdef MG_ENTERPRISE

auto ParseConfigMap(std::unordered_map<Expression *, Expression *> const &config_map,
                    ExpressionVisitor<TypedValue> &evaluator)
    -> std::optional<std::map<std::string, std::string, std::less<>>> {
  if (std::ranges::any_of(config_map, [&evaluator](const auto &entry) {
        auto key_expr = entry.first->Accept(evaluator);
        auto value_expr = entry.second->Accept(evaluator);
        return !key_expr.IsString() || !value_expr.IsString();
      })) {
    spdlog::error("Config map must contain only string keys and values!");
    return std::nullopt;
  }

  return ranges::views::all(config_map) | ranges::views::transform([&evaluator](const auto &entry) {
           auto key_expr = entry.first->Accept(evaluator);
           auto value_expr = entry.second->Accept(evaluator);
           return std::pair{key_expr.ValueString(), value_expr.ValueString()};
         }) |
         ranges::to<std::map<std::string, std::string, std::less<>>>;
}
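A hedged sketch of the call pattern the cases below use; coordinator_query and evaluator come from the surrounding interpreter code:

// Sketch: validate and read a parsed config map. `configs_` holds Expression*
// pairs from the AST; ParseConfigMap folds them into string pairs or nullopt
// when any key or value is not a string.
auto config_map = ParseConfigMap(coordinator_query->configs_, evaluator);
if (!config_map) {
  throw QueryRuntimeException("Failed to parse config map!");
}
if (auto it = config_map->find(kBoltServer); it != config_map->end()) {
  spdlog::info("bolt_server = {}", it->second);
}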
Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Parameters &parameters,
                                coordination::CoordinatorState *coordinator_state,
                                const query::InterpreterConfig &config, std::vector<Notification> *notifications) {

@ -1170,17 +1173,37 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
      EvaluationContext evaluation_context{.timestamp = QueryTimestamp(), .parameters = parameters};
      auto evaluator = PrimitiveLiteralExpressionEvaluator{evaluation_context};

      auto config_map = ParseConfigMap(coordinator_query->configs_, evaluator);
      if (!config_map) {
        throw QueryRuntimeException("Failed to parse config map!");
      }

      if (config_map->size() != 2) {
        throw QueryRuntimeException("Config map must contain exactly 2 entries: {} and {}!", kCoordinatorServer,
                                    kBoltServer);
      }

      auto const &coordinator_server_it = config_map->find(kCoordinatorServer);
      if (coordinator_server_it == config_map->end()) {
        throw QueryRuntimeException("Config map must contain {} entry!", kCoordinatorServer);
      }

      auto const &bolt_server_it = config_map->find(kBoltServer);
      if (bolt_server_it == config_map->end()) {
        throw QueryRuntimeException("Config map must contain {} entry!", kBoltServer);
      }

      auto coord_server_id = coordinator_query->coordinator_server_id_->Accept(evaluator).ValueInt();

      callback.fn = [handler = CoordQueryHandler{*coordinator_state}, coord_server_id,
                     coordinator_server = coordinator_server_it->second]() mutable {
        handler.AddCoordinatorInstance(coord_server_id, coordinator_server);
        return std::vector<std::vector<TypedValue>>();
      };

      notifications->emplace_back(SeverityLevel::INFO, NotificationCode::ADD_COORDINATOR_INSTANCE,
                                  fmt::format("Coordinator has added instance {} on coordinator server {}.",
                                              coordinator_query->instance_name_, coordinator_server_it->second));
      return callback;
    }
    case CoordinatorQuery::Action::REGISTER_INSTANCE: {

@ -1191,27 +1214,49 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
      // the argument to Callback.
      EvaluationContext evaluation_context{.timestamp = QueryTimestamp(), .parameters = parameters};
      auto evaluator = PrimitiveLiteralExpressionEvaluator{evaluation_context};
      auto config_map = ParseConfigMap(coordinator_query->configs_, evaluator);

      if (!config_map) {
        throw QueryRuntimeException("Failed to parse config map!");
      }

      if (config_map->size() != 3) {
        throw QueryRuntimeException("Config map must contain exactly 3 entries: {}, {} and {}!", kBoltServer,
                                    kManagementServer, kReplicationServer);
      }

      auto const &replication_server_it = config_map->find(kReplicationServer);
      if (replication_server_it == config_map->end()) {
        throw QueryRuntimeException("Config map must contain {} entry!", kReplicationServer);
      }

      auto const &management_server_it = config_map->find(kManagementServer);
      if (management_server_it == config_map->end()) {
        throw QueryRuntimeException("Config map must contain {} entry!", kManagementServer);
      }

      auto const &bolt_server_it = config_map->find(kBoltServer);
      if (bolt_server_it == config_map->end()) {
        throw QueryRuntimeException("Config map must contain {} entry!", kBoltServer);
      }

      callback.fn = [handler = CoordQueryHandler{*coordinator_state},
                     instance_health_check_frequency_sec = config.instance_health_check_frequency_sec,
                     management_server = management_server_it->second,
                     replication_server = replication_server_it->second, bolt_server = bolt_server_it->second,
                     instance_name = coordinator_query->instance_name_,
                     instance_down_timeout_sec = config.instance_down_timeout_sec,
                     instance_get_uuid_frequency_sec = config.instance_get_uuid_frequency_sec,
                     sync_mode = coordinator_query->sync_mode_]() mutable {
        handler.RegisterReplicationInstance(management_server, replication_server, instance_health_check_frequency_sec,
                                            instance_down_timeout_sec, instance_get_uuid_frequency_sec, instance_name,
                                            sync_mode);
        return std::vector<std::vector<TypedValue>>();
      };

      notifications->emplace_back(SeverityLevel::INFO, NotificationCode::REGISTER_REPLICATION_INSTANCE,
                                  fmt::format("Coordinator has registered replication instance on {} for instance {}.",
                                              bolt_server_it->second, coordinator_query->instance_name_));
      return callback;
    }
    case CoordinatorQuery::Action::UNREGISTER_INSTANCE:
@ -1251,17 +1296,16 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
|
||||
throw QueryRuntimeException("Only coordinator can run SHOW INSTANCES.");
|
||||
}
|
||||
|
||||
callback.header = {"name", "raft_socket_address", "coordinator_socket_address", "alive", "role"};
|
||||
callback.header = {"name", "raft_socket_address", "coordinator_socket_address", "health", "role"};
|
||||
callback.fn = [handler = CoordQueryHandler{*coordinator_state},
|
||||
replica_nfields = callback.header.size()]() mutable {
|
||||
auto const instances = handler.ShowInstances();
|
||||
auto const converter = [](const auto &status) -> std::vector<TypedValue> {
|
||||
return {TypedValue{status.instance_name}, TypedValue{status.raft_socket_address},
|
||||
TypedValue{status.coord_socket_address}, TypedValue{status.is_alive},
|
||||
TypedValue{status.cluster_role}};
|
||||
TypedValue{status.coord_socket_address}, TypedValue{status.health}, TypedValue{status.cluster_role}};
|
||||
};
|
||||
|
||||
return utils::fmap(converter, instances);
|
||||
return utils::fmap(instances, converter);
|
||||
};
|
||||
return callback;
|
||||
}
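
The call site now passes the range first and the callable second; a minimal fmap with that argument order (a sketch only, since the real utils::fmap is not shown in this diff) could look like:

#include <type_traits>
#include <vector>

// Sketch of an fmap matching the new (items, function) call order seen above;
// the actual utils::fmap signature is an assumption here.
template <typename T, typename F>
auto fmap(std::vector<T> const &items, F &&func) {
  std::vector<std::invoke_result_t<F &, T const &>> result;
  result.reserve(items.size());
  for (auto const &item : items) {
    result.push_back(func(item));
  }
  return result;
}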

@@ -1668,8 +1712,7 @@ struct PullPlan {
           std::shared_ptr<QueryUserOrRole> user_or_role, std::atomic<TransactionStatus> *transaction_status,
           std::shared_ptr<utils::AsyncTimer> tx_timer,
           TriggerContextCollector *trigger_context_collector = nullptr,
           std::optional<size_t> memory_limit = {}, bool use_monotonic_memory = true,
           FrameChangeCollector *frame_change_collector_ = nullptr);
           std::optional<size_t> memory_limit = {}, FrameChangeCollector *frame_change_collector_ = nullptr);

  std::optional<plan::ProfilingStatsWithTotalTime> Pull(AnyStream *stream, std::optional<int> n,
                                                        const std::vector<Symbol> &output_symbols,
@@ -1694,26 +1737,17 @@ struct PullPlan {
  // we have to keep track of any unsent results from previous `PullPlan::Pull`
  // manually by using this flag.
  bool has_unsent_results_ = false;

  // In the case of LOAD CSV, we want to use only PoolResource without MonotonicMemoryResource
  // to reuse allocated memory. As LOAD CSV processes row by row, memory usage can be reduced
  // significantly if the MemoryResource handling allocations can reuse the memory allocated for
  // the first row on all subsequent rows.
  // This flag signals to `PullPlan::Pull` which MemoryResource to use.
  bool use_monotonic_memory_;
};

PullPlan::PullPlan(const std::shared_ptr<PlanWrapper> plan, const Parameters &parameters, const bool is_profile_query,
                   DbAccessor *dba, InterpreterContext *interpreter_context, utils::MemoryResource *execution_memory,
                   std::shared_ptr<QueryUserOrRole> user_or_role, std::atomic<TransactionStatus> *transaction_status,
                   std::shared_ptr<utils::AsyncTimer> tx_timer, TriggerContextCollector *trigger_context_collector,
                   const std::optional<size_t> memory_limit, bool use_monotonic_memory,
                   FrameChangeCollector *frame_change_collector)
                   const std::optional<size_t> memory_limit, FrameChangeCollector *frame_change_collector)
    : plan_(plan),
      cursor_(plan->plan().MakeCursor(execution_memory)),
      frame_(plan->symbol_table().max_position(), execution_memory),
      memory_limit_(memory_limit),
      use_monotonic_memory_(use_monotonic_memory) {
      memory_limit_(memory_limit) {
  ctx_.db_accessor = dba;
  ctx_.symbol_table = plan->symbol_table();
  ctx_.evaluation_context.timestamp = QueryTimestamp();
@@ -1741,6 +1775,7 @@ PullPlan::PullPlan(const std::shared_ptr<PlanWrapper> plan, const Parameters &pa
  ctx_.is_profile_query = is_profile_query;
  ctx_.trigger_context_collector = trigger_context_collector;
  ctx_.frame_change_collector = frame_change_collector;
  ctx_.evaluation_context.memory = execution_memory;
}

std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::Pull(AnyStream *stream, std::optional<int> n,
@@ -1764,43 +1799,14 @@ std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::Pull(AnyStream *strea
    }
  }};

  // Set up temporary memory for a single Pull. Initial memory comes from the
  // stack. 256 KiB should fit on the stack and should be more than enough for a
  // single `Pull`.
  static constexpr size_t stack_size = 256UL * 1024UL;
  char stack_data[stack_size];

  utils::ResourceWithOutOfMemoryException resource_with_exception;
  utils::MonotonicBufferResource monotonic_memory{&stack_data[0], stack_size, &resource_with_exception};
  std::optional<utils::PoolResource> pool_memory;
  static constexpr auto kMaxBlockPerChunks = 128;

  if (!use_monotonic_memory_) {
    pool_memory.emplace(kMaxBlockPerChunks, kExecutionPoolMaxBlockSize, &resource_with_exception,
                        &resource_with_exception);
  } else {
    // We can throw on every query because simple delete queries will use only
    // the stack-allocated buffer.
    // Also, we want to throw only when the query engine requests more memory, not the storage,
    // so we add the exception to the allocator.
    // TODO (mferencevic): Tune the parameters accordingly.
    pool_memory.emplace(kMaxBlockPerChunks, 1024, &monotonic_memory, &resource_with_exception);
  }

  ctx_.evaluation_context.memory = &*pool_memory;
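
The removed block layers a pool on top of a stack-backed monotonic buffer. The same idea expressed with standard std::pmr types (a sketch using pmr stand-ins, not Memgraph's utils resources):

#include <array>
#include <cstddef>
#include <memory_resource>
#include <vector>

// A stack buffer feeds a monotonic resource; a pool on top recycles freed
// blocks, which the plain monotonic resource never does.
void PullOnceSketch() {
  std::array<std::byte, 256UL * 1024UL> stack_data;  // initial memory on the stack
  std::pmr::monotonic_buffer_resource monotonic{stack_data.data(), stack_data.size()};
  std::pmr::unsynchronized_pool_resource pool{&monotonic};
  std::pmr::vector<int> row{&pool};  // per-row allocations hit the pool first
  row.assign({1, 2, 3});
}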

  // Returns true if a result was pulled.
  const auto pull_result = [&]() -> bool { return cursor_->Pull(frame_, ctx_); };

  const auto stream_values = [&]() {
    // TODO: The streamed values should also probably use the above memory.
    std::vector<TypedValue> values;
    values.reserve(output_symbols.size());

    for (const auto &symbol : output_symbols) {
      values.emplace_back(frame_[symbol]);
  auto values = std::vector<TypedValue>(output_symbols.size());
  const auto stream_values = [&] {
    for (auto const i : ranges::views::iota(0UL, output_symbols.size())) {
      values[i] = frame_[output_symbols[i]];
    }

    stream->Result(values);
  };
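
The rewritten lambda hoists the values buffer out of the loop and overwrites it per row via an index range; the indexing idiom in isolation (shown with std::views::iota rather than the range-v3 ranges::views::iota the diff uses):

#include <ranges>
#include <vector>

// Overwrite a reused buffer by index instead of reallocating it per row;
// assumes values.size() == frame.size().
void CopyRow(std::vector<int> const &frame, std::vector<int> &values) {
  for (auto const i : std::views::iota(0UL, frame.size())) {
    values[i] = frame[i];
  }
}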

@@ -1910,7 +1916,6 @@ PreparedQuery Interpreter::PrepareTransactionQuery(std::string_view query_upper,
  std::function<void()> handler;

  if (query_upper == "BEGIN") {
    ResetInterpreter();
    // TODO: Evaluate doing move(extras). Currently the extras is very small, but this will be important if it ever
    // becomes large.
    handler = [this, extras = extras] {
@@ -1988,30 +1993,6 @@ inline static void TryCaching(const AstStorage &ast_storage, FrameChangeCollecto
  }
}

bool IsLoadCsvQuery(const std::vector<memgraph::query::Clause *> &clauses) {
  return std::any_of(clauses.begin(), clauses.end(),
                     [](memgraph::query::Clause const *clause) { return clause->GetTypeInfo() == LoadCsv::kType; });
}

bool IsCallBatchedProcedureQuery(const std::vector<memgraph::query::Clause *> &clauses) {
  EvaluationContext evaluation_context;

  return std::ranges::any_of(clauses, [&evaluation_context](memgraph::query::Clause *clause) -> bool {
    if (!(clause->GetTypeInfo() == CallProcedure::kType)) return false;
    auto *call_procedure_clause = utils::Downcast<CallProcedure>(clause);

    const auto &maybe_found = memgraph::query::procedure::FindProcedure(
        procedure::gModuleRegistry, call_procedure_clause->procedure_name_, evaluation_context.memory);
    if (!maybe_found) {
      throw QueryRuntimeException("There is no procedure named '{}'.", call_procedure_clause->procedure_name_);
    }
    const auto &[module, proc] = *maybe_found;
    if (!proc->info.is_batched) return false;
    spdlog::trace("Using PoolResource for batched query procedure");
    return true;
  });
}
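
Taken together, these predicates drive the allocator selection made later in Prepare (see the usePool computation below): a query switches to pooled execution memory as soon as any clause is LOAD CSV, a batched procedure call, or an all-shortest-paths match.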

PreparedQuery PrepareCypherQuery(ParsedQuery parsed_query, std::map<std::string, TypedValue> *summary,
                                 InterpreterContext *interpreter_context, CurrentDB &current_db,
                                 utils::MemoryResource *execution_memory, std::vector<Notification> *notifications,
@@ -2031,7 +2012,6 @@ PreparedQuery PrepareCypherQuery(ParsedQuery parsed_query, std::map<std::string,
    spdlog::info("Running query with memory limit of {}", utils::GetReadableSize(*memory_limit));
  }
  auto clauses = cypher_query->single_query_->clauses_;
  bool contains_csv = false;
  if (std::any_of(clauses.begin(), clauses.end(),
                  [](const auto *clause) { return clause->GetTypeInfo() == LoadCsv::kType; })) {
    notifications->emplace_back(
@@ -2039,13 +2019,8 @@ PreparedQuery PrepareCypherQuery(ParsedQuery parsed_query, std::map<std::string,
        "It's important to note that the parser parses the values as strings. It's up to the user to "
        "convert the parsed row values to the appropriate type. This can be done using the built-in "
        "conversion functions such as ToInteger, ToFloat, ToBoolean etc.");
    contains_csv = true;
  }

  // If this is a LOAD CSV query, use PoolResource without MonotonicMemoryResource as we want to reuse allocated memory
  auto use_monotonic_memory =
      !contains_csv && !IsCallBatchedProcedureQuery(clauses) && !IsAllShortestPathsQuery(clauses);

  MG_ASSERT(current_db.execution_db_accessor_, "Cypher query expects a current DB transaction");
  auto *dba =
      &*current_db
@@ -2084,7 +2059,7 @@ PreparedQuery PrepareCypherQuery(ParsedQuery parsed_query, std::map<std::string,
      current_db.trigger_context_collector_ ? &*current_db.trigger_context_collector_ : nullptr;
  auto pull_plan = std::make_shared<PullPlan>(
      plan, parsed_query.parameters, false, dba, interpreter_context, execution_memory, std::move(user_or_role),
      transaction_status, std::move(tx_timer), trigger_context_collector, memory_limit, use_monotonic_memory,
      transaction_status, std::move(tx_timer), trigger_context_collector, memory_limit,
      frame_change_collector->IsTrackingValues() ? frame_change_collector : nullptr);
  return PreparedQuery{std::move(header), std::move(parsed_query.required_privileges),
                       [pull_plan = std::move(pull_plan), output_symbols = std::move(output_symbols), summary](
@@ -2198,18 +2173,6 @@ PreparedQuery PrepareProfileQuery(ParsedQuery parsed_query, bool in_explicit_tra

  auto *cypher_query = utils::Downcast<CypherQuery>(parsed_inner_query.query);

  bool contains_csv = false;
  auto clauses = cypher_query->single_query_->clauses_;
  if (std::any_of(clauses.begin(), clauses.end(),
                  [](const auto *clause) { return clause->GetTypeInfo() == LoadCsv::kType; })) {
    contains_csv = true;
  }

  // If this is a LOAD CSV, BatchedProcedure or AllShortest query, use PoolResource without MonotonicMemoryResource
  // as we want to reuse allocated memory
  auto use_monotonic_memory =
      !contains_csv && !IsCallBatchedProcedureQuery(clauses) && !IsAllShortestPathsQuery(clauses);

  MG_ASSERT(cypher_query, "Cypher grammar should not allow other queries in PROFILE");
  EvaluationContext evaluation_context;
  evaluation_context.timestamp = QueryTimestamp();
@@ -2243,14 +2206,14 @@ PreparedQuery PrepareProfileQuery(ParsedQuery parsed_query, bool in_explicit_tra
      // We want to execute the query we are profiling lazily, so we delay
      // the construction of the corresponding context.
      stats_and_total_time = std::optional<plan::ProfilingStatsWithTotalTime>{},
      pull_plan = std::shared_ptr<PullPlanVector>(nullptr), transaction_status, use_monotonic_memory,
      frame_change_collector, tx_timer = std::move(tx_timer)](
      AnyStream *stream, std::optional<int> n) mutable -> std::optional<QueryHandlerResult> {
      pull_plan = std::shared_ptr<PullPlanVector>(nullptr), transaction_status, frame_change_collector,
      tx_timer = std::move(tx_timer)](AnyStream *stream,
                                      std::optional<int> n) mutable -> std::optional<QueryHandlerResult> {
    // No output symbols are given so that nothing is streamed.
    if (!stats_and_total_time) {
      stats_and_total_time =
          PullPlan(plan, parameters, true, dba, interpreter_context, execution_memory, std::move(user_or_role),
                   transaction_status, std::move(tx_timer), nullptr, memory_limit, use_monotonic_memory,
                   transaction_status, std::move(tx_timer), nullptr, memory_limit,
                   frame_change_collector->IsTrackingValues() ? frame_change_collector : nullptr)
              .Pull(stream, {}, {}, summary);
      pull_plan = std::make_shared<PullPlanVector>(ProfilingStatsToTable(*stats_and_total_time));
@@ -2679,6 +2642,75 @@ PreparedQuery PrepareIndexQuery(ParsedQuery parsed_query, bool in_explicit_trans
                       RWType::W};
}

PreparedQuery PrepareEdgeIndexQuery(ParsedQuery parsed_query, bool in_explicit_transaction,
                                    std::vector<Notification> *notifications, CurrentDB &current_db) {
  if (in_explicit_transaction) {
    throw IndexInMulticommandTxException();
  }

  auto *index_query = utils::Downcast<EdgeIndexQuery>(parsed_query.query);
  std::function<void(Notification &)> handler;

  MG_ASSERT(current_db.db_acc_, "Index query expects a current DB");
  auto &db_acc = *current_db.db_acc_;

  MG_ASSERT(current_db.db_transactional_accessor_, "Index query expects a current DB transaction");
  auto *dba = &*current_db.execution_db_accessor_;

  auto invalidate_plan_cache = [plan_cache = db_acc->plan_cache()] {
    plan_cache->WithLock([&](auto &cache) { cache.reset(); });
  };

  auto *storage = db_acc->storage();
  auto edge_type = storage->NameToEdgeType(index_query->edge_type_.name);

  Notification index_notification(SeverityLevel::INFO);
  switch (index_query->action_) {
    case EdgeIndexQuery::Action::CREATE: {
      index_notification.code = NotificationCode::CREATE_INDEX;
      index_notification.title = fmt::format("Created index on edge-type {}.", index_query->edge_type_.name);

      handler = [dba, edge_type, label_name = index_query->edge_type_.name,
                 invalidate_plan_cache = std::move(invalidate_plan_cache)](Notification &index_notification) {
        auto maybe_index_error = dba->CreateIndex(edge_type);
        utils::OnScopeExit invalidator(invalidate_plan_cache);

        if (maybe_index_error.HasError()) {
          index_notification.code = NotificationCode::EXISTENT_INDEX;
          index_notification.title = fmt::format("Index on edge-type {} already exists.", label_name);
        }
      };
      break;
    }
    case EdgeIndexQuery::Action::DROP: {
      index_notification.code = NotificationCode::DROP_INDEX;
      index_notification.title = fmt::format("Dropped index on edge-type {}.", index_query->edge_type_.name);
      handler = [dba, edge_type, label_name = index_query->edge_type_.name,
                 invalidate_plan_cache = std::move(invalidate_plan_cache)](Notification &index_notification) {
        auto maybe_index_error = dba->DropIndex(edge_type);
        utils::OnScopeExit invalidator(invalidate_plan_cache);

        if (maybe_index_error.HasError()) {
          index_notification.code = NotificationCode::NONEXISTENT_INDEX;
          index_notification.title = fmt::format("Index on edge-type {} doesn't exist.", label_name);
        }
      };
      break;
    }
  }

  return PreparedQuery{
      {},
      std::move(parsed_query.required_privileges),
      [handler = std::move(handler), notifications, index_notification = std::move(index_notification)](
          AnyStream * /*stream*/, std::optional<int> /*unused*/) mutable {
        handler(index_notification);
        notifications->push_back(index_notification);
        return QueryHandlerResult::COMMIT;
      },
      RWType::W};
}
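
Both handlers rely on utils::OnScopeExit to invalidate the plan cache whether or not the index operation succeeds; a minimal guard of that shape (a sketch of the assumed interface, not Memgraph's implementation) is:

#include <utility>

// Minimal scope-exit guard: runs the stored callable when the enclosing
// scope unwinds, including via exceptions.
template <typename F>
class OnScopeExit {
 public:
  explicit OnScopeExit(F func) : func_(std::move(func)) {}
  OnScopeExit(OnScopeExit const &) = delete;
  OnScopeExit &operator=(OnScopeExit const &) = delete;
  ~OnScopeExit() { func_(); }

 private:
  F func_;
};
// Usage: OnScopeExit invalidator(invalidate_plan_cache);  // as in the handlers above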

PreparedQuery PrepareAuthQuery(ParsedQuery parsed_query, bool in_explicit_transaction,
                               InterpreterContext *interpreter_context, Interpreter &interpreter) {
  if (in_explicit_transaction) {
@@ -3483,6 +3515,7 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici
  auto *storage = database->storage();
  const std::string_view label_index_mark{"label"};
  const std::string_view label_property_index_mark{"label+property"};
  const std::string_view edge_type_index_mark{"edge-type"};
  auto info = dba->ListAllIndices();
  auto storage_acc = database->Access();
  std::vector<std::vector<TypedValue>> results;
@@ -3497,6 +3530,10 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici
                       TypedValue(storage->PropertyToName(item.second)),
                       TypedValue(static_cast<int>(storage_acc->ApproximateVertexCount(item.first, item.second)))});
  }
  for (const auto &item : info.edge_type) {
    results.push_back({TypedValue(edge_type_index_mark), TypedValue(storage->EdgeTypeToName(item)), TypedValue(),
                       TypedValue(static_cast<int>(storage_acc->ApproximateEdgeCount(item)))});
  }
  std::sort(results.begin(), results.end(), [&label_index_mark](const auto &record_1, const auto &record_2) {
    const auto type_1 = record_1[0].ValueString();
    const auto type_2 = record_2[0].ValueString();
@@ -4139,6 +4176,7 @@ PreparedQuery PrepareShowDatabasesQuery(ParsedQuery parsed_query, InterpreterCon
std::optional<uint64_t> Interpreter::GetTransactionId() const { return current_transaction_; }

void Interpreter::BeginTransaction(QueryExtras const &extras) {
  ResetInterpreter();
  const auto prepared_query = PrepareTransactionQuery("BEGIN", extras);
  prepared_query.query_handler(nullptr, {});
}
@@ -4173,12 +4211,12 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
  const auto upper_case_query = utils::ToUpperCase(query_string);
  const auto trimmed_query = utils::Trim(upper_case_query);
  if (trimmed_query == "BEGIN" || trimmed_query == "COMMIT" || trimmed_query == "ROLLBACK") {
    auto resource = utils::MonotonicBufferResource(kExecutionMemoryBlockSize);
    auto prepared_query = PrepareTransactionQuery(trimmed_query, extras);
    auto &query_execution =
        query_executions_.emplace_back(QueryExecution::Create(std::move(resource), std::move(prepared_query)));
    std::optional<int> qid =
        in_explicit_transaction_ ? static_cast<int>(query_executions_.size() - 1) : std::optional<int>{};
    if (trimmed_query == "BEGIN") {
      ResetInterpreter();
    }
    auto &query_execution = query_executions_.emplace_back(QueryExecution::Create());
    query_execution->prepared_query = PrepareTransactionQuery(trimmed_query, extras);
    auto qid = in_explicit_transaction_ ? static_cast<int>(query_executions_.size() - 1) : std::optional<int>{};
    return {query_execution->prepared_query->header, query_execution->prepared_query->privileges, qid, {}};
  }

@@ -4208,35 +4246,8 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
      ParseQuery(query_string, params, &interpreter_context_->ast_cache, interpreter_context_->config.query);
  auto parsing_time = parsing_timer.Elapsed().count();

  CypherQuery const *const cypher_query = [&]() -> CypherQuery * {
    if (auto *cypher_query = utils::Downcast<CypherQuery>(parsed_query.query)) {
      return cypher_query;
    }
    if (auto *profile_query = utils::Downcast<ProfileQuery>(parsed_query.query)) {
      return profile_query->cypher_query_;
    }
    return nullptr;
  }();  // IILE

  auto const [usePool, hasAllShortestPaths] = [&]() -> std::pair<bool, bool> {
    if (!cypher_query) {
      return {false, false};
    }
    auto const &clauses = cypher_query->single_query_->clauses_;
    bool hasAllShortestPaths = IsAllShortestPathsQuery(clauses);
    // Using PoolResource without MonotonicMemoryResource for LOAD CSV reduces memory usage.
    bool usePool = hasAllShortestPaths || IsCallBatchedProcedureQuery(clauses) || IsLoadCsvQuery(clauses);
    return {usePool, hasAllShortestPaths};
  }();  // IILE

  // Setup QueryExecution
  // its MemoryResource is mostly used for allocations done on Frame and storing `row`s
  if (usePool) {
    query_executions_.emplace_back(QueryExecution::Create(utils::PoolResource(128, kExecutionPoolMaxBlockSize)));
  } else {
    query_executions_.emplace_back(QueryExecution::Create(utils::MonotonicBufferResource(kExecutionMemoryBlockSize)));
  }

  query_executions_.emplace_back(QueryExecution::Create());
  auto &query_execution = query_executions_.back();
  query_execution_ptr = &query_execution;

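The removed selection logic uses the IILE idiom (immediately invoked lambda expression) twice to initialize const bindings from branchy logic; the idiom in isolation:

#include <string>

// Initialize a const value whose computation needs control flow by invoking
// a lambda in place.
const std::string resource_kind = [](bool use_pool) -> std::string {
  if (use_pool) return "pool";
  return "monotonic";
}(true);  // IILE
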
@@ -4283,13 +4294,14 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
      utils::Downcast<CypherQuery>(parsed_query.query) || utils::Downcast<ExplainQuery>(parsed_query.query) ||
      utils::Downcast<ProfileQuery>(parsed_query.query) || utils::Downcast<DumpQuery>(parsed_query.query) ||
      utils::Downcast<TriggerQuery>(parsed_query.query) || utils::Downcast<AnalyzeGraphQuery>(parsed_query.query) ||
      utils::Downcast<IndexQuery>(parsed_query.query) || utils::Downcast<DatabaseInfoQuery>(parsed_query.query) ||
      utils::Downcast<ConstraintQuery>(parsed_query.query);
      utils::Downcast<IndexQuery>(parsed_query.query) || utils::Downcast<EdgeIndexQuery>(parsed_query.query) ||
      utils::Downcast<DatabaseInfoQuery>(parsed_query.query) || utils::Downcast<ConstraintQuery>(parsed_query.query);

  if (!in_explicit_transaction_ && requires_db_transaction) {
    // TODO: ATM only a single database, will change when we have multiple database transactions
    bool could_commit = utils::Downcast<CypherQuery>(parsed_query.query) != nullptr;
    bool unique = utils::Downcast<IndexQuery>(parsed_query.query) != nullptr ||
                  utils::Downcast<EdgeIndexQuery>(parsed_query.query) != nullptr ||
                  utils::Downcast<ConstraintQuery>(parsed_query.query) != nullptr ||
                  upper_case_query.find(kSchemaAssert) != std::string::npos;
    SetupDatabaseTransaction(could_commit, unique);
@@ -4304,9 +4316,7 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,

  utils::Timer planning_timer;
  PreparedQuery prepared_query;
  utils::MemoryResource *memory_resource =
      std::visit([](auto &execution_memory) -> utils::MemoryResource * { return &execution_memory; },
                 query_execution->execution_memory);
  utils::MemoryResource *memory_resource = query_execution->execution_memory.resource();
  frame_change_collector_.reset();
  frame_change_collector_.emplace();
  if (utils::Downcast<CypherQuery>(parsed_query.query)) {
@@ -4317,15 +4327,18 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
    prepared_query = PrepareExplainQuery(std::move(parsed_query), &query_execution->summary,
                                         &query_execution->notifications, interpreter_context_, current_db_);
  } else if (utils::Downcast<ProfileQuery>(parsed_query.query)) {
    prepared_query = PrepareProfileQuery(std::move(parsed_query), in_explicit_transaction_, &query_execution->summary,
                                         &query_execution->notifications, interpreter_context_, current_db_,
                                         &query_execution->execution_memory_with_exception, user_or_role_,
                                         &transaction_status_, current_timeout_timer_, &*frame_change_collector_);
    prepared_query =
        PrepareProfileQuery(std::move(parsed_query), in_explicit_transaction_, &query_execution->summary,
                            &query_execution->notifications, interpreter_context_, current_db_, memory_resource,
                            user_or_role_, &transaction_status_, current_timeout_timer_, &*frame_change_collector_);
  } else if (utils::Downcast<DumpQuery>(parsed_query.query)) {
    prepared_query = PrepareDumpQuery(std::move(parsed_query), current_db_);
  } else if (utils::Downcast<IndexQuery>(parsed_query.query)) {
    prepared_query = PrepareIndexQuery(std::move(parsed_query), in_explicit_transaction_,
                                       &query_execution->notifications, current_db_);
  } else if (utils::Downcast<EdgeIndexQuery>(parsed_query.query)) {
    prepared_query = PrepareEdgeIndexQuery(std::move(parsed_query), in_explicit_transaction_,
                                           &query_execution->notifications, current_db_);
  } else if (utils::Downcast<AnalyzeGraphQuery>(parsed_query.query)) {
    prepared_query = PrepareAnalyzeGraphQuery(std::move(parsed_query), in_explicit_transaction_, current_db_);
  } else if (utils::Downcast<AuthQuery>(parsed_query.query)) {
@@ -4519,7 +4532,7 @@ void RunTriggersAfterCommit(dbms::DatabaseAccess db_acc, InterpreterContext *int
                            std::atomic<TransactionStatus> *transaction_status) {
  // Run the triggers
  for (const auto &trigger : db_acc->trigger_store()->AfterCommitTriggers().access()) {
    utils::MonotonicBufferResource execution_memory{kExecutionMemoryBlockSize};
    QueryAllocator execution_memory{};

    // create a new transaction for each trigger
    auto tx_acc = db_acc->Access();
@@ -4530,7 +4543,7 @@ void RunTriggersAfterCommit(dbms::DatabaseAccess db_acc, InterpreterContext *int
    auto trigger_context = original_trigger_context;
    trigger_context.AdaptForAccessor(&db_accessor);
    try {
      trigger.Execute(&db_accessor, &execution_memory, flags::run_time::GetExecutionTimeout(),
      trigger.Execute(&db_accessor, execution_memory.resource(), flags::run_time::GetExecutionTimeout(),
                      &interpreter_context->is_shutting_down, transaction_status, trigger_context);
    } catch (const utils::BasicException &exception) {
      spdlog::warn("Trigger '{}' failed with exception:\n{}", trigger.Name(), exception.what());
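
QueryAllocator replaces the per-trigger MonotonicBufferResource here and in Commit() below; its definition is outside this excerpt, so the following is only a sketch of the shape the call sites imply (an owned resource arrangement exposed through resource()), written with std::pmr stand-ins:

#include <memory_resource>

// Assumed shape of QueryAllocator based on its call sites; the real class
// and its internals are not shown in this diff.
class QueryAllocatorSketch {
 public:
  std::pmr::memory_resource *resource() { return &pool_; }

 private:
  std::pmr::monotonic_buffer_resource monotonic_{64UL * 1024UL};
  std::pmr::unsynchronized_pool_resource pool_{&monotonic_};
};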

@@ -4684,11 +4697,12 @@ void Interpreter::Commit() {
  if (trigger_context) {
    // Run the triggers
    for (const auto &trigger : db->trigger_store()->BeforeCommitTriggers().access()) {
      utils::MonotonicBufferResource execution_memory{kExecutionMemoryBlockSize};
      QueryAllocator execution_memory{};
      AdvanceCommand();
      try {
        trigger.Execute(&*current_db_.execution_db_accessor_, &execution_memory, flags::run_time::GetExecutionTimeout(),
                        &interpreter_context_->is_shutting_down, &transaction_status_, *trigger_context);
        trigger.Execute(&*current_db_.execution_db_accessor_, execution_memory.resource(),
                        flags::run_time::GetExecutionTimeout(), &interpreter_context_->is_shutting_down,
                        &transaction_status_, *trigger_context);
      } catch (const utils::BasicException &e) {
        throw utils::BasicException(
            fmt::format("Trigger '{}' caused the transaction to fail.\nException: {}", trigger.Name(), e.what()));
Some files were not shown because too many files have changed in this diff.