Merge branch 'master' of github.com:memgraph/memgraph into disk-rollback-on-fail
commit d083226487
.clang-tidy
@@ -64,8 +64,8 @@ Checks: '*,
          -readability-identifier-length,
          -misc-no-recursion,
          -concurrency-mt-unsafe,
-         -bugprone-easily-swappable-parameters'
+         -bugprone-easily-swappable-parameters,
+         -bugprone-unchecked-optional-access'
 WarningsAsErrors: ''
 HeaderFilterRegex: 'src/.*'
 AnalyzeTemporaryDtors: false
.github/pull_request_template.md (22 changed lines)
@@ -1,14 +1,28 @@
### Description

Please briefly explain the changes you made here.


Please delete either the [master < EPIC] or [master < Task] part, depending on what you need.

[master < Epic] PR
- [ ] Check, and update documentation if necessary
- [ ] Write E2E tests
- [ ] Compare the [benchmarking results](https://bench-graph.memgraph.com/) between the master branch and the Epic branch
- [ ] Provide the full content or a guide for the final git message
  - [FINAL GIT MESSAGE]

[master < Task] PR
- [ ] Check, and update documentation if necessary
- [ ] Provide the full content or a guide for the final git message
  - **[FINAL GIT MESSAGE]**

To keep the docs changelog up to date, one more thing to do:
- [ ] Write a release note here, including added/changed clauses

### Documentation checklist
- [ ] Add the documentation label tag
- [ ] Add the bug / feature label tag
- [ ] Add the milestone for which this feature is intended
  - If not known, set for a later milestone
- [ ] Write a release note, including added/changed clauses
  - **[Release note text]**
- [ ] Link the documentation PR here
  - **[Documentation PR link]**
- [ ] Tag someone from the docs team in the comments
.github/workflows/diff.yaml (553 changed lines)
@@ -4,10 +4,6 @@ concurrency:
   cancel-in-progress: true

 on:
-  push:
-    branches:
-      - master
   workflow_dispatch:
   pull_request:
     paths-ignore:
       - "docs/**"
@@ -19,11 +15,16 @@ on:
 jobs:
   community_build:
     name: "Community build"
-    runs-on: [self-hosted, Linux, X64, Diff]
+    runs-on: [self-hosted, Linux, X64, DockerMgBuild]
     timeout-minutes: 60
     env:
       THREADS: 24
       MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
       MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
+      OS: debian-11
+      TOOLCHAIN: v5
+      ARCH: amd
+      BUILD_TYPE: RelWithDebInfo

     steps:
       - name: Set up repository
@@ -33,35 +34,56 @@ jobs:
         # branches and tags. (default: 1)
         fetch-depth: 0

-      - name: Build community binaries
+      - name: Spin up mgbuild container
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Initialize dependencies.
-          ./init
-
-          # Build community binaries.
-          cd build
-          cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DMG_ENTERPRISE=OFF ..
-          make -j$THREADS
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            run
+
+      - name: Build release binaries
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --build-type $BUILD_TYPE \
+            --threads $THREADS \
+            build-memgraph --community

       - name: Run unit tests
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Run unit tests.
-          cd build
-          ctest -R memgraph__unit --output-on-failure -j$THREADS
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph unit
+
+      - name: Stop mgbuild container
+        if: always()
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            stop --remove

   code_analysis:
     name: "Code analysis"
-    runs-on: [self-hosted, Linux, X64, Diff]
+    runs-on: [self-hosted, Linux, X64, DockerMgBuild]
     timeout-minutes: 60
     env:
       THREADS: 24
       MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
       MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
+      OS: debian-11
+      TOOLCHAIN: v5
+      ARCH: amd
+      BUILD_TYPE: Debug

     steps:
       - name: Set up repository
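The pattern above repeats in every job below: instead of activating the toolchain directly on the runner, each job now delegates to release/package/mgbuild.sh. A minimal sketch of that lifecycle, assuming only what the diff itself shows (the run, build-memgraph, test-memgraph, and stop subcommands and their flags; exact semantics live in the script):

    #!/usr/bin/env bash
    # Sketch of the container lifecycle the updated jobs follow.
    set -euo pipefail

    TOOLCHAIN=v5 OS=debian-11 ARCH=amd BUILD_TYPE=RelWithDebInfo THREADS=24

    # 1. Start a long-lived build container for this toolchain/OS/arch.
    ./release/package/mgbuild.sh --toolchain $TOOLCHAIN --os $OS --arch $ARCH run

    # 2. Build memgraph inside the container.
    ./release/package/mgbuild.sh --toolchain $TOOLCHAIN --os $OS --arch $ARCH \
      --build-type $BUILD_TYPE --threads $THREADS build-memgraph --community

    # 3. Run a test suite inside the same container.
    ./release/package/mgbuild.sh --toolchain $TOOLCHAIN --os $OS --arch $ARCH \
      --threads $THREADS test-memgraph unit

    # 4. Tear the container down even on failure (the jobs use `if: always()`).
    ./release/package/mgbuild.sh --toolchain $TOOLCHAIN --os $OS --arch $ARCH \
      stop --remove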
@@ -71,6 +93,14 @@ jobs:
         # branches and tags. (default: 1)
         fetch-depth: 0

+      - name: Spin up mgbuild container
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            run
+
       # This is also needed if we want to do a comparison against other branches
       # See https://github.community/t/checkout-code-fails-when-it-runs-lerna-run-test-since-master/17920
       - name: Fetch all history for all tags and branches
@@ -78,11 +108,13 @@

       - name: Initialize deps
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Initialize dependencies.
-          ./init
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --build-type $BUILD_TYPE \
+            --threads $THREADS \
+            build-memgraph --init-only

       - name: Set base branch
         if: ${{ github.event_name == 'pull_request' }}
@@ -96,45 +128,43 @@

       - name: Python code analysis
         run: |
-          CHANGED_FILES=$(git diff -U0 ${{ env.BASE_BRANCH }}... --name-only --diff-filter=d)
-          for file in ${CHANGED_FILES}; do
-            echo ${file}
-            if [[ ${file} == *.py ]]; then
-              python3 -m black --check --diff ${file}
-              python3 -m isort --profile black --check-only --diff ${file}
-            fi
-          done
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph code-analysis --base-branch "${{ env.BASE_BRANCH }}"

       - name: Build combined ASAN, UBSAN and coverage binaries
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          cd build
-          cmake -DTEST_COVERAGE=ON -DASAN=ON -DUBSAN=ON ..
-          make -j$THREADS memgraph__unit
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --build-type $BUILD_TYPE \
+            --threads $THREADS \
+            build-memgraph --coverage --asan --ubsan

       - name: Run unit tests
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Run unit tests. It is restricted to 2 threads intentionally, because higher concurrency makes the timing related tests unstable.
-          cd build
-          LSAN_OPTIONS=suppressions=$PWD/../tools/lsan.supp UBSAN_OPTIONS=halt_on_error=1 ctest -R memgraph__unit --output-on-failure -j2
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph unit-coverage

       - name: Compute code coverage
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Compute code coverage.
-          cd tools/github
-          ./coverage_convert
-
-          # Package code coverage.
-          cd generated
-          tar -czf code_coverage.tar.gz coverage.json html report.json summary.rmu
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph code-coverage

       - name: Save code coverage
         uses: actions/upload-artifact@v4
@@ -144,21 +174,36 @@

       - name: Run clang-tidy
         run: |
-          source /opt/toolchain-v4/activate
-
-          # Restrict clang-tidy results only to the modified parts
-          git diff -U0 ${{ env.BASE_BRANCH }}... -- src | ./tools/github/clang-tidy/clang-tidy-diff.py -p 1 -j $THREADS -path build -regex ".+\.cpp" | tee ./build/clang_tidy_output.txt
-
-          # Fail if any warning is reported
-          ! cat ./build/clang_tidy_output.txt | ./tools/github/clang-tidy/grep_error_lines.sh > /dev/null
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph clang-tidy --base-branch "${{ env.BASE_BRANCH }}"
+
+      - name: Stop mgbuild container
+        if: always()
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            stop --remove

   debug_build:
     name: "Debug build"
-    runs-on: [self-hosted, Linux, X64, Diff]
+    runs-on: [self-hosted, Linux, X64, DockerMgBuild]
     timeout-minutes: 100
     env:
       THREADS: 24
       MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
       MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
+      OS: debian-11
+      TOOLCHAIN: v5
+      ARCH: amd
+      BUILD_TYPE: Debug

     steps:
       - name: Set up repository
@@ -168,58 +213,95 @@
         # branches and tags. (default: 1)
         fetch-depth: 0

-      - name: Build debug binaries
+      - name: Spin up mgbuild container
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Initialize dependencies.
-          ./init
-
-          # Build debug binaries.
-          cd build
-          cmake ..
-          make -j$THREADS
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            run
+
+      - name: Build release binaries
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --build-type $BUILD_TYPE \
+            --threads $THREADS \
+            build-memgraph

       - name: Run leftover CTest tests
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Run leftover CTest tests (all except unit and benchmark tests).
-          cd build
-          ctest -E "(memgraph__unit|memgraph__benchmark)" --output-on-failure
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph leftover-CTest

       - name: Run drivers tests
         run: |
-          ./tests/drivers/run.sh
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph drivers

       - name: Run integration tests
         run: |
-          tests/integration/run.sh
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph integration

       - name: Run cppcheck and clang-format
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Run cppcheck and clang-format.
-          cd tools/github
-          ./cppcheck_and_clang_format diff
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph cppcheck-and-clang-format

       - name: Save cppcheck and clang-format errors
         uses: actions/upload-artifact@v4
         with:
           name: "Code coverage(Debug build)"
           path: tools/github/cppcheck_and_clang_format.txt

+      - name: Stop mgbuild container
+        if: always()
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            stop --remove

   release_build:
     name: "Release build"
-    runs-on: [self-hosted, Linux, X64, Diff]
+    runs-on: [self-hosted, Linux, X64, DockerMgBuild]
     timeout-minutes: 100
     env:
       THREADS: 24
       MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
       MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
+      OS: debian-11
+      TOOLCHAIN: v5
+      ARCH: amd
+      BUILD_TYPE: Release

     steps:
       - name: Set up repository
@@ -229,26 +311,33 @@
         # branches and tags. (default: 1)
         fetch-depth: 0

+      - name: Spin up mgbuild container
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            run
+
       - name: Build release binaries
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Initialize dependencies.
-          ./init
-
-          # Build release binaries.
-          cd build
-          cmake -DCMAKE_BUILD_TYPE=Release ..
-          make -j$THREADS
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --build-type $BUILD_TYPE \
+            --threads $THREADS \
+            build-memgraph

       - name: Run GQL Behave tests
         run: |
-          cd tests
-          ./setup.sh /opt/toolchain-v4/activate
-          cd gql_behave
-          ./continuous_integration
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph gql-behave

       - name: Save quality assurance status
         uses: actions/upload-artifact@v4
@@ -260,14 +349,19 @@

       - name: Run unit tests
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Run unit tests.
-          cd build
-          ctest -R memgraph__unit --output-on-failure -j$THREADS
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --threads $THREADS \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph unit

       # This step will be skipped because the e2e stream tests have been disabled
       # We need to fix this as soon as possible
       - name: Ensure Kafka and Pulsar are up
         if: false
         run: |
           cd tests/e2e/streams/kafka
           docker-compose up -d
@@ -276,13 +370,17 @@

       - name: Run e2e tests
         run: |
-          cd tests
-          ./setup.sh /opt/toolchain-v4/activate
-          source ve3/bin/activate_e2e
-          cd e2e
-          ./run.sh
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph e2e

       # Same as two steps prior
       - name: Ensure Kafka and Pulsar are down
         if: false
         run: |
           cd tests/e2e/streams/kafka
           docker-compose down
@@ -291,59 +389,92 @@

       - name: Run stress test (plain)
         run: |
-          cd tests/stress
-          source ve3/bin/activate
-          ./continuous_integration
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph stress-plain

       - name: Run stress test (SSL)
         run: |
-          cd tests/stress
-          source ve3/bin/activate
-          ./continuous_integration --use-ssl
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph stress-ssl

       - name: Run durability test
         run: |
-          cd tests/stress
-          source ve3/bin/activate
-          python3 durability --num-steps 5
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph durability

       - name: Create enterprise DEB package
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-          cd build
-
-          # create mgconsole
-          # we use the -B to force the build
-          make -j$THREADS -B mgconsole
-
-          # Create enterprise DEB package.
-          mkdir output && cd output
-          cpack -G DEB --config ../CPackConfig.cmake
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            package-memgraph
+
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            copy --package

       - name: Save enterprise DEB package
         uses: actions/upload-artifact@v4
         with:
           name: "Enterprise DEB package"
-          path: build/output/memgraph*.deb
+          path: build/output/${{ env.OS }}/memgraph*.deb
+
+      - name: Copy build logs
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            copy --build-logs

       - name: Save test data
         uses: actions/upload-artifact@v4
         if: always()
         with:
           name: "Test data(Release build)"
-          path: |
-            # multiple paths could be defined
-            build/logs
+          path: build/logs
+
+      - name: Stop mgbuild container
+        if: always()
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            stop --remove

   release_jepsen_test:
     name: "Release Jepsen Test"
-    runs-on: [self-hosted, Linux, X64, Debian10, JepsenControl]
-    #continue-on-error: true
+    runs-on: [self-hosted, Linux, X64, DockerMgBuild]
     timeout-minutes: 80
     env:
       THREADS: 24
+      MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
+      MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
+      OS: debian-10
+      TOOLCHAIN: v4
+      ARCH: amd
+      BUILD_TYPE: RelWithDebInfo

     steps:
       - name: Set up repository
@@ -353,16 +484,31 @@
         # branches and tags. (default: 1)
         fetch-depth: 0

+      - name: Spin up mgbuild container
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            run
+
       - name: Build release binaries
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-          # Initialize dependencies.
-          ./init
-          # Build only the memgraph release binary.
-          cd build
-          cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
-          make -j$THREADS memgraph
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --build-type $BUILD_TYPE \
+            --threads $THREADS \
+            build-memgraph
+
+      - name: Copy memgraph binary
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            copy --binary

       - name: Refresh Jepsen Cluster
         run: |
@@ -381,13 +527,27 @@
           name: "Jepsen Report"
           path: tests/jepsen/Jepsen.tar.gz

+      - name: Stop mgbuild container
+        if: always()
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            stop --remove

   release_benchmarks:
     name: "Release benchmarks"
-    runs-on: [self-hosted, Linux, X64, Diff, Gen7]
+    runs-on: [self-hosted, Linux, X64, DockerMgBuild, Gen7]
     timeout-minutes: 60
     env:
       THREADS: 24
       MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
       MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}
+      OS: debian-11
+      TOOLCHAIN: v5
+      ARCH: amd
+      BUILD_TYPE: Release

     steps:
       - name: Set up repository
@@ -397,25 +557,33 @@
         # branches and tags. (default: 1)
         fetch-depth: 0

+      - name: Spin up mgbuild container
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            run
+
       - name: Build release binaries
         run: |
-          # Activate toolchain.
-          source /opt/toolchain-v4/activate
-
-          # Initialize dependencies.
-          ./init
-
-          # Build only memgraph release binaries.
-          cd build
-          cmake -DCMAKE_BUILD_TYPE=release ..
-          make -j$THREADS
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --build-type $BUILD_TYPE \
+            --threads $THREADS \
+            build-memgraph

       - name: Run macro benchmarks
         run: |
-          cd tests/macro_benchmark
-          ./harness QuerySuite MemgraphRunner \
-            --groups aggregation 1000_create unwind_create dense_expand match \
-            --no-strict
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph macro-benchmark

       - name: Get branch name (merge)
         if: github.event_name != 'pull_request'
@@ -429,30 +597,49 @@

       - name: Upload macro benchmark results
         run: |
-          cd tools/bench-graph-client
-          virtualenv -p python3 ve3
-          source ve3/bin/activate
-          pip install -r requirements.txt
-          ./main.py --benchmark-name "macro_benchmark" \
-                    --benchmark-results "../../tests/macro_benchmark/.harness_summary" \
-                    --github-run-id "${{ github.run_id }}" \
-                    --github-run-number "${{ github.run_number }}" \
-                    --head-branch-name "${{ env.BRANCH_NAME }}"
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph upload-to-bench-graph \
+            --benchmark-name "macro_benchmark" \
+            --benchmark-results "../../tests/macro_benchmark/.harness_summary" \
+            --github-run-id ${{ github.run_id }} \
+            --github-run-number ${{ github.run_number }} \
+            --head-branch-name ${{ env.BRANCH_NAME }}

       # TODO (andi) No need for path flags and for --disk-storage and --in-memory-analytical
       - name: Run mgbench
         run: |
-          cd tests/mgbench
-          ./benchmark.py vendor-native --num-workers-for-benchmark 12 --export-results benchmark_result.json pokec/medium/*/*
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph mgbench

       - name: Upload mgbench results
         run: |
-          cd tools/bench-graph-client
-          virtualenv -p python3 ve3
-          source ve3/bin/activate
-          pip install -r requirements.txt
-          ./main.py --benchmark-name "mgbench" \
-                    --benchmark-results "../../tests/mgbench/benchmark_result.json" \
-                    --github-run-id "${{ github.run_id }}" \
-                    --github-run-number "${{ github.run_number }}" \
-                    --head-branch-name "${{ env.BRANCH_NAME }}"
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            --enterprise-license $MEMGRAPH_ENTERPRISE_LICENSE \
+            --organization-name $MEMGRAPH_ORGANIZATION_NAME \
+            test-memgraph upload-to-bench-graph \
+            --benchmark-name "mgbench" \
+            --benchmark-results "../../tests/mgbench/benchmark_result.json" \
+            --github-run-id "${{ github.run_id }}" \
+            --github-run-number "${{ github.run_number }}" \
+            --head-branch-name "${{ env.BRANCH_NAME }}"

+      - name: Stop mgbuild container
+        if: always()
+        run: |
+          ./release/package/mgbuild.sh \
+            --toolchain $TOOLCHAIN \
+            --os $OS \
+            --arch $ARCH \
+            stop --remove
CMakeLists.txt
@@ -300,6 +300,19 @@ endif()

 option(ENABLE_JEMALLOC "Use jemalloc" ON)

+option(MG_MEMORY_PROFILE "If build should be setup for memory profiling" OFF)
+if (MG_MEMORY_PROFILE AND ENABLE_JEMALLOC)
+  message(STATUS "Jemalloc has been disabled because MG_MEMORY_PROFILE is enabled")
+  set(ENABLE_JEMALLOC OFF)
+endif ()
+if (MG_MEMORY_PROFILE AND ASAN)
+  message(STATUS "ASAN has been disabled because MG_MEMORY_PROFILE is enabled")
+  set(ASAN OFF)
+endif ()
+if (MG_MEMORY_PROFILE)
+  add_compile_definitions(MG_MEMORY_PROFILE)
+endif ()
+
 if (ASAN)
   message(WARNING "Disabling jemalloc as it doesn't work well with ASAN")
   set(ENABLE_JEMALLOC OFF)
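A quick configure sketch for the new option; the flag name comes straight from the hunk above, the rest is standard CMake usage:

    # Configure a memory-profiling build. Per the CMake logic above, this forces
    # ENABLE_JEMALLOC and ASAN off and adds the MG_MEMORY_PROFILE compile definition.
    mkdir -p build && cd build
    cmake -DMG_MEMORY_PROFILE=ON ..
    make -j$(nproc)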
@@ -59,7 +59,7 @@ MEMGRAPH_BUILD_DEPS=(
     doxygen graphviz # source documentation generators
     which nodejs golang custom-golang1.18.9 # for driver tests
     zip unzip java-11-openjdk-devel java-17-openjdk java-17-openjdk-devel custom-maven3.9.3 # for driver tests
-    sbcl # for custom Lisp C++ preprocessing
+    cl-asdf common-lisp-controller sbcl # for custom Lisp C++ preprocessing
     autoconf # for jemalloc code generation
     libtool # for protobuf code generation
     cyrus-sasl-devel
@@ -162,6 +162,30 @@ install() {
       fi
       continue
     fi
+    if [ "$pkg" == doxygen ]; then
+      if ! dnf list installed doxygen >/dev/null 2>/dev/null; then
+        dnf install -y https://dl.rockylinux.org/pub/rocky/9/CRB/x86_64/os/Packages/d/doxygen-1.9.1-11.el9.x86_64.rpm
+      fi
+      continue
+    fi
+    if [ "$pkg" == cl-asdf ]; then
+      if ! dnf list installed cl-asdf >/dev/null 2>/dev/null; then
+        dnf install -y https://pkgs.sysadmins.ws/el8/base/x86_64/cl-asdf-20101028-18.el8.noarch.rpm
+      fi
+      continue
+    fi
+    if [ "$pkg" == common-lisp-controller ]; then
+      if ! dnf list installed common-lisp-controller >/dev/null 2>/dev/null; then
+        dnf install -y https://pkgs.sysadmins.ws/el8/base/x86_64/common-lisp-controller-7.4-20.el8.noarch.rpm
+      fi
+      continue
+    fi
+    if [ "$pkg" == sbcl ]; then
+      if ! dnf list installed sbcl >/dev/null 2>/dev/null; then
+        dnf install -y https://pkgs.sysadmins.ws/el8/base/x86_64/sbcl-2.0.1-4.el8.x86_64.rpm
+      fi
+      continue
+    fi
     if [ "$pkg" == PyYAML ]; then
       if [ -z ${SUDO_USER+x} ]; then # Running as root (e.g. Docker).
         pip3 install --user PyYAML
init (34 changed lines)
@@ -14,6 +14,7 @@ function print_help () {
     echo "Optional arguments:"
     echo -e "  -h\tdisplay this help and exit"
     echo -e "  --without-libs-setup\tskip the step for setting up libs"
+    echo -e "  --ci\tscript is being run inside ci"
 }

 function setup_virtualenv () {
@@ -35,6 +36,7 @@
 }

 setup_libs=true
+ci=false
 if [[ $# -eq 1 && "$1" == "-h" ]]; then
   print_help
   exit 0
@@ -45,6 +47,10 @@ else
         shift
         setup_libs=false
       ;;
+      --ci)
+        shift
+        ci=true
+      ;;
       *)
         # unknown option
         echo "Invalid argument provided: $1"
@@ -76,11 +82,13 @@ if [[ "$setup_libs" == "true" ]]; then
 fi

 # Fix for centos 7 during release
-if [ "${DISTRO}" = "centos-7" ] || [ "${DISTRO}" = "debian-11" ] || [ "${DISTRO}" = "amzn-2" ]; then
-  if python3 -m pip show virtualenv >/dev/null 2>/dev/null; then
-    python3 -m pip uninstall -y virtualenv
+if [[ "$ci" == "false" ]]; then
+  if [ "${DISTRO}" = "centos-7" ] || [ "${DISTRO}" = "debian-11" ] || [ "${DISTRO}" = "amzn-2" ]; then
+    if python3 -m pip show virtualenv >/dev/null 2>/dev/null; then
+      python3 -m pip uninstall -y virtualenv
+    fi
+    python3 -m pip install virtualenv
   fi
-  python3 -m pip install virtualenv
 fi

 # setup gql_behave dependencies
@@ -119,14 +127,16 @@ fi
 # Install precommit hook except on old operating systems because we don't
 # develop on them -> pre-commit hook not required -> we can use latest
 # packages.
-if [ "${DISTRO}" != "centos-7" ] && [ "$DISTRO" != "debian-10" ] && [ "${DISTRO}" != "ubuntu-18.04" ] && [ "${DISTRO}" != "amzn-2" ]; then
-  python3 -m pip install pre-commit
-  python3 -m pre_commit install
-  # Install py format tools for usage during the development.
-  echo "Install black formatter"
-  python3 -m pip install black==23.1.*
-  echo "Install isort"
-  python3 -m pip install isort==5.12.*
+if [[ "$ci" == "false" ]]; then
+  if [ "${DISTRO}" != "centos-7" ] && [ "$DISTRO" != "debian-10" ] && [ "${DISTRO}" != "ubuntu-18.04" ] && [ "${DISTRO}" != "amzn-2" ]; then
+    python3 -m pip install pre-commit
+    python3 -m pre_commit install
+    # Install py format tools for usage during the development.
+    echo "Install black formatter"
+    python3 -m pip install black==23.1.*
+    echo "Install isort"
+    python3 -m pip install isort==5.12.*
+  fi
 fi

 # Link `include/mgp.py` with `release/mgp/mgp.py`
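In short, the new flag gates the developer-only conveniences. A usage sketch, assuming nothing beyond what the hunks above add:

    # CI runners: skip the virtualenv pin/reinstall dance and the
    # pre-commit/black/isort developer tooling.
    ./init --ci

    # Developer machines: unchanged behavior, full setup.
    ./init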
|
@ -127,6 +127,7 @@ declare -A primary_urls=(
|
||||
["jemalloc"]="http://$local_cache_host/git/jemalloc.git"
|
||||
["range-v3"]="http://$local_cache_host/git/range-v3.git"
|
||||
["nuraft"]="http://$local_cache_host/git/NuRaft.git"
|
||||
["asio"]="http://$local_cache_host/git/asio.git"
|
||||
)
|
||||
|
||||
# The goal of secondary urls is to have links to the "source of truth" of
|
||||
@@ -157,6 +158,7 @@ declare -A secondary_urls=(
   ["jemalloc"]="https://github.com/jemalloc/jemalloc.git"
   ["range-v3"]="https://github.com/ericniebler/range-v3.git"
   ["nuraft"]="https://github.com/eBay/NuRaft.git"
+  ["asio"]="https://github.com/chriskohlhoff/asio.git"
 )

 # antlr
@@ -266,13 +268,13 @@ repo_clone_try_double "${primary_urls[jemalloc]}" "${secondary_urls[jemalloc]}"
 pushd jemalloc

 ./autogen.sh
-MALLOC_CONF="retain:false,percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000" \
+MALLOC_CONF="background_thread:true,retain:false,percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000" \
 ./configure \
   --disable-cxx \
   --with-lg-page=12 \
   --with-lg-hugepage=21 \
   --enable-shared=no --prefix=$working_dir \
-  --with-malloc-conf="retain:false,percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000"
+  --with-malloc-conf="background_thread:true,retain:false,percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000"

 make -j$CPUS install
 popd
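For context, --with-malloc-conf bakes these options in as compile-time defaults, so background_thread:true now ships as the default. jemalloc also reads the MALLOC_CONF environment variable at process start, and that documented precedence (environment over compiled-in default) should let the new setting be flipped off for an experiment without rebuilding; a hedged sketch, with an illustrative binary path:

    # Override the compiled-in background_thread default at run time.
    MALLOC_CONF="background_thread:false" ./build/memgraph --version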
@@ -286,5 +288,7 @@ nuraft_tag="v2.1.0"
 repo_clone_try_double "${primary_urls[nuraft]}" "${secondary_urls[nuraft]}" "nuraft" "$nuraft_tag" true
 pushd nuraft
 git apply ../nuraft2.1.0.patch
+asio_tag="asio-1-29-0"
+repo_clone_try_double "${primary_urls[asio]}" "${secondary_urls[asio]}" "asio" "$asio_tag" true
 ./prepare.sh
 popd
release/package/amd-builders-v4.yml (new file, 73 lines)
@@ -0,0 +1,73 @@
version: "3"
services:
  mgbuild_v4_amzn-2:
    image: "memgraph/mgbuild:v4_amzn-2"
    build:
      context: amzn-2
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_amzn-2"

  mgbuild_v4_centos-7:
    image: "memgraph/mgbuild:v4_centos-7"
    build:
      context: centos-7
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_centos-7"

  mgbuild_v4_centos-9:
    image: "memgraph/mgbuild:v4_centos-9"
    build:
      context: centos-9
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_centos-9"

  mgbuild_v4_debian-10:
    image: "memgraph/mgbuild:v4_debian-10"
    build:
      context: debian-10
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_debian-10"

  mgbuild_v4_debian-11:
    image: "memgraph/mgbuild:v4_debian-11"
    build:
      context: debian-11
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_debian-11"

  mgbuild_v4_fedora-36:
    image: "memgraph/mgbuild:v4_fedora-36"
    build:
      context: fedora-36
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_fedora-36"

  mgbuild_v4_ubuntu-18.04:
    image: "memgraph/mgbuild:v4_ubuntu-18.04"
    build:
      context: ubuntu-18.04
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_ubuntu-18.04"

  mgbuild_v4_ubuntu-20.04:
    image: "memgraph/mgbuild:v4_ubuntu-20.04"
    build:
      context: ubuntu-20.04
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_ubuntu-20.04"

  mgbuild_v4_ubuntu-22.04:
    image: "memgraph/mgbuild:v4_ubuntu-22.04"
    build:
      context: ubuntu-22.04
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_ubuntu-22.04"
release/package/amd-builders-v5.yml (new file, 81 lines)
@@ -0,0 +1,81 @@
version: "3"
services:
  mgbuild_v5_amzn-2:
    image: "memgraph/mgbuild:v5_amzn-2"
    build:
      context: amzn-2
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_amzn-2"

  mgbuild_v5_centos-7:
    image: "memgraph/mgbuild:v5_centos-7"
    build:
      context: centos-7
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_centos-7"

  mgbuild_v5_centos-9:
    image: "memgraph/mgbuild:v5_centos-9"
    build:
      context: centos-9
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_centos-9"

  mgbuild_v5_debian-11:
    image: "memgraph/mgbuild:v5_debian-11"
    build:
      context: debian-11
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_debian-11"

  mgbuild_v5_debian-12:
    image: "memgraph/mgbuild:v5_debian-12"
    build:
      context: debian-12
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_debian-12"

  mgbuild_v5_fedora-38:
    image: "memgraph/mgbuild:v5_fedora-38"
    build:
      context: fedora-38
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_fedora-38"

  mgbuild_v5_fedora-39:
    image: "memgraph/mgbuild:v5_fedora-39"
    build:
      context: fedora-39
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_fedora-39"

  mgbuild_v5_rocky-9.3:
    image: "memgraph/mgbuild:v5_rocky-9.3"
    build:
      context: rocky-9.3
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_rocky-9.3"

  mgbuild_v5_ubuntu-20.04:
    image: "memgraph/mgbuild:v5_ubuntu-20.04"
    build:
      context: ubuntu-20.04
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_ubuntu-20.04"

  mgbuild_v5_ubuntu-22.04:
    image: "memgraph/mgbuild:v5_ubuntu-22.04"
    build:
      context: ubuntu-22.04
      args:
        TOOLCHAIN_VERSION: "v5"
    container_name: "mgbuild_v5_ubuntu-22.04"
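These compose files are presumably consumed through release/package/mgbuild.sh, but they can also be exercised directly. A hypothetical manual invocation (the service name is taken from the file above; the direct-compose workflow itself is an assumption):

    # Build and start one builder image by hand, then shut everything down.
    docker compose -f release/package/amd-builders-v5.yml build mgbuild_v5_debian-11
    docker compose -f release/package/amd-builders-v5.yml up -d mgbuild_v5_debian-11
    docker compose -f release/package/amd-builders-v5.yml down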
release/package/amzn-2/Dockerfile
@@ -7,9 +7,34 @@ RUN yum -y update \
 # Do NOT be smart here and clean the cache because the container is used in the
 # stateful context.

-RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz \
-    -O ${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz \
-    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz -C /opt \
-    && rm ${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz
+# Download and install toolchain
+RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz \
+    -O toolchain-${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz \
+    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz -C /opt \
+    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-amzn-2-x86_64.tar.gz
+
+# Install toolchain run deps and memgraph build deps
+SHELL ["/bin/bash", "-c"]
+RUN git clone https://github.com/memgraph/memgraph.git \
+    && cd memgraph \
+    && ./environment/os/amzn-2.sh install TOOLCHAIN_RUN_DEPS \
+    && ./environment/os/amzn-2.sh install MEMGRAPH_BUILD_DEPS \
+    && cd .. && rm -rf memgraph
+
+# Add mgdeps-cache and bench-graph-api hostnames
+RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts
+
+# Create mg user and set as default
+RUN useradd -m -s /bin/bash mg
+USER mg
+
+# Install rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Fix node
+RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
+
+# Install PyYAML (only for amzn-2, centos-7, centos-9 and rocky-9)
+RUN pip3 install --user PyYAML
+
+ENTRYPOINT ["sleep", "infinity"]
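The ENTRYPOINT ["sleep", "infinity"] line explains the design: the image does nothing on its own and simply stays alive so build and test commands can be exec'd into it as the mg user. A sketch of that pattern (the exec'd command is illustrative; image and container names follow the compose files above):

    # Start the idle builder container, exec a command into it, clean up.
    docker run -d --name mgbuild_v4_amzn-2 memgraph/mgbuild:v4_amzn-2
    docker exec -u mg mgbuild_v4_amzn-2 bash -c "source /opt/toolchain-v4/activate && gcc --version"
    docker rm -f mgbuild_v4_amzn-2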
release/package/arm-builders-v4.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
version: "3"

services:
  mgbuild_v4_debian-11-arm:
    image: "memgraph/mgbuild:v4_debian-11-arm"
    build:
      context: debian-11-arm
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_debian-11-arm"

  mgbuild_v4_ubuntu_v4_22.04-arm:
    image: "memgraph/mgbuild:v4_ubuntu-22.04-arm"
    build:
      context: ubuntu-22.04-arm
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_v4_ubuntu-22.04-arm"
release/package/arm-builders-v5.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
version: "3"

services:
  debian-12-arm:
    image: "memgraph/mgbuild:v5_debian-12-arm"
    build:
      context: debian-12-arm
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_debian-12-arm"

  ubuntu-22.04-arm:
    image: "memgraph/mgbuild:v5_ubuntu-22.04-arm"
    build:
      context: ubuntu-22.04-arm
      args:
        TOOLCHAIN_VERSION: "v4"
    container_name: "mgbuild_ubuntu-22.04-arm"
(deleted file)
@@ -1,11 +0,0 @@
-version: "3"
-
-services:
-  debian-11-arm:
-    build:
-      context: debian-11-arm
-    container_name: "mgbuild_debian-11-arm"
-  ubuntu-2204-arm:
-    build:
-      context: ubuntu-22.04-arm
-    container_name: "mgbuild_ubuntu-22.04-arm"
release/package/centos-7/Dockerfile
@@ -7,9 +7,33 @@ RUN yum -y update \
 # Do NOT be smart here and clean the cache because the container is used in the
 # stateful context.

-RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz \
-    -O ${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz \
-    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz -C /opt \
-    && rm ${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz
+RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz \
+    -O toolchain-${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz \
+    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz -C /opt \
+    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-centos-7-x86_64.tar.gz
+
+# Install toolchain run deps and memgraph build deps
+SHELL ["/bin/bash", "-c"]
+RUN git clone https://github.com/memgraph/memgraph.git \
+    && cd memgraph \
+    && ./environment/os/centos-7.sh install TOOLCHAIN_RUN_DEPS \
+    && ./environment/os/centos-7.sh install MEMGRAPH_BUILD_DEPS \
+    && cd .. && rm -rf memgraph
+
+# Add mgdeps-cache and bench-graph-api hostnames
+RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts
+
+# Create mg user and set as default
+RUN useradd -m -s /bin/bash mg
+USER mg
+
+# Install rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Fix node
+RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
+
+# Install PyYAML (only for amzn-2, centos-7, centos-9 and rocky-9)
+RUN pip3 install --user PyYAML
+
+ENTRYPOINT ["sleep", "infinity"]
release/package/centos-9/Dockerfile
@@ -7,9 +7,33 @@ RUN yum -y update \
 # Do NOT be smart here and clean the cache because the container is used in the
 # stateful context.

-RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz \
-    -O ${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz \
-    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz -C /opt \
-    && rm ${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz
+RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz \
+    -O toolchain-${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz \
+    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz -C /opt \
+    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-centos-9-x86_64.tar.gz
+
+# Install toolchain run deps and memgraph build deps
+SHELL ["/bin/bash", "-c"]
+RUN git clone https://github.com/memgraph/memgraph.git \
+    && cd memgraph \
+    && ./environment/os/centos-9.sh install TOOLCHAIN_RUN_DEPS \
+    && ./environment/os/centos-9.sh install MEMGRAPH_BUILD_DEPS \
+    && cd .. && rm -rf memgraph
+
+# Add mgdeps-cache and bench-graph-api hostnames
+RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts
+
+# Create mg user and set as default
+RUN useradd -m -s /bin/bash mg
+USER mg
+
+# Install rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Fix node
+RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
+
+# Install PyYAML (only for amzn-2, centos-7, centos-9 and rocky-9)
+RUN pip3 install --user PyYAML
+
+ENTRYPOINT ["sleep", "infinity"]
release/package/debian-10/Dockerfile
@@ -10,9 +10,30 @@ RUN apt update && apt install -y \
 # Do NOT be smart here and clean the cache because the container is used in the
 # stateful context.

-RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz \
-    -O ${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz \
-    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz -C /opt \
-    && rm ${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz
+RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz \
+    -O toolchain-${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz \
+    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz -C /opt \
+    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-debian-10-amd64.tar.gz
+
+# Install toolchain run deps and memgraph build deps
+SHELL ["/bin/bash", "-c"]
+RUN git clone https://github.com/memgraph/memgraph.git \
+    && cd memgraph \
+    && ./environment/os/debian-10.sh install TOOLCHAIN_RUN_DEPS \
+    && ./environment/os/debian-10.sh install MEMGRAPH_BUILD_DEPS \
+    && cd .. && rm -rf memgraph
+
+# Add mgdeps-cache and bench-graph-api hostnames
+RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts
+
+# Create mg user and set as default
+RUN useradd -m -s /bin/bash mg
+USER mg
+
+# Install rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Fix node
+RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
+
+ENTRYPOINT ["sleep", "infinity"]
release/package/debian-11-arm/Dockerfile
@@ -10,9 +10,30 @@ RUN apt update && apt install -y \
 # Do NOT be smart here and clean the cache because the container is used in the
 # stateful context.

-RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz \
-    -O ${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz \
-    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz -C /opt \
-    && rm ${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz
+RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz \
+    -O toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz \
+    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz -C /opt \
+    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-arm64.tar.gz
+
+# Install toolchain run deps and memgraph build deps
+SHELL ["/bin/bash", "-c"]
+RUN git clone https://github.com/memgraph/memgraph.git \
+    && cd memgraph \
+    && ./environment/os/debian-11-arm.sh install TOOLCHAIN_RUN_DEPS \
+    && ./environment/os/debian-11-arm.sh install MEMGRAPH_BUILD_DEPS \
+    && cd .. && rm -rf memgraph
+
+# Add mgdeps-cache and bench-graph-api hostnames
+RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts
+
+# Create mg user and set as default
+RUN useradd -m -s /bin/bash mg
+USER mg
+
+# Install rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Fix node
+RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
+
+ENTRYPOINT ["sleep", "infinity"]
release/package/debian-11/Dockerfile
@@ -10,9 +10,30 @@ RUN apt update && apt install -y \
 # Do NOT be smart here and clean the cache because the container is used in the
 # stateful context.

-RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz \
-    -O ${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz \
-    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz -C /opt \
-    && rm ${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz
+RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz \
+    -O toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz \
+    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz -C /opt \
+    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-debian-11-amd64.tar.gz
+
+# Install toolchain run deps and memgraph build deps
+SHELL ["/bin/bash", "-c"]
+RUN git clone https://github.com/memgraph/memgraph.git \
+    && cd memgraph \
+    && ./environment/os/debian-11.sh install TOOLCHAIN_RUN_DEPS \
+    && ./environment/os/debian-11.sh install MEMGRAPH_BUILD_DEPS \
+    && cd .. && rm -rf memgraph
+
+# Add mgdeps-cache and bench-graph-api hostnames
+RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts
+
+# Create mg user and set as default
+RUN useradd -m -s /bin/bash mg
+USER mg
+
+# Install rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Fix node
+RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
+
+ENTRYPOINT ["sleep", "infinity"]
release/package/debian-12-arm/Dockerfile (new file, 39 lines)
@@ -0,0 +1,39 @@
FROM debian:12

ARG TOOLCHAIN_VERSION

# Stops tzdata interactive configuration.
ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && apt install -y \
    ca-certificates wget git
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-arm64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-arm64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-arm64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-arm64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/debian-12-arm.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/debian-12-arm.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]
release/package/debian-12/Dockerfile (new file, 39 lines)
@@ -0,0 +1,39 @@
FROM debian:12

ARG TOOLCHAIN_VERSION

# Stops tzdata interactive configuration.
ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && apt install -y \
    ca-certificates wget git
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-amd64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-amd64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-amd64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-debian-12-amd64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/debian-12.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/debian-12.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]
(deleted file)
@@ -1,38 +0,0 @@
-version: "3"
-services:
-  mgbuild_centos-7:
-    build:
-      context: centos-7
-    container_name: "mgbuild_centos-7"
-  mgbuild_centos-9:
-    build:
-      context: centos-9
-    container_name: "mgbuild_centos-9"
-  mgbuild_debian-10:
-    build:
-      context: debian-10
-    container_name: "mgbuild_debian-10"
-  mgbuild_debian-11:
-    build:
-      context: debian-11
-    container_name: "mgbuild_debian-11"
-  mgbuild_ubuntu-18.04:
-    build:
-      context: ubuntu-18.04
-    container_name: "mgbuild_ubuntu-18.04"
-  mgbuild_ubuntu-20.04:
-    build:
-      context: ubuntu-20.04
-    container_name: "mgbuild_ubuntu-20.04"
-  mgbuild_ubuntu-22.04:
-    build:
-      context: ubuntu-22.04
-    container_name: "mgbuild_ubuntu-22.04"
-  mgbuild_fedora-36:
-    build:
-      context: fedora-36
-    container_name: "mgbuild_fedora-36"
-  mgbuild_amzn-2:
-    build:
-      context: amzn-2
-    container_name: "mgbuild_amzn-2"
release/package/fedora-36/Dockerfile
@@ -8,9 +8,30 @@ RUN yum -y update \
 # Do NOT be smart here and clean the cache because the container is used in the
 # stateful context.

-RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz \
-    -O ${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz \
-    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz -C /opt \
-    && rm ${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz
+RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz \
+    -O toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz \
+    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz -C /opt \
+    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-36-x86_64.tar.gz
+
+# Install toolchain run deps and memgraph build deps
+SHELL ["/bin/bash", "-c"]
+RUN git clone https://github.com/memgraph/memgraph.git \
+    && cd memgraph \
+    && ./environment/os/fedora-36.sh install TOOLCHAIN_RUN_DEPS \
+    && ./environment/os/fedora-36.sh install MEMGRAPH_BUILD_DEPS \
+    && cd .. && rm -rf memgraph
+
+# Add mgdeps-cache and bench-graph-api hostnames
+RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts
+
+# Create mg user and set as default
+RUN useradd -m -s /bin/bash mg
+USER mg
+
+# Install rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Fix node
+RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
+
+ENTRYPOINT ["sleep", "infinity"]
37
release/package/fedora-38/Dockerfile
Normal file
@ -0,0 +1,37 @@
FROM fedora:38

ARG TOOLCHAIN_VERSION

# Stops tzdata interactive configuration.
RUN yum -y update \
    && yum install -y wget git
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-38-amd64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-38-amd64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-38-amd64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-38-amd64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/fedora-38.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/fedora-38.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]
37
release/package/fedora-39/Dockerfile
Normal file
@ -0,0 +1,37 @@
FROM fedora:39

ARG TOOLCHAIN_VERSION

# Stops tzdata interactive configuration.
RUN yum -y update \
    && yum install -y wget git
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-39-amd64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-39-amd64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-39-amd64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-fedora-39-amd64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/fedora-39.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/fedora-39.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]
665
release/package/mgbuild.sh
Executable file
@ -0,0 +1,665 @@
#!/bin/bash
set -Eeuo pipefail
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
SCRIPT_NAME=${0##*/}
PROJECT_ROOT="$SCRIPT_DIR/../.."
MGBUILD_HOME_DIR="/home/mg"
MGBUILD_ROOT_DIR="$MGBUILD_HOME_DIR/memgraph"

DEFAULT_TOOLCHAIN="v5"
SUPPORTED_TOOLCHAINS=(
  v4 v5
)
DEFAULT_OS="all"
SUPPORTED_OS=(
  all
  amzn-2
  centos-7 centos-9
  debian-10 debian-11 debian-11-arm debian-12 debian-12-arm
  fedora-36 fedora-38 fedora-39
  rocky-9.3
  ubuntu-18.04 ubuntu-20.04 ubuntu-22.04 ubuntu-22.04-arm
)
SUPPORTED_OS_V4=(
  amzn-2
  centos-7 centos-9
  debian-10 debian-11 debian-11-arm
  fedora-36
  ubuntu-18.04 ubuntu-20.04 ubuntu-22.04 ubuntu-22.04-arm
)
SUPPORTED_OS_V5=(
  amzn-2
  centos-7 centos-9
  debian-11 debian-11-arm debian-12 debian-12-arm
  fedora-38 fedora-39
  rocky-9.3
  ubuntu-20.04 ubuntu-22.04 ubuntu-22.04-arm
)
DEFAULT_BUILD_TYPE="Release"
SUPPORTED_BUILD_TYPES=(
  Debug
  Release
  RelWithDebInfo
)
DEFAULT_ARCH="amd"
SUPPORTED_ARCHS=(
  amd
  arm
)
SUPPORTED_TESTS=(
  clang-tidy cppcheck-and-clang-format code-analysis
  code-coverage drivers durability e2e gql-behave
  integration leftover-CTest macro-benchmark
  mgbench stress-plain stress-ssl
  unit unit-coverage upload-to-bench-graph
)
DEFAULT_THREADS=0
DEFAULT_ENTERPRISE_LICENSE=""
DEFAULT_ORGANIZATION_NAME="memgraph"

print_help () {
  echo -e "\nUsage: $SCRIPT_NAME [GLOBAL OPTIONS] COMMAND [COMMAND OPTIONS]"
  echo -e "\nInteract with mgbuild containers"

  echo -e "\nCommands:"
  echo -e "  build                     Build mgbuild image"
  echo -e "  build-memgraph [OPTIONS]  Build memgraph binary inside mgbuild container"
  echo -e "  copy OPTIONS              Copy an artifact from mgbuild container to host"
  echo -e "  package-memgraph          Create memgraph package from built binary inside mgbuild container"
  echo -e "  pull                      Pull mgbuild image from dockerhub"
  echo -e "  push [OPTIONS]            Push mgbuild image to dockerhub"
  echo -e "  run [OPTIONS]             Run mgbuild container"
  echo -e "  stop [OPTIONS]            Stop mgbuild container"
  echo -e "  test-memgraph TEST        Run a selected test TEST (see supported tests below) inside mgbuild container"

  echo -e "\nSupported tests:"
  echo -e "  \"${SUPPORTED_TESTS[*]}\""

  echo -e "\nGlobal options:"
  echo -e "  --arch string                 Specify target architecture (\"${SUPPORTED_ARCHS[*]}\") (default \"$DEFAULT_ARCH\")"
  echo -e "  --build-type string           Specify build type (\"${SUPPORTED_BUILD_TYPES[*]}\") (default \"$DEFAULT_BUILD_TYPE\")"
  echo -e "  --enterprise-license string   Specify the enterprise license (default \"\")"
  echo -e "  --organization-name string    Specify the organization name (default \"memgraph\")"
  echo -e "  --os string                   Specify operating system (\"${SUPPORTED_OS[*]}\") (default \"$DEFAULT_OS\")"
  echo -e "  --threads int                 Specify the number of threads a command will use (default \"\$(nproc)\" for container)"
  echo -e "  --toolchain string            Specify toolchain version (\"${SUPPORTED_TOOLCHAINS[*]}\") (default \"$DEFAULT_TOOLCHAIN\")"

  echo -e "\nbuild-memgraph options:"
  echo -e "  --asan          Build with ASAN"
  echo -e "  --community     Build community version"
  echo -e "  --coverage      Build with code coverage"
  echo -e "  --for-docker    Add flag -DMG_TELEMETRY_ID_OVERRIDE=DOCKER to cmake"
  echo -e "  --for-platform  Add flag -DMG_TELEMETRY_ID_OVERRIDE=DOCKER-PLATFORM to cmake"
  echo -e "  --init-only     Only run init script"
  echo -e "  --no-copy       Don't copy the memgraph repo from host."
  echo -e "                  Use this option with caution, be sure that memgraph source code is in the correct location inside the mgbuild container"
  echo -e "  --ubsan         Build with UBSAN"

  echo -e "\ncopy options:"
  echo -e "  --binary      Copy memgraph binary from mgbuild container to host"
  echo -e "  --build-logs  Copy build logs from mgbuild container to host"
  echo -e "  --package     Copy memgraph package from mgbuild container to host"

  echo -e "\npush options:"
  echo -e "  -p, --password string  Specify password for docker login"
  echo -e "  -u, --username string  Specify username for docker login"

  echo -e "\nrun options:"
  echo -e "  --pull  Pull the mgbuild image before running"

  echo -e "\nstop options:"
  echo -e "  --remove  Remove the stopped mgbuild container"

  echo -e "\nToolchain v4 supported OSs:"
  echo -e "  \"${SUPPORTED_OS_V4[*]}\""

  echo -e "\nToolchain v5 supported OSs:"
  echo -e "  \"${SUPPORTED_OS_V5[*]}\""

  echo -e "\nExample usage:"
  echo -e "  $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd run"
  echo -e "  $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd --build-type RelWithDebInfo build-memgraph --community"
  echo -e "  $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd --build-type RelWithDebInfo test-memgraph unit"
  echo -e "  $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd package-memgraph"
  echo -e "  $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd copy --package"
  echo -e "  $SCRIPT_NAME --os debian-12 --toolchain v5 --arch amd stop --remove"
}

check_support() {
  local is_supported=false
  case "$1" in
    arch)
      for e in "${SUPPORTED_ARCHS[@]}"; do
        if [[ "$e" == "$2" ]]; then
          is_supported=true
          break
        fi
      done
      if [[ "$is_supported" == false ]]; then
        echo -e "Error: Architecture $2 isn't supported!\nChoose from ${SUPPORTED_ARCHS[*]}"
        exit 1
      fi
    ;;
    build_type)
      for e in "${SUPPORTED_BUILD_TYPES[@]}"; do
        if [[ "$e" == "$2" ]]; then
          is_supported=true
          break
        fi
      done
      if [[ "$is_supported" == false ]]; then
        echo -e "Error: Build type $2 isn't supported!\nChoose from ${SUPPORTED_BUILD_TYPES[*]}"
        exit 1
      fi
    ;;
    os)
      for e in "${SUPPORTED_OS[@]}"; do
        if [[ "$e" == "$2" ]]; then
          is_supported=true
          break
        fi
      done
      if [[ "$is_supported" == false ]]; then
        echo -e "Error: OS $2 isn't supported!\nChoose from ${SUPPORTED_OS[*]}"
        exit 1
      fi
    ;;
    toolchain)
      for e in "${SUPPORTED_TOOLCHAINS[@]}"; do
        if [[ "$e" == "$2" ]]; then
          is_supported=true
          break
        fi
      done
      if [[ "$is_supported" == false ]]; then
        echo -e "Error: Toolchain version $2 isn't supported!\nChoose from ${SUPPORTED_TOOLCHAINS[*]}"
        exit 1
      fi
    ;;
    os_toolchain_combo)
      if [[ "$3" == "v4" ]]; then
        local SUPPORTED_OS_TOOLCHAIN=("${SUPPORTED_OS_V4[@]}")
      elif [[ "$3" == "v5" ]]; then
        local SUPPORTED_OS_TOOLCHAIN=("${SUPPORTED_OS_V5[@]}")
      else
        echo -e "Error: $3 isn't a supported toolchain_version!\nChoose from ${SUPPORTED_TOOLCHAINS[*]}"
        exit 1
      fi
      for e in "${SUPPORTED_OS_TOOLCHAIN[@]}"; do
        if [[ "$e" == "$2" ]]; then
          is_supported=true
          break
        fi
      done
      if [[ "$is_supported" == false ]]; then
        echo -e "Error: Toolchain version $3 doesn't support OS $2!\nChoose from ${SUPPORTED_OS_TOOLCHAIN[*]}"
        exit 1
      fi
    ;;
    *)
      echo -e "Error: This function can only check arch, build_type, os, toolchain version and os toolchain combination"
      exit 1
    ;;
  esac
}

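# Illustrative invocations (not part of the original script; shown for clarity):
#   check_support os debian-12                        # passes silently
#   check_support os_toolchain_combo debian-10 v5     # exits 1: debian-10 is not in the v5 list
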
##################################################
######## BUILD, COPY AND PACKAGE MEMGRAPH ########
##################################################
build_memgraph () {
  local build_container="mgbuild_${toolchain_version}_${os}"
  local ACTIVATE_TOOLCHAIN="source /opt/toolchain-${toolchain_version}/activate"
  local container_build_dir="$MGBUILD_ROOT_DIR/build"
  local container_output_dir="$container_build_dir/output"
  local arm_flag=""
  if [[ "$arch" == "arm" ]] || [[ "$os" =~ "-arm" ]]; then
    arm_flag="-DMG_ARCH=ARM64"
  fi
  local build_type_flag="-DCMAKE_BUILD_TYPE=$build_type"
  local telemetry_id_override_flag=""
  local community_flag=""
  local coverage_flag=""
  local asan_flag=""
  local ubsan_flag=""
  local init_only=false
  local for_docker=false
  local for_platform=false
  local copy_from_host=true
  while [[ "$#" -gt 0 ]]; do
    case "$1" in
      --community)
        community_flag="-DMG_ENTERPRISE=OFF"
        shift 1
      ;;
      --init-only)
        init_only=true
        shift 1
      ;;
      --for-docker)
        for_docker=true
        if [[ "$for_platform" == "true" ]]; then
          echo "Error: Cannot combine --for-docker and --for-platform flags"
          exit 1
        fi
        telemetry_id_override_flag=" -DMG_TELEMETRY_ID_OVERRIDE=DOCKER "
        shift 1
      ;;
      --for-platform)
        for_platform=true
        if [[ "$for_docker" == "true" ]]; then
          echo "Error: Cannot combine --for-docker and --for-platform flags"
          exit 1
        fi
        telemetry_id_override_flag=" -DMG_TELEMETRY_ID_OVERRIDE=DOCKER-PLATFORM "
        shift 1
      ;;
      --coverage)
        coverage_flag="-DTEST_COVERAGE=ON"
        shift 1
      ;;
      --asan)
        asan_flag="-DASAN=ON"
        shift 1
      ;;
      --ubsan)
        ubsan_flag="-DUBSAN=ON"
        shift 1
      ;;
      --no-copy)
        copy_from_host=false
        shift 1
      ;;
      *)
        echo "Error: Unknown flag '$1'"
        exit 1
      ;;
    esac
  done

  echo "Initializing deps ..."
  # If master is not the current branch, fetch it, because the get_version
  # script depends on it. If we are on master, the fetch command is going to
  # fail so that's why there is the explicit check.
  # Required here because the Docker build container can't access the remote.
  cd "$PROJECT_ROOT"
  if [[ "$(git rev-parse --abbrev-ref HEAD)" != "master" ]]; then
    git fetch origin master:master
  fi

  if [[ "$copy_from_host" == "true" ]]; then
    # Ensure we have a clean build directory
    docker exec -u mg "$build_container" bash -c "rm -rf $MGBUILD_ROOT_DIR && mkdir -p $MGBUILD_ROOT_DIR"
    echo "Copying project files..."
    docker cp "$PROJECT_ROOT/." "$build_container:$MGBUILD_ROOT_DIR/"
  fi
  # Change ownership of copied files so the mg user inside the container can access them
  docker exec -u root $build_container bash -c "chown -R mg:mg $MGBUILD_ROOT_DIR"

  echo "Installing dependencies using '$MGBUILD_ROOT_DIR/environment/os/$os.sh' script..."
  docker exec -u root "$build_container" bash -c "$MGBUILD_ROOT_DIR/environment/os/$os.sh check TOOLCHAIN_RUN_DEPS || /environment/os/$os.sh install TOOLCHAIN_RUN_DEPS"
  docker exec -u root "$build_container" bash -c "$MGBUILD_ROOT_DIR/environment/os/$os.sh check MEMGRAPH_BUILD_DEPS || /environment/os/$os.sh install MEMGRAPH_BUILD_DEPS"

  echo "Building targeted package..."
  # Fix issue with git marking directory as not safe
  docker exec -u mg "$build_container" bash -c "cd $MGBUILD_ROOT_DIR && git config --global --add safe.directory '*'"
  docker exec -u mg "$build_container" bash -c "cd $MGBUILD_ROOT_DIR && $ACTIVATE_TOOLCHAIN && ./init --ci"
  if [[ "$init_only" == "true" ]]; then
    return
  fi

  echo "Building Memgraph for $os on $build_container..."
  docker exec -u mg "$build_container" bash -c "cd $container_build_dir && rm -rf ./*"
  # Fix cmake failing locally if the remote was cloned via ssh
  docker exec -u mg "$build_container" bash -c "cd $MGBUILD_ROOT_DIR && git remote set-url origin https://github.com/memgraph/memgraph.git"

  # Define cmake command
  local cmake_cmd="cmake $build_type_flag $arm_flag $community_flag $telemetry_id_override_flag $coverage_flag $asan_flag $ubsan_flag .."
  docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && $cmake_cmd"

  # ' is used instead of " because we need to run make within the allowed
  # container resources.
  # The default value for $threads is 0 instead of $(nproc) because macOS
  # doesn't support the nproc command; 0 is checked and replaced here because
  # the mgbuild containers do support nproc.
  # shellcheck disable=SC2016
  if [[ "$threads" == 0 ]]; then
    docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN "'&& make -j$(nproc)'
    docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN "'&& make -j$(nproc) -B mgconsole'
  else
    # $threads is a host-side variable, so it must stay inside double quotes to
    # be expanded on the host, unlike $(nproc) above which runs in the container.
    docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && make -j$threads"
    docker exec -u mg "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && make -j$threads -B mgconsole"
  fi
}

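# Quoting sketch (illustrative only; HOST_VAR and CONTAINER_VAR are hypothetical):
#   bash -c "echo $HOST_VAR "'$CONTAINER_VAR'
# $HOST_VAR is expanded by the host shell before docker exec runs, while the
# single-quoted $CONTAINER_VAR is passed through verbatim and expanded by the
# shell inside the container - the same trick build_memgraph uses for $(nproc).
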
package_memgraph() {
  local ACTIVATE_TOOLCHAIN="source /opt/toolchain-${toolchain_version}/activate"
  local build_container="mgbuild_${toolchain_version}_${os}"
  local container_output_dir="$MGBUILD_ROOT_DIR/build/output"
  local package_command=""
  if [[ "$os" =~ ^"centos".* ]] || [[ "$os" =~ ^"fedora".* ]] || [[ "$os" =~ ^"amzn".* ]] || [[ "$os" =~ ^"rocky".* ]]; then
    docker exec -u root "$build_container" bash -c "yum -y update"
    package_command=" cpack -G RPM --config ../CPackConfig.cmake && rpmlint --file='../../release/rpm/rpmlintrc' memgraph*.rpm "
  fi
  if [[ "$os" =~ ^"debian".* ]]; then
    docker exec -u root "$build_container" bash -c "apt --allow-releaseinfo-change -y update"
    package_command=" cpack -G DEB --config ../CPackConfig.cmake "
  fi
  if [[ "$os" =~ ^"ubuntu".* ]]; then
    docker exec -u root "$build_container" bash -c "apt update"
    package_command=" cpack -G DEB --config ../CPackConfig.cmake "
  fi
  docker exec -u mg "$build_container" bash -c "mkdir -p $container_output_dir && cd $container_output_dir && $ACTIVATE_TOOLCHAIN && $package_command"
}

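# Output sketch (assumed, not taken from the original script): cpack writes the
# package into build/output inside the container, a .deb on Debian/Ubuntu or an
# .rpm on CentOS/Fedora/Amazon/Rocky; the exact file name comes from the CPack
# configuration, and `copy --package` below simply picks the newest memgraph* file.
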
copy_memgraph() {
  local build_container="mgbuild_${toolchain_version}_${os}"
  case "$1" in
    --binary)
      echo "Copying memgraph binary to host..."
      local container_output_path="$MGBUILD_ROOT_DIR/build/memgraph"
      local host_output_path="$PROJECT_ROOT/build/memgraph"
      mkdir -p "$PROJECT_ROOT/build"
      docker cp -L $build_container:$container_output_path $host_output_path
      echo "Binary saved to $host_output_path"
    ;;
    --build-logs)
      echo "Copying memgraph build logs to host..."
      local container_output_path="$MGBUILD_ROOT_DIR/build/logs"
      local host_output_path="$PROJECT_ROOT/build/logs"
      mkdir -p "$PROJECT_ROOT/build"
      docker cp -L $build_container:$container_output_path $host_output_path
      echo "Build logs saved to $host_output_path"
    ;;
    --package)
      echo "Copying memgraph package to host..."
      local container_output_dir="$MGBUILD_ROOT_DIR/build/output"
      local host_output_dir="$PROJECT_ROOT/build/output/$os"
      local last_package_name=$(docker exec -u mg "$build_container" bash -c "cd $container_output_dir && ls -t memgraph* | head -1")
      mkdir -p "$host_output_dir"
      docker cp "$build_container:$container_output_dir/$last_package_name" "$host_output_dir/$last_package_name"
      echo "Package saved to $host_output_dir/$last_package_name"
    ;;
    *)
      echo "Error: Unknown flag '$1'"
      exit 1
    ;;
  esac
}

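# Illustrative end-to-end flow (mirrors the "Example usage" section of the help):
#   ./mgbuild.sh --os debian-12 --toolchain v5 run
#   ./mgbuild.sh --os debian-12 --toolchain v5 build-memgraph
#   ./mgbuild.sh --os debian-12 --toolchain v5 package-memgraph
#   ./mgbuild.sh --os debian-12 --toolchain v5 copy --package
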
##################################################
##################### TESTS ######################
##################################################
test_memgraph() {
  local ACTIVATE_TOOLCHAIN="source /opt/toolchain-${toolchain_version}/activate"
  local ACTIVATE_VENV="./setup.sh /opt/toolchain-${toolchain_version}/activate"
  local EXPORT_LICENSE="export MEMGRAPH_ENTERPRISE_LICENSE=$enterprise_license"
  local EXPORT_ORG_NAME="export MEMGRAPH_ORGANIZATION_NAME=$organization_name"
  local BUILD_DIR="$MGBUILD_ROOT_DIR/build"
  local build_container="mgbuild_${toolchain_version}_${os}"
  echo "Running $1 test on $build_container..."

  case "$1" in
    unit)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $BUILD_DIR && $ACTIVATE_TOOLCHAIN "'&& ctest -R memgraph__unit --output-on-failure -j$threads'
    ;;
    unit-coverage)
      local setup_lsan_ubsan="export LSAN_OPTIONS=suppressions=$BUILD_DIR/../tools/lsan.supp && export UBSAN_OPTIONS=halt_on_error=1"
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $BUILD_DIR && $ACTIVATE_TOOLCHAIN && $setup_lsan_ubsan "'&& ctest -R memgraph__unit --output-on-failure -j2'
    ;;
    leftover-CTest)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $BUILD_DIR && $ACTIVATE_TOOLCHAIN "'&& ctest -E "(memgraph__unit|memgraph__benchmark)" --output-on-failure'
    ;;
    drivers)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR "'&& ./tests/drivers/run.sh'
    ;;
    integration)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR "'&& tests/integration/run.sh'
    ;;
    cppcheck-and-clang-format)
      local test_output_path="$MGBUILD_ROOT_DIR/tools/github/cppcheck_and_clang_format.txt"
      local test_output_host_dest="$PROJECT_ROOT/tools/github/cppcheck_and_clang_format.txt"
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tools/github && $ACTIVATE_TOOLCHAIN "'&& ./cppcheck_and_clang_format diff'
      docker cp $build_container:$test_output_path $test_output_host_dest
    ;;
    stress-plain)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests/stress && source ve3/bin/activate "'&& ./continuous_integration'
    ;;
    stress-ssl)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests/stress && source ve3/bin/activate "'&& ./continuous_integration --use-ssl'
    ;;
    durability)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests/stress && source ve3/bin/activate "'&& python3 durability --num-steps 5'
    ;;
    gql-behave)
      local test_output_dir="$MGBUILD_ROOT_DIR/tests/gql_behave"
      local test_output_host_dest="$PROJECT_ROOT/tests/gql_behave"
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests && $ACTIVATE_VENV && cd $MGBUILD_ROOT_DIR/tests/gql_behave "'&& ./continuous_integration'
      docker cp $build_container:$test_output_dir/gql_behave_status.csv $test_output_host_dest/gql_behave_status.csv
      docker cp $build_container:$test_output_dir/gql_behave_status.html $test_output_host_dest/gql_behave_status.html
    ;;
    macro-benchmark)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && export USER=mg && export LANG=$(echo $LANG) && cd $MGBUILD_ROOT_DIR/tests/macro_benchmark "'&& ./harness QuerySuite MemgraphRunner --groups aggregation 1000_create unwind_create dense_expand match --no-strict'
    ;;
    mgbench)
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests/mgbench "'&& ./benchmark.py vendor-native --num-workers-for-benchmark 12 --export-results benchmark_result.json pokec/medium/*/*'
    ;;
    upload-to-bench-graph)
      shift 1
      local SETUP_PASSED_ARGS="export PASSED_ARGS=\"$@\""
      local SETUP_VE3_ENV="virtualenv -p python3 ve3 && source ve3/bin/activate && pip install -r requirements.txt"
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tools/bench-graph-client && $SETUP_VE3_ENV && $SETUP_PASSED_ARGS "'&& ./main.py $PASSED_ARGS'
    ;;
    code-analysis)
      shift 1
      local SETUP_PASSED_ARGS="export PASSED_ARGS=\"$@\""
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests/code_analysis && $SETUP_PASSED_ARGS "'&& ./python_code_analysis.sh $PASSED_ARGS'
    ;;
    code-coverage)
      local test_output_path="$MGBUILD_ROOT_DIR/tools/github/generated/code_coverage.tar.gz"
      local test_output_host_dest="$PROJECT_ROOT/tools/github/generated/code_coverage.tar.gz"
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && $ACTIVATE_TOOLCHAIN && cd $MGBUILD_ROOT_DIR/tools/github "'&& ./coverage_convert'
      docker exec -u mg $build_container bash -c "cd $MGBUILD_ROOT_DIR/tools/github/generated && tar -czf code_coverage.tar.gz coverage.json html report.json summary.rmu"
      mkdir -p $PROJECT_ROOT/tools/github/generated
      docker cp $build_container:$test_output_path $test_output_host_dest
    ;;
    clang-tidy)
      shift 1
      local SETUP_PASSED_ARGS="export PASSED_ARGS=\"$@\""
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && export THREADS=$threads && $ACTIVATE_TOOLCHAIN && cd $MGBUILD_ROOT_DIR/tests/code_analysis && $SETUP_PASSED_ARGS "'&& ./clang_tidy.sh $PASSED_ARGS'
    ;;
    e2e)
      # local kafka_container="kafka_kafka_1"
      # local kafka_hostname="kafka"
      # local pulsar_container="pulsar_pulsar_1"
      # local pulsar_hostname="pulsar"
      # local setup_hostnames="export KAFKA_HOSTNAME=$kafka_hostname && PULSAR_HOSTNAME=$pulsar_hostname"
      # local build_container_network=$(docker inspect $build_container --format='{{ .HostConfig.NetworkMode }}')
      # docker network connect --alias $kafka_hostname $build_container_network $kafka_container > /dev/null 2>&1 || echo "Kafka container already inside correct network or something went wrong ..."
      # docker network connect --alias $pulsar_hostname $build_container_network $pulsar_container > /dev/null 2>&1 || echo "Pulsar container already inside correct network or something went wrong ..."
      docker exec -u mg $build_container bash -c "pip install --user networkx && pip3 install --user networkx"
      docker exec -u mg $build_container bash -c "$EXPORT_LICENSE && $EXPORT_ORG_NAME && cd $MGBUILD_ROOT_DIR/tests && $ACTIVATE_VENV && source ve3/bin/activate_e2e && cd $MGBUILD_ROOT_DIR/tests/e2e "'&& ./run.sh'
    ;;
    *)
      echo "Error: Unknown test '$1'"
      exit 1
    ;;
  esac
}

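# Note (assumption, not stated in the original script): test-memgraph expects a
# prior build-memgraph run in the same container, since every test executes
# against $MGBUILD_ROOT_DIR/build inside it, e.g.:
#   ./mgbuild.sh --os debian-12 --toolchain v5 test-memgraph unit
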
##################################################
################### PARSE ARGS ###################
##################################################
if [ "$#" -eq 0 ] || [ "$1" == "-h" ] || [ "$1" == "--help" ]; then
  print_help
  exit 0
fi
arch=$DEFAULT_ARCH
build_type=$DEFAULT_BUILD_TYPE
enterprise_license=$DEFAULT_ENTERPRISE_LICENSE
organization_name=$DEFAULT_ORGANIZATION_NAME
os=$DEFAULT_OS
threads=$DEFAULT_THREADS
toolchain_version=$DEFAULT_TOOLCHAIN
command=""
while [[ $# -gt 0 ]]; do
  case "$1" in
    --arch)
      arch=$2
      check_support arch $arch
      shift 2
    ;;
    --build-type)
      build_type=$2
      check_support build_type $build_type
      shift 2
    ;;
    --enterprise-license)
      enterprise_license=$2
      shift 2
    ;;
    --organization-name)
      organization_name=$2
      shift 2
    ;;
    --os)
      os=$2
      check_support os $os
      shift 2
    ;;
    --threads)
      threads=$2
      shift 2
    ;;
    --toolchain)
      toolchain_version=$2
      check_support toolchain $toolchain_version
      shift 2
    ;;
    *)
      if [[ "$1" =~ ^--.* ]]; then
        echo -e "Error: Unknown option '$1'"
        exit 1
      else
        command=$1
        shift 1
        break
      fi
    ;;
  esac
done
check_support os_toolchain_combo $os $toolchain_version

if [[ "$command" == "" ]]; then
  echo -e "Error: Command not provided, please provide a command"
  print_help
  exit 1
fi

if docker compose version > /dev/null 2>&1; then
  docker_compose_cmd="docker compose"
elif which docker-compose > /dev/null 2>&1; then
  docker_compose_cmd="docker-compose"
else
  echo -e "Missing command: either 'docker compose' or 'docker-compose' must be installed"
  exit 1
fi
echo "Using $docker_compose_cmd"

##################################################
################# PARSE COMMAND ##################
##################################################
case $command in
  build)
    cd $SCRIPT_DIR
    if [[ "$os" == "all" ]]; then
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml build
    else
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml build mgbuild_${toolchain_version}_${os}
    fi
  ;;
  run)
    cd $SCRIPT_DIR
    pull=false
    if [[ "$#" -gt 0 ]]; then
      if [[ "$1" == "--pull" ]]; then
        pull=true
      else
        echo "Error: Unknown flag '$1'"
        exit 1
      fi
    fi
    if [[ "$os" == "all" ]]; then
      if [[ "$pull" == "true" ]]; then
        $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml pull --ignore-pull-failures
      elif [[ "$docker_compose_cmd" == "docker compose" ]]; then
        $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml pull --ignore-pull-failures --policy missing
      fi
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml up -d
    else
      if [[ "$pull" == "true" ]]; then
        $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml pull mgbuild_${toolchain_version}_${os}
      elif ! docker image inspect memgraph/mgbuild:${toolchain_version}_${os} > /dev/null 2>&1; then
        $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml pull --ignore-pull-failures mgbuild_${toolchain_version}_${os}
      fi
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml up -d mgbuild_${toolchain_version}_${os}
    fi
  ;;
  stop)
    cd $SCRIPT_DIR
    remove=false
    if [[ "$#" -gt 0 ]]; then
      if [[ "$1" == "--remove" ]]; then
        remove=true
      else
        echo "Error: Unknown flag '$1'"
        exit 1
      fi
    fi
    if [[ "$os" == "all" ]]; then
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml down
    else
      docker stop mgbuild_${toolchain_version}_${os}
      if [[ "$remove" == "true" ]]; then
        docker rm mgbuild_${toolchain_version}_${os}
      fi
    fi
  ;;
  pull)
    cd $SCRIPT_DIR
    if [[ "$os" == "all" ]]; then
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml pull --ignore-pull-failures
    else
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml pull mgbuild_${toolchain_version}_${os}
    fi
  ;;
  push)
    docker login $@
    cd $SCRIPT_DIR
    if [[ "$os" == "all" ]]; then
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml push --ignore-push-failures
    else
      $docker_compose_cmd -f ${arch}-builders-${toolchain_version}.yml push mgbuild_${toolchain_version}_${os}
    fi
  ;;
  build-memgraph)
    build_memgraph $@
  ;;
  package-memgraph)
    package_memgraph
  ;;
  test-memgraph)
    test_memgraph $@
  ;;
  copy)
    copy_memgraph $@
  ;;
  *)
    echo "Error: Unknown command '$command'"
    exit 1
  ;;
esac
40
release/package/rocky-9.3/Dockerfile
Normal file
@ -0,0 +1,40 @@
FROM rockylinux:9.3

ARG TOOLCHAIN_VERSION

# Stops tzdata interactive configuration.
RUN yum -y update \
    && yum install -y wget git
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-rocky-9.3-amd64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-rocky-9.3-amd64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-rocky-9.3-amd64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-rocky-9.3-amd64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/rocky-9.3.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/rocky-9.3.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

# Install PyYAML (only for amzn-2, centos-7, centos-9 and rocky-9.3)
RUN pip3 install --user PyYAML

ENTRYPOINT ["sleep", "infinity"]
@ -1,208 +0,0 @@
#!/bin/bash

set -Eeuo pipefail

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
SUPPORTED_OS=(
  centos-7 centos-9
  debian-10 debian-11 debian-11-arm
  ubuntu-18.04 ubuntu-20.04 ubuntu-22.04 ubuntu-22.04-arm
  fedora-36
  amzn-2
)

SUPPORTED_BUILD_TYPES=(
  Debug
  Release
  RelWithDebInfo
)

PROJECT_ROOT="$SCRIPT_DIR/../.."
TOOLCHAIN_VERSION="toolchain-v4"
ACTIVATE_TOOLCHAIN="source /opt/${TOOLCHAIN_VERSION}/activate"
HOST_OUTPUT_DIR="$PROJECT_ROOT/build/output"

print_help () {
  # TODO(gitbuda): Update the release/package/run.sh help
  echo "$0 init|package|docker|test {os} {build_type} [--for-docker|--for-platform]"
  echo ""
  echo "    OSs: ${SUPPORTED_OS[*]}"
  echo "    Build types: ${SUPPORTED_BUILD_TYPES[*]}"
  exit 1
}

make_package () {
  os="$1"
  build_type="$2"

  build_container="mgbuild_$os"
  echo "Building Memgraph for $os on $build_container..."

  package_command=""
  if [[ "$os" =~ ^"centos".* ]] || [[ "$os" =~ ^"fedora".* ]] || [[ "$os" =~ ^"amzn".* ]]; then
    docker exec "$build_container" bash -c "yum -y update"
    package_command=" cpack -G RPM --config ../CPackConfig.cmake && rpmlint --file='../../release/rpm/rpmlintrc' memgraph*.rpm "
  fi
  if [[ "$os" =~ ^"debian".* ]]; then
    docker exec "$build_container" bash -c "apt --allow-releaseinfo-change -y update"
    package_command=" cpack -G DEB --config ../CPackConfig.cmake "
  fi
  if [[ "$os" =~ ^"ubuntu".* ]]; then
    docker exec "$build_container" bash -c "apt update"
    package_command=" cpack -G DEB --config ../CPackConfig.cmake "
  fi
  telemetry_id_override_flag=""
  if [[ "$#" -gt 2 ]]; then
    if [[ "$3" == "--for-docker" ]]; then
      telemetry_id_override_flag=" -DMG_TELEMETRY_ID_OVERRIDE=DOCKER "
    elif [[ "$3" == "--for-platform" ]]; then
      telemetry_id_override_flag=" -DMG_TELEMETRY_ID_OVERRIDE=DOCKER-PLATFORM"
    else
      print_help
      exit
    fi
  fi

  echo "Copying project files..."
  # If master is not the current branch, fetch it, because the get_version
  # script depends on it. If we are on master, the fetch command is going to
  # fail so that's why there is the explicit check.
  # Required here because the Docker build container can't access the remote.
  cd "$PROJECT_ROOT"
  if [[ "$(git rev-parse --abbrev-ref HEAD)" != "master" ]]; then
    git fetch origin master:master
  fi

  # Ensure we have a clean build directory
  docker exec "$build_container" rm -rf /memgraph

  docker exec "$build_container" mkdir -p /memgraph
  # TODO(gitbuda): Revisit copying the whole repo -> makes sense under CI.
  docker cp "$PROJECT_ROOT/." "$build_container:/memgraph/"

  container_build_dir="/memgraph/build"
  container_output_dir="$container_build_dir/output"

  # TODO(gitbuda): TOOLCHAIN_RUN_DEPS should be installed during the Docker
  # image build phase, but that is not easy at this point because the
  # environment/os/{os}.sh does not come within the toolchain package. When
  # migrating to the next version of toolchain do that, and remove the
  # TOOLCHAIN_RUN_DEPS installation from here.
  # TODO(gitbuda): On the other side, having this here allows updating deps
  # without rerunning the build containers.
  echo "Installing dependencies using '/memgraph/environment/os/$os.sh' script..."
  docker exec "$build_container" bash -c "/memgraph/environment/os/$os.sh install TOOLCHAIN_RUN_DEPS"
  docker exec "$build_container" bash -c "/memgraph/environment/os/$os.sh install MEMGRAPH_BUILD_DEPS"

  echo "Building targeted package..."
  # Fix issue with git marking directory as not safe
  docker exec "$build_container" bash -c "cd /memgraph && git config --global --add safe.directory '*'"
  docker exec "$build_container" bash -c "cd /memgraph && $ACTIVATE_TOOLCHAIN && ./init"
  docker exec "$build_container" bash -c "cd $container_build_dir && rm -rf ./*"
  # TODO(gitbuda): cmake fails locally if the remote was cloned via ssh because of the key -> FIX
  if [[ "$os" =~ "-arm" ]]; then
    docker exec "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && cmake -DCMAKE_BUILD_TYPE=$build_type -DMG_ARCH=ARM64 $telemetry_id_override_flag .."
  else
    docker exec "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN && cmake -DCMAKE_BUILD_TYPE=$build_type $telemetry_id_override_flag .."
  fi
  # ' is used instead of " because we need to run make within the allowed
  # container resources.
  # shellcheck disable=SC2016
  docker exec "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN "'&& make -j$(nproc)'
  docker exec "$build_container" bash -c "cd $container_build_dir && $ACTIVATE_TOOLCHAIN "'&& make -j$(nproc) -B mgconsole'
  docker exec "$build_container" bash -c "mkdir -p $container_output_dir && cd $container_output_dir && $ACTIVATE_TOOLCHAIN && $package_command"

  echo "Copying targeted package to host..."
  last_package_name=$(docker exec "$build_container" bash -c "cd $container_output_dir && ls -t memgraph* | head -1")
  # The operating system folder is introduced because multiple different
  # packages could be preserved during the same build "session".
  mkdir -p "$HOST_OUTPUT_DIR/$os"
  package_host_destination="$HOST_OUTPUT_DIR/$os/$last_package_name"
  docker cp "$build_container:$container_output_dir/$last_package_name" "$package_host_destination"
  echo "Package saved to $package_host_destination."
}

case "$1" in
  init)
    cd "$SCRIPT_DIR"
    if ! which "docker-compose" >/dev/null; then
      docker_compose_cmd="docker compose"
    else
      docker_compose_cmd="docker-compose"
    fi
    $docker_compose_cmd build --build-arg TOOLCHAIN_VERSION="${TOOLCHAIN_VERSION}"
    $docker_compose_cmd up -d
  ;;

  docker)
    # NOTE: Docker is built on top of the Debian 11 package.
    based_on_os="debian-11"
    # shellcheck disable=SC2012
    last_package_name=$(cd "$HOST_OUTPUT_DIR/$based_on_os" && ls -t memgraph* | head -1)
    docker_build_folder="$PROJECT_ROOT/release/docker"
    cd "$docker_build_folder"
    ./package_docker --latest "$HOST_OUTPUT_DIR/$based_on_os/$last_package_name"
    # shellcheck disable=SC2012
    docker_image_name=$(cd "$docker_build_folder" && ls -t memgraph* | head -1)
    docker_host_folder="$HOST_OUTPUT_DIR/docker"
    docker_host_image_path="$docker_host_folder/$docker_image_name"
    mkdir -p "$docker_host_folder"
    cp "$docker_build_folder/$docker_image_name" "$docker_host_image_path"
    echo "Docker images saved to $docker_host_image_path."
  ;;

  package)
    shift 1
    if [[ "$#" -lt 2 ]]; then
      print_help
    fi
    os="$1"
    build_type="$2"
    shift 2
    is_os_ok=false
    for supported_os in "${SUPPORTED_OS[@]}"; do
      if [[ "$supported_os" == "${os}" ]]; then
        is_os_ok=true
        break
      fi
    done
    is_build_type_ok=false
    for supported_build_type in "${SUPPORTED_BUILD_TYPES[@]}"; do
      if [[ "$supported_build_type" == "${build_type}" ]]; then
        is_build_type_ok=true
        break
      fi
    done
    if [[ "$is_os_ok" == true && "$is_build_type_ok" == true ]]; then
      make_package "$os" "$build_type" "$@"
    else
      if [[ "$is_os_ok" == false ]]; then
        echo "Unsupported OS: $os"
      elif [[ "$is_build_type_ok" == false ]]; then
        echo "Unsupported build type: $build_type"
      fi
      print_help
    fi
  ;;

  build)
    shift 1
    if [[ "$#" -ne 2 ]]; then
      print_help
    fi
    # in the vX format, e.g. v5
    toolchain_version="$1"
    # a name of the os folder, e.g. ubuntu-22.04-arm
    os="$2"
    cd "$SCRIPT_DIR/$os"
    docker build -f Dockerfile --build-arg TOOLCHAIN_VERSION="toolchain-$toolchain_version" -t "memgraph/memgraph-builder:${toolchain_version}_$os" .
  ;;

  test)
    echo "TODO(gitbuda): Test all packages on mgtest containers."
  ;;

  *)
    print_help
  ;;
esac
@ -10,9 +10,30 @@ RUN apt update && apt install -y \
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz \
    -O ${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz \
    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz -C /opt \
    && rm ${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz
RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-18.04-amd64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/ubuntu-18.04.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/ubuntu-18.04.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]

@ -10,9 +10,30 @@ RUN apt update && apt install -y \
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz \
    -O ${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz \
    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz -C /opt \
    && rm ${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz
RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-20.04-amd64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/ubuntu-20.04.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/ubuntu-20.04.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]

@ -10,9 +10,30 @@ RUN apt update && apt install -y \
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz \
    -O ${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz \
    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz -C /opt \
    && rm ${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz
RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-arm64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/ubuntu-22.04-arm.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/ubuntu-22.04-arm.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]

@ -10,9 +10,30 @@ RUN apt update && apt install -y \
# Do NOT be smart here and clean the cache because the container is used in the
# stateful context.

RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/${TOOLCHAIN_VERSION}/${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz \
    -O ${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz \
    && tar xzvf ${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz -C /opt \
    && rm ${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz
RUN wget -q https://s3-eu-west-1.amazonaws.com/deps.memgraph.io/toolchain-${TOOLCHAIN_VERSION}/toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz \
    -O toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz \
    && tar xzvf toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz -C /opt \
    && rm toolchain-${TOOLCHAIN_VERSION}-binaries-ubuntu-22.04-amd64.tar.gz

# Install toolchain run deps and memgraph build deps
SHELL ["/bin/bash", "-c"]
RUN git clone https://github.com/memgraph/memgraph.git \
    && cd memgraph \
    && ./environment/os/ubuntu-22.04.sh install TOOLCHAIN_RUN_DEPS \
    && ./environment/os/ubuntu-22.04.sh install MEMGRAPH_BUILD_DEPS \
    && cd .. && rm -rf memgraph

# Add mgdeps-cache and bench-graph-api hostnames
RUN echo -e "10.42.16.10 mgdeps-cache\n10.42.16.10 bench-graph-api" >> /etc/hosts

# Create mg user and set as default
RUN useradd -m -s /bin/bash mg
USER mg

# Install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Fix node
RUN curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash

ENTRYPOINT ["sleep", "infinity"]

@ -12,6 +12,7 @@
#ifdef MG_ENTERPRISE
#include <chrono>

#include <spdlog/spdlog.h>
#include "coordination/coordinator_config.hpp"
#include "coordination/coordinator_exceptions.hpp"
#include "coordination/raft_state.hpp"
@ -123,7 +124,7 @@ auto RaftState::AddCoordinatorInstance(uint32_t raft_server_id, uint32_t raft_po
    spdlog::info("Request to add server {} to the cluster accepted", endpoint);
  } else {
    throw RaftAddServerException("Failed to accept request to add server {} to the cluster with error code {}",
                                 endpoint, cmd_result->get_result_code());
                                 endpoint, int(cmd_result->get_result_code()));
  }

  // Waiting for server to join
@ -173,7 +174,8 @@ auto RaftState::AppendRegisterReplicationInstanceLog(CoordinatorClientConfig con
  spdlog::info("Request for registering instance {} accepted", config.instance_name);

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to register instance {} with error code {}", config.instance_name, res->get_result_code());
    spdlog::error("Failed to register instance {} with error code {}", config.instance_name,
                  int(res->get_result_code()));
    return false;
  }

@ -194,7 +196,7 @@ auto RaftState::AppendUnregisterReplicationInstanceLog(std::string_view instance
  spdlog::info("Request for unregistering instance {} accepted", instance_name);

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to unregister instance {} with error code {}", instance_name, res->get_result_code());
    spdlog::error("Failed to unregister instance {} with error code {}", instance_name, int(res->get_result_code()));
    return false;
  }
  return true;
@ -214,7 +216,7 @@ auto RaftState::AppendSetInstanceAsMainLog(std::string_view instance_name) -> bo
  spdlog::info("Request for promoting instance {} accepted", instance_name);

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to promote instance {} with error code {}", instance_name, res->get_result_code());
    spdlog::error("Failed to promote instance {} with error code {}", instance_name, int(res->get_result_code()));
    return false;
  }
  return true;
@ -233,7 +235,7 @@ auto RaftState::AppendSetInstanceAsReplicaLog(std::string_view instance_name) ->
  spdlog::info("Request for demoting instance {} accepted", instance_name);

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to promote instance {} with error code {}", instance_name, res->get_result_code());
    spdlog::error("Failed to demote instance {} with error code {}", instance_name, int(res->get_result_code()));
    return false;
  }

@ -252,7 +254,7 @@ auto RaftState::AppendUpdateUUIDLog(utils::UUID const &uuid) -> bool {
  spdlog::info("Request for updating UUID accepted");

  if (res->get_result_code() != nuraft::cmd_result_code::OK) {
    spdlog::error("Failed to update UUID with error code {}", res->get_result_code());
    spdlog::error("Failed to update UUID with error code {}", int(res->get_result_code()));
    return false;
  }

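// Note on the int(...) casts above (an assumption, not stated in the diff):
// fmt/spdlog cannot format a scoped enum such as nuraft::cmd_result_code out
// of the box, so the result code is cast to int before being passed to the
// "{}" placeholder. A minimal sketch of the same pattern:
//   enum class Code { OK = 0, FAILED = 1 };
//   spdlog::error("code {}", int(Code::FAILED));  // prints "code 1"
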
@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -119,6 +119,8 @@ class Reader {
  auto GetHeader() const -> Header const &;
  auto GetNextRow(utils::MemoryResource *mem) -> std::optional<Row>;

  void Reset();

 private:
  // Some implementation issues that need clearing up, but this is mainly because
  // I don't want `boost/iostreams/filtering_stream.hpp` included in this header file

@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
// Copyright 2024 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -34,6 +34,10 @@ struct Reader::impl {
|
||||
|
||||
[[nodiscard]] bool HasHeader() const { return read_config_.with_header; }
|
||||
[[nodiscard]] auto Header() const -> Header const & { return header_; }
|
||||
void Reset() {
|
||||
line_buffer_.clear();
|
||||
line_buffer_.shrink_to_fit();
|
||||
}
|
||||
|
||||
auto GetNextRow(utils::MemoryResource *mem) -> std::optional<Reader::Row>;
|
||||
|
||||
@ -42,7 +46,7 @@ struct Reader::impl {
|
||||
|
||||
void TryInitializeHeader();
|
||||
|
||||
std::optional<utils::pmr::string> GetNextLine(utils::MemoryResource *mem);
|
||||
bool GetNextLine();
|
||||
|
||||
ParsingResult ParseHeader();
|
||||
|
||||
@ -55,6 +59,8 @@ struct Reader::impl {
|
||||
Config read_config_;
|
||||
uint64_t line_count_{1};
|
||||
uint16_t number_of_columns_{0};
|
||||
uint64_t estimated_number_of_columns_{0};
|
||||
utils::pmr::string line_buffer_{memory_};
|
||||
Reader::Header header_{memory_};
|
||||
};
|
||||
|
||||
@ -129,17 +135,16 @@ void Reader::impl::InitializeStream() {
|
||||
MG_ASSERT(csv_stream_.is_complete(), "Should be 'complete' for correct operation");
|
||||
}
|
||||
|
||||
std::optional<utils::pmr::string> Reader::impl::GetNextLine(utils::MemoryResource *mem) {
|
||||
utils::pmr::string line(mem);
|
||||
if (!std::getline(csv_stream_, line)) {
|
||||
bool Reader::impl::GetNextLine() {
|
||||
if (!std::getline(csv_stream_, line_buffer_)) {
|
||||
// reached end of file or an I/0 error occurred
|
||||
if (!csv_stream_.good()) {
|
||||
csv_stream_.reset(); // this will close the file_stream_ and clear the chain
|
||||
}
|
||||
return std::nullopt;
|
||||
return false;
|
||||
}
|
||||
++line_count_;
|
||||
return std::move(line);
|
||||
return true;
|
||||
}
|
||||
|
||||
Reader::ParsingResult Reader::impl::ParseHeader() {
|
||||
@@ -170,6 +175,8 @@ void Reader::impl::TryInitializeHeader() {

 const Reader::Header &Reader::GetHeader() const { return pimpl->Header(); }

+void Reader::Reset() { pimpl->Reset(); }
+
 namespace {
 enum class CsvParserState : uint8_t { INITIAL_FIELD, NEXT_FIELD, QUOTING, EXPECT_DELIMITER, DONE };

@@ -179,6 +186,8 @@ Reader::ParsingResult Reader::impl::ParseRow(utils::MemoryResource *mem) {
   utils::pmr::vector<utils::pmr::string> row(mem);
   if (number_of_columns_ != 0) {
     row.reserve(number_of_columns_);
+  } else if (estimated_number_of_columns_ != 0) {
+    row.reserve(estimated_number_of_columns_);
   }

   utils::pmr::string column(memory_);

@@ -186,13 +195,12 @@ Reader::ParsingResult Reader::impl::ParseRow(utils::MemoryResource *mem) {
   auto state = CsvParserState::INITIAL_FIELD;

   do {
-    const auto maybe_line = GetNextLine(mem);
-    if (!maybe_line) {
+    if (!GetNextLine()) {
       // The whole file was processed.
       break;
     }

-    std::string_view line_string_view = *maybe_line;
+    std::string_view line_string_view = line_buffer_;

     // remove '\r' from the end in case we have dos file format
     if (line_string_view.back() == '\r') {

@@ -312,6 +320,11 @@ Reader::ParsingResult Reader::impl::ParseRow(utils::MemoryResource *mem) {
                      fmt::format("Expected {:d} columns in row {:d}, but got {:d}", number_of_columns_,
                                  line_count_ - 1, row.size()));
   }
+  // To avoid unnecessary dynamic growth of the row, remember the number of
+  // columns for future calls
+  if (number_of_columns_ == 0 && estimated_number_of_columns_ == 0) {
+    estimated_number_of_columns_ = row.size();
+  }

   return std::move(row);
 }
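The block added at the end of ParseRow caches the width of the first successfully parsed row so that later rows can reserve their full capacity up front instead of growing field by field. In isolation the idea looks like this (the field name mirrors the patch; the actual splitting is elided):

    #include <cstdint>
    #include <string>
    #include <vector>

    struct RowParser {
      uint64_t estimated_number_of_columns{0};

      std::vector<std::string> ParseRow() {
        std::vector<std::string> row;
        if (estimated_number_of_columns != 0) row.reserve(estimated_number_of_columns);
        // ... split the current line into fields and append them to row ...
        if (estimated_number_of_columns == 0) estimated_number_of_columns = row.size();
        return row;
      }
    };
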
@@ -319,7 +332,7 @@ Reader::ParsingResult Reader::impl::ParseRow(utils::MemoryResource *mem) {
 std::optional<Reader::Row> Reader::impl::GetNextRow(utils::MemoryResource *mem) {
   auto row = ParseRow(mem);

-  if (row.HasError()) {
+  if (row.HasError()) [[unlikely]] {
     if (!read_config_.ignore_bad) {
       throw CsvReadException("CSV Reader: Bad row at line {:d}: {}", line_count_ - 1, row.GetError().message);
     }

@@ -333,7 +346,7 @@ std::optional<Reader::Row> Reader::impl::GetNextRow(utils::MemoryResource *mem)
     } while (row.HasError());
   }

-  if (row->empty()) {
+  if (row->empty()) [[unlikely]] {
     // reached end of file
     return std::nullopt;
   }
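[[unlikely]] is a C++20 attribute that marks the bad-row and end-of-file branches as cold, letting the compiler lay out the hot parsing path fall-through. A minimal illustration of the idiom:

    #include <optional>

    std::optional<int> Next(int i) {
      if (i < 0) [[unlikely]] {
        return std::nullopt;  // rare: error path
      }
      return i + 1;  // hot path
    }
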
@@ -589,7 +589,6 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
     if (timestamp < storage->timestamp_) {
       continue;
     }
-
     SPDLOG_INFO("  Delta {}", applied_deltas);
     switch (delta.type) {
       case WalDeltaData::Type::VERTEX_CREATE: {

@@ -634,9 +633,10 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
         break;
       }
       case WalDeltaData::Type::VERTEX_SET_PROPERTY: {
-        spdlog::trace("  Vertex {} set property {} to {}", delta.vertex_edge_set_property.gid.AsUint(),
-                      delta.vertex_edge_set_property.property, delta.vertex_edge_set_property.value);
+        spdlog::trace("  Vertex {} set property", delta.vertex_edge_set_property.gid.AsUint());
+        // NOLINTNEXTLINE
         auto *transaction = get_transaction(timestamp);
+        // NOLINTNEXTLINE
         auto vertex = transaction->FindVertex(delta.vertex_edge_set_property.gid, View::NEW);
         if (!vertex)
           throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);

@@ -684,8 +684,7 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
         break;
       }
       case WalDeltaData::Type::EDGE_SET_PROPERTY: {
-        spdlog::trace("  Edge {} set property {} to {}", delta.vertex_edge_set_property.gid.AsUint(),
-                      delta.vertex_edge_set_property.property, delta.vertex_edge_set_property.value);
+        spdlog::trace("  Edge {} set property", delta.vertex_edge_set_property.gid.AsUint());
         if (!storage->config_.salient.items.properties_on_edges)
           throw utils::BasicException(
               "Can't set properties on edges because properties on edges "

@@ -917,5 +916,4 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
   spdlog::debug("Applied {} deltas", applied_deltas);
   return applied_deltas;
 }
-
 }  // namespace memgraph::dbms

@@ -131,6 +131,10 @@ DEFINE_uint64(storage_recovery_thread_count,
 DEFINE_bool(storage_enable_schema_metadata, false,
             "Controls whether metadata should be collected about the resident labels and edge types.");

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
+DEFINE_bool(storage_delta_on_identical_property_update, true,
+            "Controls whether updating a property with the same value should create a delta object.");
+
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 DEFINE_bool(telemetry_enabled, false,
             "Set to true to enable telemetry. We collect information about the "

@@ -84,6 +84,8 @@ DECLARE_bool(storage_parallel_schema_recovery);
 DECLARE_uint64(storage_recovery_thread_count);
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 DECLARE_bool(storage_enable_schema_metadata);
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
+DECLARE_bool(storage_delta_on_identical_property_update);

 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 DECLARE_bool(telemetry_enabled);

@@ -59,12 +59,14 @@ class TypedValueResultStreamBase {
  public:
  explicit TypedValueResultStreamBase(memgraph::storage::Storage *storage);

-  std::vector<memgraph::communication::bolt::Value> DecodeValues(
-      const std::vector<memgraph::query::TypedValue> &values) const;
+  void DecodeValues(const std::vector<memgraph::query::TypedValue> &values);
+
+  auto AccessValues() const -> std::vector<memgraph::communication::bolt::Value> const & { return decoded_values_; }

 protected:
  // NOTE: Needed only for ToBoltValue conversions
  memgraph::storage::Storage *storage_;
+  std::vector<memgraph::communication::bolt::Value> decoded_values_;
 };

 /// Wrapper around TEncoder which converts TypedValue to Value
@@ -75,16 +77,18 @@ class TypedValueResultStream : public TypedValueResultStreamBase {
  TypedValueResultStream(TEncoder *encoder, memgraph::storage::Storage *storage)
      : TypedValueResultStreamBase{storage}, encoder_(encoder) {}

-  void Result(const std::vector<memgraph::query::TypedValue> &values) { encoder_->MessageRecord(DecodeValues(values)); }
+  void Result(const std::vector<memgraph::query::TypedValue> &values) {
+    DecodeValues(values);
+    encoder_->MessageRecord(AccessValues());
+  }

 private:
  TEncoder *encoder_;
 };

-std::vector<memgraph::communication::bolt::Value> TypedValueResultStreamBase::DecodeValues(
-    const std::vector<memgraph::query::TypedValue> &values) const {
-  std::vector<memgraph::communication::bolt::Value> decoded_values;
-  decoded_values.reserve(values.size());
+void TypedValueResultStreamBase::DecodeValues(const std::vector<memgraph::query::TypedValue> &values) {
+  decoded_values_.reserve(values.size());
+  decoded_values_.clear();
  for (const auto &v : values) {
    auto maybe_value = memgraph::glue::ToBoltValue(v, storage_, memgraph::storage::View::NEW);
    if (maybe_value.HasError()) {

@@ -99,9 +103,8 @@ std::vector<memgraph::communication::bolt::Value> TypedValueResultStreamBase::De
        throw memgraph::communication::bolt::ClientError("Unexpected storage error when streaming results.");
    }
-    decoded_values.emplace_back(std::move(*maybe_value));
+    decoded_values_.emplace_back(std::move(*maybe_value));
  }
-  return decoded_values;
 }

 TypedValueResultStreamBase::TypedValueResultStreamBase(memgraph::storage::Storage *storage) : storage_(storage) {}
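This is the same reuse idea as the CSV line buffer: DecodeValues now writes into a member vector, so the capacity acquired while decoding the first result row serves every later row; clear() drops elements but keeps capacity. A reduced sketch of the pattern (stand-in types, not the Bolt API):

    #include <string>
    #include <vector>

    class ResultStream {
     public:
      const std::vector<std::string> &Decode(const std::vector<int> &values) {
        decoded_values_.clear();                 // keeps capacity from earlier calls
        decoded_values_.reserve(values.size());  // no-op once capacity suffices
        for (int v : values) decoded_values_.emplace_back(std::to_string(v));
        return decoded_values_;
      }

     private:
      std::vector<std::string> decoded_values_;
    };
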
@@ -332,7 +332,8 @@ int main(int argc, char **argv) {
                     .durability_directory = FLAGS_data_directory + "/rocksdb_durability",
                     .wal_directory = FLAGS_data_directory + "/rocksdb_wal"},
       .salient.items = {.properties_on_edges = FLAGS_storage_properties_on_edges,
-                        .enable_schema_metadata = FLAGS_storage_enable_schema_metadata},
+                        .enable_schema_metadata = FLAGS_storage_enable_schema_metadata,
+                        .delta_on_identical_property_update = FLAGS_storage_delta_on_identical_property_update},
       .salient.storage_mode = memgraph::flags::ParseStorageMode()};
   spdlog::info("config recover on startup {}, flags {} {}", db_config.durability.recover_on_startup,
                FLAGS_storage_recover_on_startup, FLAGS_data_recovery_on_startup);

@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@@ -13,64 +13,6 @@

 namespace memgraph::query {

-namespace impl {
-
-bool TypedValueCompare(const TypedValue &a, const TypedValue &b) {
-  // in ordering null comes after everything else
-  // at the same time Null is not less that null
-  // first deal with Null < Whatever case
-  if (a.IsNull()) return false;
-  // now deal with NotNull < Null case
-  if (b.IsNull()) return true;
-
-  // comparisons are from this point legal only between values of
-  // the same type, or int+float combinations
-  if ((a.type() != b.type() && !(a.IsNumeric() && b.IsNumeric())))
-    throw QueryRuntimeException("Can't compare value of type {} to value of type {}.", a.type(), b.type());
-
-  switch (a.type()) {
-    case TypedValue::Type::Bool:
-      return !a.ValueBool() && b.ValueBool();
-    case TypedValue::Type::Int:
-      if (b.type() == TypedValue::Type::Double)
-        return a.ValueInt() < b.ValueDouble();
-      else
-        return a.ValueInt() < b.ValueInt();
-    case TypedValue::Type::Double:
-      if (b.type() == TypedValue::Type::Int)
-        return a.ValueDouble() < b.ValueInt();
-      else
-        return a.ValueDouble() < b.ValueDouble();
-    case TypedValue::Type::String:
-      // NOLINTNEXTLINE(modernize-use-nullptr)
-      return a.ValueString() < b.ValueString();
-    case TypedValue::Type::Date:
-      // NOLINTNEXTLINE(modernize-use-nullptr)
-      return a.ValueDate() < b.ValueDate();
-    case TypedValue::Type::LocalTime:
-      // NOLINTNEXTLINE(modernize-use-nullptr)
-      return a.ValueLocalTime() < b.ValueLocalTime();
-    case TypedValue::Type::LocalDateTime:
-      // NOLINTNEXTLINE(modernize-use-nullptr)
-      return a.ValueLocalDateTime() < b.ValueLocalDateTime();
-    case TypedValue::Type::Duration:
-      // NOLINTNEXTLINE(modernize-use-nullptr)
-      return a.ValueDuration() < b.ValueDuration();
-    case TypedValue::Type::List:
-    case TypedValue::Type::Map:
-    case TypedValue::Type::Vertex:
-    case TypedValue::Type::Edge:
-    case TypedValue::Type::Path:
-    case TypedValue::Type::Graph:
-    case TypedValue::Type::Function:
-      throw QueryRuntimeException("Comparison is not defined for values of type {}.", a.type());
-    case TypedValue::Type::Null:
-      LOG_FATAL("Invalid type");
-  }
-}
-
-}  // namespace impl
-
 int64_t QueryTimestamp() {
   return std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().time_since_epoch())
       .count();

@@ -23,6 +23,7 @@
 #include "query/frontend/ast/ast.hpp"
 #include "query/frontend/semantic/symbol.hpp"
 #include "query/typed_value.hpp"
+#include "range/v3/all.hpp"
 #include "storage/v2/id_types.hpp"
 #include "storage/v2/property_value.hpp"
 #include "storage/v2/result.hpp"

@@ -31,9 +32,91 @@

 namespace memgraph::query {

-namespace impl {
-bool TypedValueCompare(const TypedValue &a, const TypedValue &b);
-}  // namespace impl
+namespace {
+std::partial_ordering TypedValueCompare(TypedValue const &a, TypedValue const &b) {
+  // First assume typical same type comparisons
+  if (a.type() == b.type()) {
+    switch (a.type()) {
+      case TypedValue::Type::Bool:
+        return a.UnsafeValueBool() <=> b.UnsafeValueBool();
+      case TypedValue::Type::Int:
+        return a.UnsafeValueInt() <=> b.UnsafeValueInt();
+      case TypedValue::Type::Double:
+        return a.UnsafeValueDouble() <=> b.UnsafeValueDouble();
+      case TypedValue::Type::String:
+        return a.UnsafeValueString() <=> b.UnsafeValueString();
+      case TypedValue::Type::Date:
+        return a.UnsafeValueDate() <=> b.UnsafeValueDate();
+      case TypedValue::Type::LocalTime:
+        return a.UnsafeValueLocalTime() <=> b.UnsafeValueLocalTime();
+      case TypedValue::Type::LocalDateTime:
+        return a.UnsafeValueLocalDateTime() <=> b.UnsafeValueLocalDateTime();
+      case TypedValue::Type::Duration:
+        return a.UnsafeValueDuration() <=> b.UnsafeValueDuration();
+      case TypedValue::Type::Null:
+        return std::partial_ordering::equivalent;
+      case TypedValue::Type::List:
+      case TypedValue::Type::Map:
+      case TypedValue::Type::Vertex:
+      case TypedValue::Type::Edge:
+      case TypedValue::Type::Path:
+      case TypedValue::Type::Graph:
+      case TypedValue::Type::Function:
+        throw QueryRuntimeException("Comparison is not defined for values of type {}.", a.type());
+    }
+  } else {
+    // from this point legal only between values of
+    // int+float combinations or against null
+
+    // in ordering null comes after everything else
+    // at the same time Null is not less than null
+    // first deal with Null < Whatever case
+    if (a.IsNull()) return std::partial_ordering::greater;
+    // now deal with NotNull < Null case
+    if (b.IsNull()) return std::partial_ordering::less;
+
+    if (!(a.IsNumeric() && b.IsNumeric())) [[unlikely]]
+      throw QueryRuntimeException("Can't compare value of type {} to value of type {}.", a.type(), b.type());
+
+    switch (a.type()) {
+      case TypedValue::Type::Int:
+        return a.UnsafeValueInt() <=> b.ValueDouble();
+      case TypedValue::Type::Double:
+        return a.UnsafeValueDouble() <=> b.ValueInt();
+      case TypedValue::Type::Bool:
+      case TypedValue::Type::Null:
+      case TypedValue::Type::String:
+      case TypedValue::Type::List:
+      case TypedValue::Type::Map:
+      case TypedValue::Type::Vertex:
+      case TypedValue::Type::Edge:
+      case TypedValue::Type::Path:
+      case TypedValue::Type::Date:
+      case TypedValue::Type::LocalTime:
+      case TypedValue::Type::LocalDateTime:
+      case TypedValue::Type::Duration:
+      case TypedValue::Type::Graph:
+      case TypedValue::Type::Function:
+        LOG_FATAL("Invalid type");
+    }
+  }
+}
+
+}  // namespace
+
+struct OrderedTypedValueCompare {
+  OrderedTypedValueCompare(Ordering ordering) : ordering_{ordering}, ascending{ordering == Ordering::ASC} {}
+
+  auto operator()(const TypedValue &lhs, const TypedValue &rhs) const -> std::partial_ordering {
+    return ascending ? TypedValueCompare(lhs, rhs) : TypedValueCompare(rhs, lhs);
+  }
+
+  auto ordering() const { return ordering_; }
+
+ private:
+  Ordering ordering_;
+  bool ascending = true;
+};

 /// Custom Comparator type for comparing vectors of TypedValues.
 ///
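The new comparator answers less/equivalent/greater in a single pass by returning std::partial_ordering from operator<=>, instead of probing operator< twice per element; mixed int/double comparisons fall out of the built-in three-way operator. A reduced sketch with a two-alternative variant standing in for TypedValue:

    #include <compare>
    #include <cstdint>
    #include <iostream>
    #include <variant>

    using Value = std::variant<int64_t, double>;

    std::partial_ordering Compare(const Value &a, const Value &b) {
      // <=> between integral and floating operands yields std::partial_ordering.
      return std::visit([](auto x, auto y) -> std::partial_ordering { return x <=> y; }, a, b);
    }

    int main() {
      std::cout << (Compare(Value{int64_t{1}}, Value{2.5}) == std::partial_ordering::less) << '\n';  // 1
    }
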
@@ -43,32 +126,27 @@ bool TypedValueCompare(const TypedValue &a, const TypedValue &b);
 class TypedValueVectorCompare final {
  public:
  TypedValueVectorCompare() = default;
-  explicit TypedValueVectorCompare(const std::vector<Ordering> &ordering) : ordering_(ordering) {}
+  explicit TypedValueVectorCompare(std::vector<OrderedTypedValueCompare> orderings)
+      : orderings_{std::move(orderings)} {}

-  template <class TAllocator>
-  bool operator()(const std::vector<TypedValue, TAllocator> &c1, const std::vector<TypedValue, TAllocator> &c2) const {
-    // ordering is invalid if there are more elements in the collections
-    // then there are in the ordering_ vector
-    MG_ASSERT(c1.size() <= ordering_.size() && c2.size() <= ordering_.size(),
-              "Collections contain more elements then there are orderings");
+  const auto &orderings() const { return orderings_; }

-    auto c1_it = c1.begin();
-    auto c2_it = c2.begin();
-    auto ordering_it = ordering_.begin();
-    for (; c1_it != c1.end() && c2_it != c2.end(); c1_it++, c2_it++, ordering_it++) {
-      if (impl::TypedValueCompare(*c1_it, *c2_it)) return *ordering_it == Ordering::ASC;
-      if (impl::TypedValueCompare(*c2_it, *c1_it)) return *ordering_it == Ordering::DESC;
-    }
-
-    // at least one collection is exhausted
-    // c1 is less then c2 iff c1 reached the end but c2 didn't
-    return (c1_it == c1.end()) && (c2_it != c2.end());
+  auto lex_cmp() const {
+    return [orderings = &orderings_]<typename TAllocator>(const std::vector<TypedValue, TAllocator> &lhs,
+                                                          const std::vector<TypedValue, TAllocator> &rhs) {
+      auto rng = ranges::views::zip(*orderings, lhs, rhs);
+      for (auto const &[cmp, l, r] : rng) {
+        auto res = cmp(l, r);
+        if (res == std::partial_ordering::less) return true;
+        if (res == std::partial_ordering::greater) return false;
+      }
+      DMG_ASSERT(orderings->size() == lhs.size() && lhs.size() == rhs.size());
+      return false;
+    };
  }

-  // TODO: Remove this, member is public
-  const auto &ordering() const { return ordering_; }
-
-  std::vector<Ordering> ordering_;
+ private:
+  std::vector<OrderedTypedValueCompare> orderings_;
 };

 /// Raise QueryRuntimeException if the value for symbol isn't of expected type.
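lex_cmp() replaces the hand-rolled iterator walk with a lexicographic loop over ranges::views::zip, where every position carries its own ASC/DESC comparator. The shape of that loop on plain ints (range-v3 assumed available, as in the patch; Cmp is an illustrative stand-in for OrderedTypedValueCompare):

    #include <compare>
    #include <functional>
    #include <iostream>
    #include <vector>

    #include <range/v3/view/zip.hpp>

    using Cmp = std::function<std::partial_ordering(int, int)>;

    bool LexLess(const std::vector<Cmp> &cmps, const std::vector<int> &lhs, const std::vector<int> &rhs) {
      for (auto const &[cmp, l, r] : ranges::views::zip(cmps, lhs, rhs)) {
        auto res = cmp(l, r);
        if (res == std::partial_ordering::less) return true;
        if (res == std::partial_ordering::greater) return false;
      }
      return false;  // every compared position was equivalent
    }

    int main() {
      Cmp asc = [](int a, int b) { return a <=> b; };
      Cmp desc = [](int a, int b) { return b <=> a; };
      std::cout << LexLess({asc, desc}, {1, 5}, {1, 3}) << '\n';  // 1: DESC on the second key
    }
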
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@@ -126,10 +126,11 @@ class FrameChangeCollector {
   }

   bool ResetTrackingValue(const std::string &key) {
-    if (!tracked_values_.contains(utils::pmr::string(key, utils::NewDeleteResource()))) {
+    auto const it = tracked_values_.find(utils::pmr::string(key, utils::NewDeleteResource()));
+    if (it == tracked_values_.cend()) {
       return false;
     }
-    tracked_values_.erase(utils::pmr::string(key, utils::NewDeleteResource()));
+    tracked_values_.erase(it);
     AddTrackingKey(key);
     return true;
   }
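The ResetTrackingValue change is the classic double-lookup fix: contains() followed by erase(key) hashes and compares the key twice, while a single find() yields an iterator that the erase can reuse. The before/after on a plain unordered_map:

    #include <string>
    #include <unordered_map>

    bool ResetEntry(std::unordered_map<std::string, int> &values, const std::string &key) {
      // Before: values.contains(key) then values.erase(key) -> two hash lookups.
      auto const it = values.find(key);  // after: one lookup
      if (it == values.cend()) {
        return false;
      }
      values.erase(it);  // erase by iterator, no second lookup
      return true;
    }
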
@@ -26,6 +26,11 @@

 namespace memgraph::query {

+constexpr std::string_view kBoltServer = "bolt_server";
+constexpr std::string_view kReplicationServer = "replication_server";
+constexpr std::string_view kCoordinatorServer = "coordinator_server";
+constexpr std::string_view kManagementServer = "management_server";
+
 struct LabelIx {
   static const utils::TypeInfo kType;
   const utils::TypeInfo &GetTypeInfo() const { return kType; }

@@ -1249,6 +1254,8 @@ class AllPropertiesLookup : public memgraph::query::Expression {
   friend class AstStorage;
 };

+using QueryLabelType = std::variant<LabelIx, Expression *>;
+
 class LabelsTest : public memgraph::query::Expression {
  public:
   static const utils::TypeInfo kType;

@@ -1281,6 +1288,16 @@ class LabelsTest : public memgraph::query::Expression {

 protected:
   LabelsTest(Expression *expression, const std::vector<LabelIx> &labels) : expression_(expression), labels_(labels) {}
+  LabelsTest(Expression *expression, const std::vector<QueryLabelType> &labels) : expression_(expression) {
+    labels_.reserve(labels.size());
+    for (const auto &label : labels) {
+      if (const auto *label_ix = std::get_if<LabelIx>(&label)) {
+        labels_.push_back(*label_ix);
+      } else {
+        throw SemanticException("You can't use labels in filter expressions.");
+      }
+    }
+  }

 private:
   friend class AstStorage;

@@ -1771,7 +1788,7 @@ class NodeAtom : public memgraph::query::PatternAtom {
     return visitor.PostVisit(*this);
   }

-  std::vector<memgraph::query::LabelIx> labels_;
+  std::vector<QueryLabelType> labels_;
   std::variant<std::unordered_map<memgraph::query::PropertyIx, memgraph::query::Expression *>,
                memgraph::query::ParameterLookup *>
       properties_;

@@ -1781,7 +1798,11 @@ class NodeAtom : public memgraph::query::PatternAtom {
     object->identifier_ = identifier_ ? identifier_->Clone(storage) : nullptr;
     object->labels_.resize(labels_.size());
     for (auto i = 0; i < object->labels_.size(); ++i) {
-      object->labels_[i] = storage->GetLabelIx(labels_[i].name);
+      if (const auto *label = std::get_if<LabelIx>(&labels_[i])) {
+        object->labels_[i] = storage->GetLabelIx(label->name);
+      } else {
+        object->labels_[i] = std::get<Expression *>(labels_[i])->Clone(storage);
+      }
     }
     if (const auto *properties = std::get_if<std::unordered_map<PropertyIx, Expression *>>(&properties_)) {
       auto &new_obj_properties = std::get<std::unordered_map<PropertyIx, Expression *>>(object->properties_);

@@ -2657,20 +2678,25 @@ class SetLabels : public memgraph::query::Clause {
   }

   memgraph::query::Identifier *identifier_{nullptr};
-  std::vector<memgraph::query::LabelIx> labels_;
+  std::vector<QueryLabelType> labels_;

   SetLabels *Clone(AstStorage *storage) const override {
     SetLabels *object = storage->Create<SetLabels>();
     object->identifier_ = identifier_ ? identifier_->Clone(storage) : nullptr;
     object->labels_.resize(labels_.size());
     for (auto i = 0; i < object->labels_.size(); ++i) {
-      object->labels_[i] = storage->GetLabelIx(labels_[i].name);
+      if (const auto *label = std::get_if<LabelIx>(&labels_[i])) {
+        object->labels_[i] = storage->GetLabelIx(label->name);
+      } else {
+        object->labels_[i] = std::get<Expression *>(labels_[i])->Clone(storage);
+      }
     }
     return object;
   }

 protected:
-  SetLabels(Identifier *identifier, const std::vector<LabelIx> &labels) : identifier_(identifier), labels_(labels) {}
+  SetLabels(Identifier *identifier, std::vector<QueryLabelType> labels)
+      : identifier_(identifier), labels_(std::move(labels)) {}

 private:
   friend class AstStorage;

@@ -2720,20 +2746,25 @@ class RemoveLabels : public memgraph::query::Clause {
   }

   memgraph::query::Identifier *identifier_{nullptr};
-  std::vector<memgraph::query::LabelIx> labels_;
+  std::vector<QueryLabelType> labels_;

   RemoveLabels *Clone(AstStorage *storage) const override {
     RemoveLabels *object = storage->Create<RemoveLabels>();
     object->identifier_ = identifier_ ? identifier_->Clone(storage) : nullptr;
     object->labels_.resize(labels_.size());
     for (auto i = 0; i < object->labels_.size(); ++i) {
-      object->labels_[i] = storage->GetLabelIx(labels_[i].name);
+      if (const auto *label = std::get_if<LabelIx>(&labels_[i])) {
+        object->labels_[i] = storage->GetLabelIx(label->name);
+      } else {
+        object->labels_[i] = std::get<Expression *>(labels_[i])->Clone(storage);
+      }
     }
     return object;
   }

 protected:
-  RemoveLabels(Identifier *identifier, const std::vector<LabelIx> &labels) : identifier_(identifier), labels_(labels) {}
+  RemoveLabels(Identifier *identifier, std::vector<QueryLabelType> labels)
+      : identifier_(identifier), labels_(std::move(labels)) {}

 private:
   friend class AstStorage;

@@ -3114,24 +3145,21 @@ class CoordinatorQuery : public memgraph::query::Query {
   DEFVISITABLE(QueryVisitor<void>);

   memgraph::query::CoordinatorQuery::Action action_;
-  std::string instance_name_;
-  memgraph::query::Expression *replication_socket_address_{nullptr};
-  memgraph::query::Expression *coordinator_socket_address_{nullptr};
-  memgraph::query::Expression *raft_socket_address_{nullptr};
-  memgraph::query::Expression *raft_server_id_{nullptr};
+  std::string instance_name_{};
+  std::unordered_map<memgraph::query::Expression *, memgraph::query::Expression *> configs_;
+  memgraph::query::Expression *coordinator_server_id_{nullptr};
   memgraph::query::CoordinatorQuery::SyncMode sync_mode_;

   CoordinatorQuery *Clone(AstStorage *storage) const override {
     auto *object = storage->Create<CoordinatorQuery>();

     object->action_ = action_;
     object->instance_name_ = instance_name_;
-    object->replication_socket_address_ =
-        replication_socket_address_ ? replication_socket_address_->Clone(storage) : nullptr;
+    object->coordinator_server_id_ = coordinator_server_id_ ? coordinator_server_id_->Clone(storage) : nullptr;
     object->sync_mode_ = sync_mode_;
-    object->coordinator_socket_address_ =
-        coordinator_socket_address_ ? coordinator_socket_address_->Clone(storage) : nullptr;
-    object->raft_socket_address_ = raft_socket_address_ ? raft_socket_address_->Clone(storage) : nullptr;
-    object->raft_server_id_ = raft_server_id_ ? raft_server_id_->Clone(storage) : nullptr;
+    for (const auto &[key, value] : configs_) {
+      object->configs_[key->Clone(storage)] = value->Clone(storage);
+    }

     return object;
   }
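With QueryLabelType = std::variant<LabelIx, Expression *>, every site that clones or inspects labels dispatches through std::get_if, as the three Clone loops above repeat verbatim. The dispatch in isolation, with stand-in types for the two alternatives:

    #include <string>
    #include <variant>
    #include <vector>

    struct LabelIx { std::string name; };
    struct Expression {};  // stand-in for memgraph::query::Expression

    using QueryLabelType = std::variant<LabelIx, Expression *>;

    std::string Describe(const QueryLabelType &label) {
      if (const auto *label_ix = std::get_if<LabelIx>(&label)) {
        return "static label: " + label_ix->name;  // resolved at parse time
      }
      return "label expression";  // evaluated per vertex at runtime
    }
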
@@ -398,24 +398,17 @@ antlrcpp::Any CypherMainVisitor::visitRegisterReplica(MemgraphCypher::RegisterRe
 antlrcpp::Any CypherMainVisitor::visitRegisterInstanceOnCoordinator(
     MemgraphCypher::RegisterInstanceOnCoordinatorContext *ctx) {
   auto *coordinator_query = storage_->Create<CoordinatorQuery>();
-  if (!ctx->replicationSocketAddress()->literal()->StringLiteral()) {
-    throw SemanticException("Replication socket address should be a string literal!");
-  }
-
-  if (!ctx->coordinatorSocketAddress()->literal()->StringLiteral()) {
-    throw SemanticException("Coordinator socket address should be a string literal!");
-  }
   coordinator_query->action_ = CoordinatorQuery::Action::REGISTER_INSTANCE;
-  coordinator_query->replication_socket_address_ =
-      std::any_cast<Expression *>(ctx->replicationSocketAddress()->accept(this));
-  coordinator_query->coordinator_socket_address_ =
-      std::any_cast<Expression *>(ctx->coordinatorSocketAddress()->accept(this));
   coordinator_query->instance_name_ = std::any_cast<std::string>(ctx->instanceName()->symbolicName()->accept(this));
-  if (ctx->ASYNC()) {
-    coordinator_query->sync_mode_ = memgraph::query::CoordinatorQuery::SyncMode::ASYNC;
-  } else {
-    coordinator_query->sync_mode_ = memgraph::query::CoordinatorQuery::SyncMode::SYNC;
-  }
+  coordinator_query->configs_ =
+      std::any_cast<std::unordered_map<Expression *, Expression *>>(ctx->configsMap->accept(this));
+  coordinator_query->sync_mode_ = [ctx]() {
+    if (ctx->ASYNC()) {
+      return CoordinatorQuery::SyncMode::ASYNC;
+    }
+    return CoordinatorQuery::SyncMode::SYNC;
+  }();

   return coordinator_query;
 }

@@ -431,17 +424,10 @@ antlrcpp::Any CypherMainVisitor::visitUnregisterInstanceOnCoordinator(
 antlrcpp::Any CypherMainVisitor::visitAddCoordinatorInstance(MemgraphCypher::AddCoordinatorInstanceContext *ctx) {
   auto *coordinator_query = storage_->Create<CoordinatorQuery>();

-  if (!ctx->raftSocketAddress()->literal()->StringLiteral()) {
-    throw SemanticException("Raft socket address should be a string literal!");
-  }
-
-  if (!ctx->raftServerId()->literal()->numberLiteral()) {
-    throw SemanticException("Raft server id should be a number literal!");
-  }
-
   coordinator_query->action_ = CoordinatorQuery::Action::ADD_COORDINATOR_INSTANCE;
-  coordinator_query->raft_socket_address_ = std::any_cast<Expression *>(ctx->raftSocketAddress()->accept(this));
-  coordinator_query->raft_server_id_ = std::any_cast<Expression *>(ctx->raftServerId()->accept(this));
+  coordinator_query->coordinator_server_id_ = std::any_cast<Expression *>(ctx->coordinatorServerId()->accept(this));
+  coordinator_query->configs_ =
+      std::any_cast<std::unordered_map<Expression *, Expression *>>(ctx->configsMap->accept(this));

   return coordinator_query;
 }

@@ -1933,7 +1919,7 @@ antlrcpp::Any CypherMainVisitor::visitNodePattern(MemgraphCypher::NodePatternCon
     anonymous_identifiers.push_back(&node->identifier_);
   }
   if (ctx->nodeLabels()) {
-    node->labels_ = std::any_cast<std::vector<LabelIx>>(ctx->nodeLabels()->accept(this));
+    node->labels_ = std::any_cast<std::vector<QueryLabelType>>(ctx->nodeLabels()->accept(this));
   }
   if (ctx->properties()) {
     // This can return either properties or parameters

@@ -1947,16 +1933,27 @@ antlrcpp::Any CypherMainVisitor::visitNodePattern(MemgraphCypher::NodePatternCon
 }

 antlrcpp::Any CypherMainVisitor::visitNodeLabels(MemgraphCypher::NodeLabelsContext *ctx) {
-  std::vector<LabelIx> labels;
+  std::vector<QueryLabelType> labels;
   for (auto *node_label : ctx->nodeLabel()) {
-    if (node_label->labelName()->symbolicName()) {
+    auto *label_name = node_label->labelName();
+    if (label_name->symbolicName()) {
       labels.emplace_back(AddLabel(std::any_cast<std::string>(node_label->accept(this))));
-    } else {
+    } else if (label_name->parameter()) {
       // If we have a parameter, we have to resolve it.
       const auto *param_lookup = std::any_cast<ParameterLookup *>(node_label->accept(this));
       const auto label_name = parameters_->AtTokenPosition(param_lookup->token_position_).ValueString();
       labels.emplace_back(storage_->GetLabelIx(label_name));
       query_info_.is_cacheable = false;  // We can't cache queries with label parameters.
+    } else {
+      auto variable = std::any_cast<std::string>(label_name->variable()->accept(this));
+      users_identifiers.insert(variable);
+      auto *expression = static_cast<Expression *>(storage_->Create<Identifier>(variable));
+      for (auto *lookup : label_name->propertyLookup()) {
+        auto key = std::any_cast<PropertyIx>(lookup->accept(this));
+        auto *property_lookup = storage_->Create<PropertyLookup>(expression, key);
+        expression = property_lookup;
+      }
+      labels.emplace_back(expression);
     }
   }
   return labels;

@@ -2504,7 +2501,7 @@ antlrcpp::Any CypherMainVisitor::visitListIndexingOrSlicing(MemgraphCypher::List
 antlrcpp::Any CypherMainVisitor::visitExpression2a(MemgraphCypher::Expression2aContext *ctx) {
   auto *expression = std::any_cast<Expression *>(ctx->expression2b()->accept(this));
   if (ctx->nodeLabels()) {
-    auto labels = std::any_cast<std::vector<LabelIx>>(ctx->nodeLabels()->accept(this));
+    auto labels = std::any_cast<std::vector<QueryLabelType>>(ctx->nodeLabels()->accept(this));
     expression = storage_->Create<LabelsTest>(expression, labels);
   }
   return expression;

@@ -2830,7 +2827,7 @@ antlrcpp::Any CypherMainVisitor::visitSetItem(MemgraphCypher::SetItemContext *ct
   // SetLabels
   auto *set_labels = storage_->Create<SetLabels>();
   set_labels->identifier_ = storage_->Create<Identifier>(std::any_cast<std::string>(ctx->variable()->accept(this)));
-  set_labels->labels_ = std::any_cast<std::vector<LabelIx>>(ctx->nodeLabels()->accept(this));
+  set_labels->labels_ = std::any_cast<std::vector<QueryLabelType>>(ctx->nodeLabels()->accept(this));
   return static_cast<Clause *>(set_labels);
 }

@@ -2853,7 +2850,7 @@ antlrcpp::Any CypherMainVisitor::visitRemoveItem(MemgraphCypher::RemoveItemConte
   // RemoveLabels
   auto *remove_labels = storage_->Create<RemoveLabels>();
   remove_labels->identifier_ = storage_->Create<Identifier>(std::any_cast<std::string>(ctx->variable()->accept(this)));
-  remove_labels->labels_ = std::any_cast<std::vector<LabelIx>>(ctx->nodeLabels()->accept(this));
+  remove_labels->labels_ = std::any_cast<std::vector<QueryLabelType>>(ctx->nodeLabels()->accept(this));
   return static_cast<Clause *>(remove_labels);
 }

@@ -193,7 +193,10 @@ nodeLabels : nodeLabel ( nodeLabel )* ;

 nodeLabel : ':' labelName ;

-labelName : symbolicName | parameter;
+labelName : symbolicName
+          | parameter
+          | variable ( propertyLookup )+
+          ;

 relTypeName : symbolicName ;

@@ -388,22 +388,22 @@ instanceName : symbolicName ;

 socketAddress : literal ;

-coordinatorSocketAddress : literal ;
-replicationSocketAddress : literal ;
-raftSocketAddress : literal ;
-
 registerReplica : REGISTER REPLICA instanceName ( SYNC | ASYNC )
                 TO socketAddress ;

-registerInstanceOnCoordinator : REGISTER INSTANCE instanceName ON coordinatorSocketAddress ( AS ASYNC ) ? WITH replicationSocketAddress ;
+configKeyValuePair : literal ':' literal ;
+
+configMap : '{' ( configKeyValuePair ( ',' configKeyValuePair )* )? '}' ;
+
+registerInstanceOnCoordinator : REGISTER INSTANCE instanceName ( AS ASYNC ) ? WITH CONFIG configsMap=configMap ;

 unregisterInstanceOnCoordinator : UNREGISTER INSTANCE instanceName ;

 setInstanceToMain : SET INSTANCE instanceName TO MAIN ;

-raftServerId : literal ;
+coordinatorServerId : literal ;

-addCoordinatorInstance : ADD COORDINATOR raftServerId ON raftSocketAddress ;
+addCoordinatorInstance : ADD COORDINATOR coordinatorServerId WITH CONFIG configsMap=configMap ;

 dropReplica : DROP REPLICA instanceName ;

@@ -457,10 +457,6 @@ commonCreateStreamConfig : TRANSFORM transformationName=procedureName

 createStream : kafkaCreateStream | pulsarCreateStream ;

-configKeyValuePair : literal ':' literal ;
-
-configMap : '{' ( configKeyValuePair ( ',' configKeyValuePair )* )? '}' ;
-
 kafkaCreateStreamConfig : TOPICS topicNames
                         | CONSUMER_GROUP consumerGroup=symbolicNameWithDotsAndMinus
                         | BOOTSTRAP_SERVERS bootstrapServers=literal

@@ -568,6 +568,44 @@ bool SymbolGenerator::PostVisit(SetProperty & /*set_property*/) {
   return true;
 }

+bool SymbolGenerator::PreVisit(SetLabels &set_labels) {
+  auto &scope = scopes_.back();
+  scope.in_set_labels = true;
+  for (auto &label : set_labels.labels_) {
+    if (auto *expression = std::get_if<Expression *>(&label)) {
+      (*expression)->Accept(*this);
+    }
+  }
+
+  return true;
+}
+
+bool SymbolGenerator::PostVisit(SetLabels & /*set_labels*/) {
+  auto &scope = scopes_.back();
+  scope.in_set_labels = false;
+
+  return true;
+}
+
+bool SymbolGenerator::PreVisit(RemoveLabels &remove_labels) {
+  auto &scope = scopes_.back();
+  scope.in_remove_labels = true;
+  for (auto &label : remove_labels.labels_) {
+    if (auto *expression = std::get_if<Expression *>(&label)) {
+      (*expression)->Accept(*this);
+    }
+  }
+
+  return true;
+}
+
+bool SymbolGenerator::PostVisit(RemoveLabels & /*remove_labels*/) {
+  auto &scope = scopes_.back();
+  scope.in_remove_labels = false;
+
+  return true;
+}
+
 // Pattern and its subparts.

 bool SymbolGenerator::PreVisit(Pattern &pattern) {

@@ -602,6 +640,15 @@ bool SymbolGenerator::PreVisit(NodeAtom &node_atom) {
   };

   scope.in_node_atom = true;
+
+  if (scope.in_create) {  // you can use expressions with labels only in create
+    for (auto &label : node_atom.labels_) {
+      if (auto *expression = std::get_if<Expression *>(&label)) {
+        (*expression)->Accept(*this);
+      }
+    }
+  }
+
   if (auto *properties = std::get_if<std::unordered_map<PropertyIx, Expression *>>(&node_atom.properties_)) {
     bool props_or_labels = !properties->empty() || !node_atom.labels_.empty();

@@ -68,6 +68,10 @@ class SymbolGenerator : public HierarchicalTreeVisitor {
   bool PostVisit(Foreach &) override;
   bool PreVisit(SetProperty & /*set_property*/) override;
   bool PostVisit(SetProperty & /*set_property*/) override;
+  bool PreVisit(SetLabels &) override;
+  bool PostVisit(SetLabels & /*set_labels*/) override;
+  bool PreVisit(RemoveLabels &) override;
+  bool PostVisit(RemoveLabels & /*remove_labels*/) override;

   // Expressions
   ReturnType Visit(Identifier &) override;

@@ -130,6 +134,8 @@ class SymbolGenerator : public HierarchicalTreeVisitor {
     bool in_set_property{false};
     bool in_call_subquery{false};
     bool has_return{false};
+    bool in_set_labels{false};
+    bool in_remove_labels{false};
     // True when visiting a pattern atom (node or edge) identifier, which can be
     // reused or created in the pattern itself.
     bool in_pattern_atom_identifier{false};

@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@@ -761,13 +761,19 @@ TypedValue Range(const TypedValue *args, int64_t nargs, const FunctionContext &c
   int64_t step = nargs == 3 ? args[2].ValueInt() : 1;
   TypedValue::TVector list(ctx.memory);
   if (lbound <= rbound && step > 0) {
+    int64_t n = ((rbound - lbound + 1) + (step - 1)) / step;
+    list.reserve(n);
     for (auto i = lbound; i <= rbound; i += step) {
       list.emplace_back(i);
     }
+    MG_ASSERT(list.size() == n);
   } else if (lbound >= rbound && step < 0) {
+    int64_t n = ((lbound - rbound + 1) + (-step - 1)) / -step;
+    list.reserve(n);
     for (auto i = lbound; i >= rbound; i += step) {
       list.emplace_back(i);
     }
+    MG_ASSERT(list.size() == n);
   }
   return TypedValue(std::move(list));
 }
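The reserve size in Range is a ceiling division: an inclusive range holds ceil((rbound - lbound + 1) / step) elements, computed in integer arithmetic as (count + step - 1) / step. Worked example: lbound = 1, rbound = 10, step = 3 gives ((10 - 1 + 1) + 2) / 3 = 4, matching the elements 1, 4, 7, 10. The same check, runnable:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    int main() {
      int64_t const lbound = 1, rbound = 10, step = 3;
      int64_t const n = ((rbound - lbound + 1) + (step - 1)) / step;  // ceil(10 / 3) == 4
      std::vector<int64_t> list;
      list.reserve(n);
      for (auto i = lbound; i <= rbound; i += step) list.emplace_back(i);
      assert(list.size() == static_cast<std::size_t>(n));  // holds 1, 4, 7, 10
    }
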
@@ -1,4 +1,4 @@
-// Copyright 2023 Memgraph Ltd.
+// Copyright 2024 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source

@@ -13,12 +13,12 @@

 namespace memgraph::query {

-int64_t EvaluateInt(ExpressionEvaluator *evaluator, Expression *expr, const std::string &what) {
+int64_t EvaluateInt(ExpressionEvaluator *evaluator, Expression *expr, std::string_view what) {
   TypedValue value = expr->Accept(*evaluator);
   try {
     return value.ValueInt();
   } catch (TypedValueException &e) {
-    throw QueryRuntimeException(what + " must be an int");
+    throw QueryRuntimeException(std::string(what) + " must be an int");
   }
 }

@@ -226,7 +226,6 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
     }                                                                   \
   }

-  BINARY_OPERATOR_VISITOR(OrOperator, ||, OR);
   BINARY_OPERATOR_VISITOR(XorOperator, ^, XOR);
   BINARY_OPERATOR_VISITOR(AdditionOperator, +, +);
   BINARY_OPERATOR_VISITOR(SubtractionOperator, -, -);

@@ -261,6 +260,20 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
     }
   }

+  TypedValue Visit(OrOperator &op) override {
+    auto value1 = op.expression1_->Accept(*this);
+    if (value1.IsBool() && value1.ValueBool()) {
+      // If first expression is true, don't evaluate the second one.
+      return value1;
+    }
+    auto value2 = op.expression2_->Accept(*this);
+    try {
+      return value1 || value2;
+    } catch (const TypedValueException &) {
+      throw QueryRuntimeException("Invalid types: {} and {} for OR.", value1.type(), value2.type());
+    }
+  }
+
   TypedValue Visit(IfOperator &if_operator) override {
     auto condition = if_operator.condition_->Accept(*this);
     if (condition.IsNull()) {
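OR leaves the generated BINARY_OPERATOR_VISITOR macro because the evaluator must short-circuit by hand: both operands are AST nodes, so the right subtree has to be skipped explicitly once the left one is already true (it could be expensive or side-effecting). The control flow in miniature, with thunks in place of AST children:

    #include <functional>

    bool EvalOr(const std::function<bool()> &lhs, const std::function<bool()> &rhs) {
      if (lhs()) {
        return true;  // short-circuit: rhs() is never invoked
      }
      return rhs();
    }
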
@@ -1196,7 +1209,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
 /// @param what - Name of what's getting evaluated. Used for user feedback (via
 ///               exception) when the evaluated value is not an int.
 /// @throw QueryRuntimeException if expression doesn't evaluate to an int.
-int64_t EvaluateInt(ExpressionEvaluator *evaluator, Expression *expr, const std::string &what);
+int64_t EvaluateInt(ExpressionEvaluator *evaluator, Expression *expr, std::string_view what);

 std::optional<size_t> EvaluateMemoryLimit(ExpressionVisitor<TypedValue> &eval, Expression *memory_limit,
                                           size_t memory_scale);

@@ -246,27 +246,6 @@ std::optional<std::string> GetOptionalStringValue(query::Expression *expression,
   return {};
 };

-bool IsAllShortestPathsQuery(const std::vector<memgraph::query::Clause *> &clauses) {
-  for (const auto &clause : clauses) {
-    if (clause->GetTypeInfo() != Match::kType) {
-      continue;
-    }
-    auto *match_clause = utils::Downcast<Match>(clause);
-    for (const auto &pattern : match_clause->patterns_) {
-      for (const auto &atom : pattern->atoms_) {
-        if (atom->GetTypeInfo() != EdgeAtom::kType) {
-          continue;
-        }
-        auto *edge_atom = utils::Downcast<EdgeAtom>(atom);
-        if (edge_atom->type_ == EdgeAtom::Type::ALL_SHORTEST_PATHS) {
-          return true;
-        }
-      }
-    }
-  }
-  return false;
-}
-
 inline auto convertFromCoordinatorToReplicationMode(const CoordinatorQuery::SyncMode &sync_mode)
     -> replication_coordination_glue::ReplicationMode {
   switch (sync_mode) {

@@ -1146,6 +1125,27 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
 }

 #ifdef MG_ENTERPRISE

+auto ParseConfigMap(std::unordered_map<Expression *, Expression *> const &config_map,
+                    ExpressionVisitor<TypedValue> &evaluator)
+    -> std::optional<std::map<std::string, std::string, std::less<>>> {
+  if (std::ranges::any_of(config_map, [&evaluator](const auto &entry) {
+        auto key_expr = entry.first->Accept(evaluator);
+        auto value_expr = entry.second->Accept(evaluator);
+        return !key_expr.IsString() || !value_expr.IsString();
+      })) {
+    spdlog::error("Config map must contain only string keys and values!");
+    return std::nullopt;
+  }
+
+  return ranges::views::all(config_map) | ranges::views::transform([&evaluator](const auto &entry) {
+           auto key_expr = entry.first->Accept(evaluator);
+           auto value_expr = entry.second->Accept(evaluator);
+           return std::pair{key_expr.ValueString(), value_expr.ValueString()};
+         }) |
+         ranges::to<std::map<std::string, std::string, std::less<>>>;
+}
+
 Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Parameters &parameters,
                                 coordination::CoordinatorState *coordinator_state,
                                 const query::InterpreterConfig &config, std::vector<Notification> *notifications) {
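ParseConfigMap first validates that every key and value evaluates to a string, then materializes the result through a ranges::views::transform | ranges::to pipeline. The same pipeline shape on plain strings (range-v3's ranges::to assumed, used exactly as in the patch; the entry values are made-up examples):

    #include <functional>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    #include <range/v3/range/conversion.hpp>
    #include <range/v3/view/transform.hpp>

    int main() {
      std::vector<std::pair<std::string, std::string>> entries{{"bolt_server", "127.0.0.1:7687"}};
      auto config = entries | ranges::views::transform([](const auto &entry) {
                      return std::pair{entry.first, entry.second};
                    }) |
                    ranges::to<std::map<std::string, std::string, std::less<>>>;
      return config.size() == 1 ? 0 : 1;
    }
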
@@ -1173,17 +1173,37 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
       EvaluationContext evaluation_context{.timestamp = QueryTimestamp(), .parameters = parameters};
       auto evaluator = PrimitiveLiteralExpressionEvaluator{evaluation_context};

-      auto raft_socket_address_tv = coordinator_query->raft_socket_address_->Accept(evaluator);
-      auto raft_server_id_tv = coordinator_query->raft_server_id_->Accept(evaluator);
-      callback.fn = [handler = CoordQueryHandler{*coordinator_state}, raft_socket_address_tv,
-                     raft_server_id_tv]() mutable {
-        handler.AddCoordinatorInstance(raft_server_id_tv.ValueInt(), std::string(raft_socket_address_tv.ValueString()));
+      auto config_map = ParseConfigMap(coordinator_query->configs_, evaluator);
+      if (!config_map) {
+        throw QueryRuntimeException("Failed to parse config map!");
+      }
+
+      if (config_map->size() != 2) {
+        throw QueryRuntimeException("Config map must contain exactly 2 entries: {} and {}!", kCoordinatorServer,
+                                    kBoltServer);
+      }
+
+      auto const &coordinator_server_it = config_map->find(kCoordinatorServer);
+      if (coordinator_server_it == config_map->end()) {
+        throw QueryRuntimeException("Config map must contain {} entry!", kCoordinatorServer);
+      }
+
+      auto const &bolt_server_it = config_map->find(kBoltServer);
+      if (bolt_server_it == config_map->end()) {
+        throw QueryRuntimeException("Config map must contain {} entry!", kBoltServer);
+      }
+
+      auto coord_server_id = coordinator_query->coordinator_server_id_->Accept(evaluator).ValueInt();
+
+      callback.fn = [handler = CoordQueryHandler{*coordinator_state}, coord_server_id,
+                     coordinator_server = coordinator_server_it->second]() mutable {
+        handler.AddCoordinatorInstance(coord_server_id, coordinator_server);
         return std::vector<std::vector<TypedValue>>();
       };

       notifications->emplace_back(SeverityLevel::INFO, NotificationCode::ADD_COORDINATOR_INSTANCE,
                                   fmt::format("Coordinator has added instance {} on coordinator server {}.",
-                                              coordinator_query->instance_name_, raft_socket_address_tv.ValueString()));
+                                              coordinator_query->instance_name_, coordinator_server_it->second));
       return callback;
     }
     case CoordinatorQuery::Action::REGISTER_INSTANCE: {

@@ -1194,27 +1214,49 @@ Callback HandleCoordinatorQuery(CoordinatorQuery *coordinator_query, const Param
       // the argument to Callback.
       EvaluationContext evaluation_context{.timestamp = QueryTimestamp(), .parameters = parameters};
       auto evaluator = PrimitiveLiteralExpressionEvaluator{evaluation_context};
+      auto config_map = ParseConfigMap(coordinator_query->configs_, evaluator);

-      auto coordinator_socket_address_tv = coordinator_query->coordinator_socket_address_->Accept(evaluator);
-      auto replication_socket_address_tv = coordinator_query->replication_socket_address_->Accept(evaluator);
-      callback.fn = [handler = CoordQueryHandler{*coordinator_state}, coordinator_socket_address_tv,
-                     replication_socket_address_tv,
+      if (!config_map) {
+        throw QueryRuntimeException("Failed to parse config map!");
+      }
+
+      if (config_map->size() != 3) {
+        throw QueryRuntimeException("Config map must contain exactly 3 entries: {}, {} and {}!", kBoltServer,
+                                    kManagementServer, kReplicationServer);
+      }
+
+      auto const &replication_server_it = config_map->find(kReplicationServer);
+      if (replication_server_it == config_map->end()) {
+        throw QueryRuntimeException("Config map must contain {} entry!", kReplicationServer);
+      }
+
+      auto const &management_server_it = config_map->find(kManagementServer);
+      if (management_server_it == config_map->end()) {
+        throw QueryRuntimeException("Config map must contain {} entry!", kManagementServer);
+      }
+
+      auto const &bolt_server_it = config_map->find(kBoltServer);
+      if (bolt_server_it == config_map->end()) {
+        throw QueryRuntimeException("Config map must contain {} entry!", kBoltServer);
+      }
+
+      callback.fn = [handler = CoordQueryHandler{*coordinator_state},
                      instance_health_check_frequency_sec = config.instance_health_check_frequency_sec,
+                     management_server = management_server_it->second,
+                     replication_server = replication_server_it->second, bolt_server = bolt_server_it->second,
                      instance_name = coordinator_query->instance_name_,
                      instance_down_timeout_sec = config.instance_down_timeout_sec,
                      instance_get_uuid_frequency_sec = config.instance_get_uuid_frequency_sec,
                      sync_mode = coordinator_query->sync_mode_]() mutable {
-        handler.RegisterReplicationInstance(std::string(coordinator_socket_address_tv.ValueString()),
-                                            std::string(replication_socket_address_tv.ValueString()),
-                                            instance_health_check_frequency_sec, instance_down_timeout_sec,
-                                            instance_get_uuid_frequency_sec, instance_name, sync_mode);
+        handler.RegisterReplicationInstance(management_server, replication_server, instance_health_check_frequency_sec,
+                                            instance_down_timeout_sec, instance_get_uuid_frequency_sec, instance_name,
+                                            sync_mode);
         return std::vector<std::vector<TypedValue>>();
       };

-      notifications->emplace_back(
-          SeverityLevel::INFO, NotificationCode::REGISTER_REPLICATION_INSTANCE,
-          fmt::format("Coordinator has registered coordinator server on {} for instance {}.",
-                      coordinator_socket_address_tv.ValueString(), coordinator_query->instance_name_));
+      notifications->emplace_back(SeverityLevel::INFO, NotificationCode::REGISTER_REPLICATION_INSTANCE,
+                                  fmt::format("Coordinator has registered replication instance on {} for instance {}.",
+                                              bolt_server_it->second, coordinator_query->instance_name_));
       return callback;
     }
     case CoordinatorQuery::Action::UNREGISTER_INSTANCE:

@ -1670,8 +1712,7 @@ struct PullPlan {
|
||||
std::shared_ptr<QueryUserOrRole> user_or_role, std::atomic<TransactionStatus> *transaction_status,
|
||||
std::shared_ptr<utils::AsyncTimer> tx_timer,
|
||||
TriggerContextCollector *trigger_context_collector = nullptr,
|
||||
std::optional<size_t> memory_limit = {}, bool use_monotonic_memory = true,
|
||||
FrameChangeCollector *frame_change_collector_ = nullptr);
|
||||
std::optional<size_t> memory_limit = {}, FrameChangeCollector *frame_change_collector_ = nullptr);
|
||||
|
||||
std::optional<plan::ProfilingStatsWithTotalTime> Pull(AnyStream *stream, std::optional<int> n,
|
||||
const std::vector<Symbol> &output_symbols,
|
||||
@ -1696,26 +1737,17 @@ struct PullPlan {
|
||||
// we have to keep track of any unsent results from previous `PullPlan::Pull`
|
||||
// manually by using this flag.
|
||||
bool has_unsent_results_ = false;
|
||||
|
||||
// In the case of LOAD CSV, we want to use only PoolResource without MonotonicMemoryResource
|
||||
// to reuse allocated memory. As LOAD CSV is processing row by row
|
||||
// it is possible to reduce memory usage significantly if MemoryResource deals with memory allocation
|
||||
// can reuse memory that was allocated on processing the first row on all subsequent rows.
|
||||
// This flag signals to `PullPlan::Pull` which MemoryResource to use
|
||||
bool use_monotonic_memory_;
|
||||
};
|
||||
|
||||
PullPlan::PullPlan(const std::shared_ptr<PlanWrapper> plan, const Parameters ¶meters, const bool is_profile_query,
|
||||
DbAccessor *dba, InterpreterContext *interpreter_context, utils::MemoryResource *execution_memory,
|
||||
std::shared_ptr<QueryUserOrRole> user_or_role, std::atomic<TransactionStatus> *transaction_status,
|
||||
std::shared_ptr<utils::AsyncTimer> tx_timer, TriggerContextCollector *trigger_context_collector,
|
||||
const std::optional<size_t> memory_limit, bool use_monotonic_memory,
|
||||
FrameChangeCollector *frame_change_collector)
|
||||
const std::optional<size_t> memory_limit, FrameChangeCollector *frame_change_collector)
|
||||
: plan_(plan),
|
||||
cursor_(plan->plan().MakeCursor(execution_memory)),
|
||||
frame_(plan->symbol_table().max_position(), execution_memory),
|
||||
memory_limit_(memory_limit),
|
||||
use_monotonic_memory_(use_monotonic_memory) {
|
||||
memory_limit_(memory_limit) {
|
||||
ctx_.db_accessor = dba;
|
||||
ctx_.symbol_table = plan->symbol_table();
|
||||
ctx_.evaluation_context.timestamp = QueryTimestamp();
|
||||
@ -1741,6 +1773,7 @@ PullPlan::PullPlan(const std::shared_ptr<PlanWrapper> plan, const Parameters &pa
|
||||
ctx_.is_profile_query = is_profile_query;
|
||||
ctx_.trigger_context_collector = trigger_context_collector;
|
||||
ctx_.frame_change_collector = frame_change_collector;
|
||||
ctx_.evaluation_context.memory = execution_memory;
|
||||
}
|
||||
|
||||
std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::Pull(AnyStream *stream, std::optional<int> n,
|
||||
@ -1764,43 +1797,14 @@ std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::Pull(AnyStream *strea
|
||||
}
|
||||
}};
|
||||
|
||||
// Set up temporary memory for a single Pull. Initial memory comes from the
|
||||
// stack. 256 KiB should fit on the stack and should be more than enough for a
|
||||
// single `Pull`.
|
||||
static constexpr size_t stack_size = 256UL * 1024UL;
|
||||
char stack_data[stack_size];
|
||||
|
||||
utils::ResourceWithOutOfMemoryException resource_with_exception;
|
||||
utils::MonotonicBufferResource monotonic_memory{&stack_data[0], stack_size, &resource_with_exception};
|
||||
std::optional<utils::PoolResource> pool_memory;
|
||||
static constexpr auto kMaxBlockPerChunks = 128;
|
||||
|
||||
if (!use_monotonic_memory_) {
|
||||
pool_memory.emplace(kMaxBlockPerChunks, kExecutionPoolMaxBlockSize, &resource_with_exception,
|
||||
&resource_with_exception);
|
||||
} else {
|
||||
// We can throw on every query because a simple queries for deleting will use only
|
||||
// the stack allocated buffer.
|
||||
// Also, we want to throw only when the query engine requests more memory and not the storage
|
||||
// so we add the exception to the allocator.
|
||||
// TODO (mferencevic): Tune the parameters accordingly.
|
||||
pool_memory.emplace(kMaxBlockPerChunks, 1024, &monotonic_memory, &resource_with_exception);
|
||||
}
|
||||
|
||||
ctx_.evaluation_context.memory = &*pool_memory;
|
||||
|
||||
// Returns true if a result was pulled.
|
||||
const auto pull_result = [&]() -> bool { return cursor_->Pull(frame_, ctx_); };
|
||||
|
||||
const auto stream_values = [&]() {
|
||||
// TODO: The streamed values should also probably use the above memory.
|
||||
std::vector<TypedValue> values;
|
||||
values.reserve(output_symbols.size());
|
||||
|
||||
for (const auto &symbol : output_symbols) {
|
||||
values.emplace_back(frame_[symbol]);
|
||||
auto values = std::vector<TypedValue>(output_symbols.size());
|
||||
const auto stream_values = [&] {
|
||||
for (auto const i : ranges::views::iota(0UL, output_symbols.size())) {
|
||||
values[i] = frame_[output_symbols[i]];
|
||||
}
|
||||
|
||||
stream->Result(values);
|
||||
};
|
||||
|
||||
@ -1910,7 +1914,6 @@ PreparedQuery Interpreter::PrepareTransactionQuery(std::string_view query_upper,
  std::function<void()> handler;

  if (query_upper == "BEGIN") {
    ResetInterpreter();
    // TODO: Evaluate doing move(extras). Currently extras is very small, but this will be important if it ever
    // becomes large.
    handler = [this, extras = extras] {
@ -1988,30 +1991,6 @@ inline static void TryCaching(const AstStorage &ast_storage, FrameChangeCollecto
  }
}

bool IsLoadCsvQuery(const std::vector<memgraph::query::Clause *> &clauses) {
  return std::any_of(clauses.begin(), clauses.end(),
                     [](memgraph::query::Clause const *clause) { return clause->GetTypeInfo() == LoadCsv::kType; });
}

bool IsCallBatchedProcedureQuery(const std::vector<memgraph::query::Clause *> &clauses) {
  EvaluationContext evaluation_context;

  return std::ranges::any_of(clauses, [&evaluation_context](memgraph::query::Clause *clause) -> bool {
    if (!(clause->GetTypeInfo() == CallProcedure::kType)) return false;
    auto *call_procedure_clause = utils::Downcast<CallProcedure>(clause);

    const auto &maybe_found = memgraph::query::procedure::FindProcedure(
        procedure::gModuleRegistry, call_procedure_clause->procedure_name_, evaluation_context.memory);
    if (!maybe_found) {
      throw QueryRuntimeException("There is no procedure named '{}'.", call_procedure_clause->procedure_name_);
    }
    const auto &[module, proc] = *maybe_found;
    if (!proc->info.is_batched) return false;
    spdlog::trace("Using PoolResource for batched query procedure");
    return true;
  });
}

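Both helpers are the same shape: a linear scan of the parsed clauses with a type-identity predicate, short-circuiting on the first hit. A self-contained sketch of the pattern (the `Clause`/`kType` machinery here is a stand-in for the query AST, not the real types):

```cpp
#include <algorithm>
#include <vector>

struct TypeInfo {
  int id;
  bool operator==(const TypeInfo &) const = default;
};

struct Clause {
  virtual ~Clause() = default;
  virtual TypeInfo GetTypeInfo() const = 0;
};

struct LoadCsv final : Clause {
  static constexpr TypeInfo kType{1};
  TypeInfo GetTypeInfo() const override { return kType; }
};

// True if any clause in the query is a LOAD CSV clause.
bool IsLoadCsvQuery(const std::vector<Clause *> &clauses) {
  return std::any_of(clauses.begin(), clauses.end(),
                     [](const Clause *clause) { return clause->GetTypeInfo() == LoadCsv::kType; });
}
```
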
PreparedQuery PrepareCypherQuery(ParsedQuery parsed_query, std::map<std::string, TypedValue> *summary,
                                 InterpreterContext *interpreter_context, CurrentDB &current_db,
                                 utils::MemoryResource *execution_memory, std::vector<Notification> *notifications,
@ -2031,7 +2010,6 @@ PreparedQuery PrepareCypherQuery(ParsedQuery parsed_query, std::map<std::string,
    spdlog::info("Running query with memory limit of {}", utils::GetReadableSize(*memory_limit));
  }
  auto clauses = cypher_query->single_query_->clauses_;
  bool contains_csv = false;
  if (std::any_of(clauses.begin(), clauses.end(),
                  [](const auto *clause) { return clause->GetTypeInfo() == LoadCsv::kType; })) {
    notifications->emplace_back(
@ -2039,13 +2017,8 @@ PreparedQuery PrepareCypherQuery(ParsedQuery parsed_query, std::map<std::string,
        "It's important to note that the parser parses the values as strings. It's up to the user to "
        "convert the parsed row values to the appropriate type. This can be done using the built-in "
        "conversion functions such as ToInteger, ToFloat, ToBoolean etc.");
    contains_csv = true;
  }

  // If this is a LOAD CSV query, use PoolResource without MonotonicMemoryResource as we want to reuse allocated memory
  auto use_monotonic_memory =
      !contains_csv && !IsCallBatchedProcedureQuery(clauses) && !IsAllShortestPathsQuery(clauses);

  MG_ASSERT(current_db.execution_db_accessor_, "Cypher query expects a current DB transaction");
  auto *dba =
      &*current_db
@ -2084,7 +2057,7 @@ PreparedQuery PrepareCypherQuery(ParsedQuery parsed_query, std::map<std::string,
      current_db.trigger_context_collector_ ? &*current_db.trigger_context_collector_ : nullptr;
  auto pull_plan = std::make_shared<PullPlan>(
      plan, parsed_query.parameters, false, dba, interpreter_context, execution_memory, std::move(user_or_role),
      transaction_status, std::move(tx_timer), trigger_context_collector, memory_limit, use_monotonic_memory,
      transaction_status, std::move(tx_timer), trigger_context_collector, memory_limit,
      frame_change_collector->IsTrackingValues() ? frame_change_collector : nullptr);
  return PreparedQuery{std::move(header), std::move(parsed_query.required_privileges),
                       [pull_plan = std::move(pull_plan), output_symbols = std::move(output_symbols), summary](
@ -2198,18 +2171,6 @@ PreparedQuery PrepareProfileQuery(ParsedQuery parsed_query, bool in_explicit_tra

  auto *cypher_query = utils::Downcast<CypherQuery>(parsed_inner_query.query);

  bool contains_csv = false;
  auto clauses = cypher_query->single_query_->clauses_;
  if (std::any_of(clauses.begin(), clauses.end(),
                  [](const auto *clause) { return clause->GetTypeInfo() == LoadCsv::kType; })) {
    contains_csv = true;
  }

  // If this is a LOAD CSV, BatchedProcedure or AllShortest query, use PoolResource without MonotonicMemoryResource as
  // we want to reuse allocated memory
  auto use_monotonic_memory =
      !contains_csv && !IsCallBatchedProcedureQuery(clauses) && !IsAllShortestPathsQuery(clauses);

  MG_ASSERT(cypher_query, "Cypher grammar should not allow other queries in PROFILE");
  EvaluationContext evaluation_context;
  evaluation_context.timestamp = QueryTimestamp();
@ -2243,14 +2204,14 @@ PreparedQuery PrepareProfileQuery(ParsedQuery parsed_query, bool in_explicit_tra
      // We want to execute the query we are profiling lazily, so we delay
      // the construction of the corresponding context.
      stats_and_total_time = std::optional<plan::ProfilingStatsWithTotalTime>{},
      pull_plan = std::shared_ptr<PullPlanVector>(nullptr), transaction_status, use_monotonic_memory,
      frame_change_collector, tx_timer = std::move(tx_timer)](
          AnyStream *stream, std::optional<int> n) mutable -> std::optional<QueryHandlerResult> {
      pull_plan = std::shared_ptr<PullPlanVector>(nullptr), transaction_status, frame_change_collector,
      tx_timer = std::move(tx_timer)](AnyStream *stream,
                                      std::optional<int> n) mutable -> std::optional<QueryHandlerResult> {
        // No output symbols are given so that nothing is streamed.
        if (!stats_and_total_time) {
          stats_and_total_time =
              PullPlan(plan, parameters, true, dba, interpreter_context, execution_memory, std::move(user_or_role),
                       transaction_status, std::move(tx_timer), nullptr, memory_limit, use_monotonic_memory,
                       transaction_status, std::move(tx_timer), nullptr, memory_limit,
                       frame_change_collector->IsTrackingValues() ? frame_change_collector : nullptr)
                  .Pull(stream, {}, {}, summary);
          pull_plan = std::make_shared<PullPlanVector>(ProfilingStatsToTable(*stats_and_total_time));
@ -4213,6 +4174,7 @@ PreparedQuery PrepareShowDatabasesQuery(ParsedQuery parsed_query, InterpreterCon
std::optional<uint64_t> Interpreter::GetTransactionId() const { return current_transaction_; }

void Interpreter::BeginTransaction(QueryExtras const &extras) {
  ResetInterpreter();
  const auto prepared_query = PrepareTransactionQuery("BEGIN", extras);
  prepared_query.query_handler(nullptr, {});
}
@ -4247,12 +4209,12 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
  const auto upper_case_query = utils::ToUpperCase(query_string);
  const auto trimmed_query = utils::Trim(upper_case_query);
  if (trimmed_query == "BEGIN" || trimmed_query == "COMMIT" || trimmed_query == "ROLLBACK") {
    auto resource = utils::MonotonicBufferResource(kExecutionMemoryBlockSize);
    auto prepared_query = PrepareTransactionQuery(trimmed_query, extras);
    auto &query_execution =
        query_executions_.emplace_back(QueryExecution::Create(std::move(resource), std::move(prepared_query)));
    std::optional<int> qid =
        in_explicit_transaction_ ? static_cast<int>(query_executions_.size() - 1) : std::optional<int>{};
    if (trimmed_query == "BEGIN") {
      ResetInterpreter();
    }
    auto &query_execution = query_executions_.emplace_back(QueryExecution::Create());
    query_execution->prepared_query = PrepareTransactionQuery(trimmed_query, extras);
    auto qid = in_explicit_transaction_ ? static_cast<int>(query_executions_.size() - 1) : std::optional<int>{};
    return {query_execution->prepared_query->header, query_execution->prepared_query->privileges, qid, {}};
  }

@ -4282,35 +4244,8 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
      ParseQuery(query_string, params, &interpreter_context_->ast_cache, interpreter_context_->config.query);
  auto parsing_time = parsing_timer.Elapsed().count();

  CypherQuery const *const cypher_query = [&]() -> CypherQuery * {
    if (auto *cypher_query = utils::Downcast<CypherQuery>(parsed_query.query)) {
      return cypher_query;
    }
    if (auto *profile_query = utils::Downcast<ProfileQuery>(parsed_query.query)) {
      return profile_query->cypher_query_;
    }
    return nullptr;
  }();  // IILE

  auto const [usePool, hasAllShortestPaths] = [&]() -> std::pair<bool, bool> {
    if (!cypher_query) {
      return {false, false};
    }
    auto const &clauses = cypher_query->single_query_->clauses_;
    bool hasAllShortestPaths = IsAllShortestPathsQuery(clauses);
    // Using PoolResource without MonotonicMemoryResource for LOAD CSV reduces memory usage.
    bool usePool = hasAllShortestPaths || IsCallBatchedProcedureQuery(clauses) || IsLoadCsvQuery(clauses);
    return {usePool, hasAllShortestPaths};
  }();  // IILE

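Both blocks are immediately invoked lambda expressions (IILE): the lambda runs on the spot so the result can be bound exactly once, kept `const`, and still be computed with early returns. The idiom in isolation:

```cpp
#include <utility>

std::pair<bool, bool> classify(int clause_count, bool has_csv) {
  // IILE: invoke the lambda immediately (note the trailing parentheses),
  // binding the result once even though computing it branches.
  auto const [use_pool, has_all_shortest] = [&]() -> std::pair<bool, bool> {
    if (clause_count == 0) return {false, false};
    bool const all_shortest = clause_count > 10;  // stand-in for IsAllShortestPathsQuery
    return {all_shortest || has_csv, all_shortest};
  }();  // IILE
  return {use_pool, has_all_shortest};
}
```
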
  // Setup QueryExecution
  // its MemoryResource is mostly used for allocations done on Frame and storing `row`s
  if (usePool) {
    query_executions_.emplace_back(QueryExecution::Create(utils::PoolResource(128, kExecutionPoolMaxBlockSize)));
  } else {
    query_executions_.emplace_back(QueryExecution::Create(utils::MonotonicBufferResource(kExecutionMemoryBlockSize)));
  }

  query_executions_.emplace_back(QueryExecution::Create());
  auto &query_execution = query_executions_.back();
  query_execution_ptr = &query_execution;

@ -4379,9 +4314,7 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,

  utils::Timer planning_timer;
  PreparedQuery prepared_query;
  utils::MemoryResource *memory_resource =
      std::visit([](auto &execution_memory) -> utils::MemoryResource * { return &execution_memory; },
                 query_execution->execution_memory);
  utils::MemoryResource *memory_resource = query_execution->execution_memory.resource();
  frame_change_collector_.reset();
  frame_change_collector_.emplace();
  if (utils::Downcast<CypherQuery>(parsed_query.query)) {
@ -4392,10 +4325,10 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
    prepared_query = PrepareExplainQuery(std::move(parsed_query), &query_execution->summary,
                                         &query_execution->notifications, interpreter_context_, current_db_);
  } else if (utils::Downcast<ProfileQuery>(parsed_query.query)) {
    prepared_query = PrepareProfileQuery(std::move(parsed_query), in_explicit_transaction_, &query_execution->summary,
                                         &query_execution->notifications, interpreter_context_, current_db_,
                                         &query_execution->execution_memory_with_exception, user_or_role_,
                                         &transaction_status_, current_timeout_timer_, &*frame_change_collector_);
    prepared_query =
        PrepareProfileQuery(std::move(parsed_query), in_explicit_transaction_, &query_execution->summary,
                            &query_execution->notifications, interpreter_context_, current_db_, memory_resource,
                            user_or_role_, &transaction_status_, current_timeout_timer_, &*frame_change_collector_);
  } else if (utils::Downcast<DumpQuery>(parsed_query.query)) {
    prepared_query = PrepareDumpQuery(std::move(parsed_query), current_db_);
  } else if (utils::Downcast<IndexQuery>(parsed_query.query)) {
@ -4597,7 +4530,7 @@ void RunTriggersAfterCommit(dbms::DatabaseAccess db_acc, InterpreterContext *int
                            std::atomic<TransactionStatus> *transaction_status) {
  // Run the triggers
  for (const auto &trigger : db_acc->trigger_store()->AfterCommitTriggers().access()) {
    utils::MonotonicBufferResource execution_memory{kExecutionMemoryBlockSize};
    QueryAllocator execution_memory{};

    // create a new transaction for each trigger
    auto tx_acc = db_acc->Access();
@ -4608,7 +4541,7 @@ void RunTriggersAfterCommit(dbms::DatabaseAccess db_acc, InterpreterContext *int
    auto trigger_context = original_trigger_context;
    trigger_context.AdaptForAccessor(&db_accessor);
    try {
      trigger.Execute(&db_accessor, &execution_memory, flags::run_time::GetExecutionTimeout(),
      trigger.Execute(&db_accessor, execution_memory.resource(), flags::run_time::GetExecutionTimeout(),
                      &interpreter_context->is_shutting_down, transaction_status, trigger_context);
    } catch (const utils::BasicException &exception) {
      spdlog::warn("Trigger '{}' failed with exception:\n{}", trigger.Name(), exception.what());
@ -4762,11 +4695,12 @@ void Interpreter::Commit() {
  if (trigger_context) {
    // Run the triggers
    for (const auto &trigger : db->trigger_store()->BeforeCommitTriggers().access()) {
      utils::MonotonicBufferResource execution_memory{kExecutionMemoryBlockSize};
      QueryAllocator execution_memory{};
      AdvanceCommand();
      try {
        trigger.Execute(&*current_db_.execution_db_accessor_, &execution_memory, flags::run_time::GetExecutionTimeout(),
                        &interpreter_context_->is_shutting_down, &transaction_status_, *trigger_context);
        trigger.Execute(&*current_db_.execution_db_accessor_, execution_memory.resource(),
                        flags::run_time::GetExecutionTimeout(), &interpreter_context_->is_shutting_down,
                        &transaction_status_, *trigger_context);
      } catch (const utils::BasicException &e) {
        throw utils::BasicException(
            fmt::format("Trigger '{}' caused the transaction to fail.\nException: {}", trigger.Name(), e.what()));

@ -65,6 +65,54 @@ extern const Event SuccessfulQuery;

namespace memgraph::query {

struct QueryAllocator {
  QueryAllocator() = default;
  QueryAllocator(QueryAllocator const &) = delete;
  QueryAllocator &operator=(QueryAllocator const &) = delete;

  // No move: the addresses of the pool & monotonic fields must stay stable
  QueryAllocator(QueryAllocator &&) = delete;
  QueryAllocator &operator=(QueryAllocator &&) = delete;

  auto resource() -> utils::MemoryResource * {
#ifndef MG_MEMORY_PROFILE
    return &pool;
#else
    return upstream_resource();
#endif
  }
  auto resource_without_pool() -> utils::MemoryResource * {
#ifndef MG_MEMORY_PROFILE
    return &monotonic;
#else
    return upstream_resource();
#endif
  }
  auto resource_without_pool_or_mono() -> utils::MemoryResource * { return upstream_resource(); }

 private:
  // At least one page to ensure not sharing a page with other subsystems
  static constexpr auto kMonotonicInitialSize = 4UL * 1024UL;
  // TODO: need to profile to check for good defaults, also maybe PoolResource
  // needs to be smarter. We expect more reuse of smaller objects than larger
  // objects. 64*1024B is maybe wasteful, whereas 256*32B maybe sensible.
  // Depends on the number of small objects expected.
  static constexpr auto kPoolBlockPerChunk = 64UL;
  static constexpr auto kPoolMaxBlockSize = 1024UL;

  static auto upstream_resource() -> utils::MemoryResource * {
    // singleton ResourceWithOutOfMemoryException
    // explicitly backed by NewDeleteResource
    static auto upstream = utils::ResourceWithOutOfMemoryException{utils::NewDeleteResource()};
    return &upstream;
  }

#ifndef MG_MEMORY_PROFILE
  memgraph::utils::MonotonicBufferResource monotonic{kMonotonicInitialSize, upstream_resource()};
  memgraph::utils::PoolResource pool{kPoolBlockPerChunk, &monotonic, upstream_resource()};
#endif
};

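Callers never see the monotonic/pool layering directly; they only receive a `utils::MemoryResource *` from `resource()`. A hedged sketch of the intended consumption (the `Frame` and `MakeCursor` calls are illustrative stand-ins for the real query-engine entry points):

```cpp
// Illustrative only: how a caller consumes QueryAllocator as declared above.
void RunPlan(const plan::LogicalOperator &root, size_t frame_size) {
  QueryAllocator execution_memory;  // non-copyable, non-movable: addresses stay stable

  // One arena for everything produced while executing this query.
  utils::MemoryResource *mem = execution_memory.resource();
  Frame frame{frame_size, mem};
  auto cursor = root.MakeCursor(mem);
  // ... Pull loop using `frame` and `cursor` ...
}  // pool, then monotonic, are destroyed with `execution_memory` at scope exit
```
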
struct InterpreterContext;

inline constexpr size_t kExecutionMemoryBlockSize = 1UL * 1024UL * 1024UL;
@ -304,45 +352,25 @@ class Interpreter final {
  }

  struct QueryExecution {
    std::variant<utils::MonotonicBufferResource, utils::PoolResource> execution_memory;
    utils::ResourceWithOutOfMemoryException execution_memory_with_exception;
    std::optional<PreparedQuery> prepared_query;
    QueryAllocator execution_memory;  // NOTE: declared before all other fields which use this memory

    std::optional<PreparedQuery> prepared_query;
    std::map<std::string, TypedValue> summary;
    std::vector<Notification> notifications;

    static auto Create(std::variant<utils::MonotonicBufferResource, utils::PoolResource> memory_resource,
                       std::optional<PreparedQuery> prepared_query = std::nullopt) -> std::unique_ptr<QueryExecution> {
      return std::make_unique<QueryExecution>(std::move(memory_resource), std::move(prepared_query));
    }
    static auto Create() -> std::unique_ptr<QueryExecution> { return std::make_unique<QueryExecution>(); }

    explicit QueryExecution(std::variant<utils::MonotonicBufferResource, utils::PoolResource> memory_resource,
                            std::optional<PreparedQuery> prepared_query)
        : execution_memory(std::move(memory_resource)), prepared_query{std::move(prepared_query)} {
      std::visit(
          [&](auto &memory_resource) {
            execution_memory_with_exception = utils::ResourceWithOutOfMemoryException(&memory_resource);
          },
          execution_memory);
    };
    explicit QueryExecution() = default;

    QueryExecution(const QueryExecution &) = delete;
    QueryExecution(QueryExecution &&) = default;
    QueryExecution(QueryExecution &&) = delete;
    QueryExecution &operator=(const QueryExecution &) = delete;
    QueryExecution &operator=(QueryExecution &&) = default;
    QueryExecution &operator=(QueryExecution &&) = delete;

    ~QueryExecution() {
      // We should always release the execution memory AFTER we
      // destroy the prepared query which is using that instance
      // of execution memory.
      prepared_query.reset();
      std::visit([](auto &memory_resource) { memory_resource.Release(); }, execution_memory);
    }
    ~QueryExecution() = default;

    void CleanRuntimeData() {
      if (prepared_query.has_value()) {
        prepared_query.reset();
      }
      prepared_query.reset();
      notifications.clear();
    }
  };
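The NOTE on `execution_memory` and the now-defaulted destructor both lean on the same C++ guarantee: non-static data members are destroyed in reverse declaration order, so declaring the allocator first means it is destroyed last, after every member that allocated from it. A standalone demonstration:

```cpp
#include <cstdio>

struct Arena {
  ~Arena() { std::puts("arena released (second)"); }
};

struct Prepared {
  ~Prepared() { std::puts("prepared query destroyed (first)"); }
};

struct QueryExecution {
  Arena execution_memory;   // declared first => destroyed last
  Prepared prepared_query;  // may hold pointers into the arena
};

int main() {
  QueryExecution qe;
  // Destruction runs bottom-up: prepared_query first, execution_memory second,
  // which is exactly the ordering the old hand-written destructor enforced.
}
```
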
@ -413,9 +441,7 @@ std::map<std::string, TypedValue> Interpreter::Pull(TStream *result_stream, std:
  try {
    // Wrap the (statically polymorphic) stream type into a common type which
    // the handler knows.
    AnyStream stream{result_stream,
                     std::visit([](auto &execution_memory) -> utils::MemoryResource * { return &execution_memory; },
                                query_execution->execution_memory)};
    AnyStream stream{result_stream, query_execution->execution_memory.resource()};
    const auto maybe_res = query_execution->prepared_query->query_handler(&stream, n);
    // Stream is using execution memory of the query_execution which
    // can be deleted after its execution, so the stream should be cleared

@ -47,6 +47,8 @@
#include "query/procedure/mg_procedure_impl.hpp"
#include "query/procedure/module.hpp"
#include "query/typed_value.hpp"
#include "range/v3/all.hpp"
#include "storage/v2/id_types.hpp"
#include "storage/v2/property_value.hpp"
#include "storage/v2/view.hpp"
#include "utils/algorithm.hpp"
@ -67,6 +69,7 @@
#include "utils/pmr/vector.hpp"
#include "utils/readable_size.hpp"
#include "utils/string.hpp"
#include "utils/tag.hpp"
#include "utils/temporal.hpp"
#include "utils/typeinfo.hpp"

@ -178,6 +181,20 @@ inline void AbortCheck(ExecutionContext const &context) {
  if (auto const reason = MustAbort(context); reason != AbortReason::NO_ABORT) throw HintedAbortError(reason);
}

std::vector<storage::LabelId> EvaluateLabels(const std::vector<StorageLabelType> &labels,
                                             ExpressionEvaluator &evaluator, DbAccessor *dba) {
  std::vector<storage::LabelId> result;
  result.reserve(labels.size());
  for (const auto &label : labels) {
    if (const auto *label_atom = std::get_if<storage::LabelId>(&label)) {
      result.emplace_back(*label_atom);
    } else {
      result.emplace_back(dba->NameToLabel(std::get<Expression *>(label)->Accept(evaluator).ValueString()));
    }
  }
  return result;
}

}  // namespace

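`EvaluateLabels` is a two-way dispatch over `StorageLabelType`, i.e. `std::variant<storage::LabelId, Expression *>`: labels resolved at plan time pass through, dynamic ones are evaluated to a string and looked up by name. The same `std::get_if` pattern reduced to standalone types:

```cpp
#include <string>
#include <variant>
#include <vector>

using LabelId = int;
using DynamicLabel = std::string;  // stand-in for an expression evaluated at runtime
using LabelVariant = std::variant<LabelId, DynamicLabel>;

LabelId NameToLabel(const std::string &name) { return static_cast<LabelId>(name.size()); }

std::vector<LabelId> EvaluateLabels(const std::vector<LabelVariant> &labels) {
  std::vector<LabelId> result;
  result.reserve(labels.size());
  for (const auto &label : labels) {
    if (const auto *id = std::get_if<LabelId>(&label)) {
      result.push_back(*id);  // already resolved at plan time
    } else {
      result.push_back(NameToLabel(std::get<DynamicLabel>(label)));  // resolve now
    }
  }
  return result;
}
```
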
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
@ -213,12 +230,13 @@ CreateNode::CreateNode(const std::shared_ptr<LogicalOperator> &input, NodeCreati

// Creates a vertex on this GraphDb. Returns a reference to the vertex placed on the
// frame.
VertexAccessor &CreateLocalVertex(const NodeCreationInfo &node_info, Frame *frame, ExecutionContext &context) {
VertexAccessor &CreateLocalVertex(const NodeCreationInfo &node_info, Frame *frame, ExecutionContext &context,
                                  std::vector<storage::LabelId> &labels, ExpressionEvaluator &evaluator) {
  auto &dba = *context.db_accessor;
  auto new_node = dba.InsertVertex();
  context.execution_stats[ExecutionStats::Key::CREATED_NODES] += 1;
  for (auto label : node_info.labels) {
    auto maybe_error = new_node.AddLabel(label);
  for (const auto &label : labels) {
    auto maybe_error = std::invoke([&] { return new_node.AddLabel(label); });
    if (maybe_error.HasError()) {
      switch (maybe_error.GetError()) {
        case storage::Error::SERIALIZATION_ERROR:
@ -233,10 +251,6 @@ VertexAccessor &CreateLocalVertex(const NodeCreationInfo &node_info, Frame *fram
    }
    context.execution_stats[ExecutionStats::Key::CREATED_LABELS] += 1;
  }
  // Evaluator should use the latest accessors, as modified in this query, when
  // setting properties on new nodes.
  ExpressionEvaluator evaluator(frame, context.symbol_table, context.evaluation_context, context.db_accessor,
                                storage::View::NEW);
  // TODO: PropsSetChecked allocates a PropertyValue, make it use context.memory
  // when we update PropertyValue with custom allocator.
  std::map<storage::PropertyId, storage::PropertyValue> properties;
@ -276,16 +290,21 @@ CreateNode::CreateNodeCursor::CreateNodeCursor(const CreateNode &self, utils::Me
bool CreateNode::CreateNodeCursor::Pull(Frame &frame, ExecutionContext &context) {
  OOMExceptionEnabler oom_exception;
  SCOPED_PROFILE_OP("CreateNode");
#ifdef MG_ENTERPRISE
  if (license::global_license_checker.IsEnterpriseValidFast() && context.auth_checker &&
      !context.auth_checker->Has(self_.node_info_.labels,
                                 memgraph::query::AuthQuery::FineGrainedPrivilege::CREATE_DELETE)) {
    throw QueryRuntimeException("Vertex not created due to not having enough permission!");
  }
#endif
  ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.db_accessor,
                                storage::View::NEW);

  if (input_cursor_->Pull(frame, context)) {
    auto created_vertex = CreateLocalVertex(self_.node_info_, &frame, context);
    // we have to resolve the labels before we can check for permissions
    auto labels = EvaluateLabels(self_.node_info_.labels, evaluator, context.db_accessor);

#ifdef MG_ENTERPRISE
    if (license::global_license_checker.IsEnterpriseValidFast() && context.auth_checker &&
        !context.auth_checker->Has(labels, memgraph::query::AuthQuery::FineGrainedPrivilege::CREATE_DELETE)) {
      throw QueryRuntimeException("Vertex not created due to not having enough permission!");
    }
#endif

    auto created_vertex = CreateLocalVertex(self_.node_info_, &frame, context, labels, evaluator);
    if (context.trigger_context_collector) {
      context.trigger_context_collector->RegisterCreatedObject(created_vertex);
    }
@ -310,7 +329,7 @@ CreateExpand::CreateExpand(NodeCreationInfo node_info, EdgeCreationInfo edge_inf
ACCEPT_WITH_INPUT(CreateExpand)

UniqueCursorPtr CreateExpand::MakeCursor(utils::MemoryResource *mem) const {
  memgraph::metrics::IncrementCounter(memgraph::metrics::CreateNodeOperator);
  memgraph::metrics::IncrementCounter(memgraph::metrics::CreateExpandOperator);

  return MakeUniqueCursorPtr<CreateExpandCursor>(mem, *this, mem);
}
@ -369,6 +388,9 @@ bool CreateExpand::CreateExpandCursor::Pull(Frame &frame, ExecutionContext &cont
  SCOPED_PROFILE_OP_BY_REF(self_);

  if (!input_cursor_->Pull(frame, context)) return false;
  ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.db_accessor,
                                storage::View::NEW);
  auto labels = EvaluateLabels(self_.node_info_.labels, evaluator, context.db_accessor);

#ifdef MG_ENTERPRISE
  if (license::global_license_checker.IsEnterpriseValidFast()) {
@ -380,7 +402,7 @@ bool CreateExpand::CreateExpandCursor::Pull(Frame &frame, ExecutionContext &cont
    if (context.auth_checker &&
        !(context.auth_checker->Has(self_.edge_info_.edge_type,
                                    memgraph::query::AuthQuery::FineGrainedPrivilege::CREATE_DELETE) &&
          context.auth_checker->Has(self_.node_info_.labels, fine_grained_permission))) {
          context.auth_checker->Has(labels, fine_grained_permission))) {
      throw QueryRuntimeException("Edge not created due to not having enough permission!");
    }
  }
@ -390,14 +412,8 @@ bool CreateExpand::CreateExpandCursor::Pull(Frame &frame, ExecutionContext &cont
  ExpectType(self_.input_symbol_, vertex_value, TypedValue::Type::Vertex);
  auto &v1 = vertex_value.ValueVertex();

  // Similarly to CreateNode, newly created edges and nodes should use the
  // storage::View::NEW.
  // E.g. we pick up new properties: `CREATE (n {p: 42}) -[:r {ep: n.p}]-> ()`
  ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.db_accessor,
                                storage::View::NEW);

  // get the destination vertex (possibly an existing node)
  auto &v2 = OtherVertex(frame, context);
  auto &v2 = OtherVertex(frame, context, labels, evaluator);

  // create an edge between the two nodes
  auto *dba = context.db_accessor;
@ -428,13 +444,15 @@ void CreateExpand::CreateExpandCursor::Shutdown() { input_cursor_->Shutdown(); }

void CreateExpand::CreateExpandCursor::Reset() { input_cursor_->Reset(); }

VertexAccessor &CreateExpand::CreateExpandCursor::OtherVertex(Frame &frame, ExecutionContext &context) {
VertexAccessor &CreateExpand::CreateExpandCursor::OtherVertex(Frame &frame, ExecutionContext &context,
                                                              std::vector<storage::LabelId> &labels,
                                                              ExpressionEvaluator &evaluator) {
  if (self_.existing_node_) {
    TypedValue &dest_node_value = frame[self_.node_info_.symbol];
    ExpectType(self_.node_info_.symbol, dest_node_value, TypedValue::Type::Vertex);
    return dest_node_value.ValueVertex();
  } else {
    auto &created_vertex = CreateLocalVertex(self_.node_info_, &frame, context);
    auto &created_vertex = CreateLocalVertex(self_.node_info_, &frame, context, labels, evaluator);
    if (context.trigger_context_collector) {
      context.trigger_context_collector->RegisterCreatedObject(created_vertex);
    }
@ -847,17 +865,15 @@ bool Expand::ExpandCursor::Pull(Frame &frame, ExecutionContext &context) {
  SCOPED_PROFILE_OP_BY_REF(self_);

  // A helper function for expanding a node from an edge.
  auto pull_node = [this, &frame](const EdgeAccessor &new_edge, EdgeAtom::Direction direction) {
  auto pull_node = [this, &frame]<EdgeAtom::Direction direction>(const EdgeAccessor &new_edge,
                                                                 utils::tag_value<direction>) {
    if (self_.common_.existing_node) return;
    switch (direction) {
      case EdgeAtom::Direction::IN:
        frame[self_.common_.node_symbol] = new_edge.From();
        break;
      case EdgeAtom::Direction::OUT:
        frame[self_.common_.node_symbol] = new_edge.To();
        break;
      case EdgeAtom::Direction::BOTH:
        LOG_FATAL("Must indicate exact expansion direction here");
    if constexpr (direction == EdgeAtom::Direction::IN) {
      frame[self_.common_.node_symbol] = new_edge.From();
    } else if constexpr (direction == EdgeAtom::Direction::OUT) {
      frame[self_.common_.node_symbol] = new_edge.To();
    } else {
      LOG_FATAL("Must indicate exact expansion direction here");
    }
  };

@ -876,7 +892,7 @@ bool Expand::ExpandCursor::Pull(Frame &frame, ExecutionContext &context) {
#endif

      frame[self_.common_.edge_symbol] = edge;
      pull_node(edge, EdgeAtom::Direction::IN);
      pull_node(edge, utils::tag_v<EdgeAtom::Direction::IN>);
      return true;
    }

@ -896,7 +912,7 @@ bool Expand::ExpandCursor::Pull(Frame &frame, ExecutionContext &context) {
      }
#endif
      frame[self_.common_.edge_symbol] = edge;
      pull_node(edge, EdgeAtom::Direction::OUT);
      pull_node(edge, utils::tag_v<EdgeAtom::Direction::OUT>);
      return true;
    }

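The rewritten `pull_node` replaces a runtime `switch` with a compile-time branch: the direction travels as a non-type template argument carried by a tag object, so only the live branch is instantiated and `BOTH` is rejected without any runtime check. A self-contained version of the idiom, with a local `tag_type`/`tag_v` presumably mirroring what `utils/tag.hpp` provides:

```cpp
#include <cstdio>

enum class Direction { IN, OUT, BOTH };

template <auto V>
struct tag_type {};
template <auto V>
inline constexpr tag_type<V> tag_v{};

int main() {
  // C++20 templated lambda: `direction` is a compile-time constant per call site.
  auto pull_node = []<Direction direction>(const char *edge, tag_type<direction>) {
    if constexpr (direction == Direction::IN) {
      std::printf("%s: take From()\n", edge);
    } else if constexpr (direction == Direction::OUT) {
      std::printf("%s: take To()\n", edge);
    } else {
      static_assert(direction != Direction::BOTH, "must indicate an exact direction");
    }
  };
  pull_node("e1", tag_v<Direction::IN>);
  pull_node("e2", tag_v<Direction::OUT>);
}
```
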
@ -990,12 +1006,12 @@ bool Expand::ExpandCursor::InitEdges(Frame &frame, ExecutionContext &context) {
        auto existing_node = *expansion_info_.existing_node;

        auto edges_result = UnwrapEdgesResult(vertex.InEdges(self_.view_, self_.common_.edge_types, existing_node));
        in_edges_.emplace(edges_result.edges);
        in_edges_.emplace(std::move(edges_result.edges));
        num_expanded_first = edges_result.expanded_count;
      }
    } else {
      auto edges_result = UnwrapEdgesResult(vertex.InEdges(self_.view_, self_.common_.edge_types));
      in_edges_.emplace(edges_result.edges);
      in_edges_.emplace(std::move(edges_result.edges));
      num_expanded_first = edges_result.expanded_count;
    }
    if (in_edges_) {
@ -1009,12 +1025,12 @@ bool Expand::ExpandCursor::InitEdges(Frame &frame, ExecutionContext &context) {
      if (expansion_info_.existing_node) {
        auto existing_node = *expansion_info_.existing_node;
        auto edges_result = UnwrapEdgesResult(vertex.OutEdges(self_.view_, self_.common_.edge_types, existing_node));
        out_edges_.emplace(edges_result.edges);
        out_edges_.emplace(std::move(edges_result.edges));
        num_expanded_second = edges_result.expanded_count;
      }
    } else {
      auto edges_result = UnwrapEdgesResult(vertex.OutEdges(self_.view_, self_.common_.edge_types));
      out_edges_.emplace(edges_result.edges);
      out_edges_.emplace(std::move(edges_result.edges));
      num_expanded_second = edges_result.expanded_count;
    }
    if (out_edges_) {
@ -1100,14 +1116,14 @@ auto ExpandFromVertex(const VertexAccessor &vertex, EdgeAtom::Direction directio

  if (direction != EdgeAtom::Direction::OUT) {
    auto edges = UnwrapEdgesResult(vertex.InEdges(view, edge_types)).edges;
    if (edges.begin() != edges.end()) {
    if (!edges.empty()) {
      chain_elements.emplace_back(wrapper(EdgeAtom::Direction::IN, std::move(edges)));
    }
  }

  if (direction != EdgeAtom::Direction::IN) {
    auto edges = UnwrapEdgesResult(vertex.OutEdges(view, edge_types)).edges;
    if (edges.begin() != edges.end()) {
    if (!edges.empty()) {
      chain_elements.emplace_back(wrapper(EdgeAtom::Direction::OUT, std::move(edges)));
    }
  }
@ -1227,8 +1243,13 @@ class ExpandVariableCursor : public Cursor {
      }

      // reset the frame value to an empty edge list
      auto *pull_memory = context.evaluation_context.memory;
      frame[self_.common_.edge_symbol] = TypedValue::TVector(pull_memory);
      if (frame[self_.common_.edge_symbol].IsList()) {
        // Preserve the list capacity if possible
        frame[self_.common_.edge_symbol].ValueList().clear();
      } else {
        auto *pull_memory = context.evaluation_context.memory;
        frame[self_.common_.edge_symbol] = TypedValue::TVector(pull_memory);
      }

      return true;
    }
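Reusing the existing list matters because `clear()` destroys the elements but keeps the vector's buffer, so after the first few pulls the hot path stops allocating entirely; assigning a fresh vector would throw that capacity away. A tiny demonstration:

```cpp
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> edges;
  edges.reserve(1024);
  edges.assign({1, 2, 3});

  edges.clear();  // destroys elements, keeps the buffer
  std::printf("after clear: size=%zu capacity=%zu\n", edges.size(), edges.capacity());

  edges = std::vector<int>{};  // replaces the buffer; capacity is typically 0 again
  std::printf("after reassign: size=%zu capacity=%zu\n", edges.size(), edges.capacity());
}
```
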
@ -3207,8 +3228,8 @@ void SetProperties::SetPropertiesCursor::Shutdown() { input_cursor_->Shutdown();
void SetProperties::SetPropertiesCursor::Reset() { input_cursor_->Reset(); }

SetLabels::SetLabels(const std::shared_ptr<LogicalOperator> &input, Symbol input_symbol,
                     const std::vector<storage::LabelId> &labels)
    : input_(input), input_symbol_(std::move(input_symbol)), labels_(labels) {}
                     std::vector<StorageLabelType> labels)
    : input_(input), input_symbol_(std::move(input_symbol)), labels_(std::move(labels)) {}

ACCEPT_WITH_INPUT(SetLabels)

@ -3228,16 +3249,18 @@ SetLabels::SetLabelsCursor::SetLabelsCursor(const SetLabels &self, utils::Memory
bool SetLabels::SetLabelsCursor::Pull(Frame &frame, ExecutionContext &context) {
  OOMExceptionEnabler oom_exception;
  SCOPED_PROFILE_OP("SetLabels");
  ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.db_accessor,
                                storage::View::NEW);
  if (!input_cursor_->Pull(frame, context)) return false;
  auto labels = EvaluateLabels(self_.labels_, evaluator, context.db_accessor);

#ifdef MG_ENTERPRISE
  if (license::global_license_checker.IsEnterpriseValidFast() && context.auth_checker &&
      !context.auth_checker->Has(self_.labels_, memgraph::query::AuthQuery::FineGrainedPrivilege::CREATE_DELETE)) {
      !context.auth_checker->Has(labels, memgraph::query::AuthQuery::FineGrainedPrivilege::CREATE_DELETE)) {
    throw QueryRuntimeException("Couldn't set label due to not having enough permission!");
  }
#endif

  if (!input_cursor_->Pull(frame, context)) return false;

  TypedValue &vertex_value = frame[self_.input_symbol_];
  // Skip setting labels on Null (can occur in optional match).
  if (vertex_value.IsNull()) return true;
@ -3252,7 +3275,7 @@ bool SetLabels::SetLabelsCursor::Pull(Frame &frame, ExecutionContext &context) {
  }
#endif

  for (auto label : self_.labels_) {
  for (auto label : labels) {
    auto maybe_value = vertex.AddLabel(label);
    if (maybe_value.HasError()) {
      switch (maybe_value.GetError()) {
@ -3367,8 +3390,8 @@ void RemoveProperty::RemovePropertyCursor::Shutdown() { input_cursor_->Shutdown(
void RemoveProperty::RemovePropertyCursor::Reset() { input_cursor_->Reset(); }

RemoveLabels::RemoveLabels(const std::shared_ptr<LogicalOperator> &input, Symbol input_symbol,
                           const std::vector<storage::LabelId> &labels)
    : input_(input), input_symbol_(std::move(input_symbol)), labels_(labels) {}
                           std::vector<StorageLabelType> labels)
    : input_(input), input_symbol_(std::move(input_symbol)), labels_(std::move(labels)) {}

ACCEPT_WITH_INPUT(RemoveLabels)

@ -3388,16 +3411,18 @@ RemoveLabels::RemoveLabelsCursor::RemoveLabelsCursor(const RemoveLabels &self, u
bool RemoveLabels::RemoveLabelsCursor::Pull(Frame &frame, ExecutionContext &context) {
  OOMExceptionEnabler oom_exception;
  SCOPED_PROFILE_OP("RemoveLabels");
  ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.db_accessor,
                                storage::View::NEW);
  if (!input_cursor_->Pull(frame, context)) return false;
  auto labels = EvaluateLabels(self_.labels_, evaluator, context.db_accessor);

#ifdef MG_ENTERPRISE
  if (license::global_license_checker.IsEnterpriseValidFast() && context.auth_checker &&
      !context.auth_checker->Has(self_.labels_, memgraph::query::AuthQuery::FineGrainedPrivilege::CREATE_DELETE)) {
      !context.auth_checker->Has(labels, memgraph::query::AuthQuery::FineGrainedPrivilege::CREATE_DELETE)) {
    throw QueryRuntimeException("Couldn't remove label due to not having enough permission!");
  }
#endif

  if (!input_cursor_->Pull(frame, context)) return false;

  TypedValue &vertex_value = frame[self_.input_symbol_];
  // Skip removing labels on Null (can occur in optional match).
  if (vertex_value.IsNull()) return true;
@ -3412,7 +3437,7 @@ bool RemoveLabels::RemoveLabelsCursor::Pull(Frame &frame, ExecutionContext &cont
  }
#endif

  for (auto label : self_.labels_) {
  for (auto label : labels) {
    auto maybe_value = vertex.RemoveLabel(label);
    if (maybe_value.HasError()) {
      switch (maybe_value.GetError()) {
@ -4147,14 +4172,14 @@ OrderBy::OrderBy(const std::shared_ptr<LogicalOperator> &input, const std::vecto
                 const std::vector<Symbol> &output_symbols)
    : input_(input), output_symbols_(output_symbols) {
  // split the order_by vector into two vectors of orderings and expressions
  std::vector<Ordering> ordering;
  std::vector<OrderedTypedValueCompare> ordering;
  ordering.reserve(order_by.size());
  order_by_.reserve(order_by.size());
  for (const auto &ordering_expression_pair : order_by) {
    ordering.emplace_back(ordering_expression_pair.ordering);
    order_by_.emplace_back(ordering_expression_pair.expression);
  }
  compare_ = TypedValueVectorCompare(ordering);
  compare_ = TypedValueVectorCompare(std::move(ordering));
}

ACCEPT_WITH_INPUT(OrderBy)
@ -4175,29 +4200,43 @@ class OrderByCursor : public Cursor {
    OOMExceptionEnabler oom_exception;
    SCOPED_PROFILE_OP_BY_REF(self_);

    if (!did_pull_all_) {
    if (!did_pull_all_) [[unlikely]] {
      ExpressionEvaluator evaluator(&frame, context.symbol_table, context.evaluation_context, context.db_accessor,
                                    storage::View::OLD);
      auto *mem = cache_.get_allocator().GetMemoryResource();
      auto *pull_mem = context.evaluation_context.memory;
      auto *query_mem = cache_.get_allocator().GetMemoryResource();

      utils::pmr::vector<utils::pmr::vector<TypedValue>> order_by(pull_mem);  // Not cached, pull memory
      utils::pmr::vector<utils::pmr::vector<TypedValue>> output(query_mem);   // Cached, query memory

      while (input_cursor_->Pull(frame, context)) {
        // collect the order_by elements
        utils::pmr::vector<TypedValue> order_by(mem);
        order_by.reserve(self_.order_by_.size());
        for (auto expression_ptr : self_.order_by_) {
          order_by.emplace_back(expression_ptr->Accept(evaluator));
        utils::pmr::vector<TypedValue> order_by_elem(pull_mem);
        order_by_elem.reserve(self_.order_by_.size());
        for (auto const &expression_ptr : self_.order_by_) {
          order_by_elem.emplace_back(expression_ptr->Accept(evaluator));
        }
        order_by.emplace_back(std::move(order_by_elem));

        // collect the output elements
        utils::pmr::vector<TypedValue> output(mem);
        output.reserve(self_.output_symbols_.size());
        for (const Symbol &output_sym : self_.output_symbols_) output.emplace_back(frame[output_sym]);

        cache_.push_back(Element{std::move(order_by), std::move(output)});
        utils::pmr::vector<TypedValue> output_elem(query_mem);
        output_elem.reserve(self_.output_symbols_.size());
        for (const Symbol &output_sym : self_.output_symbols_) {
          output_elem.emplace_back(frame[output_sym]);
        }
        output.emplace_back(std::move(output_elem));
      }

      std::sort(cache_.begin(), cache_.end(), [this](const auto &pair1, const auto &pair2) {
        return self_.compare_(pair1.order_by, pair2.order_by);
      });
      // sorting with range zip
      // we compare on just the projection of the 1st range (order_by)
      // this will also permute the 2nd range (output)
      ranges::sort(
          ranges::views::zip(order_by, output), self_.compare_.lex_cmp(),
          [](auto const &value) -> auto const & { return std::get<0>(value); });

      // no longer need the order_by terms
      order_by.clear();
      cache_ = std::move(output);

      did_pull_all_ = true;
      cache_it_ = cache_.begin();
@ -4208,15 +4247,15 @@ class OrderByCursor : public Cursor {
    AbortCheck(context);

    // place the output values on the frame
    DMG_ASSERT(self_.output_symbols_.size() == cache_it_->remember.size(),
    DMG_ASSERT(self_.output_symbols_.size() == cache_it_->size(),
               "Number of values does not match the number of output symbols "
               "in OrderBy");
    auto output_sym_it = self_.output_symbols_.begin();
    for (const TypedValue &output : cache_it_->remember) {
      if (context.frame_change_collector && context.frame_change_collector->IsKeyTracked(output_sym_it->name())) {
    for (TypedValue &output : *cache_it_) {
      if (context.frame_change_collector) {
        context.frame_change_collector->ResetTrackingValue(output_sym_it->name());
      }
      frame[*output_sym_it++] = output;
      frame[*output_sym_it++] = std::move(output);
    }
    cache_it_++;
    return true;
@ -4231,17 +4270,12 @@ class OrderByCursor : public Cursor {
  }

 private:
  struct Element {
    utils::pmr::vector<TypedValue> order_by;
    utils::pmr::vector<TypedValue> remember;
  };

  const OrderBy &self_;
  const UniqueCursorPtr input_cursor_;
  bool did_pull_all_{false};
  // a cache of elements pulled from the input
  // the cache is filled and sorted (only on first elem) on first Pull
  utils::pmr::vector<Element> cache_;
  // the cache is filled and sorted on first Pull
  utils::pmr::vector<utils::pmr::vector<TypedValue>> cache_;
  // iterator over the cache_, maintains state between Pulls
  decltype(cache_.begin()) cache_it_ = cache_.begin();
};
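Sorting the zipped view permutes `order_by` and `output` in lockstep while comparing only the key column, which is what lets the cursor drop the old per-row `Element` struct and throw the keys away afterwards. The same pattern reduced to integers and strings (range-v3, as already pulled in through `range/v3/all.hpp`):

```cpp
#include <cstdio>
#include <functional>
#include <string>
#include <vector>

#include <range/v3/algorithm/sort.hpp>
#include <range/v3/view/zip.hpp>

int main() {
  std::vector<int> keys{3, 1, 2};                // the "order_by" projection
  std::vector<std::string> rows{"c", "a", "b"};  // the payload permuted alongside

  // Compare only the first element of each zipped pair; swapping a pair
  // swaps the corresponding elements of both underlying vectors.
  ranges::sort(ranges::views::zip(keys, rows), std::less<>{},
               [](auto const &pair) -> auto const & { return std::get<0>(pair); });

  for (auto const &row : rows) std::printf("%s ", row.c_str());  // prints: a b c
}
```
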
@ -4444,15 +4478,15 @@ class UnwindCursor : public Cursor {
        TypedValue input_value = self_.input_expression_->Accept(evaluator);
        if (input_value.type() != TypedValue::Type::List)
          throw QueryRuntimeException("Argument of UNWIND must be a list, but '{}' was provided.", input_value.type());
        // Copy the evaluated input_value_list to our vector.
        input_value_ = input_value.ValueList();
        // Move the evaluated input_value_list to our vector.
        input_value_ = std::move(input_value.ValueList());
        input_value_it_ = input_value_.begin();
      }

      // if we reached the end of our list of values, go back to the top
      if (input_value_it_ == input_value_.end()) continue;

      frame[self_.output_symbol_] = *input_value_it_++;
      frame[self_.output_symbol_] = std::move(*input_value_it_++);
      if (context.frame_change_collector && context.frame_change_collector->IsKeyTracked(self_.output_symbol_.name_)) {
        context.frame_change_collector->ResetTrackingValue(self_.output_symbol_.name_);
      }
@ -4493,7 +4527,11 @@ class DistinctCursor : public Cursor {
    SCOPED_PROFILE_OP("Distinct");

    while (true) {
      if (!input_cursor_->Pull(frame, context)) return false;
      if (!input_cursor_->Pull(frame, context)) {
        // Nothing left to pull, so we can dispose of seen_rows_ now
        seen_rows_.clear();
        return false;
      }

      utils::pmr::vector<TypedValue> row(seen_rows_.get_allocator().GetMemoryResource());
      row.reserve(self_.value_symbols_.size());
@ -5301,6 +5339,7 @@ class LoadCsvCursor : public Cursor {
              "1");
        }
        did_pull_ = true;
        reader_->Reset();
      }

      auto row = reader_->GetNextRow(context.evaluation_context.memory);

@ -76,18 +76,13 @@ using UniqueCursorPtr = std::unique_ptr<Cursor, std::function<void(Cursor *)>>;
template <class TCursor, class... TArgs>
std::unique_ptr<Cursor, std::function<void(Cursor *)>> MakeUniqueCursorPtr(utils::Allocator<TCursor> allocator,
                                                                           TArgs &&...args) {
  auto *ptr = allocator.allocate(1);
  try {
    auto *cursor = new (ptr) TCursor(std::forward<TArgs>(args)...);
    return std::unique_ptr<Cursor, std::function<void(Cursor *)>>(cursor, [allocator](Cursor *base_ptr) mutable {
      auto *p = static_cast<TCursor *>(base_ptr);
      p->~TCursor();
      allocator.deallocate(p, 1);
    });
  } catch (...) {
    allocator.deallocate(ptr, 1);
    throw;
  }
  auto *cursor = allocator.template new_object<TCursor>(std::forward<TArgs>(args)...);
  auto dtr = [allocator](Cursor *base_ptr) mutable {
    auto *p = static_cast<TCursor *>(base_ptr);
    allocator.delete_object(p);
  };
  // TODO: not std::function
  return std::unique_ptr<Cursor, std::function<void(Cursor *)>>(cursor, std::move(dtr));
}

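The rewritten factory delegates to the allocator's `new_object`/`delete_object`, which fuse allocate-plus-construct and destroy-plus-deallocate and preserve the exception safety the old `try`/`catch` provided by hand. The standard-library counterpart on `std::pmr::polymorphic_allocator` (C++20) behaves the same way:

```cpp
#include <memory_resource>
#include <string>

struct Cursor {
  std::string name;
  explicit Cursor(std::string n) : name(std::move(n)) {}
};

int main() {
  std::pmr::polymorphic_allocator<std::byte> alloc;  // defaults to new_delete_resource()

  // Allocate + construct in one call; if the constructor throws, the
  // allocation is released before the exception escapes.
  Cursor *cursor = alloc.new_object<Cursor>("scan-all");

  alloc.delete_object(cursor);  // destroy + deallocate in one call
}
```
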
class Once;
@ -285,6 +280,7 @@ class Once : public memgraph::query::plan::LogicalOperator {
};

using PropertiesMapList = std::vector<std::pair<storage::PropertyId, Expression *>>;
using StorageLabelType = std::variant<storage::LabelId, Expression *>;

struct NodeCreationInfo {
  static const utils::TypeInfo kType;
@ -292,18 +288,18 @@ struct NodeCreationInfo {

  NodeCreationInfo() = default;

  NodeCreationInfo(Symbol symbol, std::vector<storage::LabelId> labels,
  NodeCreationInfo(Symbol symbol, std::vector<StorageLabelType> labels,
                   std::variant<PropertiesMapList, ParameterLookup *> properties)
      : symbol{std::move(symbol)}, labels{std::move(labels)}, properties{std::move(properties)} {};

  NodeCreationInfo(Symbol symbol, std::vector<storage::LabelId> labels, PropertiesMapList properties)
  NodeCreationInfo(Symbol symbol, std::vector<StorageLabelType> labels, PropertiesMapList properties)
      : symbol{std::move(symbol)}, labels{std::move(labels)}, properties{std::move(properties)} {};

  NodeCreationInfo(Symbol symbol, std::vector<storage::LabelId> labels, ParameterLookup *properties)
  NodeCreationInfo(Symbol symbol, std::vector<StorageLabelType> labels, ParameterLookup *properties)
      : symbol{std::move(symbol)}, labels{std::move(labels)}, properties{properties} {};

  Symbol symbol;
  std::vector<storage::LabelId> labels;
  std::vector<StorageLabelType> labels;
  std::variant<PropertiesMapList, ParameterLookup *> properties;

  NodeCreationInfo Clone(AstStorage *storage) const {
@ -506,7 +502,8 @@ class CreateExpand : public memgraph::query::plan::LogicalOperator {
    const UniqueCursorPtr input_cursor_;

    // Get the existing node (if existing_node_ == true), or create a new node
    VertexAccessor &OtherVertex(Frame &frame, ExecutionContext &context);
    VertexAccessor &OtherVertex(Frame &frame, ExecutionContext &context,
                                std::vector<memgraph::storage::LabelId> &labels, ExpressionEvaluator &evaluator);
  };
};

@ -1477,8 +1474,7 @@ class SetLabels : public memgraph::query::plan::LogicalOperator {

  SetLabels() = default;

  SetLabels(const std::shared_ptr<LogicalOperator> &input, Symbol input_symbol,
            const std::vector<storage::LabelId> &labels);
  SetLabels(const std::shared_ptr<LogicalOperator> &input, Symbol input_symbol, std::vector<StorageLabelType> labels);
  bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override;
  UniqueCursorPtr MakeCursor(utils::MemoryResource *) const override;
  std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override;
@ -1489,7 +1485,7 @@ class SetLabels : public memgraph::query::plan::LogicalOperator {

  std::shared_ptr<memgraph::query::plan::LogicalOperator> input_;
  Symbol input_symbol_;
  std::vector<storage::LabelId> labels_;
  std::vector<StorageLabelType> labels_;

  std::unique_ptr<LogicalOperator> Clone(AstStorage *storage) const override {
    auto object = std::make_unique<SetLabels>();
@ -1567,7 +1563,7 @@ class RemoveLabels : public memgraph::query::plan::LogicalOperator {
  RemoveLabels() = default;

  RemoveLabels(const std::shared_ptr<LogicalOperator> &input, Symbol input_symbol,
               const std::vector<storage::LabelId> &labels);
               std::vector<StorageLabelType> labels);
  bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override;
  UniqueCursorPtr MakeCursor(utils::MemoryResource *) const override;
  std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override;
@ -1578,7 +1574,7 @@ class RemoveLabels : public memgraph::query::plan::LogicalOperator {

  std::shared_ptr<memgraph::query::plan::LogicalOperator> input_;
  Symbol input_symbol_;
  std::vector<storage::LabelId> labels_;
  std::vector<StorageLabelType> labels_;

  std::unique_ptr<LogicalOperator> Clone(AstStorage *storage) const override {
    auto object = std::make_unique<RemoveLabels>();

@ -358,11 +358,17 @@ void Filters::CollectPatternFilters(Pattern &pattern, SymbolTable &symbol_table,
  };
  auto add_node_filter = [&](NodeAtom *node) {
    const auto &node_symbol = symbol_table.at(*node->identifier_);
    if (!node->labels_.empty()) {
      // Create a LabelsTest and store it.
      auto *labels_test = storage.Create<LabelsTest>(node->identifier_, node->labels_);
    std::vector<LabelIx> labels;
    for (auto label : node->labels_) {
      if (const auto *label_node = std::get_if<Expression *>(&label)) {
        throw SemanticException("Property lookup not supported in MATCH/MERGE clause!");
      }
      labels.push_back(std::get<LabelIx>(label));
    }
    if (!labels.empty()) {
      auto *labels_test = storage.Create<LabelsTest>(node->identifier_, labels);
      auto label_filter = FilterInfo{FilterInfo::Type::Label, labels_test, std::unordered_set<Symbol>{node_symbol}};
      label_filter.labels = node->labels_;
      label_filter.labels = labels;
      all_filters_.emplace_back(label_filter);
    }
    add_properties(node);

@ -340,7 +340,7 @@ json ToJson(NamedExpression *nexpr) {
  return json;
}

json ToJson(const std::vector<std::pair<storage::PropertyId, Expression *>> &properties, const DbAccessor &dba) {
json ToJson(const PropertiesMapList &properties, const DbAccessor &dba) {
  json json;
  for (const auto &prop_pair : properties) {
    json.emplace(ToJson(prop_pair.first, dba), ToJson(prop_pair.second));
@ -348,6 +348,18 @@ json ToJson(const std::vector<std::pair<storage::PropertyId, Expression *>> &pro
  return json;
}

json ToJson(const std::vector<StorageLabelType> &labels, const DbAccessor &dba) {
  json json;
  for (const auto &label : labels) {
    if (const auto *label_node = std::get_if<Expression *>(&label)) {
      json.emplace_back(ToJson(*label_node));
    } else {
      json.emplace_back(ToJson(std::get<storage::LabelId>(label), dba));
    }
  }
  return json;
}

json ToJson(const NodeCreationInfo &node_info, const DbAccessor &dba) {
  json self;
  self["symbol"] = ToJson(node_info.symbol);
@ -654,7 +666,6 @@ bool PlanToJsonVisitor::PreVisit(SetLabels &op) {
  self["name"] = "SetLabels";
  self["input_symbol"] = ToJson(op.input_symbol_);
  self["labels"] = ToJson(op.labels_, *dba_);

  op.input_->Accept(*this);
  self["input"] = PopOutput();

@ -769,7 +780,7 @@ bool PlanToJsonVisitor::PreVisit(OrderBy &op) {

  for (auto i = 0; i < op.order_by_.size(); ++i) {
    json json;
    json["ordering"] = ToString(op.compare_.ordering_[i]);
    json["ordering"] = ToString(op.compare_.orderings()[i].ordering());
    json["expression"] = ToJson(op.order_by_[i]);
    self["order_by"].push_back(json);
  }

@ -465,6 +465,18 @@ class EdgeTypeIndexRewriter final : public HierarchicalLogicalOperatorVisitor {
    return true;
  }

  bool PreVisit(RollUpApply &op) override {
    prev_ops_.push_back(&op);
    op.input()->Accept(*this);
    RewriteBranch(&op.list_collection_branch_);
    return false;
  }

  bool PostVisit(RollUpApply &) override {
    prev_ops_.pop_back();
    return true;
  }

  std::shared_ptr<LogicalOperator> new_root_;

 private:

@ -293,6 +293,19 @@ class RuleBasedPlanner {

  storage::EdgeTypeId GetEdgeType(EdgeTypeIx edge_type) { return context_->db->NameToEdgeType(edge_type.name); }

  std::vector<StorageLabelType> GetLabelIds(const std::vector<QueryLabelType> &labels) {
    std::vector<StorageLabelType> label_ids;
    label_ids.reserve(labels.size());
    for (const auto &label : labels) {
      if (const auto *label_atom = std::get_if<LabelIx>(&label)) {
        label_ids.emplace_back(GetLabel(*label_atom));
      } else {
        label_ids.emplace_back(std::get<Expression *>(label));
      }
    }
    return label_ids;
  }

  std::unique_ptr<LogicalOperator> HandleMatching(std::unique_ptr<LogicalOperator> last_op,
                                                  const SingleQueryPart &single_query_part, SymbolTable &symbol_table,
                                                  std::unordered_set<Symbol> &bound_symbols) {
@ -328,11 +341,6 @@ class RuleBasedPlanner {
                                                  std::unordered_set<Symbol> &bound_symbols) {
    auto node_to_creation_info = [&](const NodeAtom &node) {
      const auto &node_symbol = symbol_table.at(*node.identifier_);
      std::vector<storage::LabelId> labels;
      labels.reserve(node.labels_.size());
      for (const auto &label : node.labels_) {
        labels.push_back(GetLabel(label));
      }

      auto properties = std::invoke([&]() -> std::variant<PropertiesMapList, ParameterLookup *> {
        if (const auto *node_properties =
@ -346,7 +354,7 @@ class RuleBasedPlanner {
        }
        return std::get<ParameterLookup *>(node.properties_);
      });
      return NodeCreationInfo{node_symbol, labels, properties};
      return NodeCreationInfo{node_symbol, GetLabelIds(node.labels_), properties};
    };

    auto base = [&](NodeAtom *node) -> std::unique_ptr<LogicalOperator> {
@ -423,23 +431,13 @@ class RuleBasedPlanner {
      return std::make_unique<plan::SetProperties>(std::move(input_op), input_symbol, set->expression_, op);
    } else if (auto *set = utils::Downcast<query::SetLabels>(clause)) {
      const auto &input_symbol = symbol_table.at(*set->identifier_);
      std::vector<storage::LabelId> labels;
      labels.reserve(set->labels_.size());
      for (const auto &label : set->labels_) {
        labels.push_back(GetLabel(label));
      }
      return std::make_unique<plan::SetLabels>(std::move(input_op), input_symbol, labels);
      return std::make_unique<plan::SetLabels>(std::move(input_op), input_symbol, GetLabelIds(set->labels_));
    } else if (auto *rem = utils::Downcast<query::RemoveProperty>(clause)) {
      return std::make_unique<plan::RemoveProperty>(std::move(input_op), GetProperty(rem->property_lookup_->property_),
                                                    rem->property_lookup_);
    } else if (auto *rem = utils::Downcast<query::RemoveLabels>(clause)) {
      const auto &input_symbol = symbol_table.at(*rem->identifier_);
      std::vector<storage::LabelId> labels;
      labels.reserve(rem->labels_.size());
      for (const auto &label : rem->labels_) {
        labels.push_back(GetLabel(label));
      }
      return std::make_unique<plan::RemoveLabels>(std::move(input_op), input_symbol, labels);
      return std::make_unique<plan::RemoveLabels>(std::move(input_op), input_symbol, GetLabelIds(rem->labels_));
    }
    return nullptr;
  }

@ -191,9 +191,9 @@ std::shared_ptr<Trigger::TriggerPlan> Trigger::GetPlan(DbAccessor *db_accessor)
  return trigger_plan_;
}

void Trigger::Execute(DbAccessor *dba, utils::MonotonicBufferResource *execution_memory,
                      const double max_execution_time_sec, std::atomic<bool> *is_shutting_down,
                      std::atomic<TransactionStatus> *transaction_status, const TriggerContext &context) const {
void Trigger::Execute(DbAccessor *dba, utils::MemoryResource *execution_memory, const double max_execution_time_sec,
                      std::atomic<bool> *is_shutting_down, std::atomic<TransactionStatus> *transaction_status,
                      const TriggerContext &context) const {
  if (!context.ShouldEventTrigger(event_type_)) {
    return;
  }
@ -214,22 +214,7 @@ void Trigger::Execute(DbAccessor *dba, utils::MonotonicBufferResource *execution
  ctx.is_shutting_down = is_shutting_down;
  ctx.transaction_status = transaction_status;
  ctx.is_profile_query = false;

  // Set up temporary memory for a single Pull. Initial memory comes from the
  // stack. 256 KiB should fit on the stack and should be more than enough for a
  // single `Pull`.
  static constexpr size_t stack_size = 256UL * 1024UL;
  char stack_data[stack_size];

  // We can throw on every query because simple queries for deleting will use only
  // the stack-allocated buffer.
  // Also, we want to throw only when the query engine requests more memory and not the storage,
  // so we add the exception to the allocator.
  utils::ResourceWithOutOfMemoryException resource_with_exception;
  utils::MonotonicBufferResource monotonic_memory(&stack_data[0], stack_size, &resource_with_exception);
  // TODO (mferencevic): Tune the parameters accordingly.
  utils::PoolResource pool_memory(128, 1024, &monotonic_memory);
  ctx.evaluation_context.memory = &pool_memory;
  ctx.evaluation_context.memory = execution_memory;

  auto cursor = plan.plan().MakeCursor(execution_memory);
  Frame frame{plan.symbol_table().max_position(), execution_memory};

@ -39,7 +39,7 @@ struct Trigger {
|
||||
utils::SkipList<QueryCacheEntry> *query_cache, DbAccessor *db_accessor,
|
||||
const InterpreterConfig::Query &query_config, std::shared_ptr<QueryUserOrRole> owner);
|
||||
|
||||
void Execute(DbAccessor *dba, utils::MonotonicBufferResource *execution_memory, double max_execution_time_sec,
|
||||
void Execute(DbAccessor *dba, utils::MemoryResource *execution_memory, double max_execution_time_sec,
|
||||
std::atomic<bool> *is_shutting_down, std::atomic<TransactionStatus> *transaction_status,
|
||||
const TriggerContext &context) const;
|
||||
|
||||
|
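Note: Trigger::Execute no longer builds a stack-backed MonotonicBufferResource and PoolResource on every execution; it now accepts any utils::MemoryResource *, so the caller owns the allocation strategy and can reuse it across executions. A hedged caller-side sketch (the actual call sites are outside this excerpt; variable names are placeholders):

    // Illustrative setup only.
    utils::MonotonicBufferResource execution_memory{256UL * 1024UL};
    trigger.Execute(&dba, &execution_memory, max_execution_time_sec,
                    &is_shutting_down, &transaction_status, trigger_context);
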
@ -321,6 +321,20 @@ TypedValue::operator storage::PropertyValue() const {
  throw TypedValueException("Unsupported conversion from TypedValue to PropertyValue");
}

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define DEFINE_VALUE_AND_TYPE_GETTERS_PRIMITIVE(type_param, type_enum, field) \
  type_param &TypedValue::Value##type_enum() { \
    if (type_ != Type::type_enum) [[unlikely]] \
      throw TypedValueException("TypedValue is of type '{}', not '{}'", type_, Type::type_enum); \
    return field; \
  } \
  type_param TypedValue::Value##type_enum() const { \
    if (type_ != Type::type_enum) [[unlikely]] \
      throw TypedValueException("TypedValue is of type '{}', not '{}'", type_, Type::type_enum); \
    return field; \
  } \
  bool TypedValue::Is##type_enum() const { return type_ == Type::type_enum; }

#define DEFINE_VALUE_AND_TYPE_GETTERS(type_param, type_enum, field) \
  type_param &TypedValue::Value##type_enum() { \
    if (type_ != Type::type_enum) [[unlikely]] \
@ -334,9 +348,9 @@ TypedValue::operator storage::PropertyValue() const {
  } \
  bool TypedValue::Is##type_enum() const { return type_ == Type::type_enum; }

DEFINE_VALUE_AND_TYPE_GETTERS(bool, Bool, bool_v)
DEFINE_VALUE_AND_TYPE_GETTERS(int64_t, Int, int_v)
DEFINE_VALUE_AND_TYPE_GETTERS(double, Double, double_v)
DEFINE_VALUE_AND_TYPE_GETTERS_PRIMITIVE(bool, Bool, bool_v)
DEFINE_VALUE_AND_TYPE_GETTERS_PRIMITIVE(int64_t, Int, int_v)
DEFINE_VALUE_AND_TYPE_GETTERS_PRIMITIVE(double, Double, double_v)
DEFINE_VALUE_AND_TYPE_GETTERS(TypedValue::TString, String, string_v)
DEFINE_VALUE_AND_TYPE_GETTERS(TypedValue::TVector, List, list_v)
DEFINE_VALUE_AND_TYPE_GETTERS(TypedValue::TMap, Map, map_v)
@ -348,24 +362,10 @@ DEFINE_VALUE_AND_TYPE_GETTERS(utils::LocalTime, LocalTime, local_time_v)
DEFINE_VALUE_AND_TYPE_GETTERS(utils::LocalDateTime, LocalDateTime, local_date_time_v)
DEFINE_VALUE_AND_TYPE_GETTERS(utils::Duration, Duration, duration_v)
DEFINE_VALUE_AND_TYPE_GETTERS(std::function<void(TypedValue *)>, Function, function_v)

Graph &TypedValue::ValueGraph() {
  if (type_ != Type::Graph) {
    throw TypedValueException("TypedValue is of type '{}', not '{}'", type_, Type::Graph);
  }
  return *graph_v;
}

const Graph &TypedValue::ValueGraph() const {
  if (type_ != Type::Graph) {
    throw TypedValueException("TypedValue is of type '{}', not '{}'", type_, Type::Graph);
  }
  return *graph_v;
}

bool TypedValue::IsGraph() const { return type_ == Type::Graph; }
DEFINE_VALUE_AND_TYPE_GETTERS(Graph, Graph, *graph_v)

#undef DEFINE_VALUE_AND_TYPE_GETTERS
#undef DEFINE_VALUE_AND_TYPE_GETTERS_PRIMITIVE

bool TypedValue::ContainsDeleted() const {
  switch (type_) {
@ -399,8 +399,6 @@ bool TypedValue::ContainsDeleted() const {
  return false;
}

bool TypedValue::IsNull() const { return type_ == Type::Null; }

bool TypedValue::IsNumeric() const { return IsInt() || IsDouble(); }

bool TypedValue::IsPropertyValue() const {

@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -475,50 +475,51 @@ class TypedValue {

  Type type() const { return type_; }

  // TODO consider adding getters for primitives by value (and not by ref)
#define DECLARE_VALUE_AND_TYPE_GETTERS_PRIMITIVE(type_param, type_enum, field) \
  /** Gets the value of type field. Throws if value is not field*/ \
  type_param &Value##type_enum(); \
  /** Gets the value of type field. Throws if value is not field*/ \
  type_param Value##type_enum() const; \
  /** Checks if it's the value is of the given type */ \
  bool Is##type_enum() const; \
  /** Get the value of the type field. Unchecked */ \
  type_param UnsafeValue##type_enum() const { return field; }

#define DECLARE_VALUE_AND_TYPE_GETTERS(type_param, field) \
  /** Gets the value of type field. Throws if value is not field*/ \
  type_param &Value##field(); \
  /** Gets the value of type field. Throws if value is not field*/ \
  const type_param &Value##field() const; \
  /** Checks if it's the value is of the given type */ \
  bool Is##field() const;
#define DECLARE_VALUE_AND_TYPE_GETTERS(type_param, type_enum, field) \
  /** Gets the value of type field. Throws if value is not field*/ \
  type_param &Value##type_enum(); \
  /** Gets the value of type field. Throws if value is not field*/ \
  const type_param &Value##type_enum() const; \
  /** Checks if it's the value is of the given type */ \
  bool Is##type_enum() const; \
  /** Get the value of the type field. Unchecked */ \
  type_param const &UnsafeValue##type_enum() const { return field; }

  DECLARE_VALUE_AND_TYPE_GETTERS(bool, Bool)
  DECLARE_VALUE_AND_TYPE_GETTERS(int64_t, Int)
  DECLARE_VALUE_AND_TYPE_GETTERS(double, Double)
  DECLARE_VALUE_AND_TYPE_GETTERS(TString, String)
  DECLARE_VALUE_AND_TYPE_GETTERS_PRIMITIVE(bool, Bool, bool_v)
  DECLARE_VALUE_AND_TYPE_GETTERS_PRIMITIVE(int64_t, Int, int_v)
  DECLARE_VALUE_AND_TYPE_GETTERS_PRIMITIVE(double, Double, double_v)
  DECLARE_VALUE_AND_TYPE_GETTERS(TString, String, string_v)

  /**
   * Get the list value.
   * @throw TypedValueException if stored value is not a list.
   */
  TVector &ValueList();
  DECLARE_VALUE_AND_TYPE_GETTERS(TVector, List, list_v)
  DECLARE_VALUE_AND_TYPE_GETTERS(TMap, Map, map_v)
  DECLARE_VALUE_AND_TYPE_GETTERS(VertexAccessor, Vertex, vertex_v)
  DECLARE_VALUE_AND_TYPE_GETTERS(EdgeAccessor, Edge, edge_v)
  DECLARE_VALUE_AND_TYPE_GETTERS(Path, Path, path_v)

  const TVector &ValueList() const;

  /** Check if the stored value is a list value */
  bool IsList() const;

  DECLARE_VALUE_AND_TYPE_GETTERS(TMap, Map)
  DECLARE_VALUE_AND_TYPE_GETTERS(VertexAccessor, Vertex)
  DECLARE_VALUE_AND_TYPE_GETTERS(EdgeAccessor, Edge)
  DECLARE_VALUE_AND_TYPE_GETTERS(Path, Path)

  DECLARE_VALUE_AND_TYPE_GETTERS(utils::Date, Date)
  DECLARE_VALUE_AND_TYPE_GETTERS(utils::LocalTime, LocalTime)
  DECLARE_VALUE_AND_TYPE_GETTERS(utils::LocalDateTime, LocalDateTime)
  DECLARE_VALUE_AND_TYPE_GETTERS(utils::Duration, Duration)
  DECLARE_VALUE_AND_TYPE_GETTERS(Graph, Graph)
  DECLARE_VALUE_AND_TYPE_GETTERS(std::function<void(TypedValue *)>, Function)
  DECLARE_VALUE_AND_TYPE_GETTERS(utils::Date, Date, date_v)
  DECLARE_VALUE_AND_TYPE_GETTERS(utils::LocalTime, LocalTime, local_time_v)
  DECLARE_VALUE_AND_TYPE_GETTERS(utils::LocalDateTime, LocalDateTime, local_date_time_v)
  DECLARE_VALUE_AND_TYPE_GETTERS(utils::Duration, Duration, duration_v)
  DECLARE_VALUE_AND_TYPE_GETTERS(Graph, Graph, *graph_v)
  DECLARE_VALUE_AND_TYPE_GETTERS(std::function<void(TypedValue *)>, Function, function_v)

#undef DECLARE_VALUE_AND_TYPE_GETTERS
#undef DECLARE_VALUE_AND_TYPE_GETTERS_PRIMITIVE

  bool ContainsDeleted() const;

  /** Checks if value is a TypedValue::Null. */
  bool IsNull() const;
  bool IsNull() const { return type_ == Type::Null; }

  /** Convenience function for checking if this TypedValue is either
   * an integer or double */

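Note: the PRIMITIVE variant returns bool/int64_t/double by value from the const getter and adds an unchecked accessor, so callers that have already tested the type can skip both the reference indirection and the type check. A small usage sketch under those assumptions:

    TypedValue v{int64_t{42}};
    int64_t checked = v.ValueInt();  // throws TypedValueException on a type mismatch
    if (v.IsInt()) {
      int64_t unchecked = v.UnsafeValueInt();  // no check: valid only after IsInt()
    }
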
@ -310,7 +310,7 @@ auto ReplicationHandler::ShowReplicas() const -> utils::BasicResult<query::ShowR
      // ATM we only support IN_MEMORY_TRANSACTIONAL
      if (storage->storage_mode_ != storage::StorageMode::IN_MEMORY_TRANSACTIONAL) return;
      if (!full_info && storage->name() == dbms::kDefaultDB) return;
      auto ok =
      [[maybe_unused]] auto ok =
          storage->repl_storage_state_.WithClient(replica.name_, [&](storage::ReplicationStorageClient &client) {
            auto ts_info = client.GetTimestampInfo(storage);
            auto state = client.State();

@ -37,6 +37,7 @@ struct SalientConfig {
  struct Items {
    bool properties_on_edges{true};
    bool enable_schema_metadata{false};
    bool delta_on_identical_property_update{true};
    friend bool operator==(const Items &lrh, const Items &rhs) = default;
  } items;

@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -33,6 +33,7 @@ struct Constraints {

  std::unique_ptr<ExistenceConstraints> existence_constraints_;
  std::unique_ptr<UniqueConstraints> unique_constraints_;
  bool empty() const { return existence_constraints_->empty() && unique_constraints_->empty(); }
};

}  // namespace memgraph::storage

@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -40,6 +40,8 @@ class ExistenceConstraints {
                                   const LabelId &label, const PropertyId &property);
  };

  bool empty() const { return constraints_.empty(); }

  [[nodiscard]] static std::optional<ConstraintViolation> ValidateVertexOnConstraint(const Vertex &vertex,
                                                                                     const LabelId &label,
                                                                                     const PropertyId &property);

@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -60,6 +60,8 @@ class UniqueConstraints {

  virtual void Clear() = 0;

  virtual bool empty() const = 0;

 protected:
  static DeletionStatus CheckPropertiesBeforeDeletion(const std::set<PropertyId> &properties) {
    if (properties.empty()) {

@ -2052,7 +2052,8 @@ Transaction DiskStorage::CreateTransaction(IsolationLevel isolation_level, Stora
    edge_import_mode_active = edge_import_status_ == EdgeImportMode::ACTIVE;
  }

  return {transaction_id, start_timestamp, isolation_level, storage_mode, edge_import_mode_active};
  return {transaction_id, start_timestamp, isolation_level,
          storage_mode,   edge_import_mode_active, !constraints_.empty()};
}

uint64_t DiskStorage::CommitTimestamp(const std::optional<uint64_t> desired_commit_timestamp) {

@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -347,5 +347,6 @@ void DiskUniqueConstraints::LoadUniqueConstraints(const std::vector<std::string>
    constraints_.emplace(label, properties);
  }
}
bool DiskUniqueConstraints::empty() const { return constraints_.empty(); }

}  // namespace memgraph::storage

@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -59,6 +59,7 @@ class DiskUniqueConstraints : public UniqueConstraints {
  RocksDBStorage *GetRocksDBStorage() const;

  void LoadUniqueConstraints(const std::vector<std::string> &keys);
  bool empty() const override;

 private:
  utils::Synchronized<std::map<uint64_t, std::map<Gid, std::set<std::pair<LabelId, std::set<PropertyId>>>>>>

@ -130,9 +130,13 @@ Result<storage::PropertyValue> EdgeAccessor::SetProperty(PropertyId property, co
  if (edge_.ptr->deleted) return Error::DELETED_OBJECT;
  using ReturnType = decltype(edge_.ptr->properties.GetProperty(property));
  std::optional<ReturnType> current_value;
  const bool skip_duplicate_write = !storage_->config_.salient.items.delta_on_identical_property_update;
  utils::AtomicMemoryBlock atomic_memory_block{
      [&current_value, &property, &value, transaction = transaction_, edge = edge_]() {
      [&current_value, &property, &value, transaction = transaction_, edge = edge_, skip_duplicate_write]() {
        current_value.emplace(edge.ptr->properties.GetProperty(property));
        if (skip_duplicate_write && current_value == value) {
          return;
        }
        // We could skip setting the value if the previous one is the same to the new
        // one. This would save some memory as a delta would not be created as well as
        // avoid copying the value. The reason we are not doing that is because the
@ -184,12 +188,14 @@ Result<std::vector<std::tuple<PropertyId, PropertyValue, PropertyValue>>> EdgeAc

  if (edge_.ptr->deleted) return Error::DELETED_OBJECT;

  const bool skip_duplicate_write = !storage_->config_.salient.items.delta_on_identical_property_update;
  using ReturnType = decltype(edge_.ptr->properties.UpdateProperties(properties));
  std::optional<ReturnType> id_old_new_change;
  utils::AtomicMemoryBlock atomic_memory_block{
      [transaction_ = transaction_, edge_ = edge_, &properties, &id_old_new_change]() {
      [transaction_ = transaction_, edge_ = edge_, &properties, &id_old_new_change, skip_duplicate_write]() {
        id_old_new_change.emplace(edge_.ptr->properties.UpdateProperties(properties));
        for (auto &[property, old_value, new_value] : *id_old_new_change) {
          if (skip_duplicate_write && old_value == new_value) continue;
          CreateAndLinkDelta(transaction_, edge_.ptr, Delta::SetPropertyTag(), property, std::move(old_value));
        }
      }};

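Note: both edge property paths now honor the delta_on_identical_property_update flag introduced in SalientConfig::Items above: when it is set to false, writing a value identical to the stored one creates no delta. The effect, sketched with the config field named above (the surrounding setup is illustrative):

    storage::Config config;
    config.salient.items.delta_on_identical_property_update = false;
    // With the flag disabled, a write that changes nothing returns early:
    // no delta is linked, so concurrent transactions no longer pay a
    // SERIALIZATION_ERROR for a no-op update.
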
@ -779,9 +779,10 @@ utils::BasicResult<StorageManipulationError, void> InMemoryStorage::InMemoryAcce
  // This is usually done by the MVCC, but it does not handle the metadata deltas
  transaction_.EnsureCommitTimestampExists();

  if (transaction_.constraint_verification_info.NeedsExistenceConstraintVerification()) {
  if (transaction_.constraint_verification_info &&
      transaction_.constraint_verification_info->NeedsExistenceConstraintVerification()) {
    const auto vertices_to_update =
        transaction_.constraint_verification_info.GetVerticesForExistenceConstraintChecking();
        transaction_.constraint_verification_info->GetVerticesForExistenceConstraintChecking();
    for (auto const *vertex : vertices_to_update) {
      // No need to take any locks here because we modified this vertex and no
      // one else can touch it until we commit.
@ -808,12 +809,13 @@ utils::BasicResult<StorageManipulationError, void> InMemoryStorage::InMemoryAcce
        static_cast<InMemoryUniqueConstraints *>(storage_->constraints_.unique_constraints_.get());
    commit_timestamp_.emplace(mem_storage->CommitTimestamp(reparg.desired_commit_timestamp));

    if (transaction_.constraint_verification_info.NeedsUniqueConstraintVerification()) {
    if (transaction_.constraint_verification_info &&
        transaction_.constraint_verification_info->NeedsUniqueConstraintVerification()) {
      // Before committing and validating vertices against unique constraints,
      // we have to update unique constraints with the vertices that are going
      // to be validated/committed.
      const auto vertices_to_update =
          transaction_.constraint_verification_info.GetVerticesForUniqueConstraintChecking();
          transaction_.constraint_verification_info->GetVerticesForUniqueConstraintChecking();

      for (auto const *vertex : vertices_to_update) {
        mem_unique_constraints->UpdateBeforeCommit(vertex, transaction_);
@ -994,10 +996,11 @@ void InMemoryStorage::InMemoryAccessor::Abort() {
  // note: this check also saves on unnecessary contention on `engine_lock_`
  if (!transaction_.deltas.empty()) {
    // CONSTRAINTS
    if (transaction_.constraint_verification_info.NeedsUniqueConstraintVerification()) {
    if (transaction_.constraint_verification_info &&
        transaction_.constraint_verification_info->NeedsUniqueConstraintVerification()) {
      // Need to remove elements from constraints before handling of the deltas, so the elements match the correct
      // values
      auto vertices_to_check = transaction_.constraint_verification_info.GetVerticesForUniqueConstraintChecking();
      auto vertices_to_check = transaction_.constraint_verification_info->GetVerticesForUniqueConstraintChecking();
      auto vertices_to_check_v = std::vector<Vertex const *>{vertices_to_check.begin(), vertices_to_check.end()};
      storage_->constraints_.AbortEntries(vertices_to_check_v, transaction_.start_timestamp);
    }
@ -1449,7 +1452,7 @@ Transaction InMemoryStorage::CreateTransaction(
      start_timestamp = timestamp_;
    }
  }
  return {transaction_id, start_timestamp, isolation_level, storage_mode, false};
  return {transaction_id, start_timestamp, isolation_level, storage_mode, false, !constraints_.empty()};
}

void InMemoryStorage::SetStorageMode(StorageMode new_storage_mode) {

@ -522,5 +522,6 @@ void InMemoryUniqueConstraints::Clear() {
  constraints_.clear();
  constraints_by_label_.clear();
}
bool InMemoryUniqueConstraints::empty() const { return constraints_.empty() && constraints_by_label_.empty(); }

}  // namespace memgraph::storage

@ -41,6 +41,9 @@ struct FixedCapacityArray {
using PropertyIdArray = FixedCapacityArray<PropertyId>;

class InMemoryUniqueConstraints : public UniqueConstraints {
 public:
  bool empty() const override;

 private:
  struct Entry {
    std::vector<PropertyValue> values;

@ -92,7 +92,28 @@ class PropertyValue {
  // TODO: Implement copy assignment operators for primitive types.
  // TODO: Implement copy and move assignment operators for non-primitive types.

  ~PropertyValue() { DestroyValue(); }
  ~PropertyValue() {
    switch (type_) {
      // destructor for primitive types does nothing
      case Type::Null:
      case Type::Bool:
      case Type::Int:
      case Type::Double:
      case Type::TemporalData:
        return;

      // destructor for non primitive types since we used placement new
      case Type::String:
        std::destroy_at(&string_v.val_);
        return;
      case Type::List:
        std::destroy_at(&list_v.val_);
        return;
      case Type::Map:
        std::destroy_at(&map_v.val_);
        return;
    }
  }

  Type type() const { return type_; }

@ -189,8 +210,6 @@ class PropertyValue {
  }

 private:
  void DestroyValue() noexcept;

  // NOTE: this may look strange but it is for better data layout
  // https://eel.is/c++draft/class.union#general-note-1
  union {
@ -357,13 +376,13 @@ inline PropertyValue::PropertyValue(const PropertyValue &other) : type_(other.ty
      this->double_v.val_ = other.double_v.val_;
      return;
    case Type::String:
      new (&string_v.val_) std::string(other.string_v.val_);
      std::construct_at(&string_v.val_, other.string_v.val_);
      return;
    case Type::List:
      new (&list_v.val_) std::vector<PropertyValue>(other.list_v.val_);
      std::construct_at(&list_v.val_, other.list_v.val_);
      return;
    case Type::Map:
      new (&map_v.val_) std::map<std::string, PropertyValue>(other.map_v.val_);
      std::construct_at(&map_v.val_, other.map_v.val_);
      return;
    case Type::TemporalData:
      this->temporal_data_v.val_ = other.temporal_data_v.val_;
@ -371,7 +390,7 @@ inline PropertyValue::PropertyValue(const PropertyValue &other) : type_(other.ty
  }
}

inline PropertyValue::PropertyValue(PropertyValue &&other) noexcept : type_(std::exchange(other.type_, Type::Null)) {
inline PropertyValue::PropertyValue(PropertyValue &&other) noexcept : type_(other.type_) {
  switch (type_) {
    case Type::Null:
      break;
@ -386,15 +405,12 @@ inline PropertyValue::PropertyValue(PropertyValue &&other) noexcept : type_(std:
      break;
    case Type::String:
      std::construct_at(&string_v.val_, std::move(other.string_v.val_));
      std::destroy_at(&other.string_v.val_);
      break;
    case Type::List:
      std::construct_at(&list_v.val_, std::move(other.list_v.val_));
      std::destroy_at(&other.list_v.val_);
      break;
    case Type::Map:
      std::construct_at(&map_v.val_, std::move(other.map_v.val_));
      std::destroy_at(&other.map_v.val_);
      break;
    case Type::TemporalData:
      temporal_data_v.val_ = other.temporal_data_v.val_;
@ -403,38 +419,88 @@ inline PropertyValue::PropertyValue(PropertyValue &&other) noexcept : type_(std:
}

inline PropertyValue &PropertyValue::operator=(const PropertyValue &other) {
  if (this == &other) return *this;
  if (type_ == other.type_) {
    if (this == &other) return *this;
    switch (other.type_) {
      case Type::Null:
        break;
      case Type::Bool:
        bool_v.val_ = other.bool_v.val_;
        break;
      case Type::Int:
        int_v.val_ = other.int_v.val_;
        break;
      case Type::Double:
        double_v.val_ = other.double_v.val_;
        break;
      case Type::String:
        string_v.val_ = other.string_v.val_;
        break;
      case Type::List:
        list_v.val_ = other.list_v.val_;
        break;
      case Type::Map:
        map_v.val_ = other.map_v.val_;
        break;
      case Type::TemporalData:
        temporal_data_v.val_ = other.temporal_data_v.val_;
        break;
    }
    return *this;
  } else {
    // destroy
    switch (type_) {
      case Type::Null:
        break;
      case Type::Bool:
        break;
      case Type::Int:
        break;
      case Type::Double:
        break;
      case Type::String:
        std::destroy_at(&string_v.val_);
        break;
      case Type::List:
        std::destroy_at(&list_v.val_);
        break;
      case Type::Map:
        std::destroy_at(&map_v.val_);
        break;
      case Type::TemporalData:
        break;
    }
    // construct
    auto *new_this = std::launder(this);
    switch (other.type_) {
      case Type::Null:
        break;
      case Type::Bool:
        new_this->bool_v.val_ = other.bool_v.val_;
        break;
      case Type::Int:
        new_this->int_v.val_ = other.int_v.val_;
        break;
      case Type::Double:
        new_this->double_v.val_ = other.double_v.val_;
        break;
      case Type::String:
        std::construct_at(&new_this->string_v.val_, other.string_v.val_);
        break;
      case Type::List:
        std::construct_at(&new_this->list_v.val_, other.list_v.val_);
        break;
      case Type::Map:
        std::construct_at(&new_this->map_v.val_, other.map_v.val_);
        break;
      case Type::TemporalData:
        new_this->temporal_data_v.val_ = other.temporal_data_v.val_;
        break;
    }

  DestroyValue();
  type_ = other.type_;

  switch (other.type_) {
    case Type::Null:
      break;
    case Type::Bool:
      this->bool_v.val_ = other.bool_v.val_;
      break;
    case Type::Int:
      this->int_v.val_ = other.int_v.val_;
      break;
    case Type::Double:
      this->double_v.val_ = other.double_v.val_;
      break;
    case Type::String:
      new (&string_v.val_) std::string(other.string_v.val_);
      break;
    case Type::List:
      new (&list_v.val_) std::vector<PropertyValue>(other.list_v.val_);
      break;
    case Type::Map:
      new (&map_v.val_) std::map<std::string, PropertyValue>(other.map_v.val_);
      break;
    case Type::TemporalData:
      this->temporal_data_v.val_ = other.temporal_data_v.val_;
      break;
    new_this->type_ = other.type_;
    return *new_this;
  }

  return *this;
}
inline PropertyValue &PropertyValue::operator=(PropertyValue &&other) noexcept {
@ -456,48 +522,71 @@ inline PropertyValue &PropertyValue::operator=(PropertyValue &&other) noexcept {
        break;
      case Type::String:
        string_v.val_ = std::move(other.string_v.val_);
        std::destroy_at(&other.string_v.val_);
        break;
      case Type::List:
        list_v.val_ = std::move(other.list_v.val_);
        std::destroy_at(&other.list_v.val_);
        break;
      case Type::Map:
        map_v.val_ = std::move(other.map_v.val_);
        std::destroy_at(&other.map_v.val_);
        break;
      case Type::TemporalData:
        temporal_data_v.val_ = other.temporal_data_v.val_;
        break;
    }
    other.type_ = Type::Null;
    return *this;
  } else {
    std::destroy_at(this);
    return *std::construct_at(std::launder(this), std::move(other));
  }
}
    // destroy
    switch (type_) {
      case Type::Null:
        break;
      case Type::Bool:
        break;
      case Type::Int:
        break;
      case Type::Double:
        break;
      case Type::String:
        std::destroy_at(&string_v.val_);
        break;
      case Type::List:
        std::destroy_at(&list_v.val_);
        break;
      case Type::Map:
        std::destroy_at(&map_v.val_);
        break;
      case Type::TemporalData:
        break;
    }
    // construct (no need to destroy moved from type)
    auto *new_this = std::launder(this);
    switch (other.type_) {
      case Type::Null:
        break;
      case Type::Bool:
        new_this->bool_v.val_ = other.bool_v.val_;
        break;
      case Type::Int:
        new_this->int_v.val_ = other.int_v.val_;
        break;
      case Type::Double:
        new_this->double_v.val_ = other.double_v.val_;
        break;
      case Type::String:
        std::construct_at(&new_this->string_v.val_, std::move(other.string_v.val_));
        break;
      case Type::List:
        std::construct_at(&new_this->list_v.val_, std::move(other.list_v.val_));
        break;
      case Type::Map:
        std::construct_at(&new_this->map_v.val_, std::move(other.map_v.val_));
        break;
      case Type::TemporalData:
        new_this->temporal_data_v.val_ = other.temporal_data_v.val_;
        break;
    }

inline void PropertyValue::DestroyValue() noexcept {
  switch (std::exchange(type_, Type::Null)) {
    // destructor for primitive types does nothing
    case Type::Null:
    case Type::Bool:
    case Type::Int:
    case Type::Double:
    case Type::TemporalData:
      return;

    // destructor for non primitive types since we used placement new
    case Type::String:
      std::destroy_at(&string_v.val_);
      return;
    case Type::List:
      std::destroy_at(&list_v.val_);
      return;
    case Type::Map:
      std::destroy_at(&map_v.val_);
      return;
    new_this->type_ = other.type_;
    return *new_this;
  }
}

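Note: when the assigned-from value holds a different alternative, the new operators end the current member's lifetime, construct the new one in the same storage, and continue through a std::launder-ed pointer. The whole-object variant used by the move assignment, reduced to a self-contained sketch:

    #include <memory>
    #include <new>
    #include <string>

    struct Box {
      std::string payload;
      Box(Box &&) noexcept = default;
      Box &operator=(Box &&other) noexcept {
        if (this == &other) return *this;
        std::destroy_at(this);  // end this object's lifetime
        // Recreate *this from `other`; launder yields a pointer the compiler
        // may not assume still refers to the destroyed object.
        return *std::construct_at(std::launder(this), std::move(other));
      }
    };
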
@ -13,6 +13,7 @@

#include "replication/replication_client.hpp"
#include "storage/v2/inmemory/storage.hpp"
#include "storage/v2/replication/enums.hpp"
#include "storage/v2/storage.hpp"
#include "utils/exceptions.hpp"
#include "utils/on_scope_exit.hpp"
@ -247,11 +248,13 @@ bool ReplicationStorageClient::FinalizeTransactionReplication(Storage *storage,
  MG_ASSERT(replica_stream_, "Missing stream for transaction deltas");
  try {
    auto response = replica_stream_->Finalize();
    return replica_state_.WithLock([storage, &response, db_acc = std::move(db_acc), this](auto &state) mutable {
    // NOLINTNEXTLINE
    return replica_state_.WithLock([storage, response, db_acc = std::move(db_acc), this](auto &state) mutable {
      replica_stream_.reset();
      if (!response.success || state == replication::ReplicaState::RECOVERY) {
        state = replication::ReplicaState::RECOVERY;
        client_.thread_pool_.AddTask([storage, &response, db_acc = std::move(db_acc), this] {
        // NOLINTNEXTLINE
        client_.thread_pool_.AddTask([storage, response, db_acc = std::move(db_acc), this] {
          this->RecoverReplica(response.current_commit_timestamp, storage);
        });
        return false;

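Note: both lambdas previously captured response by reference, and the inner one is queued on a thread pool that can run it long after response has left scope; capturing by value copies the response into each closure. The hazard in miniature (Response, Use and ThreadPool are illustrative stand-ins, not Memgraph types):

    void Schedule(ThreadPool &pool) {
      Response response = Finalize();
      pool.AddTask([&response] { Use(response); });  // dangles once Schedule returns
      pool.AddTask([response] { Use(response); });   // safe: the task owns a copy
    }
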
@ -41,7 +41,7 @@ const uint64_t kTransactionInitialId = 1ULL << 63U;

struct Transaction {
  Transaction(uint64_t transaction_id, uint64_t start_timestamp, IsolationLevel isolation_level,
              StorageMode storage_mode, bool edge_import_mode_active)
              StorageMode storage_mode, bool edge_import_mode_active, bool has_constraints)
      : transaction_id(transaction_id),
        start_timestamp(start_timestamp),
        command_id(0),
@ -50,6 +50,8 @@ struct Transaction {
        isolation_level(isolation_level),
        storage_mode(storage_mode),
        edge_import_mode_active(edge_import_mode_active),
        constraint_verification_info{(has_constraints) ? std::optional<ConstraintVerificationInfo>{std::in_place}
                                                       : std::nullopt},
        vertices_{(storage_mode == StorageMode::ON_DISK_TRANSACTIONAL)
                      ? std::optional<utils::SkipList<Vertex>>{std::in_place}
                      : std::nullopt},
@ -99,7 +101,7 @@ struct Transaction {
  // Used to speedup getting info about a vertex when there is a long delta
  // chain involved in rebuilding that info.
  mutable VertexInfoCache manyDeltasCache{};
  mutable ConstraintVerificationInfo constraint_verification_info{};
  mutable std::optional<ConstraintVerificationInfo> constraint_verification_info{};

  // Store modified edges GID mapped to changed Delta and serialized edge key
  // Only for disk storage

|
||||
|
||||
/// TODO: some by pointers, some by reference => not good, make it better
|
||||
storage_->constraints_.unique_constraints_->UpdateOnAddLabel(label, *vertex_, transaction_->start_timestamp);
|
||||
transaction_->constraint_verification_info.AddedLabel(vertex_);
|
||||
if (transaction_->constraint_verification_info) transaction_->constraint_verification_info->AddedLabel(vertex_);
|
||||
storage_->indices_.UpdateOnAddLabel(label, vertex_, *transaction_);
|
||||
transaction_->manyDeltasCache.Invalidate(vertex_, label);
|
||||
|
||||
@ -261,25 +261,38 @@ Result<PropertyValue> VertexAccessor::SetProperty(PropertyId property, const Pro
|
||||
|
||||
if (vertex_->deleted) return Error::DELETED_OBJECT;
|
||||
|
||||
auto current_value = vertex_->properties.GetProperty(property);
|
||||
// We could skip setting the value if the previous one is the same to the new
|
||||
// one. This would save some memory as a delta would not be created as well as
|
||||
// avoid copying the value. The reason we are not doing that is because the
|
||||
// current code always follows the logical pattern of "create a delta" and
|
||||
// "modify in-place". Additionally, the created delta will make other
|
||||
// transactions get a SERIALIZATION_ERROR.
|
||||
|
||||
PropertyValue current_value;
|
||||
const bool skip_duplicate_write = !storage_->config_.salient.items.delta_on_identical_property_update;
|
||||
utils::AtomicMemoryBlock atomic_memory_block{
|
||||
[transaction = transaction_, vertex = vertex_, &value, &property, ¤t_value]() {
|
||||
[transaction = transaction_, vertex = vertex_, &value, &property, ¤t_value, skip_duplicate_write]() {
|
||||
current_value = vertex->properties.GetProperty(property);
|
||||
// We could skip setting the value if the previous one is the same to the new
|
||||
// one. This would save some memory as a delta would not be created as well as
|
||||
// avoid copying the value. The reason we are not doing that is because the
|
||||
// current code always follows the logical pattern of "create a delta" and
|
||||
// "modify in-place". Additionally, the created delta will make other
|
||||
// transactions get a SERIALIZATION_ERROR.
|
||||
if (skip_duplicate_write && current_value == value) {
|
||||
return true;
|
||||
}
|
||||
|
||||
CreateAndLinkDelta(transaction, vertex, Delta::SetPropertyTag(), property, current_value);
|
||||
vertex->properties.SetProperty(property, value);
|
||||
}};
|
||||
std::invoke(atomic_memory_block);
|
||||
|
||||
if (!value.IsNull()) {
|
||||
transaction_->constraint_verification_info.AddedProperty(vertex_);
|
||||
} else {
|
||||
transaction_->constraint_verification_info.RemovedProperty(vertex_);
|
||||
return false;
|
||||
}};
|
||||
const bool early_exit = std::invoke(atomic_memory_block);
|
||||
|
||||
if (early_exit) {
|
||||
return std::move(current_value);
|
||||
}
|
||||
|
||||
if (transaction_->constraint_verification_info) {
|
||||
if (!value.IsNull()) {
|
||||
transaction_->constraint_verification_info->AddedProperty(vertex_);
|
||||
} else {
|
||||
transaction_->constraint_verification_info->RemovedProperty(vertex_);
|
||||
}
|
||||
}
|
||||
storage_->indices_.UpdateOnSetProperty(property, value, vertex_, *transaction_);
|
||||
transaction_->manyDeltasCache.Invalidate(vertex_, property);
|
||||
@ -309,10 +322,12 @@ Result<bool> VertexAccessor::InitProperties(const std::map<storage::PropertyId,
|
||||
CreateAndLinkDelta(transaction, vertex, Delta::SetPropertyTag(), property, PropertyValue());
|
||||
storage->indices_.UpdateOnSetProperty(property, value, vertex, *transaction);
|
||||
transaction->manyDeltasCache.Invalidate(vertex, property);
|
||||
if (!value.IsNull()) {
|
||||
transaction->constraint_verification_info.AddedProperty(vertex);
|
||||
} else {
|
||||
transaction->constraint_verification_info.RemovedProperty(vertex);
|
||||
if (transaction->constraint_verification_info) {
|
||||
if (!value.IsNull()) {
|
||||
transaction->constraint_verification_info->AddedProperty(vertex);
|
||||
} else {
|
||||
transaction->constraint_verification_info->RemovedProperty(vertex);
|
||||
}
|
||||
}
|
||||
}
|
||||
result = true;
|
||||
@ -335,25 +350,29 @@ Result<std::vector<std::tuple<PropertyId, PropertyValue, PropertyValue>>> Vertex
|
||||
|
||||
if (vertex_->deleted) return Error::DELETED_OBJECT;
|
||||
|
||||
const bool skip_duplicate_update = storage_->config_.salient.items.delta_on_identical_property_update;
|
||||
using ReturnType = decltype(vertex_->properties.UpdateProperties(properties));
|
||||
std::optional<ReturnType> id_old_new_change;
|
||||
utils::AtomicMemoryBlock atomic_memory_block{
|
||||
[storage = storage_, transaction = transaction_, vertex = vertex_, &properties, &id_old_new_change]() {
|
||||
id_old_new_change.emplace(vertex->properties.UpdateProperties(properties));
|
||||
if (!id_old_new_change.has_value()) {
|
||||
return;
|
||||
utils::AtomicMemoryBlock atomic_memory_block{[storage = storage_, transaction = transaction_, vertex = vertex_,
|
||||
&properties, &id_old_new_change, skip_duplicate_update]() {
|
||||
id_old_new_change.emplace(vertex->properties.UpdateProperties(properties));
|
||||
if (!id_old_new_change.has_value()) {
|
||||
return;
|
||||
}
|
||||
for (auto &[id, old_value, new_value] : *id_old_new_change) {
|
||||
storage->indices_.UpdateOnSetProperty(id, new_value, vertex, *transaction);
|
||||
if (skip_duplicate_update && old_value == new_value) continue;
|
||||
CreateAndLinkDelta(transaction, vertex, Delta::SetPropertyTag(), id, std::move(old_value));
|
||||
transaction->manyDeltasCache.Invalidate(vertex, id);
|
||||
if (transaction->constraint_verification_info) {
|
||||
if (!new_value.IsNull()) {
|
||||
transaction->constraint_verification_info->AddedProperty(vertex);
|
||||
} else {
|
||||
transaction->constraint_verification_info->RemovedProperty(vertex);
|
||||
}
|
||||
for (auto &[id, old_value, new_value] : *id_old_new_change) {
|
||||
storage->indices_.UpdateOnSetProperty(id, new_value, vertex, *transaction);
|
||||
CreateAndLinkDelta(transaction, vertex, Delta::SetPropertyTag(), id, std::move(old_value));
|
||||
transaction->manyDeltasCache.Invalidate(vertex, id);
|
||||
if (!new_value.IsNull()) {
|
||||
transaction->constraint_verification_info.AddedProperty(vertex);
|
||||
} else {
|
||||
transaction->constraint_verification_info.RemovedProperty(vertex);
|
||||
}
|
||||
}
|
||||
}};
|
||||
}
|
||||
}
|
||||
}};
|
||||
std::invoke(atomic_memory_block);
|
||||
|
||||
return id_old_new_change.has_value() ? std::move(id_old_new_change.value()) : ReturnType{};
|
||||
@ -380,9 +399,11 @@ Result<std::map<PropertyId, PropertyValue>> VertexAccessor::ClearProperties() {
|
||||
for (const auto &[property, value] : *properties) {
|
||||
CreateAndLinkDelta(transaction, vertex, Delta::SetPropertyTag(), property, value);
|
||||
storage->indices_.UpdateOnSetProperty(property, PropertyValue(), vertex, *transaction);
|
||||
transaction->constraint_verification_info.RemovedProperty(vertex);
|
||||
transaction->manyDeltasCache.Invalidate(vertex, property);
|
||||
}
|
||||
if (transaction->constraint_verification_info) {
|
||||
transaction->constraint_verification_info->RemovedProperty(vertex);
|
||||
}
|
||||
vertex->properties.ClearProperties();
|
||||
}};
|
||||
std::invoke(atomic_memory_block);
|
||||
|
@ -29,12 +29,10 @@ class [[nodiscard]] AtomicMemoryBlock {
|
||||
AtomicMemoryBlock &operator=(AtomicMemoryBlock &&) = delete;
|
||||
~AtomicMemoryBlock() = default;
|
||||
|
||||
void operator()() {
|
||||
{
|
||||
utils::MemoryTracker::OutOfMemoryExceptionBlocker oom_blocker;
|
||||
function_();
|
||||
}
|
||||
total_memory_tracker.DoCheck();
|
||||
auto operator()() -> std::invoke_result_t<Callable> {
|
||||
auto check_on_exit = OnScopeExit{[&] { total_memory_tracker.DoCheck(); }};
|
||||
utils::MemoryTracker::OutOfMemoryExceptionBlocker oom_blocker;
|
||||
return function_();
|
||||
}
|
||||
|
||||
private:
|
||||
|
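Note: operator() now forwards the wrapped callable's return value, with the memory check moved into a scope guard so it still runs on every exit path. This is what lets VertexAccessor::SetProperty above read an early-exit flag out of the block:

    // Hypothetical usage mirroring the accessor code above.
    utils::AtomicMemoryBlock block{[&]() -> bool {
      // ... mutate storage with OOM exceptions blocked ...
      return skipped_write;  // the value now propagates to the caller
    }};
    const bool early_exit = std::invoke(block);
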
@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -150,128 +150,133 @@ void *MonotonicBufferResource::DoAllocate(size_t bytes, size_t alignment) {

namespace impl {

Pool::Pool(size_t block_size, unsigned char blocks_per_chunk, MemoryResource *memory)
    : blocks_per_chunk_(blocks_per_chunk), block_size_(block_size), chunks_(memory) {}

Pool::~Pool() { MG_ASSERT(chunks_.empty(), "You need to call Release before destruction!"); }

void *Pool::Allocate() {
  auto allocate_block_from_chunk = [this](Chunk *chunk) {
    unsigned char *available_block = chunk->data + (chunk->first_available_block_ix * block_size_);
    // Update free-list pointer (index in our case) by reading "next" from the
    // available_block.
    chunk->first_available_block_ix = *available_block;
    --chunk->blocks_available;
    return available_block;
  };
  if (last_alloc_chunk_ && last_alloc_chunk_->blocks_available > 0U)
    return allocate_block_from_chunk(last_alloc_chunk_);
  // Find a Chunk with available memory.
  for (auto &chunk : chunks_) {
    if (chunk.blocks_available > 0U) {
      last_alloc_chunk_ = &chunk;
      return allocate_block_from_chunk(last_alloc_chunk_);
    }
  }
  // We haven't found a Chunk with available memory, so allocate a new one.
  if (block_size_ > std::numeric_limits<size_t>::max() / blocks_per_chunk_) throw BadAlloc("Allocation size overflow");
  size_t data_size = blocks_per_chunk_ * block_size_;
Pool::Pool(size_t block_size, unsigned char blocks_per_chunk, MemoryResource *chunk_memory)
    : blocks_per_chunk_(blocks_per_chunk), block_size_(block_size), chunks_(chunk_memory) {
  // Use the next pow2 of block_size_ as alignment, so that we cover alignment
  // requests between 1 and block_size_. Users of this class should make sure
  // that requested alignment of particular blocks is never greater than the
  // block itself.
  size_t alignment = Ceil2(block_size_);
  if (alignment < block_size_) throw BadAlloc("Allocation alignment overflow");
  auto *data = reinterpret_cast<unsigned char *>(GetUpstreamResource()->Allocate(data_size, alignment));
  // Form a free-list of blocks in data.
  for (unsigned char i = 0U; i < blocks_per_chunk_; ++i) {
    *(data + (i * block_size_)) = i + 1U;
  }
  Chunk chunk{data, 0, blocks_per_chunk_};
  // Insert the big block in the sorted position.
  auto it = std::lower_bound(chunks_.begin(), chunks_.end(), chunk,
                             [](const auto &a, const auto &b) { return a.data < b.data; });
  try {
    it = chunks_.insert(it, chunk);
  } catch (...) {
    GetUpstreamResource()->Deallocate(data, data_size, alignment);
    throw;
  }
  if (block_size_ > std::numeric_limits<size_t>::max() / blocks_per_chunk_) throw BadAlloc("Allocation size overflow");
}

  last_alloc_chunk_ = &*it;
  last_dealloc_chunk_ = &*it;
  return allocate_block_from_chunk(last_alloc_chunk_);
Pool::~Pool() {
  if (!chunks_.empty()) {
    auto *resource = GetUpstreamResource();
    auto const dataSize = blocks_per_chunk_ * block_size_;
    auto const alignment = Ceil2(block_size_);
    for (auto &chunk : chunks_) {
      resource->Deallocate(chunk.raw_data, dataSize, alignment);
    }
    chunks_.clear();
  }
  free_list_ = nullptr;
}

void *Pool::Allocate() {
  if (!free_list_) [[unlikely]] {
    // need new chunk
    auto const data_size = blocks_per_chunk_ * block_size_;
    auto const alignment = Ceil2(block_size_);
    auto *resource = GetUpstreamResource();
    auto *data = reinterpret_cast<std::byte *>(resource->Allocate(data_size, alignment));
    try {
      auto &new_chunk = chunks_.emplace_front(data);
      free_list_ = new_chunk.build_freelist(block_size_, blocks_per_chunk_);
    } catch (...) {
      resource->Deallocate(data, data_size, alignment);
      throw;
    }
  }
  return std::exchange(free_list_, *reinterpret_cast<std::byte **>(free_list_));
}

void Pool::Deallocate(void *p) {
  MG_ASSERT(last_dealloc_chunk_, "No chunk to deallocate");
  MG_ASSERT(!chunks_.empty(),
            "Expected a call to Deallocate after at least a "
            "single Allocate has been done.");
  auto is_in_chunk = [this, p](const Chunk &chunk) {
    auto ptr = reinterpret_cast<uintptr_t>(p);
    size_t data_size = blocks_per_chunk_ * block_size_;
    return reinterpret_cast<uintptr_t>(chunk.data) <= ptr && ptr < reinterpret_cast<uintptr_t>(chunk.data + data_size);
  };
  auto deallocate_block_from_chunk = [this, p](Chunk *chunk) {
    // NOTE: This check is not enough to cover all double-free issues.
    MG_ASSERT(chunk->blocks_available < blocks_per_chunk_,
              "Deallocating more blocks than a chunk can contain, possibly a "
              "double-free situation or we have a bug in the allocator.");
    // Link the block into the free-list
    auto *block = reinterpret_cast<unsigned char *>(p);
    *block = chunk->first_available_block_ix;
    chunk->first_available_block_ix = (block - chunk->data) / block_size_;
    chunk->blocks_available++;
  };
  if (is_in_chunk(*last_dealloc_chunk_)) {
    deallocate_block_from_chunk(last_dealloc_chunk_);
    return;
  }

  // Find the chunk which served this allocation
  Chunk chunk{reinterpret_cast<unsigned char *>(p) - blocks_per_chunk_ * block_size_, 0, 0};
  auto it = std::lower_bound(chunks_.begin(), chunks_.end(), chunk,
                             [](const auto &a, const auto &b) { return a.data <= b.data; });
  MG_ASSERT(it != chunks_.end(), "Failed deallocation in utils::Pool");
  MG_ASSERT(is_in_chunk(*it), "Failed deallocation in utils::Pool");

  // Update last_alloc_chunk_ as well because it now has a free block.
  // Additionally this corresponds with C++ pattern of allocations and
  // deallocations being done in reverse order.
  last_alloc_chunk_ = &*it;
  last_dealloc_chunk_ = &*it;
  deallocate_block_from_chunk(last_dealloc_chunk_);
  // TODO: We could release the Chunk to upstream memory
}

void Pool::Release() {
  for (auto &chunk : chunks_) {
    size_t data_size = blocks_per_chunk_ * block_size_;
    size_t alignment = Ceil2(block_size_);
    GetUpstreamResource()->Deallocate(chunk.data, data_size, alignment);
  }
  chunks_.clear();
  last_alloc_chunk_ = nullptr;
  last_dealloc_chunk_ = nullptr;
  *reinterpret_cast<std::byte **>(p) = std::exchange(free_list_, reinterpret_cast<std::byte *>(p));
}

}  // namespace impl

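Note: the rewritten Pool replaces per-chunk block counters and index-based free lists with one intrusive free-list across all chunks: each free block stores the address of the next free block in its own first bytes, so both operations are pointer swaps. The core idea in isolation (this assumes block_size_ >= sizeof(std::byte *), which the pool's block sizes satisfy):

    std::byte *free_list_ = nullptr;

    void *Allocate() {
      // Pop the head; its first bytes hold the pointer to the next free block.
      return std::exchange(free_list_, *reinterpret_cast<std::byte **>(free_list_));
    }

    void Deallocate(void *p) {
      // Push p: store the old head inside p, then make p the new head.
      *reinterpret_cast<std::byte **>(p) = std::exchange(free_list_, reinterpret_cast<std::byte *>(p));
    }
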
PoolResource::PoolResource(size_t max_blocks_per_chunk, size_t max_block_size, MemoryResource *memory_pools,
                           MemoryResource *memory_unpooled)
    : pools_(memory_pools),
      unpooled_(memory_unpooled),
      max_blocks_per_chunk_(std::min(max_blocks_per_chunk, static_cast<size_t>(impl::Pool::MaxBlocksInChunk()))),
      max_block_size_(max_block_size) {
  MG_ASSERT(max_blocks_per_chunk_ > 0U, "Invalid number of blocks per chunk");
  MG_ASSERT(max_block_size_ > 0U, "Invalid size of block");
struct NullMemoryResourceImpl final : public MemoryResource {
  NullMemoryResourceImpl() = default;
  NullMemoryResourceImpl(NullMemoryResourceImpl const &) = default;
  NullMemoryResourceImpl &operator=(NullMemoryResourceImpl const &) = default;
  NullMemoryResourceImpl(NullMemoryResourceImpl &&) = default;
  NullMemoryResourceImpl &operator=(NullMemoryResourceImpl &&) = default;
  ~NullMemoryResourceImpl() override = default;

 private:
  void *DoAllocate(size_t /*bytes*/, size_t /*alignment*/) override {
    throw BadAlloc{"NullMemoryResource doesn't allocate"};
  }
  void DoDeallocate(void * /*p*/, size_t /*bytes*/, size_t /*alignment*/) override {
    throw BadAlloc{"NullMemoryResource doesn't deallocate"};
  }
  bool DoIsEqual(MemoryResource const &other) const noexcept override {
    return dynamic_cast<NullMemoryResourceImpl const *>(&other) != nullptr;
  }
};

MemoryResource *NullMemoryResource() noexcept {
  static auto res = NullMemoryResourceImpl{};
  return &res;
}

namespace impl {

/// 1 bit sensitivity test
static_assert(bin_index<1>(9U) == 0);
static_assert(bin_index<1>(10U) == 0);
static_assert(bin_index<1>(11U) == 0);
static_assert(bin_index<1>(12U) == 0);
static_assert(bin_index<1>(13U) == 0);
static_assert(bin_index<1>(14U) == 0);
static_assert(bin_index<1>(15U) == 0);
static_assert(bin_index<1>(16U) == 0);

static_assert(bin_index<1>(17U) == 1);
static_assert(bin_index<1>(18U) == 1);
static_assert(bin_index<1>(19U) == 1);
static_assert(bin_index<1>(20U) == 1);
static_assert(bin_index<1>(21U) == 1);
static_assert(bin_index<1>(22U) == 1);
static_assert(bin_index<1>(23U) == 1);
static_assert(bin_index<1>(24U) == 1);
static_assert(bin_index<1>(25U) == 1);
static_assert(bin_index<1>(26U) == 1);
static_assert(bin_index<1>(27U) == 1);
static_assert(bin_index<1>(28U) == 1);
static_assert(bin_index<1>(29U) == 1);
static_assert(bin_index<1>(30U) == 1);
static_assert(bin_index<1>(31U) == 1);
static_assert(bin_index<1>(32U) == 1);

/// 2 bit sensitivity test

static_assert(bin_index<2>(9U) == 0);
static_assert(bin_index<2>(10U) == 0);
static_assert(bin_index<2>(11U) == 0);
static_assert(bin_index<2>(12U) == 0);

static_assert(bin_index<2>(13U) == 1);
static_assert(bin_index<2>(14U) == 1);
static_assert(bin_index<2>(15U) == 1);
static_assert(bin_index<2>(16U) == 1);

static_assert(bin_index<2>(17U) == 2);
static_assert(bin_index<2>(18U) == 2);
static_assert(bin_index<2>(19U) == 2);
static_assert(bin_index<2>(20U) == 2);
static_assert(bin_index<2>(21U) == 2);
static_assert(bin_index<2>(22U) == 2);
static_assert(bin_index<2>(23U) == 2);
static_assert(bin_index<2>(24U) == 2);

}  // namespace impl

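Note: bin_index<Bits>(size) itself is not shown in this excerpt; from the static_asserts it appears to map a block size onto pool bins, splitting each power-of-two size range above 8 bytes into 2^(Bits-1) equal sub-bins. A reconstruction consistent with the asserts above (the real implementation may differ):

    #include <bit>
    #include <cstddef>

    template <std::size_t Bits>
    constexpr std::size_t bin_index(std::size_t size) {
      constexpr std::size_t sub_bins = std::size_t{1} << (Bits - 1);
      const std::size_t k = std::bit_width(size - 1);     // ceil(log2(size))
      const std::size_t low = std::size_t{1} << (k - 1);  // range start, exclusive
      const std::size_t step = low / sub_bins;            // width of one sub-bin
      return (k - 4) * sub_bins + (size - low + step - 1) / step - 1;
    }
    static_assert(bin_index<2>(13U) == 1);
    static_assert(bin_index<2>(17U) == 2);
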
void *PoolResource::DoAllocate(size_t bytes, size_t alignment) {
  // Take the max of `bytes` and `alignment` so that we simplify handling
  // alignment requests.
  size_t block_size = std::max(bytes, alignment);
  size_t block_size = std::max({bytes, alignment, 1UL});
  // Check that we have received a regular allocation request with non-padded
  // structs/classes in play. These will always have
  // `sizeof(T) % alignof(T) == 0`. Special requests which don't have that
@ -279,80 +284,36 @@ void *PoolResource::DoAllocate(size_t bytes, size_t alignment) {
  // have to write a general-purpose allocator which has to behave as complex
  // as malloc/free.
  if (block_size % alignment != 0) throw BadAlloc("Requested bytes must be a multiple of alignment");
  if (block_size > max_block_size_) {
    // Allocate a big block.
    BigBlock big_block{bytes, alignment, GetUpstreamResourceBlocks()->Allocate(bytes, alignment)};
    // Insert the big block in the sorted position.
    auto it = std::lower_bound(unpooled_.begin(), unpooled_.end(), big_block,
                               [](const auto &a, const auto &b) { return a.data < b.data; });
    try {
      unpooled_.insert(it, big_block);
    } catch (...) {
      GetUpstreamResourceBlocks()->Deallocate(big_block.data, bytes, alignment);
      throw;
    }
    return big_block.data;
  }
  // Allocate a regular block, first check if last_alloc_pool_ is suitable.
  if (last_alloc_pool_ && last_alloc_pool_->GetBlockSize() == block_size) {
    return last_alloc_pool_->Allocate();
  }
  // Find the pool with greater or equal block_size.
  impl::Pool pool(block_size, max_blocks_per_chunk_, GetUpstreamResource());
  auto it = std::lower_bound(pools_.begin(), pools_.end(), pool,
                             [](const auto &a, const auto &b) { return a.GetBlockSize() < b.GetBlockSize(); });
  if (it != pools_.end() && it->GetBlockSize() == block_size) {
    last_alloc_pool_ = &*it;
    last_dealloc_pool_ = &*it;
    return it->Allocate();
  }
  // We don't have a pool for this block_size, so insert it in the sorted
  // position.
  it = pools_.emplace(it, std::move(pool));
  last_alloc_pool_ = &*it;
  last_dealloc_pool_ = &*it;
  return it->Allocate();
}

  if (block_size <= 64) {
    return mini_pools_[(block_size - 1UL) / 8UL].Allocate();
  }
  if (block_size <= 128) {
    return pools_3bit_.allocate(block_size);
  }
  if (block_size <= 512) {
    return pools_4bit_.allocate(block_size);
  }
  if (block_size <= 1024) {
    return pools_5bit_.allocate(block_size);
  }
  return unpooled_memory_->Allocate(bytes, alignment);
}
void PoolResource::DoDeallocate(void *p, size_t bytes, size_t alignment) {
  size_t block_size = std::max(bytes, alignment);
  MG_ASSERT(block_size % alignment == 0,
            "PoolResource shouldn't serve allocation requests where bytes aren't "
            "a multiple of alignment");
  if (block_size > max_block_size_) {
    // Deallocate a big block.
    BigBlock big_block{bytes, alignment, p};
    auto it = std::lower_bound(unpooled_.begin(), unpooled_.end(), big_block,
                               [](const auto &a, const auto &b) { return a.data < b.data; });
    MG_ASSERT(it != unpooled_.end(), "Failed deallocation");
    MG_ASSERT(it->data == p && it->bytes == bytes && it->alignment == alignment, "Failed deallocation");
    unpooled_.erase(it);
    GetUpstreamResourceBlocks()->Deallocate(p, bytes, alignment);
    return;
  size_t block_size = std::max({bytes, alignment, 1UL});
  DMG_ASSERT(block_size % alignment == 0);

  if (block_size <= 64) {
    mini_pools_[(block_size - 1UL) / 8UL].Deallocate(p);
  } else if (block_size <= 128) {
    pools_3bit_.deallocate(p, block_size);
  } else if (block_size <= 512) {
    pools_4bit_.deallocate(p, block_size);
  } else if (block_size <= 1024) {
    pools_5bit_.deallocate(p, block_size);
  } else {
    unpooled_memory_->Deallocate(p, bytes, alignment);
  }
  // Deallocate a regular block, first check if last_dealloc_pool_ is suitable.
  if (last_dealloc_pool_ && last_dealloc_pool_->GetBlockSize() == block_size) return last_dealloc_pool_->Deallocate(p);
  // Find the pool with equal block_size.
  impl::Pool pool(block_size, max_blocks_per_chunk_, GetUpstreamResource());
  auto it = std::lower_bound(pools_.begin(), pools_.end(), pool,
                             [](const auto &a, const auto &b) { return a.GetBlockSize() < b.GetBlockSize(); });
  MG_ASSERT(it != pools_.end(), "Failed deallocation");
  MG_ASSERT(it->GetBlockSize() == block_size, "Failed deallocation");
  last_alloc_pool_ = &*it;
  last_dealloc_pool_ = &*it;
  return it->Deallocate(p);
}

void PoolResource::Release() {
  for (auto &pool : pools_) pool.Release();
  pools_.clear();
  for (auto &big_block : unpooled_)
    GetUpstreamResourceBlocks()->Deallocate(big_block.data, big_block.bytes, big_block.alignment);
  unpooled_.clear();
  last_alloc_pool_ = nullptr;
  last_dealloc_pool_ = nullptr;
}

// PoolResource END

bool PoolResource::DoIsEqual(MemoryResource const &other) const noexcept { return this == &other; }
}  // namespace memgraph::utils

@ -1,4 +1,4 @@
// Copyright 2023 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -15,7 +15,11 @@

#pragma once

#include <climits>
#include <cstddef>
#include <cstdint>
#include <forward_list>
#include <list>
#include <memory>
#include <mutex>
#include <new>
@ -248,6 +252,8 @@ bool operator!=(const Allocator<T> &a, const Allocator<U> &b) {
  return !(a == b);
}

auto NullMemoryResource() noexcept -> MemoryResource *;

/// Wraps std::pmr::memory_resource for use with our MemoryResource
class StdMemoryResource final : public MemoryResource {
 public:
@ -380,37 +386,45 @@ class MonotonicBufferResource final : public MemoryResource {

namespace impl {

template <class T>
using AList = std::forward_list<T, Allocator<T>>;

template <class T>
using AVector = std::vector<T, Allocator<T>>;

/// Holds a number of Chunks each serving blocks of particular size. When a
/// Chunk runs out of available blocks, a new Chunk is allocated. The naming is
/// taken from the `libstdc++` implementation, but the implementation details are
/// more similar to `FixedAllocator` described in "Small Object Allocation" from
/// "Modern C++ Design".
/// Chunk runs out of available blocks, a new Chunk is allocated.
class Pool final {
  /// Holds a pointer into a chunk of memory which consists of equal sized
  /// blocks. Each Chunk can handle `std::numeric_limits<unsigned char>::max()`
  /// number of blocks. Blocks form a "free-list", where each unused block has
  /// an embedded index to the next unused block.
  /// blocks. Blocks form a "free-list".
  struct Chunk {
    unsigned char *data;
    unsigned char first_available_block_ix;
    unsigned char blocks_available;
    // TODO: make blocks_per_chunk a per-chunk thing (i.e. allow chunk growth)
    std::byte *raw_data;
    explicit Chunk(std::byte *rawData) : raw_data(rawData) {}
    std::byte *build_freelist(std::size_t block_size, std::size_t blocks_in_chunk) {
      auto current = raw_data;
      std::byte *prev = nullptr;
      auto end = current + (blocks_in_chunk * block_size);
      while (current != end) {
        std::byte **list_entry = reinterpret_cast<std::byte **>(current);
        *list_entry = std::exchange(prev, current);
        current += block_size;
      }
      DMG_ASSERT(prev != nullptr);
      return prev;
    }
  };
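
The free-list built by build_freelist above threads a pointer to the previously linked block through the first word of every block, so handing out and taking back blocks is O(1): allocation pops the list head, deallocation pushes it back. A minimal standalone sketch of that mechanism, with illustrative names (this is not the Pool class itself):

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

// Illustrative intrusive free-list over a fixed buffer;
// block_size must be at least sizeof(void *).
struct FreeListDemo {
  std::vector<std::byte> buffer;
  std::byte *head = nullptr;

  FreeListDemo(std::size_t block_size, std::size_t blocks) : buffer(block_size * blocks) {
    std::byte *prev = nullptr;
    for (std::size_t i = 0; i != blocks; ++i) {
      std::byte *block = buffer.data() + i * block_size;
      // Link this block to the previous head, mirroring build_freelist.
      *reinterpret_cast<std::byte **>(block) = std::exchange(prev, block);
    }
    head = prev;  // the last linked block becomes the list head
  }

  void *Allocate() {  // pop the head of the free-list
    assert(head != nullptr);
    return std::exchange(head, *reinterpret_cast<std::byte **>(head));
  }

  void Deallocate(void *p) {  // push the block back as the new head
    auto *block = static_cast<std::byte *>(p);
    *reinterpret_cast<std::byte **>(block) = std::exchange(head, block);
  }
};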

  unsigned char blocks_per_chunk_;
  size_t block_size_;
  AVector<Chunk> chunks_;
  Chunk *last_alloc_chunk_{nullptr};
  Chunk *last_dealloc_chunk_{nullptr};
  std::byte *free_list_{nullptr};
  uint8_t blocks_per_chunk_{};
  std::size_t block_size_{};

  AList<Chunk> chunks_;  // TODO: do this ourselves so we can do a fast Release (detect monotonic, do nothing)

 public:
  static constexpr auto MaxBlocksInChunk() {
    return std::numeric_limits<decltype(Chunk::first_available_block_ix)>::max();
  }
  static constexpr auto MaxBlocksInChunk = std::numeric_limits<decltype(blocks_per_chunk_)>::max();

  Pool(size_t block_size, unsigned char blocks_per_chunk, MemoryResource *memory);
  Pool(size_t block_size, unsigned char blocks_per_chunk, MemoryResource *chunk_memory);

  Pool(const Pool &) = delete;
  Pool &operator=(const Pool &) = delete;
@ -430,8 +444,147 @@ class Pool final {
  void *Allocate();

  void Deallocate(void *p);
};

  void Release();
// C++ overloads for clz
constexpr auto clz(unsigned int x) { return __builtin_clz(x); }
constexpr auto clz(unsigned long x) { return __builtin_clzl(x); }
constexpr auto clz(unsigned long long x) { return __builtin_clzll(x); }

template <typename T>
constexpr auto bits_sizeof = sizeof(T) * CHAR_BIT;

/// 0-based bit index of the most significant bit, assuming `n` != 0
template <typename T>
constexpr auto msb_index(T n) {
  return bits_sizeof<T> - clz(n) - T(1);
}

/* This function will in O(1) time provide a bin index based on:
 * B - the number of most significant bits to be sensitive to
 * LB - the largest value that falls below bin index 0 (LB is exclusive)
 *
 * let's say we are:
 * - sensitive to two bits (B == 2)
 * - lowest bin is for 8 (LB == 8)
 *
 * our bin indexes would look like:
 * 0 - 0000'1100 12
 * 1 - 0001'0000 16
 * 2 - 0001'1000 24
 * 3 - 0010'0000 32
 * 4 - 0011'0000 48
 * 5 - 0100'0000 64
 * 6 - 0110'0000 96
 * 7 - 1000'0000 128
 * 8 - 1100'0000 192
 * ...
 *
 * Example:
 * Given n == 70, we want to return the bin index of the first value which is
 * larger than or equal to n.
 * bin_index<2,8>(70) => 6, as 64 (index 5) < 70 and 70 <= 96 (index 6)
 */
template <std::size_t B = 2, std::size_t LB = 8>
constexpr std::size_t bin_index(std::size_t n) {
  static_assert(B >= 1U, "Needs to be sensitive to at least one bit");
  static_assert(LB != 0U, "Lower bound needs to be non-zero");
  DMG_ASSERT(n > LB);

  // We will always be sensitive to at least the MSB
  // exponent tells us how many bits we need to use to select within a level
  constexpr auto kExponent = B - 1U;
  // 2^exponent gives the size of each level
  constexpr auto kSize = 1U << kExponent;
  // offset helps adjust results down to be in line with bin_index(LB) == 0
  constexpr auto kOffset = msb_index(LB);

  auto const msb_idx = msb_index(n);
  DMG_ASSERT(msb_idx != 0);

  auto const mask = (1u << msb_idx) - 1u;
  auto const under = n & mask;
  auto const selector = under >> (msb_idx - kExponent);

  auto const rest = under & (mask >> kExponent);
  auto const no_overflow = rest == 0U;

  auto const msb_level = kSize * (msb_idx - kOffset);
  return msb_level + selector - no_overflow;
}

// This is the inverse operation of bin_index:
// bin_size(bin_index(X)-1) < X <= bin_size(bin_index(X))
template <std::size_t B = 2, std::size_t LB = 8>
std::size_t bin_size(std::size_t idx) {
  constexpr auto kExponent = B - 1U;
  constexpr auto kSize = 1U << kExponent;
  constexpr auto kOffset = msb_index(LB);

  // no need to optimise `/` or `%`; the compiler can see `kSize` is a power of 2
  auto const level = (idx + 1) / kSize;
  auto const sub_level = (idx + 1) % kSize;
  return (1U << (level + kOffset)) | (sub_level << (level + kOffset - kExponent));
}
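
To make the bin arithmetic concrete, here is a self-contained check that mirrors bin_index and bin_size as shown above and asserts the documented table values (B == 2, LB == 8) plus the n == 70 example. It is a sketch meant to compile outside this codebase, so it assumes GCC/Clang for __builtin_clzl and drops the DMG_ASSERT checks:

#include <cassert>
#include <climits>
#include <cstddef>

constexpr std::size_t kBits = sizeof(std::size_t) * CHAR_BIT;
constexpr std::size_t msb_index(std::size_t n) { return kBits - __builtin_clzl(n) - 1; }

template <std::size_t B = 2, std::size_t LB = 8>
constexpr std::size_t bin_index(std::size_t n) {
  constexpr auto kExponent = B - 1U;
  constexpr auto kSize = 1U << kExponent;
  constexpr auto kOffset = msb_index(LB);
  auto const msb_idx = msb_index(n);
  auto const mask = (std::size_t{1} << msb_idx) - 1U;
  auto const under = n & mask;                     // bits below the MSB
  auto const selector = under >> (msb_idx - kExponent);  // which slot within the level
  auto const rest = under & (mask >> kExponent);   // bits below the selector
  auto const no_overflow = static_cast<std::size_t>(rest == 0U);
  return kSize * (msb_idx - kOffset) + selector - no_overflow;
}

template <std::size_t B = 2, std::size_t LB = 8>
constexpr std::size_t bin_size(std::size_t idx) {
  constexpr auto kExponent = B - 1U;
  constexpr auto kSize = 1U << kExponent;
  constexpr auto kOffset = msb_index(LB);
  auto const level = (idx + 1) / kSize;
  auto const sub_level = (idx + 1) % kSize;
  return (std::size_t{1} << (level + kOffset)) | (sub_level << (level + kOffset - kExponent));
}

int main() {
  static_assert(bin_index(12) == 0 && bin_size(0) == 12);    // first row of the table
  static_assert(bin_index(70) == 6 && bin_size(6) == 96);    // 64 < 70 <= 96
  static_assert(bin_index(128) == 7 && bin_size(7) == 128);  // upper bounds are inclusive
  // MultiPool<3, 64, 128> below therefore has bin_index<3, 64>(128) + 1 == 4 bins:
  static_assert(bin_size<3, 64>(0) == 80 && bin_size<3, 64>(3) == 128);
}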

template <std::size_t Bits, std::size_t LB, std::size_t UB>
struct MultiPool {
  static_assert(LB < UB, "lower bound must be less than upper bound");
  static_assert(IsPow2(LB) && IsPow2(UB), "Design untested for non powers of 2");
  static_assert((LB << Bits) % sizeof(void *) == 0, "Smallest pool must have space and alignment for freelist");

  // upper bound is inclusive
  static bool is_size_handled(std::size_t size) { return LB < size && size <= UB; }
  static bool is_above_upper_bound(std::size_t size) { return UB < size; }

  static constexpr auto n_bins = bin_index<Bits, LB>(UB) + 1U;

  MultiPool(uint8_t blocks_per_chunk, MemoryResource *memory, MemoryResource *internal_memory)
      : blocks_per_chunk_{blocks_per_chunk}, memory_{memory}, internal_memory_{internal_memory} {}

  ~MultiPool() {
    if (pools_) {
      auto pool_alloc = Allocator<Pool>(internal_memory_);
      for (auto i = 0U; i != n_bins; ++i) {
        pool_alloc.destroy(&pools_[i]);
      }
      pool_alloc.deallocate(pools_, n_bins);
    }
  }

  void *allocate(std::size_t bytes) {
    auto idx = bin_index<Bits, LB>(bytes);
    if (!pools_) [[unlikely]] {
      initialise_pools();
    }
    return pools_[idx].Allocate();
  }

  void deallocate(void *ptr, std::size_t bytes) {
    auto idx = bin_index<Bits, LB>(bytes);
    pools_[idx].Deallocate(ptr);
  }

 private:
  void initialise_pools() {
    auto pool_alloc = Allocator<Pool>(internal_memory_);
    auto pools = pool_alloc.allocate(n_bins);
    try {
      for (auto i = 0U; i != n_bins; ++i) {
        auto block_size = bin_size<Bits, LB>(i);
        pool_alloc.construct(&pools[i], block_size, blocks_per_chunk_, memory_);
      }
      pools_ = pools;
    } catch (...) {
      pool_alloc.deallocate(pools, n_bins);
      throw;
    }
  }

  Pool *pools_{};
  uint8_t blocks_per_chunk_{};
  MemoryResource *memory_{};
  MemoryResource *internal_memory_{};
};

}  // namespace impl
@ -442,8 +595,6 @@ class Pool final {
///
/// This class has the following properties with regard to memory management.
///
/// * All allocated memory will be freed upon destruction, even if Deallocate
///   has not been called for some of the allocated blocks.
/// * It consists of a collection of impl::Pool instances, each serving
///   requests for different block sizes. Each impl::Pool manages a collection
///   of impl::Pool::Chunk instances which are divided into blocks of uniform
@ -452,91 +603,46 @@ class Pool final {
///   arbitrary alignment requests. Each requested block size must be a
///   multiple of alignment or smaller than the alignment value.
/// * An allocation request within the limits of the maximum block size will
///   find a Pool serving the requested size. If there's no Pool serving such
///   a request, a new one is instantiated.
///   find a Pool serving the requested size. Some requests will share a larger
///   pool size.
/// * When a Pool exhausts its Chunk, a new one is allocated with the size for
///   the maximum number of blocks.
/// * Allocation requests which exceed the maximum block size will be
///   forwarded to upstream MemoryResource.
/// * Maximum block size and maximum number of blocks per chunk can be tuned
///   by passing the arguments to the constructor.
/// * Maximum number of blocks per chunk can be tuned by passing the
///   arguments to the constructor.

class PoolResource final : public MemoryResource {
 public:
  /// Construct with given max_blocks_per_chunk, max_block_size and upstream
  /// memory.
  ///
  /// The implementation will use std::min(max_blocks_per_chunk,
  /// impl::Pool::MaxBlocksInChunk()) as the real maximum number of blocks per
  /// chunk. Allocation requests exceeding max_block_size are simply forwarded
  /// to upstream memory.
  PoolResource(size_t max_blocks_per_chunk, size_t max_block_size, MemoryResource *memory_pools = NewDeleteResource(),
               MemoryResource *memory_unpooled = NewDeleteResource());

  PoolResource(const PoolResource &) = delete;
  PoolResource &operator=(const PoolResource &) = delete;

  PoolResource(PoolResource &&) = default;
  PoolResource &operator=(PoolResource &&) = default;

  ~PoolResource() override { Release(); }

  MemoryResource *GetUpstreamResource() const { return pools_.get_allocator().GetMemoryResource(); }
  MemoryResource *GetUpstreamResourceBlocks() const { return unpooled_.get_allocator().GetMemoryResource(); }

  /// Release all allocated memory.
  void Release();
  PoolResource(uint8_t blocks_per_chunk, MemoryResource *memory = NewDeleteResource(),
               MemoryResource *internal_memory = NewDeleteResource())
      : mini_pools_{
            impl::Pool{8, blocks_per_chunk, memory},
            impl::Pool{16, blocks_per_chunk, memory},
            impl::Pool{24, blocks_per_chunk, memory},
            impl::Pool{32, blocks_per_chunk, memory},
            impl::Pool{40, blocks_per_chunk, memory},
            impl::Pool{48, blocks_per_chunk, memory},
            impl::Pool{56, blocks_per_chunk, memory},
            impl::Pool{64, blocks_per_chunk, memory},
        },
        pools_3bit_(blocks_per_chunk, memory, internal_memory),
        pools_4bit_(blocks_per_chunk, memory, internal_memory),
        pools_5bit_(blocks_per_chunk, memory, internal_memory),
        unpooled_memory_{internal_memory} {}
  ~PoolResource() override = default;

 private:
  // Big block larger than max_block_size_, doesn't go into a pool.
  struct BigBlock {
    size_t bytes;
    size_t alignment;
    void *data;
  };

  // TODO: Potential memory optimization is replacing `std::vector` with our
  // custom vector implementation which doesn't store a `MemoryResource *`.
  // Currently we have vectors for `pools_` and `unpooled_`, as well as each
  // `impl::Pool` stores a `chunks_` vector.

  // Pools are sorted by bound_size_, ascending.
  impl::AVector<impl::Pool> pools_;
  impl::Pool *last_alloc_pool_{nullptr};
  impl::Pool *last_dealloc_pool_{nullptr};
  // Unpooled BigBlocks are sorted by data pointer.
  impl::AVector<BigBlock> unpooled_;
  size_t max_blocks_per_chunk_;
  size_t max_block_size_;

  void *DoAllocate(size_t bytes, size_t alignment) override;

  void DoDeallocate(void *p, size_t bytes, size_t alignment) override;

  bool DoIsEqual(const MemoryResource &other) const noexcept override { return this == &other; }
};

/// Like PoolResource but uses SpinLock for thread safe usage.
class SynchronizedPoolResource final : public MemoryResource {
 public:
  SynchronizedPoolResource(size_t max_blocks_per_chunk, size_t max_block_size,
                           MemoryResource *memory = NewDeleteResource())
      : pool_memory_(max_blocks_per_chunk, max_block_size, memory) {}
  bool DoIsEqual(MemoryResource const &other) const noexcept override;

 private:
  PoolResource pool_memory_;
  SpinLock lock_;

  void *DoAllocate(size_t bytes, size_t alignment) override {
    std::lock_guard<SpinLock> guard(lock_);
    return pool_memory_.Allocate(bytes, alignment);
  }

  void DoDeallocate(void *p, size_t bytes, size_t alignment) override {
    std::lock_guard<SpinLock> guard(lock_);
    pool_memory_.Deallocate(p, bytes, alignment);
  }

  bool DoIsEqual(const MemoryResource &other) const noexcept override { return this == &other; }
  std::array<impl::Pool, 8> mini_pools_;
  impl::MultiPool<3, 64, 128> pools_3bit_;
  impl::MultiPool<4, 128, 512> pools_4bit_;
  impl::MultiPool<5, 512, 1024> pools_5bit_;
  MemoryResource *unpooled_memory_;
};
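
Taken together, the reworked PoolResource dispatches purely by size: eight fixed mini pools cover blocks of 8-64 bytes in 8-byte steps, the three MultiPools cover 65-1024 bytes with 3-, 4- and 5-bit-sensitive binning, and anything larger is forwarded to unpooled memory. A hedged usage sketch, based on the single-argument constructor shown above and the Allocator adapter already in this header (the include path is an assumption from the src/utils layout):

#include <cstdint>
#include <set>

#include "utils/memory.hpp"  // path assumed from this diff's src/utils layout

int main() {
  // One-argument constructor from this diff: blocks per chunk only;
  // upstream resources default to NewDeleteResource().
  memgraph::utils::PoolResource pool_memory{128};

  // STL containers can draw from the pool through the Allocator adapter.
  std::set<uint64_t, std::less<>, memgraph::utils::Allocator<uint64_t>> numbers{&pool_memory};
  for (uint64_t i = 0; i != 1024; ++i) numbers.insert(i);  // node allocations land in the size bins
}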

class MemoryTrackingResource final : public utils::MemoryResource {

@ -35,7 +35,7 @@ namespace memgraph::utils {
 * // long block of code, might throw an exception
 * }
 */
template <typename Callable>
template <std::invocable Callable>
class [[nodiscard]] OnScopeExit {
 public:
  template <typename U>
@ -46,7 +46,7 @@ class [[nodiscard]] OnScopeExit {
  OnScopeExit &operator=(OnScopeExit const &) = delete;
  OnScopeExit &operator=(OnScopeExit &&) = delete;
  ~OnScopeExit() {
    if (doCall_) function_();
    if (doCall_) std::invoke(std::move(function_));
  }

  void Disable() { doCall_ = false; }
@ -57,5 +57,4 @@ class [[nodiscard]] OnScopeExit {
};
template <typename Callable>
OnScopeExit(Callable &&) -> OnScopeExit<Callable>;

}  // namespace memgraph::utils

32
src/utils/tag.hpp
Normal file
@ -0,0 +1,32 @@
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

namespace memgraph::utils {

template <typename T>
struct tag_type {
  using type = T;
};

template <auto V>
struct tag_value {
  static constexpr auto value = V;
};

template <typename T>
auto tag_t = tag_type<T>{};

template <auto V>
auto tag_v = tag_value<V>{};

}  // namespace memgraph::utils
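
These tags enable value-free overload dispatch on a type or a compile-time constant: the overload is selected by the tag's type alone, so no T object is ever constructed. A small hedged sketch (the include path is an assumption):

#include <string>

#include "utils/tag.hpp"  // path assumed from this diff's src/utils layout

// Overloads selected purely by tag type.
int DefaultOf(memgraph::utils::tag_type<int>) { return 0; }
std::string DefaultOf(memgraph::utils::tag_type<std::string>) { return ""; }

int main() {
  auto i = DefaultOf(memgraph::utils::tag_t<int>);          // 0
  auto s = DefaultOf(memgraph::utils::tag_t<std::string>);  // ""
  static_assert(memgraph::utils::tag_v<42>.value == 42);
  return i + static_cast<int>(s.size());
}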

@ -55,12 +55,12 @@ class NewDeleteResource final {
};

class PoolResource final {
  memgraph::utils::PoolResource memory_{128, 4 * 1024};
  memgraph::utils::PoolResource memory_{128};

 public:
  memgraph::utils::MemoryResource *get() { return &memory_; }

  void Reset() { memory_.Release(); }
  void Reset() {}
};

static void AddVertices(memgraph::storage::Storage *db, int vertex_count) {

@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -101,8 +101,7 @@ class StdSetWithPoolAllocatorInsertFixture : public benchmark::Fixture {
  }

 protected:
  memgraph::utils::PoolResource memory_{256U /* max_blocks_per_chunk */, 1024U /* max_block_size */,
                                        memgraph::utils::NewDeleteResource()};
  memgraph::utils::PoolResource memory_{128U /* max_blocks_per_chunk */, memgraph::utils::NewDeleteResource()};
  std::set<uint64_t, std::less<>, memgraph::utils::Allocator<uint64_t>> container{&memory_};
  memgraph::utils::SpinLock lock;
};
@ -208,8 +207,7 @@ class StdSetWithPoolAllocatorFindFixture : public benchmark::Fixture {
  }

 protected:
  memgraph::utils::PoolResource memory_{256U /* max_blocks_per_chunk */, 1024U /* max_block_size */,
                                        memgraph::utils::NewDeleteResource()};
  memgraph::utils::PoolResource memory_{128U /* max_blocks_per_chunk */, memgraph::utils::NewDeleteResource()};
  std::set<uint64_t, std::less<>, memgraph::utils::Allocator<uint64_t>> container{&memory_};
  memgraph::utils::SpinLock lock;
};
@ -325,8 +323,7 @@ class StdMapWithPoolAllocatorInsertFixture : public benchmark::Fixture {
  }

 protected:
  memgraph::utils::PoolResource memory_{256U /* max_blocks_per_chunk */, 1024U /* max_block_size */,
                                        memgraph::utils::NewDeleteResource()};
  memgraph::utils::PoolResource memory_{128U /* max_blocks_per_chunk */, memgraph::utils::NewDeleteResource()};
  std::map<uint64_t, uint64_t, std::less<>, memgraph::utils::Allocator<std::pair<const uint64_t, uint64_t>>> container{
      &memory_};
  memgraph::utils::SpinLock lock;
@ -433,8 +430,7 @@ class StdMapWithPoolAllocatorFindFixture : public benchmark::Fixture {
  }

 protected:
  memgraph::utils::PoolResource memory_{256U /* max_blocks_per_chunk */, 1024U /* max_block_size */,
                                        memgraph::utils::NewDeleteResource()};
  memgraph::utils::PoolResource memory_{128U /* max_blocks_per_chunk */, memgraph::utils::NewDeleteResource()};
  std::map<uint64_t, uint64_t, std::less<>, memgraph::utils::Allocator<std::pair<const uint64_t, uint64_t>>> container{
      &memory_};
  memgraph::utils::SpinLock lock;

23
tests/code_analysis/clang_tidy.sh
Executable file
@ -0,0 +1,23 @@
#!/bin/bash
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PROJECT_ROOT="$SCRIPT_DIR/../.."
BASE_BRANCH="origin/master"
THREADS=${THREADS:-$(nproc)}

if [[ "$#" -gt 0 ]]; then
  case "$1" in
    --base-branch)
      BASE_BRANCH=$2
      ;;
    *)
      echo "Error: Unknown flag '$1'"
      exit 1
      ;;
  esac
fi

cd $PROJECT_ROOT
git diff -U0 $BASE_BRANCH... -- src | ./tools/github/clang-tidy/clang-tidy-diff.py -p 1 -j $THREADS -path build -regex ".+\.cpp" | tee ./build/clang_tidy_output.txt
# Fail if any warning is reported
! cat ./build/clang_tidy_output.txt | ./tools/github/clang-tidy/grep_error_lines.sh > /dev/null
cd $SCRIPT_DIR
27
tests/code_analysis/python_code_analysis.sh
Executable file
@ -0,0 +1,27 @@
#!/bin/bash
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PROJECT_ROOT="$SCRIPT_DIR/../.."
BASE_BRANCH="origin/master"

if [[ "$#" -gt 0 ]]; then
  case "$1" in
    --base-branch)
      BASE_BRANCH=$2
      ;;
    *)
      echo "Error: Unknown flag '$1'"
      exit 1
      ;;
  esac
fi

cd $PROJECT_ROOT
CHANGED_FILES=$(git diff -U0 $BASE_BRANCH... --name-only --diff-filter=d)
for file in ${CHANGED_FILES}; do
  echo ${file}
  if [[ ${file} == *.py ]]; then
    python3 -m black --check --diff ${file}
    python3 -m isort --profile black --check-only --diff ${file}
  fi
done
cd $SCRIPT_DIR
@ -78,6 +78,7 @@ add_subdirectory(query_planning)
add_subdirectory(awesome_functions)
add_subdirectory(high_availability)
add_subdirectory(concurrent_write)
add_subdirectory(concurrency)

add_subdirectory(replication_experimental)

6
tests/e2e/concurrency/CMakeLists.txt
Normal file
@ -0,0 +1,6 @@
function(copy_concurrency_e2e_python_files FILE_NAME)
  copy_e2e_python_files(concurrency ${FILE_NAME})
endfunction()

copy_concurrency_e2e_python_files(common.py)
copy_concurrency_e2e_python_files(concurrency.py)
60
tests/e2e/concurrency/common.py
Normal file
@ -0,0 +1,60 @@
# Copyright 2023 Memgraph Ltd.
#
# Use of this software is governed by the Business Source License
# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
# License, and you may not use this file except in compliance with the Business Source License.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0, included in the file
# licenses/APL.txt.

import typing

import mgclient
import pytest


def execute_and_fetch_all(cursor: mgclient.Cursor, query: str, params: dict = {}) -> typing.List[tuple]:
    cursor.execute(query, params)
    return cursor.fetchall()


def execute_and_fetch_all_with_commit(
    connection: mgclient.Connection, query: str, params: dict = {}
) -> typing.List[tuple]:
    cursor = connection.cursor()
    cursor.execute(query, params)
    results = cursor.fetchall()
    connection.commit()
    return results


@pytest.fixture
def first_connection(**kwargs) -> mgclient.Connection:
    connection = mgclient.connect(host="localhost", port=7687, **kwargs)
    connection.autocommit = True
    cursor = connection.cursor()
    execute_and_fetch_all(cursor, "USE DATABASE memgraph")
    try:
        execute_and_fetch_all(cursor, "DROP DATABASE clean")
    except:
        pass
    execute_and_fetch_all(cursor, "MATCH (n) DETACH DELETE n")
    connection.autocommit = False
    yield connection


@pytest.fixture
def second_connection(**kwargs) -> mgclient.Connection:
    connection = mgclient.connect(host="localhost", port=7687, **kwargs)
    connection.autocommit = True
    cursor = connection.cursor()
    execute_and_fetch_all(cursor, "USE DATABASE memgraph")
    try:
        execute_and_fetch_all(cursor, "DROP DATABASE clean")
    except:
        pass
    execute_and_fetch_all(cursor, "MATCH (n) DETACH DELETE n")
    connection.autocommit = False
    yield connection
57
tests/e2e/concurrency/concurrency.py
Normal file
@ -0,0 +1,57 @@
# Copyright 2023 Memgraph Ltd.
#
# Use of this software is governed by the Business Source License
# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
# License, and you may not use this file except in compliance with the Business Source License.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0, included in the file
# licenses/APL.txt.

import sys

import pytest
from common import execute_and_fetch_all, first_connection, second_connection


def test_concurrency_if_no_delta_on_same_node_property_update(first_connection, second_connection):
    m1c = first_connection.cursor()
    m2c = second_connection.cursor()

    execute_and_fetch_all(m1c, "CREATE (:Node {prop: 1})")
    first_connection.commit()

    test_has_error = False
    try:
        m1c.execute("MATCH (n) SET n.prop = 1")
        m2c.execute("MATCH (n) SET n.prop = 1")
        first_connection.commit()
        second_connection.commit()
    except Exception as e:
        test_has_error = True

    assert test_has_error is False


def test_concurrency_if_no_delta_on_same_edge_property_update(first_connection, second_connection):
    m1c = first_connection.cursor()
    m2c = second_connection.cursor()

    execute_and_fetch_all(m1c, "CREATE ()-[:TYPE {prop: 1}]->()")
    first_connection.commit()

    test_has_error = False
    try:
        m1c.execute("MATCH (n)-[r]->(m) SET r.prop = 1")
        m2c.execute("MATCH (n)-[r]->(m) SET n.prop = 1")
        first_connection.commit()
        second_connection.commit()
    except Exception as e:
        test_has_error = True

    assert test_has_error is False


if __name__ == "__main__":
    sys.exit(pytest.main([__file__, "-rA"]))
14
tests/e2e/concurrency/workloads.yaml
Normal file
@ -0,0 +1,14 @@
concurrency_cluster: &concurrency_cluster
  cluster:
    main:
      args: ["--bolt-port", "7687", "--log-level=TRACE", "--storage-delta-on-identical-property-update=false"]
      log_file: "concurrency.log"
      setup_queries: []
      validation_queries: []


workloads:
  - name: "Concurrency"
    binary: "tests/e2e/pytest_runner.sh"
    args: ["concurrency/concurrency.py"]
    <<: *concurrency_cluster
@ -12,57 +12,87 @@ def execute_and_fetch_all(cursor: mgclient.Cursor, query: str, params: dict = {}
    return cursor.fetchall()


commit_success_lock = threading.Lock()
commit_fail_lock = threading.Lock()
class AtomicInteger:
    def __init__(self, value=0):
        self._value = int(value)
        self._lock = threading.Lock()

    def inc(self, d=1):
        with self._lock:
            self._value += int(d)
            return self._value

    def dec(self, d=1):
        return self.inc(-d)

    @property
    def value(self):
        with self._lock:
            return self._value

    @value.setter
    def value(self, v):
        with self._lock:
            self._value = int(v)
            return self._value


def sleep_until(wanted_cnt):
    while cnt.value != wanted_cnt:
        time.sleep(0.1)


def client_success():
    commit_fail_lock.acquire()
    time.sleep(0.1)
    connection = mgclient.connect(host="localhost", port=7687)
    connection.autocommit = False
    cursor = connection.cursor()

    execute_and_fetch_all(cursor, "MATCH (n1:N1) DELETE n1;")
    cnt.inc()  # 1
    sleep_until(2)

    connection.commit()
    cnt.inc()  # 3


def client_fail():
    connection = mgclient.connect(host="localhost", port=7687)
    connection.autocommit = False
    cursor = connection.cursor()

    try:
        sleep_until(1)
        execute_and_fetch_all(cursor, "MATCH (n1:N1), (n2:N2) CREATE (n1)-[:R]->(n2);")
        cnt.inc()  # 2

        sleep_until(3)
        connection.commit()  # this should fail
    except mgclient.DatabaseError:
        return
    except Exception:
        assert False


def test_concurrent_write():
    connection = mgclient.connect(host="localhost", port=7687)
    connection.autocommit = True
    cursor = connection.cursor()

    execute_and_fetch_all(cursor, "MATCH (n) DETACH DELETE n;")
    execute_and_fetch_all(cursor, "CREATE (:N1), (:N2);")
    connection.commit()

    execute_and_fetch_all(cursor, "MATCH (n1:N1) DELETE n1;")
    commit_success_lock.acquire()
    commit_fail_lock.release()
    connection.commit()
    commit_success_lock.release()


def client_fail():
    try:
        commit_success_lock.acquire()
        connection = mgclient.connect(host="localhost", port=7687)
        connection.autocommit = False
        cursor = connection.cursor()

        execute_and_fetch_all(cursor, "MATCH (n1:N1), (n2:N2) CREATE (n1)-[:R]->(n2);")
        commit_success_lock.release()
        commit_fail_lock.acquire()
        connection.commit()
    except mgclient.DatabaseError:
        commit_fail_lock.release()


def test_concurrent_write():
    t1 = threading.Thread(target=client_success)
    t2 = threading.Thread(target=client_fail)

    global cnt
    cnt = AtomicInteger(0)

    t1.start()
    t2.start()

    t1.join()
    t2.join()

    connection = mgclient.connect(host="localhost", port=7687)
    connection.autocommit = True
    cursor = connection.cursor()
    assert execute_and_fetch_all(cursor, "MATCH (n:N1) RETURN inDegree(n);") == []
    assert execute_and_fetch_all(cursor, "MATCH (n:N1) RETURN outDegree(n);") == []
    assert execute_and_fetch_all(cursor, "MATCH (n:N2) RETURN inDegree(n);")[0][0] == 0
@ -141,6 +141,11 @@ startup_config_dict = {
        "1",
        "The time duration between two replica checks/pings. If < 1, replicas will NOT be checked at all. NOTE: The MAIN instance allocates a new thread for each REPLICA.",
    ),
    "storage_delta_on_identical_property_update": (
        "true",
        "true",
        "Controls whether updating a property with the same value should create a delta object.",
    ),
    "storage_gc_cycle_sec": ("30", "30", "Storage garbage collector interval (in seconds)."),
    "storage_python_gc_cycle_sec": ("180", "180", "Storage python full garbage collection interval (in seconds)."),
    "storage_items_per_batch": (
@ -117,17 +117,26 @@ def test_register_repl_instances_then_coordinators():
    coordinator3_cursor = connect(host="localhost", port=7692).cursor()

    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001'"
        coordinator3_cursor,
        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
    )
    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_2 ON '127.0.0.1:10012' WITH '127.0.0.1:10002'"
        coordinator3_cursor,
        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
    )
    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003'"
        coordinator3_cursor,
        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
    )
    execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
    assert add_coordinator(coordinator3_cursor, "ADD COORDINATOR 1 ON '127.0.0.1:10111'")
    assert add_coordinator(coordinator3_cursor, "ADD COORDINATOR 2 ON '127.0.0.1:10112'")
    assert add_coordinator(
        coordinator3_cursor,
        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
    )
    assert add_coordinator(
        coordinator3_cursor,
        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
    )

    def check_coordinator3():
        return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES")))
@ -172,16 +181,25 @@ def test_register_coordinator_then_repl_instances():

    coordinator3_cursor = connect(host="localhost", port=7692).cursor()

    assert add_coordinator(coordinator3_cursor, "ADD COORDINATOR 1 ON '127.0.0.1:10111'")
    assert add_coordinator(coordinator3_cursor, "ADD COORDINATOR 2 ON '127.0.0.1:10112'")
    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001'"
    assert add_coordinator(
        coordinator3_cursor,
        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
    )
    assert add_coordinator(
        coordinator3_cursor,
        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
    )
    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_2 ON '127.0.0.1:10012' WITH '127.0.0.1:10002'"
        coordinator3_cursor,
        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
    )
    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003'"
        coordinator3_cursor,
        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
    )
    execute_and_fetch_all(
        coordinator3_cursor,
        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
    )
    execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")

@ -228,16 +246,25 @@ def test_coordinators_communication_with_restarts():

    coordinator3_cursor = connect(host="localhost", port=7692).cursor()

    assert add_coordinator(coordinator3_cursor, "ADD COORDINATOR 1 ON '127.0.0.1:10111'")
    assert add_coordinator(coordinator3_cursor, "ADD COORDINATOR 2 ON '127.0.0.1:10112'")
    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001'"
    assert add_coordinator(
        coordinator3_cursor,
        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
    )
    assert add_coordinator(
        coordinator3_cursor,
        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
    )
    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_2 ON '127.0.0.1:10012' WITH '127.0.0.1:10002'"
        coordinator3_cursor,
        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
    )
    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003'"
        coordinator3_cursor,
        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
    )
    execute_and_fetch_all(
        coordinator3_cursor,
        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
    )
    execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")

@ -295,16 +322,25 @@ def test_unregister_replicas(kill_instance):
    coordinator2_cursor = connect(host="localhost", port=7691).cursor()
    coordinator3_cursor = connect(host="localhost", port=7692).cursor()

    assert add_coordinator(coordinator3_cursor, "ADD COORDINATOR 1 ON '127.0.0.1:10111'")
    assert add_coordinator(coordinator3_cursor, "ADD COORDINATOR 2 ON '127.0.0.1:10112'")
    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001'"
    assert add_coordinator(
        coordinator3_cursor,
        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
    )
    assert add_coordinator(
        coordinator3_cursor,
        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
    )
    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_2 ON '127.0.0.1:10012' WITH '127.0.0.1:10002'"
        coordinator3_cursor,
        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
    )
    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003'"
        coordinator3_cursor,
        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
    )
    execute_and_fetch_all(
        coordinator3_cursor,
        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
    )
    execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")

@ -429,16 +465,26 @@ def test_unregister_main():
    coordinator1_cursor = connect(host="localhost", port=7690).cursor()
    coordinator2_cursor = connect(host="localhost", port=7691).cursor()
    coordinator3_cursor = connect(host="localhost", port=7692).cursor()
    assert add_coordinator(coordinator3_cursor, "ADD COORDINATOR 1 ON '127.0.0.1:10111'")
    assert add_coordinator(coordinator3_cursor, "ADD COORDINATOR 2 ON '127.0.0.1:10112'")
    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001'"

    assert add_coordinator(
        coordinator3_cursor,
        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
    )
    assert add_coordinator(
        coordinator3_cursor,
        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
    )
    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_2 ON '127.0.0.1:10012' WITH '127.0.0.1:10002'"
        coordinator3_cursor,
        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
    )
    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003'"
        coordinator3_cursor,
        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
    )
    execute_and_fetch_all(
        coordinator3_cursor,
        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
    )
    execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
|
||||
with pytest.raises(Exception) as e:
|
||||
execute_and_fetch_all(
|
||||
cursor,
|
||||
"REGISTER INSTANCE instance_1 ON '127.0.0.1:10001' WITH '127.0.0.1:10011';",
|
||||
"REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
|
||||
)
|
||||
assert str(e.value) == "Only coordinator can register coordinator server!"
|
||||
|
||||
|
@ -133,11 +133,18 @@ def test_writing_disabled_on_main_restart():
    coordinator3_cursor = connect(host="localhost", port=7692).cursor()

    execute_and_fetch_all(
        coordinator3_cursor, "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003'"
        coordinator3_cursor,
        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
    )
    execute_and_fetch_all(coordinator3_cursor, "SET INSTANCE instance_3 TO MAIN")
    assert add_coordinator(coordinator3_cursor, "ADD COORDINATOR 1 ON '127.0.0.1:10111'")
    assert add_coordinator(coordinator3_cursor, "ADD COORDINATOR 2 ON '127.0.0.1:10112'")
    assert add_coordinator(
        coordinator3_cursor,
        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
    )
    assert add_coordinator(
        coordinator3_cursor,
        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
    )

    def check_coordinator3():
        return sorted(list(execute_and_fetch_all(coordinator3_cursor, "SHOW INSTANCES")))
@ -110,11 +110,11 @@ MEMGRAPH_INSTANCES_DESCRIPTION = {
        ],
        "log_file": "coordinator3.log",
        "setup_queries": [
            "ADD COORDINATOR 1 ON '127.0.0.1:10111'",
            "ADD COORDINATOR 2 ON '127.0.0.1:10112'",
            "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001'",
            "REGISTER INSTANCE instance_2 ON '127.0.0.1:10012' WITH '127.0.0.1:10002'",
            "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003'",
            "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
            "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
            "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
            "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
            "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
            "SET INSTANCE instance_3 TO MAIN",
        ],
    },
@ -221,11 +221,11 @@ def test_old_main_comes_back_on_new_leader_as_replica():
    interactive_mg_runner.start_all(inner_instances_description)

    setup_queries = [
        "ADD COORDINATOR 1 ON '127.0.0.1:10111'",
        "ADD COORDINATOR 2 ON '127.0.0.1:10112'",
        "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001'",
        "REGISTER INSTANCE instance_2 ON '127.0.0.1:10012' WITH '127.0.0.1:10002'",
        "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003'",
        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
        "SET INSTANCE instance_3 TO MAIN",
    ]
    coord_cursor_3 = connect(host="localhost", port=7692).cursor()
@ -416,11 +416,11 @@ def test_distributed_automatic_failover_with_leadership_change():
    interactive_mg_runner.start_all(inner_instances_description)

    setup_queries = [
        "ADD COORDINATOR 1 ON '127.0.0.1:10111'",
        "ADD COORDINATOR 2 ON '127.0.0.1:10112'",
        "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001'",
        "REGISTER INSTANCE instance_2 ON '127.0.0.1:10012' WITH '127.0.0.1:10002'",
        "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003'",
        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
        "SET INSTANCE instance_3 TO MAIN",
    ]
    coord_cursor_3 = connect(host="localhost", port=7692).cursor()
@ -522,7 +522,10 @@ def test_no_leader_after_leader_and_follower_die():
    coord_cursor_1 = connect(host="localhost", port=7690).cursor()

    with pytest.raises(Exception) as e:
        execute_and_fetch_all(coord_cursor_1, "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.10001'")
        execute_and_fetch_all(
            coord_cursor_1,
            "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
        )
    assert str(e) == "Couldn't register replica instance since coordinator is not a leader!"


@ -541,11 +544,11 @@ def test_old_main_comes_back_on_new_leader_as_main():
    coord_cursor_3 = connect(host="localhost", port=7692).cursor()

    setup_queries = [
        "ADD COORDINATOR 1 ON '127.0.0.1:10111'",
        "ADD COORDINATOR 2 ON '127.0.0.1:10112'",
        "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001'",
        "REGISTER INSTANCE instance_2 ON '127.0.0.1:10012' WITH '127.0.0.1:10002'",
        "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003'",
        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
        "SET INSTANCE instance_3 TO MAIN",
    ]

@ -719,12 +722,12 @@ def test_registering_4_coords():
        ],
        "log_file": "coordinator4.log",
        "setup_queries": [
            "ADD COORDINATOR 1 ON '127.0.0.1:10111';",
            "ADD COORDINATOR 2 ON '127.0.0.1:10112';",
            "ADD COORDINATOR 3 ON '127.0.0.1:10113';",
            "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001'",
            "REGISTER INSTANCE instance_2 ON '127.0.0.1:10012' WITH '127.0.0.1:10002'",
            "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003'",
            "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
            "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
            "ADD COORDINATOR 3 WITH CONFIG {'bolt_server': '127.0.0.1:7692', 'coordinator_server': '127.0.0.1:10113'}",
            "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
            "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
            "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
            "SET INSTANCE instance_3 TO MAIN",
        ],
    },
@ -854,12 +857,12 @@ def test_registering_coord_log_store():
        ],
        "log_file": "coordinator4.log",
        "setup_queries": [
            "ADD COORDINATOR 1 ON '127.0.0.1:10111';",
            "ADD COORDINATOR 2 ON '127.0.0.1:10112';",
            "ADD COORDINATOR 3 ON '127.0.0.1:10113';",
            "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001'",
            "REGISTER INSTANCE instance_2 ON '127.0.0.1:10012' WITH '127.0.0.1:10002'",
            "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003'",
            "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
            "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
            "ADD COORDINATOR 3 WITH CONFIG {'bolt_server': '127.0.0.1:7692', 'coordinator_server': '127.0.0.1:10113'}",
            "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
            "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
            "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
        ],
    },
}
@ -896,7 +899,7 @@ def test_registering_coord_log_store():
    # 3
    instances_ports_added = [10011, 10012, 10013]
    bolt_port_id = 7700
    coord_port_id = 10014
    manag_port_id = 10014

    additional_instances = []
    for i in range(4, 7):
@ -908,10 +911,10 @@ def test_registering_coord_log_store():

        bolt_port = f"--bolt-port={bolt_port_id}"

        coord_server_port = f"--coordinator-server-port={coord_port_id}"
        manag_server_port = f"--coordinator-server-port={manag_port_id}"

        args_desc.append(bolt_port)
        args_desc.append(coord_server_port)
        args_desc.append(manag_server_port)

        instance_description = {
            "args": args_desc,
@ -922,17 +925,23 @@ def test_registering_coord_log_store():

        full_instance_desc = {instance_name: instance_description}
        interactive_mg_runner.start(full_instance_desc, instance_name)
        repl_port_id = coord_port_id - 10
        repl_port_id = manag_port_id - 10
        assert repl_port_id < 10011, "Wrong test setup, repl port must be smaller than smallest coord port id"

        bolt_server = f"127.0.0.1:{bolt_port_id}"
        management_server = f"127.0.0.1:{manag_port_id}"
        repl_server = f"127.0.0.1:{repl_port_id}"

        config_str = f"{{'bolt_server': '{bolt_server}', 'management_server': '{management_server}', 'replication_server': '{repl_server}'}}"

        execute_and_fetch_all(
            coord_cursor,
            f"REGISTER INSTANCE {instance_name} ON '127.0.0.1:{coord_port_id}' WITH '127.0.0.1:{repl_port_id}'",
            f"REGISTER INSTANCE {instance_name} WITH CONFIG {config_str}",
        )

        additional_instances.append((f"{instance_name}", "", f"127.0.0.1:{coord_port_id}", "up", "replica"))
        instances_ports_added.append(coord_port_id)
        coord_port_id += 1
        additional_instances.append((f"{instance_name}", "", management_server, "up", "replica"))
        instances_ports_added.append(manag_port_id)
        manag_port_id += 1
        bolt_port_id += 1

    # 4
@ -1004,11 +1013,11 @@ def test_multiple_failovers_in_row_no_leadership_change():
    coord_cursor_3 = connect(host="localhost", port=7692).cursor()

    setup_queries = [
        "ADD COORDINATOR 1 ON '127.0.0.1:10111'",
        "ADD COORDINATOR 2 ON '127.0.0.1:10112'",
        "REGISTER INSTANCE instance_1 ON '127.0.0.1:10011' WITH '127.0.0.1:10001'",
        "REGISTER INSTANCE instance_2 ON '127.0.0.1:10012' WITH '127.0.0.1:10002'",
        "REGISTER INSTANCE instance_3 ON '127.0.0.1:10013' WITH '127.0.0.1:10003'",
        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
        "SET INSTANCE instance_3 TO MAIN",
    ]
Some files were not shown because too many files have changed in this diff.