Merge branch 'master' into add-mgbuilder-workflow
commit 798c73f5de

263  .github/workflows/package_all.yaml (vendored)
@@ -1,263 +0,0 @@
name: Package All

# TODO(gitbuda): Cleanup docker container if GHA job was canceled.

on:
  workflow_dispatch:
    inputs:
      memgraph_version:
        description: "Memgraph version to upload as. If empty upload is skipped. Format: 'X.Y.Z'"
        required: false
      build_type:
        type: choice
        description: "Memgraph Build type. Default value is Release."
        default: 'Release'
        options:
          - Release
          - RelWithDebInfo

jobs:
  centos-7:
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # Required because of release/get_version.py
      - name: "Build package"
        run: |
          ./release/package/run.sh package centos-7 ${{ github.event.inputs.build_type }}
      - name: "Upload package"
        uses: actions/upload-artifact@v3
        with:
          name: centos-7
          path: build/output/centos-7/memgraph*.rpm

  centos-9:
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # Required because of release/get_version.py
      - name: "Build package"
        run: |
          ./release/package/run.sh package centos-9 ${{ github.event.inputs.build_type }}
      - name: "Upload package"
        uses: actions/upload-artifact@v3
        with:
          name: centos-9
          path: build/output/centos-9/memgraph*.rpm

  debian-10:
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # Required because of release/get_version.py
      - name: "Build package"
        run: |
          ./release/package/run.sh package debian-10 ${{ github.event.inputs.build_type }}
      - name: "Upload package"
        uses: actions/upload-artifact@v3
        with:
          name: debian-10
          path: build/output/debian-10/memgraph*.deb

  debian-11:
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # Required because of release/get_version.py
      - name: "Build package"
        run: |
          ./release/package/run.sh package debian-11 ${{ github.event.inputs.build_type }}
      - name: "Upload package"
        uses: actions/upload-artifact@v3
        with:
          name: debian-11
          path: build/output/debian-11/memgraph*.deb

  docker:
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # Required because of release/get_version.py
      - name: "Build package"
        run: |
          cd release/package
          ./run.sh package debian-11 ${{ github.event.inputs.build_type }} --for-docker
          ./run.sh docker
      - name: "Upload package"
        uses: actions/upload-artifact@v3
        with:
          name: docker
          path: build/output/docker/memgraph*.tar.gz

  ubuntu-1804:
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # Required because of release/get_version.py
      - name: "Build package"
        run: |
          ./release/package/run.sh package ubuntu-18.04 ${{ github.event.inputs.build_type }}
      - name: "Upload package"
        uses: actions/upload-artifact@v3
        with:
          name: ubuntu-18.04
          path: build/output/ubuntu-18.04/memgraph*.deb

  ubuntu-2004:
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # Required because of release/get_version.py
      - name: "Build package"
        run: |
          ./release/package/run.sh package ubuntu-20.04 ${{ github.event.inputs.build_type }}
      - name: "Upload package"
        uses: actions/upload-artifact@v3
        with:
          name: ubuntu-20.04
          path: build/output/ubuntu-20.04/memgraph*.deb

  ubuntu-2204:
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # Required because of release/get_version.py
      - name: "Build package"
        run: |
          ./release/package/run.sh package ubuntu-22.04 ${{ github.event.inputs.build_type }}
      - name: "Upload package"
        uses: actions/upload-artifact@v3
        with:
          name: ubuntu-22.04
          path: build/output/ubuntu-22.04/memgraph*.deb

  debian-11-platform:
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # Required because of release/get_version.py
      - name: "Build package"
        run: |
          ./release/package/run.sh package debian-11 ${{ github.event.inputs.build_type }} --for-platform
      - name: "Upload package"
        uses: actions/upload-artifact@v3
        with:
          name: debian-11-platform
          path: build/output/debian-11/memgraph*.deb

  fedora-36:
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # Required because of release/get_version.py
      - name: "Build package"
        run: |
          ./release/package/run.sh package fedora-36 ${{ github.event.inputs.build_type }}
      - name: "Upload package"
        uses: actions/upload-artifact@v3
        with:
          name: fedora-36
          path: build/output/fedora-36/memgraph*.rpm

  amzn-2:
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # Required because of release/get_version.py
      - name: "Build package"
        run: |
          ./release/package/run.sh package amzn-2 ${{ github.event.inputs.build_type }}
      - name: "Upload package"
        uses: actions/upload-artifact@v3
        with:
          name: amzn-2
          path: build/output/amzn-2/memgraph*.rpm

  debian-11-arm:
    runs-on: [self-hosted, DockerMgBuild, ARM64, strange]
    timeout-minutes: 120
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # Required because of release/get_version.py
      - name: "Build package"
        run: |
          ./release/package/run.sh package debian-11-arm ${{ github.event.inputs.build_type }}
      - name: "Upload package"
        uses: actions/upload-artifact@v3
        with:
          name: debian-11-aarch64
          path: build/output/debian-11-arm/memgraph*.deb

  ubuntu-2204-arm:
    runs-on: [self-hosted, DockerMgBuild, ARM64, strange]
    timeout-minutes: 120
    steps:
      - name: "Set up repository"
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # Required because of release/get_version.py
      - name: "Build package"
        run: |
          ./release/package/run.sh package ubuntu-22.04-arm ${{ github.event.inputs.build_type }}
      - name: "Upload package"
        uses: actions/upload-artifact@v3
        with:
          name: ubuntu-22.04-aarch64
          path: build/output/ubuntu-22.04-arm/memgraph*.deb

  upload-to-s3:
    # only run upload if we specified version. Allows for runs without upload
    if: "${{ github.event.inputs.memgraph_version != '' }}"
    needs: [centos-7, centos-9, debian-10, debian-11, docker, ubuntu-1804, ubuntu-2004, ubuntu-2204, debian-11-platform, fedora-36, amzn-2, debian-11-arm, ubuntu-2204-arm]
    runs-on: ubuntu-latest
    steps:
      - name: Download artifacts
        uses: actions/download-artifact@v3
        with:
          # name: # if name input parameter is not provided, all artifacts are downloaded
          # and put in directories named after each one.
          path: build/output/release
      - name: Upload to S3
        uses: jakejarvis/s3-sync-action@v0.5.1
        env:
          AWS_S3_BUCKET: "download.memgraph.com"
          AWS_ACCESS_KEY_ID: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
          AWS_REGION: "eu-west-1"
          SOURCE_DIR: "build/output/release"
          DEST_DIR: "memgraph/v${{ github.event.inputs.memgraph_version }}/"
@@ -1,4 +1,4 @@
name: Package Specific
name: Package memgraph

# TODO(gitbuda): Cleanup docker container if GHA job was canceled.

@@ -10,16 +10,17 @@ on:
        required: false
      build_type:
        type: choice
        description: "Memgraph Build type. Default value is Release."
        description: "Memgraph Build type. Default value is Release"
        default: 'Release'
        options:
          - Release
          - RelWithDebInfo
      target_os:
        type: choice
        description: "Target OS for which memgraph will be packaged. Default is Ubuntu 22.04"
        description: "Target OS for which memgraph will be packaged. Select 'all' if you want to package for every listed OS. Default is Ubuntu 22.04"
        default: 'ubuntu-22_04'
        options:
          - all
          - amzn-2
          - centos-7
          - centos-9
@@ -36,7 +37,7 @@ on:

jobs:
  amzn-2:
    if: ${{ github.event.inputs.target_os == 'amzn-2' }}
    if: ${{ github.event.inputs.target_os == 'amzn-2' || github.event.inputs.target_os == 'all' }}
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
@@ -54,7 +55,7 @@ jobs:
          path: build/output/amzn-2/memgraph*.rpm

  centos-7:
    if: ${{ github.event.inputs.target_os == 'centos-7' }}
    if: ${{ github.event.inputs.target_os == 'centos-7' || github.event.inputs.target_os == 'all' }}
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
@@ -72,7 +73,7 @@ jobs:
          path: build/output/centos-7/memgraph*.rpm

  centos-9:
    if: ${{ github.event.inputs.target_os == 'centos-9' }}
    if: ${{ github.event.inputs.target_os == 'centos-9' || github.event.inputs.target_os == 'all' }}
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
@@ -90,7 +91,7 @@ jobs:
          path: build/output/centos-9/memgraph*.rpm

  debian-10:
    if: ${{ github.event.inputs.target_os == 'debian-10' }}
    if: ${{ github.event.inputs.target_os == 'debian-10' || github.event.inputs.target_os == 'all' }}
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
@@ -108,7 +109,7 @@ jobs:
          path: build/output/debian-10/memgraph*.deb

  debian-11:
    if: ${{ github.event.inputs.target_os == 'debian-11' }}
    if: ${{ github.event.inputs.target_os == 'debian-11' || github.event.inputs.target_os == 'all' }}
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
@@ -126,7 +127,7 @@ jobs:
          path: build/output/debian-11/memgraph*.deb

  debian-11-arm:
    if: ${{ github.event.inputs.target_os == 'debian-11-arm' }}
    if: ${{ github.event.inputs.target_os == 'debian-11-arm' || github.event.inputs.target_os == 'all' }}
    runs-on: [self-hosted, DockerMgBuild, ARM64, strange]
    timeout-minutes: 120
    steps:
@@ -144,7 +145,7 @@ jobs:
          path: build/output/debian-11-arm/memgraph*.deb

  debian-11-platform:
    if: ${{ github.event.inputs.target_os == 'debian-11-platform' }}
    if: ${{ github.event.inputs.target_os == 'debian-11-platform' || github.event.inputs.target_os == 'all' }}
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
@@ -162,7 +163,7 @@ jobs:
          path: build/output/debian-11/memgraph*.deb

  docker:
    if: ${{ github.event.inputs.target_os == 'docker' }}
    if: ${{ github.event.inputs.target_os == 'docker' || github.event.inputs.target_os == 'all' }}
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
@@ -182,7 +183,7 @@ jobs:
          path: build/output/docker/memgraph*.tar.gz

  fedora-36:
    if: ${{ github.event.inputs.target_os == 'fedora-36' }}
    if: ${{ github.event.inputs.target_os == 'fedora-36' || github.event.inputs.target_os == 'all' }}
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
@@ -200,7 +201,7 @@ jobs:
          path: build/output/fedora-36/memgraph*.rpm

  ubuntu-18_04:
    if: ${{ github.event.inputs.target_os == 'ubuntu-18_04' }}
    if: ${{ github.event.inputs.target_os == 'ubuntu-18_04' || github.event.inputs.target_os == 'all' }}
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
@@ -218,7 +219,7 @@ jobs:
          path: build/output/ubuntu-18.04/memgraph*.deb

  ubuntu-20_04:
    if: ${{ github.event.inputs.target_os == 'ubuntu-20_04' }}
    if: ${{ github.event.inputs.target_os == 'ubuntu-20_04' || github.event.inputs.target_os == 'all' }}
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
@@ -236,7 +237,7 @@ jobs:
          path: build/output/ubuntu-20.04/memgraph*.deb

  ubuntu-22_04:
    if: ${{ github.event.inputs.target_os == 'ubuntu-22_04' }}
    if: ${{ github.event.inputs.target_os == 'ubuntu-22_04' || github.event.inputs.target_os == 'all' }}
    runs-on: [self-hosted, DockerMgBuild, X64]
    timeout-minutes: 60
    steps:
@@ -254,7 +255,7 @@ jobs:
          path: build/output/ubuntu-22.04/memgraph*.deb

  ubuntu-22_04-arm:
    if: ${{ github.event.inputs.target_os == 'ubuntu-22_04-arm' }}
    if: ${{ github.event.inputs.target_os == 'ubuntu-22_04-arm' || github.event.inputs.target_os == 'all' }}
    runs-on: [self-hosted, DockerMgBuild, ARM64, strange]
    timeout-minutes: 120
    steps:
2  .github/workflows/release_debian10.yaml (vendored)
@@ -178,7 +178,7 @@ jobs:

  release_build:
    name: "Release build"
    runs-on: [self-hosted, Linux, X64, Debian10]
    runs-on: [self-hosted, Linux, X64, Debian10, BigMemory]
    env:
      THREADS: 24
      MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}
22  .sonarcloud.properties (new file)
@@ -0,0 +1,22 @@
# Path to sources
sonar.sources = .
# sonar.exclusions=
sonar.inclusions=src,include,query_modules

# Path to tests
sonar.tests = tests/
# sonar.test.exclusions=
# sonar.test.inclusions=

# Source encoding
# sonar.sourceEncoding=

# Exclusions for copy-paste detection
# sonar.cpd.exclusions=

# Python version (for python projects only)
# sonar.python.version=

# C++ standard version (for C++ projects only)
# If not specified, it defaults to the latest supported standard
# sonar.cfamily.reportingCppStandardOverride=c++98|c++11|c++14|c++17|c++20
@@ -111,6 +111,14 @@ modifications:
      value: "false"
      override: true

    - name: "storage_parallel_schema_recovery"
      value: "false"
      override: true

    - name: "storage_enable_schema_metadata"
      value: "false"
      override: true

    - name: "query_callable_mappings_path"
      value: "/etc/memgraph/apoc_compatibility_mappings.json"
      override: true
@@ -234,8 +234,6 @@ inline mgp_type *type_duration() { return MgInvoke<mgp_type *>(mgp_type_duration

inline mgp_type *type_nullable(mgp_type *type) { return MgInvoke<mgp_type *>(mgp_type_nullable, type); }

// mgp_graph

inline bool create_label_index(mgp_graph *graph, const char *label) {
  return MgInvoke<int>(mgp_create_label_index, graph, label);
}
@@ -284,6 +282,10 @@ inline mgp_list *list_all_unique_constraints(mgp_graph *graph, mgp_memory *memor
  return MgInvoke<mgp_list *>(mgp_list_all_unique_constraints, graph, memory);
}

// mgp_graph

inline bool graph_is_transactional(mgp_graph *graph) { return MgInvoke<int>(mgp_graph_is_transactional, graph); }

inline bool graph_is_mutable(mgp_graph *graph) { return MgInvoke<int>(mgp_graph_is_mutable, graph); }

inline mgp_vertex *graph_create_vertex(mgp_graph *graph, mgp_memory *memory) {
@@ -376,6 +378,8 @@ inline mgp_list *list_copy(mgp_list *list, mgp_memory *memory) {

inline void list_destroy(mgp_list *list) { mgp_list_destroy(list); }

inline bool list_contains_deleted(mgp_list *list) { return MgInvoke<int>(mgp_list_contains_deleted, list); }

inline void list_append(mgp_list *list, mgp_value *val) { MgInvokeVoid(mgp_list_append, list, val); }

inline void list_append_extend(mgp_list *list, mgp_value *val) { MgInvokeVoid(mgp_list_append_extend, list, val); }
@@ -394,6 +398,8 @@ inline mgp_map *map_copy(mgp_map *map, mgp_memory *memory) { return MgInvoke<mgp

inline void map_destroy(mgp_map *map) { mgp_map_destroy(map); }

inline bool map_contains_deleted(mgp_map *map) { return MgInvoke<int>(mgp_map_contains_deleted, map); }

inline void map_insert(mgp_map *map, const char *key, mgp_value *value) {
  MgInvokeVoid(mgp_map_insert, map, key, value);
}
@@ -442,6 +448,8 @@ inline mgp_vertex *vertex_copy(mgp_vertex *v, mgp_memory *memory) {

inline void vertex_destroy(mgp_vertex *v) { mgp_vertex_destroy(v); }

inline bool vertex_is_deleted(mgp_vertex *v) { return MgInvoke<int>(mgp_vertex_is_deleted, v); }

inline bool vertex_equal(mgp_vertex *v1, mgp_vertex *v2) { return MgInvoke<int>(mgp_vertex_equal, v1, v2); }

inline size_t vertex_labels_count(mgp_vertex *v) { return MgInvoke<size_t>(mgp_vertex_labels_count, v); }
@@ -494,6 +502,8 @@ inline mgp_edge *edge_copy(mgp_edge *e, mgp_memory *memory) { return MgInvoke<mg

inline void edge_destroy(mgp_edge *e) { mgp_edge_destroy(e); }

inline bool edge_is_deleted(mgp_edge *e) { return MgInvoke<int>(mgp_edge_is_deleted, e); }

inline bool edge_equal(mgp_edge *e1, mgp_edge *e2) { return MgInvoke<int>(mgp_edge_equal, e1, e2); }

inline mgp_edge_type edge_get_type(mgp_edge *e) { return MgInvoke<mgp_edge_type>(mgp_edge_get_type, e); }
@@ -530,6 +540,8 @@ inline mgp_path *path_copy(mgp_path *path, mgp_memory *memory) {

inline void path_destroy(mgp_path *path) { mgp_path_destroy(path); }

inline bool path_contains_deleted(mgp_path *path) { return MgInvoke<int>(mgp_path_contains_deleted, path); }

inline void path_expand(mgp_path *path, mgp_edge *edge) { MgInvokeVoid(mgp_path_expand, path, edge); }

inline void path_pop(mgp_path *path) { MgInvokeVoid(mgp_path_pop, path); }
@@ -429,6 +429,9 @@ enum mgp_error mgp_list_copy(struct mgp_list *list, struct mgp_memory *memory, s
/// Free the memory used by the given mgp_list and contained elements.
void mgp_list_destroy(struct mgp_list *list);

/// Return whether the given mgp_list contains any deleted values.
enum mgp_error mgp_list_contains_deleted(struct mgp_list *list, int *result);

/// Append a copy of mgp_value to mgp_list if capacity allows.
/// The list copies the given value and therefore does not take ownership of the
/// original value. You still need to call mgp_value_destroy to free the
@@ -469,6 +472,9 @@ enum mgp_error mgp_map_copy(struct mgp_map *map, struct mgp_memory *memory, stru
/// Free the memory used by the given mgp_map and contained items.
void mgp_map_destroy(struct mgp_map *map);

/// Return whether the given mgp_map contains any deleted values.
enum mgp_error mgp_map_contains_deleted(struct mgp_map *map, int *result);

/// Insert a new mapping from a NULL terminated character string to a value.
/// If a mapping with the same key already exists, it is *not* replaced.
/// In case of insertion, both the string and the value are copied into the map.
@@ -552,6 +558,9 @@ enum mgp_error mgp_path_copy(struct mgp_path *path, struct mgp_memory *memory, s
/// Free the memory used by the given mgp_path and contained vertices and edges.
void mgp_path_destroy(struct mgp_path *path);

/// Return whether the given mgp_path contains any deleted values.
enum mgp_error mgp_path_contains_deleted(struct mgp_path *path, int *result);

/// Append an edge continuing from the last vertex on the path.
/// The edge is copied into the path. Therefore, the path does not take
/// ownership of the original edge, so you still need to free the edge memory
@@ -725,6 +734,9 @@ enum mgp_error mgp_vertex_copy(struct mgp_vertex *v, struct mgp_memory *memory,
/// Free the memory used by a mgp_vertex.
void mgp_vertex_destroy(struct mgp_vertex *v);

/// Return whether the given mgp_vertex is deleted.
enum mgp_error mgp_vertex_is_deleted(struct mgp_vertex *v, int *result);

/// Result is non-zero if given vertices are equal, otherwise 0.
enum mgp_error mgp_vertex_equal(struct mgp_vertex *v1, struct mgp_vertex *v2, int *result);

@@ -819,6 +831,9 @@ enum mgp_error mgp_edge_copy(struct mgp_edge *e, struct mgp_memory *memory, stru
/// Free the memory used by a mgp_edge.
void mgp_edge_destroy(struct mgp_edge *e);

/// Return whether the given mgp_edge is deleted.
enum mgp_error mgp_edge_is_deleted(struct mgp_edge *e, int *result);

/// Result is non-zero if given edges are equal, otherwise 0.
enum mgp_error mgp_edge_equal(struct mgp_edge *e1, struct mgp_edge *e2, int *result);

@@ -941,6 +956,12 @@ enum mgp_error mgp_list_all_unique_constraints(struct mgp_graph *graph, struct m
/// Current implementation always returns without errors.
enum mgp_error mgp_graph_is_mutable(struct mgp_graph *graph, int *result);

/// Result is non-zero if the graph is in transactional storage mode.
/// If a graph is not in transactional mode (i.e. analytical mode), then vertices and edges can be missing
/// because changes from other transactions are visible.
/// Current implementation always returns without errors.
enum mgp_error mgp_graph_is_transactional(struct mgp_graph *graph, int *result);

/// Add a new vertex to the graph.
/// Resulting vertex must be freed using mgp_vertex_destroy.
/// Return mgp_error::MGP_ERROR_IMMUTABLE_OBJECT if `graph` is immutable.
@@ -246,6 +246,8 @@ class Graph {

  /// @brief Returns whether the graph is mutable.
  bool IsMutable() const;
  /// @brief Returns whether the graph is in a transactional storage mode.
  bool IsTransactional() const;
  /// @brief Creates a node and adds it to the graph.
  Node CreateNode();
  /// @brief Deletes a node from the graph.
@@ -512,6 +514,9 @@ class List {

  ~List();

  /// @brief Returns whether the list contains any deleted values.
  bool ContainsDeleted() const;

  /// @brief Returns the size of the list.
  size_t Size() const;
  /// @brief Returns whether the list is empty.
@@ -618,6 +623,9 @@ class Map {

  ~Map();

  /// @brief Returns whether the map contains any deleted values.
  bool ContainsDeleted() const;

  /// @brief Returns the size of the map.
  size_t Size() const;

@@ -730,6 +738,9 @@ class Node {

  ~Node();

  /// @brief Returns whether the node has been deleted.
  bool IsDeleted() const;

  /// @brief Returns the node's ID.
  mgp::Id Id() const;

@@ -811,6 +822,9 @@ class Relationship {

  ~Relationship();

  /// @brief Returns whether the relationship has been deleted.
  bool IsDeleted() const;

  /// @brief Returns the relationship's ID.
  mgp::Id Id() const;

@@ -876,6 +890,9 @@ class Path {

  ~Path();

  /// @brief Returns whether the path contains any deleted values.
  bool ContainsDeleted() const;

  /// Returns the path length (number of relationships).
  size_t Length() const;

@@ -1995,6 +2012,8 @@ inline bool Graph::ContainsRelationship(const Relationship &relationship) const

inline bool Graph::IsMutable() const { return mgp::graph_is_mutable(graph_); }

inline bool Graph::IsTransactional() const { return mgp::graph_is_transactional(graph_); }

inline Node Graph::CreateNode() {
  auto *vertex = mgp::MemHandlerCallback(graph_create_vertex, graph_);
  auto node = Node(vertex);
@@ -2442,6 +2461,8 @@ inline List::~List() {
  }
}

inline bool List::ContainsDeleted() const { return mgp::list_contains_deleted(ptr_); }

inline size_t List::Size() const { return mgp::list_size(ptr_); }

inline bool List::Empty() const { return Size() == 0; }
@@ -2568,6 +2589,8 @@ inline Map::~Map() {
  }
}

inline bool Map::ContainsDeleted() const { return mgp::map_contains_deleted(ptr_); }

inline size_t Map::Size() const { return mgp::map_size(ptr_); }

inline bool Map::Empty() const { return Size() == 0; }
@@ -2733,6 +2756,8 @@ inline Node::~Node() {
  }
}

inline bool Node::IsDeleted() const { return mgp::vertex_is_deleted(ptr_); }

inline mgp::Id Node::Id() const { return Id::FromInt(mgp::vertex_get_id(ptr_).as_int); }

inline mgp::Labels Node::Labels() const { return mgp::Labels(ptr_); }
@@ -2884,6 +2909,8 @@ inline Relationship::~Relationship() {
  }
}

inline bool Relationship::IsDeleted() const { return mgp::edge_is_deleted(ptr_); }

inline mgp::Id Relationship::Id() const { return Id::FromInt(mgp::edge_get_id(ptr_).as_int); }

inline std::string_view Relationship::Type() const { return mgp::edge_get_type(ptr_).name; }
@@ -2989,6 +3016,8 @@ inline Path::~Path() {
  }
}

inline bool Path::ContainsDeleted() const { return mgp::path_contains_deleted(ptr_); }

inline size_t Path::Length() const { return mgp::path_size(ptr_); }

inline Node Path::GetNodeAt(size_t index) const {
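The new IsDeleted()/ContainsDeleted()/IsTransactional() accessors matter mostly for query procedures that may run while the storage is in analytical (non-transactional) mode. Below is a minimal sketch of how a module could consume them; the procedure name, the output field name, and the counting logic are illustrative assumptions and not part of this diff, and module registration is omitted.

// Sketch only: assumes the mgp C++ API (include/mgp.hpp) as extended above.
#include <mgp.hpp>

// Hypothetical procedure: counts nodes, skipping ones that were deleted by
// concurrent changes visible in analytical (non-transactional) mode.
void CountLiveNodes(mgp_list * /*args*/, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory) {
  mgp::MemoryDispatcherGuard guard{memory};
  const auto record_factory = mgp::RecordFactory(result);
  try {
    const mgp::Graph graph = mgp::Graph(memgraph_graph);
    std::int64_t live = 0;
    for (auto node : graph.Nodes()) {
      // IsDeleted() is only interesting when IsTransactional() is false;
      // in transactional mode other transactions' deletes are not visible.
      if (graph.IsTransactional() || !node.IsDeleted()) ++live;
    }
    auto record = record_factory.NewRecord();
    record.Insert("live_nodes", live);  // field name is an assumption
  } catch (const std::exception &) {
    // Sketch swallows errors; a real module would report them back through
    // the result/record factory. Registration (mgp_init_module) is omitted.
    return;
  }
}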
@@ -36,7 +36,7 @@ ADDITIONAL USE GRANT: You may use the Licensed Work in accordance with the
                      3. using the Licensed Work to create a work or solution
                         which competes (or might reasonably be expected to
                         compete) with the Licensed Work.
CHANGE DATE:          2027-30-10
CHANGE DATE:          2027-08-12
CHANGE LICENSE:       Apache License, Version 2.0

For information about alternative licensing arrangements, please visit: https://memgraph.com/legal.
@@ -108,31 +108,83 @@ void Schema::ProcessPropertiesRel(mgp::Record &record, const std::string_view &t
  record.Insert(std::string(kReturnMandatory).c_str(), mandatory);
}

struct Property {
  std::string name;
  mgp::Value value;

  Property(const std::string &name, mgp::Value &&value) : name(name), value(std::move(value)) {}
};

struct LabelsHash {
  std::size_t operator()(const std::set<std::string> &set) const {
    std::size_t seed = set.size();
    for (const auto &i : set) {
      seed ^= std::hash<std::string>{}(i) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
    }
    return seed;
  }
};

struct LabelsComparator {
  bool operator()(const std::set<std::string> &lhs, const std::set<std::string> &rhs) const { return lhs == rhs; }
};

struct PropertyComparator {
  bool operator()(const Property &lhs, const Property &rhs) const { return lhs.name < rhs.name; }
};

struct PropertyInfo {
  std::set<Property, PropertyComparator> properties;
  bool mandatory;
};

void Schema::NodeTypeProperties(mgp_list * /*args*/, mgp_graph *memgraph_graph, mgp_result *result,
                                mgp_memory *memory) {
  mgp::MemoryDispatcherGuard guard{memory};
  const auto record_factory = mgp::RecordFactory(result);
  try {
    const mgp::Graph graph = mgp::Graph(memgraph_graph);
    for (auto node : graph.Nodes()) {
      std::string type;
      mgp::List labels = mgp::List();
    std::unordered_map<std::set<std::string>, PropertyInfo, LabelsHash, LabelsComparator> node_types_properties;

    for (auto node : mgp::Graph(memgraph_graph).Nodes()) {
      std::set<std::string> labels_set = {};
      for (auto label : node.Labels()) {
        labels.AppendExtend(mgp::Value(label));
        type += ":`" + std::string(label) + "`";
        labels_set.emplace(label);
      }

      if (node_types_properties.find(labels_set) == node_types_properties.end()) {
        node_types_properties[labels_set] = PropertyInfo{std::set<Property, PropertyComparator>(), true};
      }

      if (node.Properties().empty()) {
        auto record = record_factory.NewRecord();
        ProcessPropertiesNode<std::string>(record, type, labels, "", "", false);
        node_types_properties[labels_set].mandatory = false;  // if there is node with no property, it is not mandatory
        continue;
      }

      auto &property_info = node_types_properties.at(labels_set);
      for (auto &[key, prop] : node.Properties()) {
        auto property_type = mgp::List();
        property_info.properties.emplace(key, std::move(prop));
        if (property_info.mandatory) {
          property_info.mandatory =
              property_info.properties.size() == 1;  // if there is only one property, it is mandatory
        }
      }
    }

    for (auto &[labels, property_info] : node_types_properties) {
      std::string label_type;
      mgp::List labels_list = mgp::List();
      for (auto const &label : labels) {
        label_type += ":`" + std::string(label) + "`";
        labels_list.AppendExtend(mgp::Value(label));
      }
      for (auto const &prop : property_info.properties) {
        auto record = record_factory.NewRecord();
        property_type.AppendExtend(mgp::Value(TypeOf(prop.Type())));
        ProcessPropertiesNode<mgp::List>(record, type, labels, key, property_type, true);
        ProcessPropertiesNode(record, label_type, labels_list, prop.name, TypeOf(prop.value.Type()),
                              property_info.mandatory);
      }
      if (property_info.properties.empty()) {
        auto record = record_factory.NewRecord();
        ProcessPropertiesNode<std::string>(record, label_type, labels_list, "", "", false);
      }
    }

@@ -144,23 +196,41 @@ void Schema::NodeTypeProperties(mgp_list * /*args*/, mgp_graph *memgraph_graph,

void Schema::RelTypeProperties(mgp_list * /*args*/, mgp_graph *memgraph_graph, mgp_result *result, mgp_memory *memory) {
  mgp::MemoryDispatcherGuard guard{memory};

  std::unordered_map<std::string, PropertyInfo> rel_types_properties;
  const auto record_factory = mgp::RecordFactory(result);
  try {
    const mgp::Graph graph = mgp::Graph(memgraph_graph);

    for (auto rel : graph.Relationships()) {
      std::string type = ":`" + std::string(rel.Type()) + "`";
      std::string rel_type = std::string(rel.Type());
      if (rel_types_properties.find(rel_type) == rel_types_properties.end()) {
        rel_types_properties[rel_type] = PropertyInfo{std::set<Property, PropertyComparator>(), true};
      }

      if (rel.Properties().empty()) {
        auto record = record_factory.NewRecord();
        ProcessPropertiesRel<std::string>(record, type, "", "", false);
        rel_types_properties[rel_type].mandatory = false;  // if there is rel with no property, it is not mandatory
        continue;
      }

      auto &property_info = rel_types_properties.at(rel_type);
      for (auto &[key, prop] : rel.Properties()) {
        auto property_type = mgp::List();
        property_info.properties.emplace(key, std::move(prop));
        if (property_info.mandatory) {
          property_info.mandatory =
              property_info.properties.size() == 1;  // if there is only one property, it is mandatory
        }
      }
    }

    for (auto &[type, property_info] : rel_types_properties) {
      std::string type_str = ":`" + std::string(type) + "`";
      for (auto const &prop : property_info.properties) {
        auto record = record_factory.NewRecord();
        property_type.AppendExtend(mgp::Value(TypeOf(prop.Type())));
        ProcessPropertiesRel<mgp::List>(record, type, key, property_type, true);
        ProcessPropertiesRel(record, type_str, prop.name, TypeOf(prop.value.Type()), property_info.mandatory);
      }
      if (property_info.properties.empty()) {
        auto record = record_factory.NewRecord();
        ProcessPropertiesRel<std::string>(record, type_str, "", "", false);
      }
    }
@@ -104,7 +104,9 @@ def retry(retry_limit, timeout=100):
            except Exception:
                time.sleep(timeout)
                return func(*args, **kwargs)

        return wrapper

    return inner_func


@@ -200,19 +202,19 @@ if args.version:
try:
    current_branch = get_output("git", "rev-parse", "--abbrev-ref", "HEAD")
    if current_branch != "master":
        branches = get_output("git", "branch")
        if "master" in branches:
        branches = get_output("git", "branch", "-r", "--list", "origin/master")
        if "origin/master" in branches:
            # If master is present locally, the fetch is allowed to fail
            # because this script will still be able to compare against the
            # master branch.
            try:
                get_output("git", "fetch", "origin", "master:master")
                get_output("git", "fetch", "origin", "master")
            except Exception:
                pass
        else:
            # If master is not present locally, the fetch command has to
            # succeed because something else will fail otherwise.
            get_output("git", "fetch", "origin", "master:master")
            get_output("git", "fetch", "origin", "master")
except Exception:
    print("Fatal error while ensuring local master branch.")
    sys.exit(1)
@@ -232,7 +234,7 @@ for branch in branches:
    match = branch_regex.match(branch)
    if match is not None:
        version = tuple(map(int, match.group(1).split(".")))
        master_branch_merge = get_output("git", "merge-base", "master", branch)
        master_branch_merge = get_output("git", "merge-base", "origin/master", branch)
        versions.append((version, branch, master_branch_merge))
versions.sort(reverse=True)

@@ -243,7 +245,7 @@ current_version = None
for version in versions:
    version_tuple, branch, master_branch_merge = version
    current_branch_merge = get_output("git", "merge-base", current_hash, branch)
    master_current_merge = get_output("git", "merge-base", current_hash, "master")
    master_current_merge = get_output("git", "merge-base", current_hash, "origin/master")
    # The first check checks whether this commit is a child of `master` and
    # the version branch was created before us.
    # The second check checks whether this commit is a child of the version
@@ -367,14 +367,16 @@ State HandleReset(TSession &session, const Marker marker) {
    return State::Close;
  }

  if (!session.encoder_.MessageSuccess()) {
    spdlog::trace("Couldn't send success message!");
    return State::Close;
  try {
    session.Abort();
    if (!session.encoder_.MessageSuccess({})) {
      spdlog::trace("Couldn't send success message!");
      return State::Close;
    }
    return State::Idle;
  } catch (const std::exception &e) {
    return HandleFailure(session, e);
  }

  session.Abort();

  return State::Idle;
}

template <typename TSession>
@@ -397,19 +399,17 @@ State HandleBegin(TSession &session, const State state, const Marker marker) {

  DMG_ASSERT(!session.encoder_buffer_.HasData(), "There should be no data to write in this state");

  if (!session.encoder_.MessageSuccess({})) {
    spdlog::trace("Couldn't send success message!");
    return State::Close;
  }

  try {
    session.Configure(extra.ValueMap());
    session.BeginTransaction(extra.ValueMap());
    if (!session.encoder_.MessageSuccess({})) {
      spdlog::trace("Couldn't send success message!");
      return State::Close;
    }
    return State::Idle;
  } catch (const std::exception &e) {
    return HandleFailure(session, e);
  }

  return State::Idle;
}

template <typename TSession>
@@ -427,11 +427,11 @@ State HandleCommit(TSession &session, const State state, const Marker marker) {
  DMG_ASSERT(!session.encoder_buffer_.HasData(), "There should be no data to write in this state");

  try {
    session.CommitTransaction();
    if (!session.encoder_.MessageSuccess({})) {
      spdlog::trace("Couldn't send success message!");
      return State::Close;
    }
    session.CommitTransaction();
    return State::Idle;
  } catch (const std::exception &e) {
    return HandleFailure(session, e);
@@ -453,11 +453,11 @@ State HandleRollback(TSession &session, const State state, const Marker marker)
  DMG_ASSERT(!session.encoder_buffer_.HasData(), "There should be no data to write in this state");

  try {
    session.RollbackTransaction();
    if (!session.encoder_.MessageSuccess({})) {
      spdlog::trace("Couldn't send success message!");
      return State::Close;
    }
    session.RollbackTransaction();
    return State::Idle;
  } catch (const std::exception &e) {
    return HandleFailure(session, e);
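The common thread in these Bolt handler changes is that both the state-changing session call and the success-message write now live inside a single try block, so an exception in either path is routed to HandleFailure instead of arriving after a success was already put on the wire. A generic sketch of that shape, assuming the surrounding declarations (State, spdlog, HandleFailure, session.encoder_) and picking one possible ordering of operation and acknowledgement:

// Illustrative only; not a function from the diff above.
template <typename TSession, typename Operation>
State RunAndAck(TSession &session, Operation &&operation) {
  try {
    std::forward<Operation>(operation)();        // e.g. Abort/Begin/Commit/Rollback
    if (!session.encoder_.MessageSuccess({})) {  // acknowledge inside the same try block
      spdlog::trace("Couldn't send success message!");
      return State::Close;
    }
    return State::Idle;
  } catch (const std::exception &e) {
    return HandleFailure(session, e);
  }
}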
@@ -1,3 +1,3 @@

add_library(mg-dbms STATIC database.cpp replication_handler.cpp inmemory/replication_handlers.cpp)
add_library(mg-dbms STATIC dbms_handler.cpp database.cpp replication_handler.cpp replication_client.cpp inmemory/replication_handlers.cpp)
target_link_libraries(mg-dbms mg-utils mg-storage-v2 mg-query)
@@ -15,4 +15,10 @@ namespace memgraph::dbms {

constexpr static const char *kDefaultDB = "memgraph";  //!< Name of the default database

#ifdef MG_EXPERIMENTAL_REPLICATION_MULTITENANCY
constexpr bool allow_mt_repl = true;
#else
constexpr bool allow_mt_repl = false;
#endif

}  // namespace memgraph::dbms
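One way such a compile-time switch is typically consumed is with if constexpr, so the guard disappears entirely when multi-tenant replication is enabled. The helper below is a sketch under that assumption; the function name, its parameter, and the error type are illustrative and do not appear in this diff.

#include <stdexcept>

namespace memgraph::dbms {

// Hypothetical guard built on the allow_mt_repl constant defined above.
inline void CheckMultiTenantReplication(bool more_than_one_db) {
  if constexpr (!allow_mt_repl) {
    if (more_than_one_db) {
      throw std::runtime_error(
          "Replicating multiple databases requires building with MG_EXPERIMENTAL_REPLICATION_MULTITENANCY.");
    }
  }
}

}  // namespace memgraph::dbms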
@@ -21,7 +21,7 @@ template struct memgraph::utils::Gatekeeper<memgraph::dbms::Database>;

namespace memgraph::dbms {

Database::Database(storage::Config config, const replication::ReplicationState &repl_state)
Database::Database(storage::Config config, replication::ReplicationState &repl_state)
    : trigger_store_(config.durability.storage_directory / "triggers"),
      streams_{config.durability.storage_directory / "streams"},
      plan_cache_{FLAGS_query_plan_cache_max_size},
@@ -48,7 +48,7 @@ class Database {
   *
   * @param config storage configuration
   */
  explicit Database(storage::Config config, const replication::ReplicationState &repl_state);
  explicit Database(storage::Config config, replication::ReplicationState &repl_state);

  /**
   * @brief Returns the raw storage pointer.
@@ -95,7 +95,7 @@ class Database {
   *
   * @return storage::StorageMode
   */
  storage::StorageMode GetStorageMode() const { return storage_->GetStorageMode(); }
  storage::StorageMode GetStorageMode() const noexcept { return storage_->GetStorageMode(); }

  /**
   * @brief Get the storage info
@@ -51,8 +51,7 @@ class DatabaseHandler : public Handler<Database> {
   * @param config Storage configuration
   * @return HandlerT::NewResult
   */
  HandlerT::NewResult New(std::string_view name, storage::Config config,
                          const replication::ReplicationState &repl_state) {
  HandlerT::NewResult New(std::string_view name, storage::Config config, replication::ReplicationState &repl_state) {
    // Control that no one is using the same data directory
    if (std::any_of(begin(), end(), [&](auto &elem) {
          auto db_acc = elem.second.access();
75  src/dbms/dbms_handler.cpp (new file)
@@ -0,0 +1,75 @@
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#include "dbms/dbms_handler.hpp"

namespace memgraph::dbms {
#ifdef MG_ENTERPRISE
DbmsHandler::DbmsHandler(
    storage::Config config,
    memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> *auth,
    bool recovery_on_startup, bool delete_on_drop)
    : default_config_{std::move(config)},
      delete_on_drop_(delete_on_drop),
      repl_state_{ReplicationStateRootPath(default_config_)} {
  // TODO: Decouple storage config from dbms config
  // TODO: Save individual db configs inside the kvstore and restore from there
  storage::UpdatePaths(default_config_, default_config_.durability.storage_directory / "databases");
  const auto &db_dir = default_config_.durability.storage_directory;
  const auto durability_dir = db_dir / ".durability";
  utils::EnsureDirOrDie(db_dir);
  utils::EnsureDirOrDie(durability_dir);
  durability_ = std::make_unique<kvstore::KVStore>(durability_dir);

  // Generate the default database
  MG_ASSERT(!NewDefault_().HasError(), "Failed while creating the default DB.");

  // Recover previous databases
  if (recovery_on_startup) {
    for (const auto &[name, _] : *durability_) {
      if (name == kDefaultDB) continue;  // Already set
      spdlog::info("Restoring database {}.", name);
      MG_ASSERT(!New_(name).HasError(), "Failed while creating database {}.", name);
      spdlog::info("Database {} restored.", name);
    }
  } else {  // Clear databases from the durability list and auth
    auto locked_auth = auth->Lock();
    for (const auto &[name, _] : *durability_) {
      if (name == kDefaultDB) continue;
      locked_auth->DeleteDatabase(name);
      durability_->Delete(name);
    }
  }

  // Startup replication state (if recovered at startup)
  auto replica = [this](replication::RoleReplicaData const &data) {
    // Register handlers
    InMemoryReplicationHandlers::Register(this, *data.server);
    if (!data.server->Start()) {
      spdlog::error("Unable to start the replication server.");
      return false;
    }
    return true;
  };
  // Replication frequent check start
  auto main = [this](replication::RoleMainData &data) {
    for (auto &client : data.registered_replicas_) {
      StartReplicaClient(*this, client);
    }
    return true;
  };
  // Startup process for main/replica
  MG_ASSERT(std::visit(memgraph::utils::Overloaded{replica, main}, repl_state_.ReplicationData()),
            "Replica recovery failure!");
}
#endif

}  // namespace memgraph::dbms
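The constructor above dispatches on the current replication role with std::visit over memgraph::utils::Overloaded{replica, main}. A standalone sketch of that idiom follows, assuming utils::Overloaded is the usual aggregate-of-lambdas helper; the role structs and lambda bodies here are placeholders, not memgraph code.

#include <variant>

// The classic overload-set helper; memgraph::utils::Overloaded is assumed to be equivalent.
template <class... Ts>
struct Overloaded : Ts... {
  using Ts::operator()...;
};
template <class... Ts>
Overloaded(Ts...) -> Overloaded<Ts...>;

struct RoleMainData {};     // placeholder for replication::RoleMainData
struct RoleReplicaData {};  // placeholder for replication::RoleReplicaData

bool StartupFor(std::variant<RoleMainData, RoleReplicaData> &role) {
  auto main = [](RoleMainData &) { /* start replica clients */ return true; };
  auto replica = [](RoleReplicaData &) { /* register handlers, start server */ return true; };
  // Exactly one lambda runs, chosen by the variant's current alternative.
  return std::visit(Overloaded{main, replica}, role);
}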
@@ -26,9 +26,11 @@
#include "auth/auth.hpp"
#include "constants.hpp"
#include "dbms/database.hpp"
#include "dbms/inmemory/replication_handlers.hpp"
#ifdef MG_ENTERPRISE
#include "dbms/database_handler.hpp"
#endif
#include "dbms/replication_client.hpp"
#include "global.hpp"
#include "query/config.hpp"
#include "query/interpreter_context.hpp"
@@ -102,52 +104,22 @@ class DbmsHandler {
   * @param recovery_on_startup restore databases (and its content) and authentication data
   * @param delete_on_drop when dropping delete any associated directories on disk
   */
  DbmsHandler(storage::Config config, const replication::ReplicationState &repl_state, auto *auth,
              bool recovery_on_startup, bool delete_on_drop)
      : lock_{utils::RWLock::Priority::READ},
        default_config_{std::move(config)},
        repl_state_(repl_state),
        delete_on_drop_(delete_on_drop) {
    // TODO: Decouple storage config from dbms config
    // TODO: Save individual db configs inside the kvstore and restore from there
    storage::UpdatePaths(default_config_, default_config_.durability.storage_directory / "databases");
    const auto &db_dir = default_config_.durability.storage_directory;
    const auto durability_dir = db_dir / ".durability";
    utils::EnsureDirOrDie(db_dir);
    utils::EnsureDirOrDie(durability_dir);
    durability_ = std::make_unique<kvstore::KVStore>(durability_dir);

    // Generate the default database
    MG_ASSERT(!NewDefault_().HasError(), "Failed while creating the default DB.");
    // Recover previous databases
    if (recovery_on_startup) {
      for (const auto &[name, _] : *durability_) {
        if (name == kDefaultDB) continue;  // Already set
        spdlog::info("Restoring database {}.", name);
        MG_ASSERT(!New_(name).HasError(), "Failed while creating database {}.", name);
        spdlog::info("Database {} restored.", name);
      }
    } else {  // Clear databases from the durability list and auth
      auto locked_auth = auth->Lock();
      for (const auto &[name, _] : *durability_) {
        if (name == kDefaultDB) continue;
        locked_auth->DeleteDatabase(name);
        durability_->Delete(name);
      }
    }
  }
  DbmsHandler(storage::Config config,
              memgraph::utils::Synchronized<memgraph::auth::Auth, memgraph::utils::WritePrioritizedRWLock> *auth,
              bool recovery_on_startup, bool delete_on_drop);  // TODO If more arguments are added use a config struct
#else
  /**
   * @brief Initialize the handler. A single database is supported in community edition.
   *
   * @param configs storage configuration
   */
  DbmsHandler(storage::Config config, const replication::ReplicationState &repl_state)
      : db_gatekeeper_{[&] {
  DbmsHandler(storage::Config config)
      : repl_state_{ReplicationStateRootPath(config)},
        db_gatekeeper_{[&] {
          config.name = kDefaultDB;
          return std::move(config);
        }(),
                       repl_state} {}
                       repl_state_} {}
#endif

#ifdef MG_ENTERPRISE
@@ -248,6 +220,12 @@ class DbmsHandler {
#endif
  }

  replication::ReplicationState &ReplicationState() { return repl_state_; }
  replication::ReplicationState const &ReplicationState() const { return repl_state_; }

  bool IsMain() const { return repl_state_.IsMain(); }
  bool IsReplica() const { return repl_state_.IsReplica(); }

  /**
   * @brief Return the statistics of all databases.
   *
@@ -536,14 +514,15 @@ class DbmsHandler {
    throw UnknownDatabaseException("Tried to retrieve an unknown database \"{}\".", name);
  }

  mutable LockT lock_;              //!< protective lock
  storage::Config default_config_;  //!< Storage configuration used when creating new databases
  const replication::ReplicationState &repl_state_;  //!< Global replication state
  DatabaseHandler db_handler_;                       //!< multi-tenancy storage handler
  std::unique_ptr<kvstore::KVStore> durability_;     //!< list of active dbs (pointer so we can postpone its creation)
  bool delete_on_drop_;  //!< Flag defining if dropping storage also deletes its directory
  std::set<std::string, std::less<>> defunct_dbs_;  //!< Databases that are in an unknown state due to various failures
#else
  mutable LockT lock_{utils::RWLock::Priority::READ};  //!< protective lock
  storage::Config default_config_;  //!< Storage configuration used when creating new databases
  DatabaseHandler db_handler_;      //!< multi-tenancy storage handler
  std::unique_ptr<kvstore::KVStore> durability_;  //!< list of active dbs (pointer so we can postpone its creation)
  bool delete_on_drop_;             //!< Flag defining if dropping storage also deletes its directory
  std::set<std::string> defunct_dbs_;  //!< Databases that are in an unknown state due to various failures
#endif
  replication::ReplicationState repl_state_;  //!< Global replication state
#ifndef MG_ENTERPRISE
  mutable utils::Gatekeeper<Database> db_gatekeeper_;  //!< Single databases gatekeeper
#endif
};
@@ -10,6 +10,7 @@
// licenses/APL.txt.

#include "dbms/inmemory/replication_handlers.hpp"
#include <optional>
#include "dbms/constants.hpp"
#include "dbms/dbms_handler.hpp"
#include "replication/replication_server.hpp"
@@ -187,9 +188,9 @@ void InMemoryReplicationHandlers::SnapshotHandler(dbms::DbmsHandler *dbms_handle
  storage::replication::Decoder decoder(req_reader);

  auto *storage = static_cast<storage::InMemoryStorage *>(db_acc->get()->storage());
  utils::EnsureDirOrDie(storage->snapshot_directory_);
  utils::EnsureDirOrDie(storage->recovery_.snapshot_directory_);

  const auto maybe_snapshot_path = decoder.ReadFile(storage->snapshot_directory_);
  const auto maybe_snapshot_path = decoder.ReadFile(storage->recovery_.snapshot_directory_);
  MG_ASSERT(maybe_snapshot_path, "Failed to load snapshot!");
  spdlog::info("Received snapshot saved to {}", *maybe_snapshot_path);

@@ -219,7 +220,10 @@ void InMemoryReplicationHandlers::SnapshotHandler(dbms::DbmsHandler *dbms_handle
    storage->timestamp_ = std::max(storage->timestamp_, recovery_info.next_timestamp);

    spdlog::trace("Recovering indices and constraints from snapshot.");
    storage::durability::RecoverIndicesAndConstraints(recovered_snapshot.indices_constraints, &storage->indices_,
    memgraph::storage::durability::RecoverIndicesAndStats(recovered_snapshot.indices_constraints.indices,
                                                          &storage->indices_, &storage->vertices_,
                                                          storage->name_id_mapper_.get());
    memgraph::storage::durability::RecoverConstraints(recovered_snapshot.indices_constraints.constraints,
                                                      &storage->constraints_, &storage->vertices_,
                                                      storage->name_id_mapper_.get());
  } catch (const storage::durability::RecoveryFailure &e) {
@@ -233,7 +237,7 @@ void InMemoryReplicationHandlers::SnapshotHandler(dbms::DbmsHandler *dbms_handle

  spdlog::trace("Deleting old snapshot files due to snapshot recovery.");
  // Delete other durability files
  auto snapshot_files = storage::durability::GetSnapshotFiles(storage->snapshot_directory_, storage->uuid_);
  auto snapshot_files = storage::durability::GetSnapshotFiles(storage->recovery_.snapshot_directory_, storage->uuid_);
  for (const auto &[path, uuid, _] : snapshot_files) {
    if (path != *maybe_snapshot_path) {
      spdlog::trace("Deleting snapshot file {}", path);
@@ -242,7 +246,7 @@ void InMemoryReplicationHandlers::SnapshotHandler(dbms::DbmsHandler *dbms_handle
  }

  spdlog::trace("Deleting old WAL files due to snapshot recovery.");
  auto wal_files = storage::durability::GetWalFiles(storage->wal_directory_, storage->uuid_);
  auto wal_files = storage::durability::GetWalFiles(storage->recovery_.wal_directory_, storage->uuid_);
  if (wal_files) {
    for (const auto &wal_file : *wal_files) {
      spdlog::trace("Deleting WAL file {}", wal_file.path);
@@ -267,7 +271,7 @@ void InMemoryReplicationHandlers::WalFilesHandler(dbms::DbmsHandler *dbms_handle
  storage::replication::Decoder decoder(req_reader);

  auto *storage = static_cast<storage::InMemoryStorage *>(db_acc->get()->storage());
  utils::EnsureDirOrDie(storage->wal_directory_);
  utils::EnsureDirOrDie(storage->recovery_.wal_directory_);

  for (auto i = 0; i < wal_file_number; ++i) {
    LoadWal(storage, &decoder);
@@ -289,7 +293,7 @@ void InMemoryReplicationHandlers::CurrentWalHandler(dbms::DbmsHandler *dbms_hand
  storage::replication::Decoder decoder(req_reader);

  auto *storage = static_cast<storage::InMemoryStorage *>(db_acc->get()->storage());
  utils::EnsureDirOrDie(storage->wal_directory_);
  utils::EnsureDirOrDie(storage->recovery_.wal_directory_);

  LoadWal(storage, &decoder);

@@ -370,8 +374,9 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
  constexpr bool kSharedAccess = false;

  std::optional<std::pair<uint64_t, storage::InMemoryStorage::ReplicationAccessor>> commit_timestamp_and_accessor;
  auto get_transaction = [storage, &commit_timestamp_and_accessor](uint64_t commit_timestamp,
                                                                   bool unique = kSharedAccess) {
  auto const get_transaction = [storage, &commit_timestamp_and_accessor](
                                   uint64_t commit_timestamp,
                                   bool unique = kSharedAccess) -> storage::InMemoryStorage::ReplicationAccessor * {
    if (!commit_timestamp_and_accessor) {
      std::unique_ptr<storage::Storage::Accessor> acc = nullptr;
      if (unique) {
@@ -415,9 +420,11 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
        spdlog::trace("  Delete vertex {}", delta.vertex_create_delete.gid.AsUint());
        auto *transaction = get_transaction(timestamp);
        auto vertex = transaction->FindVertex(delta.vertex_create_delete.gid, View::NEW);
        if (!vertex) throw utils::BasicException("Invalid transaction!");
        if (!vertex)
          throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
        auto ret = transaction->DeleteVertex(&*vertex);
        if (ret.HasError() || !ret.GetValue()) throw utils::BasicException("Invalid transaction!");
        if (ret.HasError() || !ret.GetValue())
          throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
        break;
      }
      case WalDeltaData::Type::VERTEX_ADD_LABEL: {
@@ -425,9 +432,11 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
                      delta.vertex_add_remove_label.label);
        auto *transaction = get_transaction(timestamp);
        auto vertex = transaction->FindVertex(delta.vertex_add_remove_label.gid, View::NEW);
        if (!vertex) throw utils::BasicException("Invalid transaction!");
        if (!vertex)
          throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
        auto ret = vertex->AddLabel(transaction->NameToLabel(delta.vertex_add_remove_label.label));
        if (ret.HasError() || !ret.GetValue()) throw utils::BasicException("Invalid transaction!");
        if (ret.HasError() || !ret.GetValue())
          throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
        break;
      }
      case WalDeltaData::Type::VERTEX_REMOVE_LABEL: {
@@ -435,9 +444,11 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
                      delta.vertex_add_remove_label.label);
        auto *transaction = get_transaction(timestamp);
        auto vertex = transaction->FindVertex(delta.vertex_add_remove_label.gid, View::NEW);
        if (!vertex) throw utils::BasicException("Invalid transaction!");
        if (!vertex)
          throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
        auto ret = vertex->RemoveLabel(transaction->NameToLabel(delta.vertex_add_remove_label.label));
        if (ret.HasError() || !ret.GetValue()) throw utils::BasicException("Invalid transaction!");
        if (ret.HasError() || !ret.GetValue())
          throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
        break;
      }
      case WalDeltaData::Type::VERTEX_SET_PROPERTY: {
@@ -445,10 +456,12 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
                      delta.vertex_edge_set_property.property, delta.vertex_edge_set_property.value);
        auto *transaction = get_transaction(timestamp);
        auto vertex = transaction->FindVertex(delta.vertex_edge_set_property.gid, View::NEW);
        if (!vertex) throw utils::BasicException("Invalid transaction!");
        if (!vertex)
          throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
        auto ret = vertex->SetProperty(transaction->NameToProperty(delta.vertex_edge_set_property.property),
                                       delta.vertex_edge_set_property.value);
        if (ret.HasError()) throw utils::BasicException("Invalid transaction!");
        if (ret.HasError())
          throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
        break;
      }
      case WalDeltaData::Type::EDGE_CREATE: {
@@ -457,13 +470,16 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
                      delta.edge_create_delete.from_vertex.AsUint(), delta.edge_create_delete.to_vertex.AsUint());
        auto *transaction = get_transaction(timestamp);
        auto from_vertex = transaction->FindVertex(delta.edge_create_delete.from_vertex, View::NEW);
        if (!from_vertex) throw utils::BasicException("Invalid transaction!");
        if (!from_vertex)
          throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
        auto to_vertex = transaction->FindVertex(delta.edge_create_delete.to_vertex, View::NEW);
        if (!to_vertex) throw utils::BasicException("Invalid transaction!");
        if (!to_vertex)
          throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
        auto edge = transaction->CreateEdgeEx(&*from_vertex, &*to_vertex,
                                              transaction->NameToEdgeType(delta.edge_create_delete.edge_type),
                                              delta.edge_create_delete.gid);
        if (edge.HasError()) throw utils::BasicException("Invalid transaction!");
        if (edge.HasError())
          throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::EDGE_DELETE: {
|
||||
@ -472,16 +488,17 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
delta.edge_create_delete.from_vertex.AsUint(), delta.edge_create_delete.to_vertex.AsUint());
|
||||
auto *transaction = get_transaction(timestamp);
|
||||
auto from_vertex = transaction->FindVertex(delta.edge_create_delete.from_vertex, View::NEW);
|
||||
if (!from_vertex) throw utils::BasicException("Invalid transaction!");
|
||||
if (!from_vertex)
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
auto to_vertex = transaction->FindVertex(delta.edge_create_delete.to_vertex, View::NEW);
|
||||
if (!to_vertex) throw utils::BasicException("Invalid transaction!");
|
||||
auto edges = from_vertex->OutEdges(View::NEW, {transaction->NameToEdgeType(delta.edge_create_delete.edge_type)},
|
||||
&*to_vertex);
|
||||
if (edges.HasError()) throw utils::BasicException("Invalid transaction!");
|
||||
if (edges->edges.size() != 1) throw utils::BasicException("Invalid transaction!");
|
||||
auto &edge = (*edges).edges[0];
|
||||
auto ret = transaction->DeleteEdge(&edge);
|
||||
if (ret.HasError()) throw utils::BasicException("Invalid transaction!");
|
||||
if (!to_vertex)
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
auto edgeType = transaction->NameToEdgeType(delta.edge_create_delete.edge_type);
|
||||
auto edge =
|
||||
transaction->FindEdge(delta.edge_create_delete.gid, View::NEW, edgeType, &*from_vertex, &*to_vertex);
|
||||
if (!edge) throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
if (auto ret = transaction->DeleteEdge(&*edge); ret.HasError())
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::EDGE_SET_PROPERTY: {
|
||||
@ -498,7 +515,8 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
// yields an accessor that is only valid for managing the edge's
|
||||
// properties.
|
||||
auto edge = edge_acc.find(delta.vertex_edge_set_property.gid);
|
||||
if (edge == edge_acc.end()) throw utils::BasicException("Invalid transaction!");
|
||||
if (edge == edge_acc.end())
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
// The edge visibility check must be done here manually because we
|
||||
// don't allow direct access to the edges through the public API.
|
||||
{
|
||||
@ -530,7 +548,8 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
}
|
||||
}
|
||||
});
|
||||
if (!is_visible) throw utils::BasicException("Invalid transaction!");
|
||||
if (!is_visible)
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
}
|
||||
EdgeRef edge_ref(&*edge);
|
||||
// Here we create an edge accessor that we will use to get the
|
||||
@ -543,7 +562,8 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
|
||||
auto ret = ea.SetProperty(transaction->NameToProperty(delta.vertex_edge_set_property.property),
|
||||
delta.vertex_edge_set_property.value);
|
||||
if (ret.HasError()) throw utils::BasicException("Invalid transaction!");
|
||||
if (ret.HasError())
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -553,7 +573,8 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
throw utils::BasicException("Invalid commit data!");
|
||||
auto ret =
|
||||
commit_timestamp_and_accessor->second.Commit(commit_timestamp_and_accessor->first, false /* not main */);
|
||||
if (ret.HasError()) throw utils::BasicException("Invalid transaction!");
|
||||
if (ret.HasError())
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
commit_timestamp_and_accessor = std::nullopt;
|
||||
break;
|
||||
}
|
||||
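The ReadAndApplyDelta hunks above all lean on the lazily created (commit_timestamp, accessor) pair that get_transaction hands out, with schema deltas asking for unique access. A minimal standalone sketch of that caching pattern, using a hypothetical Accessor stand-in rather than Memgraph's ReplicationAccessor:

#include <cstdint>
#include <cstdio>
#include <optional>
#include <stdexcept>
#include <utility>

// Hypothetical stand-in for a storage accessor bound to one replication transaction.
struct Accessor {
  bool unique;
  void Apply(const char *what) { std::printf("apply %s (%s access)\n", what, unique ? "unique" : "shared"); }
  void Commit(uint64_t ts) { std::printf("commit at %llu\n", static_cast<unsigned long long>(ts)); }
};

int main() {
  constexpr bool kSharedAccess = false;
  constexpr bool kUniqueAccess = true;

  // Deltas sharing a commit timestamp reuse one accessor; it is created lazily on first use.
  std::optional<std::pair<uint64_t, Accessor>> current;
  auto get_transaction = [&current](uint64_t ts, bool unique = kSharedAccess) -> Accessor * {
    if (!current) current.emplace(ts, Accessor{unique});
    if (current->first != ts) throw std::runtime_error("Invalid transaction!");
    return &current->second;
  };

  get_transaction(42)->Apply("VERTEX_ADD_LABEL");
  get_transaction(42)->Apply("VERTEX_SET_PROPERTY");
  current->second.Commit(current->first);
  current.reset();

  get_transaction(43, kUniqueAccess)->Apply("LABEL_INDEX_CREATE");  // schema ops take unique access
  return 0;
}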
@ -563,14 +584,14 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
// Need to send the timestamp
|
||||
auto *transaction = get_transaction(timestamp, kUniqueAccess);
|
||||
if (transaction->CreateIndex(storage->NameToLabel(delta.operation_label.label)).HasError())
|
||||
throw utils::BasicException("Invalid transaction!");
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::LABEL_INDEX_DROP: {
|
||||
spdlog::trace(" Drop label index on :{}", delta.operation_label.label);
|
||||
auto *transaction = get_transaction(timestamp, kUniqueAccess);
|
||||
if (transaction->DropIndex(storage->NameToLabel(delta.operation_label.label)).HasError())
|
||||
throw utils::BasicException("Invalid transaction!");
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::LABEL_INDEX_STATS_SET: {
|
||||
@ -601,7 +622,7 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
->CreateIndex(storage->NameToLabel(delta.operation_label_property.label),
|
||||
storage->NameToProperty(delta.operation_label_property.property))
|
||||
.HasError())
|
||||
throw utils::BasicException("Invalid transaction!");
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::LABEL_PROPERTY_INDEX_DROP: {
|
||||
@ -612,7 +633,7 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
->DropIndex(storage->NameToLabel(delta.operation_label_property.label),
|
||||
storage->NameToProperty(delta.operation_label_property.property))
|
||||
.HasError())
|
||||
throw utils::BasicException("Invalid transaction!");
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::LABEL_PROPERTY_INDEX_STATS_SET: {
|
||||
@ -644,7 +665,8 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
auto ret =
|
||||
transaction->CreateExistenceConstraint(storage->NameToLabel(delta.operation_label_property.label),
|
||||
storage->NameToProperty(delta.operation_label_property.property));
|
||||
if (ret.HasError()) throw utils::BasicException("Invalid transaction!");
|
||||
if (ret.HasError())
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::EXISTENCE_CONSTRAINT_DROP: {
|
||||
@ -655,7 +677,7 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
->DropExistenceConstraint(storage->NameToLabel(delta.operation_label_property.label),
|
||||
storage->NameToProperty(delta.operation_label_property.property))
|
||||
.HasError())
|
||||
throw utils::BasicException("Invalid transaction!");
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::UNIQUE_CONSTRAINT_CREATE: {
|
||||
@ -670,7 +692,7 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
auto ret = transaction->CreateUniqueConstraint(storage->NameToLabel(delta.operation_label_properties.label),
|
||||
properties);
|
||||
if (!ret.HasValue() || ret.GetValue() != UniqueConstraints::CreationStatus::SUCCESS)
|
||||
throw utils::BasicException("Invalid transaction!");
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
break;
|
||||
}
|
||||
case WalDeltaData::Type::UNIQUE_CONSTRAINT_DROP: {
|
||||
@ -685,7 +707,7 @@ uint64_t InMemoryReplicationHandlers::ReadAndApplyDelta(storage::InMemoryStorage
|
||||
auto ret =
|
||||
transaction->DropUniqueConstraint(storage->NameToLabel(delta.operation_label_properties.label), properties);
|
||||
if (ret != UniqueConstraints::DeletionStatus::SUCCESS) {
|
||||
throw utils::BasicException("Invalid transaction!");
|
||||
throw utils::BasicException("Invalid transaction! Please raise an issue, {}:{}", __FILE__, __LINE__);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@@ -22,14 +22,8 @@

namespace memgraph::dbms {

#ifdef MG_EXPERIMENTAL_REPLICATION_MULTITENANCY
constexpr bool allow_mt_repl = true;
#else
constexpr bool allow_mt_repl = false;
#endif

inline std::unique_ptr<storage::Storage> CreateInMemoryStorage(
    storage::Config config, const ::memgraph::replication::ReplicationState &repl_state) {
inline std::unique_ptr<storage::Storage> CreateInMemoryStorage(storage::Config config,
                                                               ::memgraph::replication::ReplicationState &repl_state) {
  const auto wal_mode = config.durability.snapshot_wal_mode;
  const auto name = config.name;
  auto storage = std::make_unique<storage::InMemoryStorage>(std::move(config));
src/dbms/replication_client.cpp (new file)
@@ -0,0 +1,34 @@
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#include "dbms/replication_client.hpp"

namespace memgraph::dbms {

void StartReplicaClient(DbmsHandler &dbms_handler, replication::ReplicationClient &client) {
  // No client error, start instance level client
  auto const &endpoint = client.rpc_client_.Endpoint();
  spdlog::trace("Replication client started at: {}:{}", endpoint.address, endpoint.port);
  client.StartFrequentCheck([&dbms_handler](std::string_view name) {
    // Working connection, check if any database has been left behind
    dbms_handler.ForEach([name](dbms::Database *db) {
      // Specific database <-> replica client
      db->storage()->repl_storage_state_.WithClient(name, [&](storage::ReplicationStorageClient *client) {
        if (client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) {
          // Database <-> replica might be behind, check and recover
          client->TryCheckReplicaStateAsync(db->storage());
        }
      });
    });
  });
}

}  // namespace memgraph::dbms
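StartReplicaClient above wires an instance-level frequent check that walks every database and re-syncs any storage client still flagged MAYBE_BEHIND. A rough standalone sketch of that loop, assuming hypothetical Database/StorageClient stand-ins and a fixed 100 ms tick instead of the real RPC machinery:

#include <atomic>
#include <chrono>
#include <cstdio>
#include <string>
#include <thread>
#include <vector>

// Hypothetical stand-ins for dbms::Database and storage::ReplicationStorageClient.
enum class ReplicaState { MAYBE_BEHIND, READY };

struct StorageClient {
  std::string replica_name;
  ReplicaState state = ReplicaState::MAYBE_BEHIND;
  void TryCheckReplicaStateAsync() {
    std::printf("re-syncing database client for replica '%s'\n", replica_name.c_str());
    state = ReplicaState::READY;
  }
};

struct Database {
  std::vector<StorageClient> clients;
};

int main() {
  Database db1;
  db1.clients.push_back({"REP1", ReplicaState::MAYBE_BEHIND});
  Database db2;
  db2.clients.push_back({"REP1", ReplicaState::READY});
  std::vector<Database> databases{db1, db2};

  // Instance-level frequent check: every tick, walk all databases and nudge clients that may be behind.
  std::atomic<bool> run{true};
  std::thread checker([&databases, &run] {
    while (run.load()) {
      for (auto &db : databases) {
        for (auto &client : db.clients) {
          if (client.state == ReplicaState::MAYBE_BEHIND) client.TryCheckReplicaStateAsync();
        }
      }
      std::this_thread::sleep_for(std::chrono::milliseconds(100));
    }
  });

  std::this_thread::sleep_for(std::chrono::milliseconds(250));
  run.store(false);
  checker.join();
  return 0;
}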
src/dbms/replication_client.hpp (new file)
@@ -0,0 +1,21 @@
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#include "dbms/dbms_handler.hpp"
#include "replication/replication_client.hpp"

namespace memgraph::dbms {

void StartReplicaClient(DbmsHandler &dbms_handler, replication::ReplicationClient &client);

}  // namespace memgraph::dbms
@ -15,6 +15,7 @@
|
||||
#include "dbms/dbms_handler.hpp"
|
||||
#include "dbms/inmemory/replication_handlers.hpp"
|
||||
#include "dbms/inmemory/storage_helper.hpp"
|
||||
#include "dbms/replication_client.hpp"
|
||||
#include "replication/state.hpp"
|
||||
|
||||
using memgraph::replication::ReplicationClientConfig;
|
||||
@ -41,6 +42,8 @@ std::string RegisterReplicaErrorToString(RegisterReplicaError error) {
|
||||
}
|
||||
} // namespace
|
||||
|
||||
ReplicationHandler::ReplicationHandler(DbmsHandler &dbms_handler) : dbms_handler_(dbms_handler) {}
|
||||
|
||||
bool ReplicationHandler::SetReplicationRoleMain() {
|
||||
auto const main_handler = [](RoleMainData const &) {
|
||||
// If we are already MAIN, we don't want to change anything
|
||||
@ -56,42 +59,49 @@ bool ReplicationHandler::SetReplicationRoleMain() {
|
||||
|
||||
// STEP 2) Change to MAIN
|
||||
// TODO: restore replication servers if false?
|
||||
if (!repl_state_.SetReplicationRoleMain()) {
|
||||
if (!dbms_handler_.ReplicationState().SetReplicationRoleMain()) {
|
||||
// TODO: Handle recovery on failure???
|
||||
return false;
|
||||
}
|
||||
|
||||
// STEP 3) We are now MAIN, update storage local epoch
|
||||
const auto &epoch =
|
||||
std::get<RoleMainData>(std::as_const(dbms_handler_.ReplicationState()).ReplicationData()).epoch_;
|
||||
dbms_handler_.ForEach([&](Database *db) {
|
||||
auto *storage = db->storage();
|
||||
storage->repl_storage_state_.epoch_ = std::get<RoleMainData>(std::as_const(repl_state_).ReplicationData()).epoch_;
|
||||
storage->repl_storage_state_.epoch_ = epoch;
|
||||
});
|
||||
|
||||
return true;
|
||||
};
|
||||
|
||||
// TODO: under lock
|
||||
return std::visit(utils::Overloaded{main_handler, replica_handler}, repl_state_.ReplicationData());
|
||||
return std::visit(utils::Overloaded{main_handler, replica_handler},
|
||||
dbms_handler_.ReplicationState().ReplicationData());
|
||||
}
|
||||
|
||||
bool ReplicationHandler::SetReplicationRoleReplica(const memgraph::replication::ReplicationServerConfig &config) {
|
||||
// We don't want to restart the server if we're already a REPLICA
|
||||
if (repl_state_.IsReplica()) {
|
||||
if (dbms_handler_.ReplicationState().IsReplica()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Remove registered replicas
|
||||
// TODO StorageState needs to be synched. Could have a dangling reference if someone adds a database as we are
|
||||
// deleting the replica.
|
||||
// Remove database specific clients
|
||||
dbms_handler_.ForEach([&](Database *db) {
|
||||
auto *storage = db->storage();
|
||||
storage->repl_storage_state_.replication_clients_.WithLock([](auto &clients) { clients.clear(); });
|
||||
});
|
||||
// Remove instance level clients
|
||||
std::get<RoleMainData>(dbms_handler_.ReplicationState().ReplicationData()).registered_replicas_.clear();
|
||||
|
||||
// Creates the server
|
||||
repl_state_.SetReplicationRoleReplica(config);
|
||||
dbms_handler_.ReplicationState().SetReplicationRoleReplica(config);
|
||||
|
||||
// Start
|
||||
const auto success =
|
||||
std::visit(utils::Overloaded{[](auto) {
|
||||
std::visit(utils::Overloaded{[](RoleMainData const &) {
|
||||
// ASSERT
|
||||
return false;
|
||||
},
|
||||
@ -104,36 +114,37 @@ bool ReplicationHandler::SetReplicationRoleReplica(const memgraph::replication::
|
||||
}
|
||||
return true;
|
||||
}},
|
||||
repl_state_.ReplicationData());
|
||||
dbms_handler_.ReplicationState().ReplicationData());
|
||||
// TODO Handle error (restore to main?)
|
||||
return success;
|
||||
}
|
||||
|
||||
auto ReplicationHandler::RegisterReplica(const memgraph::replication::ReplicationClientConfig &config)
|
||||
-> memgraph::utils::BasicResult<RegisterReplicaError> {
|
||||
MG_ASSERT(repl_state_.IsMain(), "Only main instance can register a replica!");
|
||||
MG_ASSERT(dbms_handler_.ReplicationState().IsMain(), "Only main instance can register a replica!");
|
||||
|
||||
auto res = repl_state_.RegisterReplica(config);
|
||||
switch (res) {
|
||||
case memgraph::replication::RegisterReplicaError::NOT_MAIN:
|
||||
MG_ASSERT(false, "Only main instance can register a replica!");
|
||||
return {};
|
||||
case memgraph::replication::RegisterReplicaError::NAME_EXISTS:
|
||||
return memgraph::dbms::RegisterReplicaError::NAME_EXISTS;
|
||||
case memgraph::replication::RegisterReplicaError::END_POINT_EXISTS:
|
||||
return memgraph::dbms::RegisterReplicaError::END_POINT_EXISTS;
|
||||
case memgraph::replication::RegisterReplicaError::COULD_NOT_BE_PERSISTED:
|
||||
return memgraph::dbms::RegisterReplicaError::COULD_NOT_BE_PERSISTED;
|
||||
case memgraph::replication::RegisterReplicaError::SUCCESS:
|
||||
break;
|
||||
}
|
||||
|
||||
bool all_clients_good = true;
|
||||
auto instance_client = dbms_handler_.ReplicationState().RegisterReplica(config);
|
||||
if (instance_client.HasError()) switch (instance_client.GetError()) {
|
||||
case memgraph::replication::RegisterReplicaError::NOT_MAIN:
|
||||
MG_ASSERT(false, "Only main instance can register a replica!");
|
||||
return {};
|
||||
case memgraph::replication::RegisterReplicaError::NAME_EXISTS:
|
||||
return memgraph::dbms::RegisterReplicaError::NAME_EXISTS;
|
||||
case memgraph::replication::RegisterReplicaError::END_POINT_EXISTS:
|
||||
return memgraph::dbms::RegisterReplicaError::END_POINT_EXISTS;
|
||||
case memgraph::replication::RegisterReplicaError::COULD_NOT_BE_PERSISTED:
|
||||
return memgraph::dbms::RegisterReplicaError::COULD_NOT_BE_PERSISTED;
|
||||
case memgraph::replication::RegisterReplicaError::SUCCESS:
|
||||
break;
|
||||
}
|
||||
|
||||
if (!allow_mt_repl && dbms_handler_.All().size() > 1) {
|
||||
spdlog::warn("Multi-tenant replication is currently not supported!");
|
||||
}
|
||||
|
||||
bool all_clients_good = true;
|
||||
|
||||
// Add database specific clients (NOTE Currently all databases are connected to each replica)
|
||||
dbms_handler_.ForEach([&](Database *db) {
|
||||
auto *storage = db->storage();
|
||||
if (!allow_mt_repl && storage->id() != kDefaultDB) {
|
||||
@ -143,18 +154,29 @@ auto ReplicationHandler::RegisterReplica(const memgraph::replication::Replicatio
|
||||
if (storage->storage_mode_ != storage::StorageMode::IN_MEMORY_TRANSACTIONAL) return;
|
||||
|
||||
all_clients_good &=
|
||||
storage->repl_storage_state_.replication_clients_.WithLock([storage, &config](auto &clients) -> bool {
|
||||
auto client = storage->CreateReplicationClient(config, &storage->repl_storage_state_.epoch_);
|
||||
client->Start();
|
||||
|
||||
if (client->State() == storage::replication::ReplicaState::INVALID) {
|
||||
storage->repl_storage_state_.replication_clients_.WithLock([storage, &instance_client](auto &storage_clients) {
|
||||
auto client = std::make_unique<storage::ReplicationStorageClient>(*instance_client.GetValue());
|
||||
client->Start(storage);
|
||||
// After start the storage <-> replica state should be READY or RECOVERING (if correctly started)
|
||||
// MAYBE_BEHIND isn't a statement of the current state, this is the default value
|
||||
// Failed to start due to branching of MAIN and REPLICA
|
||||
if (client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) {
|
||||
return false;
|
||||
}
|
||||
clients.push_back(std::move(client));
|
||||
storage_clients.push_back(std::move(client));
|
||||
return true;
|
||||
});
|
||||
});
|
||||
if (!all_clients_good) return RegisterReplicaError::CONNECTION_FAILED; // TODO: this happen to 1 or many...what to do
|
||||
|
||||
// NOTE Currently if any databases fails, we revert back
|
||||
if (!all_clients_good) {
|
||||
spdlog::error("Failed to register all databases to the REPLICA \"{}\"", config.name);
|
||||
UnregisterReplica(config.name);
|
||||
return RegisterReplicaError::CONNECTION_FAILED;
|
||||
}
|
||||
|
||||
// No client error, start instance level client
|
||||
StartReplicaClient(dbms_handler_, *instance_client.GetValue());
|
||||
return {};
|
||||
}
|
||||
|
||||
@ -163,60 +185,66 @@ auto ReplicationHandler::UnregisterReplica(std::string_view name) -> UnregisterR
|
||||
return UnregisterReplicaResult::NOT_MAIN;
|
||||
};
|
||||
auto const main_handler = [this, name](RoleMainData &mainData) -> UnregisterReplicaResult {
|
||||
if (!repl_state_.TryPersistUnregisterReplica(name)) {
|
||||
if (!dbms_handler_.ReplicationState().TryPersistUnregisterReplica(name)) {
|
||||
return UnregisterReplicaResult::COULD_NOT_BE_PERSISTED;
|
||||
}
|
||||
auto const n_unregistered =
|
||||
std::erase_if(mainData.registered_replicas_,
|
||||
[&](ReplicationClientConfig const ®istered_config) { return registered_config.name == name; });
|
||||
|
||||
dbms_handler_.ForEach([&](Database *db) {
|
||||
db->storage()->repl_storage_state_.replication_clients_.WithLock(
|
||||
[&](auto &clients) { std::erase_if(clients, [&](const auto &client) { return client->Name() == name; }); });
|
||||
// Remove database specific clients
|
||||
dbms_handler_.ForEach([name](Database *db) {
|
||||
db->storage()->repl_storage_state_.replication_clients_.WithLock([&name](auto &clients) {
|
||||
std::erase_if(clients, [name](const auto &client) { return client->Name() == name; });
|
||||
});
|
||||
});
|
||||
|
||||
// Remove instance level clients
|
||||
auto const n_unregistered =
|
||||
std::erase_if(mainData.registered_replicas_, [name](auto const &client) { return client.name_ == name; });
|
||||
return n_unregistered != 0 ? UnregisterReplicaResult::SUCCESS : UnregisterReplicaResult::CAN_NOT_UNREGISTER;
|
||||
};
|
||||
|
||||
return std::visit(utils::Overloaded{main_handler, replica_handler}, repl_state_.ReplicationData());
|
||||
return std::visit(utils::Overloaded{main_handler, replica_handler},
|
||||
dbms_handler_.ReplicationState().ReplicationData());
|
||||
}
|
||||
|
||||
auto ReplicationHandler::GetRole() const -> memgraph::replication::ReplicationRole { return repl_state_.GetRole(); }
|
||||
auto ReplicationHandler::GetRole() const -> memgraph::replication::ReplicationRole {
|
||||
return dbms_handler_.ReplicationState().GetRole();
|
||||
}
|
||||
|
||||
bool ReplicationHandler::IsMain() const { return repl_state_.IsMain(); }
|
||||
bool ReplicationHandler::IsMain() const { return dbms_handler_.ReplicationState().IsMain(); }
|
||||
|
||||
bool ReplicationHandler::IsReplica() const { return repl_state_.IsReplica(); }
|
||||
bool ReplicationHandler::IsReplica() const { return dbms_handler_.ReplicationState().IsReplica(); }
|
||||
|
||||
void RestoreReplication(const replication::ReplicationState &repl_state, storage::Storage &storage) {
|
||||
// Per storage
|
||||
// NOTE Storage will connect to all replicas. Future work might change this
|
||||
void RestoreReplication(replication::ReplicationState &repl_state, storage::Storage &storage) {
|
||||
spdlog::info("Restoring replication role.");
|
||||
|
||||
/// MAIN
|
||||
auto const recover_main = [&storage](RoleMainData const &mainData) {
|
||||
for (const auto &config : mainData.registered_replicas_) {
|
||||
spdlog::info("Replica {} restoration started for {}.", config.name, storage.id());
|
||||
auto const recover_main = [&storage](RoleMainData &mainData) {
|
||||
// Each individual client has already been restored and started. Here we just go through each database and start its
|
||||
// client
|
||||
for (auto &instance_client : mainData.registered_replicas_) {
|
||||
spdlog::info("Replica {} restoration started for {}.", instance_client.name_, storage.id());
|
||||
|
||||
auto register_replica = [&storage](const memgraph::replication::ReplicationClientConfig &config)
|
||||
-> memgraph::utils::BasicResult<RegisterReplicaError> {
|
||||
return storage.repl_storage_state_.replication_clients_.WithLock(
|
||||
[&storage, &config](auto &clients) -> utils::BasicResult<RegisterReplicaError> {
|
||||
auto client = storage.CreateReplicationClient(config, &storage.repl_storage_state_.epoch_);
|
||||
client->Start();
|
||||
const auto &ret = storage.repl_storage_state_.replication_clients_.WithLock(
|
||||
[&](auto &storage_clients) -> utils::BasicResult<RegisterReplicaError> {
|
||||
auto client = std::make_unique<storage::ReplicationStorageClient>(instance_client);
|
||||
client->Start(&storage);
|
||||
// After start the storage <-> replica state should be READY or RECOVERING (if correctly started)
|
||||
// MAYBE_BEHIND isn't a statement of the current state, this is the default value
|
||||
// Failed to start due to branching of MAIN and REPLICA
|
||||
if (client->State() == storage::replication::ReplicaState::MAYBE_BEHIND) {
|
||||
spdlog::warn("Connection failed when registering replica {}. Replica will still be registered.",
|
||||
instance_client.name_);
|
||||
}
|
||||
storage_clients.push_back(std::move(client));
|
||||
return {};
|
||||
});
|
||||
|
||||
if (client->State() == storage::replication::ReplicaState::INVALID) {
|
||||
spdlog::warn("Connection failed when registering replica {}. Replica will still be registered.",
|
||||
client->Name());
|
||||
}
|
||||
clients.push_back(std::move(client));
|
||||
return {};
|
||||
});
|
||||
};
|
||||
|
||||
auto ret = register_replica(config);
|
||||
if (ret.HasError()) {
|
||||
MG_ASSERT(RegisterReplicaError::CONNECTION_FAILED != ret.GetError());
|
||||
LOG_FATAL("Failure when restoring replica {}: {}.", config.name, RegisterReplicaErrorToString(ret.GetError()));
|
||||
LOG_FATAL("Failure when restoring replica {}: {}.", instance_client.name_,
|
||||
RegisterReplicaErrorToString(ret.GetError()));
|
||||
}
|
||||
spdlog::info("Replica {} restored for {}.", config.name, storage.id());
|
||||
spdlog::info("Replica {} restored for {}.", instance_client.name_, storage.id());
|
||||
}
|
||||
spdlog::info("Replication role restored to MAIN.");
|
||||
};
|
||||
@ -229,6 +257,6 @@ void RestoreReplication(const replication::ReplicationState &repl_state, storage
|
||||
recover_main,
|
||||
recover_replica,
|
||||
},
|
||||
std::as_const(repl_state).ReplicationData());
|
||||
repl_state.ReplicationData());
|
||||
}
|
||||
} // namespace memgraph::dbms
|
||||
|
@ -36,8 +36,7 @@ enum class UnregisterReplicaResult : uint8_t {
|
||||
/// A handler type that keep in sync current ReplicationState and the MAIN/REPLICA-ness of Storage
|
||||
/// TODO: extend to do multiple storages
|
||||
struct ReplicationHandler {
|
||||
ReplicationHandler(memgraph::replication::ReplicationState &replState, DbmsHandler &dbms_handler)
|
||||
: repl_state_(replState), dbms_handler_(dbms_handler) {}
|
||||
explicit ReplicationHandler(DbmsHandler &dbms_handler);
|
||||
|
||||
// as REPLICA, become MAIN
|
||||
bool SetReplicationRoleMain();
|
||||
@ -58,12 +57,11 @@ struct ReplicationHandler {
|
||||
bool IsReplica() const;
|
||||
|
||||
private:
|
||||
memgraph::replication::ReplicationState &repl_state_;
|
||||
DbmsHandler &dbms_handler_;
|
||||
};
|
||||
|
||||
/// A handler type that keep in sync current ReplicationState and the MAIN/REPLICA-ness of Storage
|
||||
/// TODO: extend to do multiple storages
|
||||
void RestoreReplication(const replication::ReplicationState &repl_state, storage::Storage &storage);
|
||||
void RestoreReplication(replication::ReplicationState &repl_state, storage::Storage &storage);
|
||||
|
||||
} // namespace memgraph::dbms
|
||||
|
@ -104,9 +104,19 @@ DEFINE_bool(storage_snapshot_on_exit, false, "Controls whether the storage creat
|
||||
DEFINE_uint64(storage_items_per_batch, memgraph::storage::Config::Durability().items_per_batch,
|
||||
"The number of edges and vertices stored in a batch in a snapshot file.");
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables,misc-unused-parameters)
|
||||
DEFINE_VALIDATED_bool(
|
||||
storage_parallel_index_recovery, false,
|
||||
"Controls whether the index creation can be done in a multithreaded fashion.", {
|
||||
spdlog::warn(
|
||||
"storage_parallel_index_recovery flag is deprecated. Check storage_mode_parallel_schema_recovery for more "
|
||||
"details.");
|
||||
return true;
|
||||
});
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_bool(storage_parallel_index_recovery, false,
|
||||
"Controls whether the index creation can be done in a multithreaded fashion.");
|
||||
DEFINE_bool(storage_parallel_schema_recovery, false,
|
||||
"Controls whether the indices and constraints creation can be done in a multithreaded fashion.");
|
||||
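The flag changes above deprecate storage_parallel_index_recovery in favour of storage_parallel_schema_recovery while keeping the old flag parseable. A minimal gflags sketch of that pattern, using plain DEFINE_validator and fprintf instead of Memgraph's DEFINE_VALIDATED_bool and spdlog (note that gflags also runs the validator once with the default value at registration):

#include <cstdio>
#include <gflags/gflags.h>

// Old, deprecated flag: still parsed so existing deployments do not break.
DEFINE_bool(storage_parallel_index_recovery, false,
            "DEPRECATED: use --storage_parallel_schema_recovery instead.");
// New flag that supersedes it.
DEFINE_bool(storage_parallel_schema_recovery, false,
            "Recover indices and constraints in parallel.");

// The validator only warns; every value is accepted.
static bool WarnDeprecated(const char *flagname, bool /*value*/) {
  std::fprintf(stderr, "Warning: --%s is deprecated; check --storage_parallel_schema_recovery for details.\n",
               flagname);
  return true;
}
DEFINE_validator(storage_parallel_index_recovery, &WarnDeprecated);

int main(int argc, char **argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  const bool parallel = FLAGS_storage_parallel_schema_recovery || FLAGS_storage_parallel_index_recovery;
  std::printf("parallel schema recovery: %s\n", parallel ? "on" : "off");
  return 0;
}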
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_uint64(storage_recovery_thread_count,
|
||||
@ -114,6 +124,10 @@ DEFINE_uint64(storage_recovery_thread_count,
|
||||
memgraph::storage::Config::Durability().recovery_thread_count),
|
||||
"The number of threads used to recover persisted data from disk.");
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_bool(storage_enable_schema_metadata, false,
|
||||
"Controls whether metadata should be collected about the resident labels and edge types.");
|
||||
|
||||
#ifdef MG_ENTERPRISE
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DEFINE_bool(storage_delete_on_drop, true,
|
||||
|
@ -73,10 +73,15 @@ DECLARE_uint64(storage_wal_file_flush_every_n_tx);
|
||||
DECLARE_bool(storage_snapshot_on_exit);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_uint64(storage_items_per_batch);
|
||||
// storage_parallel_index_recovery deprecated; use storage_parallel_schema_recovery instead
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(storage_parallel_index_recovery);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(storage_parallel_schema_recovery);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_uint64(storage_recovery_thread_count);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(storage_enable_schema_metadata);
|
||||
#ifdef MG_ENTERPRISE
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
DECLARE_bool(storage_delete_on_drop);
|
||||
|
@ -234,11 +234,55 @@ std::pair<std::vector<std::string>, std::optional<int>> SessionHL::Interpret(
|
||||
throw memgraph::communication::bolt::ClientError(e.what());
|
||||
}
|
||||
}
|
||||
void SessionHL::RollbackTransaction() { interpreter_.RollbackTransaction(); }
|
||||
void SessionHL::CommitTransaction() { interpreter_.CommitTransaction(); }
|
||||
void SessionHL::BeginTransaction(const std::map<std::string, memgraph::communication::bolt::Value> &extra) {
|
||||
interpreter_.BeginTransaction(ToQueryExtras(extra));
|
||||
|
||||
void SessionHL::RollbackTransaction() {
|
||||
try {
|
||||
interpreter_.RollbackTransaction();
|
||||
} catch (const memgraph::query::QueryException &e) {
|
||||
// Count the number of specific exceptions thrown
|
||||
metrics::IncrementCounter(GetExceptionName(e));
|
||||
// Wrap QueryException into ClientError, because we want to allow the
|
||||
// client to fix their query.
|
||||
throw memgraph::communication::bolt::ClientError(e.what());
|
||||
} catch (const memgraph::query::ReplicationException &e) {
|
||||
// Count the number of specific exceptions thrown
|
||||
metrics::IncrementCounter(GetExceptionName(e));
|
||||
throw memgraph::communication::bolt::ClientError(e.what());
|
||||
}
|
||||
}
|
||||
|
||||
void SessionHL::CommitTransaction() {
|
||||
try {
|
||||
interpreter_.CommitTransaction();
|
||||
} catch (const memgraph::query::QueryException &e) {
|
||||
// Count the number of specific exceptions thrown
|
||||
metrics::IncrementCounter(GetExceptionName(e));
|
||||
// Wrap QueryException into ClientError, because we want to allow the
|
||||
// client to fix their query.
|
||||
throw memgraph::communication::bolt::ClientError(e.what());
|
||||
} catch (const memgraph::query::ReplicationException &e) {
|
||||
// Count the number of specific exceptions thrown
|
||||
metrics::IncrementCounter(GetExceptionName(e));
|
||||
throw memgraph::communication::bolt::ClientError(e.what());
|
||||
}
|
||||
}
|
||||
|
||||
void SessionHL::BeginTransaction(const std::map<std::string, memgraph::communication::bolt::Value> &extra) {
|
||||
try {
|
||||
interpreter_.BeginTransaction(ToQueryExtras(extra));
|
||||
} catch (const memgraph::query::QueryException &e) {
|
||||
// Count the number of specific exceptions thrown
|
||||
metrics::IncrementCounter(GetExceptionName(e));
|
||||
// Wrap QueryException into ClientError, because we want to allow the
|
||||
// client to fix their query.
|
||||
throw memgraph::communication::bolt::ClientError(e.what());
|
||||
} catch (const memgraph::query::ReplicationException &e) {
|
||||
// Count the number of specific exceptions thrown
|
||||
metrics::IncrementCounter(GetExceptionName(e));
|
||||
throw memgraph::communication::bolt::ClientError(e.what());
|
||||
}
|
||||
}
|
||||
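RollbackTransaction, CommitTransaction and BeginTransaction above now share the same try/catch ladder: count the exception, then rewrap it as a Bolt ClientError so the client can fix its query. One way to express that once, sketched with hypothetical stand-in types instead of the real memgraph::query and communication::bolt classes:

#include <cstdio>
#include <stdexcept>
#include <string>
#include <utility>

// Hypothetical stand-ins for the real exception and error types.
struct QueryException : std::runtime_error { using std::runtime_error::runtime_error; };
struct ReplicationException : std::runtime_error { using std::runtime_error::runtime_error; };
struct ClientError : std::runtime_error { using std::runtime_error::runtime_error; };

void IncrementCounter(const std::string &name) { std::printf("metric++ %s\n", name.c_str()); }

// One wrapper instead of three copies of the same try/catch ladder.
template <typename F>
auto TranslateExceptions(const char *metric, F &&f) -> decltype(std::forward<F>(f)()) {
  try {
    return std::forward<F>(f)();
  } catch (const QueryException &e) {
    IncrementCounter(metric);     // count the specific exception thrown
    throw ClientError(e.what());  // let the client fix their query
  } catch (const ReplicationException &e) {
    IncrementCounter(metric);
    throw ClientError(e.what());
  }
}

int main() {
  try {
    TranslateExceptions("CommitTransaction", [] { throw ReplicationException("replica unreachable"); });
  } catch (const ClientError &e) {
    std::printf("client error: %s\n", e.what());
  }
  return 0;
}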
|
||||
void SessionHL::Configure(const std::map<std::string, memgraph::communication::bolt::Value> &run_time_info) {
|
||||
#ifdef MG_ENTERPRISE
|
||||
std::string db;
|
||||
|
@@ -127,6 +127,8 @@ storage::Result<Value> ToBoltValue(const query::TypedValue &value, const storage
      return Value(value.ValueLocalDateTime());
    case query::TypedValue::Type::Duration:
      return Value(value.ValueDuration());
    case query::TypedValue::Type::Function:
      throw communication::bolt::ValueException("Unsupported conversion from TypedValue::Function to Value");
    case query::TypedValue::Type::Graph:
      auto maybe_graph = ToBoltGraph(value.ValueGraph(), db, view);
      if (maybe_graph.HasError()) return maybe_graph.GetError();
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@ -10,6 +10,7 @@
|
||||
// licenses/APL.txt.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <optional>
|
||||
#include <span>
|
||||
|
@ -65,10 +65,13 @@ void InitFromCypherlFile(memgraph::query::InterpreterContext &ctx, memgraph::dbm
|
||||
std::string line;
|
||||
while (std::getline(file, line)) {
|
||||
if (!line.empty()) {
|
||||
auto results = interpreter.Prepare(line, {}, {});
|
||||
memgraph::query::DiscardValueResultStream stream;
|
||||
interpreter.Pull(&stream, {}, results.qid);
|
||||
|
||||
try {
|
||||
auto results = interpreter.Prepare(line, {}, {});
|
||||
memgraph::query::DiscardValueResultStream stream;
|
||||
interpreter.Pull(&stream, {}, results.qid);
|
||||
} catch (const memgraph::query::UserAlreadyExistsException &e) {
|
||||
spdlog::warn("{} The rest of the init-file will be run.", e.what());
|
||||
}
|
||||
if (audit_log) {
|
||||
audit_log->Record("", "", line, {}, memgraph::dbms::kDefaultDB);
|
||||
}
|
||||
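The init-file change above keeps executing the rest of the .cypherl file when a statement hits UserAlreadyExistsException. A toy sketch of that tolerant loop, with a hypothetical Run() standing in for Interpreter::Prepare/Pull and an assumed init.cypherl path:

#include <cstdio>
#include <fstream>
#include <stdexcept>
#include <string>

struct UserAlreadyExistsException : std::runtime_error { using std::runtime_error::runtime_error; };

// Hypothetical interpreter stand-in; a second "CREATE USER" triggers the error.
void Run(const std::string &line, bool &user_created) {
  if (line.rfind("CREATE USER", 0) == 0) {
    if (user_created) throw UserAlreadyExistsException("User already exists.");
    user_created = true;
  }
  std::printf("ran: %s\n", line.c_str());
}

int main() {
  std::ifstream file("init.cypherl");  // hypothetical init-file path
  std::string line;
  bool user_created = false;
  while (std::getline(file, line)) {
    if (line.empty()) continue;
    try {
      Run(line, user_created);
    } catch (const UserAlreadyExistsException &e) {
      // Re-running the init file should not abort on an already existing user.
      std::printf("%s The rest of the init-file will be run.\n", e.what());
    }
  }
  return 0;
}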
@ -291,7 +294,8 @@ int main(int argc, char **argv) {
|
||||
memgraph::storage::Config db_config{
|
||||
.gc = {.type = memgraph::storage::Config::Gc::Type::PERIODIC,
|
||||
.interval = std::chrono::seconds(FLAGS_storage_gc_cycle_sec)},
|
||||
.items = {.properties_on_edges = FLAGS_storage_properties_on_edges},
|
||||
.items = {.properties_on_edges = FLAGS_storage_properties_on_edges,
|
||||
.enable_schema_metadata = FLAGS_storage_enable_schema_metadata},
|
||||
.durability = {.storage_directory = FLAGS_data_directory,
|
||||
.recover_on_startup = FLAGS_storage_recover_on_startup || FLAGS_data_recovery_on_startup,
|
||||
.snapshot_retention_count = FLAGS_storage_snapshot_retention_count,
|
||||
@ -301,7 +305,9 @@ int main(int argc, char **argv) {
|
||||
.restore_replication_state_on_startup = FLAGS_replication_restore_state_on_startup,
|
||||
.items_per_batch = FLAGS_storage_items_per_batch,
|
||||
.recovery_thread_count = FLAGS_storage_recovery_thread_count,
|
||||
.allow_parallel_index_creation = FLAGS_storage_parallel_index_recovery},
|
||||
// deprecated
|
||||
.allow_parallel_index_creation = FLAGS_storage_parallel_index_recovery,
|
||||
.allow_parallel_schema_creation = FLAGS_storage_parallel_schema_recovery},
|
||||
.transaction = {.isolation_level = memgraph::flags::ParseIsolationLevel()},
|
||||
.disk = {.main_storage_directory = FLAGS_data_directory + "/rocksdb_main_storage",
|
||||
.label_index_directory = FLAGS_data_directory + "/rocksdb_label_index",
|
||||
@ -368,34 +374,17 @@ int main(int argc, char **argv) {
|
||||
std::unique_ptr<memgraph::query::AuthChecker> auth_checker;
|
||||
auth_glue(&auth_, auth_handler, auth_checker);
|
||||
|
||||
memgraph::replication::ReplicationState repl_state(ReplicationStateRootPath(db_config));
|
||||
|
||||
memgraph::dbms::DbmsHandler dbms_handler(db_config, repl_state
|
||||
memgraph::dbms::DbmsHandler dbms_handler(db_config
|
||||
#ifdef MG_ENTERPRISE
|
||||
,
|
||||
&auth_, FLAGS_data_recovery_on_startup, FLAGS_storage_delete_on_drop
|
||||
#endif
|
||||
);
|
||||
auto db_acc = dbms_handler.Get();
|
||||
memgraph::query::InterpreterContext interpreter_context_(interp_config, &dbms_handler, &repl_state,
|
||||
auth_handler.get(), auth_checker.get());
|
||||
MG_ASSERT(db_acc, "Failed to access the main database");
|
||||
|
||||
// TODO: Move it somewhere better
|
||||
// Startup replication state (if recovered at startup)
|
||||
MG_ASSERT(std::visit(memgraph::utils::Overloaded{[](memgraph::replication::RoleMainData const &) { return true; },
|
||||
[&](memgraph::replication::RoleReplicaData const &data) {
|
||||
// Register handlers
|
||||
memgraph::dbms::InMemoryReplicationHandlers::Register(
|
||||
&dbms_handler, *data.server);
|
||||
if (!data.server->Start()) {
|
||||
spdlog::error("Unable to start the replication server.");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}},
|
||||
repl_state.ReplicationData()),
|
||||
"Replica recovery failure!");
|
||||
memgraph::query::InterpreterContext interpreter_context_(
|
||||
interp_config, &dbms_handler, &dbms_handler.ReplicationState(), auth_handler.get(), auth_checker.get());
|
||||
MG_ASSERT(db_acc, "Failed to access the main database");
|
||||
|
||||
memgraph::query::procedure::gModuleRegistry.SetModulesDirectory(memgraph::flags::ParseQueryModulesDirectory(),
|
||||
FLAGS_data_directory);
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2022 Memgraph Ltd.
|
||||
// Copyright 2023 Memgraph Ltd.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
|
||||
@@ -62,6 +62,7 @@ bool TypedValueCompare(const TypedValue &a, const TypedValue &b) {
    case TypedValue::Type::Edge:
    case TypedValue::Type::Path:
    case TypedValue::Type::Graph:
    case TypedValue::Type::Function:
      throw QueryRuntimeException("Comparison is not defined for values of type {}.", a.type());
    case TypedValue::Type::Null:
      LOG_FATAL("Invalid type");
|
@ -15,6 +15,7 @@
|
||||
|
||||
#include <cppitertools/filter.hpp>
|
||||
#include <cppitertools/imap.hpp>
|
||||
#include "storage/v2/storage_mode.hpp"
|
||||
#include "utils/pmr/unordered_set.hpp"
|
||||
|
||||
namespace memgraph::query {
|
||||
@ -139,6 +140,8 @@ std::optional<VertexAccessor> SubgraphDbAccessor::FindVertex(storage::Gid gid, s
|
||||
|
||||
query::Graph *SubgraphDbAccessor::getGraph() { return graph_; }
|
||||
|
||||
storage::StorageMode SubgraphDbAccessor::GetStorageMode() const noexcept { return db_accessor_.GetStorageMode(); }
|
||||
|
||||
DbAccessor *SubgraphDbAccessor::GetAccessor() { return &db_accessor_; }
|
||||
|
||||
VertexAccessor SubgraphVertexAccessor::GetVertexAccessor() const { return impl_; }
|
||||
|
@@ -42,6 +42,8 @@ class EdgeAccessor final {

  explicit EdgeAccessor(storage::EdgeAccessor impl) : impl_(std::move(impl)) {}

  bool IsDeleted() const { return impl_.IsDeleted(); }

  bool IsVisible(storage::View view) const { return impl_.IsVisible(view); }

  storage::EdgeTypeId EdgeType() const { return impl_.EdgeType(); }
@ -543,7 +545,7 @@ class DbAccessor final {
|
||||
|
||||
void Abort() { accessor_->Abort(); }
|
||||
|
||||
storage::StorageMode GetStorageMode() const { return accessor_->GetCreationStorageMode(); }
|
||||
storage::StorageMode GetStorageMode() const noexcept { return accessor_->GetCreationStorageMode(); }
|
||||
|
||||
bool LabelIndexExists(storage::LabelId label) const { return accessor_->LabelIndexExists(label); }
|
||||
|
||||
@ -595,6 +597,13 @@ class DbAccessor final {
|
||||
return accessor_->ApproximateVertexCount(label, property, lower, upper);
|
||||
}
|
||||
|
||||
std::vector<storage::LabelId> ListAllPossiblyPresentVertexLabels() const {
|
||||
return accessor_->ListAllPossiblyPresentVertexLabels();
|
||||
}
|
||||
std::vector<storage::EdgeTypeId> ListAllPossiblyPresentEdgeTypes() const {
|
||||
return accessor_->ListAllPossiblyPresentEdgeTypes();
|
||||
}
|
||||
|
||||
storage::IndicesInfo ListAllIndices() const { return accessor_->ListAllIndices(); }
|
||||
|
||||
storage::ConstraintsInfo ListAllConstraints() const { return accessor_->ListAllConstraints(); }
|
||||
@ -693,6 +702,8 @@ class SubgraphDbAccessor final {
|
||||
|
||||
Graph *getGraph();
|
||||
|
||||
storage::StorageMode GetStorageMode() const noexcept;
|
||||
|
||||
DbAccessor *GetAccessor();
|
||||
};
|
||||
|
||||
|
@@ -126,6 +126,12 @@ class InfoInMulticommandTxException : public QueryException {
  SPECIALIZE_GET_EXCEPTION_NAME(InfoInMulticommandTxException)
};

class UserAlreadyExistsException : public QueryException {
 public:
  using QueryException::QueryException;
  SPECIALIZE_GET_EXCEPTION_NAME(UserAlreadyExistsException)
};

/**
 * An exception for an illegal operation that can not be detected
 * before the query starts executing over data.
@ -8,41 +8,42 @@
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
#include <tuple>
|
||||
#include <utility>
|
||||
#include "query/typed_value.hpp"
|
||||
#include "utils/fnv.hpp"
|
||||
#include "utils/memory.hpp"
|
||||
#include "utils/pmr/unordered_map.hpp"
|
||||
#include "utils/pmr/vector.hpp"
|
||||
namespace memgraph::query {
|
||||
|
||||
// Key is hash output, value is vector of unique elements
|
||||
using CachedType = utils::pmr::unordered_map<size_t, std::vector<TypedValue>>;
|
||||
using CachedType = utils::pmr::unordered_map<size_t, utils::pmr::vector<TypedValue>>;
|
||||
|
||||
struct CachedValue {
|
||||
using allocator_type = utils::Allocator<CachedValue>;
|
||||
|
||||
// Cached value, this can be probably templateized
|
||||
CachedType cache_;
|
||||
|
||||
explicit CachedValue(utils::MemoryResource *mem) : cache_(mem) {}
|
||||
explicit CachedValue(utils::MemoryResource *mem) : cache_{mem} {};
|
||||
CachedValue(const CachedValue &other, utils::MemoryResource *mem) : cache_(other.cache_, mem) {}
|
||||
CachedValue(CachedValue &&other, utils::MemoryResource *mem) : cache_(std::move(other.cache_), mem){};
|
||||
|
||||
CachedValue(CachedType &&cache, memgraph::utils::MemoryResource *memory) : cache_(std::move(cache), memory) {}
|
||||
CachedValue(CachedValue &&other) noexcept : CachedValue(std::move(other), other.GetMemoryResource()) {}
|
||||
|
||||
CachedValue(const CachedValue &other, memgraph::utils::MemoryResource *memory) : cache_(other.cache_, memory) {}
|
||||
CachedValue(const CachedValue &other)
|
||||
: CachedValue(other, std::allocator_traits<allocator_type>::select_on_container_copy_construction(
|
||||
other.GetMemoryResource())
|
||||
.GetMemoryResource()) {}
|
||||
|
||||
CachedValue(CachedValue &&other, memgraph::utils::MemoryResource *memory) : cache_(std::move(other.cache_), memory) {}
|
||||
|
||||
CachedValue(CachedValue &&other) noexcept = delete;
|
||||
|
||||
/// Copy construction without memgraph::utils::MemoryResource is not allowed.
|
||||
CachedValue(const CachedValue &) = delete;
|
||||
utils::MemoryResource *GetMemoryResource() const { return cache_.get_allocator().GetMemoryResource(); }
|
||||
|
||||
CachedValue &operator=(const CachedValue &) = delete;
|
||||
CachedValue &operator=(CachedValue &&) = delete;
|
||||
|
||||
~CachedValue() = default;
|
||||
|
||||
memgraph::utils::MemoryResource *GetMemoryResource() const noexcept {
|
||||
return cache_.get_allocator().GetMemoryResource();
|
||||
}
|
||||
|
||||
bool CacheValue(const TypedValue &maybe_list) {
|
||||
if (!maybe_list.IsList()) {
|
||||
return false;
|
||||
@ -70,7 +71,7 @@ struct CachedValue {
|
||||
}
|
||||
|
||||
private:
|
||||
static bool IsValueInVec(const std::vector<TypedValue> &vec_values, const TypedValue &value) {
|
||||
static bool IsValueInVec(const utils::pmr::vector<TypedValue> &vec_values, const TypedValue &value) {
|
||||
return std::any_of(vec_values.begin(), vec_values.end(), [&value](auto &vec_value) {
|
||||
const auto is_value_equal = vec_value == value;
|
||||
if (is_value_equal.IsNull()) return false;
|
||||
@ -82,35 +83,70 @@ struct CachedValue {
|
||||
// Class tracks keys for which user can cache values which help with faster search or faster retrieval
|
||||
// in the future. Used for IN LIST operator.
|
||||
class FrameChangeCollector {
|
||||
/** Allocator type so that STL containers are aware that we need one */
|
||||
using allocator_type = utils::Allocator<FrameChangeCollector>;
|
||||
|
||||
public:
|
||||
explicit FrameChangeCollector() : tracked_values_(&memory_resource_){};
|
||||
explicit FrameChangeCollector(utils::MemoryResource *mem = utils::NewDeleteResource()) : tracked_values_{mem} {}
|
||||
|
||||
FrameChangeCollector(FrameChangeCollector &&other, utils::MemoryResource *mem)
|
||||
: tracked_values_(std::move(other.tracked_values_), mem) {}
|
||||
FrameChangeCollector(const FrameChangeCollector &other, utils::MemoryResource *mem)
|
||||
: tracked_values_(other.tracked_values_, mem) {}
|
||||
|
||||
FrameChangeCollector(const FrameChangeCollector &other)
|
||||
: FrameChangeCollector(other, std::allocator_traits<allocator_type>::select_on_container_copy_construction(
|
||||
other.GetMemoryResource())
|
||||
.GetMemoryResource()){};
|
||||
|
||||
FrameChangeCollector(FrameChangeCollector &&other) noexcept
|
||||
: FrameChangeCollector(std::move(other), other.GetMemoryResource()) {}
|
||||
|
||||
/** Copy assign other, utils::MemoryResource of `this` is used */
|
||||
FrameChangeCollector &operator=(const FrameChangeCollector &) = default;
|
||||
|
||||
/** Move assign other, utils::MemoryResource of `this` is used. */
|
||||
FrameChangeCollector &operator=(FrameChangeCollector &&) noexcept = default;
|
||||
|
||||
utils::MemoryResource *GetMemoryResource() const { return tracked_values_.get_allocator().GetMemoryResource(); }
|
||||
|
||||
CachedValue &AddTrackingKey(const std::string &key) {
|
||||
const auto &[it, _] = tracked_values_.emplace(key, tracked_values_.get_allocator().GetMemoryResource());
|
||||
const auto &[it, _] = tracked_values_.emplace(
|
||||
std::piecewise_construct, std::forward_as_tuple(utils::pmr::string(key, utils::NewDeleteResource())),
|
||||
std::forward_as_tuple());
|
||||
return it->second;
|
||||
}
|
||||
|
||||
bool IsKeyTracked(const std::string &key) const { return tracked_values_.contains(key); }
|
||||
bool IsKeyTracked(const std::string &key) const {
|
||||
return tracked_values_.contains(utils::pmr::string(key, utils::NewDeleteResource()));
|
||||
}
|
||||
|
||||
bool IsKeyValueCached(const std::string &key) const {
|
||||
return IsKeyTracked(key) && !tracked_values_.at(key).cache_.empty();
|
||||
return IsKeyTracked(key) && !tracked_values_.at(utils::pmr::string(key, utils::NewDeleteResource())).cache_.empty();
|
||||
}
|
||||
|
||||
bool ResetTrackingValue(const std::string &key) {
|
||||
if (!tracked_values_.contains(key)) {
|
||||
if (!tracked_values_.contains(utils::pmr::string(key, utils::NewDeleteResource()))) {
|
||||
return false;
|
||||
}
|
||||
tracked_values_.erase(key);
|
||||
tracked_values_.erase(utils::pmr::string(key, utils::NewDeleteResource()));
|
||||
AddTrackingKey(key);
|
||||
return true;
|
||||
}
|
||||
|
||||
CachedValue &GetCachedValue(const std::string &key) { return tracked_values_.at(key); }
|
||||
CachedValue &GetCachedValue(const std::string &key) {
|
||||
return tracked_values_.at(utils::pmr::string(key, utils::NewDeleteResource()));
|
||||
}
|
||||
|
||||
bool IsTrackingValues() const { return !tracked_values_.empty(); }
|
||||
|
||||
~FrameChangeCollector() = default;
|
||||
|
||||
private:
|
||||
utils::MonotonicBufferResource memory_resource_{0};
|
||||
memgraph::utils::pmr::unordered_map<std::string, CachedValue> tracked_values_;
|
||||
struct PmrStringHash {
|
||||
size_t operator()(const utils::pmr::string &key) const { return utils::Fnv(key); }
|
||||
};
|
||||
|
||||
utils::pmr::unordered_map<utils::pmr::string, CachedValue, PmrStringHash> tracked_values_;
|
||||
};
|
||||
} // namespace memgraph::query
|
||||
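The FrameChangeCollector rewrite above keys its cache by utils::pmr::string (hashed with Fnv) and builds lookup keys against NewDeleteResource. A small standalone sketch of the same idea with std::pmr containers; PmrStringHash here hashes by content via std::hash<std::string_view>, which is an assumption in place of Memgraph's Fnv:

#include <cstdio>
#include <memory_resource>
#include <string>
#include <string_view>
#include <unordered_map>
#include <vector>

// Hash the pmr string by content so lookups do not care which resource built the key.
struct PmrStringHash {
  size_t operator()(const std::pmr::string &key) const {
    return std::hash<std::string_view>{}(std::string_view{key.data(), key.size()});
  }
};

int main() {
  // All tracked values live in one monotonic buffer, released wholesale at the end.
  std::pmr::monotonic_buffer_resource arena;
  std::pmr::unordered_map<std::pmr::string, std::pmr::vector<int>, PmrStringHash> tracked{&arena};

  // Stored keys and values use the arena; temporary lookup keys can use the default resource.
  const std::string key = "in_list_cache";
  tracked.emplace(std::pmr::string(key, &arena), std::pmr::vector<int>({1, 2, 3}, &arena));

  auto it = tracked.find(std::pmr::string(key, std::pmr::new_delete_resource()));
  if (it != tracked.end()) std::printf("cached %zu values for '%s'\n", it->second.size(), key.c_str());
  return 0;
}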
|
@ -1818,6 +1818,10 @@ class EdgeAtom : public memgraph::query::PatternAtom {
|
||||
memgraph::query::Identifier *inner_edge{nullptr};
|
||||
/// Argument identifier for the destination node of the edge.
|
||||
memgraph::query::Identifier *inner_node{nullptr};
|
||||
/// Argument identifier for the currently-accumulated path.
|
||||
memgraph::query::Identifier *accumulated_path{nullptr};
|
||||
/// Argument identifier for the weight of the currently-accumulated path.
|
||||
memgraph::query::Identifier *accumulated_weight{nullptr};
|
||||
/// Evaluates the result of the lambda.
|
||||
memgraph::query::Expression *expression{nullptr};
|
||||
|
||||
@ -1825,6 +1829,8 @@ class EdgeAtom : public memgraph::query::PatternAtom {
|
||||
Lambda object;
|
||||
object.inner_edge = inner_edge ? inner_edge->Clone(storage) : nullptr;
|
||||
object.inner_node = inner_node ? inner_node->Clone(storage) : nullptr;
|
||||
object.accumulated_path = accumulated_path ? accumulated_path->Clone(storage) : nullptr;
|
||||
object.accumulated_weight = accumulated_weight ? accumulated_weight->Clone(storage) : nullptr;
|
||||
object.expression = expression ? expression->Clone(storage) : nullptr;
|
||||
return object;
|
||||
}
|
||||
@ -2928,7 +2934,7 @@ class DatabaseInfoQuery : public memgraph::query::Query {
|
||||
static const utils::TypeInfo kType;
|
||||
const utils::TypeInfo &GetTypeInfo() const override { return kType; }
|
||||
|
||||
enum class InfoType { INDEX, CONSTRAINT };
|
||||
enum class InfoType { INDEX, CONSTRAINT, EDGE_TYPES, NODE_LABELS };
|
||||
|
||||
DEFVISITABLE(QueryVisitor<void>);
|
||||
|
||||
@ -3025,7 +3031,7 @@ class ReplicationQuery : public memgraph::query::Query {
|
||||
|
||||
enum class SyncMode { SYNC, ASYNC };
|
||||
|
||||
enum class ReplicaState { READY, REPLICATING, RECOVERY, INVALID };
|
||||
enum class ReplicaState { READY, REPLICATING, RECOVERY, MAYBE_BEHIND };
|
||||
|
||||
ReplicationQuery() = default;
|
||||
|
||||
|
@ -124,6 +124,14 @@ antlrcpp::Any CypherMainVisitor::visitDatabaseInfoQuery(MemgraphCypher::Database
|
||||
info_query->info_type_ = DatabaseInfoQuery::InfoType::CONSTRAINT;
|
||||
return info_query;
|
||||
}
|
||||
if (ctx->edgetypeInfo()) {
|
||||
info_query->info_type_ = DatabaseInfoQuery::InfoType::EDGE_TYPES;
|
||||
return info_query;
|
||||
}
|
||||
if (ctx->nodelabelInfo()) {
|
||||
info_query->info_type_ = DatabaseInfoQuery::InfoType::NODE_LABELS;
|
||||
return info_query;
|
||||
}
|
||||
// Should never get here
|
||||
throw utils::NotYetImplemented("Database info query: '{}'", ctx->getText());
|
||||
}
|
||||
@ -1268,28 +1276,59 @@ antlrcpp::Any CypherMainVisitor::visitCallProcedure(MemgraphCypher::CallProcedur
|
||||
call_proc->result_identifiers_.push_back(storage_->Create<Identifier>(result_alias));
|
||||
}
|
||||
} else {
|
||||
const auto &maybe_found =
|
||||
procedure::FindProcedure(procedure::gModuleRegistry, call_proc->procedure_name_, utils::NewDeleteResource());
|
||||
if (!maybe_found) {
|
||||
throw SemanticException("There is no procedure named '{}'.", call_proc->procedure_name_);
|
||||
call_proc->is_write_ = maybe_found->second->info.is_write;
|
||||
|
||||
auto *yield_ctx = ctx->yieldProcedureResults();
|
||||
if (!yield_ctx) {
|
||||
if (!maybe_found->second->results.empty() && !call_proc->void_procedure_) {
|
||||
throw SemanticException(
|
||||
"CALL without YIELD may only be used on procedures which do not "
|
||||
"return any result fields.");
|
||||
}
|
||||
// When we return, we will release the lock on modules. This means that
|
||||
// someone may reload the procedure and change the result signature. But to
|
||||
// keep the implementation simple, we ignore the case as the rest of the
|
||||
// code doesn't really care whether we yield or not, so it should not break.
|
||||
return call_proc;
|
||||
}
|
||||
const auto &[module, proc] = *maybe_found;
|
||||
call_proc->result_fields_.reserve(proc->results.size());
|
||||
call_proc->result_identifiers_.reserve(proc->results.size());
|
||||
for (const auto &[result_name, desc] : proc->results) {
|
||||
bool is_deprecated = desc.second;
|
||||
if (is_deprecated) continue;
|
||||
call_proc->result_fields_.emplace_back(result_name);
|
||||
call_proc->result_identifiers_.push_back(storage_->Create<Identifier>(std::string(result_name)));
|
||||
if (yield_ctx->getTokens(MemgraphCypher::ASTERISK).empty()) {
|
||||
call_proc->result_fields_.reserve(yield_ctx->procedureResult().size());
|
||||
call_proc->result_identifiers_.reserve(yield_ctx->procedureResult().size());
|
||||
for (auto *result : yield_ctx->procedureResult()) {
|
||||
MG_ASSERT(result->variable().size() == 1 || result->variable().size() == 2);
|
||||
call_proc->result_fields_.push_back(std::any_cast<std::string>(result->variable()[0]->accept(this)));
|
||||
std::string result_alias;
|
||||
if (result->variable().size() == 2) {
|
||||
result_alias = std::any_cast<std::string>(result->variable()[1]->accept(this));
|
||||
} else {
|
||||
result_alias = std::any_cast<std::string>(result->variable()[0]->accept(this));
|
||||
}
|
||||
call_proc->result_identifiers_.push_back(storage_->Create<Identifier>(result_alias));
|
||||
}
|
||||
} else {
|
||||
const auto &maybe_found =
|
||||
procedure::FindProcedure(procedure::gModuleRegistry, call_proc->procedure_name_, utils::NewDeleteResource());
|
||||
if (!maybe_found) {
|
||||
throw SemanticException("There is no procedure named '{}'.", call_proc->procedure_name_);
|
||||
}
|
||||
const auto &[module, proc] = *maybe_found;
|
||||
call_proc->result_fields_.reserve(proc->results.size());
|
||||
call_proc->result_identifiers_.reserve(proc->results.size());
|
||||
for (const auto &[result_name, desc] : proc->results) {
|
||||
bool is_deprecated = desc.second;
|
||||
if (is_deprecated) continue;
|
||||
call_proc->result_fields_.emplace_back(result_name);
|
||||
call_proc->result_identifiers_.push_back(storage_->Create<Identifier>(std::string(result_name)));
|
||||
}
|
||||
// When we leave the scope, we will release the lock on modules. This means
|
||||
// that someone may reload the procedure and change its result signature. We
|
||||
// are fine with this, because if new result fields were added then we yield
|
||||
// the subset of those and that will appear to a user as if they used the
|
||||
// procedure before reload. Any subsequent `CALL ... YIELD *` will fetch the
|
||||
// new fields as well. In case the result signature has had some result
|
||||
// fields removed, then the query execution will report an error that we are
|
||||
// yielding missing fields. The user can then just retry the query.
|
||||
}
|
||||
// When we leave the scope, we will release the lock on modules. This means
|
||||
// that someone may reload the procedure and change its result signature. We
|
||||
// are fine with this, because if new result fields were added then we yield
|
||||
// the subset of those and that will appear to a user as if they used the
|
||||
// procedure before reload. Any subsequent `CALL ... YIELD *` will fetch the
|
||||
// new fields as well. In case the result signature has had some result
|
||||
// fields removed, then the query execution will report an error that we are
|
||||
// yielding missing fields. The user can then just retry the query.
|
||||
}
|
||||
|
||||
return call_proc;
|
||||
@ -1978,6 +2017,15 @@ antlrcpp::Any CypherMainVisitor::visitRelationshipPattern(MemgraphCypher::Relati
|
||||
edge_lambda.inner_edge = storage_->Create<Identifier>(traversed_edge_variable);
|
||||
auto traversed_node_variable = std::any_cast<std::string>(lambda->traversed_node->accept(this));
|
||||
edge_lambda.inner_node = storage_->Create<Identifier>(traversed_node_variable);
|
||||
if (lambda->accumulated_path) {
|
||||
auto accumulated_path_variable = std::any_cast<std::string>(lambda->accumulated_path->accept(this));
|
||||
edge_lambda.accumulated_path = storage_->Create<Identifier>(accumulated_path_variable);
|
||||
|
||||
if (lambda->accumulated_weight) {
|
||||
auto accumulated_weight_variable = std::any_cast<std::string>(lambda->accumulated_weight->accept(this));
|
||||
edge_lambda.accumulated_weight = storage_->Create<Identifier>(accumulated_weight_variable);
|
||||
}
|
||||
}
|
||||
edge_lambda.expression = std::any_cast<Expression *>(lambda->expression()->accept(this));
|
||||
return edge_lambda;
|
||||
};
|
||||
@ -2002,6 +2050,15 @@ antlrcpp::Any CypherMainVisitor::visitRelationshipPattern(MemgraphCypher::Relati
|
||||
// In variable expansion inner variables are mandatory.
|
||||
anonymous_identifiers.push_back(&edge->filter_lambda_.inner_edge);
|
||||
anonymous_identifiers.push_back(&edge->filter_lambda_.inner_node);
|
||||
|
||||
// TODO: In what use case do we need accumulated path and weight here?
|
||||
if (edge->filter_lambda_.accumulated_path) {
|
||||
anonymous_identifiers.push_back(&edge->filter_lambda_.accumulated_path);
|
||||
|
||||
if (edge->filter_lambda_.accumulated_weight) {
|
||||
anonymous_identifiers.push_back(&edge->filter_lambda_.accumulated_weight);
|
||||
}
|
||||
}
|
||||
break;
|
||||
case 1:
|
||||
if (edge->type_ == EdgeAtom::Type::WEIGHTED_SHORTEST_PATH ||
|
||||
@ -2013,9 +2070,21 @@ antlrcpp::Any CypherMainVisitor::visitRelationshipPattern(MemgraphCypher::Relati
|
||||
// Add mandatory inner variables for filter lambda.
|
||||
anonymous_identifiers.push_back(&edge->filter_lambda_.inner_edge);
|
||||
anonymous_identifiers.push_back(&edge->filter_lambda_.inner_node);
|
||||
if (edge->filter_lambda_.accumulated_path) {
|
||||
anonymous_identifiers.push_back(&edge->filter_lambda_.accumulated_path);
|
||||
|
||||
if (edge->filter_lambda_.accumulated_weight) {
|
||||
anonymous_identifiers.push_back(&edge->filter_lambda_.accumulated_weight);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Other variable expands only have the filter lambda.
|
||||
edge->filter_lambda_ = visit_lambda(relationshipLambdas[0]);
|
||||
if (edge->filter_lambda_.accumulated_weight) {
|
||||
throw SemanticException(
|
||||
"Accumulated weight in filter lambda can be used only with "
|
||||
"shortest paths expansion.");
|
||||
}
|
||||
}
|
||||
break;
|
||||
case 2:
|
||||
|
@ -47,9 +47,13 @@ indexInfo : INDEX INFO ;

constraintInfo : CONSTRAINT INFO ;

edgetypeInfo : EDGE_TYPES INFO ;

nodelabelInfo : NODE_LABELS INFO ;

buildInfo : BUILD INFO ;

databaseInfoQuery : SHOW ( indexInfo | constraintInfo ) ;
databaseInfoQuery : SHOW ( indexInfo | constraintInfo | edgetypeInfo | nodelabelInfo ) ;

systemInfoQuery : SHOW ( storageInfo | buildInfo ) ;

@ -175,7 +179,7 @@ relationshipDetail : '[' ( name=variable )? ( relationshipTypes )? ( variableExp
| '[' ( name=variable )? ( relationshipTypes )? ( variableExpansion )? relationshipLambda ( total_weight=variable )? (relationshipLambda )? ']'
| '[' ( name=variable )? ( relationshipTypes )? ( variableExpansion )? (properties )* ( relationshipLambda total_weight=variable )? (relationshipLambda )? ']';

relationshipLambda: '(' traversed_edge=variable ',' traversed_node=variable '|' expression ')';
relationshipLambda: '(' traversed_edge=variable ',' traversed_node=variable ( ',' accumulated_path=variable )? ( ',' accumulated_weight=variable )? '|' expression ')';

variableExpansion : '*' (BFS | WSHORTEST | ALLSHORTEST)? ( expression )? ( '..' ( expression )? )? ;

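The extended relationshipLambda rule above is what lets a filter lambda name the accumulated path and, optionally, the accumulated weight as extra lambda arguments. As an illustrative sketch only (property and label names are hypothetical, not taken from this diff), a query exercising the new rule could read MATCH p = (a)-[*WSHORTEST (e, n | e.weight) total_w (e, n, path, w | w < 100 AND NOT n:Blocked)]->(b) RETURN p, total_w, where path and w are rebound on every expansion step and total_w carries the final weight.
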
@ -61,6 +61,7 @@ memgraphCypherKeyword : cypherKeyword
|
||||
| GRANT
|
||||
| HEADER
|
||||
| IDENTIFIED
|
||||
| NODE_LABELS
|
||||
| NULLIF
|
||||
| IMPORT
|
||||
| INACTIVE
|
||||
|
@ -89,6 +89,7 @@ MULTI_DATABASE_EDIT : M U L T I UNDERSCORE D A T A B A S E UNDERSCORE E D I
|
||||
MULTI_DATABASE_USE : M U L T I UNDERSCORE D A T A B A S E UNDERSCORE U S E ;
|
||||
NEXT : N E X T ;
|
||||
NO : N O ;
|
||||
NODE_LABELS : N O D E UNDERSCORE L A B E L S ;
|
||||
NOTHING : N O T H I N G ;
|
||||
ON_DISK_TRANSACTIONAL : O N UNDERSCORE D I S K UNDERSCORE T R A N S A C T I O N A L ;
|
||||
NULLIF : N U L L I F ;
|
||||
|
@ -38,6 +38,9 @@ class PrivilegeExtractor : public QueryVisitor<void>, public HierarchicalTreeVis
void Visit(DatabaseInfoQuery &info_query) override {
switch (info_query.info_type_) {
case DatabaseInfoQuery::InfoType::INDEX:
// TODO: Reconsider privileges; these 4 info types should have the same.
case DatabaseInfoQuery::InfoType::EDGE_TYPES:
case DatabaseInfoQuery::InfoType::NODE_LABELS:
// TODO: This should be INDEX | STATS, but we don't have support for
// *or* with privileges.
AddPrivilege(AuthQuery::Privilege::INDEX);

@ -658,8 +658,16 @@ bool SymbolGenerator::PreVisit(EdgeAtom &edge_atom) {
|
||||
scope.in_edge_range = false;
|
||||
scope.in_pattern = false;
|
||||
if (edge_atom.filter_lambda_.expression) {
|
||||
VisitWithIdentifiers(edge_atom.filter_lambda_.expression,
|
||||
{edge_atom.filter_lambda_.inner_edge, edge_atom.filter_lambda_.inner_node});
|
||||
std::vector<Identifier *> filter_lambda_identifiers{edge_atom.filter_lambda_.inner_edge,
|
||||
edge_atom.filter_lambda_.inner_node};
|
||||
if (edge_atom.filter_lambda_.accumulated_path) {
|
||||
filter_lambda_identifiers.emplace_back(edge_atom.filter_lambda_.accumulated_path);
|
||||
|
||||
if (edge_atom.filter_lambda_.accumulated_weight) {
|
||||
filter_lambda_identifiers.emplace_back(edge_atom.filter_lambda_.accumulated_weight);
|
||||
}
|
||||
}
|
||||
VisitWithIdentifiers(edge_atom.filter_lambda_.expression, filter_lambda_identifiers);
|
||||
} else {
|
||||
// Create inner symbols, but don't bind them in scope, since they are to
|
||||
// be used in the missing filter expression.
|
||||
@ -668,6 +676,17 @@ bool SymbolGenerator::PreVisit(EdgeAtom &edge_atom) {
|
||||
auto *inner_node = edge_atom.filter_lambda_.inner_node;
|
||||
inner_node->MapTo(
|
||||
symbol_table_->CreateSymbol(inner_node->name_, inner_node->user_declared_, Symbol::Type::VERTEX));
|
||||
if (edge_atom.filter_lambda_.accumulated_path) {
|
||||
auto *accumulated_path = edge_atom.filter_lambda_.accumulated_path;
|
||||
accumulated_path->MapTo(
|
||||
symbol_table_->CreateSymbol(accumulated_path->name_, accumulated_path->user_declared_, Symbol::Type::PATH));
|
||||
|
||||
if (edge_atom.filter_lambda_.accumulated_weight) {
|
||||
auto *accumulated_weight = edge_atom.filter_lambda_.accumulated_weight;
|
||||
accumulated_weight->MapTo(symbol_table_->CreateSymbol(
|
||||
accumulated_weight->name_, accumulated_weight->user_declared_, Symbol::Type::NUMBER));
|
||||
}
|
||||
}
|
||||
}
|
||||
if (edge_atom.weight_lambda_.expression) {
|
||||
VisitWithIdentifiers(edge_atom.weight_lambda_.expression,
|
||||
|
@ -593,6 +593,7 @@ TypedValue ValueType(const TypedValue *args, int64_t nargs, const FunctionContex
case TypedValue::Type::Duration:
return TypedValue("DURATION", ctx.memory);
case TypedValue::Type::Graph:
case TypedValue::Type::Function:
throw QueryRuntimeException("Cannot fetch graph as it is not standardized openCypher type name");
}
}

@ -18,6 +18,7 @@
|
||||
#include <map>
|
||||
#include <optional>
|
||||
#include <regex>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
@ -28,8 +29,10 @@
|
||||
#include "query/frontend/ast/ast.hpp"
|
||||
#include "query/frontend/semantic/symbol_table.hpp"
|
||||
#include "query/interpret/frame.hpp"
|
||||
#include "query/procedure/mg_procedure_impl.hpp"
|
||||
#include "query/typed_value.hpp"
|
||||
#include "spdlog/spdlog.h"
|
||||
#include "storage/v2/storage_mode.hpp"
|
||||
#include "utils/exceptions.hpp"
|
||||
#include "utils/frame_change_id.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
@ -187,6 +190,8 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
|
||||
|
||||
utils::MemoryResource *GetMemoryResource() const { return ctx_->memory; }
|
||||
|
||||
void ResetPropertyLookupCache() { property_lookup_cache_.clear(); }
|
||||
|
||||
TypedValue Visit(NamedExpression &named_expression) override {
|
||||
const auto &symbol = symbol_table_->at(named_expression);
|
||||
auto value = named_expression.expression_->Accept(*this);
|
||||
@ -837,6 +842,8 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
|
||||
|
||||
TypedValue Visit(Function &function) override {
|
||||
FunctionContext function_ctx{dba_, ctx_->memory, ctx_->timestamp, &ctx_->counters, view_};
|
||||
bool is_transactional = storage::IsTransactional(dba_->GetStorageMode());
|
||||
TypedValue res(ctx_->memory);
|
||||
// Stack allocate evaluated arguments when there's a small number of them.
|
||||
if (function.arguments_.size() <= 8) {
|
||||
TypedValue arguments[8] = {TypedValue(ctx_->memory), TypedValue(ctx_->memory), TypedValue(ctx_->memory),
|
||||
@ -845,19 +852,20 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
|
||||
for (size_t i = 0; i < function.arguments_.size(); ++i) {
|
||||
arguments[i] = function.arguments_[i]->Accept(*this);
|
||||
}
|
||||
auto res = function.function_(arguments, function.arguments_.size(), function_ctx);
|
||||
MG_ASSERT(res.GetMemoryResource() == ctx_->memory);
|
||||
return res;
|
||||
res = function.function_(arguments, function.arguments_.size(), function_ctx);
|
||||
} else {
|
||||
TypedValue::TVector arguments(ctx_->memory);
|
||||
arguments.reserve(function.arguments_.size());
|
||||
for (const auto &argument : function.arguments_) {
|
||||
arguments.emplace_back(argument->Accept(*this));
|
||||
}
|
||||
auto res = function.function_(arguments.data(), arguments.size(), function_ctx);
|
||||
MG_ASSERT(res.GetMemoryResource() == ctx_->memory);
|
||||
return res;
|
||||
res = function.function_(arguments.data(), arguments.size(), function_ctx);
|
||||
}
|
||||
MG_ASSERT(res.GetMemoryResource() == ctx_->memory);
|
||||
if (!is_transactional && res.ContainsDeleted()) [[unlikely]] {
|
||||
return TypedValue(ctx_->memory);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
TypedValue Visit(Reduce &reduce) override {
|
||||
@ -903,7 +911,17 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
|
||||
return TypedValue(std::move(result), ctx_->memory);
|
||||
}
|
||||
|
||||
TypedValue Visit(Exists &exists) override { return TypedValue{frame_->at(symbol_table_->at(exists)), ctx_->memory}; }
|
||||
TypedValue Visit(Exists &exists) override {
|
||||
TypedValue &frame_exists_value = frame_->at(symbol_table_->at(exists));
|
||||
if (!frame_exists_value.IsFunction()) [[unlikely]] {
|
||||
throw QueryRuntimeException(
|
||||
"Unexpected behavior: Exists expected a function, got {}. Please report the problem on GitHub issues",
|
||||
frame_exists_value.type());
|
||||
}
|
||||
TypedValue result{ctx_->memory};
|
||||
frame_exists_value.ValueFunction()(&result);
|
||||
return result;
|
||||
}
|
||||
|
||||
TypedValue Visit(All &all) override {
|
||||
auto list_value = all.list_expression_->Accept(*this);
|
||||
|
@ -274,8 +274,7 @@ inline auto convertToReplicationMode(const ReplicationQuery::SyncMode &sync_mode
|
||||
|
||||
class ReplQueryHandler final : public query::ReplicationQueryHandler {
|
||||
public:
|
||||
explicit ReplQueryHandler(dbms::DbmsHandler *dbms_handler, memgraph::replication::ReplicationState *repl_state)
|
||||
: dbms_handler_(dbms_handler), handler_{*repl_state, *dbms_handler} {}
|
||||
explicit ReplQueryHandler(dbms::DbmsHandler *dbms_handler) : dbms_handler_(dbms_handler), handler_{*dbms_handler} {}
|
||||
|
||||
/// @throw QueryRuntimeException if an error occurred.
|
||||
void SetReplicationRole(ReplicationQuery::ReplicationRole replication_role, std::optional<int64_t> port) override {
|
||||
@ -404,8 +403,8 @@ class ReplQueryHandler final : public query::ReplicationQueryHandler {
|
||||
case storage::replication::ReplicaState::RECOVERY:
|
||||
replica.state = ReplicationQuery::ReplicaState::RECOVERY;
|
||||
break;
|
||||
case storage::replication::ReplicaState::INVALID:
|
||||
replica.state = ReplicationQuery::ReplicaState::INVALID;
|
||||
case storage::replication::ReplicaState::MAYBE_BEHIND:
|
||||
replica.state = ReplicationQuery::ReplicaState::MAYBE_BEHIND;
|
||||
break;
|
||||
}
|
||||
|
||||
@ -479,7 +478,7 @@ Callback HandleAuthQuery(AuthQuery *auth_query, InterpreterContext *interpreter_
|
||||
MG_ASSERT(password.IsString() || password.IsNull());
|
||||
if (!auth->CreateUser(username, password.IsString() ? std::make_optional(std::string(password.ValueString()))
|
||||
: std::nullopt)) {
|
||||
throw QueryRuntimeException("User '{}' already exists.", username);
|
||||
throw UserAlreadyExistsException("User '{}' already exists.", username);
|
||||
}
|
||||
|
||||
// If the license is not valid we create users with admin access
|
||||
@ -713,8 +712,7 @@ Callback HandleAuthQuery(AuthQuery *auth_query, InterpreterContext *interpreter_
|
||||
|
||||
Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters ¶meters,
|
||||
dbms::DbmsHandler *dbms_handler, const query::InterpreterConfig &config,
|
||||
std::vector<Notification> *notifications,
|
||||
memgraph::replication::ReplicationState *repl_state) {
|
||||
std::vector<Notification> *notifications) {
|
||||
// TODO: MemoryResource for EvaluationContext, it should probably be passed as
|
||||
// the argument to Callback.
|
||||
EvaluationContext evaluation_context;
|
||||
@ -734,8 +732,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
|
||||
notifications->emplace_back(SeverityLevel::WARNING, NotificationCode::REPLICA_PORT_WARNING,
|
||||
"Be careful the replication port must be different from the memgraph port!");
|
||||
}
|
||||
callback.fn = [handler = ReplQueryHandler{dbms_handler, repl_state}, role = repl_query->role_,
|
||||
maybe_port]() mutable {
|
||||
callback.fn = [handler = ReplQueryHandler{dbms_handler}, role = repl_query->role_, maybe_port]() mutable {
|
||||
handler.SetReplicationRole(role, maybe_port);
|
||||
return std::vector<std::vector<TypedValue>>();
|
||||
};
|
||||
@ -747,7 +744,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
|
||||
}
|
||||
case ReplicationQuery::Action::SHOW_REPLICATION_ROLE: {
|
||||
callback.header = {"replication role"};
|
||||
callback.fn = [handler = ReplQueryHandler{dbms_handler, repl_state}] {
|
||||
callback.fn = [handler = ReplQueryHandler{dbms_handler}] {
|
||||
auto mode = handler.ShowReplicationRole();
|
||||
switch (mode) {
|
||||
case ReplicationQuery::ReplicationRole::MAIN: {
|
||||
@ -766,7 +763,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
|
||||
auto socket_address = repl_query->socket_address_->Accept(evaluator);
|
||||
const auto replica_check_frequency = config.replication_replica_check_frequency;
|
||||
|
||||
callback.fn = [handler = ReplQueryHandler{dbms_handler, repl_state}, name, socket_address, sync_mode,
|
||||
callback.fn = [handler = ReplQueryHandler{dbms_handler}, name, socket_address, sync_mode,
|
||||
replica_check_frequency]() mutable {
|
||||
handler.RegisterReplica(name, std::string(socket_address.ValueString()), sync_mode, replica_check_frequency);
|
||||
return std::vector<std::vector<TypedValue>>();
|
||||
@ -777,7 +774,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
|
||||
}
|
||||
case ReplicationQuery::Action::DROP_REPLICA: {
|
||||
const auto &name = repl_query->replica_name_;
|
||||
callback.fn = [handler = ReplQueryHandler{dbms_handler, repl_state}, name]() mutable {
|
||||
callback.fn = [handler = ReplQueryHandler{dbms_handler}, name]() mutable {
|
||||
handler.DropReplica(name);
|
||||
return std::vector<std::vector<TypedValue>>();
|
||||
};
|
||||
@ -789,7 +786,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
|
||||
callback.header = {
|
||||
"name", "socket_address", "sync_mode", "current_timestamp_of_replica", "number_of_timestamp_behind_master",
|
||||
"state"};
|
||||
callback.fn = [handler = ReplQueryHandler{dbms_handler, repl_state}, replica_nfields = callback.header.size()] {
|
||||
callback.fn = [handler = ReplQueryHandler{dbms_handler}, replica_nfields = callback.header.size()] {
|
||||
const auto &replicas = handler.ShowReplicas();
|
||||
auto typed_replicas = std::vector<std::vector<TypedValue>>{};
|
||||
typed_replicas.reserve(replicas.size());
|
||||
@ -822,7 +819,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query, const Parameters &
|
||||
case ReplicationQuery::ReplicaState::RECOVERY:
|
||||
typed_replica.emplace_back("recovery");
|
||||
break;
|
||||
case ReplicationQuery::ReplicaState::INVALID:
|
||||
case ReplicationQuery::ReplicaState::MAYBE_BEHIND:
|
||||
typed_replica.emplace_back("invalid");
|
||||
break;
|
||||
}
|
||||
@ -2263,15 +2260,14 @@ PreparedQuery PrepareAuthQuery(ParsedQuery parsed_query, bool in_explicit_transa
|
||||
|
||||
PreparedQuery PrepareReplicationQuery(ParsedQuery parsed_query, bool in_explicit_transaction,
|
||||
std::vector<Notification> *notifications, dbms::DbmsHandler &dbms_handler,
|
||||
const InterpreterConfig &config,
|
||||
memgraph::replication::ReplicationState *repl_state) {
|
||||
const InterpreterConfig &config) {
|
||||
if (in_explicit_transaction) {
|
||||
throw ReplicationModificationInMulticommandTxException();
|
||||
}
|
||||
|
||||
auto *replication_query = utils::Downcast<ReplicationQuery>(parsed_query.query);
|
||||
auto callback = HandleReplicationQuery(replication_query, parsed_query.parameters, &dbms_handler, config,
|
||||
notifications, repl_state);
|
||||
auto callback =
|
||||
HandleReplicationQuery(replication_query, parsed_query.parameters, &dbms_handler, config, notifications);
|
||||
|
||||
return PreparedQuery{callback.header, std::move(parsed_query.required_privileges),
|
||||
[callback_fn = std::move(callback.fn), pull_plan = std::shared_ptr<PullPlanVector>{nullptr}](
|
||||
@ -3046,6 +3042,46 @@ PreparedQuery PrepareDatabaseInfoQuery(ParsedQuery parsed_query, bool in_explici
|
||||
};
|
||||
break;
|
||||
}
|
||||
case DatabaseInfoQuery::InfoType::EDGE_TYPES: {
|
||||
header = {"edge types"};
|
||||
handler = [storage = current_db.db_acc_->get()->storage(), dba] {
|
||||
if (!storage->config_.items.enable_schema_metadata) {
|
||||
throw QueryRuntimeException(
|
||||
"The metadata collection for edge-types is disabled. To enable it, restart your instance and set the "
|
||||
"storage-enable-schema-metadata flag to True.");
|
||||
}
|
||||
auto edge_types = dba->ListAllPossiblyPresentEdgeTypes();
|
||||
std::vector<std::vector<TypedValue>> results;
|
||||
results.reserve(edge_types.size());
|
||||
for (auto &edge_type : edge_types) {
|
||||
results.push_back({TypedValue(storage->EdgeTypeToName(edge_type))});
|
||||
}
|
||||
|
||||
return std::pair{results, QueryHandlerResult::COMMIT};
|
||||
};
|
||||
|
||||
break;
|
||||
}
|
||||
case DatabaseInfoQuery::InfoType::NODE_LABELS: {
|
||||
header = {"node labels"};
|
||||
handler = [storage = current_db.db_acc_->get()->storage(), dba] {
|
||||
if (!storage->config_.items.enable_schema_metadata) {
|
||||
throw QueryRuntimeException(
|
||||
"The metadata collection for node-labels is disabled. To enable it, restart your instance and set the "
|
||||
"storage-enable-schema-metadata flag to True.");
|
||||
}
|
||||
auto node_labels = dba->ListAllPossiblyPresentVertexLabels();
|
||||
std::vector<std::vector<TypedValue>> results;
|
||||
results.reserve(node_labels.size());
|
||||
for (auto &node_label : node_labels) {
|
||||
results.push_back({TypedValue(storage->LabelToName(node_label))});
|
||||
}
|
||||
|
||||
return std::pair{results, QueryHandlerResult::COMMIT};
|
||||
};
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return PreparedQuery{std::move(header), std::move(parsed_query.required_privileges),
|
||||
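Taken together with the edgetypeInfo and nodelabelInfo grammar additions earlier in the diff, these two handlers back the new SHOW EDGE_TYPES INFO; and SHOW NODE_LABELS INFO; queries. Both paths refuse to run unless the instance was started with the storage-enable-schema-metadata flag set to true, exactly as the error messages above spell out, because the listed edge types and labels come from the schema-metadata bookkeeping rather than from a scan.
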
@ -3349,8 +3385,7 @@ PreparedQuery PrepareConstraintQuery(ParsedQuery parsed_query, bool in_explicit_
|
||||
|
||||
PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB ¤t_db,
|
||||
InterpreterContext *interpreter_context,
|
||||
std::optional<std::function<void(std::string_view)>> on_change_cb,
|
||||
memgraph::replication::ReplicationState *repl_state) {
|
||||
std::optional<std::function<void(std::string_view)>> on_change_cb) {
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (!license::global_license_checker.IsEnterpriseValidFast()) {
|
||||
throw QueryException("Trying to use enterprise feature without a valid license.");
|
||||
@ -3361,9 +3396,11 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur
|
||||
auto *query = utils::Downcast<MultiDatabaseQuery>(parsed_query.query);
|
||||
auto *db_handler = interpreter_context->dbms_handler;
|
||||
|
||||
const bool is_replica = interpreter_context->repl_state->IsReplica();
|
||||
|
||||
switch (query->action_) {
|
||||
case MultiDatabaseQuery::Action::CREATE:
|
||||
if (repl_state->IsReplica()) {
|
||||
if (is_replica) {
|
||||
throw QueryException("Query forbidden on the replica!");
|
||||
}
|
||||
return PreparedQuery{
|
||||
@ -3408,12 +3445,12 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur
|
||||
if (current_db.in_explicit_db_) {
|
||||
throw QueryException("Database switching is prohibited if session explicitly defines the used database");
|
||||
}
|
||||
if (!dbms::allow_mt_repl && repl_state->IsReplica()) {
|
||||
if (!dbms::allow_mt_repl && is_replica) {
|
||||
throw QueryException("Query forbidden on the replica!");
|
||||
}
|
||||
return PreparedQuery{{"STATUS"},
|
||||
std::move(parsed_query.required_privileges),
|
||||
[db_name = query->db_name_, db_handler, ¤t_db, on_change_cb](
|
||||
[db_name = query->db_name_, db_handler, ¤t_db, on_change = std::move(on_change_cb)](
|
||||
AnyStream *stream, std::optional<int> n) -> std::optional<QueryHandlerResult> {
|
||||
std::vector<std::vector<TypedValue>> status;
|
||||
std::string res;
|
||||
@ -3423,7 +3460,7 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur
|
||||
res = "Already using " + db_name;
|
||||
} else {
|
||||
auto tmp = db_handler->Get(db_name);
|
||||
if (on_change_cb) (*on_change_cb)(db_name); // Will trow if cb fails
|
||||
if (on_change) (*on_change)(db_name); // Will throw if cb fails
|
||||
current_db.SetCurrentDB(std::move(tmp), false);
|
||||
res = "Using " + db_name;
|
||||
}
|
||||
@ -3442,7 +3479,7 @@ PreparedQuery PrepareMultiDatabaseQuery(ParsedQuery parsed_query, CurrentDB &cur
|
||||
query->db_name_};
|
||||
|
||||
case MultiDatabaseQuery::Action::DROP:
|
||||
if (repl_state->IsReplica()) {
|
||||
if (is_replica) {
|
||||
throw QueryException("Query forbidden on the replica!");
|
||||
}
|
||||
return PreparedQuery{
|
||||
@ -3765,9 +3802,9 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
|
||||
&query_execution->notifications, current_db_);
|
||||
} else if (utils::Downcast<ReplicationQuery>(parsed_query.query)) {
|
||||
/// TODO: make replication DB agnostic
|
||||
prepared_query = PrepareReplicationQuery(std::move(parsed_query), in_explicit_transaction_,
|
||||
&query_execution->notifications, *interpreter_context_->dbms_handler,
|
||||
interpreter_context_->config, interpreter_context_->repl_state);
|
||||
prepared_query =
|
||||
PrepareReplicationQuery(std::move(parsed_query), in_explicit_transaction_, &query_execution->notifications,
|
||||
*interpreter_context_->dbms_handler, interpreter_context_->config);
|
||||
} else if (utils::Downcast<LockPathQuery>(parsed_query.query)) {
|
||||
prepared_query = PrepareLockPathQuery(std::move(parsed_query), in_explicit_transaction_, current_db_);
|
||||
} else if (utils::Downcast<FreeMemoryQuery>(parsed_query.query)) {
|
||||
@ -3807,8 +3844,8 @@ Interpreter::PrepareResult Interpreter::Prepare(const std::string &query_string,
|
||||
throw MultiDatabaseQueryInMulticommandTxException();
|
||||
}
|
||||
/// SYSTEM (Replication) + INTERPRETER
|
||||
prepared_query = PrepareMultiDatabaseQuery(std::move(parsed_query), current_db_, interpreter_context_, on_change_,
|
||||
interpreter_context_->repl_state);
|
||||
prepared_query =
|
||||
PrepareMultiDatabaseQuery(std::move(parsed_query), current_db_, interpreter_context_, on_change_);
|
||||
} else if (utils::Downcast<ShowDatabasesQuery>(parsed_query.query)) {
|
||||
/// SYSTEM PURE ("SHOW DATABASES")
|
||||
/// INTERPRETER (TODO: "SHOW DATABASE")
|
||||
|
@ -1138,6 +1138,11 @@ class ExpandVariableCursor : public Cursor {
|
||||
edges_it_.emplace_back(edges_.back().begin());
|
||||
}
|
||||
|
||||
if (self_.filter_lambda_.accumulated_path_symbol) {
|
||||
// Add initial vertex of path to the accumulated path
|
||||
frame[self_.filter_lambda_.accumulated_path_symbol.value()] = Path(vertex);
|
||||
}
|
||||
|
||||
// reset the frame value to an empty edge list
|
||||
auto *pull_memory = context.evaluation_context.memory;
|
||||
frame[self_.common_.edge_symbol] = TypedValue::TVector(pull_memory);
|
||||
@ -1234,6 +1239,13 @@ class ExpandVariableCursor : public Cursor {
|
||||
// Skip expanding out of filtered expansion.
|
||||
frame[self_.filter_lambda_.inner_edge_symbol] = current_edge.first;
|
||||
frame[self_.filter_lambda_.inner_node_symbol] = current_vertex;
|
||||
if (self_.filter_lambda_.accumulated_path_symbol) {
|
||||
MG_ASSERT(frame[self_.filter_lambda_.accumulated_path_symbol.value()].IsPath(),
|
||||
"Accumulated path must be path");
|
||||
Path &accumulated_path = frame[self_.filter_lambda_.accumulated_path_symbol.value()].ValuePath();
|
||||
accumulated_path.Expand(current_edge.first);
|
||||
accumulated_path.Expand(current_vertex);
|
||||
}
|
||||
if (self_.filter_lambda_.expression && !EvaluateFilter(evaluator, self_.filter_lambda_.expression)) continue;
|
||||
|
||||
// we are doing depth-first search, so place the current
|
||||
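The accumulated-path bookkeeping above follows a simple discipline: the frame slot is seeded with a one-vertex path when the expansion starts, and before each filter evaluation the freshly traversed edge and its endpoint are appended. A minimal standalone sketch of that discipline, using a toy Path type in place of query::Path:

#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for query::Path: just the element names, in traversal order.
struct Path {
  std::vector<std::string> elements;
  explicit Path(std::string start_vertex) : elements{std::move(start_vertex)} {}
  void Expand(const std::string &element) { elements.push_back(element); }
};

int main() {
  Path accumulated_path("v0");  // seeded with the expansion's source vertex
  // One expansion step: append the traversed edge and the reached vertex,
  // then the filter lambda would run against this state.
  accumulated_path.Expand("e1");
  accumulated_path.Expand("v1");
  for (const auto &element : accumulated_path.elements) std::cout << element << ' ';
  std::cout << '\n';  // prints: v0 e1 v1
}
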
@ -1546,6 +1558,13 @@ class SingleSourceShortestPathCursor : public query::plan::Cursor {
|
||||
#endif
|
||||
frame[self_.filter_lambda_.inner_edge_symbol] = edge;
|
||||
frame[self_.filter_lambda_.inner_node_symbol] = vertex;
|
||||
if (self_.filter_lambda_.accumulated_path_symbol) {
|
||||
MG_ASSERT(frame[self_.filter_lambda_.accumulated_path_symbol.value()].IsPath(),
|
||||
"Accumulated path must have Path type");
|
||||
Path &accumulated_path = frame[self_.filter_lambda_.accumulated_path_symbol.value()].ValuePath();
|
||||
accumulated_path.Expand(edge);
|
||||
accumulated_path.Expand(vertex);
|
||||
}
|
||||
|
||||
if (self_.filter_lambda_.expression) {
|
||||
TypedValue result = self_.filter_lambda_.expression->Accept(evaluator);
|
||||
@ -1607,6 +1626,11 @@ class SingleSourceShortestPathCursor : public query::plan::Cursor {
|
||||
const auto &vertex = vertex_value.ValueVertex();
|
||||
processed_.emplace(vertex, std::nullopt);
|
||||
|
||||
if (self_.filter_lambda_.accumulated_path_symbol) {
|
||||
// Add initial vertex of path to the accumulated path
|
||||
frame[self_.filter_lambda_.accumulated_path_symbol.value()] = Path(vertex);
|
||||
}
|
||||
|
||||
expand_from_vertex(vertex);
|
||||
|
||||
// go back to loop start and see if we expanded anything
|
||||
@ -1677,6 +1701,10 @@ class SingleSourceShortestPathCursor : public query::plan::Cursor {
|
||||
namespace {
|
||||
|
||||
void CheckWeightType(TypedValue current_weight, utils::MemoryResource *memory) {
|
||||
if (current_weight.IsNull()) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!current_weight.IsNumeric() && !current_weight.IsDuration()) {
|
||||
throw QueryRuntimeException("Calculated weight must be numeric or a Duration, got {}.", current_weight.type());
|
||||
}
|
||||
@ -1694,6 +1722,34 @@ void CheckWeightType(TypedValue current_weight, utils::MemoryResource *memory) {
|
||||
}
|
||||
}
|
||||
|
||||
void ValidateWeightTypes(const TypedValue &lhs, const TypedValue &rhs) {
|
||||
if ((lhs.IsNumeric() && rhs.IsNumeric()) || (lhs.IsDuration() && rhs.IsDuration())) {
|
||||
return;
|
||||
}
|
||||
throw QueryRuntimeException(utils::MessageWithLink(
|
||||
"All weights should be of the same type, either numeric or a Duration. Please update the weight "
|
||||
"expression or the filter expression.",
|
||||
"https://memgr.ph/wsp"));
|
||||
}
|
||||
|
||||
TypedValue CalculateNextWeight(const std::optional<memgraph::query::plan::ExpansionLambda> &weight_lambda,
|
||||
const TypedValue &total_weight, ExpressionEvaluator evaluator) {
|
||||
if (!weight_lambda) {
|
||||
return {};
|
||||
}
|
||||
auto *memory = evaluator.GetMemoryResource();
|
||||
TypedValue current_weight = weight_lambda->expression->Accept(evaluator);
|
||||
CheckWeightType(current_weight, memory);
|
||||
|
||||
if (total_weight.IsNull()) {
|
||||
return current_weight;
|
||||
}
|
||||
|
||||
ValidateWeightTypes(current_weight, total_weight);
|
||||
|
||||
return TypedValue(current_weight, memory) + total_weight;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
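CalculateNextWeight centralizes what the weighted expansions previously did inline: the first evaluated weight simply becomes the running total, and every later weight must match it in kind (numeric with numeric, Duration with Duration) or the query fails. A compact sketch of that accumulation rule, with a toy Weight type standing in for TypedValue:

#include <chrono>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <variant>

// Toy weight: either numeric or a duration, the two kinds the planner accepts.
using Weight = std::variant<double, std::chrono::milliseconds>;

Weight Accumulate(const std::optional<Weight> &total, const Weight &current) {
  if (!total) return current;  // first hop: the current weight becomes the total
  if (total->index() != current.index())
    throw std::runtime_error("All weights should be of the same type");
  if (const auto *num = std::get_if<double>(&current)) return *num + std::get<double>(*total);
  return std::get<std::chrono::milliseconds>(current) + std::get<std::chrono::milliseconds>(*total);
}

int main() {
  std::optional<Weight> total;
  total = Accumulate(total, Weight{1.5});
  total = Accumulate(total, Weight{2.0});
  std::cout << std::get<double>(*total) << '\n';  // 3.5
  // Mixing kinds, e.g. Accumulate(total, Weight{std::chrono::milliseconds{10}}), throws.
}
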
class ExpandWeightedShortestPathCursor : public query::plan::Cursor {
|
||||
@ -1722,7 +1778,6 @@ class ExpandWeightedShortestPathCursor : public query::plan::Cursor {
|
||||
auto expand_pair = [this, &evaluator, &frame, &create_state, &context](
|
||||
const EdgeAccessor &edge, const VertexAccessor &vertex, const TypedValue &total_weight,
|
||||
int64_t depth) {
|
||||
auto *memory = evaluator.GetMemoryResource();
|
||||
#ifdef MG_ENTERPRISE
|
||||
if (license::global_license_checker.IsEnterpriseValidFast() && context.auth_checker &&
|
||||
!(context.auth_checker->Has(vertex, storage::View::OLD,
|
||||
@ -1731,32 +1786,31 @@ class ExpandWeightedShortestPathCursor : public query::plan::Cursor {
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
frame[self_.weight_lambda_->inner_edge_symbol] = edge;
|
||||
frame[self_.weight_lambda_->inner_node_symbol] = vertex;
|
||||
TypedValue next_weight = CalculateNextWeight(self_.weight_lambda_, total_weight, evaluator);
|
||||
|
||||
if (self_.filter_lambda_.expression) {
|
||||
frame[self_.filter_lambda_.inner_edge_symbol] = edge;
|
||||
frame[self_.filter_lambda_.inner_node_symbol] = vertex;
|
||||
if (self_.filter_lambda_.accumulated_path_symbol) {
|
||||
MG_ASSERT(frame[self_.filter_lambda_.accumulated_path_symbol.value()].IsPath(),
|
||||
"Accumulated path must be path");
|
||||
Path &accumulated_path = frame[self_.filter_lambda_.accumulated_path_symbol.value()].ValuePath();
|
||||
accumulated_path.Expand(edge);
|
||||
accumulated_path.Expand(vertex);
|
||||
|
||||
if (self_.filter_lambda_.accumulated_weight_symbol) {
|
||||
frame[self_.filter_lambda_.accumulated_weight_symbol.value()] = next_weight;
|
||||
}
|
||||
}
|
||||
|
||||
if (!EvaluateFilter(evaluator, self_.filter_lambda_.expression)) return;
|
||||
}
|
||||
|
||||
frame[self_.weight_lambda_->inner_edge_symbol] = edge;
|
||||
frame[self_.weight_lambda_->inner_node_symbol] = vertex;
|
||||
|
||||
TypedValue current_weight = self_.weight_lambda_->expression->Accept(evaluator);
|
||||
|
||||
CheckWeightType(current_weight, memory);
|
||||
|
||||
auto next_state = create_state(vertex, depth);
|
||||
|
||||
TypedValue next_weight = std::invoke([&] {
|
||||
if (total_weight.IsNull()) {
|
||||
return current_weight;
|
||||
}
|
||||
|
||||
ValidateWeightTypes(current_weight, total_weight);
|
||||
|
||||
return TypedValue(current_weight, memory) + total_weight;
|
||||
});
|
||||
|
||||
auto found_it = total_cost_.find(next_state);
|
||||
if (found_it != total_cost_.end() && (found_it->second.IsNull() || (found_it->second <= next_weight).ValueBool()))
|
||||
return;
|
||||
@ -1796,6 +1850,10 @@ class ExpandWeightedShortestPathCursor : public query::plan::Cursor {
|
||||
// Skip expansion for such nodes.
|
||||
if (node.IsNull()) continue;
|
||||
}
|
||||
if (self_.filter_lambda_.accumulated_path_symbol) {
|
||||
// Add initial vertex of path to the accumulated path
|
||||
frame[self_.filter_lambda_.accumulated_path_symbol.value()] = Path(vertex);
|
||||
}
|
||||
if (self_.upper_bound_) {
|
||||
upper_bound_ = EvaluateInt(&evaluator, self_.upper_bound_, "Max depth in weighted shortest path expansion");
|
||||
upper_bound_set_ = true;
|
||||
@ -1808,12 +1866,17 @@ class ExpandWeightedShortestPathCursor : public query::plan::Cursor {
|
||||
"Maximum depth in weighted shortest path expansion must be at "
|
||||
"least 1.");
|
||||
|
||||
frame[self_.weight_lambda_->inner_edge_symbol] = TypedValue();
|
||||
frame[self_.weight_lambda_->inner_node_symbol] = vertex;
|
||||
TypedValue current_weight =
|
||||
CalculateNextWeight(self_.weight_lambda_, /* total_weight */ TypedValue(), evaluator);
|
||||
|
||||
// Clear existing data structures.
|
||||
previous_.clear();
|
||||
total_cost_.clear();
|
||||
yielded_vertices_.clear();
|
||||
|
||||
pq_.emplace(TypedValue(), 0, vertex, std::nullopt);
|
||||
pq_.emplace(current_weight, 0, vertex, std::nullopt);
|
||||
// We are adding the starting vertex to the set of yielded vertices
|
||||
// because we don't want to yield paths that end with the starting
|
||||
// vertex.
|
||||
@ -1913,15 +1976,6 @@ class ExpandWeightedShortestPathCursor : public query::plan::Cursor {
|
||||
// Keeps track of vertices for which we yielded a path already.
|
||||
utils::pmr::unordered_set<VertexAccessor> yielded_vertices_;
|
||||
|
||||
static void ValidateWeightTypes(const TypedValue &lhs, const TypedValue &rhs) {
|
||||
if (!((lhs.IsNumeric() && lhs.IsNumeric()) || (rhs.IsDuration() && rhs.IsDuration()))) {
|
||||
throw QueryRuntimeException(utils::MessageWithLink(
|
||||
"All weights should be of the same type, either numeric or a Duration. Please update the weight "
|
||||
"expression or the filter expression.",
|
||||
"https://memgr.ph/wsp"));
|
||||
}
|
||||
}
|
||||
|
||||
// Priority queue comparator. Keep lowest weight on top of the queue.
|
||||
class PriorityQueueComparator {
|
||||
public:
|
||||
@ -1979,36 +2033,32 @@ class ExpandAllShortestPathsCursor : public query::plan::Cursor {
|
||||
// queue.
|
||||
auto expand_vertex = [this, &evaluator, &frame](const EdgeAccessor &edge, const EdgeAtom::Direction direction,
|
||||
const TypedValue &total_weight, int64_t depth) {
|
||||
auto *memory = evaluator.GetMemoryResource();
|
||||
|
||||
auto const &next_vertex = direction == EdgeAtom::Direction::IN ? edge.From() : edge.To();
|
||||
|
||||
// Evaluate current weight
|
||||
frame[self_.weight_lambda_->inner_edge_symbol] = edge;
|
||||
frame[self_.weight_lambda_->inner_node_symbol] = next_vertex;
|
||||
TypedValue next_weight = CalculateNextWeight(self_.weight_lambda_, total_weight, evaluator);
|
||||
|
||||
// If filter expression exists, evaluate filter
|
||||
if (self_.filter_lambda_.expression) {
|
||||
frame[self_.filter_lambda_.inner_edge_symbol] = edge;
|
||||
frame[self_.filter_lambda_.inner_node_symbol] = next_vertex;
|
||||
if (self_.filter_lambda_.accumulated_path_symbol) {
|
||||
MG_ASSERT(frame[self_.filter_lambda_.accumulated_path_symbol.value()].IsPath(),
|
||||
"Accumulated path must be path");
|
||||
Path &accumulated_path = frame[self_.filter_lambda_.accumulated_path_symbol.value()].ValuePath();
|
||||
accumulated_path.Expand(edge);
|
||||
accumulated_path.Expand(next_vertex);
|
||||
|
||||
if (self_.filter_lambda_.accumulated_weight_symbol) {
|
||||
frame[self_.filter_lambda_.accumulated_weight_symbol.value()] = next_weight;
|
||||
}
|
||||
}
|
||||
|
||||
if (!EvaluateFilter(evaluator, self_.filter_lambda_.expression)) return;
|
||||
}
|
||||
|
||||
// Evaluate current weight
|
||||
frame[self_.weight_lambda_->inner_edge_symbol] = edge;
|
||||
frame[self_.weight_lambda_->inner_node_symbol] = next_vertex;
|
||||
|
||||
TypedValue current_weight = self_.weight_lambda_->expression->Accept(evaluator);
|
||||
|
||||
CheckWeightType(current_weight, memory);
|
||||
|
||||
TypedValue next_weight = std::invoke([&] {
|
||||
if (total_weight.IsNull()) {
|
||||
return current_weight;
|
||||
}
|
||||
|
||||
ValidateWeightTypes(current_weight, total_weight);
|
||||
|
||||
return TypedValue(current_weight, memory) + total_weight;
|
||||
});
|
||||
|
||||
auto found_it = visited_cost_.find(next_vertex);
|
||||
// Check if the vertex has already been processed.
|
||||
if (found_it != visited_cost_.end()) {
|
||||
@ -2200,7 +2250,17 @@ class ExpandAllShortestPathsCursor : public query::plan::Cursor {
|
||||
traversal_stack_.clear();
|
||||
total_cost_.clear();
|
||||
|
||||
expand_from_vertex(*start_vertex, TypedValue(), 0);
|
||||
if (self_.filter_lambda_.accumulated_path_symbol) {
|
||||
// Add initial vertex of path to the accumulated path
|
||||
frame[self_.filter_lambda_.accumulated_path_symbol.value()] = Path(*start_vertex);
|
||||
}
|
||||
|
||||
frame[self_.weight_lambda_->inner_edge_symbol] = TypedValue();
|
||||
frame[self_.weight_lambda_->inner_node_symbol] = *start_vertex;
|
||||
TypedValue current_weight =
|
||||
CalculateNextWeight(self_.weight_lambda_, /* total_weight */ TypedValue(), evaluator);
|
||||
|
||||
expand_from_vertex(*start_vertex, current_weight, 0);
|
||||
visited_cost_.emplace(*start_vertex, 0);
|
||||
frame[self_.common_.edge_symbol] = TypedValue::TVector(memory);
|
||||
}
|
||||
@ -2252,15 +2312,6 @@ class ExpandAllShortestPathsCursor : public query::plan::Cursor {
|
||||
// Stack indicating the traversal level.
|
||||
utils::pmr::list<utils::pmr::list<DirectedEdge>> traversal_stack_;
|
||||
|
||||
static void ValidateWeightTypes(const TypedValue &lhs, const TypedValue &rhs) {
|
||||
if (!((lhs.IsNumeric() && lhs.IsNumeric()) || (rhs.IsDuration() && rhs.IsDuration()))) {
|
||||
throw QueryRuntimeException(utils::MessageWithLink(
|
||||
"All weights should be of the same type, either numeric or a Duration. Please update the weight "
|
||||
"expression or the filter expression.",
|
||||
"https://memgr.ph/wsp"));
|
||||
}
|
||||
}
|
||||
|
||||
// Priority queue comparator. Keep lowest weight on top of the queue.
|
||||
class PriorityQueueComparator {
|
||||
public:
|
||||
@ -2500,13 +2551,16 @@ std::vector<Symbol> EvaluatePatternFilter::ModifiedSymbols(const SymbolTable &ta
|
||||
}
|
||||
|
||||
bool EvaluatePatternFilter::EvaluatePatternFilterCursor::Pull(Frame &frame, ExecutionContext &context) {
|
||||
OOMExceptionEnabler oom_exception;
|
||||
SCOPED_PROFILE_OP("EvaluatePatternFilter");
|
||||
std::function<void(TypedValue *)> function = [&frame, self = this->self_, input_cursor = this->input_cursor_.get(),
|
||||
&context](TypedValue *return_value) {
|
||||
OOMExceptionEnabler oom_exception;
|
||||
input_cursor->Reset();
|
||||
|
||||
input_cursor_->Reset();
|
||||
|
||||
frame[self_.output_symbol_] = TypedValue(input_cursor_->Pull(frame, context), context.evaluation_context.memory);
|
||||
*return_value = TypedValue(input_cursor->Pull(frame, context), context.evaluation_context.memory);
|
||||
};
|
||||
|
||||
frame[self_.output_symbol_] = TypedValue(std::move(function));
|
||||
return true;
|
||||
}
|
||||
|
||||
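The rewritten EvaluatePatternFilter cursor no longer pulls its subtree eagerly; it parks a callable in the output frame slot, and the Exists visitor shown earlier in the diff invokes that callable only when the value is genuinely needed, after cheaper conjuncts have already passed. A stripped-down sketch of that deferral, with a plain map standing in for the frame:

#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

int main() {
  // Frame slot holds the work to do, not the answer.
  std::unordered_map<std::string, std::function<void(bool *)>> frame;
  frame["exists_symbol"] = [](bool *return_value) {
    std::cout << "pulling the pattern subtree now\n";  // the expensive part, run lazily
    *return_value = true;
  };

  // Exists visitor side: force the value only when it is actually required.
  bool result = false;
  frame["exists_symbol"](&result);
  std::cout << std::boolalpha << result << '\n';
}
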
@ -3622,6 +3676,7 @@ class AggregateCursor : public Cursor {
|
||||
void ProcessOne(const Frame &frame, ExpressionEvaluator *evaluator) {
|
||||
// Preallocated group_by, since most of the time the aggregation key won't be unique
|
||||
reused_group_by_.clear();
|
||||
evaluator->ResetPropertyLookupCache();
|
||||
|
||||
for (Expression *expression : self_.group_by_) {
|
||||
reused_group_by_.emplace_back(expression->Accept(*evaluator));
|
||||
@ -4780,6 +4835,12 @@ class CallProcedureCursor : public Cursor {
|
||||
|
||||
AbortCheck(context);
|
||||
|
||||
auto skip_rows_with_deleted_values = [this]() {
|
||||
while (result_row_it_ != result_->rows.end() && result_row_it_->has_deleted_values) {
|
||||
++result_row_it_;
|
||||
}
|
||||
};
|
||||
|
||||
// We need to fetch new procedure results after pulling from input.
|
||||
// TODO: Look into openCypher's distinction between procedures returning an
|
||||
// empty result set vs procedures which return `void`. We currently don't
|
||||
@ -4789,7 +4850,7 @@ class CallProcedureCursor : public Cursor {
|
||||
// It might be a good idea to resolve the procedure name once, at the
|
||||
// start. Unfortunately, this could deadlock if we tried to invoke a
|
||||
// procedure from a module (read lock) and reload a module (write lock)
|
||||
// inside the same execution thread. Also, our RWLock is setup so that
|
||||
// inside the same execution thread. Also, our RWLock is set up so that
|
||||
// it's not possible for a single thread to request multiple read locks.
|
||||
// Builtin module registration in query/procedure/module.cpp depends on
|
||||
// this locking scheme.
|
||||
@ -4837,6 +4898,7 @@ class CallProcedureCursor : public Cursor {
|
||||
graph_view);
|
||||
|
||||
result_->signature = &proc->results;
|
||||
result_->is_transactional = storage::IsTransactional(context.db_accessor->GetStorageMode());
|
||||
|
||||
// Use special memory as invoking procedure is complex
|
||||
// TODO: This will probably need to be changed when we add support for
|
||||
@ -4861,6 +4923,9 @@ class CallProcedureCursor : public Cursor {
|
||||
throw QueryRuntimeException("{}: {}", self_->procedure_name_, *result_->error_msg);
|
||||
}
|
||||
result_row_it_ = result_->rows.begin();
|
||||
if (!result_->is_transactional) {
|
||||
skip_rows_with_deleted_values();
|
||||
}
|
||||
|
||||
stream_exhausted = result_row_it_ == result_->rows.end();
|
||||
}
|
||||
@ -4890,6 +4955,9 @@ class CallProcedureCursor : public Cursor {
|
||||
}
|
||||
}
|
||||
++result_row_it_;
|
||||
if (!result_->is_transactional) {
|
||||
skip_rows_with_deleted_values();
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -917,12 +917,18 @@ struct ExpansionLambda {
|
||||
Symbol inner_node_symbol;
|
||||
/// Expression used in lambda during expansion.
|
||||
Expression *expression;
|
||||
/// Currently expanded accumulated path symbol.
|
||||
std::optional<Symbol> accumulated_path_symbol;
|
||||
/// Currently expanded accumulated weight symbol.
|
||||
std::optional<Symbol> accumulated_weight_symbol;
|
||||
|
||||
ExpansionLambda Clone(AstStorage *storage) const {
|
||||
ExpansionLambda object;
|
||||
object.inner_edge_symbol = inner_edge_symbol;
|
||||
object.inner_node_symbol = inner_node_symbol;
|
||||
object.expression = expression ? expression->Clone(storage) : nullptr;
|
||||
object.accumulated_path_symbol = accumulated_path_symbol;
|
||||
object.accumulated_weight_symbol = accumulated_weight_symbol;
|
||||
return object;
|
||||
}
|
||||
};
|
||||
|
@ -74,6 +74,13 @@ std::vector<Expansion> NormalizePatterns(const SymbolTable &symbol_table, const
|
||||
// Remove symbols which are bound by lambda arguments.
|
||||
collector.symbols_.erase(symbol_table.at(*edge->filter_lambda_.inner_edge));
|
||||
collector.symbols_.erase(symbol_table.at(*edge->filter_lambda_.inner_node));
|
||||
if (edge->filter_lambda_.accumulated_path) {
|
||||
collector.symbols_.erase(symbol_table.at(*edge->filter_lambda_.accumulated_path));
|
||||
|
||||
if (edge->filter_lambda_.accumulated_weight) {
|
||||
collector.symbols_.erase(symbol_table.at(*edge->filter_lambda_.accumulated_weight));
|
||||
}
|
||||
}
|
||||
if (edge->type_ == EdgeAtom::Type::WEIGHTED_SHORTEST_PATH ||
|
||||
edge->type_ == EdgeAtom::Type::ALL_SHORTEST_PATHS) {
|
||||
collector.symbols_.erase(symbol_table.at(*edge->weight_lambda_.inner_edge));
|
||||
@ -295,6 +302,13 @@ void Filters::CollectPatternFilters(Pattern &pattern, SymbolTable &symbol_table,
|
||||
prop_pair.second->Accept(collector);
|
||||
collector.symbols_.emplace(symbol_table.at(*atom->filter_lambda_.inner_node));
|
||||
collector.symbols_.emplace(symbol_table.at(*atom->filter_lambda_.inner_edge));
|
||||
if (atom->filter_lambda_.accumulated_path) {
|
||||
collector.symbols_.emplace(symbol_table.at(*atom->filter_lambda_.accumulated_path));
|
||||
|
||||
if (atom->filter_lambda_.accumulated_weight) {
|
||||
collector.symbols_.emplace(symbol_table.at(*atom->filter_lambda_.accumulated_weight));
|
||||
}
|
||||
}
|
||||
// First handle the inline property filter.
|
||||
auto *property_lookup = storage.Create<PropertyLookup>(atom->filter_lambda_.inner_edge, prop_pair.first);
|
||||
auto *prop_equal = storage.Create<EqualOperator>(property_lookup, prop_pair.second);
|
||||
|
@ -106,6 +106,11 @@ class IndexLookupRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
prev_ops_.pop_back();
|
||||
ExpressionRemovalResult removal = RemoveExpressions(op.expression_, filter_exprs_for_removal_);
|
||||
op.expression_ = removal.trimmed_expression;
|
||||
if (op.expression_) {
|
||||
Filters leftover_filters;
|
||||
leftover_filters.CollectFilterExpression(op.expression_, *symbol_table_);
|
||||
op.all_filters_ = std::move(leftover_filters);
|
||||
}
|
||||
|
||||
// edge uniqueness filter comes always before filter in plan generation
|
||||
LogicalOperator *input = op.input().get();
|
||||
@ -171,6 +176,11 @@ class IndexLookupRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
if (expand.common_.existing_node) {
|
||||
return true;
|
||||
}
|
||||
if (expand.type_ == EdgeAtom::Type::BREADTH_FIRST && expand.filter_lambda_.accumulated_path_symbol) {
|
||||
// When accumulated path is used, we cannot use ST shortest path algorithm.
|
||||
return false;
|
||||
}
|
||||
|
||||
std::unique_ptr<ScanAll> indexed_scan;
|
||||
ScanAll dst_scan(expand.input(), expand.common_.node_symbol, storage::View::OLD);
|
||||
// With expand to existing we only get real gains with BFS, because we use a
|
||||
|
@ -59,6 +59,12 @@ class JoinRewriter final : public HierarchicalLogicalOperatorVisitor {
|
||||
|
||||
ExpressionRemovalResult removal = RemoveExpressions(op.expression_, filter_exprs_for_removal_);
|
||||
op.expression_ = removal.trimmed_expression;
|
||||
if (op.expression_) {
|
||||
Filters leftover_filters;
|
||||
leftover_filters.CollectFilterExpression(op.expression_, *symbol_table_);
|
||||
op.all_filters_ = std::move(leftover_filters);
|
||||
}
|
||||
|
||||
if (!op.expression_ || utils::Contains(filter_exprs_for_removal_, op.expression_)) {
|
||||
SetOnParent(op.input());
|
||||
}
|
||||
|
@ -17,6 +17,7 @@
|
||||
#include <stack>
|
||||
#include <unordered_set>
|
||||
|
||||
#include "query/plan/preprocess.hpp"
|
||||
#include "utils/algorithm.hpp"
|
||||
#include "utils/exceptions.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
@ -516,14 +517,25 @@ bool HasBoundFilterSymbols(const std::unordered_set<Symbol> &bound_symbols, cons
|
||||
|
||||
Expression *ExtractFilters(const std::unordered_set<Symbol> &bound_symbols, Filters &filters, AstStorage &storage) {
|
||||
Expression *filter_expr = nullptr;
|
||||
std::vector<FilterInfo> and_joinable_filters{};
|
||||
for (auto filters_it = filters.begin(); filters_it != filters.end();) {
|
||||
if (HasBoundFilterSymbols(bound_symbols, *filters_it)) {
|
||||
filter_expr = impl::BoolJoin<AndOperator>(storage, filter_expr, filters_it->expression);
|
||||
and_joinable_filters.emplace_back(*filters_it);
|
||||
filters_it = filters.erase(filters_it);
|
||||
} else {
|
||||
filters_it++;
|
||||
}
|
||||
}
|
||||
// Idea here is to join filters in a way
|
||||
// that pattern filter ( exists() ) is at the end
|
||||
// so if any of the AND filters before
|
||||
// evaluate to false we don't need to
|
||||
// evaluate pattern ( exists() ) filter
|
||||
std::partition(and_joinable_filters.begin(), and_joinable_filters.end(),
|
||||
[](const FilterInfo &filter_info) { return filter_info.type != FilterInfo::Type::Pattern; });
|
||||
for (auto &and_joinable_filter : and_joinable_filters) {
|
||||
filter_expr = impl::BoolJoin<AndOperator>(storage, filter_expr, and_joinable_filter.expression);
|
||||
}
|
||||
return filter_expr;
|
||||
}
|
||||
|
||||
|
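The new ExtractFilters first collects every AND-joinable filter and only then partitions the collection so that pattern (exists()) filters end up last, which lets the left-to-right AND chain short-circuit past the expensive pattern evaluation whenever an earlier conjunct is already false. A self-contained sketch of that ordering trick, with a hypothetical FilterInfo-like struct:

#include <algorithm>
#include <functional>
#include <iostream>
#include <vector>

// Hypothetical stand-in for FilterInfo: a predicate plus a flag marking
// expensive pattern (exists()) filters.
struct Pred {
  bool is_pattern;
  std::function<bool()> eval;
};

int main() {
  std::vector<Pred> filters{
      {true, [] { std::cout << "evaluating exists()\n"; return true; }},
      {false, [] { std::cout << "evaluating cheap filter\n"; return false; }},
  };
  // Move non-pattern filters to the front; pattern filters go last.
  std::partition(filters.begin(), filters.end(), [](const Pred &f) { return !f.is_pattern; });
  // AND-join left to right: once a conjunct is false, the rest is skipped.
  bool result = true;
  for (const auto &f : filters) {
    if (!(result = result && f.eval())) break;
  }
  std::cout << std::boolalpha << result << '\n';  // exists() was never evaluated
}
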
@ -511,10 +511,6 @@ class RuleBasedPlanner {
|
||||
|
||||
std::set<ExpansionGroupId> visited_expansion_groups;
|
||||
|
||||
last_op =
|
||||
GenerateExpansionOnAlreadySeenSymbols(std::move(last_op), matching, visited_expansion_groups, symbol_table,
|
||||
storage, bound_symbols, new_symbols, named_paths, filters, view);
|
||||
|
||||
// We want to create separate branches of scan operators for each expansion group group of patterns
|
||||
// Whenever there are 2 scan branches, they will be joined with a Cartesian operator
|
||||
|
||||
@ -528,6 +524,14 @@ class RuleBasedPlanner {
|
||||
continue;
|
||||
}
|
||||
|
||||
last_op =
|
||||
GenerateExpansionOnAlreadySeenSymbols(std::move(last_op), matching, visited_expansion_groups, symbol_table,
|
||||
storage, bound_symbols, new_symbols, named_paths, filters, view);
|
||||
|
||||
if (visited_expansion_groups.contains(expansion.expansion_group_id)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
std::unique_ptr<LogicalOperator> starting_expansion_operator = nullptr;
|
||||
if (!initial_expansion_done) {
|
||||
starting_expansion_operator = std::move(last_op);
|
||||
@ -705,9 +709,9 @@ class RuleBasedPlanner {
|
||||
std::optional<Symbol> total_weight;
|
||||
|
||||
if (edge->type_ == EdgeAtom::Type::WEIGHTED_SHORTEST_PATH || edge->type_ == EdgeAtom::Type::ALL_SHORTEST_PATHS) {
|
||||
weight_lambda.emplace(ExpansionLambda{symbol_table.at(*edge->weight_lambda_.inner_edge),
symbol_table.at(*edge->weight_lambda_.inner_node),
edge->weight_lambda_.expression});
weight_lambda.emplace(ExpansionLambda{.inner_edge_symbol = symbol_table.at(*edge->weight_lambda_.inner_edge),
.inner_node_symbol = symbol_table.at(*edge->weight_lambda_.inner_node),
.expression = edge->weight_lambda_.expression});

total_weight.emplace(symbol_table.at(*edge->total_weight_));
}
@ -715,12 +719,28 @@ class RuleBasedPlanner {
ExpansionLambda filter_lambda;
filter_lambda.inner_edge_symbol = symbol_table.at(*edge->filter_lambda_.inner_edge);
filter_lambda.inner_node_symbol = symbol_table.at(*edge->filter_lambda_.inner_node);
if (edge->filter_lambda_.accumulated_path) {
filter_lambda.accumulated_path_symbol = symbol_table.at(*edge->filter_lambda_.accumulated_path);

if (edge->filter_lambda_.accumulated_weight) {
filter_lambda.accumulated_weight_symbol = symbol_table.at(*edge->filter_lambda_.accumulated_weight);
}
}
{
// Bind the inner edge and node symbols so they're available for
// inline filtering in ExpandVariable.
bool inner_edge_bound = bound_symbols.insert(filter_lambda.inner_edge_symbol).second;
bool inner_node_bound = bound_symbols.insert(filter_lambda.inner_node_symbol).second;
MG_ASSERT(inner_edge_bound && inner_node_bound, "An inner edge and node can't be bound from before");
if (filter_lambda.accumulated_path_symbol) {
bool accumulated_path_bound = bound_symbols.insert(*filter_lambda.accumulated_path_symbol).second;
MG_ASSERT(accumulated_path_bound, "The accumulated path can't be bound from before");

if (filter_lambda.accumulated_weight_symbol) {
bool accumulated_weight_bound = bound_symbols.insert(*filter_lambda.accumulated_weight_symbol).second;
MG_ASSERT(accumulated_weight_bound, "The accumulated weight can't be bound from before");
}
}
}
// Join regular filters with lambda filter expression, so that they
// are done inline together. Semantic analysis should guarantee that
@ -731,15 +751,34 @@ class RuleBasedPlanner {
// filtering (they use the inner symbols). If they were not collected,
// we have to remove them manually because no other filter-extraction
// will ever bind them again.
filters.erase(
std::remove_if(filters.begin(), filters.end(),
[e = filter_lambda.inner_edge_symbol, n = filter_lambda.inner_node_symbol](FilterInfo &fi) {
return utils::Contains(fi.used_symbols, e) || utils::Contains(fi.used_symbols, n);
}),
filters.end());
std::vector<Symbol> inner_symbols = {filter_lambda.inner_edge_symbol, filter_lambda.inner_node_symbol};
if (filter_lambda.accumulated_path_symbol) {
inner_symbols.emplace_back(*filter_lambda.accumulated_path_symbol);

if (filter_lambda.accumulated_weight_symbol) {
inner_symbols.emplace_back(*filter_lambda.accumulated_weight_symbol);
}
}

filters.erase(std::remove_if(filters.begin(), filters.end(),
[&inner_symbols](FilterInfo &fi) {
for (const auto &symbol : inner_symbols) {
if (utils::Contains(fi.used_symbols, symbol)) return true;
}
return false;
}),
filters.end());

// Unbind the temporarily bound inner symbols for filtering.
bound_symbols.erase(filter_lambda.inner_edge_symbol);
bound_symbols.erase(filter_lambda.inner_node_symbol);
if (filter_lambda.accumulated_path_symbol) {
bound_symbols.erase(*filter_lambda.accumulated_path_symbol);

if (filter_lambda.accumulated_weight_symbol) {
bound_symbols.erase(*filter_lambda.accumulated_weight_symbol);
}
}

if (total_weight) {
bound_symbols.insert(*total_weight);
@ -862,13 +901,14 @@ class RuleBasedPlanner {
std::unique_ptr<LogicalOperator> GenFilters(std::unique_ptr<LogicalOperator> last_op,
const std::unordered_set<Symbol> &bound_symbols, Filters &filters,
AstStorage &storage, const SymbolTable &symbol_table) {
auto all_filters = filters;
auto pattern_filters = ExtractPatternFilters(filters, symbol_table, storage, bound_symbols);
auto *filter_expr = impl::ExtractFilters(bound_symbols, filters, storage);

if (filter_expr) {
last_op =
std::make_unique<Filter>(std::move(last_op), std::move(pattern_filters), filter_expr, std::move(all_filters));
Filters operator_filters;
operator_filters.CollectFilterExpression(filter_expr, symbol_table);
last_op = std::make_unique<Filter>(std::move(last_op), std::move(pattern_filters), filter_expr,
std::move(operator_filters));
}
return last_op;
}

@ -72,8 +72,9 @@ void AddNextExpansions(const Symbol &node_symbol, const Matching &matching, cons
// We are not expanding from node1, so flip the expansion.
DMG_ASSERT(expansion.node2 && symbol_table.at(*expansion.node2->identifier_) == node_symbol,
"Expected node_symbol to be bound in node2");
if (expansion.edge->type_ != EdgeAtom::Type::BREADTH_FIRST) {
if (expansion.edge->type_ != EdgeAtom::Type::BREADTH_FIRST && !expansion.edge->filter_lambda_.accumulated_path) {
// BFS must *not* be flipped. Doing that changes the BFS results.
// When filter lambda uses accumulated path, path must not be flipped.
std::swap(expansion.node1, expansion.node2);
expansion.is_flipped = true;
if (expansion.direction != EdgeAtom::Direction::BOTH) {
@ -32,6 +32,7 @@
#include "query/procedure/mg_procedure_helpers.hpp"
#include "query/stream/common.hpp"
#include "storage/v2/property_value.hpp"
#include "storage/v2/storage_mode.hpp"
#include "storage/v2/view.hpp"
#include "utils/algorithm.hpp"
#include "utils/concepts.hpp"
@ -313,12 +314,61 @@ mgp_value_type FromTypedValueType(memgraph::query::TypedValue::Type type) {
return MGP_VALUE_TYPE_LOCAL_DATE_TIME;
case memgraph::query::TypedValue::Type::Duration:
return MGP_VALUE_TYPE_DURATION;
case memgraph::query::TypedValue::Type::Function:
throw std::logic_error{"mgp_value for TypedValue::Type::Function doesn't exist."};
case memgraph::query::TypedValue::Type::Graph:
throw std::logic_error{"mgp_value for TypedValue::Type::Graph doesn't exist."};
}
}
} // namespace

bool IsDeleted(const mgp_vertex *vertex) { return vertex->getImpl().impl_.vertex_->deleted; }

bool IsDeleted(const mgp_edge *edge) { return edge->impl.IsDeleted(); }

bool ContainsDeleted(const mgp_path *path) {
return std::ranges::any_of(path->vertices, [](const auto &vertex) { return IsDeleted(&vertex); }) ||
std::ranges::any_of(path->edges, [](const auto &edge) { return IsDeleted(&edge); });
}

bool ContainsDeleted(const mgp_list *list) {
return std::ranges::any_of(list->elems, [](const auto &elem) { return ContainsDeleted(&elem); });
}

bool ContainsDeleted(const mgp_map *map) {
return std::ranges::any_of(map->items, [](const auto &item) { return ContainsDeleted(&item.second); });
}

bool ContainsDeleted(const mgp_value *val) {
switch (val->type) {
// Value types
case MGP_VALUE_TYPE_NULL:
case MGP_VALUE_TYPE_BOOL:
case MGP_VALUE_TYPE_INT:
case MGP_VALUE_TYPE_DOUBLE:
case MGP_VALUE_TYPE_STRING:
case MGP_VALUE_TYPE_DATE:
case MGP_VALUE_TYPE_LOCAL_TIME:
case MGP_VALUE_TYPE_LOCAL_DATE_TIME:
case MGP_VALUE_TYPE_DURATION:
return false;
// Reference types
case MGP_VALUE_TYPE_LIST:
return ContainsDeleted(val->list_v);
case MGP_VALUE_TYPE_MAP:
return ContainsDeleted(val->map_v);
case MGP_VALUE_TYPE_VERTEX:
return IsDeleted(val->vertex_v);
case MGP_VALUE_TYPE_EDGE:
return IsDeleted(val->edge_v);
case MGP_VALUE_TYPE_PATH:
return ContainsDeleted(val->path_v);
default:
throw memgraph::query::QueryRuntimeException("Value of unknown type");
}
return false;
}
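// Illustrative usage sketch: a C API module could gate its work on the new contains-deleted
// checks before reading a list. `list` and `ProcessList` are hypothetical names; only
// mgp_list_contains_deleted and mgp_error come from this change.
void ProcessList(mgp_list *list) {
  int contains_deleted = 0;
  if (mgp_list_contains_deleted(list, &contains_deleted) != mgp_error::MGP_ERROR_NO_ERROR) {
    return;  // a real module would surface the error instead of silently returning
  }
  if (contains_deleted != 0) {
    return;  // the list references deleted vertices/edges, so skip it
  }
  // safe to iterate over the list elements here
}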

memgraph::query::TypedValue ToTypedValue(const mgp_value &val, memgraph::utils::MemoryResource *memory) {
switch (val.type) {
case MGP_VALUE_TYPE_NULL:
@ -1001,6 +1051,10 @@ mgp_error mgp_list_copy(mgp_list *list, mgp_memory *memory, mgp_list **result) {

void mgp_list_destroy(mgp_list *list) { DeleteRawMgpObject(list); }

mgp_error mgp_list_contains_deleted(mgp_list *list, int *result) {
return WrapExceptions([list, result] { *result = ContainsDeleted(list); });
}

namespace {
void MgpListAppendExtend(mgp_list &list, const mgp_value &value) { list.elems.push_back(value); }
} // namespace
@ -1052,6 +1106,10 @@ mgp_error mgp_map_copy(mgp_map *map, mgp_memory *memory, mgp_map **result) {

void mgp_map_destroy(mgp_map *map) { DeleteRawMgpObject(map); }

mgp_error mgp_map_contains_deleted(mgp_map *map, int *result) {
return WrapExceptions([map, result] { *result = ContainsDeleted(map); });
}

mgp_error mgp_map_insert(mgp_map *map, const char *key, mgp_value *value) {
return WrapExceptions([&] {
auto emplace_result = map->items.emplace(key, *value);
@ -1175,6 +1233,10 @@ mgp_error mgp_path_copy(mgp_path *path, mgp_memory *memory, mgp_path **result) {

void mgp_path_destroy(mgp_path *path) { DeleteRawMgpObject(path); }

mgp_error mgp_path_contains_deleted(mgp_path *path, int *result) {
return WrapExceptions([path, result] { *result = ContainsDeleted(path); });
}

mgp_error mgp_path_expand(mgp_path *path, mgp_edge *edge) {
return WrapExceptions([path, edge] {
MG_ASSERT(Call<size_t>(mgp_path_size, path) == path->vertices.size() - 1, "Invalid mgp_path");
@ -1558,8 +1620,9 @@ mgp_error mgp_result_new_record(mgp_result *res, mgp_result_record **result) {
auto *memory = res->rows.get_allocator().GetMemoryResource();
MG_ASSERT(res->signature, "Expected to have a valid signature");
res->rows.push_back(mgp_result_record{
res->signature,
memgraph::utils::pmr::map<memgraph::utils::pmr::string, memgraph::query::TypedValue>(memory)});
.signature = res->signature,
.values = memgraph::utils::pmr::map<memgraph::utils::pmr::string, memgraph::query::TypedValue>(memory),
.ignore_deleted_values = !res->is_transactional});
return &res->rows.back();
},
result);
@ -1574,10 +1637,14 @@ mgp_error mgp_result_record_insert(mgp_result_record *record, const char *field_
if (find_it == record->signature->end()) {
throw std::out_of_range{fmt::format("The result doesn't have any field named '{}'.", field_name)};
}
if (record->ignore_deleted_values && ContainsDeleted(val)) [[unlikely]] {
record->has_deleted_values = true;
return;
}
const auto *type = find_it->second.first;
if (!type->SatisfiesType(*val)) {
throw std::logic_error{
fmt::format("The type of value doesn't satisfies the type '{}'!", type->GetPresentableName())};
fmt::format("The type of value doesn't satisfy the type '{}'!", type->GetPresentableName())};
}
record->values.emplace(field_name, ToTypedValue(*val, memory));
});
@ -1744,7 +1811,7 @@ memgraph::storage::PropertyValue ToPropertyValue(const mgp_value &value) {
return memgraph::storage::PropertyValue{memgraph::storage::TemporalData{memgraph::storage::TemporalType::Duration,
value.duration_v->duration.microseconds}};
case MGP_VALUE_TYPE_VERTEX:
throw ValueConversionException{"A vertex is not a valid property value! "};
throw ValueConversionException{"A vertex is not a valid property value!"};
case MGP_VALUE_TYPE_EDGE:
throw ValueConversionException{"An edge is not a valid property value!"};
case MGP_VALUE_TYPE_PATH:
@ -1960,6 +2027,10 @@ mgp_error mgp_vertex_copy(mgp_vertex *v, mgp_memory *memory, mgp_vertex **result

void mgp_vertex_destroy(mgp_vertex *v) { DeleteRawMgpObject(v); }

mgp_error mgp_vertex_is_deleted(mgp_vertex *v, int *result) {
return WrapExceptions([v] { return IsDeleted(v); }, result);
}

mgp_error mgp_vertex_equal(mgp_vertex *v1, mgp_vertex *v2, int *result) {
// NOLINTNEXTLINE(clang-diagnostic-unevaluated-expression)
static_assert(noexcept(*v1 == *v2));
@ -2317,6 +2388,10 @@ mgp_error mgp_edge_copy(mgp_edge *e, mgp_memory *memory, mgp_edge **result) {

void mgp_edge_destroy(mgp_edge *e) { DeleteRawMgpObject(e); }

mgp_error mgp_edge_is_deleted(mgp_edge *e, int *result) {
return WrapExceptions([e] { return IsDeleted(e); }, result);
}

mgp_error mgp_edge_equal(mgp_edge *e1, mgp_edge *e2, int *result) {
// NOLINTNEXTLINE(clang-diagnostic-unevaluated-expression)
static_assert(noexcept(*e1 == *e2));
@ -2862,6 +2937,11 @@ mgp_error mgp_list_all_unique_constraints(mgp_graph *graph, mgp_memory *memory,
});
}

mgp_error mgp_graph_is_transactional(mgp_graph *graph, int *result) {
*result = IsTransactional(graph->storage_mode) ? 1 : 0;
return mgp_error::MGP_ERROR_NO_ERROR;
}
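// Minimal sketch of how a procedure might branch on the new storage-mode information;
// `graph` is assumed to be the mgp_graph handed to the procedure callback.
int is_transactional = 0;
if (mgp_graph_is_transactional(graph, &is_transactional) == mgp_error::MGP_ERROR_NO_ERROR &&
    is_transactional == 0) {
  // IN_MEMORY_ANALYTICAL: other threads may delete objects concurrently, so results that
  // reference deleted values are filtered out rather than relying on transactional isolation.
}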

mgp_error mgp_graph_is_mutable(mgp_graph *graph, int *result) {
*result = MgpGraphIsMutable(*graph) ? 1 : 0;
return mgp_error::MGP_ERROR_NO_ERROR;
@ -3672,7 +3752,8 @@ std::ostream &PrintValue(const TypedValue &value, std::ostream *stream) {
case TypedValue::Type::Edge:
case TypedValue::Type::Path:
case TypedValue::Type::Graph:
LOG_FATAL("value must not be a graph element");
case TypedValue::Type::Function:
LOG_FATAL("value must not be a graph|function element");
}
}

@ -560,23 +560,24 @@ struct mgp_graph {
// TODO: Merge `mgp_graph` and `mgp_memory` into a single `mgp_context`. The
// `ctx` field is out of place here.
memgraph::query::ExecutionContext *ctx;
memgraph::storage::StorageMode storage_mode;

static mgp_graph WritableGraph(memgraph::query::DbAccessor &acc, memgraph::storage::View view,
memgraph::query::ExecutionContext &ctx) {
return mgp_graph{&acc, view, &ctx};
return mgp_graph{&acc, view, &ctx, acc.GetStorageMode()};
}

static mgp_graph NonWritableGraph(memgraph::query::DbAccessor &acc, memgraph::storage::View view) {
return mgp_graph{&acc, view, nullptr};
return mgp_graph{&acc, view, nullptr, acc.GetStorageMode()};
}

static mgp_graph WritableGraph(memgraph::query::SubgraphDbAccessor &acc, memgraph::storage::View view,
memgraph::query::ExecutionContext &ctx) {
return mgp_graph{&acc, view, &ctx};
return mgp_graph{&acc, view, &ctx, acc.GetStorageMode()};
}

static mgp_graph NonWritableGraph(memgraph::query::SubgraphDbAccessor &acc, memgraph::storage::View view) {
return mgp_graph{&acc, view, nullptr};
return mgp_graph{&acc, view, nullptr, acc.GetStorageMode()};
}
};

@ -585,6 +586,8 @@ struct mgp_result_record {
const memgraph::utils::pmr::map<memgraph::utils::pmr::string,
std::pair<const memgraph::query::procedure::CypherType *, bool>> *signature;
memgraph::utils::pmr::map<memgraph::utils::pmr::string, memgraph::query::TypedValue> values;
bool ignore_deleted_values = false;
bool has_deleted_values = false;
};

struct mgp_result {
@ -599,6 +602,7 @@ struct mgp_result {
std::pair<const memgraph::query::procedure::CypherType *, bool>> *signature;
memgraph::utils::pmr::vector<mgp_result_record> rows;
std::optional<memgraph::utils::pmr::string> error_msg;
bool is_transactional = true;
};

struct mgp_func_result {
@ -614,6 +618,7 @@ struct mgp_func_context {
memgraph::query::DbAccessor *impl;
memgraph::storage::View view;
};

struct mgp_properties_iterator {
using allocator_type = memgraph::utils::Allocator<mgp_properties_iterator>;

@ -724,6 +729,7 @@ struct ProcedureInfo {
bool is_batched{false};
std::optional<memgraph::query::AuthQuery::Privilege> required_privilege = std::nullopt;
};

struct mgp_proc {
using allocator_type = memgraph::utils::Allocator<mgp_proc>;

@ -984,4 +990,6 @@ struct mgp_messages {
storage_type messages;
};

bool ContainsDeleted(const mgp_value *val);

memgraph::query::TypedValue ToTypedValue(const mgp_value &val, memgraph::utils::MemoryResource *memory);
@ -25,6 +25,7 @@
#include "query/exceptions.hpp"
#include "query/procedure/mg_procedure_helpers.hpp"
#include "query/procedure/mg_procedure_impl.hpp"
#include "storage/v2/storage_mode.hpp"
#include "utils/memory.hpp"
#include "utils/on_scope_exit.hpp"
#include "utils/pmr/vector.hpp"
@ -867,7 +868,37 @@ py::Object MgpListToPyTuple(mgp_list *list, PyObject *py_graph) {
}

namespace {
std::optional<py::ExceptionInfo> AddRecordFromPython(mgp_result *result, py::Object py_record, mgp_memory *memory) {
struct RecordFieldCache {
PyObject *key;
PyObject *val;
const char *field_name;
mgp_value *field_val;
};

std::optional<py::ExceptionInfo> InsertField(PyObject *key, PyObject *val, mgp_result_record *record,
const char *field_name, mgp_value *field_val) {
if (mgp_result_record_insert(record, field_name, field_val) != mgp_error::MGP_ERROR_NO_ERROR) {
std::stringstream ss;
ss << "Unable to insert field '" << py::Object::FromBorrow(key) << "' with value: '" << py::Object::FromBorrow(val)
<< "'; did you set the correct field type?";
const auto &msg = ss.str();
PyErr_SetString(PyExc_ValueError, msg.c_str());
mgp_value_destroy(field_val);
return py::FetchError();
}
mgp_value_destroy(field_val);
return std::nullopt;
}

void SkipRecord(mgp_value *field_val, std::vector<RecordFieldCache> &current_record_cache) {
mgp_value_destroy(field_val);
for (auto &cache_entry : current_record_cache) {
mgp_value_destroy(cache_entry.field_val);
}
}

std::optional<py::ExceptionInfo> AddRecordFromPython(mgp_result *result, py::Object py_record, mgp_graph *graph,
mgp_memory *memory) {
py::Object py_mgp(PyImport_ImportModule("mgp"));
if (!py_mgp) return py::FetchError();
auto record_cls = py_mgp.GetAttr("Record");
@ -888,15 +919,27 @@ std::optional<py::ExceptionInfo> AddRecordFromPython(mgp_result *result, py::Obj
py::Object items(PyDict_Items(fields.Ptr()));
if (!items) return py::FetchError();
mgp_result_record *record{nullptr};
if (RaiseExceptionFromErrorCode(mgp_result_new_record(result, &record))) {
return py::FetchError();
const auto is_transactional = storage::IsTransactional(graph->storage_mode);
if (is_transactional) {
// IN_MEMORY_ANALYTICAL must first verify that the record contains no deleted values
if (RaiseExceptionFromErrorCode(mgp_result_new_record(result, &record))) {
return py::FetchError();
}
}
std::vector<RecordFieldCache> current_record_cache{};

utils::OnScopeExit clear_record_cache{[&current_record_cache] {
for (auto &record : current_record_cache) {
mgp_value_destroy(record.field_val);
}
}};

Py_ssize_t len = PyList_GET_SIZE(items.Ptr());
for (Py_ssize_t i = 0; i < len; ++i) {
auto *item = PyList_GET_ITEM(items.Ptr(), i);
if (!item) return py::FetchError();
MG_ASSERT(PyTuple_Check(item));
auto *key = PyTuple_GetItem(item, 0);
PyObject *key = PyTuple_GetItem(item, 0);
if (!key) return py::FetchError();
if (!PyUnicode_Check(key)) {
std::stringstream ss;
@ -905,30 +948,48 @@ std::optional<py::ExceptionInfo> AddRecordFromPython(mgp_result *result, py::Obj
PyErr_SetString(PyExc_TypeError, msg.c_str());
return py::FetchError();
}
const auto *field_name = PyUnicode_AsUTF8(key);
const char *field_name = PyUnicode_AsUTF8(key);
if (!field_name) return py::FetchError();
auto *val = PyTuple_GetItem(item, 1);
PyObject *val = PyTuple_GetItem(item, 1);
if (!val) return py::FetchError();
// This memory is dedicated to mg_procedure.
mgp_value *field_val = PyObjectToMgpValueWithPythonExceptions(val, memory);
if (field_val == nullptr) {
return py::FetchError();
}
if (mgp_result_record_insert(record, field_name, field_val) != mgp_error::MGP_ERROR_NO_ERROR) {
std::stringstream ss;
ss << "Unable to insert field '" << py::Object::FromBorrow(key) << "' with value: '"
<< py::Object::FromBorrow(val) << "'; did you set the correct field type?";
const auto &msg = ss.str();
PyErr_SetString(PyExc_ValueError, msg.c_str());
mgp_value_destroy(field_val);
return py::FetchError();

if (!is_transactional) {
// If a deleted value is being inserted into a record, skip the whole record
if (ContainsDeleted(field_val)) {
SkipRecord(field_val, current_record_cache);
return std::nullopt;
}
current_record_cache.emplace_back(
RecordFieldCache{.key = key, .val = val, .field_name = field_name, .field_val = field_val});
} else {
auto maybe_exc = InsertField(key, val, record, field_name, field_val);
if (maybe_exc) return maybe_exc;
}
mgp_value_destroy(field_val);
}

if (is_transactional) {
return std::nullopt;
}

// IN_MEMORY_ANALYTICAL only adds a new record after verifying that it contains no deleted values
if (RaiseExceptionFromErrorCode(mgp_result_new_record(result, &record))) {
return py::FetchError();
}
for (auto &cache_entry : current_record_cache) {
auto maybe_exc =
InsertField(cache_entry.key, cache_entry.val, record, cache_entry.field_name, cache_entry.field_val);
if (maybe_exc) return maybe_exc;
}

return std::nullopt;
}

std::optional<py::ExceptionInfo> AddMultipleRecordsFromPython(mgp_result *result, py::Object py_seq,
std::optional<py::ExceptionInfo> AddMultipleRecordsFromPython(mgp_result *result, py::Object py_seq, mgp_graph *graph,
mgp_memory *memory) {
Py_ssize_t len = PySequence_Size(py_seq.Ptr());
if (len == -1) return py::FetchError();
@ -938,7 +999,7 @@ std::optional<py::ExceptionInfo> AddMultipleRecordsFromPython(mgp_result *result
for (Py_ssize_t i = 0, curr_item = 0; i < len; ++i, ++curr_item) {
py::Object py_record(PySequence_GetItem(py_seq.Ptr(), curr_item));
if (!py_record) return py::FetchError();
auto maybe_exc = AddRecordFromPython(result, py_record, memory);
auto maybe_exc = AddRecordFromPython(result, py_record, graph, memory);
if (maybe_exc) return maybe_exc;
// Once PySequence_DelSlice deletes "transformed" objects, starting index is 0 again.
if (i && i % del_cnt == 0) {
@ -952,14 +1013,14 @@ std::optional<py::ExceptionInfo> AddMultipleRecordsFromPython(mgp_result *result
}

std::optional<py::ExceptionInfo> AddMultipleBatchRecordsFromPython(mgp_result *result, py::Object py_seq,
mgp_memory *memory) {
mgp_graph *graph, mgp_memory *memory) {
Py_ssize_t len = PySequence_Size(py_seq.Ptr());
if (len == -1) return py::FetchError();
result->rows.reserve(len);
for (Py_ssize_t i = 0; i < len; ++i) {
py::Object py_record(PySequence_GetItem(py_seq.Ptr(), i));
if (!py_record) return py::FetchError();
auto maybe_exc = AddRecordFromPython(result, py_record, memory);
auto maybe_exc = AddRecordFromPython(result, py_record, graph, memory);
if (maybe_exc) return maybe_exc;
}
PySequence_DelSlice(py_seq.Ptr(), 0, PySequence_Size(py_seq.Ptr()));
@ -1015,11 +1076,11 @@ void CallPythonProcedure(const py::Object &py_cb, mgp_list *args, mgp_graph *gra
if (!py_res) return py::FetchError();
if (PySequence_Check(py_res.Ptr())) {
if (is_batched) {
return AddMultipleBatchRecordsFromPython(result, py_res, memory);
return AddMultipleBatchRecordsFromPython(result, py_res, graph, memory);
}
return AddMultipleRecordsFromPython(result, py_res, memory);
return AddMultipleRecordsFromPython(result, py_res, graph, memory);
}
return AddRecordFromPython(result, py_res, memory);
return AddRecordFromPython(result, py_res, graph, memory);
};

// It is *VERY IMPORTANT* to note that this code takes great care not to keep
@ -1114,9 +1175,9 @@ void CallPythonTransformation(const py::Object &py_cb, mgp_messages *msgs, mgp_g
auto py_res = py_cb.Call(py_graph, py_messages);
if (!py_res) return py::FetchError();
if (PySequence_Check(py_res.Ptr())) {
return AddMultipleRecordsFromPython(result, py_res, memory);
return AddMultipleRecordsFromPython(result, py_res, graph, memory);
}
return AddRecordFromPython(result, py_res, memory);
return AddRecordFromPython(result, py_res, graph, memory);
};

// It is *VERY IMPORTANT* to note that this code takes great care not to keep
@ -1164,9 +1225,27 @@ void CallPythonFunction(const py::Object &py_cb, mgp_list *args, mgp_graph *grap
auto call = [&](py::Object py_graph) -> utils::BasicResult<std::optional<py::ExceptionInfo>, mgp_value *> {
py::Object py_args(MgpListToPyTuple(args, py_graph.Ptr()));
if (!py_args) return {py::FetchError()};
const auto is_transactional = storage::IsTransactional(graph->storage_mode);
auto py_res = py_cb.Call(py_graph, py_args);
if (!py_res) return {py::FetchError()};
mgp_value *ret_val = PyObjectToMgpValueWithPythonExceptions(py_res.Ptr(), memory);
if (!is_transactional && ContainsDeleted(ret_val)) {
mgp_value_destroy(ret_val);
mgp_value *null_val{nullptr};
mgp_error last_error{mgp_error::MGP_ERROR_NO_ERROR};

last_error = mgp_value_make_null(memory, &null_val);

if (last_error == mgp_error::MGP_ERROR_UNABLE_TO_ALLOCATE) {
throw std::bad_alloc{};
}
if (last_error != mgp_error::MGP_ERROR_NO_ERROR) {
throw std::runtime_error{"Unexpected error while creating mgp_value"};
}

return null_val;
}

if (ret_val == nullptr) {
return {py::FetchError()};
}
@ -99,7 +99,7 @@ void CallCustomTransformation(const std::string &transformation_name, const std:
mgp_messages mgp_messages{mgp_messages::storage_type{&memory_resource}};
std::transform(messages.begin(), messages.end(), std::back_inserter(mgp_messages.messages),
[](const TMessage &message) { return mgp_message{message}; });
mgp_graph graph{&db_accessor, storage::View::OLD, nullptr};
mgp_graph graph{&db_accessor, storage::View::OLD, nullptr, db_accessor.GetStorageMode()};
mgp_memory memory{&memory_resource};
result.rows.clear();
result.error_msg.reset();

@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@ -22,6 +22,7 @@
#include "storage/v2/temporal.hpp"
#include "utils/exceptions.hpp"
#include "utils/fnv.hpp"
#include "utils/logging.hpp"
#include "utils/memory.hpp"

namespace memgraph::query {
@ -215,6 +216,9 @@ TypedValue::TypedValue(const TypedValue &other, utils::MemoryResource *memory) :
case Type::Duration:
new (&duration_v) utils::Duration(other.duration_v);
return;
case Type::Function:
new (&function_v) std::function<void(TypedValue *)>(other.function_v);
return;
case Type::Graph:
auto *graph_ptr = utils::Allocator<Graph>(memory_).new_object<Graph>(*other.graph_v);
new (&graph_v) std::unique_ptr<Graph>(graph_ptr);
@ -268,6 +272,9 @@ TypedValue::TypedValue(TypedValue &&other, utils::MemoryResource *memory) : memo
case Type::Duration:
new (&duration_v) utils::Duration(other.duration_v);
break;
case Type::Function:
new (&function_v) std::function<void(TypedValue *)>(other.function_v);
break;
case Type::Graph:
if (other.GetMemoryResource() == memory_) {
new (&graph_v) std::unique_ptr<Graph>(std::move(other.graph_v));
@ -343,6 +350,7 @@ DEFINE_VALUE_AND_TYPE_GETTERS(utils::Date, Date, date_v)
DEFINE_VALUE_AND_TYPE_GETTERS(utils::LocalTime, LocalTime, local_time_v)
DEFINE_VALUE_AND_TYPE_GETTERS(utils::LocalDateTime, LocalDateTime, local_date_time_v)
DEFINE_VALUE_AND_TYPE_GETTERS(utils::Duration, Duration, duration_v)
DEFINE_VALUE_AND_TYPE_GETTERS(std::function<void(TypedValue *)>, Function, function_v)

Graph &TypedValue::ValueGraph() {
if (type_ != Type::Graph) {
@ -362,6 +370,38 @@ bool TypedValue::IsGraph() const { return type_ == Type::Graph; }

#undef DEFINE_VALUE_AND_TYPE_GETTERS

bool TypedValue::ContainsDeleted() const {
switch (type_) {
// Value types
case Type::Null:
case Type::Bool:
case Type::Int:
case Type::Double:
case Type::String:
case Type::Date:
case Type::LocalTime:
case Type::LocalDateTime:
case Type::Duration:
return false;
// Reference types
case Type::List:
return std::ranges::any_of(list_v, [](const auto &elem) { return elem.ContainsDeleted(); });
case Type::Map:
return std::ranges::any_of(map_v, [](const auto &item) { return item.second.ContainsDeleted(); });
case Type::Vertex:
return vertex_v.impl_.vertex_->deleted;
case Type::Edge:
return edge_v.IsDeleted();
case Type::Path:
return std::ranges::any_of(path_v.vertices(),
[](auto &vertex_acc) { return vertex_acc.impl_.vertex_->deleted; }) ||
std::ranges::any_of(path_v.edges(), [](auto &edge_acc) { return edge_acc.IsDeleted(); });
default:
throw TypedValueException("Value of unknown type");
}
return false;
}
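// Sketch of an assumed call site (not shown in this diff): a result consumer running in a
// non-transactional storage mode can drop values that reference deleted graph objects.
// `ComputeResult` and `is_transactional` are hypothetical; a default-constructed TypedValue
// is assumed to be Null.
TypedValue value = ComputeResult();
if (!is_transactional && value.ContainsDeleted()) {
  value = TypedValue();  // replace with Null, mirroring the mgp_value handling above
}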

bool TypedValue::IsNull() const { return type_ == Type::Null; }

bool TypedValue::IsNumeric() const { return IsInt() || IsDouble(); }
@ -417,6 +457,8 @@ std::ostream &operator<<(std::ostream &os, const TypedValue::Type &type) {
return os << "duration";
case TypedValue::Type::Graph:
return os << "graph";
case TypedValue::Type::Function:
return os << "function";
}
LOG_FATAL("Unsupported TypedValue::Type");
}
@ -569,6 +611,9 @@ TypedValue &TypedValue::operator=(const TypedValue &other) {
case Type::Duration:
new (&duration_v) utils::Duration(other.duration_v);
return *this;
case Type::Function:
new (&function_v) std::function<void(TypedValue *)>(other.function_v);
return *this;
}
LOG_FATAL("Unsupported TypedValue::Type");
}
@ -628,6 +673,9 @@ TypedValue &TypedValue::operator=(TypedValue &&other) noexcept(false) {
case Type::Duration:
new (&duration_v) utils::Duration(other.duration_v);
break;
case Type::Function:
new (&function_v) std::function<void(TypedValue *)>{other.function_v};
break;
case Type::Graph:
if (other.GetMemoryResource() == memory_) {
new (&graph_v) std::unique_ptr<Graph>(std::move(other.graph_v));
@ -676,6 +724,9 @@ void TypedValue::DestroyValue() {
case Type::LocalDateTime:
case Type::Duration:
break;
case Type::Function:
std::destroy_at(&function_v);
break;
case Type::Graph: {
auto *graph = graph_v.release();
std::destroy_at(&graph_v);
@ -1153,6 +1204,8 @@ size_t TypedValue::Hash::operator()(const TypedValue &value) const {
case TypedValue::Type::Duration:
return utils::DurationHash{}(value.ValueDuration());
break;
case TypedValue::Type::Function:
throw TypedValueException("Unsupported hash function for Function");
case TypedValue::Type::Graph:
throw TypedValueException("Unsupported hash function for Graph");
}

@ -84,7 +84,8 @@ class TypedValue {
LocalTime,
LocalDateTime,
Duration,
Graph
Graph,
Function
};

// TypedValue at this exact moment of compilation is an incomplete type, and
@ -420,6 +421,9 @@ class TypedValue {
new (&graph_v) std::unique_ptr<Graph>(graph_ptr);
}

explicit TypedValue(std::function<void(TypedValue *)> &&other)
: function_v(std::move(other)), type_(Type::Function) {}

/**
* Construct with the value of other.
* Default utils::NewDeleteResource() is used for allocations. After the move,
@ -451,6 +455,7 @@ class TypedValue {
TypedValue &operator=(const utils::LocalTime &);
TypedValue &operator=(const utils::LocalDateTime &);
TypedValue &operator=(const utils::Duration &);
TypedValue &operator=(const std::function<void(TypedValue *)> &);

/** Copy assign other, utils::MemoryResource of `this` is used */
TypedValue &operator=(const TypedValue &other);
@ -506,9 +511,12 @@ class TypedValue {
DECLARE_VALUE_AND_TYPE_GETTERS(utils::LocalDateTime, LocalDateTime)
DECLARE_VALUE_AND_TYPE_GETTERS(utils::Duration, Duration)
DECLARE_VALUE_AND_TYPE_GETTERS(Graph, Graph)
DECLARE_VALUE_AND_TYPE_GETTERS(std::function<void(TypedValue *)>, Function)

#undef DECLARE_VALUE_AND_TYPE_GETTERS

bool ContainsDeleted() const;

/** Checks if value is a TypedValue::Null. */
bool IsNull() const;

@ -550,6 +558,7 @@ class TypedValue {
utils::Duration duration_v;
// As the unique_ptr is not allocator aware, it requires special attention when copying or moving graphs
std::unique_ptr<Graph> graph_v;
std::function<void(TypedValue *)> function_v;
};

/**
@ -6,8 +6,10 @@ target_sources(mg-replication
include/replication/epoch.hpp
include/replication/config.hpp
include/replication/mode.hpp
include/replication/messages.hpp
include/replication/role.hpp
include/replication/status.hpp
include/replication/replication_client.hpp
include/replication/replication_server.hpp

PRIVATE
@ -15,6 +17,8 @@ target_sources(mg-replication
epoch.cpp
config.cpp
status.cpp
messages.cpp
replication_client.cpp
replication_server.cpp
)
target_include_directories(mg-replication PUBLIC include)

44
src/replication/include/replication/messages.hpp
Normal file
@ -0,0 +1,44 @@
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#include "rpc/messages.hpp"
#include "slk/serialization.hpp"

namespace memgraph::replication {

struct FrequentHeartbeatReq {
static const utils::TypeInfo kType; // TODO: make constexpr?
static const utils::TypeInfo &GetTypeInfo() { return kType; } // WHAT?

static void Load(FrequentHeartbeatReq *self, memgraph::slk::Reader *reader);
static void Save(const FrequentHeartbeatReq &self, memgraph::slk::Builder *builder);
FrequentHeartbeatReq() = default;
};

struct FrequentHeartbeatRes {
static const utils::TypeInfo kType;
static const utils::TypeInfo &GetTypeInfo() { return kType; }

static void Load(FrequentHeartbeatRes *self, memgraph::slk::Reader *reader);
static void Save(const FrequentHeartbeatRes &self, memgraph::slk::Builder *builder);
FrequentHeartbeatRes() = default;
explicit FrequentHeartbeatRes(bool success) : success(success) {}

bool success;
};

using FrequentHeartbeatRpc = rpc::RequestResponse<FrequentHeartbeatReq, FrequentHeartbeatRes>;

void FrequentHeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder);

} // namespace memgraph::replication
82
src/replication/include/replication/replication_client.hpp
Normal file
@ -0,0 +1,82 @@
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#include "replication/config.hpp"
#include "replication/messages.hpp"
#include "rpc/client.hpp"
#include "utils/scheduler.hpp"
#include "utils/thread_pool.hpp"

#include <concepts>
#include <string_view>

namespace memgraph::replication {

template <typename F>
concept InvocableWithStringView = std::invocable<F, std::string_view>;

struct ReplicationClient {
explicit ReplicationClient(const memgraph::replication::ReplicationClientConfig &config);

~ReplicationClient();
ReplicationClient(ReplicationClient const &) = delete;
ReplicationClient &operator=(ReplicationClient const &) = delete;
ReplicationClient(ReplicationClient &&) noexcept = delete;
ReplicationClient &operator=(ReplicationClient &&) noexcept = delete;

template <InvocableWithStringView F>
void StartFrequentCheck(F &&callback) {
// Help the user to get the most accurate replica state possible.
if (replica_check_frequency_ > std::chrono::seconds(0)) {
replica_checker_.Run("Replica Checker", replica_check_frequency_, [this, cb = std::forward<F>(callback)] {
try {
bool success = false;
{
auto stream{rpc_client_.Stream<memgraph::replication::FrequentHeartbeatRpc>()};
success = stream.AwaitResponse().success;
}
if (success) {
cb(name_);
}
} catch (const rpc::RpcFailedException &) {
// Nothing to do...wait for a reconnect
}
});
}
}

std::string name_;
communication::ClientContext rpc_context_;
rpc::Client rpc_client_;
std::chrono::seconds replica_check_frequency_;

memgraph::replication::ReplicationMode mode_{memgraph::replication::ReplicationMode::SYNC};
// This thread pool is used for background tasks so we don't
// block the main storage thread
// We use only 1 thread for 2 reasons:
// - background tasks ALWAYS contain some kind of RPC communication.
// We can't have multiple RPC communications from the same client
// because that's not logically valid (e.g. you cannot send a snapshot
// and WAL at the same time because WAL will arrive earlier and be applied
// before the snapshot which is not correct)
// - the implementation is simplified as we have total control of what
// this pool is executing. Also, we can simply queue multiple tasks
// and be sure of the execution order.
// Not having multiple possible threads in the same client allows us
// to ignore concurrency problems inside the client.
utils::ThreadPool thread_pool_{1};

utils::Scheduler replica_checker_;
};

} // namespace memgraph::replication
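// Usage sketch, assuming a populated ReplicationClientConfig named `config`: the callback
// only has to satisfy the InvocableWithStringView concept declared above.
memgraph::replication::ReplicationClient client{config};
client.StartFrequentCheck([](std::string_view replica_name) {
  spdlog::trace("Replica {} responded to the frequent heartbeat", replica_name);
});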

@ -17,30 +17,6 @@

namespace memgraph::replication {

struct FrequentHeartbeatReq {
static const utils::TypeInfo kType; // TODO: make constexpr?
static const utils::TypeInfo &GetTypeInfo() { return kType; } // WHAT?

static void Load(FrequentHeartbeatReq *self, memgraph::slk::Reader *reader);
static void Save(const FrequentHeartbeatReq &self, memgraph::slk::Builder *builder);
FrequentHeartbeatReq() = default;
};

struct FrequentHeartbeatRes {
static const utils::TypeInfo kType;
static const utils::TypeInfo &GetTypeInfo() { return kType; }

static void Load(FrequentHeartbeatRes *self, memgraph::slk::Reader *reader);
static void Save(const FrequentHeartbeatRes &self, memgraph::slk::Builder *builder);
FrequentHeartbeatRes() = default;
explicit FrequentHeartbeatRes(bool success) : success(success) {}

bool success;
};

// TODO: move to own header
using FrequentHeartbeatRpc = rpc::RequestResponse<FrequentHeartbeatReq, FrequentHeartbeatRes>;

class ReplicationServer {
public:
explicit ReplicationServer(const memgraph::replication::ReplicationServerConfig &config);

@ -11,19 +11,22 @@

#pragma once

#include <atomic>
#include <cstdint>
#include <variant>
#include <vector>

#include "kvstore/kvstore.hpp"
#include "replication/config.hpp"
#include "replication/epoch.hpp"
#include "replication/mode.hpp"
#include "replication/replication_client.hpp"
#include "replication/role.hpp"
#include "replication_server.hpp"
#include "status.hpp"
#include "utils/result.hpp"
#include "utils/synchronized.hpp"

#include <atomic>
#include <cstdint>
#include <list>
#include <variant>
#include <vector>

namespace memgraph::replication {

@ -32,8 +35,17 @@ enum class RolePersisted : uint8_t { UNKNOWN_OR_NO, YES };
enum class RegisterReplicaError : uint8_t { NAME_EXISTS, END_POINT_EXISTS, COULD_NOT_BE_PERSISTED, NOT_MAIN, SUCCESS };

struct RoleMainData {
RoleMainData() = default;
explicit RoleMainData(ReplicationEpoch e) : epoch_(std::move(e)) {}
~RoleMainData() = default;

RoleMainData(RoleMainData const &) = delete;
RoleMainData &operator=(RoleMainData const &) = delete;
RoleMainData(RoleMainData &&) = default;
RoleMainData &operator=(RoleMainData &&) = default;

ReplicationEpoch epoch_;
std::vector<ReplicationClientConfig> registered_replicas_;
std::list<ReplicationClient> registered_replicas_{};
};

struct RoleReplicaData {
@ -41,8 +53,10 @@ struct RoleReplicaData {
std::unique_ptr<ReplicationServer> server;
};

// Global (instance) level object
struct ReplicationState {
explicit ReplicationState(std::optional<std::filesystem::path> durability_dir);
~ReplicationState() = default;

ReplicationState(ReplicationState const &) = delete;
ReplicationState(ReplicationState &&) = delete;
@ -74,7 +88,7 @@ struct ReplicationState {
// TODO: locked access
auto ReplicationData() -> ReplicationData_t & { return replication_data_; }
auto ReplicationData() const -> ReplicationData_t const & { return replication_data_; }
auto RegisterReplica(const ReplicationClientConfig &config) -> RegisterReplicaError;
utils::BasicResult<RegisterReplicaError, ReplicationClient *> RegisterReplica(const ReplicationClientConfig &config);

bool SetReplicationRoleMain();

65
src/replication/messages.cpp
Normal file
@ -0,0 +1,65 @@
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#include "replication/messages.hpp"
#include "rpc/messages.hpp"
#include "slk/serialization.hpp"
#include "slk/streams.hpp"

namespace memgraph::slk {
// Serialize code for FrequentHeartbeatRes
void Save(const memgraph::replication::FrequentHeartbeatRes &self, memgraph::slk::Builder *builder) {
memgraph::slk::Save(self.success, builder);
}
void Load(memgraph::replication::FrequentHeartbeatRes *self, memgraph::slk::Reader *reader) {
memgraph::slk::Load(&self->success, reader);
}

// Serialize code for FrequentHeartbeatReq
void Save(const memgraph::replication::FrequentHeartbeatReq & /*self*/, memgraph::slk::Builder * /*builder*/) {
/* Nothing to serialize */
}
void Load(memgraph::replication::FrequentHeartbeatReq * /*self*/, memgraph::slk::Reader * /*reader*/) {
/* Nothing to serialize */
}

} // namespace memgraph::slk

namespace memgraph::replication {

constexpr utils::TypeInfo FrequentHeartbeatReq::kType{utils::TypeId::REP_FREQUENT_HEARTBEAT_REQ, "FrequentHeartbeatReq",
nullptr};

constexpr utils::TypeInfo FrequentHeartbeatRes::kType{utils::TypeId::REP_FREQUENT_HEARTBEAT_RES, "FrequentHeartbeatRes",
nullptr};

void FrequentHeartbeatReq::Save(const FrequentHeartbeatReq &self, memgraph::slk::Builder *builder) {
memgraph::slk::Save(self, builder);
}
void FrequentHeartbeatReq::Load(FrequentHeartbeatReq *self, memgraph::slk::Reader *reader) {
memgraph::slk::Load(self, reader);
}
void FrequentHeartbeatRes::Save(const FrequentHeartbeatRes &self, memgraph::slk::Builder *builder) {
memgraph::slk::Save(self, builder);
}
void FrequentHeartbeatRes::Load(FrequentHeartbeatRes *self, memgraph::slk::Reader *reader) {
memgraph::slk::Load(self, reader);
}

void FrequentHeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder) {
FrequentHeartbeatReq req;
FrequentHeartbeatReq::Load(&req, req_reader);
memgraph::slk::Load(&req, req_reader);
FrequentHeartbeatRes res{true};
memgraph::slk::Save(res, res_builder);
}

} // namespace memgraph::replication
40
src/replication/replication_client.cpp
Normal file
@ -0,0 +1,40 @@
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#include "replication/replication_client.hpp"

namespace memgraph::replication {

static auto CreateClientContext(const memgraph::replication::ReplicationClientConfig &config)
-> communication::ClientContext {
return (config.ssl) ? communication::ClientContext{config.ssl->key_file, config.ssl->cert_file}
: communication::ClientContext{};
}

ReplicationClient::ReplicationClient(const memgraph::replication::ReplicationClientConfig &config)
: name_{config.name},
rpc_context_{CreateClientContext(config)},
rpc_client_{io::network::Endpoint(io::network::Endpoint::needs_resolving, config.ip_address, config.port),
&rpc_context_},
replica_check_frequency_{config.replica_check_frequency},
mode_{config.mode} {}

ReplicationClient::~ReplicationClient() {
auto endpoint = rpc_client_.Endpoint();
try {
spdlog::trace("Closing replication client on {}:{}", endpoint.address, endpoint.port);
} catch (...) {
// Logging can throw. Not a big deal, just ignore.
}
thread_pool_.Shutdown();
}

} // namespace memgraph::replication
@ -10,25 +10,7 @@
// licenses/APL.txt.

#include "replication/replication_server.hpp"
#include "rpc/messages.hpp"
#include "slk/serialization.hpp"
#include "slk/streams.hpp"

namespace memgraph::slk {

// Serialize code for FrequentHeartbeatRes
void Save(const memgraph::replication::FrequentHeartbeatRes &self, memgraph::slk::Builder *builder) {
memgraph::slk::Save(self.success, builder);
}
void Load(memgraph::replication::FrequentHeartbeatRes *self, memgraph::slk::Reader *reader) {
memgraph::slk::Load(&self->success, reader);
}

// Serialize code for FrequentHeartbeatReq
void Save(const memgraph::replication::FrequentHeartbeatReq &self, memgraph::slk::Builder *builder) {}
void Load(memgraph::replication::FrequentHeartbeatReq *self, memgraph::slk::Reader *reader) {}

} // namespace memgraph::slk
#include "replication/messages.hpp"

namespace memgraph::replication {
namespace {
@ -39,13 +21,6 @@ auto CreateServerContext(const memgraph::replication::ReplicationServerConfig &c
: communication::ServerContext{};
}

void FrequentHeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder) {
FrequentHeartbeatReq req;
memgraph::slk::Load(&req, req_reader);
FrequentHeartbeatRes res{true};
memgraph::slk::Save(res, res_builder);
}

// NOTE: The replication server must have a single thread for processing
// because there is no need for more processing threads - each replica can
// have only a single main server. Also, the single-threaded guarantee
@ -53,25 +28,6 @@ void FrequentHeartbeatHandler(slk::Reader *req_reader, slk::Builder *res_builder
constexpr auto kReplicationServerThreads = 1;
} // namespace

constexpr utils::TypeInfo FrequentHeartbeatReq::kType{utils::TypeId::REP_FREQUENT_HEARTBEAT_REQ, "FrequentHeartbeatReq",
nullptr};

constexpr utils::TypeInfo FrequentHeartbeatRes::kType{utils::TypeId::REP_FREQUENT_HEARTBEAT_RES, "FrequentHeartbeatRes",
nullptr};

void FrequentHeartbeatReq::Save(const FrequentHeartbeatReq &self, memgraph::slk::Builder *builder) {
memgraph::slk::Save(self, builder);
}
void FrequentHeartbeatReq::Load(FrequentHeartbeatReq *self, memgraph::slk::Reader *reader) {
memgraph::slk::Load(self, reader);
}
void FrequentHeartbeatRes::Save(const FrequentHeartbeatRes &self, memgraph::slk::Builder *builder) {
memgraph::slk::Save(self, builder);
}
void FrequentHeartbeatRes::Load(FrequentHeartbeatRes *self, memgraph::slk::Reader *reader) {
memgraph::slk::Load(self, reader);
}

ReplicationServer::ReplicationServer(const memgraph::replication::ReplicationServerConfig &config)
: rpc_server_context_{CreateServerContext(config)},
rpc_server_{io::network::Endpoint{config.ip_address, config.port}, &rpc_server_context_,
@ -11,9 +11,11 @@

#include "replication/state.hpp"

#include "replication/replication_client.hpp"
#include "replication/replication_server.hpp"
#include "replication/status.hpp"
#include "utils/file.hpp"
#include "utils/result.hpp"
#include "utils/variant_helpers.hpp"

constexpr auto kReplicationDirectory = std::string_view{"replication"};
@ -125,12 +127,9 @@ auto ReplicationState::FetchReplicationData() -> FetchReplicationResult_t {
return std::visit(
utils::Overloaded{
[&](durability::MainRole &&r) -> FetchReplicationResult_t {
auto res = RoleMainData{
.epoch_ = std::move(r.epoch),
};
auto res = RoleMainData{std::move(r.epoch)};
auto b = durability_->begin(durability::kReplicationReplicaPrefix);
auto e = durability_->end(durability::kReplicationReplicaPrefix);
res.registered_replicas_.reserve(durability_->Size(durability::kReplicationReplicaPrefix));
for (; b != e; ++b) {
auto const &[replica_name, replica_data] = *b;
auto json = nlohmann::json::parse(replica_data, nullptr, false);
@ -141,7 +140,8 @@ auto ReplicationState::FetchReplicationData() -> FetchReplicationResult_t {
if (key_name != data.config.name) {
return FetchReplicationError::PARSE_ERROR;
}
res.registered_replicas_.emplace_back(std::move(data.config));
// Instance clients
res.registered_replicas_.emplace_back(data.config);
} catch (...) {
return FetchReplicationError::PARSE_ERROR;
}
@ -221,7 +221,7 @@ bool ReplicationState::SetReplicationRoleMain() {
if (!TryPersistRoleMain(new_epoch)) {
return false;
}
replication_data_ = RoleMainData{.epoch_ = ReplicationEpoch{new_epoch}};
replication_data_ = RoleMainData{ReplicationEpoch{new_epoch}};
return true;
}

@ -233,16 +233,14 @@ bool ReplicationState::SetReplicationRoleReplica(const ReplicationServerConfig &
return true;
}

auto ReplicationState::RegisterReplica(const ReplicationClientConfig &config) -> RegisterReplicaError {
auto const replica_handler = [](RoleReplicaData const &) -> RegisterReplicaError {
return RegisterReplicaError::NOT_MAIN;
};
auto const main_handler = [this, &config](RoleMainData &mainData) -> RegisterReplicaError {
utils::BasicResult<RegisterReplicaError, ReplicationClient *> ReplicationState::RegisterReplica(
const ReplicationClientConfig &config) {
auto const replica_handler = [](RoleReplicaData const &) { return RegisterReplicaError::NOT_MAIN; };
ReplicationClient *client{nullptr};
auto const main_handler = [&client, &config, this](RoleMainData &mainData) -> RegisterReplicaError {
// name check
auto name_check = [&config](auto const &replicas) {
auto name_matches = [&name = config.name](ReplicationClientConfig const &registered_config) {
return registered_config.name == name;
};
auto name_matches = [&name = config.name](auto const &replica) { return replica.name_ == name; };
return std::any_of(replicas.begin(), replicas.end(), name_matches);
};
if (name_check(mainData.registered_replicas_)) {
@ -251,8 +249,9 @@ auto ReplicationState::RegisterReplica(const ReplicationClientConfig &config) ->

// endpoint check
auto endpoint_check = [&](auto const &replicas) {
auto endpoint_matches = [&config](ReplicationClientConfig const &registered_config) {
return registered_config.ip_address == config.ip_address && registered_config.port == config.port;
auto endpoint_matches = [&config](auto const &replica) {
const auto &ep = replica.rpc_client_.Endpoint();
return ep.address == config.ip_address && ep.port == config.port;
};
return std::any_of(replicas.begin(), replicas.end(), endpoint_matches);
};
@ -266,10 +265,14 @@ auto ReplicationState::RegisterReplica(const ReplicationClientConfig &config) ->
}

// set
mainData.registered_replicas_.emplace_back(config);
client = &mainData.registered_replicas_.emplace_back(config);
return RegisterReplicaError::SUCCESS;
};

return std::visit(utils::Overloaded{main_handler, replica_handler}, replication_data_);
const auto &res = std::visit(utils::Overloaded{main_handler, replica_handler}, replication_data_);
if (res == RegisterReplicaError::SUCCESS) {
return client;
}
return res;
}
} // namespace memgraph::replication
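// Sketch of consuming the new RegisterReplica signature; `repl_state`, `config` and
// HandleRegistrationError are hypothetical, and utils::BasicResult is assumed to expose
// HasError()/GetError()/GetValue() as it does elsewhere in the codebase.
auto maybe_client = repl_state.RegisterReplica(config);
if (maybe_client.HasError()) {
  HandleRegistrationError(maybe_client.GetError());
} else {
  memgraph::replication::ReplicationClient *client = maybe_client.GetValue();
  client->StartFrequentCheck([](std::string_view name) { /* mark the replica as reachable */ });
}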
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <utility>
|
||||
|
||||
#include "communication/client.hpp"
|
||||
#include "io/network/endpoint.hpp"
|
||||
@ -41,16 +42,25 @@ class Client {
|
||||
|
||||
StreamHandler(Client *self, std::unique_lock<std::mutex> &&guard,
|
||||
std::function<typename TRequestResponse::Response(slk::Reader *)> res_load)
|
||||
: self_(self),
|
||||
guard_(std::move(guard)),
|
||||
req_builder_([self](const uint8_t *data, size_t size, bool have_more) {
|
||||
if (!self->client_->Write(data, size, have_more)) throw GenericRpcFailedException();
|
||||
}),
|
||||
res_load_(res_load) {}
|
||||
: self_(self), guard_(std::move(guard)), req_builder_(GenBuilderCallback(self, this)), res_load_(res_load) {}
|
||||
|
||||
public:
|
||||
StreamHandler(StreamHandler &&) noexcept = default;
|
||||
StreamHandler &operator=(StreamHandler &&) noexcept = default;
|
||||
StreamHandler(StreamHandler &&other) noexcept
|
||||
: self_{std::exchange(other.self_, nullptr)},
|
||||
defunct_{std::exchange(other.defunct_, true)},
|
||||
guard_{std::move(other.guard_)},
|
||||
req_builder_{std::move(other.req_builder_), GenBuilderCallback(self_, this)},
|
||||
res_load_{std::move(other.res_load_)} {}
|
||||
StreamHandler &operator=(StreamHandler &&other) noexcept {
|
||||
if (&other != this) {
|
||||
self_ = std::exchange(other.self_, nullptr);
|
||||
defunct_ = std::exchange(other.defunct_, true);
|
||||
guard_ = std::move(other.guard_);
|
||||
req_builder_ = slk::Builder(std::move(other.req_builder_, GenBuilderCallback(self_, this)));
|
||||
res_load_ = std::move(other.res_load_);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
StreamHandler(const StreamHandler &) = delete;
|
||||
StreamHandler &operator=(const StreamHandler &) = delete;
|
||||
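The move operations above use std::exchange so the moved-from StreamHandler is left null and defunct rather than half-valid. A small self-contained sketch of that pattern, using a hypothetical Handle type rather than the real class:

#include <cassert>
#include <utility>

class Handle {
 public:
  explicit Handle(int *resource) : resource_(resource) {}

  // Move: steal the pointer and leave the source marked defunct.
  Handle(Handle &&other) noexcept
      : resource_(std::exchange(other.resource_, nullptr)),
        defunct_(std::exchange(other.defunct_, true)) {}

  Handle &operator=(Handle &&other) noexcept {
    if (&other != this) {
      resource_ = std::exchange(other.resource_, nullptr);
      defunct_ = std::exchange(other.defunct_, true);
    }
    return *this;
  }

  bool IsDefunct() const { return defunct_; }

 private:
  int *resource_;
  bool defunct_ = false;
};

int main() {
  int value = 42;
  Handle a{&value};
  Handle b{std::move(a)};
  assert(a.IsDefunct() && !b.IsDefunct());
}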
@ -70,10 +80,18 @@ class Client {
|
||||
while (true) {
|
||||
auto ret = slk::CheckStreamComplete(self_->client_->GetData(), self_->client_->GetDataSize());
|
||||
if (ret.status == slk::StreamStatus::INVALID) {
|
||||
// Logically invalid state, connection is still up, defunct stream and release
|
||||
defunct_ = true;
|
||||
guard_.unlock();
|
||||
throw GenericRpcFailedException();
|
||||
} else if (ret.status == slk::StreamStatus::PARTIAL) {
|
||||
}
|
||||
if (ret.status == slk::StreamStatus::PARTIAL) {
|
||||
if (!self_->client_->Read(ret.stream_size - self_->client_->GetDataSize(),
|
||||
/* exactly_len = */ false)) {
|
||||
// Failed connection, abort and let somebody retry in the future
|
||||
defunct_ = true;
|
||||
self_->Abort();
|
||||
guard_.unlock();
|
||||
throw GenericRpcFailedException();
|
||||
}
|
||||
} else {
|
||||
@ -103,7 +121,9 @@ class Client {
|
||||
// Check the response ID.
|
||||
if (res_id != res_type.id && res_id != utils::TypeId::UNKNOWN) {
|
||||
spdlog::error("Message response was of unexpected type");
|
||||
self_->client_ = std::nullopt;
|
||||
// Logically invalid state, connection is still up, defunct stream and release
|
||||
defunct_ = true;
|
||||
guard_.unlock();
|
||||
throw GenericRpcFailedException();
|
||||
}
|
||||
|
||||
@ -112,8 +132,23 @@ class Client {
|
||||
return res_load_(&res_reader);
|
||||
}
|
||||
|
||||
bool IsDefunct() const { return defunct_; }
|
||||
|
||||
private:
|
||||
static auto GenBuilderCallback(Client *client, StreamHandler *self) {
|
||||
return [client, self](const uint8_t *data, size_t size, bool have_more) {
|
||||
if (self->defunct_) throw GenericRpcFailedException();
|
||||
if (!client->client_->Write(data, size, have_more)) {
|
||||
self->defunct_ = true;
|
||||
client->Abort();
|
||||
self->guard_.unlock();
|
||||
throw GenericRpcFailedException();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
Client *self_;
|
||||
bool defunct_ = false;
|
||||
std::unique_lock<std::mutex> guard_;
|
||||
slk::Builder req_builder_;
|
||||
std::function<typename TRequestResponse::Response(slk::Reader *)> res_load_;
|
||||
@ -179,7 +214,7 @@ class Client {
|
||||
TRequestResponse::Request::Save(request, handler.GetBuilder());
|
||||
|
||||
// Return the handler to the user.
|
||||
return std::move(handler);
|
||||
return handler;
|
||||
}
|
||||
|
||||
/// Call a previously defined and registered RPC call. This function can
|
||||
|
@@ -30,7 +30,7 @@ void Builder::Save(const uint8_t *data, uint64_t size) {
to_write = kSegmentMaxDataSize - pos_;
}

memcpy(segment_ + sizeof(SegmentSize) + pos_, data + offset, to_write);
memcpy(segment_.data() + sizeof(SegmentSize) + pos_, data + offset, to_write);

size -= to_write;
pos_ += to_write;
@@ -48,15 +48,15 @@ void Builder::FlushSegment(bool final_segment) {
size_t total_size = sizeof(SegmentSize) + pos_;

SegmentSize size = pos_;
memcpy(segment_, &size, sizeof(SegmentSize));
memcpy(segment_.data(), &size, sizeof(SegmentSize));

if (final_segment) {
SegmentSize footer = 0;
memcpy(segment_ + total_size, &footer, sizeof(SegmentSize));
memcpy(segment_.data() + total_size, &footer, sizeof(SegmentSize));
total_size += sizeof(SegmentSize);
}

write_func_(segment_, total_size, !final_segment);
write_func_(segment_.data(), total_size, !final_segment);

pos_ = 0;
}

@@ -46,7 +46,11 @@ static_assert(kSegmentMaxDataSize <= std::numeric_limits<SegmentSize>::max(),
/// Builder used to create a SLK segment stream.
class Builder {
public:
Builder(std::function<void(const uint8_t *, size_t, bool)> write_func);
explicit Builder(std::function<void(const uint8_t *, size_t, bool)> write_func);
Builder(Builder &&other, std::function<void(const uint8_t *, size_t, bool)> write_func)
: write_func_{std::move(write_func)}, pos_{std::exchange(other.pos_, 0)}, segment_{other.segment_} {
other.write_func_ = [](const uint8_t *, size_t, bool) { /* Moved builder is defunct, no write possible */ };
}

/// Function used internally by SLK to serialize the data.
void Save(const uint8_t *data, uint64_t size);
@@ -59,7 +63,7 @@ class Builder {

std::function<void(const uint8_t *, size_t, bool)> write_func_;
size_t pos_{0};
uint8_t segment_[kSegmentMaxTotalSize];
std::array<uint8_t, kSegmentMaxTotalSize> segment_;
};

/// Exception that will be thrown if segments can't be decoded from the byte
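The hunk above replaces the raw uint8_t segment_[kSegmentMaxTotalSize] buffer with std::array, so every flush now goes through segment_.data(). A compact standalone sketch of the fixed-buffer-plus-write-callback shape; the size and names are illustrative, not the real SLK constants:

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstring>
#include <functional>
#include <iostream>

constexpr size_t kMaxSegment = 8;  // illustrative, far smaller than the real segment size

class TinyBuilder {
 public:
  explicit TinyBuilder(std::function<void(const uint8_t *, size_t)> write) : write_(std::move(write)) {}

  void Save(const uint8_t *data, size_t size) {
    while (size > 0) {
      const size_t to_write = std::min(size, kMaxSegment - pos_);
      std::memcpy(segment_.data() + pos_, data, to_write);  // std::array exposes the buffer via data()
      pos_ += to_write;
      data += to_write;
      size -= to_write;
      if (pos_ == kMaxSegment) Flush();
    }
  }

  void Flush() {
    write_(segment_.data(), pos_);
    pos_ = 0;
  }

 private:
  std::function<void(const uint8_t *, size_t)> write_;
  size_t pos_{0};
  std::array<uint8_t, kMaxSegment> segment_{};
};

int main() {
  TinyBuilder b{[](const uint8_t *, size_t size) { std::cout << "flushing " << size << " bytes\n"; }};
  const uint8_t payload[20] = {};
  b.Save(payload, sizeof(payload));
  b.Flush();
}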
@@ -39,6 +39,7 @@ add_library(mg-storage-v2 STATIC
replication/slk.cpp
replication/rpc.cpp
replication/replication_storage_state.cpp
inmemory/replication/replication_client.cpp
inmemory/replication/recovery.cpp
)

target_link_libraries(mg-storage-v2 mg::replication Threads::Threads mg-utils gflags absl::flat_hash_map mg-rpc mg-slk mg-events mg-memory)

@@ -40,6 +40,7 @@ struct Config {

struct Items {
bool properties_on_edges{true};
bool enable_schema_metadata{false};
friend bool operator==(const Items &lrh, const Items &rhs) = default;
} items;

@@ -64,7 +65,10 @@ struct Config {
uint64_t items_per_batch{1'000'000};
uint64_t recovery_thread_count{8};

// deprecated
bool allow_parallel_index_creation{false};

bool allow_parallel_schema_creation{false};
friend bool operator==(const Durability &lrh, const Durability &rhs) = default;
} durability;
@@ -29,4 +29,8 @@ Constraints::Constraints(const Config &config, StorageMode storage_mode) {
};
});
}

void Constraints::AbortEntries(std::span<Vertex const *const> vertices, uint64_t exact_start_timestamp) const {
static_cast<InMemoryUniqueConstraints *>(unique_constraints_.get())->AbortEntries(vertices, exact_start_timestamp);
}
} // namespace memgraph::storage

@@ -11,6 +11,8 @@

#pragma once

#include <span>

#include "storage/v2/config.hpp"
#include "storage/v2/constraints/existence_constraints.hpp"
#include "storage/v2/constraints/unique_constraints.hpp"
@@ -27,6 +29,8 @@ struct Constraints {
Constraints &operator=(Constraints &&) = delete;
~Constraints() = default;

void AbortEntries(std::span<Vertex const *const> vertices, uint64_t exact_start_timestamp) const;

std::unique_ptr<ExistenceConstraints> existence_constraints_;
std::unique_ptr<UniqueConstraints> unique_constraints_;
};
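The new AbortEntries API above takes std::span, so callers can hand over any contiguous range of vertex pointers without copying. A tiny hedged example of that parameter style, with plain ints standing in for Vertex:

#include <cstdint>
#include <iostream>
#include <span>
#include <vector>

// Accepts any contiguous sequence of const pointers, mirroring the span-based signature above.
void AbortEntries(std::span<const int *const> items, uint64_t exact_start_timestamp) {
  std::cout << "aborting " << items.size() << " entries at ts " << exact_start_timestamp << "\n";
}

int main() {
  int a = 1, b = 2;
  std::vector<const int *> batch{&a, &b};
  AbortEntries(batch, 42);  // the vector converts to std::span implicitly
}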
@ -11,10 +11,11 @@
|
||||
|
||||
#include "storage/v2/constraints/existence_constraints.hpp"
|
||||
#include "storage/v2/constraints/constraints.hpp"
|
||||
#include "storage/v2/constraints/utils.hpp"
|
||||
#include "storage/v2/id_types.hpp"
|
||||
#include "storage/v2/mvcc.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
|
||||
#include "utils/rw_spin_lock.hpp"
|
||||
namespace memgraph::storage {
|
||||
|
||||
bool ExistenceConstraints::ConstraintExists(LabelId label, PropertyId property) const {
|
||||
@ -55,4 +56,70 @@ void ExistenceConstraints::LoadExistenceConstraints(const std::vector<std::strin
|
||||
}
|
||||
}
|
||||
|
||||
[[nodiscard]] std::optional<ConstraintViolation> ExistenceConstraints::ValidateVertexOnConstraint(
|
||||
const Vertex &vertex, const LabelId &label, const PropertyId &property) {
|
||||
if (!vertex.deleted && utils::Contains(vertex.labels, label) && !vertex.properties.HasProperty(property)) {
|
||||
return ConstraintViolation{ConstraintViolation::Type::EXISTENCE, label, std::set<PropertyId>{property}};
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
std::variant<ExistenceConstraints::MultipleThreadsConstraintValidation,
|
||||
ExistenceConstraints::SingleThreadConstraintValidation>
|
||||
ExistenceConstraints::GetCreationFunction(
|
||||
const std::optional<durability::ParallelizedSchemaCreationInfo> &par_exec_info) {
|
||||
if (par_exec_info.has_value()) {
|
||||
return ExistenceConstraints::MultipleThreadsConstraintValidation{par_exec_info.value()};
|
||||
}
|
||||
return ExistenceConstraints::SingleThreadConstraintValidation{};
|
||||
}
|
||||
|
||||
[[nodiscard]] std::optional<ConstraintViolation> ExistenceConstraints::ValidateVerticesOnConstraint(
|
||||
utils::SkipList<Vertex>::Accessor vertices, LabelId label, PropertyId property,
|
||||
const std::optional<durability::ParallelizedSchemaCreationInfo> &parallel_exec_info) {
|
||||
auto calling_existence_validation_function = GetCreationFunction(parallel_exec_info);
|
||||
return std::visit(
|
||||
[&vertices, &label, &property](auto &calling_object) { return calling_object(vertices, label, property); },
|
||||
calling_existence_validation_function);
|
||||
}
|
||||
|
||||
std::optional<ConstraintViolation> ExistenceConstraints::MultipleThreadsConstraintValidation::operator()(
|
||||
const utils::SkipList<Vertex>::Accessor &vertices, const LabelId &label, const PropertyId &property) {
|
||||
utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception;
|
||||
|
||||
const auto &vertex_batches = parallel_exec_info.vertex_recovery_info;
|
||||
MG_ASSERT(!vertex_batches.empty(),
|
||||
"The size of batches should always be greater than zero if you want to use the parallel version of index "
|
||||
"creation!");
|
||||
const auto thread_count = std::min(parallel_exec_info.thread_count, vertex_batches.size());
|
||||
|
||||
std::atomic<uint64_t> batch_counter = 0;
|
||||
memgraph::utils::Synchronized<std::optional<ConstraintViolation>, utils::RWSpinLock> maybe_error{};
|
||||
{
|
||||
std::vector<std::jthread> threads;
|
||||
threads.reserve(thread_count);
|
||||
|
||||
for (auto i{0U}; i < thread_count; ++i) {
|
||||
threads.emplace_back([&maybe_error, &vertex_batches, &batch_counter, &vertices, &label, &property]() {
|
||||
do_per_thread_validation(maybe_error, ValidateVertexOnConstraint, vertex_batches, batch_counter, vertices,
|
||||
label, property);
|
||||
});
|
||||
}
|
||||
}
|
||||
if (maybe_error.Lock()->has_value()) {
|
||||
return maybe_error->value();
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
std::optional<ConstraintViolation> ExistenceConstraints::SingleThreadConstraintValidation::operator()(
|
||||
const utils::SkipList<Vertex>::Accessor &vertices, const LabelId &label, const PropertyId &property) {
|
||||
for (const Vertex &vertex : vertices) {
|
||||
if (auto violation = ValidateVertexOnConstraint(vertex, label, property); violation.has_value()) {
|
||||
return violation;
|
||||
}
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
} // namespace memgraph::storage
|
||||
|
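ValidateVertexOnConstraint above boils down to a single predicate: a non-deleted vertex that carries the label but lacks the property violates the constraint. A standalone illustration with plain containers, ignoring MVCC and using ints in place of LabelId/PropertyId:

#include <algorithm>
#include <cassert>
#include <optional>
#include <set>
#include <vector>

struct FakeVertex {
  bool deleted = false;
  std::vector<int> labels;   // stand-in for the label list
  std::set<int> properties;  // stand-in for the property store
};

struct Violation {
  int label;
  int property;
};

// Mirrors the check: !deleted && has label && !has property -> violation.
std::optional<Violation> ValidateVertex(const FakeVertex &v, int label, int property) {
  const bool has_label = std::find(v.labels.begin(), v.labels.end(), label) != v.labels.end();
  if (!v.deleted && has_label && !v.properties.contains(property)) {
    return Violation{label, property};
  }
  return std::nullopt;
}

int main() {
  FakeVertex ok{.deleted = false, .labels = {1}, .properties = {7}};
  FakeVertex bad{.deleted = false, .labels = {1}, .properties = {}};
  assert(!ValidateVertex(ok, 1, 7).has_value());
  assert(ValidateVertex(bad, 1, 7).has_value());
}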
@ -11,34 +11,45 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <optional>
|
||||
#include <thread>
|
||||
#include <variant>
|
||||
|
||||
#include "storage/v2/constraints/constraint_violation.hpp"
|
||||
#include "storage/v2/durability/recovery_type.hpp"
|
||||
#include "storage/v2/vertex.hpp"
|
||||
#include "utils/skip_list.hpp"
|
||||
#include "utils/synchronized.hpp"
|
||||
|
||||
namespace memgraph::storage {
|
||||
|
||||
class ExistenceConstraints {
|
||||
private:
|
||||
std::vector<std::pair<LabelId, PropertyId>> constraints_;
|
||||
|
||||
public:
|
||||
struct MultipleThreadsConstraintValidation {
|
||||
std::optional<ConstraintViolation> operator()(const utils::SkipList<Vertex>::Accessor &vertices,
|
||||
const LabelId &label, const PropertyId &property);
|
||||
|
||||
const durability::ParallelizedSchemaCreationInfo &parallel_exec_info;
|
||||
};
|
||||
struct SingleThreadConstraintValidation {
|
||||
std::optional<ConstraintViolation> operator()(const utils::SkipList<Vertex>::Accessor &vertices,
|
||||
const LabelId &label, const PropertyId &property);
|
||||
};
|
||||
|
||||
[[nodiscard]] static std::optional<ConstraintViolation> ValidateVertexOnConstraint(const Vertex &vertex,
|
||||
LabelId label,
|
||||
PropertyId property) {
|
||||
if (!vertex.deleted && utils::Contains(vertex.labels, label) && !vertex.properties.HasProperty(property)) {
|
||||
return ConstraintViolation{ConstraintViolation::Type::EXISTENCE, label, std::set<PropertyId>{property}};
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
const LabelId &label,
|
||||
const PropertyId &property);
|
||||
|
||||
[[nodiscard]] static std::optional<ConstraintViolation> ValidateVerticesOnConstraint(
|
||||
utils::SkipList<Vertex>::Accessor vertices, LabelId label, PropertyId property) {
|
||||
for (const auto &vertex : vertices) {
|
||||
if (auto violation = ValidateVertexOnConstraint(vertex, label, property); violation.has_value()) {
|
||||
return violation;
|
||||
}
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
utils::SkipList<Vertex>::Accessor vertices, LabelId label, PropertyId property,
|
||||
const std::optional<durability::ParallelizedSchemaCreationInfo> &parallel_exec_info = std::nullopt);
|
||||
|
||||
static std::variant<MultipleThreadsConstraintValidation, SingleThreadConstraintValidation> GetCreationFunction(
|
||||
const std::optional<durability::ParallelizedSchemaCreationInfo> &);
|
||||
|
||||
bool ConstraintExists(LabelId label, PropertyId property) const;
|
||||
|
||||
@ -54,9 +65,6 @@ class ExistenceConstraints {
|
||||
std::vector<std::pair<LabelId, PropertyId>> ListConstraints() const;
|
||||
|
||||
void LoadExistenceConstraints(const std::vector<std::string> &keys);
|
||||
|
||||
private:
|
||||
std::vector<std::pair<LabelId, PropertyId>> constraints_;
|
||||
};
|
||||
|
||||
} // namespace memgraph::storage
|
||||
|
src/storage/v2/constraints/utils.hpp (new file, 42 lines)
@@ -0,0 +1,42 @@
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#include <vector>
#include "storage/v2/vertex.hpp"
#include "utils/skip_list.hpp"

namespace memgraph::storage {
template <typename ErrorType, typename Func, typename... Args>
void do_per_thread_validation(ErrorType &maybe_error, Func &&func,
const std::vector<std::pair<Gid, uint64_t>> &vertex_batches,
std::atomic<uint64_t> &batch_counter,
const memgraph::utils::SkipList<memgraph::storage::Vertex>::Accessor &vertices,
Args &&...args) {
while (!maybe_error.ReadLock()->has_value()) {
const auto batch_index = batch_counter.fetch_add(1, std::memory_order_acquire);
if (batch_index >= vertex_batches.size()) {
return;
}
const auto &[gid_start, batch_size] = vertex_batches[batch_index];

auto vertex_curr = vertices.find(gid_start);
DMG_ASSERT(vertex_curr != vertices.end(), "No vertex was found with given gid");
for (auto i{0U}; i < batch_size; ++i, ++vertex_curr) {
const auto violation = func(*vertex_curr, std::forward<Args>(args)...);
if (!violation.has_value()) [[likely]] {
continue;
}
maybe_error.WithLock([&violation](auto &maybe_error) { maybe_error = *violation; });
break;
}
}
}
} // namespace memgraph::storage
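do_per_thread_validation is a work-claiming loop: every thread atomically grabs the next batch index, and all threads stop as soon as one of them records a violation. A self-contained sketch of that scheduling pattern, with a plain mutex-guarded optional instead of utils::Synchronized and all names illustrative:

#include <atomic>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <optional>
#include <thread>
#include <vector>

int main() {
  const std::vector<int> batches{0, 1, 2, 3, 4, 5, 6, 7};
  std::atomic<uint64_t> batch_counter{0};
  std::mutex error_mutex;
  std::optional<int> first_error;  // plays the role of Synchronized<optional<ConstraintViolation>>

  auto worker = [&] {
    while (true) {
      {
        std::lock_guard lock(error_mutex);
        if (first_error.has_value()) return;  // someone already failed, stop early
      }
      const auto index = batch_counter.fetch_add(1, std::memory_order_acquire);
      if (index >= batches.size()) return;  // no work left
      const int batch = batches[index];
      if (batch == 5) {  // pretend batch 5 contains a violation
        std::lock_guard lock(error_mutex);
        if (!first_error) first_error = batch;
        return;
      }
    }
  };

  {
    std::vector<std::jthread> threads;
    for (int i = 0; i < 4; ++i) threads.emplace_back(worker);
  }  // jthreads join here, like the scoped block in the diff

  std::cout << "violation in batch: " << first_error.value_or(-1) << "\n";
}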
@ -10,7 +10,9 @@
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "storage/v2/disk//edge_import_mode_cache.hpp"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#include "storage/v2/disk/label_property_index.hpp"
|
||||
#include "storage/v2/indices/indices.hpp"
|
||||
#include "storage/v2/inmemory/label_index.hpp"
|
||||
@ -28,7 +30,7 @@ EdgeImportModeCache::EdgeImportModeCache(const Config &config)
|
||||
InMemoryLabelIndex::Iterable EdgeImportModeCache::Vertices(LabelId label, View view, Storage *storage,
|
||||
Transaction *transaction) const {
|
||||
auto *mem_label_index = static_cast<InMemoryLabelIndex *>(in_memory_indices_.label_index_.get());
|
||||
return mem_label_index->Vertices(label, view, storage, transaction);
|
||||
return mem_label_index->Vertices(label, vertices_.access(), view, storage, transaction);
|
||||
}
|
||||
|
||||
InMemoryLabelPropertyIndex::Iterable EdgeImportModeCache::Vertices(
|
||||
@ -37,11 +39,13 @@ InMemoryLabelPropertyIndex::Iterable EdgeImportModeCache::Vertices(
|
||||
Transaction *transaction) const {
|
||||
auto *mem_label_property_index =
|
||||
static_cast<InMemoryLabelPropertyIndex *>(in_memory_indices_.label_property_index_.get());
|
||||
return mem_label_property_index->Vertices(label, property, lower_bound, upper_bound, view, storage, transaction);
|
||||
return mem_label_property_index->Vertices(label, property, vertices_.access(), lower_bound, upper_bound, view,
|
||||
storage, transaction);
|
||||
}
|
||||
|
||||
bool EdgeImportModeCache::CreateIndex(LabelId label, PropertyId property,
|
||||
const std::optional<ParallelizedIndexCreationInfo> ¶llel_exec_info) {
|
||||
bool EdgeImportModeCache::CreateIndex(
|
||||
LabelId label, PropertyId property,
|
||||
const std::optional<durability::ParallelizedSchemaCreationInfo> &parallel_exec_info) {
|
||||
auto *mem_label_property_index =
|
||||
static_cast<InMemoryLabelPropertyIndex *>(in_memory_indices_.label_property_index_.get());
|
||||
bool res = mem_label_property_index->CreateIndex(label, property, vertices_.access(), parallel_exec_info);
|
||||
@ -51,8 +55,8 @@ bool EdgeImportModeCache::CreateIndex(LabelId label, PropertyId property,
|
||||
return res;
|
||||
}
|
||||
|
||||
bool EdgeImportModeCache::CreateIndex(LabelId label,
|
||||
const std::optional<ParallelizedIndexCreationInfo> &parallel_exec_info) {
|
||||
bool EdgeImportModeCache::CreateIndex(
|
||||
LabelId label, const std::optional<durability::ParallelizedSchemaCreationInfo> &parallel_exec_info) {
|
||||
auto *mem_label_index = static_cast<InMemoryLabelIndex *>(in_memory_indices_.label_index_.get());
|
||||
bool res = mem_label_index->CreateIndex(label, vertices_.access(), parallel_exec_info);
|
||||
if (res) {
|
||||
|
@ -42,9 +42,10 @@ class EdgeImportModeCache final {
|
||||
View view, Storage *storage, Transaction *transaction) const;
|
||||
|
||||
bool CreateIndex(LabelId label, PropertyId property,
|
||||
const std::optional<ParallelizedIndexCreationInfo> &parallel_exec_info = {});
|
||||
const std::optional<durability::ParallelizedSchemaCreationInfo> &parallel_exec_info = {});
|
||||
|
||||
bool CreateIndex(LabelId label, const std::optional<ParallelizedIndexCreationInfo> &parallel_exec_info = {});
|
||||
bool CreateIndex(LabelId label,
|
||||
const std::optional<durability::ParallelizedSchemaCreationInfo> &parallel_exec_info = {});
|
||||
|
||||
bool VerticesWithLabelPropertyScanned(LabelId label, PropertyId property) const;
|
||||
|
||||
|
@@ -17,10 +17,6 @@

namespace memgraph::storage {

/// TODO: andi. Too many copies, extract at one place
using ParallelizedIndexCreationInfo =
std::pair<std::vector<std::pair<Gid, uint64_t>> /*vertex_recovery_info*/, uint64_t /*thread_count*/>;

class DiskLabelPropertyIndex : public storage::LabelPropertyIndex {
public:
explicit DiskLabelPropertyIndex(const Config &config);
@@ -71,6 +71,37 @@

namespace memgraph::storage {

namespace {

auto FindEdges(const View view, EdgeTypeId edge_type, const VertexAccessor *from_vertex, VertexAccessor *to_vertex)
-> Result<EdgesVertexAccessorResult> {
auto use_out_edges = [](Vertex const *from_vertex, Vertex const *to_vertex) {
// Obtain the locks by `gid` order to avoid lock cycles.
auto guard_from = std::unique_lock{from_vertex->lock, std::defer_lock};
auto guard_to = std::unique_lock{to_vertex->lock, std::defer_lock};
if (from_vertex->gid < to_vertex->gid) {
guard_from.lock();
guard_to.lock();
} else if (from_vertex->gid > to_vertex->gid) {
guard_to.lock();
guard_from.lock();
} else {
// The vertices are the same vertex, only lock one.
guard_from.lock();
}

// With the potentially cheaper side FindEdges
const auto out_n = from_vertex->out_edges.size();
const auto in_n = to_vertex->in_edges.size();
return out_n <= in_n;
};

return use_out_edges(from_vertex->vertex_, to_vertex->vertex_) ? from_vertex->OutEdges(view, {edge_type}, to_vertex)
: to_vertex->InEdges(view, {edge_type}, from_vertex);
}

} // namespace
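The use_out_edges lambda above always takes the two vertex locks in gid order, so two concurrent calls on the same pair of vertices cannot deadlock. A standalone sketch of that ordering idiom with a plain struct instead of the real Vertex (std::scoped_lock would be an alternative, but this mirrors the explicit ordering in the diff):

#include <cstdint>
#include <iostream>
#include <mutex>
#include <thread>

struct Node {
  uint64_t gid;
  std::mutex lock;
};

// Lock both nodes without risking a lock cycle: lower gid first, and only one lock if they alias.
void LockPair(Node &a, Node &b) {
  auto guard_a = std::unique_lock{a.lock, std::defer_lock};
  auto guard_b = std::unique_lock{b.lock, std::defer_lock};
  if (a.gid < b.gid) {
    guard_a.lock();
    guard_b.lock();
  } else if (a.gid > b.gid) {
    guard_b.lock();
    guard_a.lock();
  } else {
    guard_a.lock();  // same node, lock once
  }
  std::cout << "locked " << a.gid << " and " << b.gid << "\n";
  // both guards release on scope exit
}

int main() {
  Node n1{1}, n2{2};
  std::thread t1(LockPair, std::ref(n1), std::ref(n2));
  std::thread t2(LockPair, std::ref(n2), std::ref(n1));
  t1.join();
  t2.join();
}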
|
||||
using OOMExceptionEnabler = utils::MemoryTracker::OutOfMemoryExceptionEnabler;
|
||||
|
||||
namespace {
|
||||
@ -944,11 +975,28 @@ Result<EdgeAccessor> DiskStorage::DiskAccessor::CreateEdge(VertexAccessor *from,
|
||||
transaction_.manyDeltasCache.Invalidate(from_vertex, edge_type, EdgeDirection::OUT);
|
||||
transaction_.manyDeltasCache.Invalidate(to_vertex, edge_type, EdgeDirection::IN);
|
||||
|
||||
if (storage_->config_.items.enable_schema_metadata) {
|
||||
storage_->stored_edge_types_.try_insert(edge_type);
|
||||
}
|
||||
storage_->edge_count_.fetch_add(1, std::memory_order_acq_rel);
|
||||
|
||||
return EdgeAccessor(edge, edge_type, from_vertex, to_vertex, storage_, &transaction_);
|
||||
}
|
||||
|
||||
std::optional<EdgeAccessor> DiskStorage::DiskAccessor::FindEdge(Gid gid, View view, EdgeTypeId edge_type,
|
||||
VertexAccessor *from_vertex,
|
||||
VertexAccessor *to_vertex) {
|
||||
auto res = FindEdges(view, edge_type, from_vertex, to_vertex);
|
||||
if (res.HasError()) return std::nullopt; // TODO: use a Result type
|
||||
|
||||
auto const it = std::ranges::find_if(
|
||||
res->edges, [gid](EdgeAccessor const &edge_accessor) { return edge_accessor.edge_.ptr->gid == gid; });
|
||||
|
||||
if (it == res->edges.end()) return std::nullopt; // TODO: use a Result type
|
||||
|
||||
return *it;
|
||||
}
|
||||
|
||||
Result<EdgeAccessor> DiskStorage::DiskAccessor::EdgeSetFrom(EdgeAccessor * /*edge*/, VertexAccessor * /*new_from*/) {
|
||||
MG_ASSERT(false, "EdgeSetFrom is currently only implemented for InMemory storage");
|
||||
return Error::NONEXISTENT_OBJECT;
|
||||
|
@ -121,6 +121,9 @@ class DiskStorage final : public Storage {
|
||||
|
||||
Result<EdgeAccessor> CreateEdge(VertexAccessor *from, VertexAccessor *to, EdgeTypeId edge_type) override;
|
||||
|
||||
std::optional<EdgeAccessor> FindEdge(Gid gid, View view, EdgeTypeId edge_type, VertexAccessor *from_vertex,
|
||||
VertexAccessor *to_vertex) override;
|
||||
|
||||
Result<EdgeAccessor> EdgeSetFrom(EdgeAccessor *edge, VertexAccessor *new_from) override;
|
||||
|
||||
Result<EdgeAccessor> EdgeSetTo(EdgeAccessor *edge, VertexAccessor *new_to) override;
|
||||
@ -313,12 +316,6 @@ class DiskStorage final : public Storage {
|
||||
|
||||
uint64_t CommitTimestamp(std::optional<uint64_t> desired_commit_timestamp = {});
|
||||
|
||||
auto CreateReplicationClient(const memgraph::replication::ReplicationClientConfig & /*config*/,
|
||||
const memgraph::replication::ReplicationEpoch * /*current_epoch*/)
|
||||
-> std::unique_ptr<ReplicationClient> override {
|
||||
throw utils::BasicException("Disk storage mode does not support replication.");
|
||||
}
|
||||
|
||||
std::unique_ptr<RocksDBStorage> kvstore_;
|
||||
DurableMetadata durable_metadata_;
|
||||
EdgeImportMode edge_import_status_{EdgeImportMode::INACTIVE};
|
||||
|
@ -9,8 +9,6 @@
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "storage/v2/durability/durability.hpp"
|
||||
|
||||
#include <pwd.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
@ -20,23 +18,29 @@
|
||||
#include <cstring>
|
||||
|
||||
#include <algorithm>
|
||||
#include <optional>
|
||||
#include <tuple>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "flags/all.hpp"
|
||||
#include "gflags/gflags.h"
|
||||
#include "replication/epoch.hpp"
|
||||
#include "storage/v2/durability/durability.hpp"
|
||||
#include "storage/v2/durability/metadata.hpp"
|
||||
#include "storage/v2/durability/paths.hpp"
|
||||
#include "storage/v2/durability/snapshot.hpp"
|
||||
#include "storage/v2/durability/wal.hpp"
|
||||
#include "storage/v2/inmemory/label_index.hpp"
|
||||
#include "storage/v2/inmemory/label_property_index.hpp"
|
||||
#include "storage/v2/inmemory/unique_constraints.hpp"
|
||||
#include "storage/v2/name_id_mapper.hpp"
|
||||
#include "utils/event_histogram.hpp"
|
||||
#include "utils/flag_validation.hpp"
|
||||
#include "utils/logging.hpp"
|
||||
#include "utils/memory_tracker.hpp"
|
||||
#include "utils/message.hpp"
|
||||
#include "utils/timer.hpp"
|
||||
|
||||
namespace memgraph::metrics {
|
||||
extern const Event SnapshotRecoveryLatency_us;
|
||||
} // namespace memgraph::metrics
|
||||
@ -96,6 +100,7 @@ std::vector<SnapshotDurabilityInfo> GetSnapshotFiles(const std::filesystem::path
|
||||
MG_ASSERT(!error_code, "Couldn't recover data because an error occurred: {}!", error_code.message());
|
||||
}
|
||||
|
||||
std::sort(snapshot_files.begin(), snapshot_files.end());
|
||||
return snapshot_files;
|
||||
}
|
||||
|
||||
@ -106,13 +111,17 @@ std::optional<std::vector<WalDurabilityInfo>> GetWalFiles(const std::filesystem:
|
||||
|
||||
std::vector<WalDurabilityInfo> wal_files;
|
||||
std::error_code error_code;
|
||||
// There could be multiple "current" WAL files, the "_current" tag just means that the previous session didn't
|
||||
// finalize. We cannot skip based on the name; we will be able to skip based on invalid data or the sequence number,
// so the actual current WAL will be skipped.
|
||||
for (const auto &item : std::filesystem::directory_iterator(wal_directory, error_code)) {
|
||||
if (!item.is_regular_file()) continue;
|
||||
try {
|
||||
auto info = ReadWalInfo(item.path());
|
||||
if ((uuid.empty() || info.uuid == uuid) && (!current_seq_num || info.seq_num < *current_seq_num))
|
||||
if ((uuid.empty() || info.uuid == uuid) && (!current_seq_num || info.seq_num < *current_seq_num)) {
|
||||
wal_files.emplace_back(info.seq_num, info.from_timestamp, info.to_timestamp, std::move(info.uuid),
|
||||
std::move(info.epoch_id), item.path());
|
||||
}
|
||||
} catch (const RecoveryFailure &e) {
|
||||
spdlog::warn("Failed to read {}", item.path());
|
||||
continue;
|
||||
@ -120,6 +129,7 @@ std::optional<std::vector<WalDurabilityInfo>> GetWalFiles(const std::filesystem:
|
||||
}
|
||||
MG_ASSERT(!error_code, "Couldn't recover data because an error occurred: {}!", error_code.message());
|
||||
|
||||
// Sort based on the sequence number, not the file name
|
||||
std::sort(wal_files.begin(), wal_files.end());
|
||||
return std::move(wal_files);
|
||||
}
|
||||
@ -128,15 +138,23 @@ std::optional<std::vector<WalDurabilityInfo>> GetWalFiles(const std::filesystem:
|
||||
// indices and constraints must be recovered after the data recovery is done
|
||||
// to ensure that the indices and constraints are consistent at the end of the
|
||||
// recovery process.
|
||||
void RecoverIndicesAndConstraints(const RecoveredIndicesAndConstraints &indices_constraints, Indices *indices,
|
||||
Constraints *constraints, utils::SkipList<Vertex> *vertices,
|
||||
NameIdMapper *name_id_mapper,
|
||||
const std::optional<ParallelizedIndexCreationInfo> ¶llel_exec_info) {
|
||||
|
||||
void RecoverConstraints(const RecoveredIndicesAndConstraints::ConstraintsMetadata &constraints_metadata,
|
||||
Constraints *constraints, utils::SkipList<Vertex> *vertices, NameIdMapper *name_id_mapper,
|
||||
const std::optional<ParallelizedSchemaCreationInfo> ¶llel_exec_info) {
|
||||
RecoverExistenceConstraints(constraints_metadata, constraints, vertices, name_id_mapper, parallel_exec_info);
|
||||
RecoverUniqueConstraints(constraints_metadata, constraints, vertices, name_id_mapper, parallel_exec_info);
|
||||
}
|
||||
|
||||
void RecoverIndicesAndStats(const RecoveredIndicesAndConstraints::IndicesMetadata &indices_metadata, Indices *indices,
|
||||
utils::SkipList<Vertex> *vertices, NameIdMapper *name_id_mapper,
|
||||
const std::optional<ParallelizedSchemaCreationInfo> ¶llel_exec_info) {
|
||||
spdlog::info("Recreating indices from metadata.");
|
||||
|
||||
// Recover label indices.
|
||||
spdlog::info("Recreating {} label indices from metadata.", indices_constraints.indices.label.size());
|
||||
spdlog::info("Recreating {} label indices from metadata.", indices_metadata.label.size());
|
||||
auto *mem_label_index = static_cast<InMemoryLabelIndex *>(indices->label_index_.get());
|
||||
for (const auto &item : indices_constraints.indices.label) {
|
||||
for (const auto &item : indices_metadata.label) {
|
||||
if (!mem_label_index->CreateIndex(item, vertices->access(), parallel_exec_info)) {
|
||||
throw RecoveryFailure("The label index must be created here!");
|
||||
}
|
||||
@ -145,9 +163,10 @@ void RecoverIndicesAndConstraints(const RecoveredIndicesAndConstraints &indices_
|
||||
spdlog::info("Label indices are recreated.");
|
||||
|
||||
spdlog::info("Recreating index statistics from metadata.");
|
||||
|
||||
// Recover label indices statistics.
|
||||
spdlog::info("Recreating {} label index statistics from metadata.", indices_constraints.indices.label_stats.size());
|
||||
for (const auto &item : indices_constraints.indices.label_stats) {
|
||||
spdlog::info("Recreating {} label index statistics from metadata.", indices_metadata.label_stats.size());
|
||||
for (const auto &item : indices_metadata.label_stats) {
|
||||
mem_label_index->SetIndexStats(item.first, item.second);
|
||||
spdlog::info("Statistics for index on :{} are recreated from metadata",
|
||||
name_id_mapper->IdToName(item.first.AsUint()));
|
||||
@ -155,10 +174,9 @@ void RecoverIndicesAndConstraints(const RecoveredIndicesAndConstraints &indices_
|
||||
spdlog::info("Label indices statistics are recreated.");
|
||||
|
||||
// Recover label+property indices.
|
||||
spdlog::info("Recreating {} label+property indices from metadata.",
|
||||
indices_constraints.indices.label_property.size());
|
||||
spdlog::info("Recreating {} label+property indices from metadata.", indices_metadata.label_property.size());
|
||||
auto *mem_label_property_index = static_cast<InMemoryLabelPropertyIndex *>(indices->label_property_index_.get());
|
||||
for (const auto &item : indices_constraints.indices.label_property) {
|
||||
for (const auto &item : indices_metadata.label_property) {
|
||||
if (!mem_label_property_index->CreateIndex(item.first, item.second, vertices->access(), parallel_exec_info))
|
||||
throw RecoveryFailure("The label+property index must be created here!");
|
||||
spdlog::info("Index on :{}({}) is recreated from metadata", name_id_mapper->IdToName(item.first.AsUint()),
|
||||
@ -168,8 +186,8 @@ void RecoverIndicesAndConstraints(const RecoveredIndicesAndConstraints &indices_
|
||||
|
||||
// Recover label+property indices statistics.
|
||||
spdlog::info("Recreating {} label+property indices statistics from metadata.",
|
||||
indices_constraints.indices.label_property_stats.size());
|
||||
for (const auto &item : indices_constraints.indices.label_property_stats) {
|
||||
indices_metadata.label_property_stats.size());
|
||||
for (const auto &item : indices_metadata.label_property_stats) {
|
||||
const auto label_id = item.first;
|
||||
const auto property_id = item.second.first;
|
||||
const auto &stats = item.second.second;
|
||||
@ -182,14 +200,20 @@ void RecoverIndicesAndConstraints(const RecoveredIndicesAndConstraints &indices_
|
||||
spdlog::info("Indices are recreated.");
|
||||
|
||||
spdlog::info("Recreating constraints from metadata.");
|
||||
// Recover existence constraints.
|
||||
spdlog::info("Recreating {} existence constraints from metadata.", indices_constraints.constraints.existence.size());
|
||||
for (const auto &[label, property] : indices_constraints.constraints.existence) {
|
||||
}
|
||||
|
||||
void RecoverExistenceConstraints(const RecoveredIndicesAndConstraints::ConstraintsMetadata &constraints_metadata,
|
||||
Constraints *constraints, utils::SkipList<Vertex> *vertices,
|
||||
NameIdMapper *name_id_mapper,
|
||||
const std::optional<ParallelizedSchemaCreationInfo> ¶llel_exec_info) {
|
||||
spdlog::info("Recreating {} existence constraints from metadata.", constraints_metadata.existence.size());
|
||||
for (const auto &[label, property] : constraints_metadata.existence) {
|
||||
if (constraints->existence_constraints_->ConstraintExists(label, property)) {
|
||||
throw RecoveryFailure("The existence constraint already exists!");
|
||||
}
|
||||
|
||||
if (auto violation = ExistenceConstraints::ValidateVerticesOnConstraint(vertices->access(), label, property);
|
||||
if (auto violation =
|
||||
ExistenceConstraints::ValidateVerticesOnConstraint(vertices->access(), label, property, parallel_exec_info);
|
||||
violation.has_value()) {
|
||||
throw RecoveryFailure("The existence constraint failed because it couldn't be validated!");
|
||||
}
|
||||
@ -199,38 +223,57 @@ void RecoverIndicesAndConstraints(const RecoveredIndicesAndConstraints &indices_
|
||||
name_id_mapper->IdToName(property.AsUint()));
|
||||
}
|
||||
spdlog::info("Existence constraints are recreated from metadata.");
|
||||
}
|
||||
|
||||
// Recover unique constraints.
|
||||
spdlog::info("Recreating {} unique constraints from metadata.", indices_constraints.constraints.unique.size());
|
||||
for (const auto &item : indices_constraints.constraints.unique) {
|
||||
void RecoverUniqueConstraints(const RecoveredIndicesAndConstraints::ConstraintsMetadata &constraints_metadata,
|
||||
Constraints *constraints, utils::SkipList<Vertex> *vertices, NameIdMapper *name_id_mapper,
|
||||
const std::optional<ParallelizedSchemaCreationInfo> ¶llel_exec_info) {
|
||||
spdlog::info("Recreating {} unique constraints from metadata.", constraints_metadata.unique.size());
|
||||
|
||||
for (const auto &[label, properties] : constraints_metadata.unique) {
|
||||
auto *mem_unique_constraints = static_cast<InMemoryUniqueConstraints *>(constraints->unique_constraints_.get());
|
||||
auto ret = mem_unique_constraints->CreateConstraint(item.first, item.second, vertices->access());
|
||||
auto ret = mem_unique_constraints->CreateConstraint(label, properties, vertices->access(), parallel_exec_info);
|
||||
if (ret.HasError() || ret.GetValue() != UniqueConstraints::CreationStatus::SUCCESS)
|
||||
throw RecoveryFailure("The unique constraint must be created here!");
|
||||
|
||||
std::vector<std::string> property_names;
|
||||
property_names.reserve(item.second.size());
|
||||
for (const auto &prop : item.second) {
|
||||
property_names.reserve(properties.size());
|
||||
for (const auto &prop : properties) {
|
||||
property_names.emplace_back(name_id_mapper->IdToName(prop.AsUint()));
|
||||
}
|
||||
const auto property_names_joined = utils::Join(property_names, ",");
|
||||
spdlog::info("Unique constraint on :{}({}) is recreated from metadata",
|
||||
name_id_mapper->IdToName(item.first.AsUint()), property_names_joined);
|
||||
spdlog::info("Unique constraint on :{}({}) is recreated from metadata", name_id_mapper->IdToName(label.AsUint()),
|
||||
property_names_joined);
|
||||
}
|
||||
spdlog::info("Unique constraints are recreated from metadata.");
|
||||
spdlog::info("Constraints are recreated from metadata.");
|
||||
}
|
||||
|
||||
std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_directory,
|
||||
const std::filesystem::path &wal_directory, std::string *uuid,
|
||||
ReplicationStorageState &repl_storage_state, utils::SkipList<Vertex> *vertices,
|
||||
utils::SkipList<Edge> *edges, std::atomic<uint64_t> *edge_count,
|
||||
NameIdMapper *name_id_mapper, Indices *indices, Constraints *constraints,
|
||||
const Config &config, uint64_t *wal_seq_num) {
|
||||
std::optional<ParallelizedSchemaCreationInfo> GetParallelExecInfo(const RecoveryInfo &recovery_info,
const Config &config) {
return config.durability.allow_parallel_schema_creation
? std::make_optional(ParallelizedSchemaCreationInfo{recovery_info.vertex_batches,
config.durability.recovery_thread_count})
: std::nullopt;
}

std::optional<ParallelizedSchemaCreationInfo> GetParallelExecInfoIndices(const RecoveryInfo &recovery_info,
const Config &config) {
return config.durability.allow_parallel_schema_creation || config.durability.allow_parallel_index_creation
? std::make_optional(ParallelizedSchemaCreationInfo{recovery_info.vertex_batches,
config.durability.recovery_thread_count})
: std::nullopt;
}
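These helpers only translate the durability flags into an optional work description: a populated optional means "recover in parallel over these batches with this many threads", an empty one means single-threaded recovery. A hedged standalone mock of the same gating, with a mirror struct rather than Memgraph's types:

#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <utility>
#include <vector>

struct ParallelInfo {  // mirrors ParallelizedSchemaCreationInfo in shape only
  std::vector<std::pair<uint64_t, uint64_t>> vertex_batches;  // (start gid, batch size)
  uint64_t thread_count;
};

std::optional<ParallelInfo> GetParallelExecInfo(bool allow_parallel,
                                                std::vector<std::pair<uint64_t, uint64_t>> batches,
                                                uint64_t thread_count) {
  return allow_parallel ? std::make_optional(ParallelInfo{std::move(batches), thread_count}) : std::nullopt;
}

int main() {
  auto info = GetParallelExecInfo(true, {{0, 1000}, {1000, 1000}}, 8);
  std::cout << (info ? "parallel recovery with " + std::to_string(info->thread_count) + " threads"
                     : std::string{"single-threaded recovery"})
            << "\n";
}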
|
||||
std::optional<RecoveryInfo> Recovery::RecoverData(std::string *uuid, ReplicationStorageState &repl_storage_state,
|
||||
utils::SkipList<Vertex> *vertices, utils::SkipList<Edge> *edges,
|
||||
std::atomic<uint64_t> *edge_count, NameIdMapper *name_id_mapper,
|
||||
Indices *indices, Constraints *constraints, const Config &config,
|
||||
uint64_t *wal_seq_num) {
|
||||
utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception;
|
||||
spdlog::info("Recovering persisted data using snapshot ({}) and WAL directory ({}).", snapshot_directory,
|
||||
wal_directory);
|
||||
if (!utils::DirExists(snapshot_directory) && !utils::DirExists(wal_directory)) {
|
||||
spdlog::info("Recovering persisted data using snapshot ({}) and WAL directory ({}).", snapshot_directory_,
|
||||
wal_directory_);
|
||||
if (!utils::DirExists(snapshot_directory_) && !utils::DirExists(wal_directory_)) {
|
||||
spdlog::warn(utils::MessageWithLink("Snapshot or WAL directory don't exist, there is nothing to recover.",
|
||||
"https://memgr.ph/durability"));
|
||||
return std::nullopt;
|
||||
@ -239,15 +282,13 @@ std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_di
|
||||
auto *const epoch_history = &repl_storage_state.history;
|
||||
utils::Timer timer;
|
||||
|
||||
auto snapshot_files = GetSnapshotFiles(snapshot_directory);
|
||||
auto snapshot_files = GetSnapshotFiles(snapshot_directory_);
|
||||
|
||||
RecoveryInfo recovery_info;
|
||||
RecoveredIndicesAndConstraints indices_constraints;
|
||||
std::optional<uint64_t> snapshot_timestamp;
|
||||
if (!snapshot_files.empty()) {
|
||||
spdlog::info("Try recovering from snapshot directory {}.", snapshot_directory);
|
||||
// Order the files by name
|
||||
std::sort(snapshot_files.begin(), snapshot_files.end());
|
||||
spdlog::info("Try recovering from snapshot directory {}.", wal_directory_);
|
||||
|
||||
// UUID used for durability is the UUID of the last snapshot file.
|
||||
*uuid = snapshot_files.back().uuid;
|
||||
@ -277,18 +318,17 @@ std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_di
|
||||
snapshot_timestamp = recovered_snapshot->snapshot_info.start_timestamp;
|
||||
repl_storage_state.epoch_.SetEpoch(std::move(recovered_snapshot->snapshot_info.epoch_id));
|
||||
|
||||
if (!utils::DirExists(wal_directory)) {
|
||||
const auto par_exec_info = config.durability.allow_parallel_index_creation
|
||||
? std::make_optional(std::make_pair(recovery_info.vertex_batches,
|
||||
config.durability.recovery_thread_count))
|
||||
: std::nullopt;
|
||||
RecoverIndicesAndConstraints(indices_constraints, indices, constraints, vertices, name_id_mapper, par_exec_info);
|
||||
if (!utils::DirExists(wal_directory_)) {
|
||||
RecoverIndicesAndStats(indices_constraints.indices, indices, vertices, name_id_mapper,
|
||||
GetParallelExecInfoIndices(recovery_info, config));
|
||||
RecoverConstraints(indices_constraints.constraints, constraints, vertices, name_id_mapper,
|
||||
GetParallelExecInfo(recovery_info, config));
|
||||
return recovered_snapshot->recovery_info;
|
||||
}
|
||||
} else {
|
||||
spdlog::info("No snapshot file was found, collecting information from WAL directory {}.", wal_directory);
|
||||
spdlog::info("No snapshot file was found, collecting information from WAL directory {}.", wal_directory_);
|
||||
std::error_code error_code;
|
||||
if (!utils::DirExists(wal_directory)) return std::nullopt;
|
||||
if (!utils::DirExists(wal_directory_)) return std::nullopt;
|
||||
// We use this smaller struct that contains only a subset of information
|
||||
// necessary for the rest of the recovery function.
|
||||
// Also, the struct is sorted primarily on the path it contains.
|
||||
@ -302,7 +342,7 @@ std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_di
|
||||
auto operator<=>(const WalFileInfo &) const = default;
|
||||
};
|
||||
std::vector<WalFileInfo> wal_files;
|
||||
for (const auto &item : std::filesystem::directory_iterator(wal_directory, error_code)) {
|
||||
for (const auto &item : std::filesystem::directory_iterator(wal_directory_, error_code)) {
|
||||
if (!item.is_regular_file()) continue;
|
||||
try {
|
||||
auto info = ReadWalInfo(item.path());
|
||||
@ -323,7 +363,7 @@ std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_di
|
||||
repl_storage_state.epoch_.SetEpoch(std::move(wal_files.back().epoch_id));
|
||||
}
|
||||
|
||||
auto maybe_wal_files = GetWalFiles(wal_directory, *uuid);
|
||||
auto maybe_wal_files = GetWalFiles(wal_directory_, *uuid);
|
||||
if (!maybe_wal_files) {
|
||||
spdlog::warn(
|
||||
utils::MessageWithLink("Couldn't get WAL file info from the WAL directory.", "https://memgr.ph/durability"));
|
||||
@ -409,12 +449,10 @@ std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_di
|
||||
spdlog::info("All necessary WAL files are loaded successfully.");
|
||||
}
|
||||
|
||||
const auto par_exec_info =
|
||||
config.durability.allow_parallel_index_creation && !recovery_info.vertex_batches.empty()
|
||||
? std::make_optional(std::make_pair(recovery_info.vertex_batches, config.durability.recovery_thread_count))
|
||||
: std::nullopt;
|
||||
|
||||
RecoverIndicesAndConstraints(indices_constraints, indices, constraints, vertices, name_id_mapper, par_exec_info);
|
||||
RecoverIndicesAndStats(indices_constraints.indices, indices, vertices, name_id_mapper,
|
||||
GetParallelExecInfoIndices(recovery_info, config));
|
||||
RecoverConstraints(indices_constraints.constraints, constraints, vertices, name_id_mapper,
|
||||
GetParallelExecInfo(recovery_info, config));
|
||||
|
||||
memgraph::metrics::Measure(memgraph::metrics::SnapshotRecoveryLatency_us,
|
||||
std::chrono::duration_cast<std::chrono::microseconds>(timer.Elapsed()).count());
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include "storage/v2/config.hpp"
|
||||
#include "storage/v2/constraints/constraints.hpp"
|
||||
#include "storage/v2/durability/metadata.hpp"
|
||||
#include "storage/v2/durability/recovery_type.hpp"
|
||||
#include "storage/v2/durability/wal.hpp"
|
||||
#include "storage/v2/edge.hpp"
|
||||
#include "storage/v2/indices/indices.hpp"
|
||||
@ -94,27 +95,50 @@ std::optional<std::vector<WalDurabilityInfo>> GetWalFiles(const std::filesystem:
|
||||
std::string_view uuid = "",
|
||||
std::optional<size_t> current_seq_num = {});
|
||||
|
||||
using ParallelizedIndexCreationInfo =
|
||||
std::pair<std::vector<std::pair<Gid, uint64_t>> /*vertex_recovery_info*/, uint64_t /*thread_count*/>;
|
||||
|
||||
// Helper function used to recover all discovered indices and constraints. The
|
||||
// indices and constraints must be recovered after the data recovery is done
|
||||
// to ensure that the indices and constraints are consistent at the end of the
|
||||
// Helper function used to recover all discovered indices. The
|
||||
// indices must be recovered after the data recovery is done
|
||||
// to ensure that the indices are consistent at the end of the
|
||||
// recovery process.
|
||||
/// @throw RecoveryFailure
|
||||
void RecoverIndicesAndConstraints(
|
||||
const RecoveredIndicesAndConstraints &indices_constraints, Indices *indices, Constraints *constraints,
|
||||
utils::SkipList<Vertex> *vertices, NameIdMapper *name_id_mapper,
|
||||
const std::optional<ParallelizedIndexCreationInfo> &parallel_exec_info = std::nullopt);
|
||||
void RecoverIndicesAndStats(const RecoveredIndicesAndConstraints::IndicesMetadata &indices_metadata, Indices *indices,
|
||||
utils::SkipList<Vertex> *vertices, NameIdMapper *name_id_mapper,
|
||||
const std::optional<ParallelizedSchemaCreationInfo> &parallel_exec_info = std::nullopt);
|
||||
|
||||
/// Recovers data either from a snapshot and/or WAL files.
|
||||
// Helper function used to recover all discovered constraints. The
|
||||
// constraints must be recovered after the data recovery is done
|
||||
// to ensure that the constraints are consistent at the end of the
|
||||
// recovery process.
|
||||
/// @throw RecoveryFailure
|
||||
/// @throw std::bad_alloc
|
||||
std::optional<RecoveryInfo> RecoverData(const std::filesystem::path &snapshot_directory,
|
||||
const std::filesystem::path &wal_directory, std::string *uuid,
|
||||
ReplicationStorageState &repl_storage_state, utils::SkipList<Vertex> *vertices,
|
||||
utils::SkipList<Edge> *edges, std::atomic<uint64_t> *edge_count,
|
||||
NameIdMapper *name_id_mapper, Indices *indices, Constraints *constraints,
|
||||
const Config &config, uint64_t *wal_seq_num);
|
||||
void RecoverConstraints(const RecoveredIndicesAndConstraints::ConstraintsMetadata &constraints_metadata,
|
||||
Constraints *constraints, utils::SkipList<Vertex> *vertices, NameIdMapper *name_id_mapper,
|
||||
const std::optional<ParallelizedSchemaCreationInfo> &parallel_exec_info = std::nullopt);
|
||||
|
||||
std::optional<ParallelizedSchemaCreationInfo> GetParallelExecInfo(const RecoveryInfo &recovery_info,
|
||||
const Config &config);
|
||||
|
||||
std::optional<ParallelizedSchemaCreationInfo> GetParallelExecInfoIndices(const RecoveryInfo &recovery_info,
|
||||
const Config &config);
|
||||
|
||||
void RecoverExistenceConstraints(const RecoveredIndicesAndConstraints::ConstraintsMetadata &, Constraints *,
|
||||
utils::SkipList<Vertex> *, NameIdMapper *,
|
||||
const std::optional<ParallelizedSchemaCreationInfo> &);
|
||||
|
||||
void RecoverUniqueConstraints(const RecoveredIndicesAndConstraints::ConstraintsMetadata &, Constraints *,
|
||||
utils::SkipList<Vertex> *, NameIdMapper *,
|
||||
const std::optional<ParallelizedSchemaCreationInfo> &);
|
||||
struct Recovery {
|
||||
public:
|
||||
/// Recovers data either from a snapshot and/or WAL files.
|
||||
/// @throw RecoveryFailure
|
||||
/// @throw std::bad_alloc
|
||||
std::optional<RecoveryInfo> RecoverData(std::string *uuid, ReplicationStorageState &repl_storage_state,
|
||||
utils::SkipList<Vertex> *vertices, utils::SkipList<Edge> *edges,
|
||||
std::atomic<uint64_t> *edge_count, NameIdMapper *name_id_mapper,
|
||||
Indices *indices, Constraints *constraints, const Config &config,
|
||||
uint64_t *wal_seq_num);
|
||||
|
||||
const std::filesystem::path snapshot_directory_;
|
||||
const std::filesystem::path wal_directory_;
|
||||
};
|
||||
|
||||
} // namespace memgraph::storage::durability
|
||||
|
@@ -38,14 +38,14 @@ struct RecoveryInfo {

/// Structure used to track indices and constraints during recovery.
struct RecoveredIndicesAndConstraints {
struct {
struct IndicesMetadata {
std::vector<LabelId> label;
std::vector<std::pair<LabelId, PropertyId>> label_property;
std::vector<std::pair<LabelId, LabelIndexStats>> label_stats;
std::vector<std::pair<LabelId, std::pair<PropertyId, LabelPropertyIndexStats>>> label_property_stats;
} indices;

struct {
struct ConstraintsMetadata {
std::vector<std::pair<LabelId, PropertyId>> existence;
std::vector<std::pair<LabelId, std::set<PropertyId>>> unique;
} constraints;
src/storage/v2/durability/recovery_type.hpp (new file, 23 lines)
@@ -0,0 +1,23 @@
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#pragma once

#include <utility>
#include <vector>
#include "storage/v2/id_types.hpp"

namespace memgraph::storage::durability {
struct ParallelizedSchemaCreationInfo {
std::vector<std::pair<Gid, uint64_t>> vertex_recovery_info;
uint64_t thread_count;
};
} // namespace memgraph::storage::durability
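ParallelizedSchemaCreationInfo pairs a batch list with a thread count; each batch is a (start Gid, vertex count) pair, which is exactly what do_per_thread_validation consumes earlier in the diff. A hedged sketch of how such batches could be derived from a vertex count and a batch size; this is purely illustrative and not Memgraph's batching code:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Build (start_id, batch_size) pairs covering [0, vertex_count).
std::vector<std::pair<uint64_t, uint64_t>> MakeBatches(uint64_t vertex_count, uint64_t items_per_batch) {
  std::vector<std::pair<uint64_t, uint64_t>> batches;
  for (uint64_t start = 0; start < vertex_count; start += items_per_batch) {
    batches.emplace_back(start, std::min(items_per_batch, vertex_count - start));
  }
  return batches;
}

int main() {
  const auto batches = MakeBatches(2'500'000, 1'000'000);
  for (const auto &[start, size] : batches) std::cout << start << " +" << size << "\n";  // three batches
}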
@@ -25,6 +25,13 @@

namespace memgraph::storage {

bool EdgeAccessor::IsDeleted() const {
if (!storage_->config_.items.properties_on_edges) {
return false;
}
return edge_.ptr->deleted;
}

bool EdgeAccessor::IsVisible(const View view) const {
bool exists = true;
bool deleted = true;

@@ -44,6 +44,8 @@ class EdgeAccessor final {
transaction_(transaction),
for_deleted_(for_deleted) {}

bool IsDeleted() const;

/// @return true if the object is visible from the current transaction
bool IsVisible(View view) const;
@ -17,6 +17,21 @@
|
||||
|
||||
namespace memgraph::storage {
|
||||
|
||||
void Indices::AbortEntries(LabelId labelId, std::span<Vertex *const> vertices, uint64_t exact_start_timestamp) const {
|
||||
static_cast<InMemoryLabelIndex *>(label_index_.get())->AbortEntries(labelId, vertices, exact_start_timestamp);
|
||||
}
|
||||
|
||||
void Indices::AbortEntries(PropertyId property, std::span<std::pair<PropertyValue, Vertex *> const> vertices,
|
||||
uint64_t exact_start_timestamp) const {
|
||||
static_cast<InMemoryLabelPropertyIndex *>(label_property_index_.get())
|
||||
->AbortEntries(property, vertices, exact_start_timestamp);
|
||||
}
|
||||
void Indices::AbortEntries(LabelId label, std::span<std::pair<PropertyValue, Vertex *> const> vertices,
|
||||
uint64_t exact_start_timestamp) const {
|
||||
static_cast<InMemoryLabelPropertyIndex *>(label_property_index_.get())
|
||||
->AbortEntries(label, vertices, exact_start_timestamp);
|
||||
}
|
||||
|
||||
void Indices::RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp) const {
|
||||
static_cast<InMemoryLabelIndex *>(label_index_.get())->RemoveObsoleteEntries(oldest_active_start_timestamp);
|
||||
static_cast<InMemoryLabelPropertyIndex *>(label_property_index_.get())
|
||||
@ -50,4 +65,8 @@ Indices::Indices(const Config &config, StorageMode storage_mode) {
|
||||
});
|
||||
}
|
||||
|
||||
Indices::IndexStats Indices::Analysis() const {
|
||||
return {static_cast<InMemoryLabelIndex *>(label_index_.get())->Analysis(),
|
||||
static_cast<InMemoryLabelPropertyIndex *>(label_property_index_.get())->Analysis()};
|
||||
}
|
||||
} // namespace memgraph::storage
|
||||
|
@ -12,6 +12,9 @@
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <span>
|
||||
|
||||
#include "storage/v2/id_types.hpp"
|
||||
#include "storage/v2/indices/label_index.hpp"
|
||||
#include "storage/v2/indices/label_property_index.hpp"
|
||||
#include "storage/v2/storage_mode.hpp"
|
||||
@ -32,6 +35,20 @@ struct Indices {
|
||||
/// TODO: unused in disk indices
|
||||
void RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp) const;
|
||||
|
||||
/// Surgical removal of entries that was inserted this transaction
|
||||
/// TODO: unused in disk indices
|
||||
void AbortEntries(LabelId labelId, std::span<Vertex *const> vertices, uint64_t exact_start_timestamp) const;
|
||||
void AbortEntries(PropertyId property, std::span<std::pair<PropertyValue, Vertex *> const> vertices,
|
||||
uint64_t exact_start_timestamp) const;
|
||||
void AbortEntries(LabelId label, std::span<std::pair<PropertyValue, Vertex *> const> vertices,
|
||||
uint64_t exact_start_timestamp) const;
|
||||
|
||||
struct IndexStats {
|
||||
std::vector<LabelId> label;
|
||||
LabelPropertyIndex::IndexStats property_label;
|
||||
};
|
||||
IndexStats Analysis() const;
|
||||
|
||||
// Indices are updated whenever an update occurs, instead of only on commit or
|
||||
// advance command. This is necessary because we want indices to support `NEW`
|
||||
// view for use in Merge.
|
||||
|
@ -11,6 +11,7 @@
|
||||
|
||||
#include <thread>
|
||||
#include "storage/v2/delta.hpp"
|
||||
#include "storage/v2/durability/recovery_type.hpp"
|
||||
#include "storage/v2/mvcc.hpp"
|
||||
#include "storage/v2/transaction.hpp"
|
||||
#include "storage/v2/vertex.hpp"
|
||||
@ -20,9 +21,6 @@
|
||||
|
||||
namespace memgraph::storage {
|
||||
|
||||
using ParallelizedIndexCreationInfo =
|
||||
std::pair<std::vector<std::pair<Gid, uint64_t>> /*vertex_recovery_info*/, uint64_t /*thread_count*/>;
|
||||
|
||||
/// Traverses deltas visible from transaction with start timestamp greater than
|
||||
/// the provided timestamp, and calls the provided callback function for each
|
||||
/// delta. If the callback ever returns true, traversal is stopped and the
|
||||
@ -259,11 +257,12 @@ inline void CreateIndexOnSingleThread(utils::SkipList<Vertex>::Accessor &vertice
|
||||
template <typename TIndex, typename TIndexKey, typename TSKiplistIter, typename TFunc>
|
||||
inline void CreateIndexOnMultipleThreads(utils::SkipList<Vertex>::Accessor &vertices, TSKiplistIter skiplist_iter,
|
||||
TIndex &index, TIndexKey key,
|
||||
const ParallelizedIndexCreationInfo &parallel_exec_info, const TFunc &func) {
|
||||
const durability::ParallelizedSchemaCreationInfo ¶llel_exec_info,
|
||||
const TFunc &func) {
|
||||
utils::MemoryTracker::OutOfMemoryExceptionEnabler oom_exception;
|
||||
|
||||
const auto &vertex_batches = parallel_exec_info.first;
|
||||
const auto thread_count = std::min(parallel_exec_info.second, vertex_batches.size());
|
||||
const auto &vertex_batches = parallel_exec_info.vertex_recovery_info;
|
||||
const auto thread_count = std::min(parallel_exec_info.thread_count, vertex_batches.size());
|
||||
|
||||
MG_ASSERT(!vertex_batches.empty(),
|
||||
"The size of batches should always be greater than zero if you want to use the parallel version of index "
|
||||
|
@@ -19,6 +19,11 @@ namespace memgraph::storage {

class LabelPropertyIndex {
public:
struct IndexStats {
std::map<LabelId, std::vector<PropertyId>> l2p;
std::map<PropertyId, std::vector<LabelId>> p2l;
};

LabelPropertyIndex() = default;
LabelPropertyIndex(const LabelPropertyIndex &) = delete;
LabelPropertyIndex(LabelPropertyIndex &&) = delete;
@ -10,8 +10,12 @@
|
||||
// licenses/APL.txt.
|
||||
|
||||
#include "storage/v2/inmemory/label_index.hpp"
|
||||
|
||||
#include <span>
|
||||
|
||||
#include "storage/v2/constraints/constraints.hpp"
|
||||
#include "storage/v2/indices/indices_utils.hpp"
|
||||
#include "storage/v2/inmemory/storage.hpp"
|
||||
|
||||
namespace memgraph::storage {
|
||||
|
||||
@ -22,8 +26,9 @@ void InMemoryLabelIndex::UpdateOnAddLabel(LabelId added_label, Vertex *vertex_af
|
||||
acc.insert(Entry{vertex_after_update, tx.start_timestamp});
|
||||
}
|
||||
|
||||
bool InMemoryLabelIndex::CreateIndex(LabelId label, utils::SkipList<Vertex>::Accessor vertices,
|
||||
const std::optional<ParallelizedIndexCreationInfo> &parallel_exec_info) {
|
||||
bool InMemoryLabelIndex::CreateIndex(
|
||||
LabelId label, utils::SkipList<Vertex>::Accessor vertices,
|
||||
const std::optional<durability::ParallelizedSchemaCreationInfo> &parallel_exec_info) {
|
||||
const auto create_index_seq = [this](LabelId label, utils::SkipList<Vertex>::Accessor &vertices,
|
||||
std::map<LabelId, utils::SkipList<Entry>>::iterator it) {
|
||||
using IndexAccessor = decltype(it->second.access());
|
||||
@ -38,7 +43,7 @@ bool InMemoryLabelIndex::CreateIndex(LabelId label, utils::SkipList<Vertex>::Acc
|
||||
|
||||
const auto create_index_par = [this](LabelId label, utils::SkipList<Vertex>::Accessor &vertices,
|
||||
std::map<LabelId, utils::SkipList<Entry>>::iterator label_it,
|
||||
const ParallelizedIndexCreationInfo ¶llel_exec_info) {
|
||||
const durability::ParallelizedSchemaCreationInfo ¶llel_exec_info) {
|
||||
using IndexAccessor = decltype(label_it->second.access());
|
||||
|
||||
CreateIndexOnMultipleThreads(vertices, label_it, index_, label, parallel_exec_info,
|
||||
@ -96,9 +101,23 @@ void InMemoryLabelIndex::RemoveObsoleteEntries(uint64_t oldest_active_start_time
|
||||
}
|
||||
}
|
||||
|
||||
InMemoryLabelIndex::Iterable::Iterable(utils::SkipList<Entry>::Accessor index_accessor, LabelId label, View view,
|
||||
Storage *storage, Transaction *transaction)
|
||||
: index_accessor_(std::move(index_accessor)),
|
||||
void InMemoryLabelIndex::AbortEntries(LabelId labelId, std::span<Vertex *const> vertices,
|
||||
uint64_t exact_start_timestamp) {
|
||||
auto const it = index_.find(labelId);
|
||||
if (it == index_.end()) return;
|
||||
|
||||
auto &label_storage = it->second;
|
||||
auto vertices_acc = label_storage.access();
|
||||
for (auto *vertex : vertices) {
|
||||
vertices_acc.remove(Entry{vertex, exact_start_timestamp});
|
||||
}
|
||||
}
|
||||
|
||||
InMemoryLabelIndex::Iterable::Iterable(utils::SkipList<Entry>::Accessor index_accessor,
|
||||
utils::SkipList<Vertex>::ConstAccessor vertices_accessor, LabelId label,
|
||||
View view, Storage *storage, Transaction *transaction)
|
||||
: pin_accessor_(std::move(vertices_accessor)),
|
||||
index_accessor_(std::move(index_accessor)),
|
||||
label_(label),
|
||||
view_(view),
|
||||
storage_(storage),
|
||||
@ -147,9 +166,21 @@ void InMemoryLabelIndex::RunGC() {
|
||||
|
||||
InMemoryLabelIndex::Iterable InMemoryLabelIndex::Vertices(LabelId label, View view, Storage *storage,
|
||||
Transaction *transaction) {
|
||||
DMG_ASSERT(storage->storage_mode_ == StorageMode::IN_MEMORY_TRANSACTIONAL ||
|
||||
storage->storage_mode_ == StorageMode::IN_MEMORY_ANALYTICAL,
|
||||
"LabelIndex trying to access InMemory vertices from OnDisk!");
|
||||
auto vertices_acc = static_cast<InMemoryStorage const *>(storage)->vertices_.access();
|
||||
const auto it = index_.find(label);
|
||||
MG_ASSERT(it != index_.end(), "Index for label {} doesn't exist", label.AsUint());
|
||||
return {it->second.access(), label, view, storage, transaction};
|
||||
return {it->second.access(), std::move(vertices_acc), label, view, storage, transaction};
|
||||
}
|
||||
|
||||
InMemoryLabelIndex::Iterable InMemoryLabelIndex::Vertices(
|
||||
LabelId label, memgraph::utils::SkipList<memgraph::storage::Vertex>::ConstAccessor vertices_acc, View view,
|
||||
Storage *storage, Transaction *transaction) {
|
||||
const auto it = index_.find(label);
|
||||
MG_ASSERT(it != index_.end(), "Index for label {} doesn't exist", label.AsUint());
|
||||
return {it->second.access(), std::move(vertices_acc), label, view, storage, transaction};
|
||||
}
|
||||
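// A note on the two overloads above (a sketch of the intended usage, inferred from the diff rather
// than stated in it): the extra utils::SkipList<Vertex>::ConstAccessor is stored as pin_accessor_
// inside the Iterable, which presumably keeps the vertex skiplist pinned for as long as the index
// is being iterated. A caller that already holds such an accessor can pass it in:
//
//   auto vertices_acc = static_cast<InMemoryStorage const *>(storage)->vertices_.access();
//   auto iterable = label_index.Vertices(label, std::move(vertices_acc), view, storage, transaction);
//
// while the overload without the accessor parameter now acquires one internally, as shown above.
// (label_index and the exact call site are hypothetical names used only for illustration.)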

void InMemoryLabelIndex::SetIndexStats(const storage::LabelId &label, const storage::LabelIndexStats &stats) {
@ -187,4 +218,12 @@ bool InMemoryLabelIndex::DeleteIndexStats(const storage::LabelId &label) {
  return false;
}

std::vector<LabelId> InMemoryLabelIndex::Analysis() const {
  std::vector<LabelId> res;
  res.reserve(index_.size());
  for (const auto &[label, _] : index_) {
    res.emplace_back(label);
  }
  return res;
}
} // namespace memgraph::storage
@ -11,7 +11,10 @@

#pragma once

#include <span>

#include "storage/v2/constraints/constraints.hpp"
#include "storage/v2/durability/recovery_type.hpp"
#include "storage/v2/indices/label_index.hpp"
#include "storage/v2/indices/label_index_stats.hpp"
#include "storage/v2/vertex.hpp"
@ -20,9 +23,6 @@

namespace memgraph::storage {

using ParallelizedIndexCreationInfo =
    std::pair<std::vector<std::pair<Gid, uint64_t>> /*vertex_recovery_info*/, uint64_t /*thread_count*/>;

class InMemoryLabelIndex : public storage::LabelIndex {
 private:
  struct Entry {
@ -45,7 +45,7 @@ class InMemoryLabelIndex : public storage::LabelIndex {

  /// @throw std::bad_alloc
  bool CreateIndex(LabelId label, utils::SkipList<Vertex>::Accessor vertices,
                   const std::optional<ParallelizedIndexCreationInfo> &parallel_exec_info);
                   const std::optional<durability::ParallelizedSchemaCreationInfo> &parallel_exec_info);

  /// Returns false if there was no index to drop
  bool DropIndex(LabelId label) override;
@ -56,10 +56,15 @@ class InMemoryLabelIndex : public storage::LabelIndex {

  void RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp);

  /// Surgical removal of entries that were inserted in this transaction
  void AbortEntries(LabelId labelId, std::span<Vertex *const> vertices, uint64_t exact_start_timestamp);

  std::vector<LabelId> Analysis() const;

  class Iterable {
   public:
    Iterable(utils::SkipList<Entry>::Accessor index_accessor, LabelId label, View view, Storage *storage,
             Transaction *transaction);
    Iterable(utils::SkipList<Entry>::Accessor index_accessor, utils::SkipList<Vertex>::ConstAccessor vertices_accessor,
             LabelId label, View view, Storage *storage, Transaction *transaction);

    class Iterator {
     public:
@ -85,6 +90,7 @@ class InMemoryLabelIndex : public storage::LabelIndex {
    Iterator end() { return {this, index_accessor_.end()}; }

   private:
    utils::SkipList<Vertex>::ConstAccessor pin_accessor_;
    utils::SkipList<Entry>::Accessor index_accessor_;
    LabelId label_;
    View view_;
@ -98,6 +104,9 @@ class InMemoryLabelIndex : public storage::LabelIndex {

  Iterable Vertices(LabelId label, View view, Storage *storage, Transaction *transaction);

  Iterable Vertices(LabelId label, memgraph::utils::SkipList<memgraph::storage::Vertex>::ConstAccessor vertices_acc,
                    View view, Storage *storage, Transaction *transaction);

  void SetIndexStats(const storage::LabelId &label, const storage::LabelIndexStats &stats);

  std::optional<storage::LabelIndexStats> GetIndexStats(const storage::LabelId &label) const;
@ -12,6 +12,8 @@
#include "storage/v2/inmemory/label_property_index.hpp"
#include "storage/v2/constraints/constraints.hpp"
#include "storage/v2/indices/indices_utils.hpp"
#include "storage/v2/inmemory/storage.hpp"
#include "utils/logging.hpp"

namespace memgraph::storage {

@ -33,9 +35,9 @@ bool InMemoryLabelPropertyIndex::Entry::operator<(const PropertyValue &rhs) cons

bool InMemoryLabelPropertyIndex::Entry::operator==(const PropertyValue &rhs) const { return value == rhs; }

bool InMemoryLabelPropertyIndex::CreateIndex(LabelId label, PropertyId property,
                                             utils::SkipList<Vertex>::Accessor vertices,
                                             const std::optional<ParallelizedIndexCreationInfo> &parallel_exec_info) {
bool InMemoryLabelPropertyIndex::CreateIndex(
    LabelId label, PropertyId property, utils::SkipList<Vertex>::Accessor vertices,
    const std::optional<durability::ParallelizedSchemaCreationInfo> &parallel_exec_info) {
  spdlog::trace("Vertices size when creating index: {}", vertices.size());
  auto create_index_seq = [this](LabelId label, PropertyId property, utils::SkipList<Vertex>::Accessor &vertices,
                                 std::map<std::pair<LabelId, PropertyId>, utils::SkipList<Entry>>::iterator it) {
@ -52,7 +54,7 @@ bool InMemoryLabelPropertyIndex::CreateIndex(LabelId label, PropertyId property,
  auto create_index_par =
      [this](LabelId label, PropertyId property, utils::SkipList<Vertex>::Accessor &vertices,
             std::map<std::pair<LabelId, PropertyId>, utils::SkipList<Entry>>::iterator label_property_it,
             const ParallelizedIndexCreationInfo &parallel_exec_info) {
             const durability::ParallelizedSchemaCreationInfo &parallel_exec_info) {
        using IndexAccessor = decltype(label_property_it->second.access());

        CreateIndexOnMultipleThreads(
@ -101,11 +103,12 @@ void InMemoryLabelPropertyIndex::UpdateOnSetProperty(PropertyId property, const
    return;
  }

  if (!indices_by_property_.contains(property)) {
  auto index = indices_by_property_.find(property);
  if (index == indices_by_property_.end()) {
    return;
  }

  for (const auto &[_, storage] : indices_by_property_.at(property)) {
  for (const auto &[_, storage] : index->second) {
    auto acc = storage->access();
    acc.insert(Entry{value, vertex, tx.start_timestamp});
  }
@ -220,12 +223,14 @@ const PropertyValue kSmallestMap = PropertyValue(std::map<std::string, PropertyV
const PropertyValue kSmallestTemporalData =
    PropertyValue(TemporalData{static_cast<TemporalType>(0), std::numeric_limits<int64_t>::min()});

InMemoryLabelPropertyIndex::Iterable::Iterable(utils::SkipList<Entry>::Accessor index_accessor, LabelId label,
InMemoryLabelPropertyIndex::Iterable::Iterable(utils::SkipList<Entry>::Accessor index_accessor,
                                               utils::SkipList<Vertex>::ConstAccessor vertices_accessor, LabelId label,
                                               PropertyId property,
                                               const std::optional<utils::Bound<PropertyValue>> &lower_bound,
                                               const std::optional<utils::Bound<PropertyValue>> &upper_bound, View view,
                                               Storage *storage, Transaction *transaction)
    : index_accessor_(std::move(index_accessor)),
    : pin_accessor_(std::move(vertices_accessor)),
      index_accessor_(std::move(index_accessor)),
      label_(label),
      property_(property),
      lower_bound_(lower_bound),
@ -428,9 +433,57 @@ InMemoryLabelPropertyIndex::Iterable InMemoryLabelPropertyIndex::Vertices(
    LabelId label, PropertyId property, const std::optional<utils::Bound<PropertyValue>> &lower_bound,
    const std::optional<utils::Bound<PropertyValue>> &upper_bound, View view, Storage *storage,
    Transaction *transaction) {
  DMG_ASSERT(storage->storage_mode_ == StorageMode::IN_MEMORY_TRANSACTIONAL ||
                 storage->storage_mode_ == StorageMode::IN_MEMORY_ANALYTICAL,
             "PropertyLabel index trying to access InMemory vertices from OnDisk!");
  auto vertices_acc = static_cast<InMemoryStorage const *>(storage)->vertices_.access();
  auto it = index_.find({label, property});
  MG_ASSERT(it != index_.end(), "Index for label {} and property {} doesn't exist", label.AsUint(), property.AsUint());
  return {it->second.access(), label, property, lower_bound, upper_bound, view, storage, transaction};
  return {it->second.access(), std::move(vertices_acc), label, property, lower_bound, upper_bound, view, storage,
          transaction};
}

InMemoryLabelPropertyIndex::Iterable InMemoryLabelPropertyIndex::Vertices(
    LabelId label, PropertyId property,
    memgraph::utils::SkipList<memgraph::storage::Vertex>::ConstAccessor vertices_acc,
    const std::optional<utils::Bound<PropertyValue>> &lower_bound,
    const std::optional<utils::Bound<PropertyValue>> &upper_bound, View view, Storage *storage,
    Transaction *transaction) {
  auto it = index_.find({label, property});
  MG_ASSERT(it != index_.end(), "Index for label {} and property {} doesn't exist", label.AsUint(), property.AsUint());
  return {it->second.access(), std::move(vertices_acc), label, property, lower_bound, upper_bound, view, storage,
          transaction};
}

void InMemoryLabelPropertyIndex::AbortEntries(PropertyId property,
                                              std::span<std::pair<PropertyValue, Vertex *> const> vertices,
                                              uint64_t exact_start_timestamp) {
  auto const it = indices_by_property_.find(property);
  if (it == indices_by_property_.end()) return;

  auto &indices = it->second;
  for (const auto &[_, index] : indices) {
    auto index_acc = index->access();
    for (auto const &[value, vertex] : vertices) {
      index_acc.remove(Entry{value, vertex, exact_start_timestamp});
    }
  }
}

void InMemoryLabelPropertyIndex::AbortEntries(LabelId label,
                                              std::span<std::pair<PropertyValue, Vertex *> const> vertices,
                                              uint64_t exact_start_timestamp) {
  for (auto &[label_prop, storage] : index_) {
    if (label_prop.first != label) {
      continue;
    }

    auto index_acc = storage.access();
    for (const auto &[property, vertex] : vertices) {
      if (!property.IsNull()) {
        index_acc.remove(Entry{property, vertex, exact_start_timestamp});
      }
    }
  }
}
} // namespace memgraph::storage
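// A short worked example of the abort path above (illustration only, the values are made up): if a
// transaction with start timestamp 42 inserted index entries for vertex v with value "Alice" and
// then aborts, AbortEntries is called with exact_start_timestamp == 42 and removes exactly
// Entry{"Alice", v, 42} from every affected skiplist; entries stamped with other start timestamps,
// i.e. written by other transactions, are left untouched.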
@ -11,18 +11,19 @@

#pragma once

#include <span>

#include "storage/v2/constraints/constraints.hpp"
#include "storage/v2/durability/recovery_type.hpp"
#include "storage/v2/id_types.hpp"
#include "storage/v2/indices/label_property_index.hpp"
#include "storage/v2/indices/label_property_index_stats.hpp"
#include "storage/v2/property_value.hpp"
#include "utils/rw_lock.hpp"
#include "utils/synchronized.hpp"

namespace memgraph::storage {

/// TODO: andi. Too many copies, extract at one place
using ParallelizedIndexCreationInfo =
    std::pair<std::vector<std::pair<Gid, uint64_t>> /*vertex_recovery_info*/, uint64_t /*thread_count*/>;

class InMemoryLabelPropertyIndex : public storage::LabelPropertyIndex {
 private:
  struct Entry {
@ -42,7 +43,7 @@ class InMemoryLabelPropertyIndex : public storage::LabelPropertyIndex {

  /// @throw std::bad_alloc
  bool CreateIndex(LabelId label, PropertyId property, utils::SkipList<Vertex>::Accessor vertices,
                   const std::optional<ParallelizedIndexCreationInfo> &parallel_exec_info);
                   const std::optional<durability::ParallelizedSchemaCreationInfo> &parallel_exec_info);

  /// @throw std::bad_alloc
  void UpdateOnAddLabel(LabelId added_label, Vertex *vertex_after_update, const Transaction &tx) override;
@ -61,10 +62,25 @@ class InMemoryLabelPropertyIndex : public storage::LabelPropertyIndex {

  void RemoveObsoleteEntries(uint64_t oldest_active_start_timestamp);

  void AbortEntries(PropertyId property, std::span<std::pair<PropertyValue, Vertex *> const> vertices,
                    uint64_t exact_start_timestamp);
  void AbortEntries(LabelId label, std::span<std::pair<PropertyValue, Vertex *> const> vertices,
                    uint64_t exact_start_timestamp);

  IndexStats Analysis() const {
    IndexStats res{};
    for (const auto &[lp, _] : index_) {
      const auto &[label, property] = lp;
      res.l2p[label].emplace_back(property);
      res.p2l[property].emplace_back(label);
    }
    return res;
  }

  class Iterable {
   public:
    Iterable(utils::SkipList<Entry>::Accessor index_accessor, LabelId label, PropertyId property,
             const std::optional<utils::Bound<PropertyValue>> &lower_bound,
    Iterable(utils::SkipList<Entry>::Accessor index_accessor, utils::SkipList<Vertex>::ConstAccessor vertices_accessor,
             LabelId label, PropertyId property, const std::optional<utils::Bound<PropertyValue>> &lower_bound,
             const std::optional<utils::Bound<PropertyValue>> &upper_bound, View view, Storage *storage,
             Transaction *transaction);

@ -92,6 +108,7 @@ class InMemoryLabelPropertyIndex : public storage::LabelPropertyIndex {
    Iterator end();

   private:
    utils::SkipList<Vertex>::ConstAccessor pin_accessor_;
    utils::SkipList<Entry>::Accessor index_accessor_;
    LabelId label_;
    PropertyId property_;
@ -131,6 +148,12 @@ class InMemoryLabelPropertyIndex : public storage::LabelPropertyIndex {
                    const std::optional<utils::Bound<PropertyValue>> &upper_bound, View view, Storage *storage,
                    Transaction *transaction);

  Iterable Vertices(LabelId label, PropertyId property,
                    memgraph::utils::SkipList<memgraph::storage::Vertex>::ConstAccessor vertices_acc,
                    const std::optional<utils::Bound<PropertyValue>> &lower_bound,
                    const std::optional<utils::Bound<PropertyValue>> &upper_bound, View view, Storage *storage,
                    Transaction *transaction);

 private:
  std::map<std::pair<LabelId, PropertyId>, utils::SkipList<Entry>> index_;
  std::unordered_map<PropertyId, std::unordered_map<LabelId, utils::SkipList<Entry> *>> indices_by_property_;
240
src/storage/v2/inmemory/replication/recovery.cpp
Normal file
@ -0,0 +1,240 @@
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

#include "storage/v2/inmemory/replication/recovery.hpp"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <type_traits>
#include "storage/v2/durability/durability.hpp"
#include "storage/v2/inmemory/storage.hpp"
#include "storage/v2/replication/recovery.hpp"
#include "utils/on_scope_exit.hpp"
#include "utils/variant_helpers.hpp"

namespace memgraph::storage {

// Handler for transferring the current WAL file whose data is
// contained in the internal buffer and the file.
class InMemoryCurrentWalHandler {
 public:
  explicit InMemoryCurrentWalHandler(InMemoryStorage const *storage, rpc::Client &rpc_client);
  void AppendFilename(const std::string &filename);

  void AppendSize(size_t size);

  void AppendFileData(utils::InputFile *file);

  void AppendBufferData(const uint8_t *buffer, size_t buffer_size);

  /// @throw rpc::RpcFailedException
  replication::CurrentWalRes Finalize();

 private:
  rpc::Client::StreamHandler<replication::CurrentWalRpc> stream_;
};

////// CurrentWalHandler //////
InMemoryCurrentWalHandler::InMemoryCurrentWalHandler(InMemoryStorage const *storage, rpc::Client &rpc_client)
    : stream_(rpc_client.Stream<replication::CurrentWalRpc>(storage->id())) {}

void InMemoryCurrentWalHandler::AppendFilename(const std::string &filename) {
  replication::Encoder encoder(stream_.GetBuilder());
  encoder.WriteString(filename);
}

void InMemoryCurrentWalHandler::AppendSize(const size_t size) {
  replication::Encoder encoder(stream_.GetBuilder());
  encoder.WriteUint(size);
}

void InMemoryCurrentWalHandler::AppendFileData(utils::InputFile *file) {
  replication::Encoder encoder(stream_.GetBuilder());
  encoder.WriteFileData(file);
}

void InMemoryCurrentWalHandler::AppendBufferData(const uint8_t *buffer, const size_t buffer_size) {
  replication::Encoder encoder(stream_.GetBuilder());
  encoder.WriteBuffer(buffer, buffer_size);
}

replication::CurrentWalRes InMemoryCurrentWalHandler::Finalize() { return stream_.AwaitResponse(); }

////// ReplicationClient Helpers //////
replication::WalFilesRes TransferWalFiles(std::string db_name, rpc::Client &client,
                                          const std::vector<std::filesystem::path> &wal_files) {
  MG_ASSERT(!wal_files.empty(), "Wal files list is empty!");
  auto stream = client.Stream<replication::WalFilesRpc>(std::move(db_name), wal_files.size());
  replication::Encoder encoder(stream.GetBuilder());
  for (const auto &wal : wal_files) {
    spdlog::debug("Sending wal file: {}", wal);
    encoder.WriteFile(wal);
  }
  return stream.AwaitResponse();
}

replication::SnapshotRes TransferSnapshot(std::string db_name, rpc::Client &client, const std::filesystem::path &path) {
  auto stream = client.Stream<replication::SnapshotRpc>(std::move(db_name));
  replication::Encoder encoder(stream.GetBuilder());
  encoder.WriteFile(path);
  return stream.AwaitResponse();
}

uint64_t ReplicateCurrentWal(const InMemoryStorage *storage, rpc::Client &client, durability::WalFile const &wal_file) {
  InMemoryCurrentWalHandler stream{storage, client};
  stream.AppendFilename(wal_file.Path().filename());
  utils::InputFile file;
  MG_ASSERT(file.Open(wal_file.Path()), "Failed to open current WAL file at {}!", wal_file.Path());
  const auto [buffer, buffer_size] = wal_file.CurrentFileBuffer();
  stream.AppendSize(file.GetSize() + buffer_size);
  stream.AppendFileData(&file);
  stream.AppendBufferData(buffer, buffer_size);
  auto response = stream.Finalize();
  return response.current_commit_timestamp;
}

/// This method tries to find the optimal path for recovering a single replica.
/// Based on the last commit transferred to the replica it tries to update the
/// replica using durability files - WALs and Snapshots. WAL files are much
/// smaller in size as they contain only the Deltas (changes) made during the
/// transactions while Snapshots contain all the data. For that reason we prefer
/// WALs as much as possible. As the WAL file that is currently being updated
/// can change during the process we ignore it as much as possible. Also, it
/// uses the transaction lock, so locking it can be really expensive. After we
/// fetch the list of finalized WALs, we try to find the longest chain of
/// sequential WALs, starting from the latest one, that will update the replica
/// with all the missed updates. If the WAL chain cannot be created, the replica
/// is behind by a lot, so we use the regular recovery process: we send the latest
/// snapshot and all the necessary WAL files, starting from the newest WAL that
/// contains a timestamp before the snapshot. If we registered the existence of
/// the current WAL, we add the sequence number we read from it to the recovery
/// process. After all the other steps are finished, if the current WAL still has
/// the same sequence number, it's the same WAL we read while fetching the
/// recovery steps, so we can safely send it to the replica.
/// We assume that the property of preserving at least 1 WAL before the snapshot
/// is satisfied as we extract the timestamp information from it.
std::vector<RecoveryStep> GetRecoverySteps(uint64_t replica_commit, utils::FileRetainer::FileLocker *file_locker,
                                           const InMemoryStorage *storage) {
  std::vector<RecoveryStep> recovery_steps;
  auto locker_acc = file_locker->Access();

  // First check if we can recover using the current wal file only,
  // otherwise save the seq_num of the current wal file.
  // This lock is also necessary to force the missed transaction to finish.
  std::optional<uint64_t> current_wal_seq_num;
  std::optional<uint64_t> current_wal_from_timestamp;

  std::unique_lock transaction_guard(
      storage->engine_lock_);  // Hold the storage lock so the current wal file cannot be changed
  (void)locker_acc.AddPath(storage->recovery_.wal_directory_);  // Protect all WALs from being deleted

  if (storage->wal_file_) {
    current_wal_seq_num.emplace(storage->wal_file_->SequenceNumber());
    current_wal_from_timestamp.emplace(storage->wal_file_->FromTimestamp());
    // No need to hold the lock since the current WAL is present and we can simply skip them
    transaction_guard.unlock();
  }

  // Read in finalized WAL files (excluding the current/active WAL)
  utils::OnScopeExit
      release_wal_dir(  // Each individually used file will be locked, so at the end, the dir can be released
          [&locker_acc, &wal_dir = storage->recovery_.wal_directory_]() { (void)locker_acc.RemovePath(wal_dir); });
  // Get WAL files, ordered by timestamp, from oldest to newest
  auto wal_files = durability::GetWalFiles(storage->recovery_.wal_directory_, storage->uuid_, current_wal_seq_num);
  MG_ASSERT(wal_files, "Wal files could not be loaded");
  if (transaction_guard.owns_lock())
    transaction_guard.unlock();  // In case we didn't have a current wal file, we can unlock only now since there is no
                                 // guarantee what we'll see after we add the wal file

  // Read in snapshot files
  (void)locker_acc.AddPath(storage->recovery_.snapshot_directory_);  // Protect all snapshots from being deleted
  utils::OnScopeExit
      release_snapshot_dir(  // Each individually used file will be locked, so at the end, the dir can be released
          [&locker_acc, &snapshot_dir = storage->recovery_.snapshot_directory_]() {
            (void)locker_acc.RemovePath(snapshot_dir);
          });
  auto snapshot_files = durability::GetSnapshotFiles(storage->recovery_.snapshot_directory_, storage->uuid_);
  std::optional<durability::SnapshotDurabilityInfo> latest_snapshot{};
  if (!snapshot_files.empty()) {
    latest_snapshot.emplace(std::move(snapshot_files.back()));
  }

  auto add_snapshot = [&]() {
    if (!latest_snapshot) return;
    const auto lock_success = locker_acc.AddPath(latest_snapshot->path);
    MG_ASSERT(!lock_success.HasError(), "Tried to lock a nonexistent snapshot path.");
    recovery_steps.emplace_back(std::in_place_type_t<RecoverySnapshot>{}, std::move(latest_snapshot->path));
  };

  // Check if we need the snapshot or if the WAL chain is enough
  if (!wal_files->empty()) {
    // Find WAL chain that contains the replica's commit timestamp
    auto wal_chain_it = wal_files->rbegin();
    auto prev_seq{wal_chain_it->seq_num};
    for (; wal_chain_it != wal_files->rend(); ++wal_chain_it) {
      if (prev_seq - wal_chain_it->seq_num > 1) {
        // Broken chain, must have a snapshot that covers the missing commits
        if (wal_chain_it->from_timestamp > replica_commit) {
          // Chain does not go far enough, check the snapshot
          MG_ASSERT(latest_snapshot, "Missing snapshot, while the WAL chain does not cover enough time.");
          // Check for a WAL file that connects the snapshot to the chain
          for (;; --wal_chain_it) {
            // Going from the newest WAL files, find the first one that has a from_timestamp older than the snapshot
            // NOTE: It could be that the only WAL needed is the current one
            if (wal_chain_it->from_timestamp <= latest_snapshot->start_timestamp) {
              break;
            }
            if (wal_chain_it == wal_files->rbegin()) break;
          }
          // Add snapshot to recovery steps
          add_snapshot();
        }
        break;
      }

      if (wal_chain_it->to_timestamp <= replica_commit) {
        // Got to a WAL that is older than what we need to recover the replica
        break;
      }

      prev_seq = wal_chain_it->seq_num;
    }

    // Copy and lock the chain part we need, from oldest to newest
    RecoveryWals rw{};
    rw.reserve(std::distance(wal_files->rbegin(), wal_chain_it));
    for (auto wal_it = wal_chain_it.base(); wal_it != wal_files->end(); ++wal_it) {
      const auto lock_success = locker_acc.AddPath(wal_it->path);
      MG_ASSERT(!lock_success.HasError(), "Tried to lock a nonexistent WAL path.");
      rw.emplace_back(std::move(wal_it->path));
    }
    if (!rw.empty()) {
      recovery_steps.emplace_back(std::in_place_type_t<RecoveryWals>{}, std::move(rw));
    }

  } else {
    // No WAL chain, check if we need the snapshot
    if (!current_wal_from_timestamp || replica_commit < *current_wal_from_timestamp) {
      // No current wal or current wal too new
      add_snapshot();
    }
  }

  // In all cases, if we have a current wal file we need to use it.
  if (current_wal_seq_num) {
    // NOTE: File not handled directly, so no need to lock it
    recovery_steps.emplace_back(RecoveryCurrentWal{*current_wal_seq_num});
  }

  return recovery_steps;
}
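// A worked example of the step selection above (illustration only, the numbers are made up):
// say the replica last committed at timestamp 120, the finalized WALs are seq 4 covering
// timestamps [100..150] and seq 5 covering [151..200], and the current WAL has seq 6. The chain
// 5 -> 4 is unbroken and seq 4 starts below 120, so the loop never reaches the snapshot branch and
// the resulting steps are RecoveryWals{wal4, wal5} followed by RecoveryCurrentWal{6}. The snapshot
// is only added when the sequence numbers have a gap and the WAL on the older side of the gap
// starts after the replica's commit timestamp, or when there are no finalized WALs and the current
// WAL is missing or starts after the replica's commit.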

} // namespace memgraph::storage
32
src/storage/v2/inmemory/replication/recovery.hpp
Normal file
@ -0,0 +1,32 @@
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#pragma once

#include "storage/v2/durability/durability.hpp"
#include "storage/v2/replication/recovery.hpp"
#include "storage/v2/replication/replication_client.hpp"

namespace memgraph::storage {
class InMemoryStorage;

////// ReplicationClient Helpers //////

replication::WalFilesRes TransferWalFiles(std::string db_name, rpc::Client &client,
                                          const std::vector<std::filesystem::path> &wal_files);

replication::SnapshotRes TransferSnapshot(std::string db_name, rpc::Client &client, const std::filesystem::path &path);

uint64_t ReplicateCurrentWal(const InMemoryStorage *storage, rpc::Client &client, durability::WalFile const &wal_file);

auto GetRecoverySteps(uint64_t replica_commit, utils::FileRetainer::FileLocker *file_locker,
                      const InMemoryStorage *storage) -> std::vector<RecoveryStep>;

} // namespace memgraph::storage
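// A minimal sketch of how these helpers fit together (an assumption for illustration, not code
// from this diff): a replication client would ask GetRecoverySteps for the ordered step list and
// then dispatch on the RecoveryStep variant, roughly:
//
//   for (auto &step : GetRecoverySteps(replica_commit, &file_locker, storage)) {
//     std::visit(utils::Overloaded{
//                    [&](RecoverySnapshot &snapshot) { /* TransferSnapshot(...) */ },
//                    [&](RecoveryWals &wals) { /* TransferWalFiles(...) */ },
//                    [&](RecoveryCurrentWal &current) { /* ReplicateCurrentWal(...) */ }},
//                step);
//   }
//
// Here utils::Overloaded (from utils/variant_helpers.hpp, included by the .cpp above) and the exact
// call sites are assumptions; the diff itself only shows the declarations listed in this header.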
Some files were not shown because too many files have changed in this diff.